/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */
/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own set where other users might add
 * requests when they don't want to wait for their completion.
 * PtlRPCD will take care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important to never significantly block (esp. on RPCs!) within such
 * completion handler or a deadlock might occur where ptlrpcd enters some
 * callback that attempts to send another RPC and wait for it to return,
 * during which time ptlrpcd is completely blocked, so e.g. if import
 * fails, recovery cannot progress because connection requests are also
 * sent by ptlrpcd.
 *
 * @{
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <linux/libcfs/libcfs.h>

#include <lustre_net.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <cl_object.h>   /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"
/* One of these per CPT. */
struct ptlrpcd {
	int pd_size;
	int pd_index;
	int pd_cpt;
	int pd_cursor;
	int pd_nthreads;
	int pd_groupsize;
	struct ptlrpcd_ctl pd_threads[0];
};
/*
 * max_ptlrpcds is obsolete, but retained to ensure that the kernel
 * module will load on a system where it has been tuned.
 * A value other than 0 implies it was tuned, in which case the value
 * is used to derive a setting for ptlrpcd_per_cpt_max.
 */
static int max_ptlrpcds;
module_param(max_ptlrpcds, int, 0644);
MODULE_PARM_DESC(max_ptlrpcds,
		 "Max ptlrpcd thread count to be started (obsolete).");
/*
 * ptlrpcd_bind_policy is obsolete, but retained to ensure that
 * the kernel module will load on a system where it has been tuned.
 * A value other than 0 implies it was tuned, in which case the value
 * is used to derive a setting for ptlrpcd_partner_group_size.
 */
static int ptlrpcd_bind_policy;
module_param(ptlrpcd_bind_policy, int, 0644);
MODULE_PARM_DESC(ptlrpcd_bind_policy,
		 "Ptlrpcd threads binding mode (obsolete).");
/*
 * ptlrpcd_per_cpt_max: The maximum number of ptlrpcd threads to run
 * in a CPT.
 */
static int ptlrpcd_per_cpt_max;
module_param(ptlrpcd_per_cpt_max, int, 0644);
MODULE_PARM_DESC(ptlrpcd_per_cpt_max,
		 "Max ptlrpcd thread count to be started per CPT.");
/*
 * ptlrpcd_partner_group_size: The desired number of threads in each
 * ptlrpcd partner thread group. Default is 2, corresponding to the
 * old PDB_POLICY_PAIR. A negative value makes all ptlrpcd threads in
 * a CPT partners of each other.
 */
static int ptlrpcd_partner_group_size;
module_param(ptlrpcd_partner_group_size, int, 0644);
MODULE_PARM_DESC(ptlrpcd_partner_group_size,
		 "Number of ptlrpcd threads in a partner group.");
/*
 * ptlrpcd_cpts: A CPT string describing the CPU partitions that
 * ptlrpcd threads should run on. Used to make ptlrpcd threads run on
 * a subset of all CPTs.
 *
 * For example:
 *	ptlrpcd_cpts=2
 *	run ptlrpcd threads only on CPT 2.
 *
 *	ptlrpcd_cpts=0-3
 *	run ptlrpcd threads on CPTs 0, 1, 2, and 3.
 *
 *	ptlrpcd_cpts=[0-3,5,7]
 *	run ptlrpcd threads on CPTs 0, 1, 2, 3, 5, and 7.
 */
static char *ptlrpcd_cpts;
module_param(ptlrpcd_cpts, charp, 0644);
MODULE_PARM_DESC(ptlrpcd_cpts,
		 "CPU partitions ptlrpcd threads should run in");
/* ptlrpcds_cpt_idx maps cpt numbers to an index in the ptlrpcds array. */
static int *ptlrpcds_cpt_idx;

/* ptlrpcds_num is the number of entries in the ptlrpcds array. */
static int ptlrpcds_num;
static struct ptlrpcd **ptlrpcds;
/*
 * In addition to the regular thread pool above, there is a single
 * global recovery thread. Recovery isn't critical for performance,
 * and doesn't block, but must always be able to proceed, and it is
 * possible that all normal ptlrpcd threads are blocked. Hence the
 * need for a dedicated thread.
 */
static struct ptlrpcd_ctl ptlrpcd_rcv;

struct mutex ptlrpcd_mutex;
static int ptlrpcd_users;
void ptlrpcd_wake(struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *set = req->rq_set;

	wake_up(&set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req)
{
	struct ptlrpcd *pd;
	int cpt;
	int idx;

	if (req && req->rq_send_state != LUSTRE_IMP_FULL)
		return &ptlrpcd_rcv;

	cpt = cfs_cpt_current(cfs_cpt_table, 1);
	if (!ptlrpcds_cpt_idx)
		idx = cpt;
	else
		idx = ptlrpcds_cpt_idx[cpt];
	pd = ptlrpcds[idx];

	/* We do not care whether it is strict load balance. */
	idx = pd->pd_cursor;
	if (++idx == pd->pd_nthreads)
		idx = 0;
	pd->pd_cursor = idx;

	return &pd->pd_threads[idx];
}
/**
 * Return transferred RPCs count.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
			       struct ptlrpc_request_set *src)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	int rc = 0;

	spin_lock(&src->set_new_req_lock);
	if (likely(!list_empty(&src->set_new_requests))) {
		list_for_each_safe(pos, tmp, &src->set_new_requests) {
			req = list_entry(pos, struct ptlrpc_request,
					 rq_set_chain);
			req->rq_set = des;
		}
		list_splice_init(&src->set_new_requests, &des->set_requests);
		rc = atomic_read(&src->set_new_count);
		atomic_add(rc, &des->set_remaining);
		atomic_set(&src->set_new_count, 0);
	}
	spin_unlock(&src->set_new_req_lock);
	return rc;
}
/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req)
{
	struct ptlrpcd_ctl *pc;

	if (req->rq_reqmsg)
		lustre_msg_set_jobid(req->rq_reqmsg, NULL);

	spin_lock(&req->rq_lock);
	if (req->rq_invalid_rqset) {
		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
						     back_to_sleep, NULL);

		req->rq_invalid_rqset = 0;
		spin_unlock(&req->rq_lock);
		l_wait_event(req->rq_set_waitq, !req->rq_set, &lwi);
	} else if (req->rq_set) {
		/* If we have a valid "rq_set", just reuse it to avoid double
		 * linkage.
		 */
		LASSERT(req->rq_phase == RQ_PHASE_NEW);
		LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

		/* ptlrpc_check_set will decrease the count */
		atomic_inc(&req->rq_set->set_remaining);
		spin_unlock(&req->rq_lock);
		wake_up(&req->rq_set->set_waitq);
		return;
	} else {
		spin_unlock(&req->rq_lock);
	}

	pc = ptlrpcd_select_pc(req);

	DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
		  req, pc->pc_name, pc->pc_index);

	ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);
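/*
 * Caller sketch (illustration only; my_interpret below is hypothetical):
 * an async sender fills in an interpret callback and hands the request
 * to ptlrpcd rather than waiting on it, the way pinger-style RPCs are
 * built and sent elsewhere in ptlrpc:
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *					LUSTRE_OBD_VERSION, OBD_PING);
 *	if (!req)
 *		return -ENOMEM;
 *	req->rq_interpret_reply = my_interpret;	(must never block on RPCs)
 *	ptlrpcd_add_req(req);			(ownership passes to ptlrpcd)
 */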
static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
	atomic_inc(&set->set_refcount);
}
/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	struct ptlrpc_request_set *set = pc->pc_set;
	int rc = 0;
	int rc2;

	if (atomic_read(&set->set_new_count)) {
		spin_lock(&set->set_new_req_lock);
		if (likely(!list_empty(&set->set_new_requests))) {
			list_splice_init(&set->set_new_requests,
					 &set->set_requests);
			atomic_add(atomic_read(&set->set_new_count),
				   &set->set_remaining);
			atomic_set(&set->set_new_count, 0);
			/*
			 * Need to calculate its timeout.
			 */
			rc = 1;
		}
		spin_unlock(&set->set_new_req_lock);
	}

	/* We should call lu_env_refill() before handling new requests to make
	 * sure that the env keys the requests depend on really exist.
	 */
	rc2 = lu_env_refill(env);
	if (rc2 != 0) {
		/*
		 * XXX This is very awkward situation, because
		 * execution can neither continue (request
		 * interpreters assume that env is set up), nor repeat
		 * the loop (as this potentially results in a tight
		 * loop of -ENOMEM's).
		 *
		 * Fortunately, refill only ever does something when
		 * new modules are loaded, i.e., early during boot up.
		 */
		CERROR("Failure to refill session: %d\n", rc2);
		return rc;
	}

	if (atomic_read(&set->set_remaining))
		rc |= ptlrpc_check_set(env, set);

	/* NB: ptlrpc_check_set has already moved completed requests to the
	 * head of set::set_requests
	 */
	list_for_each_safe(pos, tmp, &set->set_requests) {
		req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
		if (req->rq_phase != RQ_PHASE_COMPLETE)
			break;

		list_del_init(&req->rq_set_chain);
		req->rq_set = NULL;
		ptlrpc_req_finished(req);
	}

	if (rc == 0) {
		/*
		 * If new requests have been added, make sure to wake up.
		 */
		rc = atomic_read(&set->set_new_count);

		/* If we have nothing to do, check whether we can take some
		 * work from our partner threads.
		 */
		if (rc == 0 && pc->pc_npartners > 0) {
			struct ptlrpcd_ctl *partner;
			struct ptlrpc_request_set *ps;
			int first = pc->pc_cursor;

			do {
				partner = pc->pc_partners[pc->pc_cursor++];
				if (pc->pc_cursor >= pc->pc_npartners)
					pc->pc_cursor = 0;
				if (!partner)
					continue;

				spin_lock(&partner->pc_lock);
				ps = partner->pc_set;
				if (!ps) {
					spin_unlock(&partner->pc_lock);
					continue;
				}

				ptlrpc_reqset_get(ps);
				spin_unlock(&partner->pc_lock);

				if (atomic_read(&ps->set_new_count)) {
					rc = ptlrpcd_steal_rqset(set, ps);
					if (rc > 0)
						CDEBUG(D_RPCTRACE, "transfer %d async RPCs [%d->%d]\n",
						       rc, partner->pc_index,
						       pc->pc_index);
				}
				ptlrpc_reqset_put(ps);
			} while (rc == 0 && pc->pc_cursor != first);
		}
	}

	return rc;
}
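/*
 * Work-stealing walk-through (illustration): with pc_npartners = 3 and
 * pc_cursor = 1, an idle thread probes partners 1, 2, then 0, stopping
 * at the first one whose set_new_count is non-zero; if all are empty
 * the cursor is back at 1 (== first) and the thread goes back to sleep.
 */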
/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the rpcs and sends them.
 */
static int ptlrpcd(void *arg)
{
	struct ptlrpcd_ctl *pc = arg;
	struct ptlrpc_request_set *set;
	struct lu_context ses = { 0 };
	struct lu_env env = { .le_ses = &ses };
	int rc = 0;
	int exit = 0;

	unshare_fs_struct();
	if (cfs_cpt_bind(cfs_cpt_table, pc->pc_cpt) != 0)
		CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt);

	/*
	 * Allocate the request set after the thread has been bound
	 * above. This is safe because no requests will be queued
	 * until all ptlrpcd threads have confirmed that they have
	 * successfully started.
	 */
	set = ptlrpc_prep_set();
	if (!set) {
		rc = -ENOMEM;
		goto failed;
	}
	spin_lock(&pc->pc_lock);
	pc->pc_set = set;
	spin_unlock(&pc->pc_lock);

	/*
	 * XXX So far only "client" ptlrpcd uses an environment. In
	 * the future, ptlrpcd thread (or a thread-set) has to be given
	 * an argument, describing its "scope".
	 */
	rc = lu_context_init(&env.le_ctx,
			     LCT_CL_THREAD | LCT_REMEMBER | LCT_NOREF);
	if (rc == 0) {
		rc = lu_context_init(env.le_ses,
				     LCT_SESSION | LCT_REMEMBER | LCT_NOREF);
		if (rc != 0)
			lu_context_fini(&env.le_ctx);
	}
	if (rc != 0)
		goto failed;

	complete(&pc->pc_starting);

	/*
	 * This mainloop strongly resembles ptlrpc_set_wait() except that our
	 * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
	 * there are requests in the set. New requests come in on the set's
	 * new_req_list and ptlrpcd_check() moves them into the set.
	 */
	do {
		struct l_wait_info lwi;
		int timeout;

		timeout = ptlrpc_set_next_timeout(set);
		lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
				  ptlrpc_expired_set, set);

		lu_context_enter(&env.le_ctx);
		lu_context_enter(env.le_ses);
		l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
		lu_context_exit(&env.le_ctx);
		lu_context_exit(env.le_ses);

		/*
		 * Abort inflight rpcs for forced stop case.
		 */
		if (test_bit(LIOD_STOP, &pc->pc_flags)) {
			if (test_bit(LIOD_FORCE, &pc->pc_flags))
				ptlrpc_abort_set(set);
			exit++;
		}

		/*
		 * Let's make one more loop to make sure that ptlrpcd_check()
		 * copied all raced new rpcs into the set so we can kill them.
		 */
	} while (exit < 2);

	/*
	 * Wait for inflight requests to drain.
	 */
	if (!list_empty(&set->set_requests))
		ptlrpc_set_wait(set);
	lu_context_fini(&env.le_ctx);
	lu_context_fini(env.le_ses);

	complete(&pc->pc_finishing);

	return 0;

failed:
	pc->pc_error = rc;
	complete(&pc->pc_starting);
	return rc;
}
static void ptlrpcd_ctl_init(struct ptlrpcd_ctl *pc, int index, int cpt)
{
	pc->pc_index = index;
	pc->pc_cpt = cpt;
	init_completion(&pc->pc_starting);
	init_completion(&pc->pc_finishing);
	spin_lock_init(&pc->pc_lock);

	if (index < 0) {
		/* Recovery thread. */
		snprintf(pc->pc_name, sizeof(pc->pc_name), "ptlrpcd_rcv");
	} else {
		/* Regular thread. */
		snprintf(pc->pc_name, sizeof(pc->pc_name),
			 "ptlrpcd_%02d_%02d", cpt, index);
	}
}
/* XXX: We want multiple CPU cores to share the async RPC load. So we
 * start many ptlrpcd threads. We also want to reduce the ptlrpcd
 * overhead caused by data transfer across CPU cores. So we bind
 * all ptlrpcd threads to a CPT, in the expectation that CPTs
 * will be defined in a way that matches these boundaries. Within
 * a CPT a ptlrpcd thread can be scheduled on any available core.
 *
 * Each ptlrpcd thread has its own request queue. This can cause
 * response delay if the thread is already busy. To help with
 * this we define partner threads: these are other threads bound
 * to the same CPT which will check for work in each other's
 * request queues if they have no work to do.
 *
 * The desired number of partner threads can be tuned by setting
 * ptlrpcd_partner_group_size. The default is to create pairs of
 * partner threads.
 */
static int ptlrpcd_partners(struct ptlrpcd *pd, int index)
{
	struct ptlrpcd_ctl *pc;
	struct ptlrpcd_ctl **ppc;
	int first;
	int i;
	int rc = 0;
	int size;

	LASSERT(index >= 0 && index < pd->pd_nthreads);
	pc = &pd->pd_threads[index];
	pc->pc_npartners = pd->pd_groupsize - 1;

	if (pc->pc_npartners <= 0)
		goto out;

	size = sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners;
	pc->pc_partners = kzalloc_node(size, GFP_NOFS,
				       cfs_cpt_spread_node(cfs_cpt_table,
							   pc->pc_cpt));
	if (!pc->pc_partners) {
		pc->pc_npartners = 0;
		rc = -ENOMEM;
		goto out;
	}

	first = index - index % pd->pd_groupsize;
	ppc = pc->pc_partners;
	for (i = first; i < first + pd->pd_groupsize; i++) {
		if (i != index)
			*ppc++ = &pd->pd_threads[i];
	}
out:
	return rc;
}
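/*
 * Grouping walk-through (illustration): with pd_groupsize = 4, thread
 * index 5 starts its group at first = 5 - 5 % 4 = 4, so its partner
 * array holds threads 4, 6, and 7 (itself excluded).
 */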
int ptlrpcd_start(struct ptlrpcd_ctl *pc)
{
	struct task_struct *task;
	int rc = 0;

	/*
	 * Do not allow starting a second thread for one pc.
	 */
	if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Starting second thread (%s) for same pc %p\n",
		      pc->pc_name, pc);
		return 0;
	}

	task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);
		goto out_set;
	}

	wait_for_completion(&pc->pc_starting);
	rc = pc->pc_error;
	if (rc != 0)
		goto out_set;

	return 0;

out_set:
	if (pc->pc_set) {
		struct ptlrpc_request_set *set = pc->pc_set;

		spin_lock(&pc->pc_lock);
		pc->pc_set = NULL;
		spin_unlock(&pc->pc_lock);
		ptlrpc_set_destroy(set);
	}
	clear_bit(LIOD_START, &pc->pc_flags);
	return rc;
}
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		return;
	}

	set_bit(LIOD_STOP, &pc->pc_flags);
	if (force)
		set_bit(LIOD_FORCE, &pc->pc_flags);
	wake_up(&pc->pc_set->set_waitq);
}
void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
	struct ptlrpc_request_set *set = pc->pc_set;

	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		goto out;
	}

	wait_for_completion(&pc->pc_finishing);

	spin_lock(&pc->pc_lock);
	pc->pc_set = NULL;
	spin_unlock(&pc->pc_lock);
	ptlrpc_set_destroy(set);

	clear_bit(LIOD_START, &pc->pc_flags);
	clear_bit(LIOD_STOP, &pc->pc_flags);
	clear_bit(LIOD_FORCE, &pc->pc_flags);

out:
	if (pc->pc_npartners > 0) {
		LASSERT(pc->pc_partners);

		kfree(pc->pc_partners);
		pc->pc_partners = NULL;
	}
	pc->pc_npartners = 0;
	pc->pc_error = 0;
}
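/*
 * Lifecycle sketch (illustration only): each control block passes
 * through ptlrpcd_ctl_init() -> ptlrpcd_start() -> ptlrpcd_stop() ->
 * ptlrpcd_free(). Note that stop only signals the thread; free blocks
 * on pc_finishing and then releases the set and the partner array.
 */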
static void ptlrpcd_fini(void)
{
	int i;
	int j;

	if (ptlrpcds) {
		for (i = 0; i < ptlrpcds_num; i++) {
			if (!ptlrpcds[i])
				break;
			for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
				ptlrpcd_stop(&ptlrpcds[i]->pd_threads[j], 0);
			for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
				ptlrpcd_free(&ptlrpcds[i]->pd_threads[j]);
			kfree(ptlrpcds[i]);
			ptlrpcds[i] = NULL;
		}
		kfree(ptlrpcds);
	}
	ptlrpcds_num = 0;

	ptlrpcd_stop(&ptlrpcd_rcv, 0);
	ptlrpcd_free(&ptlrpcd_rcv);

	kfree(ptlrpcds_cpt_idx);
	ptlrpcds_cpt_idx = NULL;
}
static int ptlrpcd_init(void)
{
	int nthreads;
	int groupsize;
	int size;
	int i;
	int j;
	int rc = 0;
	struct cfs_cpt_table *cptable;
	__u32 *cpts = NULL;
	int ncpts;
	int cpt;
	struct ptlrpcd *pd;

	/*
	 * Determine the CPTs that ptlrpcd threads will run on.
	 */
	cptable = cfs_cpt_table;
	ncpts = cfs_cpt_number(cptable);
	if (ptlrpcd_cpts) {
		struct cfs_expr_list *el;

		size = ncpts * sizeof(ptlrpcds_cpt_idx[0]);
		ptlrpcds_cpt_idx = kzalloc(size, GFP_KERNEL);
		if (!ptlrpcds_cpt_idx) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cfs_expr_list_parse(ptlrpcd_cpts,
					 strlen(ptlrpcd_cpts),
					 0, ncpts - 1, &el);
		if (rc != 0) {
			CERROR("ptlrpcd_cpts: invalid CPT pattern string: %s",
			       ptlrpcd_cpts);
			rc = -EINVAL;
			goto out;
		}

		rc = cfs_expr_list_values(el, ncpts, &cpts);
		cfs_expr_list_free(el);
		if (rc <= 0) {
			CERROR("ptlrpcd_cpts: failed to parse CPT array %s: %d\n",
			       ptlrpcd_cpts, rc);
			if (rc == 0)
				rc = -EINVAL;
			goto out;
		}

		/*
		 * Create the cpt-to-index map. When there is no match
		 * in the cpt table, pick a cpt at random. This could
		 * be changed to take the topology of the system into
		 * account.
		 */
		for (cpt = 0; cpt < ncpts; cpt++) {
			for (i = 0; i < rc; i++)
				if (cpts[i] == cpt)
					break;
			if (i >= rc)
				i = cpt % rc;
			ptlrpcds_cpt_idx[cpt] = i;
		}

		/*
		 * Note: cpts is consulted again below when mapping each
		 * thread group back to its CPT number, so it is only
		 * freed at "out:".
		 */
		ncpts = rc;
	}
	ptlrpcds_num = ncpts;

	size = ncpts * sizeof(ptlrpcds[0]);
	ptlrpcds = kzalloc(size, GFP_KERNEL);
	if (!ptlrpcds) {
		rc = -ENOMEM;
		goto out;
	}
	/*
	 * The max_ptlrpcds parameter is obsolete, but do something
	 * sane if it has been tuned, and complain if
	 * ptlrpcd_per_cpt_max has also been tuned.
	 */
	if (max_ptlrpcds != 0) {
		CWARN("max_ptlrpcds is obsolete.\n");
		if (ptlrpcd_per_cpt_max == 0) {
			ptlrpcd_per_cpt_max = max_ptlrpcds / ncpts;
			/* Round up if there is a remainder. */
			if (max_ptlrpcds % ncpts != 0)
				ptlrpcd_per_cpt_max++;
			CWARN("Setting ptlrpcd_per_cpt_max = %d\n",
			      ptlrpcd_per_cpt_max);
		} else {
			CWARN("ptlrpcd_per_cpt_max is also set!\n");
		}
	}
	/*
	 * The ptlrpcd_bind_policy parameter is obsolete, but do
	 * something sane if it has been tuned, and complain if
	 * ptlrpcd_partner_group_size is also tuned.
	 */
	if (ptlrpcd_bind_policy != 0) {
		CWARN("ptlrpcd_bind_policy is obsolete.\n");
		if (ptlrpcd_partner_group_size == 0) {
			switch (ptlrpcd_bind_policy) {
			case 1: /* PDB_POLICY_NONE */
			case 2: /* PDB_POLICY_FULL */
				ptlrpcd_partner_group_size = 1;
				break;
			case 3: /* PDB_POLICY_PAIR */
				ptlrpcd_partner_group_size = 2;
				break;
			case 4: /* PDB_POLICY_NEIGHBOR */
#ifdef CONFIG_NUMA
				ptlrpcd_partner_group_size = -1; /* CPT */
#else
				ptlrpcd_partner_group_size = 3; /* Triplets */
#endif
				break;
			default: /* Illegal value, use the default. */
				ptlrpcd_partner_group_size = 2;
				break;
			}
			CWARN("Setting ptlrpcd_partner_group_size = %d\n",
			      ptlrpcd_partner_group_size);
		} else {
			CWARN("ptlrpcd_partner_group_size is also set!\n");
		}
	}
	if (ptlrpcd_partner_group_size == 0)
		ptlrpcd_partner_group_size = 2;
	else if (ptlrpcd_partner_group_size < 0)
		ptlrpcd_partner_group_size = -1;
	else if (ptlrpcd_per_cpt_max > 0 &&
		 ptlrpcd_partner_group_size > ptlrpcd_per_cpt_max)
		ptlrpcd_partner_group_size = ptlrpcd_per_cpt_max;
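	/*
	 * Example (illustration): ptlrpcd_partner_group_size=8 combined
	 * with ptlrpcd_per_cpt_max=3 is clamped to 3, so a partner group
	 * never spans more threads than a CPT is allowed to run.
	 */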
	/*
	 * Start the recovery thread first.
	 */
	set_bit(LIOD_RECOVERY, &ptlrpcd_rcv.pc_flags);
	ptlrpcd_ctl_init(&ptlrpcd_rcv, -1, CFS_CPT_ANY);
	rc = ptlrpcd_start(&ptlrpcd_rcv);
	if (rc < 0)
		goto out;

	for (i = 0; i < ncpts; i++) {
		if (!cpts)
			cpt = i;
		else
			cpt = cpts[i];

		nthreads = cfs_cpt_weight(cptable, cpt);
		if (ptlrpcd_per_cpt_max > 0 && ptlrpcd_per_cpt_max < nthreads)
			nthreads = ptlrpcd_per_cpt_max;
		if (nthreads < 2)
			nthreads = 2;
		if (ptlrpcd_partner_group_size <= 0) {
			groupsize = nthreads;
		} else if (nthreads <= ptlrpcd_partner_group_size) {
			groupsize = nthreads;
		} else {
			groupsize = ptlrpcd_partner_group_size;
			if (nthreads % groupsize != 0)
				nthreads += groupsize - (nthreads % groupsize);
		}
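		/*
		 * Rounding example (illustration): a CPT with 6 CPUs and
		 * ptlrpcd_partner_group_size=4 gives nthreads = 6, which
		 * is rounded up to 8 so that the CPT holds two complete
		 * partner groups of 4.
		 */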
		size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
		pd = kzalloc_node(size, GFP_NOFS,
				  cfs_cpt_spread_node(cfs_cpt_table, cpt));
		if (!pd) {
			rc = -ENOMEM;
			goto out;
		}
		pd->pd_size = size;
		pd->pd_index = i;
		pd->pd_cpt = cpt;
		pd->pd_nthreads = nthreads;
		pd->pd_groupsize = groupsize;
		ptlrpcds[i] = pd;

		/*
		 * The ptlrpcd threads in a partner group can access
		 * each other's struct ptlrpcd_ctl, so these must be
		 * initialized before any thread is started.
		 */
		for (j = 0; j < nthreads; j++) {
			ptlrpcd_ctl_init(&pd->pd_threads[j], j, cpt);
			rc = ptlrpcd_partners(pd, j);
			if (rc < 0)
				goto out;
		}
		/* XXX: We start nthreads ptlrpc daemons.
		 * Each of them can process any non-recovery
		 * async RPC to improve overall async RPC
		 * efficiency.
		 *
		 * But there are some issues with async I/O RPCs
		 * and async non-I/O RPCs processed in the same
		 * set under some cases. The ptlrpcd may be
		 * blocked by some async I/O RPC(s), which then
		 * prevents other async non-I/O RPC(s) from being
		 * processed in time.
		 *
		 * Maybe we should distinguish blocked async RPCs
		 * from non-blocked async RPCs, and process them
		 * in different ptlrpcd sets to avoid unnecessary
		 * dependency. But how to distribute async RPC
		 * load among all the ptlrpc daemons becomes
		 * another trouble.
		 */
		for (j = 0; j < nthreads; j++) {
			rc = ptlrpcd_start(&pd->pd_threads[j]);
			if (rc < 0)
				goto out;
		}
	}

out:
	if (cpts)
		cfs_expr_list_values_free(cpts, ncpts);
	if (rc != 0)
		ptlrpcd_fini();

	return rc;
}
int ptlrpcd_addref(void)
{
	int rc = 0;

	mutex_lock(&ptlrpcd_mutex);
	if (++ptlrpcd_users == 1) {
		rc = ptlrpcd_init();
		if (rc < 0)
			ptlrpcd_users--;
	}
	mutex_unlock(&ptlrpcd_mutex);
	return rc;
}
EXPORT_SYMBOL(ptlrpcd_addref);
void ptlrpcd_decref(void)
{
	mutex_lock(&ptlrpcd_mutex);
	if (--ptlrpcd_users == 0)
		ptlrpcd_fini();
	mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
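/*
 * Reference-counting sketch (illustration only): users bracket their
 * use of the ptlrpcd pool, and the first/last reference starts/stops
 * the threads:
 *
 *	rc = ptlrpcd_addref();
 *	if (rc < 0)
 *		return rc;
 *	...queue requests with ptlrpcd_add_req()...
 *	ptlrpcd_decref();
 */

/** @} */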