2 * User interface for Resource Allocation in Resource Director Technology (RDT)
4 * Copyright (C) 2016 Intel Corporation
6 * Author: Fenghua Yu <fenghua.yu@intel.com>
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * More information about RDT can be found in the Intel (R) x86 Architecture
18 * Software Developer Manual.
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/cacheinfo.h>
24 #include <linux/cpu.h>
25 #include <linux/debugfs.h>
27 #include <linux/sysfs.h>
28 #include <linux/kernfs.h>
29 #include <linux/seq_buf.h>
30 #include <linux/seq_file.h>
31 #include <linux/sched/signal.h>
32 #include <linux/sched/task.h>
33 #include <linux/slab.h>
34 #include <linux/task_work.h>
36 #include <uapi/linux/magic.h>
38 #include <asm/intel_rdt_sched.h>
39 #include "intel_rdt.h"
41 DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
42 DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
43 DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
44 static struct kernfs_root *rdt_root;
45 struct rdtgroup rdtgroup_default;
46 LIST_HEAD(rdt_all_groups);
48 /* Kernel fs node for "info" directory under root */
49 static struct kernfs_node *kn_info;
51 /* Kernel fs node for "mon_groups" directory under root */
52 static struct kernfs_node *kn_mongrp;
54 /* Kernel fs node for "mon_data" directory under root */
55 static struct kernfs_node *kn_mondata;
57 static struct seq_buf last_cmd_status;
58 static char last_cmd_status_buf[512];
60 struct dentry *debugfs_resctrl;
62 void rdt_last_cmd_clear(void)
64 lockdep_assert_held(&rdtgroup_mutex);
65 seq_buf_clear(&last_cmd_status);
68 void rdt_last_cmd_puts(const char *s)
70 lockdep_assert_held(&rdtgroup_mutex);
71 seq_buf_puts(&last_cmd_status, s);
74 void rdt_last_cmd_printf(const char *fmt, ...)
79 lockdep_assert_held(&rdtgroup_mutex);
80 seq_buf_vprintf(&last_cmd_status, fmt, ap);
85 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
86 * we can keep a bitmap of free CLOSIDs in a single integer.
88 * Using a global CLOSID across all resources has some advantages and some drawbacks:
90 * + We can simply set "current->closid" to assign a task to a resource group.
92 * + Context switch code can avoid extra memory references deciding which
93 * CLOSID to load into the PQR_ASSOC MSR
94 * - We give up some options in configuring resource groups across multi-socket systems.
96 * - Our choices on how to configure each resource become progressively more
97 * limited as the number of resources grows.
99 static int closid_free_map;
100 static int closid_free_map_len;
102 int closids_supported(void)
104 return closid_free_map_len;
107 static void closid_init(void)
109 struct rdt_resource *r;
110 int rdt_min_closid = 32;
112 /* Compute rdt_min_closid across all resources */
113 for_each_alloc_enabled_rdt_resource(r)
114 rdt_min_closid = min(rdt_min_closid, r->num_closid);
116 closid_free_map = BIT_MASK(rdt_min_closid) - 1;
118 /* CLOSID 0 is always reserved for the default group */
119 closid_free_map &= ~1;
120 closid_free_map_len = rdt_min_closid;
123 static int closid_alloc(void)
125 u32 closid = ffs(closid_free_map);
130 closid_free_map &= ~(1 << closid);
135 void closid_free(int closid)
137 closid_free_map |= 1 << closid;
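/*
 * A minimal standalone user-space sketch of the same single-word bitmap
 * allocator, assuming POSIX ffs() from <strings.h> (returns 1 + index of
 * the lowest set bit, 0 if none) and fewer than 32 ids. The demo_* names
 * are hypothetical, not kernel API.
 */
#include <strings.h>

static int demo_free_map;		/* bit i set => id i is free */

static void demo_map_init(int nids)
{
	/* ids 0 .. nids-1 exist; id 0 stays reserved, as CLOSID 0 does */
	demo_free_map = (int)((1U << nids) - 1) & ~1;
}

static int demo_map_alloc(void)
{
	int id = ffs(demo_free_map);

	if (id == 0)
		return -1;		/* map exhausted */
	id--;				/* ffs() is 1-based */
	demo_free_map &= ~(1 << id);
	return id;
}

static void demo_map_free(int id)
{
	demo_free_map |= 1 << id;
}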
141 * closid_allocated - test if provided closid is in use
142 * @closid: closid to be tested
144 * Return: true if @closid is currently associated with a resource group,
145 * false if @closid is free
147 static bool closid_allocated(unsigned int closid)
149 return (closid_free_map & (1 << closid)) == 0;
153 * rdtgroup_mode_by_closid - Return mode of resource group with closid
154 * @closid: closid of the resource group
156 * Each resource group is associated with a @closid. Here the mode
157 * of a resource group can be queried by searching for it using its closid.
159 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
161 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
163 struct rdtgroup *rdtgrp;
165 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
166 if (rdtgrp->closid == closid)
170 return RDT_NUM_MODES;
173 static const char * const rdt_mode_str[] = {
174 [RDT_MODE_SHAREABLE] = "shareable",
175 [RDT_MODE_EXCLUSIVE] = "exclusive",
176 [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
177 [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
181 * rdtgroup_mode_str - Return the string representation of mode
182 * @mode: the resource group mode as &enum rdtgroup_mode
184 * Return: string representation of valid mode, "unknown" otherwise
186 static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
188 if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
191 return rdt_mode_str[mode];
194 /* set uid and gid of rdtgroup dirs and files to that of the creator */
195 static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
197 struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
198 .ia_uid = current_fsuid(),
199 .ia_gid = current_fsgid(), };
201 if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
202 gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
205 return kernfs_setattr(kn, &iattr);
208 static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
210 struct kernfs_node *kn;
213 kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
214 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
215 0, rft->kf_ops, rft, NULL, NULL);
219 ret = rdtgroup_kn_set_ugid(kn);
228 static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
230 struct kernfs_open_file *of = m->private;
231 struct rftype *rft = of->kn->priv;
234 return rft->seq_show(of, m, arg);
238 static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
239 size_t nbytes, loff_t off)
241 struct rftype *rft = of->kn->priv;
244 return rft->write(of, buf, nbytes, off);
249 static struct kernfs_ops rdtgroup_kf_single_ops = {
250 .atomic_write_len = PAGE_SIZE,
251 .write = rdtgroup_file_write,
252 .seq_show = rdtgroup_seqfile_show,
255 static struct kernfs_ops kf_mondata_ops = {
256 .atomic_write_len = PAGE_SIZE,
257 .seq_show = rdtgroup_mondata_show,
260 static bool is_cpu_list(struct kernfs_open_file *of)
262 struct rftype *rft = of->kn->priv;
264 return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
267 static int rdtgroup_cpus_show(struct kernfs_open_file *of,
268 struct seq_file *s, void *v)
270 struct rdtgroup *rdtgrp;
271 struct cpumask *mask;
274 rdtgrp = rdtgroup_kn_lock_live(of->kn);
277 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
278 if (!rdtgrp->plr->d) {
279 rdt_last_cmd_clear();
280 rdt_last_cmd_puts("Cache domain offline\n");
283 mask = &rdtgrp->plr->d->cpu_mask;
284 seq_printf(s, is_cpu_list(of) ?
285 "%*pbl\n" : "%*pb\n",
286 cpumask_pr_args(mask));
289 seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
290 cpumask_pr_args(&rdtgrp->cpu_mask));
295 rdtgroup_kn_unlock(of->kn);
301 * This is safe against intel_rdt_sched_in() called from __switch_to()
302 * because __switch_to() is executed with interrupts disabled. A local call
303 * from update_closid_rmid() is protected against __switch_to() because
304 * preemption is disabled.
306 static void update_cpu_closid_rmid(void *info)
308 struct rdtgroup *r = info;
311 this_cpu_write(pqr_state.default_closid, r->closid);
312 this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
316 * We cannot unconditionally write the MSR because the current
317 * executing task might have its own closid selected. Just reuse
318 * the context switch code.
320 intel_rdt_sched_in();
324 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
326 * Per task closids/rmids must have been set up before calling this function.
329 update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
333 if (cpumask_test_cpu(cpu, cpu_mask))
334 update_cpu_closid_rmid(r);
335 smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
339 static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
340 cpumask_var_t tmpmask)
342 struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
343 struct list_head *head;
345 /* Check whether cpus belong to parent ctrl group */
346 cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
347 if (cpumask_weight(tmpmask)) {
348 rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
352 /* Check whether cpus are dropped from this group */
353 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
354 if (cpumask_weight(tmpmask)) {
355 /* Give any dropped cpus to parent rdtgroup */
356 cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
357 update_closid_rmid(tmpmask, prgrp);
361 * If we added cpus, remove them from previous group that owned them
362 * and update per-cpu rmid
364 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
365 if (cpumask_weight(tmpmask)) {
366 head = &prgrp->mon.crdtgrp_list;
367 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
370 cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
373 update_closid_rmid(tmpmask, rdtgrp);
376 /* Done pushing/pulling - update this group with new mask */
377 cpumask_copy(&rdtgrp->cpu_mask, newmask);
382 static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
384 struct rdtgroup *crgrp;
386 cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
387 /* update the child mon group masks as well */
388 list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
389 cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
392 static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
393 cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
395 struct rdtgroup *r, *crgrp;
396 struct list_head *head;
398 /* Check whether cpus are dropped from this group */
399 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
400 if (cpumask_weight(tmpmask)) {
401 /* Can't drop from default group */
402 if (rdtgrp == &rdtgroup_default) {
403 rdt_last_cmd_puts("Can't drop CPUs from default group\n");
407 /* Give any dropped cpus to rdtgroup_default */
408 cpumask_or(&rdtgroup_default.cpu_mask,
409 &rdtgroup_default.cpu_mask, tmpmask);
410 update_closid_rmid(tmpmask, &rdtgroup_default);
414 * If we added cpus, remove them from previous group and
415 * the prev group's child groups that owned them
416 * and update per-cpu closid/rmid.
418 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
419 if (cpumask_weight(tmpmask)) {
420 list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
423 cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
424 if (cpumask_weight(tmpmask1))
425 cpumask_rdtgrp_clear(r, tmpmask1);
427 update_closid_rmid(tmpmask, rdtgrp);
430 /* Done pushing/pulling - update this group with new mask */
431 cpumask_copy(&rdtgrp->cpu_mask, newmask);
434 * Clear child mon group masks since there is a new parent mask
435 * now and update the rmid for the cpus the child lost.
437 head = &rdtgrp->mon.crdtgrp_list;
438 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
439 cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
440 update_closid_rmid(tmpmask, rdtgrp);
441 cpumask_clear(&crgrp->cpu_mask);
447 static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
448 char *buf, size_t nbytes, loff_t off)
450 cpumask_var_t tmpmask, newmask, tmpmask1;
451 struct rdtgroup *rdtgrp;
457 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
459 if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
460 free_cpumask_var(tmpmask);
463 if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
464 free_cpumask_var(tmpmask);
465 free_cpumask_var(newmask);
469 rdtgrp = rdtgroup_kn_lock_live(of->kn);
470 rdt_last_cmd_clear();
473 rdt_last_cmd_puts("directory was removed\n");
477 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
478 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
480 rdt_last_cmd_puts("pseudo-locking in progress\n");
485 ret = cpulist_parse(buf, newmask);
487 ret = cpumask_parse(buf, newmask);
490 rdt_last_cmd_puts("bad cpu list/mask\n");
494 /* check that user didn't specify any offline cpus */
495 cpumask_andnot(tmpmask, newmask, cpu_online_mask);
496 if (cpumask_weight(tmpmask)) {
498 rdt_last_cmd_puts("can only assign online cpus\n");
502 if (rdtgrp->type == RDTCTRL_GROUP)
503 ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
504 else if (rdtgrp->type == RDTMON_GROUP)
505 ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
510 rdtgroup_kn_unlock(of->kn);
511 free_cpumask_var(tmpmask);
512 free_cpumask_var(newmask);
513 free_cpumask_var(tmpmask1);
515 return ret ?: nbytes;
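/*
 * Usage note: the write handler above backs the "cpus" and "cpus_list"
 * files (see Documentation/x86/intel_rdt_ui.txt). For example:
 *
 *	# echo 0-3 > /sys/fs/resctrl/grp1/cpus_list
 *	# echo 00f > /sys/fs/resctrl/grp1/cpus
 *
 * The group name "grp1" is hypothetical.
 */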
519 * rdtgroup_remove - the helper to remove resource group safely
520 * @rdtgrp: resource group to remove
522 * On resource group creation via a mkdir, an extra kernfs_node reference is
523 * taken to ensure that the rdtgroup structure remains accessible for the
524 * rdtgroup_kn_unlock() calls where it is removed.
526 * Drop the extra reference here, then free the rdtgroup structure.
530 static void rdtgroup_remove(struct rdtgroup *rdtgrp)
532 kernfs_put(rdtgrp->kn);
536 static void _update_task_closid_rmid(void *task)
539 * If the task is still current on this CPU, update PQR_ASSOC MSR.
540 * Otherwise, the MSR is updated when the task is scheduled in.
543 intel_rdt_sched_in();
546 static void update_task_closid_rmid(struct task_struct *t)
548 if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
549 smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
551 _update_task_closid_rmid(t);
554 static int __rdtgroup_move_task(struct task_struct *tsk,
555 struct rdtgroup *rdtgrp)
557 /* If the task is already in rdtgrp, no need to move the task. */
558 if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
559 tsk->rmid == rdtgrp->mon.rmid) ||
560 (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
561 tsk->closid == rdtgrp->mon.parent->closid))
565 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
568 * For ctrl_mon groups, move both closid and rmid.
569 * For monitor groups, can move the tasks only from
570 * their parent CTRL group.
573 if (rdtgrp->type == RDTCTRL_GROUP) {
574 tsk->closid = rdtgrp->closid;
575 tsk->rmid = rdtgrp->mon.rmid;
576 } else if (rdtgrp->type == RDTMON_GROUP) {
577 if (rdtgrp->mon.parent->closid == tsk->closid) {
578 tsk->rmid = rdtgrp->mon.rmid;
580 rdt_last_cmd_puts("Can't move task to different control group\n");
586 * Ensure the task's closid and rmid are written before determining if
587 * the task is current; that check decides whether it will be interrupted.
588 * This pairs with the full barrier between the rq->curr update and
589 * intel_rdt_sched_in() during context switch.
594 * By now, the task's closid and rmid are set. If the task is current
595 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
596 * group go into effect. If the task is not current, the MSR will be
597 * updated when the task is scheduled in.
599 update_task_closid_rmid(tsk);
605 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
608 * Return: 1 if tasks have been assigned to @r, 0 otherwise
610 int rdtgroup_tasks_assigned(struct rdtgroup *r)
612 struct task_struct *p, *t;
615 lockdep_assert_held(&rdtgroup_mutex);
618 for_each_process_thread(p, t) {
619 if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
620 (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
630 static int rdtgroup_task_write_permission(struct task_struct *task,
631 struct kernfs_open_file *of)
633 const struct cred *tcred = get_task_cred(task);
634 const struct cred *cred = current_cred();
638 * Even if we're attaching all tasks in the thread group, we only
639 * need to check permissions on one of them.
641 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
642 !uid_eq(cred->euid, tcred->uid) &&
643 !uid_eq(cred->euid, tcred->suid)) {
644 rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
652 static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
653 struct kernfs_open_file *of)
655 struct task_struct *tsk;
660 tsk = find_task_by_vpid(pid);
663 rdt_last_cmd_printf("No task %d\n", pid);
670 get_task_struct(tsk);
673 ret = rdtgroup_task_write_permission(tsk, of);
675 ret = __rdtgroup_move_task(tsk, rdtgrp);
677 put_task_struct(tsk);
681 static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
682 char *buf, size_t nbytes, loff_t off)
684 struct rdtgroup *rdtgrp;
688 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
690 rdtgrp = rdtgroup_kn_lock_live(of->kn);
692 rdtgroup_kn_unlock(of->kn);
695 rdt_last_cmd_clear();
697 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
698 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
700 rdt_last_cmd_puts("pseudo-locking in progress\n");
704 ret = rdtgroup_move_task(pid, rdtgrp, of);
707 rdtgroup_kn_unlock(of->kn);
709 return ret ?: nbytes;
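/*
 * Usage note: a task is moved into a resource group by writing its pid
 * to the group's "tasks" file, one pid per write. For example (the group
 * name is hypothetical):
 *
 *	# echo 1234 > /sys/fs/resctrl/grp1/tasks
 */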
712 static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
714 struct task_struct *p, *t;
717 for_each_process_thread(p, t) {
718 if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
719 (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid))
720 seq_printf(s, "%d\n", t->pid);
725 static int rdtgroup_tasks_show(struct kernfs_open_file *of,
726 struct seq_file *s, void *v)
728 struct rdtgroup *rdtgrp;
731 rdtgrp = rdtgroup_kn_lock_live(of->kn);
733 show_rdt_tasks(rdtgrp, s);
736 rdtgroup_kn_unlock(of->kn);
741 static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
742 struct seq_file *seq, void *v)
746 mutex_lock(&rdtgroup_mutex);
747 len = seq_buf_used(&last_cmd_status);
749 seq_printf(seq, "%.*s", len, last_cmd_status_buf);
751 seq_puts(seq, "ok\n");
752 mutex_unlock(&rdtgroup_mutex);
756 static int rdt_num_closids_show(struct kernfs_open_file *of,
757 struct seq_file *seq, void *v)
759 struct rdt_resource *r = of->kn->parent->priv;
761 seq_printf(seq, "%d\n", r->num_closid);
765 static int rdt_default_ctrl_show(struct kernfs_open_file *of,
766 struct seq_file *seq, void *v)
768 struct rdt_resource *r = of->kn->parent->priv;
770 seq_printf(seq, "%x\n", r->default_ctrl);
774 static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
775 struct seq_file *seq, void *v)
777 struct rdt_resource *r = of->kn->parent->priv;
779 seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
783 static int rdt_shareable_bits_show(struct kernfs_open_file *of,
784 struct seq_file *seq, void *v)
786 struct rdt_resource *r = of->kn->parent->priv;
788 seq_printf(seq, "%x\n", r->cache.shareable_bits);
793 * rdt_bit_usage_show - Display current usage of resources
795 * A domain is a shared resource that can now be allocated differently. Here
796 * we display the current regions of the domain as an annotated bitmask.
797 * For each domain of this resource its allocation bitmask
798 * is annotated as below to indicate the current usage of the corresponding bit:
799 * 0 - currently unused
800 * X - currently available for sharing and used by software and hardware
801 * H - currently used by hardware only but available for software use
802 * S - currently used and shareable by software only
803 * E - currently used exclusively by one resource group
804 * P - currently pseudo-locked by one resource group
806 static int rdt_bit_usage_show(struct kernfs_open_file *of,
807 struct seq_file *seq, void *v)
809 struct rdt_resource *r = of->kn->parent->priv;
811 * Use unsigned long even though only 32 bits are used to ensure
812 * test_bit() is used safely.
814 unsigned long sw_shareable = 0, hw_shareable = 0;
815 unsigned long exclusive = 0, pseudo_locked = 0;
816 struct rdt_domain *dom;
817 int i, hwb, swb, excl, psl;
818 enum rdtgrp_mode mode;
822 mutex_lock(&rdtgroup_mutex);
823 hw_shareable = r->cache.shareable_bits;
824 list_for_each_entry(dom, &r->domains, list) {
827 ctrl = dom->ctrl_val;
830 seq_printf(seq, "%d=", dom->id);
831 for (i = 0; i < closids_supported(); i++, ctrl++) {
832 if (!closid_allocated(i))
834 mode = rdtgroup_mode_by_closid(i);
836 case RDT_MODE_SHAREABLE:
837 sw_shareable |= *ctrl;
839 case RDT_MODE_EXCLUSIVE:
842 case RDT_MODE_PSEUDO_LOCKSETUP:
844 * RDT_MODE_PSEUDO_LOCKSETUP is possible
845 * here but not included since the CBM
846 * associated with this CLOSID in this mode
847 * is not initialized and no task or cpu can be
848 * assigned this CLOSID.
851 case RDT_MODE_PSEUDO_LOCKED:
854 "invalid mode for closid %d\n", i);
858 for (i = r->cache.cbm_len - 1; i >= 0; i--) {
859 pseudo_locked = dom->plr ? dom->plr->cbm : 0;
860 hwb = test_bit(i, &hw_shareable);
861 swb = test_bit(i, &sw_shareable);
862 excl = test_bit(i, &exclusive);
863 psl = test_bit(i, &pseudo_locked);
866 else if (hwb && !swb)
868 else if (!hwb && swb)
874 else /* Unused bits remain */
880 mutex_unlock(&rdtgroup_mutex);
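/*
 * Illustration (hypothetical values): with the legend above, a resource
 * with two domains and an 8-bit CBM could render bit_usage as
 *
 *	0=SSSSEEXX;1=SSSSHHH0
 *
 * where, in domain 1, the lowest bit is unused and three bits are in use
 * by hardware only. Bits print from the most significant downwards.
 */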
884 static int rdt_min_bw_show(struct kernfs_open_file *of,
885 struct seq_file *seq, void *v)
887 struct rdt_resource *r = of->kn->parent->priv;
889 seq_printf(seq, "%u\n", r->membw.min_bw);
893 static int rdt_num_rmids_show(struct kernfs_open_file *of,
894 struct seq_file *seq, void *v)
896 struct rdt_resource *r = of->kn->parent->priv;
898 seq_printf(seq, "%d\n", r->num_rmid);
903 static int rdt_mon_features_show(struct kernfs_open_file *of,
904 struct seq_file *seq, void *v)
906 struct rdt_resource *r = of->kn->parent->priv;
907 struct mon_evt *mevt;
909 list_for_each_entry(mevt, &r->evt_list, list)
910 seq_printf(seq, "%s\n", mevt->name);
915 static int rdt_bw_gran_show(struct kernfs_open_file *of,
916 struct seq_file *seq, void *v)
918 struct rdt_resource *r = of->kn->parent->priv;
920 seq_printf(seq, "%u\n", r->membw.bw_gran);
924 static int rdt_delay_linear_show(struct kernfs_open_file *of,
925 struct seq_file *seq, void *v)
927 struct rdt_resource *r = of->kn->parent->priv;
929 seq_printf(seq, "%u\n", r->membw.delay_linear);
933 static int max_threshold_occ_show(struct kernfs_open_file *of,
934 struct seq_file *seq, void *v)
936 struct rdt_resource *r = of->kn->parent->priv;
938 seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
943 static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
944 char *buf, size_t nbytes, loff_t off)
946 struct rdt_resource *r = of->kn->parent->priv;
950 ret = kstrtouint(buf, 0, &bytes);
954 if (bytes > (boot_cpu_data.x86_cache_size * 1024))
957 intel_cqm_threshold = bytes / r->mon_scale;
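/*
 * Worked example (hypothetical numbers): if r->mon_scale were 64,
 * writing 65536 to max_threshold_occupancy would store an
 * intel_cqm_threshold of 65536 / 64 = 1024 counter units; the show
 * handler above scales it back up by mon_scale. The division truncates,
 * so the value read back may be lower than the value written.
 */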
963 * rdtgroup_mode_show - Display mode of this resource group
965 static int rdtgroup_mode_show(struct kernfs_open_file *of,
966 struct seq_file *s, void *v)
968 struct rdtgroup *rdtgrp;
970 rdtgrp = rdtgroup_kn_lock_live(of->kn);
972 rdtgroup_kn_unlock(of->kn);
976 seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));
978 rdtgroup_kn_unlock(of->kn);
983 * rdt_cdp_peer_get - Retrieve CDP peer if it exists
984 * @r: RDT resource to which RDT domain @d belongs
985 * @d: Cache instance for which a CDP peer is requested
986 * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
987 * Used to return the result.
988 * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
989 * Used to return the result.
991 * RDT resources are managed independently and by extension the RDT domains
992 * (RDT resource instances) are managed independently also. The Code and
993 * Data Prioritization (CDP) RDT resources, while managed independently,
994 * could refer to the same underlying hardware. For example,
995 * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
997 * When provided with an RDT resource @r and an instance of that RDT
998 * resource @d, rdt_cdp_peer_get() determines whether a peer RDT
999 * resource exists and, if so, returns the exact instance that shares the same hardware.
1001 * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
1002 * If a CDP peer was found, @r_cdp will point to the peer RDT resource
1003 * and @d_cdp will point to the peer RDT domain.
1005 static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
1006 struct rdt_resource **r_cdp,
1007 struct rdt_domain **d_cdp)
1009 struct rdt_resource *_r_cdp = NULL;
1010 struct rdt_domain *_d_cdp = NULL;
1014 case RDT_RESOURCE_L3DATA:
1015 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
1017 case RDT_RESOURCE_L3CODE:
1018 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA];
1020 case RDT_RESOURCE_L2DATA:
1021 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE];
1023 case RDT_RESOURCE_L2CODE:
1024 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA];
1032 * When a new CPU comes online and CDP is enabled then the new
1033 * RDT domains (if any) associated with both CDP RDT resources
1034 * are added in the same CPU online routine while the
1035 * rdtgroup_mutex is held. It should thus not happen for one
1036 * RDT domain to exist and be associated with its RDT CDP
1037 * resource but there is no RDT domain associated with the
1038 * peer RDT CDP resource. Hence the WARN.
1040 _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
1041 if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
1055 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
1056 * @r: Resource to which domain instance @d belongs.
1057 * @d: The domain instance for which @closid is being tested.
1058 * @cbm: Capacity bitmask being tested.
1059 * @closid: Intended closid for @cbm.
1060 * @exclusive: Only check if overlaps with exclusive resource groups
1062 * Checks if provided @cbm intended to be used for @closid on domain
1063 * @d overlaps with any other closids or other hardware usage associated
1064 * with this domain. If @exclusive is true then only overlaps with
1065 * resource groups in exclusive mode will be considered. If @exclusive
1066 * is false then overlaps with any resource group or hardware entities
1067 * will be considered.
1069 * @cbm is unsigned long, even if only 32 bits are used, to make the
1070 * bitmap functions work correctly.
1072 * Return: false if CBM does not overlap, true if it does.
1074 static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
1075 unsigned long cbm, int closid, bool exclusive)
1077 enum rdtgrp_mode mode;
1078 unsigned long ctrl_b;
1082 /* Check for any overlap with regions used by hardware directly */
1084 ctrl_b = r->cache.shareable_bits;
1085 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
1089 /* Check for overlap with other resource groups */
1091 for (i = 0; i < closids_supported(); i++, ctrl++) {
1093 mode = rdtgroup_mode_by_closid(i);
1094 if (closid_allocated(i) && i != closid &&
1095 mode != RDT_MODE_PSEUDO_LOCKSETUP) {
1096 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
1098 if (mode == RDT_MODE_EXCLUSIVE)
1111 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
1112 * @r: Resource to which domain instance @d belongs.
1113 * @d: The domain instance for which @closid is being tested.
1114 * @cbm: Capacity bitmask being tested.
1115 * @closid: Intended closid for @cbm.
1116 * @exclusive: Only check if overlaps with exclusive resource groups
1118 * Resources that can be allocated using a CBM can use the CBM to control
1119 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
1120 * for overlap. Overlap test is not limited to the specific resource for
1121 * which the CBM is intended though - when dealing with CDP resources that
1122 * share the underlying hardware the overlap check should be performed on
1123 * the CDP resource sharing the hardware also.
1125 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the overlap test.
1128 * Return: true if CBM overlap detected, false if there is no overlap
1130 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
1131 unsigned long cbm, int closid, bool exclusive)
1133 struct rdt_resource *r_cdp;
1134 struct rdt_domain *d_cdp;
1136 if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
1139 if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
1142 return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
1146 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
1148 * An exclusive resource group implies that there should be no sharing of
1149 * its allocated resources. At the time this group is considered to be
1150 * exclusive this test can determine if its current schemata supports this
1151 * setting by testing for overlap with all other resource groups.
1153 * Return: true if resource group can be exclusive, false if there is overlap
1154 * with allocations of other resource groups and thus this resource group
1155 * cannot be exclusive.
1157 static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
1159 int closid = rdtgrp->closid;
1160 struct rdt_resource *r;
1161 bool has_cache = false;
1162 struct rdt_domain *d;
1164 for_each_alloc_enabled_rdt_resource(r) {
1165 if (r->rid == RDT_RESOURCE_MBA)
1168 list_for_each_entry(d, &r->domains, list) {
1169 if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
1170 rdtgrp->closid, false)) {
1171 rdt_last_cmd_puts("schemata overlaps\n");
1178 rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
1186 * rdtgroup_mode_write - Modify the resource group's mode
1189 static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
1190 char *buf, size_t nbytes, loff_t off)
1192 struct rdtgroup *rdtgrp;
1193 enum rdtgrp_mode mode;
1196 /* Valid input requires a trailing newline */
1197 if (nbytes == 0 || buf[nbytes - 1] != '\n')
1199 buf[nbytes - 1] = '\0';
1201 rdtgrp = rdtgroup_kn_lock_live(of->kn);
1203 rdtgroup_kn_unlock(of->kn);
1207 rdt_last_cmd_clear();
1209 mode = rdtgrp->mode;
1211 if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
1212 (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
1213 (!strcmp(buf, "pseudo-locksetup") &&
1214 mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
1215 (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
1218 if (mode == RDT_MODE_PSEUDO_LOCKED) {
1219 rdt_last_cmd_puts("cannot change pseudo-locked group\n");
1224 if (!strcmp(buf, "shareable")) {
1225 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1226 ret = rdtgroup_locksetup_exit(rdtgrp);
1230 rdtgrp->mode = RDT_MODE_SHAREABLE;
1231 } else if (!strcmp(buf, "exclusive")) {
1232 if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
1236 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1237 ret = rdtgroup_locksetup_exit(rdtgrp);
1241 rdtgrp->mode = RDT_MODE_EXCLUSIVE;
1242 } else if (!strcmp(buf, "pseudo-locksetup")) {
1243 ret = rdtgroup_locksetup_enter(rdtgrp);
1246 rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
1248 rdt_last_cmd_puts("unknown/unsupported mode\n");
1253 rdtgroup_kn_unlock(of->kn);
1254 return ret ?: nbytes;
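/*
 * Usage note: the mode is changed by writing one of the strings matched
 * above to the group's "mode" file. For example (hypothetical group):
 *
 *	# echo exclusive > /sys/fs/resctrl/grp1/mode
 */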
1258 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
1259 * @r: RDT resource to which @d belongs.
1260 * @d: RDT domain instance.
1261 * @cbm: bitmask for which the size should be computed.
1263 * The provided bitmask, associated with the RDT domain instance @d, will be
1264 * translated into how many bytes it represents. The size in bytes is
1265 * computed by first dividing the total cache size by the CBM length to
1266 * determine how many bytes each bit in the bitmask represents. The result
1267 * is multiplied with the number of bits set in the bitmask.
1269 * @cbm is unsigned long, even if only 32 bits are used, to make the
1270 * bitmap functions work correctly.
1272 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
1273 struct rdt_domain *d, unsigned long cbm)
1275 struct cpu_cacheinfo *ci;
1276 unsigned int size = 0;
1279 num_b = bitmap_weight(&cbm, r->cache.cbm_len);
1280 ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
1281 for (i = 0; i < ci->num_leaves; i++) {
1282 if (ci->info_list[i].level == r->cache_level) {
1283 size = ci->info_list[i].size / r->cache.cbm_len * num_b;
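/*
 * Worked example (hypothetical numbers): for a 32 MiB cache with a
 * 16-bit CBM, each bit represents 32 MiB / 16 = 2 MiB, so a CBM of
 * 0x00ff (8 bits set) translates to 8 * 2 MiB = 16 MiB.
 */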
1292 * rdtgroup_size_show - Display size in bytes of allocated regions
1294 * The "size" file mirrors the layout of the "schemata" file, printing the
1295 * size in bytes of each region instead of the capacity bitmask.
1298 static int rdtgroup_size_show(struct kernfs_open_file *of,
1299 struct seq_file *s, void *v)
1301 struct rdtgroup *rdtgrp;
1302 struct rdt_resource *r;
1303 struct rdt_domain *d;
1309 rdtgrp = rdtgroup_kn_lock_live(of->kn);
1311 rdtgroup_kn_unlock(of->kn);
1315 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
1316 if (!rdtgrp->plr->d) {
1317 rdt_last_cmd_clear();
1318 rdt_last_cmd_puts("Cache domain offline\n");
1321 seq_printf(s, "%*s:", max_name_width,
1322 rdtgrp->plr->r->name);
1323 size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
1326 seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
1331 for_each_alloc_enabled_rdt_resource(r) {
1333 seq_printf(s, "%*s:", max_name_width, r->name);
1334 list_for_each_entry(d, &r->domains, list) {
1337 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1340 ctrl = (!is_mba_sc(r) ?
1341 d->ctrl_val[rdtgrp->closid] :
1342 d->mbps_val[rdtgrp->closid]);
1343 if (r->rid == RDT_RESOURCE_MBA)
1346 size = rdtgroup_cbm_to_size(r, d, ctrl);
1348 seq_printf(s, "%d=%u", d->id, size);
1355 rdtgroup_kn_unlock(of->kn);
1360 /* rdtgroup information files for one cache resource. */
1361 static struct rftype res_common_files[] = {
1363 .name = "last_cmd_status",
1365 .kf_ops = &rdtgroup_kf_single_ops,
1366 .seq_show = rdt_last_cmd_status_show,
1367 .fflags = RF_TOP_INFO,
1370 .name = "num_closids",
1372 .kf_ops = &rdtgroup_kf_single_ops,
1373 .seq_show = rdt_num_closids_show,
1374 .fflags = RF_CTRL_INFO,
1377 .name = "mon_features",
1379 .kf_ops = &rdtgroup_kf_single_ops,
1380 .seq_show = rdt_mon_features_show,
1381 .fflags = RF_MON_INFO,
1384 .name = "num_rmids",
1386 .kf_ops = &rdtgroup_kf_single_ops,
1387 .seq_show = rdt_num_rmids_show,
1388 .fflags = RF_MON_INFO,
1393 .kf_ops = &rdtgroup_kf_single_ops,
1394 .seq_show = rdt_default_ctrl_show,
1395 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1398 .name = "min_cbm_bits",
1400 .kf_ops = &rdtgroup_kf_single_ops,
1401 .seq_show = rdt_min_cbm_bits_show,
1402 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1405 .name = "shareable_bits",
1407 .kf_ops = &rdtgroup_kf_single_ops,
1408 .seq_show = rdt_shareable_bits_show,
1409 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1412 .name = "bit_usage",
1414 .kf_ops = &rdtgroup_kf_single_ops,
1415 .seq_show = rdt_bit_usage_show,
1416 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1419 .name = "min_bandwidth",
1421 .kf_ops = &rdtgroup_kf_single_ops,
1422 .seq_show = rdt_min_bw_show,
1423 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
1426 .name = "bandwidth_gran",
1428 .kf_ops = &rdtgroup_kf_single_ops,
1429 .seq_show = rdt_bw_gran_show,
1430 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
1433 .name = "delay_linear",
1435 .kf_ops = &rdtgroup_kf_single_ops,
1436 .seq_show = rdt_delay_linear_show,
1437 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
1440 .name = "max_threshold_occupancy",
1442 .kf_ops = &rdtgroup_kf_single_ops,
1443 .write = max_threshold_occ_write,
1444 .seq_show = max_threshold_occ_show,
1445 .fflags = RF_MON_INFO | RFTYPE_RES_CACHE,
1450 .kf_ops = &rdtgroup_kf_single_ops,
1451 .write = rdtgroup_cpus_write,
1452 .seq_show = rdtgroup_cpus_show,
1453 .fflags = RFTYPE_BASE,
1456 .name = "cpus_list",
1458 .kf_ops = &rdtgroup_kf_single_ops,
1459 .write = rdtgroup_cpus_write,
1460 .seq_show = rdtgroup_cpus_show,
1461 .flags = RFTYPE_FLAGS_CPUS_LIST,
1462 .fflags = RFTYPE_BASE,
1467 .kf_ops = &rdtgroup_kf_single_ops,
1468 .write = rdtgroup_tasks_write,
1469 .seq_show = rdtgroup_tasks_show,
1470 .fflags = RFTYPE_BASE,
1475 .kf_ops = &rdtgroup_kf_single_ops,
1476 .write = rdtgroup_schemata_write,
1477 .seq_show = rdtgroup_schemata_show,
1478 .fflags = RF_CTRL_BASE,
1483 .kf_ops = &rdtgroup_kf_single_ops,
1484 .write = rdtgroup_mode_write,
1485 .seq_show = rdtgroup_mode_show,
1486 .fflags = RF_CTRL_BASE,
1491 .kf_ops = &rdtgroup_kf_single_ops,
1492 .seq_show = rdtgroup_size_show,
1493 .fflags = RF_CTRL_BASE,
1498 static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
1500 struct rftype *rfts, *rft;
1503 rfts = res_common_files;
1504 len = ARRAY_SIZE(res_common_files);
1506 lockdep_assert_held(&rdtgroup_mutex);
1508 for (rft = rfts; rft < rfts + len; rft++) {
1509 if ((fflags & rft->fflags) == rft->fflags) {
1510 ret = rdtgroup_add_file(kn, rft);
1518 pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
1519 while (--rft >= rfts) {
1520 if ((fflags & rft->fflags) == rft->fflags)
1521 kernfs_remove_by_name(kn, rft->name);
1527 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
1528 * @r: The resource group with which the file is associated.
1529 * @name: Name of the file
1531 * The permissions of named resctrl file, directory, or link are modified
1532 * to not allow read, write, or execute by any user.
1534 * WARNING: This function is intended to communicate to the user that the
1535 * resctrl file has been locked down - that it is not relevant to the
1536 * particular state the system finds itself in. It should not be relied
1537 * on to protect from user access because after the file's permissions
1538 * are restricted the user can still change the permissions using chmod
1539 * from the command line.
1541 * Return: 0 on success, <0 on failure.
1543 int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
1545 struct iattr iattr = {.ia_valid = ATTR_MODE,};
1546 struct kernfs_node *kn;
1549 kn = kernfs_find_and_get_ns(r->kn, name, NULL);
1553 switch (kernfs_type(kn)) {
1555 iattr.ia_mode = S_IFDIR;
1558 iattr.ia_mode = S_IFREG;
1561 iattr.ia_mode = S_IFLNK;
1565 ret = kernfs_setattr(kn, &iattr);
1571 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
1572 * @r: The resource group with which the file is associated.
1573 * @name: Name of the file
1574 * @mask: Mask of permissions that should be restored
1576 * Restore the permissions of the named file. If @name is a directory the
1577 * permissions of its parent will be used.
1579 * Return: 0 on success, <0 on failure.
1581 int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
1584 struct iattr iattr = {.ia_valid = ATTR_MODE,};
1585 struct kernfs_node *kn, *parent;
1586 struct rftype *rfts, *rft;
1589 rfts = res_common_files;
1590 len = ARRAY_SIZE(res_common_files);
1592 for (rft = rfts; rft < rfts + len; rft++) {
1593 if (!strcmp(rft->name, name))
1594 iattr.ia_mode = rft->mode & mask;
1597 kn = kernfs_find_and_get_ns(r->kn, name, NULL);
1601 switch (kernfs_type(kn)) {
1603 parent = kernfs_get_parent(kn);
1605 iattr.ia_mode |= parent->mode;
1608 iattr.ia_mode |= S_IFDIR;
1611 iattr.ia_mode |= S_IFREG;
1614 iattr.ia_mode |= S_IFLNK;
1618 ret = kernfs_setattr(kn, &iattr);
1623 static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
1624 unsigned long fflags)
1626 struct kernfs_node *kn_subdir;
1629 kn_subdir = kernfs_create_dir(kn_info, name,
1631 if (IS_ERR(kn_subdir))
1632 return PTR_ERR(kn_subdir);
1634 ret = rdtgroup_kn_set_ugid(kn_subdir);
1638 ret = rdtgroup_add_files(kn_subdir, fflags);
1640 kernfs_activate(kn_subdir);
1645 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
1647 struct rdt_resource *r;
1648 unsigned long fflags;
1652 /* create the directory */
1653 kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
1654 if (IS_ERR(kn_info))
1655 return PTR_ERR(kn_info);
1657 ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
1661 for_each_alloc_enabled_rdt_resource(r) {
1662 fflags = r->fflags | RF_CTRL_INFO;
1663 ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
1668 for_each_mon_enabled_rdt_resource(r) {
1669 fflags = r->fflags | RF_MON_INFO;
1670 sprintf(name, "%s_MON", r->name);
1671 ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
1676 ret = rdtgroup_kn_set_ugid(kn_info);
1680 kernfs_activate(kn_info);
1685 kernfs_remove(kn_info);
1690 mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
1691 char *name, struct kernfs_node **dest_kn)
1693 struct kernfs_node *kn;
1696 /* create the directory */
1697 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
1704 ret = rdtgroup_kn_set_ugid(kn);
1708 kernfs_activate(kn);
1717 static void l3_qos_cfg_update(void *arg)
1721 wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
1724 static void l2_qos_cfg_update(void *arg)
1728 wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
1731 static inline bool is_mba_linear(void)
1733 return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
1736 static int set_cache_qos_cfg(int level, bool enable)
1738 void (*update)(void *arg);
1739 struct rdt_resource *r_l;
1740 cpumask_var_t cpu_mask;
1741 struct rdt_domain *d;
1744 if (level == RDT_RESOURCE_L3)
1745 update = l3_qos_cfg_update;
1746 else if (level == RDT_RESOURCE_L2)
1747 update = l2_qos_cfg_update;
1751 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
1754 r_l = &rdt_resources_all[level];
1755 list_for_each_entry(d, &r_l->domains, list) {
1756 /* Pick one CPU from each domain instance to update MSR */
1757 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
1760 /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
1761 if (cpumask_test_cpu(cpu, cpu_mask))
1763 /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
1764 smp_call_function_many(cpu_mask, update, &enable, 1);
1767 free_cpumask_var(cpu_mask);
1772 /* Restore the qos cfg state when a domain comes online */
1773 void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
1775 if (!r->alloc_capable)
1778 if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
1779 l2_qos_cfg_update(&r->alloc_enabled);
1781 if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
1782 l3_qos_cfg_update(&r->alloc_enabled);
1786 * Enable or disable the MBA software controller
1787 * which helps the user specify bandwidth in MBps.
1788 * The MBA software controller is supported only if
1789 * MBM is supported and MBA is in linear scale.
1791 static int set_mba_sc(bool mba_sc)
1793 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
1794 struct rdt_domain *d;
1796 if (!is_mbm_enabled() || !is_mba_linear() ||
1797 mba_sc == is_mba_sc(r))
1800 r->membw.mba_sc = mba_sc;
1801 list_for_each_entry(d, &r->domains, list)
1802 setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);
1807 static int cdp_enable(int level, int data_type, int code_type)
1809 struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
1810 struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
1811 struct rdt_resource *r_l = &rdt_resources_all[level];
1814 if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
1815 !r_lcode->alloc_capable)
1818 ret = set_cache_qos_cfg(level, true);
1820 r_l->alloc_enabled = false;
1821 r_ldata->alloc_enabled = true;
1822 r_lcode->alloc_enabled = true;
1827 static int cdpl3_enable(void)
1829 return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
1830 RDT_RESOURCE_L3CODE);
1833 static int cdpl2_enable(void)
1835 return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
1836 RDT_RESOURCE_L2CODE);
1839 static void cdp_disable(int level, int data_type, int code_type)
1841 struct rdt_resource *r = &rdt_resources_all[level];
1843 r->alloc_enabled = r->alloc_capable;
1845 if (rdt_resources_all[data_type].alloc_enabled) {
1846 rdt_resources_all[data_type].alloc_enabled = false;
1847 rdt_resources_all[code_type].alloc_enabled = false;
1848 set_cache_qos_cfg(level, false);
1852 static void cdpl3_disable(void)
1854 cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
1857 static void cdpl2_disable(void)
1859 cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
1862 static void cdp_disable_all(void)
1864 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
1866 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
1870 static int parse_rdtgroupfs_options(char *data)
1872 char *token, *o = data;
1875 while ((token = strsep(&o, ",")) != NULL) {
1881 if (!strcmp(token, "cdp")) {
1882 ret = cdpl3_enable();
1885 } else if (!strcmp(token, "cdpl2")) {
1886 ret = cdpl2_enable();
1889 } else if (!strcmp(token, "mba_MBps")) {
1890 ret = set_mba_sc(true);
1902 pr_err("Invalid mount option \"%s\"\n", token);
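/*
 * Usage note: these options are given at mount time (see
 * Documentation/x86/intel_rdt_ui.txt). For example:
 *
 *	# mount -t resctrl resctrl -o cdp,mba_MBps /sys/fs/resctrl
 */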
1908 * We don't allow rdtgroup directories to be created anywhere
1909 * except the root directory. Thus when looking for the rdtgroup
1910 * structure for a kernfs node we are either looking at a directory,
1911 * in which case the rdtgroup structure is pointed at by the "priv"
1912 * field, or at a file, in which case we need only look at the parent
1913 * to find the rdtgroup.
1915 static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
1917 if (kernfs_type(kn) == KERNFS_DIR) {
1919 * All the resource directories use "kn->priv"
1920 * to point to the "struct rdtgroup" for the
1921 * resource. "info" and its subdirectories don't
1922 * have rdtgroup structures, so return NULL here.
1924 if (kn == kn_info || kn->parent == kn_info)
1929 return kn->parent->priv;
1933 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
1935 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
1940 atomic_inc(&rdtgrp->waitcount);
1941 kernfs_break_active_protection(kn);
1943 mutex_lock(&rdtgroup_mutex);
1945 /* Was this group deleted while we waited? */
1946 if (rdtgrp->flags & RDT_DELETED)
1952 void rdtgroup_kn_unlock(struct kernfs_node *kn)
1954 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
1959 mutex_unlock(&rdtgroup_mutex);
1961 if (atomic_dec_and_test(&rdtgrp->waitcount) &&
1962 (rdtgrp->flags & RDT_DELETED)) {
1963 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
1964 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
1965 rdtgroup_pseudo_lock_remove(rdtgrp);
1966 kernfs_unbreak_active_protection(kn);
1967 rdtgroup_remove(rdtgrp);
1969 kernfs_unbreak_active_protection(kn);
1973 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
1974 struct rdtgroup *prgrp,
1975 struct kernfs_node **mon_data_kn);
1977 static struct dentry *rdt_mount(struct file_system_type *fs_type,
1978 int flags, const char *unused_dev_name,
1981 struct rdt_domain *dom;
1982 struct rdt_resource *r;
1983 struct dentry *dentry;
1987 mutex_lock(&rdtgroup_mutex);
1989 * resctrl file system can only be mounted once.
1991 if (static_branch_unlikely(&rdt_enable_key)) {
1992 dentry = ERR_PTR(-EBUSY);
1996 ret = parse_rdtgroupfs_options(data);
1998 dentry = ERR_PTR(ret);
2004 ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
2006 dentry = ERR_PTR(ret);
2010 if (rdt_mon_capable) {
2011 ret = mongroup_create_dir(rdtgroup_default.kn,
2012 &rdtgroup_default, "mon_groups",
2015 dentry = ERR_PTR(ret);
2019 ret = mkdir_mondata_all(rdtgroup_default.kn,
2020 &rdtgroup_default, &kn_mondata);
2022 dentry = ERR_PTR(ret);
2025 rdtgroup_default.mon.mon_data_kn = kn_mondata;
2028 ret = rdt_pseudo_lock_init();
2030 dentry = ERR_PTR(ret);
2034 dentry = kernfs_mount(fs_type, flags, rdt_root,
2035 RDTGROUP_SUPER_MAGIC, NULL);
2039 if (rdt_alloc_capable)
2040 static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
2041 if (rdt_mon_capable)
2042 static_branch_enable_cpuslocked(&rdt_mon_enable_key);
2044 if (rdt_alloc_capable || rdt_mon_capable)
2045 static_branch_enable_cpuslocked(&rdt_enable_key);
2047 if (is_mbm_enabled()) {
2048 r = &rdt_resources_all[RDT_RESOURCE_L3];
2049 list_for_each_entry(dom, &r->domains, list)
2050 mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
2056 rdt_pseudo_lock_release();
2058 if (rdt_mon_capable)
2059 kernfs_remove(kn_mondata);
2061 if (rdt_mon_capable)
2062 kernfs_remove(kn_mongrp);
2064 kernfs_remove(kn_info);
2068 rdt_last_cmd_clear();
2069 mutex_unlock(&rdtgroup_mutex);
2075 static int reset_all_ctrls(struct rdt_resource *r)
2077 struct msr_param msr_param;
2078 cpumask_var_t cpu_mask;
2079 struct rdt_domain *d;
2082 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
2087 msr_param.high = r->num_closid;
2090 * Disable resource control for this resource by setting all
2091 * CBMs in all domains to the maximum mask value. Pick one CPU
2092 * from each domain to update the MSRs below.
2094 list_for_each_entry(d, &r->domains, list) {
2095 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
2097 for (i = 0; i < r->num_closid; i++)
2098 d->ctrl_val[i] = r->default_ctrl;
2101 /* Update CBM on this cpu if it's in cpu_mask. */
2102 if (cpumask_test_cpu(cpu, cpu_mask))
2103 rdt_ctrl_update(&msr_param);
2104 /* Update CBM on all other cpus in cpu_mask. */
2105 smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
2108 free_cpumask_var(cpu_mask);
2113 static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
2115 return (rdt_alloc_capable &&
2116 (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
2119 static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
2121 return (rdt_mon_capable &&
2122 (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
2126 * Move tasks from one to the other group. If @from is NULL, then all tasks
2127 * in the system are moved unconditionally (used for teardown).
2129 * If @mask is not NULL the cpus on which moved tasks are running are set
2130 * in that mask so the update smp function call is restricted to affected
2133 static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
2134 struct cpumask *mask)
2136 struct task_struct *p, *t;
2138 read_lock(&tasklist_lock);
2139 for_each_process_thread(p, t) {
2140 if (!from || is_closid_match(t, from) ||
2141 is_rmid_match(t, from)) {
2142 t->closid = to->closid;
2143 t->rmid = to->mon.rmid;
2146 * Order the closid/rmid stores above before the loads
2147 * in task_curr(). This pairs with the full barrier
2148 * between the rq->curr update and intel_rdt_sched_in()
2149 * during context switch.
2154 * If the task is on a CPU, set the CPU in the mask.
2155 * The detection is inaccurate as tasks might move or
2156 * schedule before the smp function call takes place.
2157 * In such a case the function call is pointless, but
2158 * there is no other side effect.
2160 if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
2161 cpumask_set_cpu(task_cpu(t), mask);
2164 read_unlock(&tasklist_lock);
2167 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
2169 struct rdtgroup *sentry, *stmp;
2170 struct list_head *head;
2172 head = &rdtgrp->mon.crdtgrp_list;
2173 list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
2174 free_rmid(sentry->mon.rmid);
2175 list_del(&sentry->mon.crdtgrp_list);
2177 if (atomic_read(&sentry->waitcount) != 0)
2178 sentry->flags = RDT_DELETED;
2180 rdtgroup_remove(sentry);
2185 * Forcibly remove all subdirectories under root.
2187 static void rmdir_all_sub(void)
2189 struct rdtgroup *rdtgrp, *tmp;
2191 /* Move all tasks to the default resource group */
2192 rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
2194 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
2195 /* Free any child rmids */
2196 free_all_child_rdtgrp(rdtgrp);
2198 /* Remove each rdtgroup other than root */
2199 if (rdtgrp == &rdtgroup_default)
2202 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2203 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
2204 rdtgroup_pseudo_lock_remove(rdtgrp);
2207 * Give any CPUs back to the default group. We cannot copy
2208 * cpu_online_mask because a CPU might have executed the
2209 * offline callback already, but is still marked online.
2211 cpumask_or(&rdtgroup_default.cpu_mask,
2212 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
2214 free_rmid(rdtgrp->mon.rmid);
2216 kernfs_remove(rdtgrp->kn);
2217 list_del(&rdtgrp->rdtgroup_list);
2219 if (atomic_read(&rdtgrp->waitcount) != 0)
2220 rdtgrp->flags = RDT_DELETED;
2222 rdtgroup_remove(rdtgrp);
2224 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
2225 update_closid_rmid(cpu_online_mask, &rdtgroup_default);
2227 kernfs_remove(kn_info);
2228 kernfs_remove(kn_mongrp);
2229 kernfs_remove(kn_mondata);
2232 static void rdt_kill_sb(struct super_block *sb)
2234 struct rdt_resource *r;
2237 mutex_lock(&rdtgroup_mutex);
2241 /* Put everything back to default values. */
2242 for_each_alloc_enabled_rdt_resource(r)
2246 rdt_pseudo_lock_release();
2247 rdtgroup_default.mode = RDT_MODE_SHAREABLE;
2248 static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
2249 static_branch_disable_cpuslocked(&rdt_mon_enable_key);
2250 static_branch_disable_cpuslocked(&rdt_enable_key);
2252 mutex_unlock(&rdtgroup_mutex);
2256 static struct file_system_type rdt_fs_type = {
2259 .kill_sb = rdt_kill_sb,
2262 static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
2265 struct kernfs_node *kn;
2268 kn = __kernfs_create_file(parent_kn, name, 0444,
2269 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
2270 &kf_mondata_ops, priv, NULL, NULL);
2274 ret = rdtgroup_kn_set_ugid(kn);
2284 * Remove all subdirectories of mon_data of ctrl_mon groups
2285 * and monitor groups with given domain id.
2287 void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
2289 struct rdtgroup *prgrp, *crgrp;
2292 if (!r->mon_enabled)
2295 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
2296 sprintf(name, "mon_%s_%02d", r->name, dom_id);
2297 kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);
2299 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
2300 kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
2304 static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
2305 struct rdt_domain *d,
2306 struct rdt_resource *r, struct rdtgroup *prgrp)
2308 union mon_data_bits priv;
2309 struct kernfs_node *kn;
2310 struct mon_evt *mevt;
2311 struct rmid_read rr;
2315 sprintf(name, "mon_%s_%02d", r->name, d->id);
2316 /* create the directory */
2317 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
2321 ret = rdtgroup_kn_set_ugid(kn);
2325 if (WARN_ON(list_empty(&r->evt_list))) {
2330 priv.u.rid = r->rid;
2331 priv.u.domid = d->id;
2332 list_for_each_entry(mevt, &r->evt_list, list) {
2333 priv.u.evtid = mevt->evtid;
2334 ret = mon_addfile(kn, mevt->name, priv.priv);
2338 if (is_mbm_event(mevt->evtid))
2339 mon_event_read(&rr, d, prgrp, mevt->evtid, true);
2341 kernfs_activate(kn);
2350 * Add all subdirectories of mon_data for "ctrl_mon" groups
2351 * and "monitor" groups with given domain id.
2353 void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
2354 struct rdt_domain *d)
2356 struct kernfs_node *parent_kn;
2357 struct rdtgroup *prgrp, *crgrp;
2358 struct list_head *head;
2360 if (!r->mon_enabled)
2363 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
2364 parent_kn = prgrp->mon.mon_data_kn;
2365 mkdir_mondata_subdir(parent_kn, d, r, prgrp);
2367 head = &prgrp->mon.crdtgrp_list;
2368 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
2369 parent_kn = crgrp->mon.mon_data_kn;
2370 mkdir_mondata_subdir(parent_kn, d, r, crgrp);
2375 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
2376 struct rdt_resource *r,
2377 struct rdtgroup *prgrp)
2379 struct rdt_domain *dom;
2382 list_for_each_entry(dom, &r->domains, list) {
2383 ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
2392 * This creates a directory mon_data which contains the monitored data.
2394 * mon_data has one directory for each domain, named
2395 * in the format mon_<domain_name>_<domain_id>. For example, a mon_data
2396 * directory with L3 domains looks as below:
2403 * Each domain directory has one file per event:
2408 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
2409 struct rdtgroup *prgrp,
2410 struct kernfs_node **dest_kn)
2412 struct rdt_resource *r;
2413 struct kernfs_node *kn;
2417 * Create the mon_data directory first.
2419 ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
2427 * Create the subdirectories for each domain. Note that all events
2428 * in a domain like L3 are grouped into a resource whose domain is L3
2430 for_each_mon_enabled_rdt_resource(r) {
2431 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
2444 * cbm_ensure_valid - Enforce validity on provided CBM
2445 * @_val: Candidate CBM
2446 * @r: RDT resource to which the CBM belongs
2448 * The provided CBM represents all cache portions available for use. This
2449 * may be represented by a bitmap that does not consist of contiguous ones
2450 * and thus be an invalid CBM.
2451 * Here the provided CBM is forced to be a valid CBM by only considering
2452 * the first set of contiguous bits as valid and clearing all bits that follow.
2453 * The intention here is to provide a valid default CBM with which a new
2454 * resource group is initialized. The user can follow this with a
2455 * modification to the CBM if the default does not satisfy the requirements.
2458 static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
2460 unsigned long val = *_val;
2461 unsigned int cbm_len = r->cache.cbm_len;
2462 unsigned long first_bit, zero_bit;
2467 first_bit = find_first_bit(&val, cbm_len);
2468 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
2470 /* Clear any remaining bits to ensure contiguous region */
2471 bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
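/*
 * Worked example: for a candidate CBM of 0b01011001 with cbm_len = 8,
 * first_bit = 0 and zero_bit = 1, so bits 1..7 are cleared and the
 * resulting valid CBM is 0b00000001 (only the first contiguous run of
 * set bits survives).
 */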
2476 * rdtgroup_init_alloc - Initialize the new RDT group's allocations
2478 * A new RDT group is being created on a system that supports cache
2479 * allocation (CAT). Set this group up to start off with all usable
2480 * allocations. That is, all shareable and unused bits.
2482 * An all-zero CBM is invalid. If there are no more shareable bits available
2483 * on any domain then the entire allocation will fail.
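*
* Worked example (hypothetical values): with an 8-bit CBM, hardware
* shareable bits 0xc0, and one existing exclusive group owning 0x0f,
* the unused bits are 0x30, so the new group starts with
* 0xc0 | 0x30 = 0xf0.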
2485 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
2487 u32 used_b = 0, unused_b = 0;
2488 u32 closid = rdtgrp->closid;
2489 struct rdt_resource *r;
2490 unsigned long tmp_cbm;
2491 enum rdtgrp_mode mode;
2492 struct rdt_domain *d;
2496 for_each_alloc_enabled_rdt_resource(r) {
2498 * Only initialize default allocations for CBM cache resources.
2501 if (r->rid == RDT_RESOURCE_MBA)
2503 list_for_each_entry(d, &r->domains, list) {
2504 d->have_new_ctrl = false;
2505 d->new_ctrl = r->cache.shareable_bits;
2506 used_b = r->cache.shareable_bits;
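/*
 * Walk the control values of all other allocated CLOSIDs on this
 * domain: CBM bits of a shareable group may be shared with the new
 * group, while groups being set up for pseudo-locking are skipped.
 */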
2508 for (i = 0; i < closids_supported(); i++, ctrl++) {
2509 if (closid_allocated(i) && i != closid) {
2510 mode = rdtgroup_mode_by_closid(i);
2511 if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
2514 if (mode == RDT_MODE_SHAREABLE)
2515 d->new_ctrl |= *ctrl;
2518 if (d->plr && d->plr->cbm > 0)
2519 used_b |= d->plr->cbm;
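/*
 * Bits inside cbm_len that no group has claimed are free for the new
 * group: XOR against the full cbm_len-wide mask yields the complement
 * of used_b within that mask.
 */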
2520 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
2521 unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
2522 d->new_ctrl |= unused_b;
2524 * Force the initial CBM to be valid; the user can
2525 * modify the CBM later based on system availability.
2527 cbm_ensure_valid(&d->new_ctrl, r);
2529 * Assign the u32 CBM to an unsigned long to ensure
2530 * that bitmap_weight() does not access out-of-bound memory.
2533 tmp_cbm = d->new_ctrl;
2534 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
2535 r->cache.min_cbm_bits) {
2536 rdt_last_cmd_printf("no space on %s:%d\n",
2540 d->have_new_ctrl = true;
2544 for_each_alloc_enabled_rdt_resource(r) {
2546 * Only initialize default allocations for CBM cache resources.
2549 if (r->rid == RDT_RESOURCE_MBA)
2551 ret = update_domains(r, rdtgrp->closid);
2553 rdt_last_cmd_puts("failed to initialize allocations\n");
2556 rdtgrp->mode = RDT_MODE_SHAREABLE;
2562 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
2563 struct kernfs_node *prgrp_kn,
2564 const char *name, umode_t mode,
2565 enum rdt_group_type rtype, struct rdtgroup **r)
2567 struct rdtgroup *prdtgrp, *rdtgrp;
2568 struct kernfs_node *kn;
2572 prdtgrp = rdtgroup_kn_lock_live(parent_kn);
2573 rdt_last_cmd_clear();
2576 rdt_last_cmd_puts("directory was removed\n");
2580 if (rtype == RDTMON_GROUP &&
2581 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2582 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
2584 rdt_last_cmd_puts("pseudo-locking in progress\n");
2588 /* allocate the rdtgroup. */
2589 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
2592 rdt_last_cmd_puts("kernel out of memory\n");
2596 rdtgrp->mon.parent = prdtgrp;
2597 rdtgrp->type = rtype;
2598 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);
2600 /* kernfs creates the directory for rdtgrp */
2601 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
2604 rdt_last_cmd_puts("kernfs create error\n");
2610 * kernfs_remove() will drop the reference count on "kn" which
2611 * will free it. But we still need it to stick around for the
2612 * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
2613 * which will be dropped by kernfs_put() in rdtgroup_remove().
2617 ret = rdtgroup_kn_set_ugid(kn);
2619 rdt_last_cmd_puts("kernfs perm error\n");
2623 files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
2624 ret = rdtgroup_add_files(kn, files);
2626 rdt_last_cmd_puts("kernfs fill error\n");
2630 if (rdt_mon_capable) {
2633 rdt_last_cmd_puts("out of RMIDs\n");
2636 rdtgrp->mon.rmid = ret;
2638 ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
2640 rdt_last_cmd_puts("kernfs subdir error\n");
2644 kernfs_activate(kn);
2647 * The caller unlocks the parent_kn upon success.
2652 free_rmid(rdtgrp->mon.rmid);
2654 kernfs_put(rdtgrp->kn);
2655 kernfs_remove(rdtgrp->kn);
2659 rdtgroup_kn_unlock(parent_kn);
2663 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
2665 kernfs_remove(rgrp->kn);
2666 free_rmid(rgrp->mon.rmid);
2667 rdtgroup_remove(rgrp);
2671 * Create a monitor group under the "mon_groups" directory of a control
2672 * and monitor group (ctrl_mon). This is a resource group used
2673 * to monitor a subset of tasks and CPUs in its parent ctrl_mon group.
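*
* For example (hypothetical names, assuming resctrl is mounted at
* /sys/fs/resctrl):
*	mkdir /sys/fs/resctrl/grp1/mon_groups/m1
* creates the monitor group "m1" inside the ctrl_mon group "grp1".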
2675 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
2676 struct kernfs_node *prgrp_kn,
2680 struct rdtgroup *rdtgrp, *prgrp;
2683 ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTMON_GROUP,
2688 prgrp = rdtgrp->mon.parent;
2689 rdtgrp->closid = prgrp->closid;
2692 * Add the rdtgrp to the list of rdtgrps the parent
2693 * ctrl_mon group has to track.
2695 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
2697 rdtgroup_kn_unlock(parent_kn);
2702 * These are rdtgroups created under the root directory. They can be
2703 * used to both allocate and monitor resources.
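*
* For example (hypothetical name, assuming resctrl is mounted at
* /sys/fs/resctrl):
*	mkdir /sys/fs/resctrl/grp1
* creates the ctrl_mon group "grp1" directly under the root.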
2705 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
2706 struct kernfs_node *prgrp_kn,
2707 const char *name, umode_t mode)
2709 struct rdtgroup *rdtgrp;
2710 struct kernfs_node *kn;
2714 ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTCTRL_GROUP,
2720 ret = closid_alloc();
2722 rdt_last_cmd_puts("out of CLOSIDs\n");
2723 goto out_common_fail;
2728 rdtgrp->closid = closid;
2729 ret = rdtgroup_init_alloc(rdtgrp);
2733 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
2735 if (rdt_mon_capable) {
2737 * Create an empty mon_groups directory to hold the subset
2738 * of tasks and cpus to monitor.
2740 ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
2742 rdt_last_cmd_puts("kernfs subdir error\n");
2750 list_del(&rdtgrp->rdtgroup_list);
2752 closid_free(closid);
2754 mkdir_rdt_prepare_clean(rdtgrp);
2756 rdtgroup_kn_unlock(parent_kn);
2761 * We allow creating mon groups only within a directory called "mon_groups",
2762 * which is present in every ctrl_mon group. Check if this is a valid
2763 * "mon_groups" directory.
2765 * 1. The directory should be named "mon_groups".
2766 * 2. The mon group itself should "not" be named "mon_groups".
2767 * This makes sure the "mon_groups" directory always has a ctrl_mon group as its parent.
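*
* For example (hypothetical paths): creating "m1" under
* grp1/mon_groups/ passes both checks, while an attempt to create
* grp1/mon_groups/mon_groups fails check 2.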
2770 static bool is_mon_groups(struct kernfs_node *kn, const char *name)
2772 return (!strcmp(kn->name, "mon_groups") &&
2773 strcmp(name, "mon_groups"));
2776 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
2779 /* Do not accept '\n'; it would make the group name unparsable. */
2780 if (strchr(name, '\n'))
2784 * If the parent directory is the root directory and RDT
2785 * allocation is supported, add a control and monitoring
2788 if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
2789 return rdtgroup_mkdir_ctrl_mon(parent_kn, parent_kn, name, mode);
2792 * If RDT monitoring is supported and the parent directory is a valid
2793 * "mon_groups" directory, add a monitoring subdirectory.
2795 if (rdt_mon_capable && is_mon_groups(parent_kn, name))
2796 return rdtgroup_mkdir_mon(parent_kn, parent_kn->parent, name, mode);
2801 static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
2802 cpumask_var_t tmpmask)
2804 struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
2807 /* Give any tasks back to the parent group */
2808 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
2810 /* Update per cpu rmid of the moved CPUs first */
2811 for_each_cpu(cpu, &rdtgrp->cpu_mask)
2812 per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
2814 * Update the MSR on moved CPUs and on CPUs which have a moved
2815 * task running on them.
2817 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
2818 update_closid_rmid(tmpmask, NULL);
2820 rdtgrp->flags = RDT_DELETED;
2821 free_rmid(rdtgrp->mon.rmid);
2824 * Remove the rdtgrp from the parent ctrl_mon group's list
2826 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
2827 list_del(&rdtgrp->mon.crdtgrp_list);
2829 kernfs_remove(rdtgrp->kn);
2834 static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
2835 struct rdtgroup *rdtgrp)
2837 rdtgrp->flags = RDT_DELETED;
2838 list_del(&rdtgrp->rdtgroup_list);
2840 kernfs_remove(rdtgrp->kn);
2844 static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
2845 cpumask_var_t tmpmask)
2849 /* Give any tasks back to the default group */
2850 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
2852 /* Give any CPUs back to the default group */
2853 cpumask_or(&rdtgroup_default.cpu_mask,
2854 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
2856 /* Update per cpu closid and rmid of the moved CPUs first */
2857 for_each_cpu(cpu, &rdtgrp->cpu_mask) {
2858 per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
2859 per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
2863 * Update the MSR on moved CPUs and on CPUs which have a moved
2864 * task running on them.
2866 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
2867 update_closid_rmid(tmpmask, NULL);
2869 closid_free(rdtgrp->closid);
2870 free_rmid(rdtgrp->mon.rmid);
2872 rdtgroup_ctrl_remove(kn, rdtgrp);
2875 * Free all the child monitor group rmids.
2877 free_all_child_rdtgrp(rdtgrp);
2882 static int rdtgroup_rmdir(struct kernfs_node *kn)
2884 struct kernfs_node *parent_kn = kn->parent;
2885 struct rdtgroup *rdtgrp;
2886 cpumask_var_t tmpmask;
2889 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
2892 rdtgrp = rdtgroup_kn_lock_live(kn);
2899 * If the rdtgroup is a ctrl_mon group and parent directory
2900 * is the root directory, remove the ctrl_mon group.
2902 * If the rdtgroup is a mon group and parent directory
2903 * is a valid "mon_groups" directory, remove the mon group.
2905 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
2906 rdtgrp != &rdtgroup_default) {
2907 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2908 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
2909 ret = rdtgroup_ctrl_remove(kn, rdtgrp);
2911 ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
2913 } else if (rdtgrp->type == RDTMON_GROUP &&
2914 is_mon_groups(parent_kn, kn->name)) {
2915 ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
2921 rdtgroup_kn_unlock(kn);
2922 free_cpumask_var(tmpmask);
2926 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
2928 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
2929 seq_puts(seq, ",cdp");
2931 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
2932 seq_puts(seq, ",cdpl2");
2934 if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
2935 seq_puts(seq, ",mba_MBps");
2940 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
2941 .mkdir = rdtgroup_mkdir,
2942 .rmdir = rdtgroup_rmdir,
2943 .show_options = rdtgroup_show_options,
2946 static int __init rdtgroup_setup_root(void)
2950 rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
2951 KERNFS_ROOT_CREATE_DEACTIVATED |
2952 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
2954 if (IS_ERR(rdt_root))
2955 return PTR_ERR(rdt_root);
2957 mutex_lock(&rdtgroup_mutex);
2959 rdtgroup_default.closid = 0;
2960 rdtgroup_default.mon.rmid = 0;
2961 rdtgroup_default.type = RDTCTRL_GROUP;
2962 INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);
2964 list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
2966 ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
2968 kernfs_destroy_root(rdt_root);
2972 rdtgroup_default.kn = rdt_root->kn;
2973 kernfs_activate(rdtgroup_default.kn);
2976 mutex_unlock(&rdtgroup_mutex);
2982 * rdtgroup_init - rdtgroup initialization
2984 * Set up the resctrl filesystem: set up the root, create the mount point,
2985 * register the rdtgroup filesystem, and initialize the files under the root directory.
2987 * Return: 0 on success or -errno
2989 int __init rdtgroup_init(void)
2993 seq_buf_init(&last_cmd_status, last_cmd_status_buf,
2994 sizeof(last_cmd_status_buf));
2996 ret = rdtgroup_setup_root();
3000 ret = sysfs_create_mount_point(fs_kobj, "resctrl");
3004 ret = register_filesystem(&rdt_fs_type);
3006 goto cleanup_mountpoint;
3009 * Adding the resctrl debugfs directory here may not be ideal since
3010 * it would let the resctrl debugfs directory appear on the debugfs
3011 * filesystem before the resctrl filesystem is mounted.
3012 * It may also be acceptable, since it enables debugging of RDT before
3013 * resctrl is mounted.
3014 * The reason why the debugfs directory is created here and not in
3015 * rdt_mount() is because rdt_mount() takes rdtgroup_mutex and
3016 * during the debugfs directory creation also &sb->s_type->i_mutex_key
3017 * (the lockdep class of inode->i_rwsem). Other filesystem
3018 * interactions (e.g. SyS_getdents) have the lock ordering:
3019 * &sb->s_type->i_mutex_key --> &mm->mmap_sem
3020 * During mmap(), called with &mm->mmap_sem, the rdtgroup_mutex
3021 * is taken, thus creating dependency:
3022 * &mm->mmap_sem --> rdtgroup_mutex for the latter that can cause
3023 * issues considering the other two lock dependencies.
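* Together with rdt_mount() taking rdtgroup_mutex before the debugfs
* creation, the three dependencies would close into a cycle:
*	rdtgroup_mutex --> &sb->s_type->i_mutex_key
*	&sb->s_type->i_mutex_key --> &mm->mmap_sem
*	&mm->mmap_sem --> rdtgroup_mutex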
3024 * By creating the debugfs directory here we avoid a dependency
3025 * that may cause deadlock (even though file operations cannot
3026 * occur until the filesystem is mounted, there is no obvious way to
3027 * tell lockdep that).
3029 debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
3034 sysfs_remove_mount_point(fs_kobj, "resctrl");
3036 kernfs_destroy_root(rdt_root);
3041 void __exit rdtgroup_exit(void)
3043 debugfs_remove_recursive(debugfs_resctrl);
3044 unregister_filesystem(&rdt_fs_type);
3045 sysfs_remove_mount_point(fs_kobj, "resctrl");
3046 kernfs_destroy_root(rdt_root);