With the addition of the new "isolcpus" partition in the previous
patch, this patch adds the capability for a privileged user to pull
isolated CPUs from the "isolcpus" partition into an "isolated"
partition when its parent cannot satisfy the request directly.
The following conditions must hold for the pulling of isolated CPUs
from the "isolcpus" partition to succeed:
(1) The value of "cpuset.cpus" must still be a subset of its parent's
"cpuset.cpus" to ensure proper inheritance, even though these CPUs
cannot be used until the cpuset becomes an "isolated" partition.
(2) All the CPUs in "cpuset.cpus" must be freely available in the
"isolcpus" partition, i.e. present in its "cpuset.cpus.effective" and
not yet claimed by another isolated partition.
With this change, the CPUs in an "isolated" partition can come either
from the "isolcpus" partition or from its direct parent, but not both.
As a result, the parent of an isolated partition no longer needs to be
a partition root.
Because of the CPU-exclusive nature of an "isolated" partition, these
isolated CPUs cannot be distributed to other siblings of that isolated
partition.
Changes to the "cpuset.cpus" of such an isolated partition are allowed
as long as all the newly requested CPUs can be granted by the
"isolcpus" partition; otherwise, the partition becomes invalid.
This makes managing and distributing isolated CPUs to the applications
that require them much easier.
An "isolated" partition that pulls CPUs from the special "isolcpus"
partition now effectively has two parents: the "isolcpus" partition,
from which it gets its isolated CPUs, and its hierarchical parent,
from which it gets all the other resources. However, such an
"isolated" partition cannot have subpartitions, as all the CPUs pulled
from "isolcpus" must be in the same isolated state.
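As an illustration (a sketch only, not part of the patch; the cgroup
path and CPU numbers are assumptions, and the writer needs
CAP_SYS_ADMIN for the pull to succeed), a container manager could turn
a cpuset into an isolated partition backed by the "isolcpus" pool like
this:

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  /* Write a string to a cgroup control file; returns 0 on success. */
  static int cg_write(const char *path, const char *val)
  {
          int fd = open(path, O_WRONLY);
          ssize_t n;

          if (fd < 0)
                  return -1;
          n = write(fd, val, strlen(val));
          close(fd);
          return n < 0 ? -1 : 0;
  }

  int main(void)
  {
          /* Request CPUs that are still free in the isolcpus pool,
           * i.e. present in the pool's "cpuset.cpus.effective". */
          if (cg_write("/sys/fs/cgroup/container/cpuset.cpus", "2-3"))
                  perror("cpuset.cpus");

          /* Switching to "isolated" pulls the CPUs from the pool when
           * the hierarchical parent cannot grant them directly. */
          if (cg_write("/sys/fs/cgroup/container/cpuset.cpus.partition",
                       "isolated"))
                  perror("cpuset.cpus.partition");
          return 0;
  }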
Signed-off-by: Waiman Long <longman(a)redhat.com>
---
kernel/cgroup/cpuset.c | 282 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 264 insertions(+), 18 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 444eae3a9a6b..a5bbd43ed46e 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -101,6 +101,7 @@ enum prs_errcode {
PERR_ISOLCPUS,
PERR_ISOLTASK,
PERR_ISOLCHILD,
+ PERR_ISOPARENT,
};
static const char * const perr_strings[] = {
@@ -114,6 +115,7 @@ static const char * const perr_strings[] = {
[PERR_ISOLCPUS] = "An isolcpus partition is already present",
[PERR_ISOLTASK] = "Isolcpus partition can't have tasks",
[PERR_ISOLCHILD] = "Isolcpus partition can't have children",
+ [PERR_ISOPARENT] = "Isolated/isolcpus parent can't have subpartition",
};
struct cpuset {
@@ -1333,6 +1335,195 @@ static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
rebuild_sched_domains_locked();
}
+/*
+ * isolcpus_pull - Enable or disable pulling of isolated cpus from isolcpus
+ * @cs: the cpuset to update
+ * @cmd: the command code (only partcmd_enable or partcmd_disable)
+ * Return: 1 if successful, 0 if error
+ *
+ * Note that pulling isolated cpus from isolcpus or cpus from parent does
+ * not require rebuilding sched domains. So we can change the flags directly.
+ */
+static int isolcpus_pull(struct cpuset *cs, enum subparts_cmd cmd)
+{
+ struct cpuset *parent = parent_cs(cs);
+
+ if (!isolcpus_cs)
+ return 0;
+
+ /*
+ * To enable pulling of isolated CPUs from isolcpus, cpus_allowed
+ * must be a subset of both its parent's cpus_allowed and isolcpus_cs's
+ * effective_cpus and the user has sysadmin privilege.
+ */
+ if ((cmd == partcmd_enable) && capable(CAP_SYS_ADMIN) &&
+ cpumask_subset(cs->cpus_allowed, isolcpus_cs->effective_cpus) &&
+ cpumask_subset(cs->cpus_allowed, parent->cpus_allowed)) {
+ /*
+ * Move cpus from effective_cpus to subparts_cpus & make
+ * cs a child of isolcpus partition.
+ */
+ spin_lock_irq(&callback_lock);
+ cpumask_andnot(isolcpus_cs->effective_cpus,
+ isolcpus_cs->effective_cpus, cs->cpus_allowed);
+ cpumask_or(isolcpus_cs->subparts_cpus,
+ isolcpus_cs->subparts_cpus, cs->cpus_allowed);
+ cpumask_copy(cs->effective_cpus, cs->cpus_allowed);
+ isolcpus_cs->nr_subparts_cpus
+ = cpumask_weight(isolcpus_cs->subparts_cpus);
+
+ if (cs->use_parent_ecpus) {
+ cs->use_parent_ecpus = false;
+ parent->child_ecpus_count--;
+ }
+ list_add(&cs->isol_sibling, &isol_children);
+ clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
+ spin_unlock_irq(&callback_lock);
+ return 1;
+ }
+
+ if ((cmd == partcmd_disable) && !list_empty(&cs->isol_sibling)) {
+ /*
+ * This can be called after isolcpus shrinks its cpu list.
+ * So not all the cpus should be returned back to isolcpus.
+ */
+ WARN_ON_ONCE(cs->partition_root_state != PRS_ISOLATED);
+ spin_lock_irq(&callback_lock);
+ cpumask_andnot(isolcpus_cs->subparts_cpus,
+ isolcpus_cs->subparts_cpus, cs->cpus_allowed);
+ cpumask_or(isolcpus_cs->effective_cpus,
+ isolcpus_cs->effective_cpus, cs->effective_cpus);
+ cpumask_and(isolcpus_cs->effective_cpus,
+ isolcpus_cs->effective_cpus,
+ isolcpus_cs->cpus_allowed);
+ cpumask_and(isolcpus_cs->effective_cpus,
+ isolcpus_cs->effective_cpus, cpu_active_mask);
+ isolcpus_cs->nr_subparts_cpus
+ = cpumask_weight(isolcpus_cs->subparts_cpus);
+
+ if (!cpumask_and(cs->effective_cpus, parent->effective_cpus,
+ cs->cpus_allowed)) {
+ cs->use_parent_ecpus = true;
+ parent->child_ecpus_count++;
+ cpumask_copy(cs->effective_cpus,
+ parent->effective_cpus);
+ }
+ list_del_init(&cs->isol_sibling);
+ cs->partition_root_state = PRS_INVALID_ISOLATED;
+ cs->prs_err = PERR_INVCPUS;
+
+ set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
+ clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
+ spin_unlock_irq(&callback_lock);
+ return 1;
+ }
+ return 0;
+}
+
+static void isolcpus_disable(void)
+{
+ struct cpuset *child, *next;
+
+ list_for_each_entry_safe(child, next, &isol_children, isol_sibling)
+ WARN_ON_ONCE(!isolcpus_pull(child, partcmd_disable));
+
+ isolcpus_cs = NULL;
+}
+
+/*
+ * isolcpus_cpus_update - cpuset.cpus change in isolcpus partition
+ */
+static void isolcpus_cpus_update(struct cpuset *cs)
+{
+ struct cpuset *child, *next;
+
+ if (WARN_ON_ONCE(isolcpus_cs != cs))
+ return;
+
+ if (list_empty(&isol_children))
+ return;
+
+ /*
+ * Remove child isolated partitions that are not fully covered by
+ * subparts_cpus.
+ */
+ list_for_each_entry_safe(child, next, &isol_children,
+ isol_sibling) {
+ if (cpumask_subset(child->cpus_allowed,
+ cs->subparts_cpus))
+ continue;
+
+ isolcpus_pull(child, partcmd_disable);
+ }
+}
+
+/*
+ * isolated_cpus_update - cpuset.cpus change in isolated partition
+ *
+ * Return: 1 if no further action is needed, 0 otherwise
+ */
+static int isolated_cpus_update(struct cpuset *cs, struct cpumask *newmask,
+ struct tmpmasks *tmp)
+{
+ struct cpumask *addmask = tmp->addmask;
+ struct cpumask *delmask = tmp->delmask;
+
+ if (WARN_ON_ONCE(cs->partition_root_state != PRS_ISOLATED) ||
+ list_empty(&cs->isol_sibling))
+ return 0;
+
+ if (WARN_ON_ONCE(!isolcpus_cs) || cpumask_empty(newmask)) {
+ isolcpus_pull(cs, partcmd_disable);
+ return 0;
+ }
+
+ if (cpumask_andnot(addmask, newmask, cs->cpus_allowed)) {
+ /*
+ * Check if isolcpus partition can provide the new CPUs
+ */
+ if (!cpumask_subset(addmask, isolcpus_cs->cpus_allowed) ||
+ cpumask_intersects(addmask, isolcpus_cs->subparts_cpus)) {
+ isolcpus_pull(cs, partcmd_disable);
+ return 0;
+ }
+
+ /*
+ * Pull addmask isolated CPUs from isolcpus partition
+ */
+ spin_lock_irq(&callback_lock);
+ cpumask_andnot(isolcpus_cs->subparts_cpus,
+ isolcpus_cs->subparts_cpus, addmask);
+ cpumask_andnot(isolcpus_cs->effective_cpus,
+ isolcpus_cs->effective_cpus, addmask);
+ isolcpus_cs->nr_subparts_cpus
+ = cpumask_weight(isolcpus_cs->subparts_cpus);
+ spin_unlock_irq(&callback_lock);
+ }
+
+ if (cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask)) {
+ /*
+ * Return isolated CPUs back to isolcpus partition
+ */
+ spin_lock_irq(&callback_lock);
+ cpumask_or(isolcpus_cs->subparts_cpus,
+ isolcpus_cs->subparts_cpus, delmask);
+ cpumask_or(isolcpus_cs->effective_cpus,
+ isolcpus_cs->effective_cpus, delmask);
+ cpumask_and(isolcpus_cs->effective_cpus,
+ isolcpus_cs->effective_cpus, cpu_active_mask);
+ isolcpus_cs->nr_subparts_cpus
+ = cpumask_weight(isolcpus_cs->subparts_cpus);
+ spin_unlock_irq(&callback_lock);
+ }
+
+ spin_lock_irq(&callback_lock);
+ cpumask_copy(cs->cpus_allowed, newmask);
+ cpumask_andnot(cs->effective_cpus, newmask, cs->subparts_cpus);
+ cpumask_and(cs->effective_cpus, cs->effective_cpus, cpu_active_mask);
+ spin_unlock_irq(&callback_lock);
+ return 1;
+}
+
/**
* update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
* @cs: The cpuset that requests change in partition root state
@@ -1579,7 +1770,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
spin_unlock_irq(&callback_lock);
if ((isolcpus_cs == cs) && (cs->partition_root_state != PRS_ISOLCPUS))
- isolcpus_cs = NULL;
+ isolcpus_disable();
if (adding || deleting)
update_tasks_cpumask(parent, tmp->addmask);
@@ -1625,6 +1816,12 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
struct cpuset *parent = parent_cs(cp);
bool update_parent = false;
+ /*
+ * Skip isolated cpusets that pull isolated CPUs from isolcpus
+ */
+ if (!list_empty(&cp->isol_sibling))
+ continue;
+
compute_effective_cpumask(tmp->new_cpus, cp, parent);
/*
@@ -1742,7 +1939,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
- update_tasks_cpumask(cp, tmp->new_cpus);
+ update_tasks_cpumask(cp, cp->effective_cpus);
/*
* On legacy hierarchy, if the effective cpumask of any non-
@@ -1888,6 +2085,10 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
return retval;
if (cs->partition_root_state) {
+ if (!list_empty(&cs->isol_sibling) &&
+ isolated_cpus_update(cs, trialcs->cpus_allowed, &tmp))
+ goto update_hier; /* CPUs update done */
+
if (invalidate)
update_parent_subparts_cpumask(cs, partcmd_invalidate,
NULL, &tmp);
@@ -1920,6 +2121,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
}
spin_unlock_irq(&callback_lock);
+update_hier:
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Now trialcs->cpus_allowed is available */
tmp.new_cpus = trialcs->cpus_allowed;
@@ -1928,8 +2130,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
/* effective_cpus will be updated here */
update_cpumasks_hier(cs, &tmp, false);
- if (cs->partition_root_state) {
- bool force = (cs->partition_root_state == PRS_ISOLCPUS);
+ if (cs->partition_root_state && list_empty(&cs->isol_sibling)) {
struct cpuset *parent = parent_cs(cs);
/*
@@ -1937,8 +2138,12 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
* cpusets if they use parent's effective_cpus or when
* the current cpuset is an isolcpus partition.
*/
- if (parent->child_ecpus_count || force)
- update_sibling_cpumasks(parent, cs, &tmp, force);
+ if (cs->partition_root_state == PRS_ISOLCPUS) {
+ update_sibling_cpumasks(parent, cs, &tmp, true);
+ isolcpus_cpus_update(cs);
+ } else if (parent->child_ecpus_count) {
+ update_sibling_cpumasks(parent, cs, &tmp, false);
+ }
/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains */
update_partition_sd_lb(cs, old_prs);
@@ -2307,7 +2512,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
return err;
}
-/**
+/*
* update_prstate - update partition_root_state
* @cs: the cpuset to update
* @new_prs: new partition root state
@@ -2325,13 +2530,10 @@ static int update_prstate(struct cpuset *cs, int new_prs)
return 0;
/*
- * For a previously invalid partition root, leave it at being
- * invalid if new_prs is not "member".
+ * For a previously invalid partition root, treat it like a "member".
*/
- if (new_prs && is_prs_invalid(old_prs)) {
- cs->partition_root_state = -new_prs;
- return 0;
- }
+ if (new_prs && is_prs_invalid(old_prs))
+ old_prs = PRS_MEMBER;
if (alloc_cpumasks(NULL, &tmpmask))
return -ENOMEM;
@@ -2371,6 +2573,21 @@ static int update_prstate(struct cpuset *cs, int new_prs)
}
}
+ /*
+ * A parent isolated partition that gets its isolated CPUs from
+ * isolcpus cannot have subpartitions.
+ */
+ if (new_prs && !list_empty(&parent->isol_sibling)) {
+ err = PERR_ISOPARENT;
+ goto out;
+ }
+
+ if ((old_prs == PRS_ISOLATED) && !list_empty(&cs->isol_sibling)) {
+ isolcpus_pull(cs, partcmd_disable);
+ old_prs = 0;
+ }
+ WARN_ON_ONCE(!list_empty(&cs->isol_sibling));
+
err = update_partition_exclusive(cs, new_prs);
if (err)
goto out;
@@ -2386,6 +2603,10 @@ static int update_prstate(struct cpuset *cs, int new_prs)
err = update_parent_subparts_cpumask(cs, partcmd_enable,
NULL, &tmpmask);
+ if (err && (new_prs == PRS_ISOLATED) &&
+ isolcpus_pull(cs, partcmd_enable))
+ err = 0; /* Successful isolcpus pull */
+
if (err)
goto out;
} else if (old_prs && new_prs) {
@@ -2445,7 +2666,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
if (new_prs == PRS_ISOLCPUS)
isolcpus_cs = cs;
else if (cs == isolcpus_cs)
- isolcpus_cs = NULL;
+ isolcpus_disable();
/*
* Update child cpusets, if present.
@@ -3674,8 +3895,31 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
}
parent = parent_cs(cs);
- compute_effective_cpumask(&new_cpus, cs, parent);
nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
+ /*
+ * In the special case of a valid isolated cpuset pulling isolated
+ * cpus from isolcpus, we just need to mask offline cpus from
+ * cpus_allowed unless all the isolated cpus are gone.
+ */
+ if (!list_empty(&cs->isol_sibling)) {
+ if (!cpumask_and(&new_cpus, cs->cpus_allowed, cpu_active_mask))
+ isolcpus_pull(cs, partcmd_disable);
+ } else if ((cs->partition_root_state == PRS_ISOLCPUS) &&
+ cpumask_empty(cs->cpus_allowed)) {
+ /*
+ * For isolcpus with empty cpus_allowed, just update
+ * effective_mems and be done with it.
+ */
+ spin_lock_irq(&callback_lock);
+ if (nodes_empty(new_mems))
+ cs->effective_mems = parent->effective_mems;
+ else
+ cs->effective_mems = new_mems;
+ spin_unlock_irq(&callback_lock);
+ goto unlock;
+ } else {
+ compute_effective_cpumask(&new_cpus, cs, parent);
+ }
if (cs->nr_subparts_cpus)
/*
@@ -3707,10 +3951,12 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
* the following conditions hold:
* 1) empty effective cpus but not valid empty partition.
* 2) parent is invalid or doesn't grant any cpus to child
- * partitions.
+ * partitions and not an isolated cpuset pulling cpus from
+ * isolcpus.
*/
- if (is_partition_valid(cs) && (!parent->nr_subparts_cpus ||
- (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) {
+ if (is_partition_valid(cs) &&
+ ((!parent->nr_subparts_cpus && list_empty(&cs->isol_sibling)) ||
+ (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) {
int old_prs, parent_prs;
update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp);
--
2.31.1
One can use "cpuset.cpus.partition" to create multiple scheduling
domains or to produce a set of isolated CPUs where load balancing is
disabled. The former use case is less common, but the latter is
frequently used, especially in Telco use cases like DPDK.
The existing "isolated" partition can be used to produce isolated CPUs
if the applications have full control of the system. However, in a
containerized environment where all the apps run in containers, it is
hard to distribute isolated CPUs from the root down, given the
unified-hierarchy nature of cgroup v2.
The container running on isolated CPUs can be several layers down from
the root. The current partition feature requires that all the
ancestors of a leaf partition root be partition roots themselves. This
can be hard to manage.
This patch introduces a new special partition root state called
"isolcpus" that serves as a pool of isolated CPUs to be pulled into
other "isolated" partitions. At most one instance of the "isolcpus"
partition is allowed in a system, preferably as a child of the top
cpuset.
In a valid "isolcpus" partition, "cpuset.cpus" contains the set of
isolated CPUs and "cpuset.cpus.effective" contains the set of freely
available isolated CPUs that have not yet been pulled into other
"isolated" cpusets.
The special "isolcpus" partition cannot have normal cpuset children,
so creating child cpusets under it or enabling controllers in its
"cgroup.subtree_control" file is not allowed. Tasks are also not
allowed in the "cgroup.procs" file of the "isolcpus" partition. Unlike
other partition roots, an empty "cpuset.cpus" is allowed in the
"isolcpus" partition, as this special cpuset is not designed to hold
tasks.
The CPUs in the "isolcpus" partition are not exclusive, so those
isolated CPUs can be distributed down sibling hierarchies as usual,
even though they will not show up in their "cpuset.cpus.effective".
Right now, an "isolcpus" partition only disables load balancing of
the isolated CPUs. In the near future, it may be extended to support
additional isolation attributes like those currently provided by the
"isolcpus" and related kernel boot command-line options.
In a subsequent patch, a privileged user will be able to change a
"member" cpuset into an "isolated" partition root by pulling isolated
CPUs from the "isolcpus" partition if its parent is not a partition
root that can directly satisfy the request.
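As a usage sketch (not part of the patch; the cgroup mount point,
directory name, and CPU list are illustrative assumptions), a
privileged user could create the pool from a small C program like the
following. Reading "cpuset.cpus.partition" back should then show
"isolcpus", and "cpuset.cpus.effective" the isolated CPUs not yet
pulled by an "isolated" partition.

  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/stat.h>
  #include <unistd.h>

  /* Write a string to a cgroup control file; returns 0 on success. */
  static int cg_write(const char *path, const char *val)
  {
          int fd = open(path, O_WRONLY);
          ssize_t n;

          if (fd < 0)
                  return -1;
          n = write(fd, val, strlen(val));
          close(fd);
          return n < 0 ? -1 : 0;
  }

  int main(void)
  {
          /* Create the cpuset that will serve as the isolated CPU pool. */
          if (mkdir("/sys/fs/cgroup/isolpool", 0755) && errno != EEXIST)
                  perror("mkdir");

          /* Give the pool some CPUs, then switch it to "isolcpus". */
          if (cg_write("/sys/fs/cgroup/isolpool/cpuset.cpus", "2-5"))
                  perror("cpuset.cpus");
          if (cg_write("/sys/fs/cgroup/isolpool/cpuset.cpus.partition",
                       "isolcpus"))
                  perror("cpuset.cpus.partition");
          return 0;
  }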
Signed-off-by: Waiman Long <longman(a)redhat.com>
---
kernel/cgroup/cpuset.c | 158 ++++++++++++++++++++++++++++++++++-------
1 file changed, 133 insertions(+), 25 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 83a7193e0f2c..444eae3a9a6b 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -98,6 +98,9 @@ enum prs_errcode {
PERR_NOCPUS,
PERR_HOTPLUG,
PERR_CPUSEMPTY,
+ PERR_ISOLCPUS,
+ PERR_ISOLTASK,
+ PERR_ISOLCHILD,
};
static const char * const perr_strings[] = {
@@ -108,6 +111,9 @@ static const char * const perr_strings[] = {
[PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
[PERR_HOTPLUG] = "No cpu available due to hotplug",
[PERR_CPUSEMPTY] = "cpuset.cpus is empty",
+ [PERR_ISOLCPUS] = "An isolcpus partition is already present",
+ [PERR_ISOLTASK] = "Isolcpus partition can't have tasks",
+ [PERR_ISOLCHILD] = "Isolcpus partition can't have children",
};
struct cpuset {
@@ -198,6 +204,9 @@ struct cpuset {
/* Handle for cpuset.cpus.partition */
struct cgroup_file partition_file;
+
+ /* siblings list anchored at isol_children */
+ struct list_head isol_sibling;
};
/*
@@ -206,14 +215,26 @@ struct cpuset {
* 0 - member (not a partition root)
* 1 - partition root
* 2 - partition root without load balancing (isolated)
+ * 3 - isolated cpu pool (isolcpus)
* -1 - invalid partition root
* -2 - invalid isolated partition root
+ * -3 - invalid isolated cpu pool
+ *
+ * An isolated cpu pool is a special isolated partition root. At most one
+ * instance of it is allowed in a system. It provides a pool of isolated
+ * cpus that a normal isolated partition root can pull from, if privileged,
+ * in case its parent cannot fulfill its request.
*/
#define PRS_MEMBER 0
#define PRS_ROOT 1
#define PRS_ISOLATED 2
+#define PRS_ISOLCPUS 3
#define PRS_INVALID_ROOT -1
#define PRS_INVALID_ISOLATED -2
+#define PRS_INVALID_ISOLCPUS -3
+
+static struct cpuset *isolcpus_cs; /* System isolcpus partition root */
+static struct list_head isol_children; /* Children that pull isolated cpus */
static inline bool is_prs_invalid(int prs_state)
{
@@ -335,6 +356,7 @@ static struct cpuset top_cpuset = {
.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
(1 << CS_MEM_EXCLUSIVE)),
.partition_root_state = PRS_ROOT,
+ .isol_sibling = LIST_HEAD_INIT(top_cpuset.isol_sibling),
};
/**
@@ -1282,7 +1304,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
*/
static int update_partition_exclusive(struct cpuset *cs, int new_prs)
{
- bool exclusive = (new_prs > 0);
+ bool exclusive = (new_prs == PRS_ROOT) || (new_prs == PRS_ISOLATED);
if (exclusive && !is_cpu_exclusive(cs)) {
if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
@@ -1303,7 +1325,7 @@ static int update_partition_exclusive(struct cpuset *cs, int new_prs)
static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
{
int new_prs = cs->partition_root_state;
- bool new_lb = (new_prs != PRS_ISOLATED);
+ bool new_lb = (new_prs != PRS_ISOLATED) && (new_prs != PRS_ISOLCPUS);
if (new_lb != !!is_sched_load_balance(cs))
update_flag(CS_SCHED_LOAD_BALANCE, cs, new_lb);
@@ -1360,18 +1382,20 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
int part_error = PERR_NONE; /* Partition error? */
percpu_rwsem_assert_held(&cpuset_rwsem);
+ old_prs = new_prs = cs->partition_root_state;
/*
* The parent must be a partition root.
* The new cpumask, if present, or the current cpus_allowed must
- * not be empty.
+ * not be empty except for isolcpus partition.
*/
if (!is_partition_valid(parent)) {
return is_partition_invalid(parent)
? PERR_INVPARENT : PERR_NOTPART;
}
- if ((newmask && cpumask_empty(newmask)) ||
- (!newmask && cpumask_empty(cs->cpus_allowed)))
+ if ((new_prs != PRS_ISOLCPUS) &&
+ ((newmask && cpumask_empty(newmask)) ||
+ (!newmask && cpumask_empty(cs->cpus_allowed))))
return PERR_CPUSEMPTY;
/*
@@ -1379,7 +1403,6 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
* partcmd_invalidate commands.
*/
adding = deleting = false;
- old_prs = new_prs = cs->partition_root_state;
if (cmd == partcmd_enable) {
/*
* Enabling partition root is not allowed if cpus_allowed
@@ -1498,11 +1521,13 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
switch (cs->partition_root_state) {
case PRS_ROOT:
case PRS_ISOLATED:
+ case PRS_ISOLCPUS:
if (part_error)
new_prs = -old_prs;
break;
case PRS_INVALID_ROOT:
case PRS_INVALID_ISOLATED:
+ case PRS_INVALID_ISOLCPUS:
if (!part_error)
new_prs = -old_prs;
break;
@@ -1553,6 +1578,9 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
spin_unlock_irq(&callback_lock);
+ if ((isolcpus_cs == cs) && (cs->partition_root_state != PRS_ISOLCPUS))
+ isolcpus_cs = NULL;
+
if (adding || deleting)
update_tasks_cpumask(parent, tmp->addmask);
@@ -1640,7 +1668,14 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
*/
old_prs = new_prs = cp->partition_root_state;
if ((cp != cs) && old_prs) {
- switch (parent->partition_root_state) {
+ int parent_prs = parent->partition_root_state;
+
+ /*
+ * isolcpus partition parent can't have children
+ */
+ WARN_ON_ONCE(parent_prs == PRS_ISOLCPUS);
+
+ switch (parent_prs) {
case PRS_ROOT:
case PRS_ISOLATED:
update_parent = true;
@@ -1735,9 +1770,10 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
* @parent: Parent cpuset
* @cs: Current cpuset
* @tmp: Temp variables
+ * @force: Force update if set
*/
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
- struct tmpmasks *tmp)
+ struct tmpmasks *tmp, bool force)
{
struct cpuset *sibling;
struct cgroup_subsys_state *pos_css;
@@ -1756,7 +1792,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
cpuset_for_each_child(sibling, pos_css, parent) {
if (sibling == cs)
continue;
- if (!sibling->use_parent_ecpus)
+ if (!sibling->use_parent_ecpus && !force)
continue;
if (!css_tryget_online(&sibling->css))
continue;
@@ -1893,14 +1929,16 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
update_cpumasks_hier(cs, &tmp, false);
if (cs->partition_root_state) {
+ bool force = (cs->partition_root_state == PRS_ISOLCPUS);
struct cpuset *parent = parent_cs(cs);
/*
* For partition root, update the cpumasks of sibling
- * cpusets if they use parent's effective_cpus.
+ * cpusets if they use parent's effective_cpus or when
+ * the current cpuset is an isolcpus partition.
*/
- if (parent->child_ecpus_count)
- update_sibling_cpumasks(parent, cs, &tmp);
+ if (parent->child_ecpus_count || force)
+ update_sibling_cpumasks(parent, cs, &tmp, force);
/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains */
update_partition_sd_lb(cs, old_prs);
@@ -2298,6 +2336,41 @@ static int update_prstate(struct cpuset *cs, int new_prs)
if (alloc_cpumasks(NULL, &tmpmask))
return -ENOMEM;
+ /*
+ * Only one isolcpus partition is allowed and it can't have children
+ * or tasks in it. The isolcpus partition is also not exclusive so
+ * that the isolated but unused cpus can be distributed down the
+ * hierarchy.
+ */
+ if (new_prs == PRS_ISOLCPUS) {
+ if (isolcpus_cs)
+ err = PERR_ISOLCPUS;
+ else if (!list_empty(&cs->css.children))
+ err = PERR_ISOLCHILD;
+ else if (cs->css.cgroup->nr_populated_csets)
+ err = PERR_ISOLTASK;
+
+ if (err && old_prs) {
+ /*
+ * A previous valid partition root is now invalid
+ */
+ goto disable_partition;
+ } else if (err) {
+ goto out;
+ }
+
+ /*
+ * Unlike other partition types, an isolated cpu pool can
+ * be empty as it is essentially a place holder for isolated
+ * CPUs.
+ */
+ if (!old_prs && cpumask_empty(cs->cpus_allowed)) {
+ /* Force effective_cpus to be empty too */
+ cpumask_clear(cs->effective_cpus);
+ goto out;
+ }
+ }
+
err = update_partition_exclusive(cs, new_prs);
if (err)
goto out;
@@ -2316,11 +2389,9 @@ static int update_prstate(struct cpuset *cs, int new_prs)
if (err)
goto out;
} else if (old_prs && new_prs) {
- /*
- * A change in load balance state only, no change in cpumasks.
- */
- goto out;
+ goto out; /* Skip cpuset and sibling task update */
} else {
+disable_partition:
/*
* Switching back to member is always allowed even if it
* disables child partitions.
@@ -2342,8 +2413,13 @@ static int update_prstate(struct cpuset *cs, int new_prs)
update_tasks_cpumask(parent, tmpmask.new_cpus);
- if (parent->child_ecpus_count)
- update_sibling_cpumasks(parent, cs, &tmpmask);
+ /*
+ * Since isolcpus partition is not exclusive, we have to update
+ * sibling hierarchies as well.
+ */
+ if ((new_prs == PRS_ISOLCPUS) || parent->child_ecpus_count)
+ update_sibling_cpumasks(parent, cs, &tmpmask,
+ new_prs == PRS_ISOLCPUS);
out:
/*
@@ -2363,6 +2439,14 @@ static int update_prstate(struct cpuset *cs, int new_prs)
/* Update sched domains and load balance flag */
update_partition_sd_lb(cs, old_prs);
+ /*
+ * Check isolcpus_cs state
+ */
+ if (new_prs == PRS_ISOLCPUS)
+ isolcpus_cs = cs;
+ else if (cs == isolcpus_cs)
+ isolcpus_cs = NULL;
+
/*
* Update child cpusets, if present.
* Force update if switching back to member.
@@ -2486,7 +2570,12 @@ static struct cpuset *cpuset_attach_old_cs;
*/
static int cpuset_can_attach_check(struct cpuset *cs)
{
+ /*
+ * Tasks cannot be moved to a cpuset with empty effective cpus or
+ * to an isolcpus partition.
+ */
if (cpumask_empty(cs->effective_cpus) ||
+ (cs->partition_root_state == PRS_ISOLCPUS) ||
(!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
return -ENOSPC;
return 0;
@@ -2902,24 +2991,30 @@ static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
static int sched_partition_show(struct seq_file *seq, void *v)
{
struct cpuset *cs = css_cs(seq_css(seq));
+ int prs = cs->partition_root_state;
const char *err, *type = NULL;
- switch (cs->partition_root_state) {
+ switch (prs) {
case PRS_ROOT:
seq_puts(seq, "root\n");
break;
case PRS_ISOLATED:
seq_puts(seq, "isolated\n");
break;
+ case PRS_ISOLCPUS:
+ seq_puts(seq, "isolcpus\n");
+ break;
case PRS_MEMBER:
seq_puts(seq, "member\n");
break;
- case PRS_INVALID_ROOT:
- type = "root";
- fallthrough;
- case PRS_INVALID_ISOLATED:
- if (!type)
+ default:
+ if (prs == PRS_INVALID_ROOT)
+ type = "root";
+ else if (prs == PRS_INVALID_ISOLATED)
type = "isolated";
+ else
+ type = "isolcpus";
+
err = perr_strings[READ_ONCE(cs->prs_err)];
if (err)
seq_printf(seq, "%s invalid (%s)\n", type, err);
@@ -2948,6 +3043,8 @@ static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
val = PRS_MEMBER;
else if (!strcmp(buf, "isolated"))
val = PRS_ISOLATED;
+ else if (!strcmp(buf, "isolcpus"))
+ val = PRS_ISOLCPUS;
else
return -EINVAL;
@@ -3157,6 +3254,7 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
nodes_clear(cs->effective_mems);
fmeter_init(&cs->fmeter);
cs->relax_domain_level = -1;
+ INIT_LIST_HEAD(&cs->isol_sibling);
/* Set CS_MEMORY_MIGRATE for default hierarchy */
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
@@ -3171,6 +3269,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
struct cpuset *parent = parent_cs(cs);
struct cpuset *tmp_cs;
struct cgroup_subsys_state *pos_css;
+ int err = 0;
if (!parent)
return 0;
@@ -3178,6 +3277,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
+ /*
+ * An isolcpus partition cannot have direct children.
+ */
+ if (parent->partition_root_state == PRS_ISOLCPUS) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
set_bit(CS_ONLINE, &cs->flags);
if (is_spread_page(parent))
set_bit(CS_SPREAD_PAGE, &cs->flags);
@@ -3229,7 +3336,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
out_unlock:
percpu_up_write(&cpuset_rwsem);
cpus_read_unlock();
- return 0;
+ return err;
}
/*
@@ -3434,6 +3541,7 @@ int __init cpuset_init(void)
fmeter_init(&top_cpuset.fmeter);
set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
top_cpuset.relax_domain_level = -1;
+ INIT_LIST_HEAD(&isol_children);
BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
--
2.31.1
I have been running hid-tools for a while, but it was kept in its own
separate repository for multiple reasons. Over the past few weeks I
finally managed to get the kernel tests in that repo into a state
where we can merge them into the kernel tree directly:
- the tests run in ~2 to 3 minutes
- the tests are way more reliable than previously
- the tests are mostly self-contained now (with the exception
of the Sony ones)
To be able to run the tests, we need to use the latest release of
hid-tools, as that project still keeps the HID parsing logic and is
capable of generating the HID events.
The series also ensures we can run the tests with vmtest.sh, allowing
for quick development and testing in the tree itself.
This should allow us to require tests to be added to a series when we
see fit, and to keep them properly maintained instead of having to
deal with two repositories.
In Cc are all of the people who participated in the elaboration of
those tests, so please send back a Signed-off-by for each commit you
are part of.
This series applies on top of the for-6.3/hid-bpf branch, which is the
one that added the tools/testing/selftests/hid directory. Given that
it is unlikely this series will make the cut for 6.3, we might just
consider it to be based on top of the future 6.3-rc1.
Cheers,
Benjamin
Signed-off-by: Benjamin Tissoires <benjamin.tissoires(a)redhat.com>
---
Benjamin Tissoires (11):
selftests: hid: make vmtest rely on make
selftests: hid: import hid-tools hid-core tests
selftests: hid: import hid-tools hid-gamepad tests
selftests: hid: import hid-tools hid-keyboards tests
selftests: hid: import hid-tools hid-mouse tests
selftests: hid: import hid-tools hid-multitouch and hid-tablets tests
selftests: hid: import hid-tools wacom tests
selftests: hid: import hid-tools hid-apple tests
selftests: hid: import hid-tools hid-ite tests
selftests: hid: import hid-tools hid-sony and hid-playstation tests
selftests: hid: import hid-tools usb-crash tests
tools/testing/selftests/hid/Makefile | 12 +
tools/testing/selftests/hid/config | 11 +
tools/testing/selftests/hid/hid-apple.sh | 7 +
tools/testing/selftests/hid/hid-core.sh | 7 +
tools/testing/selftests/hid/hid-gamepad.sh | 7 +
tools/testing/selftests/hid/hid-ite.sh | 7 +
tools/testing/selftests/hid/hid-keyboard.sh | 7 +
tools/testing/selftests/hid/hid-mouse.sh | 7 +
tools/testing/selftests/hid/hid-multitouch.sh | 7 +
tools/testing/selftests/hid/hid-sony.sh | 7 +
tools/testing/selftests/hid/hid-tablet.sh | 7 +
tools/testing/selftests/hid/hid-usb_crash.sh | 7 +
tools/testing/selftests/hid/hid-wacom.sh | 7 +
tools/testing/selftests/hid/run-hid-tools-tests.sh | 28 +
tools/testing/selftests/hid/settings | 3 +
tools/testing/selftests/hid/tests/__init__.py | 2 +
tools/testing/selftests/hid/tests/base.py | 345 ++++
tools/testing/selftests/hid/tests/conftest.py | 81 +
.../selftests/hid/tests/descriptors_wacom.py | 1360 +++++++++++++
.../selftests/hid/tests/test_apple_keyboard.py | 440 +++++
tools/testing/selftests/hid/tests/test_gamepad.py | 209 ++
tools/testing/selftests/hid/tests/test_hid_core.py | 154 ++
.../selftests/hid/tests/test_ite_keyboard.py | 166 ++
tools/testing/selftests/hid/tests/test_keyboard.py | 485 +++++
tools/testing/selftests/hid/tests/test_mouse.py | 977 +++++++++
.../testing/selftests/hid/tests/test_multitouch.py | 2088 ++++++++++++++++++++
tools/testing/selftests/hid/tests/test_sony.py | 282 +++
tools/testing/selftests/hid/tests/test_tablet.py | 872 ++++++++
.../testing/selftests/hid/tests/test_usb_crash.py | 103 +
.../selftests/hid/tests/test_wacom_generic.py | 844 ++++++++
tools/testing/selftests/hid/vmtest.sh | 25 +-
31 files changed, 8554 insertions(+), 10 deletions(-)
---
base-commit: 2f7f4efb9411770b4ad99eb314d6418e980248b4
change-id: 20230217-import-hid-tools-tests-dc0cd4f3c8a8
Best regards,
--
Benjamin Tissoires <benjamin.tissoires(a)redhat.com>
memalign() is obsolete according to its manpage.
Replace memalign() with posix_memalign().
As a pointer is passed into posix_memalign(), initialize *one_page to
NULL to silence a warning about the value being used while
uninitialized (a false positive anyway, because the error is properly
checked before the pointer is used). Note that posix_memalign()
returns 0 on success or a positive error number on failure, so its
result is checked directly.
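For reference, here is a minimal standalone sketch of the replacement
pattern (the alignment and length values are illustrative, not the
test's actual ones):

  #define _POSIX_C_SOURCE 200112L
  #include <stdio.h>
  #include <stdlib.h>

  int main(void)
  {
          void *one_page = NULL;  /* quiets maybe-uninitialized warnings */
          int ret;

          /* posix_memalign() returns 0 on success or a positive error
           * number (it does not set errno), so test the result
           * directly rather than comparing it with "< 0". */
          ret = posix_memalign(&one_page, 4096, 4 * 4096);
          if (ret) {
                  fprintf(stderr, "posix_memalign: %d\n", ret);
                  exit(EXIT_FAILURE);
          }
          free(one_page);
          return 0;
  }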
Signed-off-by: Deming Wang <wangdeming(a)inspur.com>
---
tools/testing/selftests/mm/split_huge_page_test.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index cbb5e6893cbf..94c7dffc4d7d 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -96,10 +96,10 @@ void split_pmd_thp(void)
char *one_page;
size_t len = 4 * pmd_pagesize;
size_t i;
+ int ret;
- one_page = memalign(pmd_pagesize, len);
-
- if (!one_page) {
+ ret = posix_memalign((void **)&one_page, pmd_pagesize, len);
+ if (ret) {
printf("Fail to allocate memory\n");
exit(EXIT_FAILURE);
}
--
2.27.0
memalign() is obsolete according to its manpage.
Replace memalign() with posix_memalign() and remove the malloc.h
include that was there for memalign().
As a pointer is passed into posix_memalign(), initialize *s to NULL to
silence a warning about the value being used while uninitialized (a
false positive anyway, because the error is properly checked before
the pointer is used).
Signed-off-by: Deming Wang <wangdeming(a)inspur.com>
---
tools/testing/selftests/powerpc/stringloops/strlen.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/powerpc/stringloops/strlen.c b/tools/testing/selftests/powerpc/stringloops/strlen.c
index 9055ebc484d0..f9c1f9cc2d32 100644
--- a/tools/testing/selftests/powerpc/stringloops/strlen.c
+++ b/tools/testing/selftests/powerpc/stringloops/strlen.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#include <malloc.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
@@ -51,10 +50,11 @@ static void bench_test(char *s)
static int testcase(void)
{
char *s;
+ int ret;
unsigned long i;
- s = memalign(128, SIZE);
- if (!s) {
+ ret = posix_memalign((void **)&s, 128, SIZE);
+ if (ret) {
perror("memalign");
exit(1);
}
--
2.27.0
memalign() is obsolete according to its manpage.
Replace memalign() with posix_memalign().
As a pointer is passed into posix_memalign(), initialize *map to NULL
to silence a warning about the value being used while uninitialized (a
false positive anyway, because the error is properly checked before
the pointer is used).
Signed-off-by: Deming Wang <wangdeming(a)inspur.com>
---
tools/testing/selftests/mm/soft-dirty.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
index 21d8830c5f24..c99350e110ec 100644
--- a/tools/testing/selftests/mm/soft-dirty.c
+++ b/tools/testing/selftests/mm/soft-dirty.c
@@ -80,9 +80,9 @@ static void test_hugepage(int pagemap_fd, int pagesize)
int i, ret;
size_t hpage_len = read_pmd_pagesize();
- map = memalign(hpage_len, hpage_len);
- if (!map)
- ksft_exit_fail_msg("memalign failed\n");
+ ret = posix_memalign((void **)(&map), hpage_len, hpage_len);
+ if (ret)
+ ksft_exit_fail_msg("posix_memalign failed\n");
ret = madvise(map, hpage_len, MADV_HUGEPAGE);
if (ret)
--
2.27.0
The "test_encl.elf" file used by test_sgx is not installed in
INSTALL_PATH. Attempting to execute test_sgx then causes a false
negative:
"
enclave executable open(): No such file or directory
main.c:188:unclobbered_vdso:Failed to load the test enclave.
"
Add "test_encl.elf" to TEST_FILES so that it will be installed.
Fixes: 2adcba79e69d ("selftests/x86: Add a selftest for SGX")
Signed-off-by: Yi Lai <yi1.lai(a)intel.com>
---
tools/testing/selftests/sgx/Makefile | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/sgx/Makefile b/tools/testing/selftests/sgx/Makefile
index 75af864e07b6..50aab6b57da3 100644
--- a/tools/testing/selftests/sgx/Makefile
+++ b/tools/testing/selftests/sgx/Makefile
@@ -17,6 +17,7 @@ ENCL_CFLAGS := -Wall -Werror -static -nostdlib -nostartfiles -fPIC \
-fno-stack-protector -mrdrnd $(INCLUDES)
TEST_CUSTOM_PROGS := $(OUTPUT)/test_sgx
+TEST_FILES := $(OUTPUT)/test_encl.elf
ifeq ($(CAN_BUILD_X86_64), 1)
all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/test_encl.elf
--
2.25.1
memalign() is obsolete according to its manpage.
Replace memalign() with posix_memalign() and remove the malloc.h
include that was there for memalign().
As a pointer is passed into posix_memalign(), initialize *one_page to
NULL to silence a warning about the value being used while
uninitialized (a false positive anyway, because the error is properly
checked before the pointer is used).
Signed-off-by: Deming Wang <wangdeming(a)inspur.com>
---
tools/testing/selftests/mm/split_huge_page_test.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index cbb5e6893cbf..8f48f07bc821 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -96,10 +96,10 @@ void split_pmd_thp(void)
char *one_page;
size_t len = 4 * pmd_pagesize;
size_t i;
+ int ret;
- one_page = memalign(pmd_pagesize, len);
-
- if (!one_page) {
+ ret = posix_memalign((void **)(&one_page), pmd_pagesize, len);
+ if (ret) {
printf("Fail to allocate memory\n");
exit(EXIT_FAILURE);
}
--
2.27.0
memalign() is obsolete according to its manpage.
Replace memalign() with posix_memalign() and remove the malloc.h
include that was there for memalign().
As a pointer is passed into posix_memalign(), initialize *map to NULL
to silence a warning about the value being used while uninitialized (a
false positive anyway, because the error is properly checked before
the pointer is used).
Signed-off-by: Deming Wang <wangdeming(a)inspur.com>
---
tools/testing/selftests/mm/soft-dirty.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
index 21d8830c5f24..4bb7421141a2 100644
--- a/tools/testing/selftests/mm/soft-dirty.c
+++ b/tools/testing/selftests/mm/soft-dirty.c
@@ -80,8 +80,8 @@ static void test_hugepage(int pagemap_fd, int pagesize)
int i, ret;
size_t hpage_len = read_pmd_pagesize();
- map = memalign(hpage_len, hpage_len);
- if (!map)
+ ret = posix_memalign((void **)(&map), hpage_len, hpage_len);
+ if (ret)
ksft_exit_fail_msg("memalign failed\n");
ret = madvise(map, hpage_len, MADV_HUGEPAGE);
--
2.27.0