4.9-stable review patch. If anyone has any objections, please let me know.
------------------
From: Satheesh Rajendran <sathnaga@linux.vnet.ibm.com>
[ Upstream commit 321a7c35c90cc834851ceda18a8ee18f1d032b92 ]
Certain systems are designed to have sparse/discontiguous nodes. On such systems, 'perf bench numa' hangs, shows the wrong number of nodes, and shows values for non-existent nodes. Handle this by only taking nodes that are exposed by the kernel to userspace.
Signed-off-by: Satheesh Rajendran <sathnaga@linux.vnet.ibm.com>
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Acked-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1edbcd353c009e109e93d78f2f46381930c340fe.1511368645...
Signed-off-by: Balamuruhan S <bala24@linux.vnet.ibm.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 tools/perf/bench/numa.c | 56 +++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 51 insertions(+), 5 deletions(-)
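For reviewers unfamiliar with the libnuma interfaces the new helpers build on, here is a minimal standalone sketch (not part of the patch) of the same presence check: numa_nodes_ptr is the libnuma bitmask of nodes the kernel exposes to userspace, so iterating up to numa_max_node() and testing each bit skips holes in a sparse node numbering. The file name and build command are illustrative assumptions.

/* sketch.c -- build with: gcc sketch.c -lnuma (assumed environment) */
#include <stdio.h>
#include <numa.h>

int main(void)
{
	int node, present = 0;

	if (numa_available() < 0) {
		fprintf(stderr, "NUMA is not available on this system\n");
		return 1;
	}

	/*
	 * numa_max_node() returns the highest node number, not a count,
	 * so node IDs may be sparse; test each bit individually.
	 */
	for (node = 0; node <= numa_max_node(); node++) {
		if (numa_bitmask_isbitset(numa_nodes_ptr, node)) {
			printf("node %d is present\n", node);
			present++;
		}
	}

	printf("%d node(s) exposed by the kernel\n", present);
	return 0;
}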
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -211,6 +211,47 @@ static const char * const numa_usage[] =
 	NULL
 };
 
+/*
+ * To get number of numa nodes present.
+ */
+static int nr_numa_nodes(void)
+{
+	int i, nr_nodes = 0;
+
+	for (i = 0; i < g->p.nr_nodes; i++) {
+		if (numa_bitmask_isbitset(numa_nodes_ptr, i))
+			nr_nodes++;
+	}
+
+	return nr_nodes;
+}
+
+/*
+ * To check if given numa node is present.
+ */
+static int is_node_present(int node)
+{
+	return numa_bitmask_isbitset(numa_nodes_ptr, node);
+}
+
+/*
+ * To check given numa node has cpus.
+ */
+static bool node_has_cpus(int node)
+{
+	struct bitmask *cpu = numa_allocate_cpumask();
+	unsigned int i;
+
+	if (cpu && !numa_node_to_cpus(node, cpu)) {
+		for (i = 0; i < cpu->size; i++) {
+			if (numa_bitmask_isbitset(cpu, i))
+				return true;
+		}
+	}
+
+	return false; /* lets fall back to nocpus safely */
+}
+
 static cpu_set_t bind_to_cpu(int target_cpu)
 {
 	cpu_set_t orig_mask, mask;
@@ -239,12 +280,12 @@ static cpu_set_t bind_to_cpu(int target_
 
 static cpu_set_t bind_to_node(int target_node)
 {
-	int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
+	int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
 	cpu_set_t orig_mask, mask;
 	int cpu;
 	int ret;
 
-	BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
+	BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
 	BUG_ON(!cpus_per_node);
 
 	ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
@@ -644,7 +685,7 @@ static int parse_setup_node_list(void)
 			int i;
 
 			for (i = 0; i < mul; i++) {
-				if (t >= g->p.nr_tasks) {
+				if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
 					printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
 					goto out;
 				}
@@ -959,6 +1000,8 @@ static void calc_convergence(double runt
 	sum = 0;
 
 	for (node = 0; node < g->p.nr_nodes; node++) {
+		if (!is_node_present(node))
+			continue;
 		nr = nodes[node];
 		nr_min = min(nr, nr_min);
 		nr_max = max(nr, nr_max);
@@ -979,8 +1022,11 @@ static void calc_convergence(double runt
 	process_groups = 0;
 
 	for (node = 0; node < g->p.nr_nodes; node++) {
-		int processes = count_node_processes(node);
+		int processes;
 
+		if (!is_node_present(node))
+			continue;
+		processes = count_node_processes(node);
 		nr = nodes[node];
 		tprintf(" %2d/%-2d", nr, processes);
 
@@ -1286,7 +1332,7 @@ static void print_summary(void)
 
 	printf("\n ###\n");
 	printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
-		g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
+		g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
 	printf(" # %5dx %5ldMB global shared mem operations\n",
 		g->p.nr_loops, g->p.bytes_global/1024/1024);
 	printf(" # %5dx %5ldMB process shared mem operations\n",