The CAT test does not reset the CPU affinity after the benchmark. This is relatively harmless as is, because the CAT test is the last benchmark to run; however, more tests may be added later.

Store the CPU affinity the first time taskset_benchmark() is run, and add taskset_restore(), which the test can call to reset the CPU mask to its original value.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
---
v4:
- Reworded error prints

v3:
- New patch
---
 tools/testing/selftests/resctrl/cat_test.c    | 13 +++++---
 tools/testing/selftests/resctrl/resctrl.h     |  3 +-
 tools/testing/selftests/resctrl/resctrl_val.c |  2 +-
 tools/testing/selftests/resctrl/resctrlfs.c   | 33 +++++++++++++++++--
 4 files changed, 42 insertions(+), 9 deletions(-)
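
For reference only (not part of the patch): a minimal standalone sketch of the
save/pin/restore pattern that the taskset_benchmark()/taskset_restore() pair
implements via sched_getaffinity()/sched_setaffinity(). The choice of CPU 0 and
the error handling are illustrative assumptions, not the selftest code itself.

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		cpu_set_t old_affinity, my_set;
		pid_t pid = getpid();

		/* Save the current affinity so it can be restored later */
		CPU_ZERO(&old_affinity);
		if (sched_getaffinity(pid, sizeof(old_affinity), &old_affinity)) {
			perror("sched_getaffinity");
			return 1;
		}

		/* Pin to CPU 0 (analogous to taskset_benchmark()) */
		CPU_ZERO(&my_set);
		CPU_SET(0, &my_set);
		if (sched_setaffinity(pid, sizeof(my_set), &my_set)) {
			perror("sched_setaffinity");
			return 1;
		}

		/* ... run the benchmark here ... */

		/* Restore the original mask (analogous to taskset_restore()) */
		if (sched_setaffinity(pid, sizeof(old_affinity), &old_affinity)) {
			perror("sched_setaffinity (restore)");
			return 1;
		}

		return 0;
	}

A caller that does not need its affinity restored can pass NULL for the old
mask, as resctrl_val() does below.
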
diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
index b79916069788..fa95433297c9 100644
--- a/tools/testing/selftests/resctrl/cat_test.c
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -156,6 +156,7 @@ static int cat_test(struct resctrl_val_param *param, size_t span, unsigned long
 	char *resctrl_val = param->resctrl_val;
 	struct perf_event_read pe_read;
 	struct perf_event_attr pea;
+	cpu_set_t old_affinity;
 	unsigned char *buf;
 	char schemata[64];
 	int ret, i, pe_fd;
@@ -167,7 +168,7 @@ static int cat_test(struct resctrl_val_param *param, size_t span, unsigned long
 	bm_pid = getpid();
 
 	/* Taskset benchmark to specified cpu */
-	ret = taskset_benchmark(bm_pid, param->cpu_no);
+	ret = taskset_benchmark(bm_pid, param->cpu_no, &old_affinity);
 	if (ret)
 		return ret;
 
@@ -175,13 +176,15 @@ static int cat_test(struct resctrl_val_param *param, size_t span, unsigned long
 	ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
 				      resctrl_val);
 	if (ret)
-		return ret;
+		goto reset_affinity;
 
 	perf_event_attr_initialize(&pea, PERF_COUNT_HW_CACHE_MISSES);
 	perf_event_initialize_read_format(&pe_read);
 	pe_fd = perf_open(&pea, bm_pid, param->cpu_no);
-	if (pe_fd < 0)
-		return pe_fd;
+	if (pe_fd < 0) {
+		ret = -1;
+		goto reset_affinity;
+	}
 
 	buf = alloc_buffer(span, 1);
 	if (!buf) {
@@ -220,6 +223,8 @@ static int cat_test(struct resctrl_val_param *param, size_t span, unsigned long
 	free(buf);
 pe_close:
 	close(pe_fd);
+reset_affinity:
+	taskset_restore(bm_pid, &old_affinity);
 
 	return ret;
 }
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
index 4f040e999eea..94347acafe55 100644
--- a/tools/testing/selftests/resctrl/resctrl.h
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -98,7 +98,8 @@ int umount_resctrlfs(void);
 int validate_bw_report_request(char *bw_report);
 bool validate_resctrl_feature_request(const char *resource, const char *feature);
 char *fgrep(FILE *inf, const char *str);
-int taskset_benchmark(pid_t bm_pid, int cpu_no);
+int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity);
+int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity);
 int write_schemata(char *ctrlgrp, char *schemata, int cpu_no,
 		   char *resctrl_val);
 int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
index f6859fe433d9..6574a13f3919 100644
--- a/tools/testing/selftests/resctrl/resctrl_val.c
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -777,7 +777,7 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par
 	value.sival_ptr = (void *)benchmark_cmd;
 
 	/* Taskset benchmark to specified cpu */
-	ret = taskset_benchmark(bm_pid, param->cpu_no);
+	ret = taskset_benchmark(bm_pid, param->cpu_no, NULL);
 	if (ret)
 		goto out;
 
diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
index 846281e429ca..1bcb9aa96cc2 100644
--- a/tools/testing/selftests/resctrl/resctrlfs.c
+++ b/tools/testing/selftests/resctrl/resctrlfs.c
@@ -345,15 +345,25 @@ int get_mask_no_shareable(const char *cache_type, unsigned long *mask)
 
 /*
  * taskset_benchmark - Taskset PID (i.e. benchmark) to a specified cpu
- * @bm_pid:	PID that should be binded
- * @cpu_no:	CPU number at which the PID would be binded
+ * @bm_pid:		PID that should be binded
+ * @cpu_no:		CPU number at which the PID would be binded
+ * @old_affinity:	When not NULL, set to old CPU affinity
  *
  * Return: 0 on success, < 0 on error.
  */
-int taskset_benchmark(pid_t bm_pid, int cpu_no)
+int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity)
 {
 	cpu_set_t my_set;
 
+	if (old_affinity) {
+		CPU_ZERO(old_affinity);
+		if (sched_getaffinity(bm_pid, sizeof(*old_affinity),
+				      old_affinity)) {
+			ksft_perror("Unable to read CPU affinity");
+			return -1;
+		}
+	}
+
 	CPU_ZERO(&my_set);
 	CPU_SET(cpu_no, &my_set);
 
@@ -366,6 +376,23 @@ int taskset_benchmark(pid_t bm_pid, int cpu_no)
 	return 0;
 }
 
+/*
+ * taskset_restore - Taskset PID to the earlier CPU affinity
+ * @bm_pid:		PID that should be reset
+ * @old_affinity:	The old CPU affinity to restore
+ *
+ * Return: 0 on success, < 0 on error.
+ */
+int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity)
+{
+	if (sched_setaffinity(bm_pid, sizeof(*old_affinity), old_affinity)) {
+		ksft_perror("Unable to restore CPU affinity");
+		return -1;
+	}
+
+	return 0;
+}
+
 /*
  * create_grp - Create a group only if one doesn't exist
  * @grp_name:	Name of the group