selftests/resctrl: Restore the CPU affinity after CAT test
author    Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
          Fri, 15 Dec 2023 15:05:09 +0000 (17:05 +0200)
committer Shuah Khan <skhan@linuxfoundation.org>
          Tue, 13 Feb 2024 20:56:45 +0000 (13:56 -0700)
The CAT test does not reset the CPU affinity after the benchmark.
This is relatively harmless as-is because the CAT test is the last
benchmark to run; however, more tests may be added later.

Store the CPU affinity the first time taskset_benchmark() is run and
add taskset_restore(), which the test can call to reset the CPU mask to
its original value (a standalone sketch of the pattern follows the list
of changed files below).

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
tools/testing/selftests/resctrl/cat_test.c
tools/testing/selftests/resctrl/resctrl.h
tools/testing/selftests/resctrl/resctrl_val.c
tools/testing/selftests/resctrl/resctrlfs.c
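
For readers outside the selftest tree, the following is a minimal, standalone
sketch of the save/bind/restore pattern that taskset_benchmark() and
taskset_restore() implement in the patch below. The names bind_to_cpu() and
restore_affinity() are hypothetical stand-ins for those helpers, and plain
perror() replaces the kselftest ksft_perror() wrapper so the example compiles
on its own; it is an illustration, not the selftest code itself.

/*
 * Standalone illustration of the CPU-affinity save/bind/restore pattern
 * added by this patch.  bind_to_cpu() and restore_affinity() are
 * hypothetical stand-ins for taskset_benchmark() and taskset_restore().
 * Build with: gcc -o affinity affinity.c
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/* Pin @pid to @cpu_no; if @old_affinity is non-NULL, save the current mask first. */
static int bind_to_cpu(pid_t pid, int cpu_no, cpu_set_t *old_affinity)
{
	cpu_set_t my_set;

	if (old_affinity) {
		CPU_ZERO(old_affinity);
		if (sched_getaffinity(pid, sizeof(*old_affinity), old_affinity)) {
			perror("Unable to read CPU affinity");
			return -1;
		}
	}

	CPU_ZERO(&my_set);
	CPU_SET(cpu_no, &my_set);
	if (sched_setaffinity(pid, sizeof(my_set), &my_set)) {
		perror("Unable to taskset benchmark");
		return -1;
	}

	return 0;
}

/* Put the previously saved CPU mask back. */
static int restore_affinity(pid_t pid, const cpu_set_t *old_affinity)
{
	if (sched_setaffinity(pid, sizeof(*old_affinity), old_affinity)) {
		perror("Unable to restore CPU affinity");
		return -1;
	}

	return 0;
}

int main(void)
{
	cpu_set_t old_affinity;
	pid_t pid = getpid();

	if (bind_to_cpu(pid, 0, &old_affinity))
		return 1;

	/* ... run the benchmark pinned to CPU 0 here ... */

	/* Restore the original mask so later tests start unconstrained. */
	return restore_affinity(pid, &old_affinity) ? 1 : 0;
}

Note that in the resctrl_val.c hunk below the caller passes NULL for the new
argument, so no mask is saved or restored on that path.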

diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
index b79916069788218d4aeafa97780d16c8ca8aca06..fa95433297c91821571265c6363f36ca6b7d7708 100644
--- a/tools/testing/selftests/resctrl/cat_test.c
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -156,6 +156,7 @@ static int cat_test(struct resctrl_val_param *param, size_t span, unsigned long
        char *resctrl_val = param->resctrl_val;
        struct perf_event_read pe_read;
        struct perf_event_attr pea;
+       cpu_set_t old_affinity;
        unsigned char *buf;
        char schemata[64];
        int ret, i, pe_fd;
@@ -167,7 +168,7 @@ static int cat_test(struct resctrl_val_param *param, size_t span, unsigned long
        bm_pid = getpid();
 
        /* Taskset benchmark to specified cpu */
-       ret = taskset_benchmark(bm_pid, param->cpu_no);
+       ret = taskset_benchmark(bm_pid, param->cpu_no, &old_affinity);
        if (ret)
                return ret;
 
@@ -175,13 +176,15 @@ static int cat_test(struct resctrl_val_param *param, size_t span, unsigned long
        ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
                                      resctrl_val);
        if (ret)
-               return ret;
+               goto reset_affinity;
 
        perf_event_attr_initialize(&pea, PERF_COUNT_HW_CACHE_MISSES);
        perf_event_initialize_read_format(&pe_read);
        pe_fd = perf_open(&pea, bm_pid, param->cpu_no);
-       if (pe_fd < 0)
-               return pe_fd;
+       if (pe_fd < 0) {
+               ret = -1;
+               goto reset_affinity;
+       }
 
        buf = alloc_buffer(span, 1);
        if (!buf) {
@@ -220,6 +223,8 @@ free_buf:
        free(buf);
 pe_close:
        close(pe_fd);
+reset_affinity:
+       taskset_restore(bm_pid, &old_affinity);
 
        return ret;
 }
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
index 4f040e999eea55ca7934b472ceea0b0a99b55c8f..94347acafe55e44dc8e109e09478981235e3b1c1 100644
--- a/tools/testing/selftests/resctrl/resctrl.h
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -98,7 +98,8 @@ int umount_resctrlfs(void);
 int validate_bw_report_request(char *bw_report);
 bool validate_resctrl_feature_request(const char *resource, const char *feature);
 char *fgrep(FILE *inf, const char *str);
-int taskset_benchmark(pid_t bm_pid, int cpu_no);
+int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity);
+int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity);
 int write_schemata(char *ctrlgrp, char *schemata, int cpu_no,
                   char *resctrl_val);
 int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
index f6859fe433d9dcdefc25faf4428344b10550a552..6574a13f3919de564f1f7362225d329aee0fdb83 100644
--- a/tools/testing/selftests/resctrl/resctrl_val.c
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -777,7 +777,7 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par
        value.sival_ptr = (void *)benchmark_cmd;
 
        /* Taskset benchmark to specified cpu */
-       ret = taskset_benchmark(bm_pid, param->cpu_no);
+       ret = taskset_benchmark(bm_pid, param->cpu_no, NULL);
        if (ret)
                goto out;
 
diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
index 846281e429ca946317a2e2cef929437b92150296..1bcb9aa96cc24735615042475b7cdb1fd8392159 100644
--- a/tools/testing/selftests/resctrl/resctrlfs.c
+++ b/tools/testing/selftests/resctrl/resctrlfs.c
@@ -345,15 +345,25 @@ int get_mask_no_shareable(const char *cache_type, unsigned long *mask)
 
 /*
  * taskset_benchmark - Taskset PID (i.e. benchmark) to a specified cpu
- * @bm_pid:    PID that should be binded
- * @cpu_no:    CPU number at which the PID would be binded
+ * @bm_pid:            PID that should be binded
+ * @cpu_no:            CPU number at which the PID would be binded
+ * @old_affinity:      When not NULL, set to old CPU affinity
  *
  * Return: 0 on success, < 0 on error.
  */
-int taskset_benchmark(pid_t bm_pid, int cpu_no)
+int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity)
 {
        cpu_set_t my_set;
 
+       if (old_affinity) {
+               CPU_ZERO(old_affinity);
+               if (sched_getaffinity(bm_pid, sizeof(*old_affinity),
+                                     old_affinity)) {
+                       ksft_perror("Unable to read CPU affinity");
+                       return -1;
+               }
+       }
+
        CPU_ZERO(&my_set);
        CPU_SET(cpu_no, &my_set);
 
@@ -366,6 +376,23 @@ int taskset_benchmark(pid_t bm_pid, int cpu_no)
        return 0;
 }
 
+/*
+ * taskset_restore - Taskset PID to the earlier CPU affinity
+ * @bm_pid:            PID that should be reset
+ * @old_affinity:      The old CPU affinity to restore
+ *
+ * Return: 0 on success, < 0 on error.
+ */
+int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity)
+{
+       if (sched_setaffinity(bm_pid, sizeof(*old_affinity), old_affinity)) {
+               ksft_perror("Unable to restore CPU affinity");
+               return -1;
+       }
+
+       return 0;
+}
+
 /*
  * create_grp - Create a group only if one doesn't exist
  * @grp_name:  Name of the group