This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, master has been updated
       via  e5dcd6f21ed52d068336a662976fd0e893f639f1 (commit)
       via  1a3f614126a1cf268f4829387642ab7111c63048 (commit)
       via  a1845466e0410a891db5ebeeae0c3131f2c6ad64 (commit)
       via  92f040d4669c5e3df5b8f3986e1667f00237c9d1 (commit)
      from  a8fca1ed79b6e5173c14fdfff43ff5460f61ef68 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log -----------------------------------------------------------------
commit e5dcd6f21ed52d068336a662976fd0e893f639f1
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Thu Jul 4 14:55:02 2019 +0300
test: sched_perf: add wait option to simulate work
Added a -w option to simulate application work between the schedule and queue enqueue calls. The wait time is given in nanoseconds. The CPU cycles consumed by the wait are measured and subtracted from the test cycles when the cycles-per-event result is calculated.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Suggested-by: Stanislaw Kardach <skardach@marvell.com>
Reviewed-by: Stanislaw Kardach <skardach@marvell.com>
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
index 54ee801d5..04d125be0 100644
--- a/test/performance/odp_sched_perf.c
+++ b/test/performance/odp_sched_perf.c
@@ -31,6 +31,7 @@ typedef struct test_options_t {
         uint32_t queue_size;
         uint32_t tot_queue;
         uint32_t tot_event;
+        uint64_t wait_ns;
 
 } test_options_t;
 
@@ -40,6 +41,7 @@ typedef struct test_stat_t {
         uint64_t events;
         uint64_t nsec;
         uint64_t cycles;
+        uint64_t waits;
 
 } test_stat_t;
 
@@ -85,6 +87,7 @@ static void print_usage(void)
                "  -b, --burst     Maximum number of events per operation. Default: 100.\n"
                "  -t, --type      Queue type. 0: parallel, 1: atomic, 2: ordered. Default: 0.\n"
                "  -f, --forward   0: Keep event in the original queue, 1: Forward event to the next queue. Default: 0.\n"
+               "  -w, --wait_ns   Number of nsec to wait before enqueueing events. Default: 0.\n"
                "  -h, --help      This help\n"
                "\n");
 }
@@ -107,11 +110,12 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
                 {"burst",     required_argument, NULL, 'b'},
                 {"type",      required_argument, NULL, 't'},
                 {"forward",   required_argument, NULL, 'f'},
+                {"wait_ns",   required_argument, NULL, 'w'},
                 {"help",      no_argument,       NULL, 'h'},
                 {NULL, 0, NULL, 0}
         };
 
-        static const char *shortopts = "+c:q:d:e:r:g:j:b:t:f:h";
+        static const char *shortopts = "+c:q:d:e:r:g:j:b:t:f:w:h";
 
         test_options->num_cpu = 1;
         test_options->num_queue = 1;
@@ -123,6 +127,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
         test_options->max_burst = 100;
         test_options->queue_type = 0;
         test_options->forward = 0;
+        test_options->wait_ns = 0;
 
         while (1) {
                 opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -161,6 +166,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
                 case 'f':
                         test_options->forward = atoi(optarg);
                         break;
+                case 'w':
+                        test_options->wait_ns = atoll(optarg);
+                        break;
                 case 'h':
                         /* fall through */
                 default:
@@ -265,6 +273,7 @@ static int create_pool(test_global_t *global)
         uint32_t num_group = test_options->num_group;
         uint32_t num_join = test_options->num_join;
         int forward = test_options->forward;
+        uint64_t wait_ns = test_options->wait_ns;
 
         printf("\nScheduler performance test\n");
         printf("  num cpu           %u\n", num_cpu);
@@ -279,6 +288,7 @@ static int create_pool(test_global_t *global)
         printf("  total events      %u\n", tot_event);
         printf("  num rounds        %u\n", num_round);
         printf("  forward events    %i\n", forward ? 1 : 0);
+        printf("  wait nsec         %" PRIu64 "\n", wait_ns);
 
         if (odp_pool_capability(&pool_capa)) {
                 printf("Error: Pool capa failed.\n");
@@ -548,7 +558,7 @@ static int test_sched(void *arg)
         int num, num_enq, ret, thr;
         uint32_t i, rounds;
         uint64_t c1, c2, cycles, nsec;
-        uint64_t events, enqueues;
+        uint64_t events, enqueues, waits;
         odp_time_t t1, t2;
         odp_queue_t queue;
         odp_queue_t *next;
@@ -559,6 +569,7 @@ static int test_sched(void *arg)
         uint32_t max_burst = test_options->max_burst;
         uint32_t num_group = test_options->num_group;
         int forward = test_options->forward;
+        uint64_t wait_ns = test_options->wait_ns;
         odp_event_t ev[max_burst];
 
         thr = odp_thread_id();
@@ -598,6 +609,7 @@ static int test_sched(void *arg)
 
         enqueues = 0;
         events = 0;
+        waits = 0;
         ret = 0;
 
         /* Start all workers at the same time */
@@ -619,6 +631,11 @@ static int test_sched(void *arg)
                         queue = *next;
                 }
 
+                if (odp_unlikely(wait_ns)) {
+                        waits++;
+                        odp_time_wait_ns(wait_ns);
+                }
+
                 while (num) {
                         num_enq = odp_queue_enq_multi(queue, &ev[i], num);
 
@@ -661,6 +678,7 @@ static int test_sched(void *arg)
         global->stat[thr].events = events;
         global->stat[thr].nsec = nsec;
         global->stat[thr].cycles = cycles;
+        global->stat[thr].waits = waits;
 
         /* Pause scheduling before thread exit */
         odp_schedule_pause();
@@ -728,17 +746,51 @@ static int start_workers(test_global_t *global, odp_instance_t instance)
         return 0;
 }
 
+static double measure_wait_time_cycles(uint64_t wait_ns)
+{
+        uint64_t i, c1, c2, diff;
+        uint64_t rounds;
+        double wait_cycles;
+
+        if (wait_ns == 0)
+                return 0.0;
+
+        /* Run measurement for 100msec or at least two times, so that effect
+         * from CPU frequency scaling is minimized. */
+        rounds = (100 * ODP_TIME_MSEC_IN_NS) / wait_ns;
+        if (rounds == 0)
+                rounds = 2;
+
+        c1 = odp_cpu_cycles();
+
+        for (i = 0; i < rounds; i++)
+                odp_time_wait_ns(wait_ns);
+
+        c2 = odp_cpu_cycles();
+        diff = odp_cpu_cycles_diff(c2, c1);
+        wait_cycles = (double)diff / rounds;
+
+        printf("\nMeasured wait cycles: %.3f\n", wait_cycles);
+
+        return wait_cycles;
+}
+
 static void print_stat(test_global_t *global)
 {
         int i, num;
         double rounds_ave, enqueues_ave, events_ave, nsec_ave, cycles_ave;
+        double waits_ave, wait_cycles, wait_cycles_ave;
         test_options_t *test_options = &global->test_options;
         int num_cpu = test_options->num_cpu;
+        uint64_t wait_ns = test_options->wait_ns;
         uint64_t rounds_sum = 0;
         uint64_t enqueues_sum = 0;
         uint64_t events_sum = 0;
         uint64_t nsec_sum = 0;
         uint64_t cycles_sum = 0;
+        uint64_t waits_sum = 0;
+
+        wait_cycles = measure_wait_time_cycles(wait_ns);
 
         /* Averages */
         for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
@@ -747,6 +799,7 @@ static void print_stat(test_global_t *global)
                 events_sum += global->stat[i].events;
                 nsec_sum += global->stat[i].nsec;
                 cycles_sum += global->stat[i].cycles;
+                waits_sum += global->stat[i].waits;
         }
 
         if (rounds_sum == 0) {
@@ -759,6 +812,8 @@ static void print_stat(test_global_t *global)
         events_ave = events_sum / num_cpu;
         nsec_ave = nsec_sum / num_cpu;
         cycles_ave = cycles_sum / num_cpu;
+        waits_ave = waits_sum / num_cpu;
+        wait_cycles_ave = waits_ave * wait_cycles;
         num = 0;
 
         printf("\n");
@@ -788,6 +843,10 @@ static void print_stat(test_global_t *global)
                cycles_ave / rounds_ave);
         printf("  cycles per event:       %.3f\n",
                cycles_ave / events_ave);
+        if (wait_ns) {
+                printf("  without wait_ns cycles: %.3f\n",
+                       (cycles_ave - wait_cycles_ave) / events_ave);
+        }
         printf("  ave events received:    %.3f\n",
                events_ave / rounds_ave);
         printf("  rounds per sec:         %.3f M\n",
commit 1a3f614126a1cf268f4829387642ab7111c63048
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Thu Jul 4 09:52:09 2019 +0300
test: sched_perf: allocate queues and threads into groups
Implement queue and thread allocation into schedule groups. All queues are spread evenly across the groups. Depending on the -j option, worker threads join either all groups (the default) or only a subset of them.
By default, ODP_SCHED_GROUP_ALL is used as before.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-by: Stanislaw Kardach <skardach@marvell.com>
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
index a65b064ef..54ee801d5 100644
--- a/test/performance/odp_sched_perf.c
+++ b/test/performance/odp_sched_perf.c
@@ -43,17 +43,23 @@ typedef struct test_stat_t {
 
 } test_stat_t;
 
+typedef struct thread_arg_t {
+        void *global;
+        int first_group;
+
+} thread_arg_t;
+
 typedef struct test_global_t {
         test_options_t test_options;
-        odp_schedule_config_t schedule_config;
         odp_barrier_t barrier;
         odp_pool_t pool;
         odp_cpumask_t cpumask;
         odp_queue_t queue[MAX_QUEUES];
         odp_schedule_group_t group[MAX_GROUPS];
-        odph_odpthread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+        odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
         test_stat_t stat[ODP_THREAD_COUNT_MAX];
+        thread_arg_t thread_arg[ODP_THREAD_COUNT_MAX];
 
 } test_global_t;
 
@@ -354,6 +360,7 @@ static int create_queues(test_global_t *global)
         uint32_t num_event = test_options->num_event;
         uint32_t queue_size = test_options->queue_size;
         uint32_t tot_queue = test_options->tot_queue;
+        uint32_t num_group = test_options->num_group;
         int type = test_options->queue_type;
         odp_pool_t pool = global->pool;
 
@@ -391,6 +398,14 @@ static int create_queues(test_global_t *global)
         queue_param.size = queue_size;
 
         for (i = 0; i < tot_queue; i++) {
+                if (num_group) {
+                        odp_schedule_group_t group;
+
+                        /* Divide all queues evenly into groups */
+                        group = global->group[i % num_group];
+                        queue_param.sched.group = group;
+                }
+
                 queue = odp_queue_create(NULL, &queue_param);
 
                 global->queue[i] = queue;
@@ -440,6 +455,44 @@ static int create_queues(test_global_t *global)
         return 0;
 }
 
+static int join_group(test_global_t *global, int grp_index, int thr)
+{
+        odp_thrmask_t thrmask;
+        odp_schedule_group_t group;
+
+        odp_thrmask_zero(&thrmask);
+        odp_thrmask_set(&thrmask, thr);
+        group = global->group[grp_index];
+
+        if (odp_schedule_group_join(group, &thrmask)) {
+                printf("Error: Group %i join failed (thr %i)\n",
+                       grp_index, thr);
+                return -1;
+        }
+
+        return 0;
+}
+
+static int join_all_groups(test_global_t *global, int thr)
+{
+        uint32_t i;
+        test_options_t *test_options = &global->test_options;
+        uint32_t num_group = test_options->num_group;
+
+        if (num_group == 0)
+                return 0;
+
+        for (i = 0; i < num_group; i++) {
+                if (join_group(global, i, thr)) {
+                        printf("Error: Group %u join failed (thr %i)\n",
+                               i, thr);
+                        return -1;
+                }
+        }
+
+        return 0;
+}
+
 static int destroy_queues(test_global_t *global)
 {
         uint32_t i;
@@ -447,6 +500,10 @@ static int destroy_queues(test_global_t *global)
         uint64_t wait;
         test_options_t *test_options = &global->test_options;
         uint32_t tot_queue = test_options->tot_queue;
+        int thr = odp_thread_id();
+
+        if (join_all_groups(global, thr))
+                return -1;
 
         wait = odp_schedule_wait_time(200 * ODP_TIME_MSEC_IN_NS);
 
@@ -495,15 +552,47 @@ static int test_sched(void *arg)
         odp_time_t t1, t2;
         odp_queue_t queue;
         odp_queue_t *next;
-        test_global_t *global = arg;
+        thread_arg_t *thread_arg = arg;
+        test_global_t *global = thread_arg->global;
         test_options_t *test_options = &global->test_options;
         uint32_t num_round = test_options->num_round;
         uint32_t max_burst = test_options->max_burst;
+        uint32_t num_group = test_options->num_group;
         int forward = test_options->forward;
         odp_event_t ev[max_burst];
 
         thr = odp_thread_id();
 
+        if (num_group) {
+                uint32_t num_join = test_options->num_join;
+
+                if (num_join) {
+                        int pos = 0;
+                        int n = 512;
+                        char str[n];
+                        int group_index = thread_arg->first_group;
+
+                        pos += snprintf(&str[pos], n - pos,
+                                        "Thread %i joined groups:", thr);
+
+                        for (i = 0; i < num_join; i++) {
+                                if (join_group(global, group_index, thr))
+                                        return -1;
+
+                                pos += snprintf(&str[pos], n - pos, " %i",
+                                                group_index);
+
+                                group_index = (group_index + 1) % num_group;
+                        }
+
+                        printf("%s\n", str);
+
+                } else {
+                        if (join_all_groups(global, thr))
+                                return -1;
+                }
+        }
+
         for (i = 0; i < max_burst; i++)
                 ev[i] = ODP_EVENT_INVALID;
 
@@ -595,19 +684,46 @@
 
 static int start_workers(test_global_t *global, odp_instance_t instance)
 {
-        odph_odpthread_params_t thr_params;
+        odph_thread_common_param_t thr_common;
+        int i, ret;
         test_options_t *test_options = &global->test_options;
-        int num_cpu = test_options->num_cpu;
+        uint32_t num_group = test_options->num_group;
+        uint32_t num_join = test_options->num_join;
+        int num_cpu = test_options->num_cpu;
+        odph_thread_param_t thr_param[num_cpu];
+
+        memset(global->thread_tbl, 0, sizeof(global->thread_tbl));
+        memset(thr_param, 0, sizeof(thr_param));
+        memset(&thr_common, 0, sizeof(thr_common));
+
+        thr_common.instance = instance;
+        thr_common.cpumask = &global->cpumask;
 
-        memset(&thr_params, 0, sizeof(thr_params));
-        thr_params.thr_type = ODP_THREAD_WORKER;
-        thr_params.instance = instance;
-        thr_params.start = test_sched;
-        thr_params.arg = global;
+        for (i = 0; i < num_cpu; i++) {
+                thr_param[i].start = test_sched;
+                thr_param[i].arg = &global->thread_arg[i];
+                thr_param[i].thr_type = ODP_THREAD_WORKER;
 
-        if (odph_odpthreads_create(global->thread_tbl, &global->cpumask,
-                                   &thr_params) != num_cpu)
+                global->thread_arg[i].global = global;
+                global->thread_arg[i].first_group = 0;
+
+                if (num_group && num_join) {
+                        /* Each thread joins only num_join groups, starting
+                         * from this group index and wraping around the group
+                         * table. */
+                        int first_group = (i * num_join) % num_group;
+
+                        global->thread_arg[i].first_group = first_group;
+                }
+        }
+
+        ret = odph_thread_create(global->thread_tbl, &thr_common, thr_param,
+                                 num_cpu);
+
+        if (ret != num_cpu) {
+                printf("Error: thread create failed %i\n", ret);
                 return -1;
+        }
 
         return 0;
 }
@@ -645,6 +761,7 @@ static void print_stat(test_global_t *global)
         cycles_ave = cycles_sum / num_cpu;
         num = 0;
 
+        printf("\n");
         printf("RESULTS - per thread (Million events per sec):\n");
         printf("----------------------------------------------\n");
         printf("         1        2        3        4        5        6        7        8        9       10");
@@ -734,7 +851,7 @@ int main(int argc, char **argv)
         start_workers(global, instance);
 
         /* Wait workers to exit */
-        odph_odpthreads_join(global->thread_tbl);
+        odph_thread_join(global->thread_tbl, global->test_options.num_cpu);
 
         if (destroy_queues(global))
                 return -1;
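
The first_group arithmetic in start_workers() combined with the join loop in test_sched() is a plain round robin over the group table. The standalone sketch below (hypothetical helper, not part of the patch) prints which group indexes each worker joins:

        #include <stdio.h>
        #include <stdint.h>

        /* Worker i starts at group (i * num_join) % num_group and joins
         * num_join consecutive groups, wrapping around the group table. */
        static void print_joined_groups(uint32_t num_cpu, uint32_t num_group,
                                        uint32_t num_join)
        {
                uint32_t i, j;

                if (num_group == 0 || num_join == 0)
                        return;

                for (i = 0; i < num_cpu; i++) {
                        uint32_t group = (i * num_join) % num_group;

                        printf("worker %u:", i);
                        for (j = 0; j < num_join; j++) {
                                printf(" %u", group);
                                group = (group + 1) % num_group;
                        }
                        printf("\n");
                }
        }

For example, with -c 4 -g 8 -j 2 the four workers join groups {0,1}, {2,3}, {4,5} and {6,7}, so every group is served exactly once. The num_join and unserved-group checks added in commit a1845466 below cover the cases where the division does not work out evenly.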
commit a1845466e0410a891db5ebeeae0c3131f2c6ad64
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Wed Jul 3 15:51:54 2019 +0300
test: sched_perf: add schedule group options
Added -g and -j options to test scheduling with a number of schedule groups. This patch adds only option handling and group create/destroy. Queues and threads are not yet allocated into new groups.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-by: Stanislaw Kardach <skardach@marvell.com>
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
index 3cc4630d5..a65b064ef 100644
--- a/test/performance/odp_sched_perf.c
+++ b/test/performance/odp_sched_perf.c
@@ -15,6 +15,7 @@
 #include <odp/helper/odph_api.h>
 
 #define MAX_QUEUES  (256 * 1024)
+#define MAX_GROUPS  256
 
 typedef struct test_options_t {
         uint32_t num_cpu;
@@ -22,6 +23,8 @@ typedef struct test_options_t {
         uint32_t num_dummy;
         uint32_t num_event;
         uint32_t num_round;
+        uint32_t num_group;
+        uint32_t num_join;
         uint32_t max_burst;
         int queue_type;
         int forward;
@@ -48,6 +51,7 @@ typedef struct test_global_t {
         odp_pool_t pool;
         odp_cpumask_t cpumask;
         odp_queue_t queue[MAX_QUEUES];
+        odp_schedule_group_t group[MAX_GROUPS];
         odph_odpthread_t thread_tbl[ODP_THREAD_COUNT_MAX];
         test_stat_t stat[ODP_THREAD_COUNT_MAX];
 
@@ -67,6 +71,11 @@ static void print_usage(void)
                "  -d, --num_dummy Number of empty queues. Default: 0.\n"
                "  -e, --num_event Number of events per queue. Default: 100.\n"
                "  -r, --num_round Number of rounds\n"
+               "  -g, --num_group Number of schedule groups. Round robins threads and queues into groups.\n"
+               "                  0: SCHED_GROUP_ALL (default)\n"
+               "  -j, --num_join  Number of groups a thread joins. Threads are divide evenly into groups,\n"
+               "                  if num_cpu is multiple of num_group and num_group is multiple of num_join.\n"
+               "                  0: join all groups (default)\n"
                "  -b, --burst     Maximum number of events per operation. Default: 100.\n"
                "  -t, --type      Queue type. 0: parallel, 1: atomic, 2: ordered. Default: 0.\n"
                "  -f, --forward   0: Keep event in the original queue, 1: Forward event to the next queue. Default: 0.\n"
@@ -79,6 +88,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
         int opt;
         int long_index;
         int ret = 0;
+        uint32_t num_group, num_join;
 
         static const struct option longopts[] = {
                 {"num_cpu",   required_argument, NULL, 'c'},
@@ -86,6 +96,8 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
                 {"num_dummy", required_argument, NULL, 'd'},
                 {"num_event", required_argument, NULL, 'e'},
                 {"num_round", required_argument, NULL, 'r'},
+                {"num_group", required_argument, NULL, 'g'},
+                {"num_join",  required_argument, NULL, 'j'},
                 {"burst",     required_argument, NULL, 'b'},
                 {"type",      required_argument, NULL, 't'},
                 {"forward",   required_argument, NULL, 'f'},
@@ -93,13 +105,15 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
                 {NULL, 0, NULL, 0}
         };
 
-        static const char *shortopts = "+c:q:d:e:r:b:t:f:h";
+        static const char *shortopts = "+c:q:d:e:r:g:j:b:t:f:h";
 
         test_options->num_cpu = 1;
         test_options->num_queue = 1;
         test_options->num_dummy = 0;
         test_options->num_event = 100;
         test_options->num_round = 100000;
+        test_options->num_group = 0;
+        test_options->num_join = 0;
         test_options->max_burst = 100;
         test_options->queue_type = 0;
         test_options->forward = 0;
@@ -126,6 +140,12 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
                 case 'r':
                         test_options->num_round = atoi(optarg);
                         break;
+                case 'g':
+                        test_options->num_group = atoi(optarg);
+                        break;
+                case 'j':
+                        test_options->num_join = atoi(optarg);
+                        break;
                 case 'b':
                         test_options->max_burst = atoi(optarg);
                         break;
@@ -145,11 +165,36 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
         }
 
         if ((test_options->num_queue + test_options->num_dummy) > MAX_QUEUES) {
-                printf("Error: Too many queues. Max supported %i\n.",
+                printf("Error: Too many queues. Max supported %i.\n",
                        MAX_QUEUES);
                 ret = -1;
         }
 
+        num_group = test_options->num_group;
+        num_join = test_options->num_join;
+        if (num_group > MAX_GROUPS) {
+                printf("Error: Too many groups. Max supported %i.\n",
+                       MAX_GROUPS);
+                ret = -1;
+        }
+
+        if (num_join > num_group) {
+                printf("Error: num_join (%u) larger than num_group (%u).\n",
+                       num_join, num_group);
+                ret = -1;
+        }
+
+        if (num_join && num_group > (test_options->num_cpu * num_join)) {
+                printf("WARNING: Too many groups (%u). Some groups (%u) are not served.\n\n",
+                       num_group,
+                       num_group - (test_options->num_cpu * num_join));
+
+                if (test_options->forward) {
+                        printf("Error: Cannot forward when some queues are not served.\n");
+                        ret = -1;
+                }
+        }
+
         test_options->tot_queue = test_options->num_queue +
                                   test_options->num_dummy;
         test_options->tot_event = test_options->num_queue *
@@ -211,6 +256,8 @@ static int create_pool(test_global_t *global)
         uint32_t tot_queue = test_options->tot_queue;
         uint32_t tot_event = test_options->tot_event;
         uint32_t queue_size = test_options->queue_size;
+        uint32_t num_group = test_options->num_group;
+        uint32_t num_join = test_options->num_join;
         int forward = test_options->forward;
 
         printf("\nScheduler performance test\n");
@@ -218,6 +265,8 @@ static int create_pool(test_global_t *global)
         printf("  num queues        %u\n", num_queue);
         printf("  num empty queues  %u\n", num_dummy);
         printf("  total queues      %u\n", tot_queue);
+        printf("  num groups        %u\n", num_group);
+        printf("  num join          %u\n", num_join);
         printf("  events per queue  %u\n", num_event);
         printf("  queue size        %u\n", queue_size);
         printf("  max burst size    %u\n", max_burst);
@@ -253,6 +302,46 @@ static int create_pool(test_global_t *global)
         return 0;
 }
 
+static int create_groups(test_global_t *global)
+{
+        odp_schedule_capability_t sched_capa;
+        odp_thrmask_t thrmask;
+        uint32_t i;
+        test_options_t *test_options = &global->test_options;
+        uint32_t num_group = test_options->num_group;
+
+        if (num_group == 0)
+                return 0;
+
+        if (odp_schedule_capability(&sched_capa)) {
+                printf("Error: schedule capability failed\n");
+                return -1;
+        }
+
+        if (num_group > sched_capa.max_groups) {
+                printf("Error: Too many sched groups (max_groups capa %u)\n",
+                       sched_capa.max_groups);
+                return -1;
+        }
+
+        odp_thrmask_zero(&thrmask);
+
+        for (i = 0; i < num_group; i++) {
+                odp_schedule_group_t group;
+
+                group = odp_schedule_group_create("test_group", &thrmask);
+
+                if (group == ODP_SCHED_GROUP_INVALID) {
+                        printf("Error: Group create failed %u\n", i);
+                        return -1;
+                }
+
+                global->group[i] = group;
+        }
+
+        return 0;
+}
+
 static int create_queues(test_global_t *global)
 {
         odp_queue_param_t queue_param;
@@ -376,6 +465,27 @@ static int destroy_queues(test_global_t *global)
         return 0;
 }
 
+static int destroy_groups(test_global_t *global)
+{
+        uint32_t i;
+        test_options_t *test_options = &global->test_options;
+        uint32_t num_group = test_options->num_group;
+
+        if (num_group == 0)
+                return 0;
+
+        for (i = 0; i < num_group; i++) {
+                odp_schedule_group_t group = global->group[i];
+
+                if (odp_schedule_group_destroy(group)) {
+                        printf("Error: Group destroy failed %u\n", i);
+                        return -1;
+                }
+        }
+
+        return 0;
+}
+
 static int test_sched(void *arg)
 {
         int num, num_enq, ret, thr;
@@ -614,6 +724,9 @@ int main(int argc, char **argv)
         if (create_pool(global))
                 return -1;
 
+        if (create_groups(global))
+                return -1;
+
         if (create_queues(global))
                 return -1;
 
@@ -626,6 +739,9 @@ int main(int argc, char **argv)
         if (destroy_queues(global))
                 return -1;
 
+        if (destroy_groups(global))
+                return -1;
+
         print_stat(global);
 
         if (odp_pool_destroy(global->pool)) {
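
A condensed sketch of the setup that create_groups() performs, assuming ODP has already been initialized elsewhere (the helper name is illustrative, not part of the patch):

        #include <odp_api.h>
        #include <stdio.h>

        /* Create num groups with an empty thread mask, as the patch does.
         * Worker threads are added later with odp_schedule_group_join(). */
        static int make_groups(odp_schedule_group_t group[], uint32_t num)
        {
                odp_thrmask_t thrmask;
                uint32_t i;

                odp_thrmask_zero(&thrmask);

                for (i = 0; i < num; i++) {
                        group[i] = odp_schedule_group_create("test_group",
                                                             &thrmask);
                        if (group[i] == ODP_SCHED_GROUP_INVALID) {
                                printf("Error: group create failed %u\n", i);
                                return -1;
                        }
                }

                return 0;
        }

Commit 1a3f614 above then assigns queues to these groups (group[i % num_group]) and makes the worker threads join them.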
commit 92f040d4669c5e3df5b8f3986e1667f00237c9d1
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Wed Jul 3 15:02:13 2019 +0300
test: sched_perf: allocate dummy queues first
When the dummy queues are created first, they shift the IDs of the active queues, which may have an impact on scheduler performance.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-by: Stanislaw Kardach <skardach@marvell.com>
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
index 65cf63bb9..3cc4630d5 100644
--- a/test/performance/odp_sched_perf.c
+++ b/test/performance/odp_sched_perf.c
@@ -260,11 +260,10 @@ static int create_queues(test_global_t *global)
         odp_buffer_t buf;
         odp_schedule_sync_t sync;
         const char *type_str;
-        uint32_t i, j;
+        uint32_t i, j, first;
         test_options_t *test_options = &global->test_options;
         uint32_t num_event = test_options->num_event;
         uint32_t queue_size = test_options->queue_size;
-        uint32_t num_queue = test_options->num_queue;
         uint32_t tot_queue = test_options->tot_queue;
         int type = test_options->queue_type;
         odp_pool_t pool = global->pool;
@@ -313,15 +312,19 @@ static int create_queues(test_global_t *global)
                 }
         }
 
-        /* Store events into queues. Dummy queues are left empty. */
-        for (i = 0; i < num_queue; i++) {
+        first = test_options->num_dummy;
+
+        /* Store events into queues. Dummy queues are allocated from
+         * the beginning of the array, so that usage of those affect allocation
+         * of active queues. Dummy queues are left empty. */
+        for (i = first; i < tot_queue; i++) {
                 queue = global->queue[i];
 
                 if (test_options->forward) {
                         uint32_t next = i + 1;
 
-                        if (next == num_queue)
-                                next = 0;
+                        if (next == tot_queue)
+                                next = first;
 
                         if (odp_queue_context_set(queue, &global->queue[next],
                                                   sizeof(odp_queue_t))) {
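
With the dummy queues placed at the start of the queue array, the active queues occupy indexes [num_dummy, tot_queue) and the forwarding chain wraps back to the first active queue. A minimal restatement of that indexing (illustrative helper, not from the patch):

        #include <stdint.h>

        /* Next queue index in the forwarding chain: active queues start right
         * after the dummy queues and the chain wraps to the first active one. */
        static uint32_t next_active_queue(uint32_t i, uint32_t num_dummy,
                                          uint32_t tot_queue)
        {
                uint32_t next = i + 1;

                if (next == tot_queue)
                        next = num_dummy;

                return next;
        }

Before this change the chain ran over indexes [0, num_queue) and wrapped to 0, so dummy queues created afterwards never influenced the IDs of the active queues.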
-----------------------------------------------------------------------
Summary of changes:
 test/performance/odp_sched_perf.c | 339 +++++++++++++++++++++++++++++++++++---
 1 file changed, 317 insertions(+), 22 deletions(-)
hooks/post-receive