This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, master has been updated
       via  f1e216da9011444789da716bbc258e530897da6d (commit)
       via  f7caa49d2ba750d1fd29a12e288b441a6dac545f (commit)
       via  f678476829f61a34d5d1e2b69b92c157786162e5 (commit)
       via  c37655b8b74aa50825be8d58afa18d91b4a9a1fd (commit)
       via  642f27acf0c39ade24d6ea6551dde362206f4d4f (commit)
       via  d2a9548017edaceec8cc682183690592c4c483b2 (commit)
       via  68e1cd81665c3b8deede1974d653593af85ec183 (commit)
      from  b921639a9df6a0300ca1c50b5853a37241806366 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log -----------------------------------------------------------------
commit f1e216da9011444789da716bbc258e530897da6d
Author: Matias Elo <matias.elo@nokia.com>
Date:   Fri Nov 1 12:00:41 2019 +0200
validation: scheduler: add scheduled and plain queue tests
Add new validation tests that verify packet order is preserved when events are dequeued from and enqueued back to a plain queue while an atomic or ordered scheduling context is held.
Signed-off-by: Matias Elo <matias.elo@nokia.com>
Reviewed-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-by: Stanislaw Kardach <skardach@marvell.com>
Tested-by: Jussi Kerttula <jussi.kerttula@nokia.com>
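In short, each worker repeats the pattern below (a minimal sketch in C, assuming
an atomic scheduled queue; 'wait', 'sched_queue' and 'plain_queue' come from the
test context, and with an ordered queue the plain queue access is additionally
wrapped in odp_schedule_order_lock(0)/odp_schedule_order_unlock(0), as the full
test in the diff below shows):

	/* Holding the atomic scheduling context makes this thread the only
	 * one accessing plain_queue, so event order cannot be broken. */
	odp_event_t ev_sched = odp_schedule(NULL, wait);

	if (ev_sched != ODP_EVENT_INVALID) {
		odp_event_t ev_plain = odp_queue_deq(plain_queue);

		odp_queue_enq(plain_queue, ev_plain); /* back to the tail */
		odp_queue_enq(sched_queue, ev_sched); /* context is released
							 by a later schedule
							 call */
	}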
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index da87deaef..ca92a3cf8 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -55,6 +55,8 @@
 #define WAIT_TOLERANCE (150 * ODP_TIME_MSEC_IN_NS)
 #define WAIT_1MS_RETRIES 1000
 
+#define SCHED_AND_PLAIN_ROUNDS 10000
+
 /* Test global variables */
 typedef struct {
 	int num_workers;
@@ -72,6 +74,10 @@ typedef struct {
 		odp_queue_t handle;
 		char name[ODP_QUEUE_NAME_LEN];
 	} chaos_q[CHAOS_NUM_QUEUES];
+	struct {
+		odp_queue_t sched;
+		odp_queue_t plain;
+	} sched_and_plain_q;
 } test_globals_t;
 
 typedef struct {
@@ -1780,6 +1786,229 @@ static void scheduler_test_ordered_lock(void)
 	CU_ASSERT(drain_queues() == 0);
 }
+static int sched_and_plain_thread(void *arg)
+{
+	odp_event_t ev1, ev2;
+	thread_args_t *args = (thread_args_t *)arg;
+	test_globals_t *globals = args->globals;
+	odp_queue_t sched_queue = globals->sched_and_plain_q.sched;
+	odp_queue_t plain_queue = globals->sched_and_plain_q.plain;
+	odp_schedule_sync_t sync = odp_queue_sched_type(sched_queue);
+	uint64_t i, wait;
+
+	/* Wait for all threads to start */
+	odp_barrier_wait(&globals->barrier);
+
+	/* Run the test */
+	wait = odp_schedule_wait_time(10 * ODP_TIME_MSEC_IN_NS);
+	for (i = 0; i < SCHED_AND_PLAIN_ROUNDS; i++) {
+		uint32_t rand_val;
+
+		/* Dequeue events from scheduled and plain queues */
+		ev1 = odp_schedule(NULL, wait);
+		if (ev1 == ODP_EVENT_INVALID)
+			continue;
+
+		if (sync == ODP_SCHED_SYNC_ORDERED)
+			odp_schedule_order_lock(0);
+
+		ev2 = odp_queue_deq(plain_queue);
+		CU_ASSERT_FATAL(ev2 != ODP_EVENT_INVALID);
+
+		/* Add random delay to stress scheduler implementation */
+		odp_random_data((uint8_t *)&rand_val, sizeof(rand_val),
+				ODP_RANDOM_BASIC);
+		odp_time_wait_ns(rand_val % ODP_TIME_USEC_IN_NS);
+
+		/* Enqueue events back to the end of the queues */
+		CU_ASSERT_FATAL(!odp_queue_enq(plain_queue, ev2));
+
+		if (sync == ODP_SCHED_SYNC_ORDERED)
+			odp_schedule_order_unlock(0);
+
+		CU_ASSERT_FATAL(!odp_queue_enq(sched_queue, ev1));
+	}
+
+	/* Make sure scheduling context is released */
+	odp_schedule_pause();
+	while ((ev1 = odp_schedule(NULL, wait)) != ODP_EVENT_INVALID)
+		CU_ASSERT_FATAL(!odp_queue_enq(sched_queue, ev1));
+
+	/* Don't resume scheduling until all threads have finished */
+	odp_barrier_wait(&globals->barrier);
+	odp_schedule_resume();
+
+	return 0;
+}
+
+static void scheduler_test_sched_and_plain(odp_schedule_sync_t sync)
+{
+	thread_args_t *args;
+	test_globals_t *globals;
+	odp_queue_t sched_queue;
+	odp_queue_t plain_queue;
+	odp_pool_t pool;
+	odp_queue_param_t queue_param;
+	odp_pool_param_t pool_param;
+	odp_queue_capability_t queue_capa;
+	odp_schedule_capability_t sched_capa;
+	odp_shm_t shm;
+	odp_event_t ev;
+	uint32_t *buf_data;
+	uint32_t seq;
+	uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+	uint32_t events_per_queue = BUFS_PER_QUEUE / 2;
+	uint32_t prev_seq;
+	int first;
+
+	CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
+	CU_ASSERT_FATAL(!odp_queue_capability(&queue_capa));
+
+	if (sync == ODP_SCHED_SYNC_ORDERED &&
+	    sched_capa.max_ordered_locks == 0) {
+		printf("\n  NO ORDERED LOCKS. "
+		       "scheduler_test_ordered_and_plain skipped.\n");
+		return;
+	}
+
+	/* Set up the scheduling environment */
+	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	globals = odp_shm_addr(shm);
+	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+
+	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	args = odp_shm_addr(shm);
+	CU_ASSERT_PTR_NOT_NULL_FATAL(args);
+	args->globals = globals;
+
+	/* Make sure all events fit to queues */
+	if (sched_capa.max_queue_size &&
+	    sched_capa.max_queue_size < events_per_queue)
+		events_per_queue = sched_capa.max_queue_size;
+	if (queue_capa.plain.max_size &&
+	    queue_capa.plain.max_size < events_per_queue)
+		events_per_queue = queue_capa.plain.max_size;
+
+	odp_queue_param_init(&queue_param);
+	queue_param.type = ODP_QUEUE_TYPE_SCHED;
+	queue_param.sched.sync = sync;
+	queue_param.size = events_per_queue;
+	if (sync == ODP_SCHED_SYNC_ORDERED)
+		queue_param.sched.lock_count = 1;
+
+	sched_queue = odp_queue_create(NULL, &queue_param);
+	CU_ASSERT_FATAL(sched_queue != ODP_QUEUE_INVALID);
+	globals->sched_and_plain_q.sched = sched_queue;
+
+	odp_queue_param_init(&queue_param);
+	queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+	queue_param.size = events_per_queue;
+
+	plain_queue = odp_queue_create(NULL, &queue_param);
+	CU_ASSERT_FATAL(plain_queue != ODP_QUEUE_INVALID);
+	globals->sched_and_plain_q.plain = plain_queue;
+
+	odp_pool_param_init(&pool_param);
+	pool_param.buf.size = 100;
+	pool_param.buf.num = 2 * events_per_queue;
+	pool_param.type = ODP_POOL_BUFFER;
+
+	pool = odp_pool_create("sched_to_plain_pool", &pool_param);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Create and enq test events with sequential sequence numbers */
+	for (seq = 0; seq < events_per_queue; seq++) {
+		odp_buffer_t buf1, buf2;
+
+		buf1 = odp_buffer_alloc(pool);
+		if (buf1 == ODP_BUFFER_INVALID)
+			break;
+		buf2 = odp_buffer_alloc(pool);
+		if (buf2 == ODP_BUFFER_INVALID) {
+			odp_buffer_free(buf1);
+			break;
+		}
+		buf_data = odp_buffer_addr(buf1);
+		*buf_data = seq;
+		buf_data = odp_buffer_addr(buf2);
+		*buf_data = seq;
+
+		/* Events flow id is 0 by default */
+		CU_ASSERT_FATAL(!odp_queue_enq(sched_queue,
+					       odp_buffer_to_event(buf1)));
+		CU_ASSERT_FATAL(!odp_queue_enq(plain_queue,
+					       odp_buffer_to_event(buf2)));
+	}
+	CU_ASSERT_FATAL(seq > 2);
+
+	/* Test runs also on the main thread */
+	args->cu_thr.numthrds = globals->num_workers - 1;
+	if (args->cu_thr.numthrds > 0)
+		odp_cunit_thread_create(sched_and_plain_thread, &args->cu_thr);
+
+	sched_and_plain_thread(args);
+
+	if (args->cu_thr.numthrds > 0)
+		odp_cunit_thread_exit(&args->cu_thr);
+
+	/* Check plain queue sequence numbers and free events */
+	first = 1;
+	while (1) {
+		ev = odp_queue_deq(plain_queue);
+		if (ev == ODP_EVENT_INVALID)
+			break;
+
+		buf_data = odp_buffer_addr(odp_buffer_from_event(ev));
+		seq = *buf_data;
+
+		if (first) {
+			first = 0;
+			prev_seq = seq;
+			continue;
+		}
+
+		CU_ASSERT(seq == prev_seq + 1 || seq == 0);
+		prev_seq = seq;
+		odp_event_free(ev);
+	}
+
+	/* Check scheduled queue sequence numbers and free events */
+	first = 1;
+	while (1) {
+		ev = odp_schedule(NULL, wait);
+		if (ev == ODP_EVENT_INVALID)
+			break;
+
+		buf_data = odp_buffer_addr(odp_buffer_from_event(ev));
+		seq = *buf_data;
+
+		if (first) {
+			first = 0;
+			prev_seq = seq;
+			continue;
+		}
+
+		CU_ASSERT(seq == prev_seq + 1 || seq == 0);
+		prev_seq = seq;
+		odp_event_free(ev);
+	}
+
+	CU_ASSERT(!odp_queue_destroy(sched_queue));
+	CU_ASSERT(!odp_queue_destroy(plain_queue));
+	CU_ASSERT(!odp_pool_destroy(pool));
+}
+
+static void scheduler_test_atomic_and_plain(void)
+{
+	scheduler_test_sched_and_plain(ODP_SCHED_SYNC_ATOMIC);
+}
+
+static void scheduler_test_ordered_and_plain(void)
+{
+	scheduler_test_sched_and_plain(ODP_SCHED_SYNC_ORDERED);
+}
+
 static int create_queues(test_globals_t *globals)
 {
 	int i, j, prios, rc;
@@ -2278,6 +2507,8 @@ odp_testinfo_t scheduler_suite[] = {
 	ODP_TEST_INFO(scheduler_test_parallel),
 	ODP_TEST_INFO(scheduler_test_atomic),
 	ODP_TEST_INFO(scheduler_test_ordered),
+	ODP_TEST_INFO(scheduler_test_atomic_and_plain),
+	ODP_TEST_INFO(scheduler_test_ordered_and_plain),
 	ODP_TEST_INFO(scheduler_test_chaos),
 	ODP_TEST_INFO(scheduler_test_1q_1t_n),
 	ODP_TEST_INFO(scheduler_test_1q_1t_a),
commit f7caa49d2ba750d1fd29a12e288b441a6dac545f
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Thu Oct 31 16:42:19 2019 +0200
test: timer_perf: add private timer pool support
Added the -s option to select between shared and private timer pools. Currently, private timer pools can be tested only with a single CPU (the master thread runs the test).
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-by: Matias Elo <matias.elo@nokia.com>
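For example, a single-CPU private pool run could be launched as follows (the
binary name assumes the default build of test/performance/odp_timer_perf.c):

	./odp_timer_perf -c 1 -s 0

The default (-s 1) measures shared timer pools polled by worker threads.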
diff --git a/test/performance/odp_timer_perf.c b/test/performance/odp_timer_perf.c
index d0f936a8e..c49baf0bd 100644
--- a/test/performance/odp_timer_perf.c
+++ b/test/performance/odp_timer_perf.c
@@ -25,6 +25,7 @@ typedef struct test_options_t {
 	uint32_t num_timer;
 	uint64_t res_ns;
 	uint64_t period_ns;
+	int shared;
 
 } test_options_t;
 
@@ -87,6 +88,10 @@ static void print_usage(void)
 	       "  -t, --num_timer    Number of timers per timer pool. Default: 10\n"
 	       "  -r, --res_ns       Resolution in nsec. Default: 10000000\n"
 	       "  -p, --period_ns    Timeout period in nsec. Default: 100000000\n"
+	       "  -s, --shared       Shared vs private timer pool. Currently, private pools can be\n"
+	       "                     tested only with single CPU. Default: 1\n"
+	       "                       0: Private timer pools\n"
+	       "                       1: Shared timer pools\n"
 	       "  -h, --help         This help\n"
 	       "\n");
 }
@@ -103,17 +108,19 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
 		{"num_timer", required_argument, NULL, 't'},
 		{"res_ns",    required_argument, NULL, 'r'},
 		{"period_ns", required_argument, NULL, 'p'},
+		{"shared",    required_argument, NULL, 's'},
 		{"help",      no_argument,       NULL, 'h'},
 		{NULL, 0, NULL, 0}
 	};
 
-	static const char *shortopts = "+c:n:t:r:p:h";
+	static const char *shortopts = "+c:n:t:r:p:s:h";
 
 	test_options->num_cpu = 1;
 	test_options->num_tp = 1;
 	test_options->num_timer = 10;
 	test_options->res_ns = 10 * ODP_TIME_MSEC_IN_NS;
 	test_options->period_ns = 100 * ODP_TIME_MSEC_IN_NS;
+	test_options->shared = 1;
 
 	while (1) {
 		opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -137,6 +144,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
 		case 'p':
 			test_options->period_ns = atoll(optarg);
 			break;
+		case 's':
+			test_options->shared = atoi(optarg);
+			break;
 		case 'h':
 			/* fall through */
 		default:
@@ -159,6 +169,7 @@ static int set_num_cpu(test_global_t *global)
 	int ret;
 	test_options_t *test_options = &global->test_options;
 	int num_cpu = test_options->num_cpu;
+	int shared = test_options->shared;
 
 	/* One thread used for the main thread */
 	if (num_cpu > ODP_THREAD_COUNT_MAX - 1) {
@@ -174,14 +185,21 @@ static int set_num_cpu(test_global_t *global)
 		return -1;
 	}
 
+	if (shared == 0 && num_cpu != 1) {
+		printf("Error: Private pool test supports only single CPU.\n");
+		return -1;
+	}
+
 	/* Zero: all available workers */
 	if (num_cpu == 0) {
 		num_cpu = ret;
 		test_options->num_cpu = num_cpu;
 	}
 
-	/* Main thread + all workers */
-	odp_barrier_init(&global->barrier, num_cpu + 1);
+	if (shared) /* Main thread + all workers */
+		odp_barrier_init(&global->barrier, num_cpu + 1);
+	else /* Only the main thread */
+		odp_barrier_init(&global->barrier, 1);
 
 	return 0;
 }
@@ -204,11 +222,17 @@ static int create_timer_pools(test_global_t *global)
 	uint32_t num_timer = test_options->num_timer;
 	uint64_t res_ns = test_options->res_ns;
 	uint64_t period_ns = test_options->period_ns;
+	int priv;
 
 	max_tmo_ns = START_NS + (num_timer * period_ns);
 
+	priv = 0;
+	if (test_options->shared == 0)
+		priv = 1;
+
 	printf("\nTimer performance test\n");
 	printf("  num cpu          %u\n", num_cpu);
+	printf("  private pool     %i\n", priv);
 	printf("  num timer pool   %u\n", num_tp);
 	printf("  num timer        %u\n", num_timer);
 	printf("  resolution       %" PRIu64 " nsec\n", res_ns);
@@ -266,7 +290,7 @@ static int create_timer_pools(test_global_t *global)
 	timer_pool_param.min_tmo = START_NS;
 	timer_pool_param.max_tmo = max_tmo_ns;
 	timer_pool_param.num_timers = num_timer;
-	timer_pool_param.priv = 0;
+	timer_pool_param.priv = priv;
 	timer_pool_param.clk_src = ODP_CLOCK_CPU;
 
 	odp_pool_param_init(&pool_param);
@@ -540,8 +564,6 @@ static int start_workers(test_global_t *global, odp_instance_t instance)
 		thr_param[i].start = test_worker;
 		thr_param[i].arg = &global->thread_arg[i];
 		thr_param[i].thr_type = ODP_THREAD_WORKER;
-
-		global->thread_arg[i].global = global;
 	}
 
 	ret = odph_thread_create(global->thread_tbl, &thr_common, thr_param,
@@ -644,16 +666,24 @@ int main(int argc, char **argv)
 	odp_instance_t instance;
 	odp_init_t init;
 	test_global_t *global;
+	test_options_t *test_options;
+	int i, shared;
 
 	global = &test_global;
 	memset(global, 0, sizeof(test_global_t));
 	odp_atomic_init_u32(&global->exit_test, 0);
 
+	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+		global->thread_arg[i].global = global;
+
 	signal(SIGINT, sig_handler);
 
 	if (parse_options(argc, argv, &global->test_options))
 		return -1;
 
+	test_options = &global->test_options;
+	shared = test_options->shared;
+
 	/* List features not to be used */
 	odp_init_param_init(&init);
 	init.not_used.feat.cls = 1;
@@ -681,22 +711,33 @@ int main(int argc, char **argv)
 	if (create_timer_pools(global))
 		return -1;
 
-	/* Start worker threads */
-	start_workers(global, instance);
+	if (shared) {
+		/* Start worker threads */
+		start_workers(global, instance);
 
-	/* Wait until workers have started.
-	 * Scheduler calls from workers may be needed to run timer pools in
-	 * a software implementation. Wait 1 msec to ensure that timer pools
-	 * are running before setting timers. */
-	odp_barrier_wait(&global->barrier);
-	odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+		/* Wait until workers have started.
+		 * Scheduler calls from workers may be needed to run timer
+		 * pools in a software implementation. Wait 1 msec to ensure
+		 * that timer pools are running before setting timers. */
+		odp_barrier_wait(&global->barrier);
+		odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+	}
 
 	/* Set timers. Force workers to exit on failure. */
 	if (set_timers(global))
 		odp_atomic_add_u32(&global->exit_test, MAX_TIMER_POOLS);
 
-	/* Wait workers to exit */
-	odph_thread_join(global->thread_tbl, global->test_options.num_cpu);
+	if (!shared) {
+		/* Test private pools on the master thread */
+		if (test_worker(&global->thread_arg[0])) {
+			printf("Error: test loop failed\n");
+			return -1;
+		}
+	} else {
+		/* Wait workers to exit */
+		odph_thread_join(global->thread_tbl,
+				 global->test_options.num_cpu);
+	}
 
 	print_stat(global);
commit f678476829f61a34d5d1e2b69b92c157786162e5
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Thu Oct 31 10:27:02 2019 +0200
validation: timer: add private timer pool test
Create a timer pool with the 'priv' flag set. The thread that created the pool is the same thread that calls odp_schedule() / odp_queue_deq().
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-by: Matias Elo <matias.elo@nokia.com>
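For reference, a private pool is created like a shared one, differing only in
the 'priv' flag (a minimal sketch; the parameter values are illustrative and
not taken from the commit):

	odp_timer_pool_param_t tparam;
	odp_timer_pool_t tp;

	memset(&tparam, 0, sizeof(tparam));
	tparam.res_ns     = 10 * ODP_TIME_MSEC_IN_NS;
	tparam.min_tmo    = 10 * ODP_TIME_MSEC_IN_NS;
	tparam.max_tmo    = ODP_TIME_SEC_IN_NS;
	tparam.num_timers = 10;
	tparam.priv       = 1; /* pool is used only by the creating thread */
	tparam.clk_src    = ODP_CLOCK_CPU;

	tp = odp_timer_pool_create("priv_pool", &tparam);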
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index 376a1218b..c9a70205e 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -536,7 +536,7 @@ static void timer_test_pkt_event_sched(void)
 	timer_test_event_type(ODP_QUEUE_TYPE_SCHED, ODP_EVENT_PACKET);
 }
 
-static void timer_test_queue_type(odp_queue_type_t queue_type)
+static void timer_test_queue_type(odp_queue_type_t queue_type, int priv)
 {
 	odp_pool_t pool;
 	const int num = 10;
@@ -567,7 +567,7 @@ static void timer_test_queue_type(odp_queue_type_t queue_type)
 	tparam.min_tmo = global_mem->param.min_tmo;
 	tparam.max_tmo = global_mem->param.max_tmo;
 	tparam.num_timers = num + 1;
-	tparam.priv = 0;
+	tparam.priv = priv;
 	tparam.clk_src = ODP_CLOCK_CPU;
 
 	ODPH_DBG("\nTimer pool parameters:\n");
@@ -665,8 +665,7 @@ static void timer_test_queue_type(odp_queue_type_t queue_type)
 	CU_ASSERT(diff_test > (test_period - period_ns));
 	CU_ASSERT(diff_test < (test_period + period_ns));
 
-	/* Scalable scheduler needs this pause sequence. Otherwise, it gets
-	 * stuck on terminate. */
+	/* Reset scheduler context for the next test case */
 	if (queue_type == ODP_QUEUE_TYPE_SCHED) {
 		odp_schedule_pause();
 		while (1) {
@@ -677,6 +676,7 @@ static void timer_test_queue_type(odp_queue_type_t queue_type)
 			CU_FAIL("Drop extra event\n");
 			odp_event_free(ev);
 		}
+		odp_schedule_resume();
 	}
 
 	odp_timer_pool_destroy(tp);
@@ -687,12 +687,22 @@ static void timer_test_queue_type(odp_queue_type_t queue_type)
 
 static void timer_test_plain_queue(void)
 {
-	timer_test_queue_type(ODP_QUEUE_TYPE_PLAIN);
+	timer_test_queue_type(ODP_QUEUE_TYPE_PLAIN, 0);
 }
 
 static void timer_test_sched_queue(void)
 {
-	timer_test_queue_type(ODP_QUEUE_TYPE_SCHED);
+	timer_test_queue_type(ODP_QUEUE_TYPE_SCHED, 0);
+}
+
+static void timer_test_plain_queue_priv(void)
+{
+	timer_test_queue_type(ODP_QUEUE_TYPE_PLAIN, 1);
+}
+
+static void timer_test_sched_queue_priv(void)
+{
+	timer_test_queue_type(ODP_QUEUE_TYPE_SCHED, 1);
 }
 
 static void timer_test_cancel(void)
@@ -1451,6 +1461,8 @@ odp_testinfo_t timer_suite[] = {
 	ODP_TEST_INFO(timer_test_max_tmo_max_tmo_sched),
 	ODP_TEST_INFO(timer_test_plain_queue),
 	ODP_TEST_INFO(timer_test_sched_queue),
+	ODP_TEST_INFO(timer_test_plain_queue_priv),
+	ODP_TEST_INFO(timer_test_sched_queue_priv),
 	ODP_TEST_INFO(timer_test_all),
 	ODP_TEST_INFO_NULL,
 };
commit c37655b8b74aa50825be8d58afa18d91b4a9a1fd
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Wed Oct 30 16:08:34 2019 +0200
linux-gen: timer: use thread type config option
Use a config file option to select whether control threads poll shared timer pools.

Timer processing of control and worker threads may be separated completely if control threads create private timer pools and do not process shared pools. This way, worker threads will not poll control thread pools and vice versa, but both can use timers.

Private timer pools are implemented with minimal effort here. The implementations of private and shared pools could be separated in the future (for better performance), as private pools need no data synchronization with other threads.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-by: Matias Elo <matias.elo@nokia.com>
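As an illustration (configuration fragment, not part of the commit), a setup
where only worker threads poll shared pools would combine the new option with
inline timers; a control thread that needs timers would then create its own
pool with the 'priv' flag set, as in the sketch after the previous commit:

	timer: {
		inline = 1
		# Only worker threads poll shared (non-private) timer pools
		inline_thread_type = 1
	}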
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index e82d29059..3a82ee84b 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -127,6 +127,7 @@ typedef struct timer_pool_s {
 	odp_shm_t shm;
 	timer_t timerid;
 	int notify_overrun;
+	int owner;
 	pthread_t thr_pthread; /* pthread_t of timer thread */
 	pid_t thr_pid; /* gettid() for timer thread */
 	int thr_warm_up; /* number of warm up rounds */
@@ -167,6 +168,7 @@ typedef struct timer_global_t {
 typedef struct timer_local_t {
 	odp_time_t last_run;
 	int run_cnt;
+	uint8_t poll_shared;
 
 } timer_local_t;
 
@@ -353,6 +355,11 @@ static odp_timer_pool_t timer_pool_new(const char *name,
 	odp_atomic_init_u32(&tp->high_wm, 0);
 	tp->first_free = 0;
 	tp->notify_overrun = 1;
+	tp->owner = -1;
+
+	if (param->priv)
+		tp->owner = odp_thread_id();
+
 	tp->tick_buf = (void *)((char *)odp_shm_addr(shm) + sz0);
 	tp->timers = (void *)((char *)odp_shm_addr(shm) + sz0 + sz1);
 
@@ -922,6 +929,17 @@ static inline void timer_pool_scan_inline(int num, odp_time_t now)
 		if (tp == NULL)
 			continue;
 
+		if (odp_likely(tp->owner < 0)) {
+			/* Skip shared pool, if this thread is not configured
+			 * to process those */
+			if (odp_unlikely(timer_local.poll_shared == 0))
+				continue;
+		} else {
+			/* Skip private pool, if this thread is not the owner */
+			if (tp->owner != odp_thread_id())
+				continue;
+		}
+
 		nsec = time_nsec(tp, now);
 		new_tick = nsec / tp->nsec_per_scan;
 		old_tick = odp_atomic_load_u64(&tp->cur_tick);
@@ -1557,8 +1575,27 @@ int _odp_timer_term_global(void)
 
 int _odp_timer_init_local(void)
 {
+	int conf_thr_type;
+	odp_thread_type_t thr_type;
+
 	timer_local.last_run = odp_time_global_from_ns(0);
 	timer_local.run_cnt = 1;
+	timer_local.poll_shared = 0;
+
+	/* Timer feature disabled */
+	if (timer_global == NULL)
+		return 0;
+
+	/* Check if this thread polls shared (non-private) timer pools */
+	conf_thr_type = timer_global->thread_type;
+	thr_type = odp_thread_type();
+
+	if (conf_thr_type == 0)
+		timer_local.poll_shared = 1;
+	else if (conf_thr_type == 1 && thr_type == ODP_THREAD_WORKER)
+		timer_local.poll_shared = 1;
+	else if (conf_thr_type == 2 && thr_type == ODP_THREAD_CONTROL)
+		timer_local.poll_shared = 1;
 
 	return 0;
 }
commit 642f27acf0c39ade24d6ea6551dde362206f4d4f
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Wed Oct 30 13:57:23 2019 +0200
linux-gen: timer: move thread local variables into struct
Moved type definitions to the beginning of the file and gathered thread local variables into the timer_local_t struct.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-by: Matias Elo <matias.elo@nokia.com>
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index 4af31bcc1..e82d29059 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -66,35 +66,12 @@
 /* Max inline timer resolution */
 #define MAX_INLINE_RES_NS 500
 
-/******************************************************************************
- * Mutual exclusion in the absence of CAS16
- *****************************************************************************/
-
+/* Mutual exclusion in the absence of CAS16 */
 #ifndef ODP_ATOMIC_U128
 #define NUM_LOCKS 1024
 #define IDX2LOCK(idx) (&timer_global->locks[(idx) % NUM_LOCKS])
 #endif
 
-/******************************************************************************
- * Translation between timeout buffer and timeout header
- *****************************************************************************/
-
-static odp_timeout_hdr_t *timeout_hdr_from_buf(odp_buffer_t buf)
-{
-	return (odp_timeout_hdr_t *)(void *)buf_hdl_to_hdr(buf);
-}
-
-static odp_timeout_hdr_t *timeout_hdr(odp_timeout_t tmo)
-{
-	odp_buffer_t buf = odp_buffer_from_event(odp_timeout_to_event(tmo));
-
-	return timeout_hdr_from_buf(buf);
-}
-
-/******************************************************************************
- * odp_timer abstract datatype
- *****************************************************************************/
-
 typedef struct
 #ifdef ODP_ATOMIC_U128
 ODP_ALIGNED(16) /* 16-byte atomic operations need properly aligned addresses */
@@ -128,55 +105,11 @@ ODP_STATIC_ASSERT(sizeof(tick_buf_t) == 16, "sizeof(tick_buf_t) == 16");
 typedef struct {
 	void *user_ptr;
 	odp_queue_t queue;/* Used for free list when timer is free */
-} _odp_timer_t;
-
-static void timer_init(_odp_timer_t *tim,
-		       tick_buf_t *tb,
-		       odp_queue_t _q,
-		       void *_up)
-{
-	tim->queue = _q;
-	tim->user_ptr = _up;
-	tb->tmo_u64 = 0;
-	tb->tmo_buf = ODP_BUFFER_INVALID;
-
-	/* Release the timer by setting timer state to inactive */
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
-	tb->exp_tck.v = TMO_INACTIVE;
-#else
-	_odp_atomic_u64_store_mm(&tb->exp_tck, TMO_INACTIVE, _ODP_MEMMODEL_RLS);
-#endif
-}
-
-/* Teardown when timer is freed */
-static void timer_fini(_odp_timer_t *tim, tick_buf_t *tb)
-{
-	ODP_ASSERT(tb->exp_tck.v == TMO_UNUSED);
-	ODP_ASSERT(tb->tmo_buf == ODP_BUFFER_INVALID);
-	tim->queue = ODP_QUEUE_INVALID;
-	tim->user_ptr = NULL;
-}
-
-static inline uint32_t get_next_free(_odp_timer_t *tim)
-{
-	/* Reusing 'queue' for next free index */
-	return _odp_typeval(tim->queue);
-}
-
-static inline void set_next_free(_odp_timer_t *tim, uint32_t nf)
-{
-	ODP_ASSERT(tim->queue == ODP_QUEUE_INVALID);
-	/* Reusing 'queue' for next free index */
-	tim->queue = _odp_cast_scalar(odp_queue_t, nf);
-}
-
-/******************************************************************************
- * timer_pool_t abstract datatype
- * Inludes alloc and free timer
- *****************************************************************************/
+} _odp_timer_t;
 
 typedef struct timer_pool_s {
-/* Put frequently accessed fields in the first cache line */
+	/* Put frequently accessed fields in the first cache line */
 	uint64_t nsec_per_scan;
 	odp_time_t start_time;
 	odp_atomic_u64_t cur_tick;/* Current tick value */
@@ -199,6 +132,7 @@ typedef struct timer_pool_s {
 	int thr_warm_up; /* number of warm up rounds */
 	odp_atomic_u32_t thr_ready; /* thread ready from warm up */
 	int thr_exit; /* request to exit for timer thread */
+
 } timer_pool_t;
 
 /* Timer pool index must fit into 8 bits with one index value reserved to
@@ -230,8 +164,62 @@ typedef struct timer_global_t {
 
 } timer_global_t;
 
+typedef struct timer_local_t {
+	odp_time_t last_run;
+	int run_cnt;
+
+} timer_local_t;
+
+/* Points to timer global data */
 static timer_global_t *timer_global;
 
+/* Timer thread local data */
+static __thread timer_local_t timer_local;
+
+/* Forward declarations */
+static void itimer_init(timer_pool_t *tp);
+static void itimer_fini(timer_pool_t *tp);
+
+static void timer_init(_odp_timer_t *tim,
+		       tick_buf_t *tb,
+		       odp_queue_t _q,
+		       void *_up)
+{
+	tim->queue = _q;
+	tim->user_ptr = _up;
+	tb->tmo_u64 = 0;
+	tb->tmo_buf = ODP_BUFFER_INVALID;
+
+	/* Release the timer by setting timer state to inactive */
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+	tb->exp_tck.v = TMO_INACTIVE;
+#else
+	_odp_atomic_u64_store_mm(&tb->exp_tck, TMO_INACTIVE, _ODP_MEMMODEL_RLS);
+#endif
+}
+
+/* Teardown when timer is freed */
+static void timer_fini(_odp_timer_t *tim, tick_buf_t *tb)
+{
+	ODP_ASSERT(tb->exp_tck.v == TMO_UNUSED);
+	ODP_ASSERT(tb->tmo_buf == ODP_BUFFER_INVALID);
+	tim->queue = ODP_QUEUE_INVALID;
+	tim->user_ptr = NULL;
+}
+
+static inline uint32_t get_next_free(_odp_timer_t *tim)
+{
+	/* Reusing 'queue' for next free index */
+	return _odp_typeval(tim->queue);
+}
+
+static inline void set_next_free(_odp_timer_t *tim, uint32_t nf)
+{
+	ODP_ASSERT(tim->queue == ODP_QUEUE_INVALID);
+	/* Reusing 'queue' for next free index */
+	tim->queue = _odp_cast_scalar(odp_queue_t, nf);
+}
+
 static inline timer_pool_t *timer_pool_from_hdl(odp_timer_pool_t hdl)
 {
 	return (timer_pool_t *)(uintptr_t)hdl;
@@ -274,9 +262,17 @@ static inline odp_timer_t tp_idx_to_handle(timer_pool_t *tp,
 					   (idx + 1));
 }
 
-/* Forward declarations */
-static void itimer_init(timer_pool_t *tp);
-static void itimer_fini(timer_pool_t *tp);
+static odp_timeout_hdr_t *timeout_hdr_from_buf(odp_buffer_t buf)
+{
+	return (odp_timeout_hdr_t *)(void *)buf_hdl_to_hdr(buf);
+}
+
+static odp_timeout_hdr_t *timeout_hdr(odp_timeout_t tmo)
+{
+	odp_buffer_t buf = odp_buffer_from_event(odp_timeout_to_event(tmo));
+
+	return timeout_hdr_from_buf(buf);
+}
 
 static odp_timer_pool_t timer_pool_new(const char *name,
 				       const odp_timer_pool_param_t *param)
@@ -952,8 +948,6 @@ static inline void timer_pool_scan_inline(int num, odp_time_t now)
 
 void _timer_run_inline(int dec)
 {
-	static __thread odp_time_t last_timer_run;
-	static __thread int timer_run_cnt = 1;
 	odp_time_t now;
 	int num = timer_global->highest_tp_idx + 1;
 	int poll_interval = timer_global->poll_interval;
@@ -964,21 +958,21 @@ void _timer_run_inline(int dec)
 	/* Rate limit how often this thread checks the timer pools. */
 
 	if (poll_interval > 1) {
-		timer_run_cnt -= dec;
-		if (timer_run_cnt > 0)
+		timer_local.run_cnt -= dec;
+		if (timer_local.run_cnt > 0)
 			return;
-		timer_run_cnt = poll_interval;
+		timer_local.run_cnt = poll_interval;
 	}
 
 	now = odp_time_global();
 
 	if (poll_interval > 1) {
-		odp_time_t period = odp_time_diff(now, last_timer_run);
+		odp_time_t period = odp_time_diff(now, timer_local.last_run);
 
 		if (odp_time_cmp(period, timer_global->poll_interval_time) < 0)
 			return;
-		last_timer_run = now;
+		timer_local.last_run = now;
 	}
 
 	/* Check the timer pools. */
@@ -1563,6 +1557,9 @@ int _odp_timer_term_global(void)
 
 int _odp_timer_init_local(void)
 {
+	timer_local.last_run = odp_time_global_from_ns(0);
+	timer_local.run_cnt = 1;
+
 	return 0;
 }
commit d2a9548017edaceec8cc682183690592c4c483b2
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Wed Oct 30 11:24:56 2019 +0200
linux-gen: timer: add inline_thread_type config file option
Added a config file option to control which thread types process (non-private) inline timer pools.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-by: Matias Elo <matias.elo@nokia.com>
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf
index 4e24ef82d..b35089944 100644
--- a/config/odp-linux-generic.conf
+++ b/config/odp-linux-generic.conf
@@ -16,7 +16,7 @@
 # Mandatory fields
 odp_implementation = "linux-generic"
-config_file_version = "0.1.9"
+config_file_version = "0.1.10"
 
 # Shared memory options
 shm: {
@@ -148,18 +148,21 @@ timer: {
 	# Use inline timer implementation
 	#
 	# By default, timer processing is done in background threads (thread per
-	# timer pool). With inline implementation timers are processed on worker
-	# cores instead. When using inline timers the application has to call
-	# odp_schedule() or odp_queue_deq() to actuate timer processing.
+	# timer pool). With inline implementation timers are processed by ODP
+	# application threads instead. When using inline timers the application
+	# has to call odp_schedule() or odp_queue_deq() regularly to actuate
+	# timer processing.
 	#
-	# Set to 1 to enable
+	# 0: Use POSIX timer and background threads to process timers
+	# 1: Use inline timer implementation and application threads to process
+	#    timers
 	inline = 0
 
 	# Inline timer poll interval
 	#
 	# When set to 1 inline timers are polled during every schedule round.
 	# Increasing the value reduces timer processing overhead while
-	# decreasing accuracy. Ignored when inline timer is not enabled.
+	# decreasing accuracy. Ignored when inline timer is not used.
 	inline_poll_interval = 10
 
 	# Inline timer poll interval in nanoseconds
 	#
@@ -168,6 +171,18 @@ timer: {
 	# inline timer polling rate in nanoseconds. By default, this defines the
 	# maximum rate a thread may poll timers. If a timer pool is created with
 	# a higher resolution than this, the polling rate is increased
-	# accordingly.
+	# accordingly. Ignored when inline timer is not used.
 	inline_poll_interval_nsec = 500000
+
+	# Inline timer use of threads
+	#
+	# Select which thread types process non-private timer pools in inline
+	# timer implementation. Thread type does not affect private timer
+	# pool processing, those are always processed by the thread which
+	# created the pool. Ignored when inline timer is not used.
+	#
+	# 0: Both control and worker threads process non-private timer pools
+	# 1: Only worker threads process non-private timer pools
+	# 2: Only control threads process non-private timer pools
+	inline_thread_type = 0
 }
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index e21b29b0c..4af31bcc1 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -226,6 +226,7 @@ typedef struct timer_global_t {
 	odp_bool_t use_inline_timers;
 	int poll_interval;
 	int highest_tp_idx;
+	uint8_t thread_type;
 
 } timer_global_t;
 
@@ -1511,35 +1512,43 @@ int _odp_timer_init_global(const odp_init_t *params)
 	conf_str = "timer.inline";
 	if (!_odp_libconfig_lookup_int(conf_str, &val)) {
 		ODP_ERR("Config option '%s' not found.\n", conf_str);
-		odp_shm_free(shm);
-		return -1;
+		goto error;
 	}
 	timer_global->use_inline_timers = val;
 
 	conf_str = "timer.inline_poll_interval";
 	if (!_odp_libconfig_lookup_int(conf_str, &val)) {
 		ODP_ERR("Config option '%s' not found.\n", conf_str);
-		odp_shm_free(shm);
-		return -1;
+		goto error;
 	}
 	timer_global->poll_interval = val;
 
 	conf_str = "timer.inline_poll_interval_nsec";
 	if (!_odp_libconfig_lookup_int(conf_str, &val)) {
 		ODP_ERR("Config option '%s' not found.\n", conf_str);
-		odp_shm_free(shm);
-		return -1;
+		goto error;
 	}
 	timer_global->poll_interval_nsec = val;
 	timer_global->poll_interval_time =
 		odp_time_global_from_ns(timer_global->poll_interval_nsec);
 
+	conf_str = "timer.inline_thread_type";
+	if (!_odp_libconfig_lookup_int(conf_str, &val)) {
+		ODP_ERR("Config option '%s' not found.\n", conf_str);
+		goto error;
+	}
+	timer_global->thread_type = val;
+
 	if (!timer_global->use_inline_timers) {
 		timer_res_init();
 		block_sigalarm();
 	}
 
 	return 0;
+
+error:
+	odp_shm_free(shm);
+	return -1;
 }
 
 int _odp_timer_term_global(void)
diff --git a/platform/linux-generic/test/inline-timer.conf b/platform/linux-generic/test/inline-timer.conf
index 435f51e6d..362ecd559 100644
--- a/platform/linux-generic/test/inline-timer.conf
+++ b/platform/linux-generic/test/inline-timer.conf
@@ -1,6 +1,6 @@
 # Mandatory fields
 odp_implementation = "linux-generic"
-config_file_version = "0.1.9"
+config_file_version = "0.1.10"
 
 timer: {
 	# Enable inline timer implementation
diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf
index 708b9b4a8..6fb08aeaf 100644
--- a/platform/linux-generic/test/process-mode.conf
+++ b/platform/linux-generic/test/process-mode.conf
@@ -1,6 +1,6 @@
 # Mandatory fields
 odp_implementation = "linux-generic"
-config_file_version = "0.1.9"
+config_file_version = "0.1.10"
 
 # Shared memory options
 shm: {
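As with other linux-generic options, the new setting can be tried without
rebuilding by pointing the ODP_CONFIG_FILE environment variable at a modified
copy of the template configuration file, e.g. ODP_CONFIG_FILE=./my-odp.conf
(the file name here is hypothetical).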
commit 68e1cd81665c3b8deede1974d653593af85ec183
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Tue Oct 29 16:46:05 2019 +0200
linux-gen: timer: coding style fixes
Fix most issues reported by the current checkpatch version.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-by: Matias Elo <matias.elo@nokia.com>
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index 1ca61e179..e21b29b0c 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -87,6 +87,7 @@ static odp_timeout_hdr_t *timeout_hdr_from_buf(odp_buffer_t buf)
 static odp_timeout_hdr_t *timeout_hdr(odp_timeout_t tmo)
 {
 	odp_buffer_t buf = odp_buffer_from_event(odp_timeout_to_event(tmo));
+
 	return timeout_hdr_from_buf(buf);
 }
 
@@ -243,8 +244,10 @@ static inline odp_timer_pool_t timer_pool_to_hdl(timer_pool_t *tp)
 static inline timer_pool_t *handle_to_tp(odp_timer_t hdl)
 {
 	uint32_t tp_idx = _odp_typeval(hdl) >> INDEX_BITS;
+
 	if (odp_likely(tp_idx < MAX_TIMER_POOLS)) {
 		timer_pool_t *tp = timer_global->timer_pool[tp_idx];
+
 		if (odp_likely(tp != NULL))
 			return timer_global->timer_pool[tp_idx];
 	}
@@ -255,6 +258,7 @@ static inline uint32_t handle_to_idx(odp_timer_t hdl,
 				     timer_pool_t *tp)
 {
 	uint32_t idx = (_odp_typeval(hdl) & ((1U << INDEX_BITS) - 1U)) - 1;
+
 	__builtin_prefetch(&tp->tick_buf[idx], 0, 0);
 	if (odp_likely(idx < odp_atomic_load_u32(&tp->high_wm)))
 		return idx;
@@ -471,6 +475,7 @@ static inline odp_timer_t timer_alloc(timer_pool_t *tp,
 				      void *user_ptr)
 {
 	odp_timer_t hdl;
+
 	odp_spinlock_lock(&tp->lock);
 	if (odp_likely(tp->num_alloc < tp->param.num_timers)) {
 		tp->num_alloc++;
@@ -478,6 +483,7 @@ static inline odp_timer_t timer_alloc(timer_pool_t *tp,
 		ODP_ASSERT(tp->first_free != tp->param.num_timers);
 		uint32_t idx = tp->first_free;
 		_odp_timer_t *tim = &tp->timers[idx];
+
 		tp->first_free = get_next_free(tim);
 		/* Initialize timer */
 		timer_init(tim, &tp->tick_buf[idx], queue, user_ptr);
@@ -532,10 +538,8 @@ static inline odp_buffer_t timer_free(timer_pool_t *tp, uint32_t idx)
  * expire/reset/cancel timer
  *****************************************************************************/
 
-static bool timer_reset(uint32_t idx,
-			uint64_t abs_tck,
-			odp_buffer_t *tmo_buf,
-			timer_pool_t *tp)
+static bool timer_reset(uint32_t idx, uint64_t abs_tck, odp_buffer_t *tmo_buf,
+			timer_pool_t *tp)
 {
 	bool success = true;
 	tick_buf_t *tb = &tp->tick_buf[idx];
@@ -582,7 +586,7 @@ static bool timer_reset(uint32_t idx,
 		/* Swap in new expiration tick, get back old tick which
 		 * will indicate active/inactive timer state */
 		old = _odp_atomic_u64_xchg_mm(&tb->exp_tck, abs_tck,
-					       _ODP_MEMMODEL_RLX);
+					      _ODP_MEMMODEL_RLX);
 		if ((old & TMO_INACTIVE) != 0) {
 			/* Timer was inactive (cancelled or expired),
 			 * we can't reset a timer without a timeout buffer.
@@ -747,12 +751,11 @@ static odp_buffer_t timer_cancel(timer_pool_t *tp,
 
 		/* Atomic CAS will fail if we experienced torn reads,
 		 * retry update sequence until CAS succeeds */
-	} while (!_odp_atomic_u128_cmp_xchg_mm(
-			 (_odp_atomic_u128_t *)tb,
-			 (_uint128_t *)&old,
-			 (_uint128_t *)&new,
-			 _ODP_MEMMODEL_RLS,
-			 _ODP_MEMMODEL_RLX));
+	} while (!_odp_atomic_u128_cmp_xchg_mm((_odp_atomic_u128_t *)tb,
+					       (_uint128_t *)&old,
+					       (_uint128_t *)&new,
+					       _ODP_MEMMODEL_RLS,
+					       _ODP_MEMMODEL_RLX));
 	old_buf = old.tmo_buf;
 #else
 	/* Take a related lock */
@@ -881,6 +884,7 @@ static inline void timer_pool_scan(timer_pool_t *tp, uint64_t tick)
 		__builtin_prefetch(&array[i + 32], 0, 0);
 		/* Non-atomic read for speed */
 		uint64_t exp_tck = array[i].exp_tck.v;
+
 		if (odp_unlikely(exp_tck <= tick)) {
 			/* Attempt to expire timer */
 			timer_expire(tp, i, tick);
@@ -1142,7 +1146,7 @@ static void itimer_init(timer_pool_t *tp)
 	int ret;
 
 	ODP_DBG("Creating POSIX timer for timer pool %s, period %"
-		PRIu64" ns\n", tp->name, tp->param.res_ns);
+		PRIu64 " ns\n", tp->name, tp->param.res_ns);
 
 	res = tp->param.res_ns;
 	sec = res / ODP_TIME_SEC_IN_NS;
@@ -1341,6 +1345,7 @@ odp_event_t odp_timer_free(odp_timer_t hdl)
 	timer_pool_t *tp = handle_to_tp(hdl);
 	uint32_t idx = handle_to_idx(hdl, tp);
 	odp_buffer_t old_buf = timer_free(tp, idx);
+
 	return odp_buffer_to_event(old_buf);
 }
 
@@ -1387,6 +1392,7 @@ int odp_timer_cancel(odp_timer_t hdl, odp_event_t *tmo_ev)
 	uint32_t idx = handle_to_idx(hdl, tp);
 	/* Set the expiration tick of the timer to TMO_INACTIVE */
 	odp_buffer_t old_buf = timer_cancel(tp, idx);
+
 	if (old_buf != ODP_BUFFER_INVALID) {
 		*tmo_ev = odp_buffer_to_event(old_buf);
 		return 0; /* Active timer cancelled, timeout returned */
@@ -1453,6 +1459,7 @@ void *odp_timeout_user_ptr(odp_timeout_t tmo)
 odp_timeout_t odp_timeout_alloc(odp_pool_t pool)
 {
 	odp_buffer_t buf = odp_buffer_alloc(pool);
+
 	if (odp_unlikely(buf == ODP_BUFFER_INVALID))
 		return ODP_TIMEOUT_INVALID;
 	return odp_timeout_from_event(odp_buffer_to_event(buf));
@@ -1461,6 +1468,7 @@ odp_timeout_t odp_timeout_alloc(odp_pool_t pool)
 void odp_timeout_free(odp_timeout_t tmo)
 {
 	odp_event_t ev = odp_timeout_to_event(tmo);
+
 	odp_buffer_free(odp_buffer_from_event(ev));
 }
 
@@ -1494,6 +1502,7 @@ int _odp_timer_init_global(const odp_init_t *params)
 
 #ifndef ODP_ATOMIC_U128
 	uint32_t i;
+
 	for (i = 0; i < NUM_LOCKS; i++)
 		_odp_atomic_flag_clear(&timer_global->locks[i]);
 #else
-----------------------------------------------------------------------
Summary of changes:
 config/odp-linux-generic.conf                 |  29 ++-
 platform/linux-generic/odp_timer.c            | 246 ++++++++++++++++----------
 platform/linux-generic/test/inline-timer.conf |   2 +-
 platform/linux-generic/test/process-mode.conf |   2 +-
 test/performance/odp_timer_perf.c             |  73 ++++++--
 test/validation/api/scheduler/scheduler.c     | 231 ++++++++++++++++++++++++
 test/validation/api/timer/timer.c             |  24 ++-
 7 files changed, 479 insertions(+), 128 deletions(-)
hooks/post-receive