This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, master has been updated via 35525f70aa68e55dca6b05cf59a515c50e7dcba2 (commit) via 56f0435d26ad16fdb4c8b6ea874cf88b15a5ddf9 (commit) via 12c4ca508b07b8d2366b0c37bee5f866019e94c2 (commit) via 9ddeaaf7a3226ab03e57cc49775fd234b8d44a2b (commit) via 0dcbdecc871783b5669dc9cb8cf68b7f4d7ffb76 (commit) via d89f772686dba6b331515be41aeefe345a207575 (commit) via 132dff6387e441c7019a8366c681941ee6173452 (commit) from fd383ebb6ea70350c28227d0b133c4e9c7075997 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log ----------------------------------------------------------------- commit 35525f70aa68e55dca6b05cf59a515c50e7dcba2 Author: Matias Elo matias.elo@nokia.com Date: Wed Nov 14 14:28:53 2018 +0200
linux-gen: ishm: add config option for selecting huge page usage limit
Add configuration option for selecting huge page usage limit in kilobytes. Memory reservations larger than this value are done using huge pages (if available), whereas smaller reservations are done using normal pages to conserve memory. The default value is still 64 kilobytes.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf index 9969860d..8a1eddbd 100644 --- a/config/odp-linux-generic.conf +++ b/config/odp-linux-generic.conf @@ -34,6 +34,11 @@ shm: { # because the current implementation won't work properly otherwise. num_cached_hp = 0
+ # Huge page usage limit in kilobytes. Memory reservations larger than + # this value are done using huge pages (if available). Smaller + # reservations are done using normal pages to conserve memory. + huge_page_limit_kb = 64 + # Allocate internal shared memory using a single virtual address space. # Set to 1 to enable using process mode. single_va = 0 diff --git a/platform/linux-generic/odp_ishm.c b/platform/linux-generic/odp_ishm.c index a66d9fab..d02e5ee9 100644 --- a/platform/linux-generic/odp_ishm.c +++ b/platform/linux-generic/odp_ishm.c @@ -113,13 +113,6 @@ */ #define ISHM_NB_FRAGMNTS (ISHM_MAX_NB_BLOCKS * 2 + 1)
-/* - * Memory reservations larger than ISHM_HUGE_PAGE_LIMIT (bytes) are allocated - * using huge pages (if available). Smaller reservations are done using normal - * pages to conserve memory. - */ -#define ISHM_HUGE_PAGE_LIMIT (64 * 1024) - /* * when a memory block is to be exported outside its ODP instance, * an block 'attribute file' is created in /dev/shm/odp-<pid>-shm-<name>. @@ -190,6 +183,8 @@ typedef struct ishm_block { typedef struct { odp_spinlock_t lock; uint64_t dev_seq; /* used when creating device names */ + /* limit for reserving memory using huge pages */ + uint64_t huge_page_limit; uint32_t odpthread_cnt; /* number of running ODP threads */ ishm_block_t block[ISHM_MAX_NB_BLOCKS]; void *single_va_start; /* start of single VA memory */ @@ -1108,7 +1103,7 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
/* Otherwise, Try first huge pages when possible and needed: */ if ((fd < 0) && page_hp_size && ((flags & _ODP_ISHM_USE_HP) || - size > ISHM_HUGE_PAGE_LIMIT)) { + size > ishm_tbl->huge_page_limit)) { /* at least, alignment in VA should match page size, but user * can request more: If the user requirement exceeds the page * size then we have to make sure the block will be mapped at @@ -1643,23 +1638,33 @@ int _odp_ishm_init_global(const odp_init_t *init) void *addr; void *spce_addr = NULL; int i; - int single_va_size_kb = 0; + int val_kb; uid_t uid; char *hp_dir = odp_global_ro.hugepage_info.default_huge_page_dir; uint64_t max_memory; uint64_t internal; + uint64_t huge_page_limit;
if (!_odp_libconfig_lookup_ext_int("shm", NULL, "single_va_size_kb", - &single_va_size_kb)) { + &val_kb)) { ODP_ERR("Unable to read single VA size from config\n"); return -1; }
- ODP_DBG("Shm single VA size: %dkB\n", single_va_size_kb); + ODP_DBG("Shm single VA size: %dkB\n", val_kb);
- max_memory = single_va_size_kb * 1024; + max_memory = val_kb * 1024; internal = max_memory / 8;
+ if (!_odp_libconfig_lookup_ext_int("shm", NULL, "huge_page_limit_kb", + &val_kb)) { + ODP_ERR("Unable to read huge page usage limit from config\n"); + return -1; + } + huge_page_limit = (uint64_t)val_kb * 1024; + + ODP_DBG("Shm huge page usage limit: %dkB\n", val_kb); + /* user requested memory size + some extra for internal use */ if (init && init->shm.max_memory) max_memory = init->shm.max_memory + internal; @@ -1710,6 +1715,7 @@ int _odp_ishm_init_global(const odp_init_t *init) memset(ishm_tbl, 0, sizeof(ishm_table_t)); ishm_tbl->dev_seq = 0; ishm_tbl->odpthread_cnt = 0; + ishm_tbl->huge_page_limit = huge_page_limit; odp_spinlock_init(&ishm_tbl->lock);
/* allocate space for the internal shared mem fragment table: */
commit 56f0435d26ad16fdb4c8b6ea874cf88b15a5ddf9 Author: Matias Elo matias.elo@nokia.com Date: Tue Nov 13 16:11:17 2018 +0200
linux-gen: timer: decrease inline timer polling interval under load
Decrease inline timer polling interval after receiving events to compensate for event processing delay.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h index 1f612448..cd80778a 100644 --- a/platform/linux-generic/include/odp_timer_internal.h +++ b/platform/linux-generic/include/odp_timer_internal.h @@ -36,12 +36,14 @@ typedef struct { odp_timer_t timer; } odp_timeout_hdr_t;
-unsigned _timer_run(void); +/* A larger decrement value should be used after receiving events compared to + * an 'empty' call. */ +unsigned int _timer_run(int dec);
/* Static inline wrapper to minimize modification of schedulers. */ -static inline unsigned timer_run(void) +static inline unsigned int timer_run(int dec) { - return odp_global_rw->inline_timers ? _timer_run() : 0; + return odp_global_rw->inline_timers ? _timer_run(dec) : 0; }
#endif diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c index c49db740..5790e1c3 100644 --- a/platform/linux-generic/odp_queue_basic.c +++ b/platform/linux-generic/odp_queue_basic.c @@ -976,27 +976,30 @@ static int queue_api_enq(odp_queue_t handle, odp_event_t ev) static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num) { queue_entry_t *queue = qentry_from_handle(handle); - - if (odp_global_rw->inline_timers && - odp_atomic_load_u64(&queue->s.num_timers)) - timer_run(); + int ret;
if (num > QUEUE_MULTI_MAX) num = QUEUE_MULTI_MAX;
- return queue->s.dequeue_multi(handle, - (odp_buffer_hdr_t **)ev, num); + ret = queue->s.dequeue_multi(handle, (odp_buffer_hdr_t **)ev, num); + + if (odp_global_rw->inline_timers && + odp_atomic_load_u64(&queue->s.num_timers)) + timer_run(ret ? 2 : 1); + + return ret; }
static odp_event_t queue_api_deq(odp_queue_t handle) { queue_entry_t *queue = qentry_from_handle(handle); + odp_event_t ev = (odp_event_t)queue->s.dequeue(handle);
if (odp_global_rw->inline_timers && odp_atomic_load_u64(&queue->s.num_timers)) - timer_run(); + timer_run(ev != ODP_EVENT_INVALID ? 2 : 1);
- return (odp_event_t)queue->s.dequeue(handle); + return ev; }
/* API functions */ diff --git a/platform/linux-generic/odp_queue_scalable.c b/platform/linux-generic/odp_queue_scalable.c index a3288456..4f2c1b6c 100644 --- a/platform/linux-generic/odp_queue_scalable.c +++ b/platform/linux-generic/odp_queue_scalable.c @@ -837,30 +837,32 @@ static odp_buffer_hdr_t *_queue_deq(odp_queue_t handle) static int queue_deq_multi(odp_queue_t handle, odp_event_t ev[], int num) { queue_entry_t *queue; + int ret;
if (num > QUEUE_MULTI_MAX) num = QUEUE_MULTI_MAX;
queue = qentry_from_ext(handle);
+ ret = queue->s.dequeue_multi(handle, (odp_buffer_hdr_t **)ev, num); + if (odp_global_rw->inline_timers && odp_atomic_load_u64(&queue->s.num_timers)) - timer_run(); + timer_run(ret ? 2 : 1);
- return queue->s.dequeue_multi(handle, (odp_buffer_hdr_t **)ev, num); + return ret; }
static odp_event_t queue_deq(odp_queue_t handle) { - queue_entry_t *queue; - - queue = qentry_from_ext(handle); + queue_entry_t *queue = qentry_from_ext(handle); + odp_event_t ev = (odp_event_t)queue->s.dequeue(handle);
if (odp_global_rw->inline_timers && odp_atomic_load_u64(&queue->s.num_timers)) - timer_run(); + timer_run(ev != ODP_EVENT_INVALID ? 2 : 1);
- return (odp_event_t)queue->s.dequeue(handle); + return ev; }
static void queue_param_init(odp_queue_param_t *params) diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c index bc46d6a6..22eb6ff1 100644 --- a/platform/linux-generic/odp_schedule_basic.c +++ b/platform/linux-generic/odp_schedule_basic.c @@ -1152,12 +1152,13 @@ static inline int schedule_loop(odp_queue_t *out_queue, uint64_t wait, int ret;
while (1) { - timer_run();
ret = do_schedule(out_queue, out_ev, max_num); - - if (ret) + if (ret) { + timer_run(2); break; + } + timer_run(1);
if (wait == ODP_SCHED_WAIT) continue; diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c index 05c0fcaf..ccafcb77 100644 --- a/platform/linux-generic/odp_schedule_scalable.c +++ b/platform/linux-generic/odp_schedule_scalable.c @@ -890,7 +890,7 @@ static int _schedule(odp_queue_t *from, odp_event_t ev[], int num_evts) ts = sched_ts; atomq = ts->atomq;
- timer_run(); + timer_run(1);
/* Once an atomic queue has been scheduled to a thread, it will stay * on that thread until empty or 'rotated' by WRR diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c index 8dcf9b0e..52c8873c 100644 --- a/platform/linux-generic/odp_schedule_sp.c +++ b/platform/linux-generic/odp_schedule_sp.c @@ -552,8 +552,6 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait, uint32_t qi; int num;
- timer_run(); - cmd = sched_cmd();
if (cmd && cmd->s.type == CMD_PKTIO) { @@ -593,6 +591,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait, }
if (cmd == NULL) { + timer_run(1); /* All priority queues are empty */ if (wait == ODP_SCHED_NO_WAIT) return 0; @@ -617,6 +616,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait, num = sched_queue_deq(qi, events, 1, 1);
if (num <= 0) { + timer_run(1); /* Destroyed or empty queue. Remove empty queue from * scheduling. A dequeue operation to on an already * empty queue moves it to NOTSCHED state and @@ -624,6 +624,8 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait, continue; }
+ timer_run(2); + sched_local.cmd = cmd;
if (from) diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index 6307ae4f..7e2848c7 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -869,10 +869,10 @@ static unsigned process_timer_pools(void) return nexp; }
-unsigned _timer_run(void) +unsigned int _timer_run(int dec) { static __thread odp_time_t last_timer_run; - static __thread unsigned timer_run_cnt = 1; + static __thread int timer_run_cnt = 1; odp_time_t now;
if (timer_global->num_timer_pools == 0) @@ -881,7 +881,8 @@ unsigned _timer_run(void) /* Rate limit how often this thread checks the timer pools. */
if (timer_global->inline_poll_interval > 1) { - if (--timer_run_cnt) + timer_run_cnt -= dec; + if (timer_run_cnt > 0) return 0; timer_run_cnt = timer_global->inline_poll_interval; }
commit 12c4ca508b07b8d2366b0c37bee5f866019e94c2 Author: Matias Elo matias.elo@nokia.com Date: Tue Nov 13 15:08:02 2018 +0200
linux-gen: timer: add config option for inline timer poll frequency
Add configure option 'timer.inline_poll_interval' for adjusting inline timer polling frequency.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf index d123e773..9969860d 100644 --- a/config/odp-linux-generic.conf +++ b/config/odp-linux-generic.conf @@ -140,4 +140,11 @@ timer: { # # Set to 1 to enable inline = 0 + + # Inline timer poll interval + # + # When set to 1 inline timers are polled during every schedule round. + # Increasing the value reduces timer processing overhead while + # decreasing accuracy. Ignored when inline timer is not enabled. + inline_poll_interval = 10 } diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h index 02ba92e0..1f612448 100644 --- a/platform/linux-generic/include/odp_timer_internal.h +++ b/platform/linux-generic/include/odp_timer_internal.h @@ -21,9 +21,6 @@ #include <odp/api/timer.h> #include <odp_global_data.h>
-/* Minimum number of scheduling rounds between checking timer pools. */ -#define CONFIG_TIMER_RUN_RATELIMIT_ROUNDS 1 - /** * Internal Timeout header */ diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index fcda6e39..6307ae4f 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -219,6 +219,7 @@ typedef struct timer_global_t { _odp_atomic_flag_t ODP_ALIGNED_CACHE locks[NUM_LOCKS]; #endif odp_bool_t use_inline_timers; + int inline_poll_interval; } timer_global_t;
static timer_global_t *timer_global; @@ -871,8 +872,7 @@ static unsigned process_timer_pools(void) unsigned _timer_run(void) { static __thread odp_time_t last_timer_run; - static __thread unsigned timer_run_cnt = - CONFIG_TIMER_RUN_RATELIMIT_ROUNDS; + static __thread unsigned timer_run_cnt = 1; odp_time_t now;
if (timer_global->num_timer_pools == 0) @@ -880,10 +880,10 @@ unsigned _timer_run(void)
/* Rate limit how often this thread checks the timer pools. */
- if (CONFIG_TIMER_RUN_RATELIMIT_ROUNDS > 1) { + if (timer_global->inline_poll_interval > 1) { if (--timer_run_cnt) return 0; - timer_run_cnt = CONFIG_TIMER_RUN_RATELIMIT_ROUNDS; + timer_run_cnt = timer_global->inline_poll_interval; }
now = odp_time_global(); @@ -1354,6 +1354,14 @@ int odp_timer_init_global(const odp_init_t *params) } timer_global->use_inline_timers = val;
+ conf_str = "timer.inline_poll_interval"; + if (!_odp_libconfig_lookup_int(conf_str, &val)) { + ODP_ERR("Config option '%s' not found.\n", conf_str); + odp_shm_free(shm); + return -1; + } + timer_global->inline_poll_interval = val; + if (params && params->not_used.feat.timer) timer_global->use_inline_timers = false;
commit 9ddeaaf7a3226ab03e57cc49775fd234b8d44a2b Author: Matias Elo matias.elo@nokia.com Date: Tue Nov 13 15:07:40 2018 +0200
travis: test inline timer implementation
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/.travis.yml b/.travis.yml index 3510d380..d06530ce 100644 --- a/.travis.yml +++ b/.travis.yml @@ -154,6 +154,19 @@ jobs: -e ODP_CONFIG_FILE=/odp/platform/linux-generic/test/process-mode.conf -e ODPH_PROC_MODE=1 ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/check.sh + - stage: test + env: TEST=inline_timer + install: + - true + compiler: gcc + script: + - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi + - docker run --privileged -i -t + -v `pwd`:/odp --shm-size 8g + -e CC="${CC}" + -e CONF="" + -e ODP_CONFIG_FILE=/odp/platform/linux-generic/test/inline-timer.conf + ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/check_inline_timer.sh - stage: test env: TEST=distcheck compiler: gcc diff --git a/platform/linux-generic/test/inline-timer.conf b/platform/linux-generic/test/inline-timer.conf new file mode 100644 index 00000000..cc9f2037 --- /dev/null +++ b/platform/linux-generic/test/inline-timer.conf @@ -0,0 +1,8 @@ +# Mandatory fields +odp_implementation = "linux-generic" +config_file_version = "0.1.5" + +timer: { + # Enable inline timer implementation + inline = 1 +} diff --git a/scripts/ci/check_inline_timer.sh b/scripts/ci/check_inline_timer.sh new file mode 100755 index 00000000..d2eff714 --- /dev/null +++ b/scripts/ci/check_inline_timer.sh @@ -0,0 +1,16 @@ +#!/bin/bash +set -e + +"`dirname "$0"`"/build_x86_64.sh + +cd "$(dirname "$0")"/../.. + +echo 1000 | tee /proc/sys/vm/nr_hugepages +mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge + +ODP_SCHEDULER=basic ./test/validation/api/timer/timer_main +ODP_SCHEDULER=sp ./test/validation/api/timer/timer_main +ODP_SCHEDULER=scalable ./test/validation/api/timer/timer_main + +umount /mnt/huge
commit 0dcbdecc871783b5669dc9cb8cf68b7f4d7ffb76 Author: Matias Elo matias.elo@nokia.com Date: Tue Nov 13 13:01:26 2018 +0200
linux-gen: timer: enable inline timer implementation using config file
Add configure option 'timer.inline' for enabling inline timer implementation.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf index 33938289..d123e773 100644 --- a/config/odp-linux-generic.conf +++ b/config/odp-linux-generic.conf @@ -16,7 +16,7 @@
# Mandatory fields odp_implementation = "linux-generic" -config_file_version = "0.1.4" +config_file_version = "0.1.5"
# Shared memory options shm: { @@ -129,3 +129,15 @@ sched_basic: { control = 1 } } + +timer: { + # Use inline timer implementation + # + # By default, timer processing is done in background threads (thread per + # timer pool). With inline implementation timers are processed on worker + # cores instead. When using inline timers the application has to call + # odp_schedule() or odp_queue_deq() to actuate timer processing. + # + # Set to 1 to enable + inline = 0 +} diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index 84da5774..fcda6e39 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -55,6 +55,7 @@ #include <odp/api/time.h> #include <odp/api/plat/time_inlines.h> #include <odp/api/timer.h> +#include <odp_libconfig_internal.h> #include <odp_queue_if.h> #include <odp_timer_internal.h> #include <odp/api/plat/queue_inlines.h> @@ -217,7 +218,7 @@ typedef struct timer_global_t { /* Multiple locks per cache line! */ _odp_atomic_flag_t ODP_ALIGNED_CACHE locks[NUM_LOCKS]; #endif - + odp_bool_t use_inline_timers; } timer_global_t;
static timer_global_t *timer_global; @@ -339,22 +340,8 @@ static odp_timer_pool_t timer_pool_new(const char *name, odp_ticketlock_lock(&timer_global->lock); timer_global->timer_pool[tp_idx] = tp;
- if (timer_global->num_timer_pools == 1) { - odp_bool_t inline_tim; - - /* - * Whether to run timer pool processing 'inline' (on worker - * cores) or in background threads (thread-per-timerpool). - * - * If the application will use scheduler this flag is set to - * true, otherwise false. This application conveys this - * information via the 'not_used' bits in odp_init_t which are - * passed to odp_global_init(). - */ - inline_tim = !odp_global_ro.init_param.not_used.feat.schedule; - - odp_global_rw->inline_timers = inline_tim; - } + if (timer_global->num_timer_pools == 1) + odp_global_rw->inline_timers = timer_global->use_inline_timers;
odp_ticketlock_unlock(&timer_global->lock); if (!odp_global_rw->inline_timers) { @@ -1333,7 +1320,8 @@ void odp_timeout_free(odp_timeout_t tmo) int odp_timer_init_global(const odp_init_t *params) { odp_shm_t shm; - odp_bool_t inline_timers = false; + const char *conf_str; + int val = 0;
shm = odp_shm_reserve("_odp_timer", sizeof(timer_global_t), ODP_CACHE_LINE_SIZE, 0); @@ -1358,16 +1346,21 @@ int odp_timer_init_global(const odp_init_t *params) #else ODP_DBG("Using lock-less timer implementation\n"); #endif + conf_str = "timer.inline"; + if (!_odp_libconfig_lookup_int(conf_str, &val)) { + ODP_ERR("Config option '%s' not found.\n", conf_str); + odp_shm_free(shm); + return -1; + } + timer_global->use_inline_timers = val;
- if (params) - inline_timers = - !params->not_used.feat.schedule && - !params->not_used.feat.timer; + if (params && params->not_used.feat.timer) + timer_global->use_inline_timers = false;
timer_global->time_per_ratelimit_period = odp_time_global_from_ns(timer_global->min_res_ns / 2);
- if (!inline_timers) { + if (!timer_global->use_inline_timers) { timer_res_init(); block_sigalarm(); } diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf index a86562f6..e3e6b55b 100644 --- a/platform/linux-generic/test/process-mode.conf +++ b/platform/linux-generic/test/process-mode.conf @@ -1,6 +1,6 @@ # Mandatory fields odp_implementation = "linux-generic" -config_file_version = "0.1.4" +config_file_version = "0.1.5"
# Shared memory options shm: {
commit d89f772686dba6b331515be41aeefe345a207575 Author: Matias Elo matias.elo@nokia.com Date: Thu Nov 15 17:18:22 2018 +0200
linux-gen: timer: zero timer pool memory on reserve
Fixes timer thread failing due to uninitialized variables.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index a2261755..84da5774 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -260,6 +260,7 @@ static odp_timer_pool_t timer_pool_new(const char *name, { uint32_t i, tp_idx; size_t sz0, sz1, sz2; + uint64_t tp_size; uint32_t flags = ODP_SHM_SW_ONLY;
if (odp_global_ro.shm_single_va) @@ -289,12 +290,17 @@ static odp_timer_pool_t timer_pool_new(const char *name, sz1 = ROUNDUP_CACHE_LINE(sizeof(tick_buf_t) * param->num_timers); sz2 = ROUNDUP_CACHE_LINE(sizeof(_odp_timer_t) * param->num_timers); - odp_shm_t shm = odp_shm_reserve(name, sz0 + sz1 + sz2, - ODP_CACHE_LINE_SIZE, flags); + tp_size = sz0 + sz1 + sz2; + + odp_shm_t shm = odp_shm_reserve(name, tp_size, ODP_CACHE_LINE_SIZE, + flags); if (odp_unlikely(shm == ODP_SHM_INVALID)) ODP_ABORT("%s: timer pool shm-alloc(%zuKB) failed\n", name, (sz0 + sz1 + sz2) / 1024); timer_pool_t *tp = (timer_pool_t *)odp_shm_addr(shm); + + memset(tp, 0, tp_size); + tp->prev_scan = odp_time_global(); tp->time_per_tick = odp_time_global_from_ns(param->res_ns); odp_atomic_init_u64(&tp->cur_tick, 0);
commit 132dff6387e441c7019a8366c681941ee6173452 Author: Matias Elo matias.elo@nokia.com Date: Fri Nov 16 10:18:09 2018 +0200
linux-gen: init: always initialize odp_global_ro.init_param
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_init.c b/platform/linux-generic/odp_init.c index 0f7e0df2..132ada03 100644 --- a/platform/linux-generic/odp_init.c +++ b/platform/linux-generic/odp_init.c @@ -267,6 +267,7 @@ int odp_init_global(odp_instance_t *instance, odp_global_ro.log_fn = odp_override_log; odp_global_ro.abort_fn = odp_override_abort;
+ odp_init_param_init(&odp_global_ro.init_param); if (params != NULL) { odp_global_ro.init_param = *params;
-----------------------------------------------------------------------
Summary of changes: .travis.yml | 13 ++++ config/odp-linux-generic.conf | 26 +++++++- .../linux-generic/include/odp_timer_internal.h | 11 ++-- platform/linux-generic/odp_init.c | 1 + platform/linux-generic/odp_ishm.c | 30 ++++++---- platform/linux-generic/odp_queue_basic.c | 19 +++--- platform/linux-generic/odp_queue_scalable.c | 16 ++--- platform/linux-generic/odp_schedule_basic.c | 7 ++- platform/linux-generic/odp_schedule_scalable.c | 2 +- platform/linux-generic/odp_schedule_sp.c | 6 +- platform/linux-generic/odp_timer.c | 70 ++++++++++++---------- platform/linux-generic/test/inline-timer.conf | 8 +++ platform/linux-generic/test/process-mode.conf | 2 +- scripts/ci/{check.sh => check_inline_timer.sh} | 7 +-- 14 files changed, 142 insertions(+), 76 deletions(-) create mode 100644 platform/linux-generic/test/inline-timer.conf copy scripts/ci/{check.sh => check_inline_timer.sh} (51%)
hooks/post-receive