This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, api-next has been updated via af7be638ef9ac98bdb1f2e4917f152889eb1850f (commit) via dffd44a745bfb4761d9d3faba41aa8259a903f6b (commit) via 0772569e525e28226475a415f5c2e719795917c2 (commit) via 87d2f6df094ad402676149622af4e436b19598af (commit) via 189fc5df9551dd998a1091de863843edd6f98b78 (commit) from e1175d5a69c65bb465022c9f1381c40fdb5c4069 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log ----------------------------------------------------------------- commit af7be638ef9ac98bdb1f2e4917f152889eb1850f Author: Balasubramanian Manoharan bala.manoharan@linaro.org Date: Mon Feb 5 17:49:43 2018 +0530
linux-gen: schedule: implements async ordered lock
implements asynchronous order context lock
Signed-off-by: Balasubramanian Manoharan bala.manoharan@linaro.org Reviewed-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h index 8605ca66..66e05043 100644 --- a/platform/linux-generic/include/odp_schedule_if.h +++ b/platform/linux-generic/include/odp_schedule_if.h @@ -41,6 +41,8 @@ typedef int (*schedule_term_local_fn_t)(void); typedef void (*schedule_order_lock_fn_t)(void); typedef void (*schedule_order_unlock_fn_t)(void); typedef void (*schedule_order_unlock_lock_fn_t)(void); +typedef void (*schedule_order_lock_start_fn_t)(void); +typedef void (*schedule_order_lock_wait_fn_t)(void); typedef uint32_t (*schedule_max_ordered_locks_fn_t)(void); typedef void (*schedule_save_context_fn_t)(uint32_t queue_index);
@@ -60,6 +62,8 @@ typedef struct schedule_fn_t { schedule_term_local_fn_t term_local; schedule_order_lock_fn_t order_lock; schedule_order_unlock_fn_t order_unlock; + schedule_order_lock_start_fn_t start_order_lock; + schedule_order_lock_wait_fn_t wait_order_lock; schedule_order_unlock_lock_fn_t order_unlock_lock; schedule_max_ordered_locks_fn_t max_ordered_locks;
@@ -105,6 +109,8 @@ typedef struct { void (*schedule_order_lock)(uint32_t); void (*schedule_order_unlock)(uint32_t); void (*schedule_order_unlock_lock)(uint32_t, uint32_t); + void (*schedule_order_lock_start)(uint32_t); + void (*schedule_order_lock_wait)(uint32_t);
} schedule_api_t;
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c index c91a9338..a6581bed 100644 --- a/platform/linux-generic/odp_schedule.c +++ b/platform/linux-generic/odp_schedule.c @@ -1152,6 +1152,16 @@ static void schedule_order_unlock_lock(uint32_t unlock_index, schedule_order_lock(lock_index); }
+static void schedule_order_lock_start(uint32_t lock_index) +{ + (void)lock_index; +} + +static void schedule_order_lock_wait(uint32_t lock_index) +{ + schedule_order_lock(lock_index); +} + static void schedule_pause(void) { sched_local.pause = 1; @@ -1438,5 +1448,7 @@ const schedule_api_t schedule_default_api = { .schedule_group_info = schedule_group_info, .schedule_order_lock = schedule_order_lock, .schedule_order_unlock = schedule_order_unlock, - .schedule_order_unlock_lock = schedule_order_unlock_lock + .schedule_order_unlock_lock = schedule_order_unlock_lock, + .schedule_order_lock_start = schedule_order_lock_start, + .schedule_order_lock_wait = schedule_order_lock_wait }; diff --git a/platform/linux-generic/odp_schedule_if.c b/platform/linux-generic/odp_schedule_if.c index e5f734ae..6a3b4e4b 100644 --- a/platform/linux-generic/odp_schedule_if.c +++ b/platform/linux-generic/odp_schedule_if.c @@ -134,3 +134,14 @@ void odp_schedule_order_unlock_lock(uint32_t unlock_index, uint32_t lock_index) { sched_api->schedule_order_unlock_lock(unlock_index, lock_index); } + +void odp_schedule_order_lock_start(uint32_t lock_index) +{ + sched_api->schedule_order_lock_start(lock_index); +} + +void odp_schedule_order_lock_wait(uint32_t lock_index) +{ + sched_api->schedule_order_lock_wait(lock_index); +} + diff --git a/platform/linux-generic/odp_schedule_iquery.c b/platform/linux-generic/odp_schedule_iquery.c index 02396aa0..3ce85394 100644 --- a/platform/linux-generic/odp_schedule_iquery.c +++ b/platform/linux-generic/odp_schedule_iquery.c @@ -1308,6 +1308,16 @@ static uint32_t schedule_max_ordered_locks(void) return CONFIG_QUEUE_MAX_ORD_LOCKS; }
+static void schedule_order_lock_start(uint32_t lock_index) +{ + (void)lock_index; +} + +static void schedule_order_lock_wait(uint32_t lock_index) +{ + schedule_order_lock(lock_index); +} + static inline bool is_atomic_queue(unsigned int queue_index) { return (sched->queues[queue_index].sync == ODP_SCHED_SYNC_ATOMIC); @@ -1376,7 +1386,9 @@ const schedule_api_t schedule_iquery_api = { .schedule_group_info = schedule_group_info, .schedule_order_lock = schedule_order_lock, .schedule_order_unlock = schedule_order_unlock, - .schedule_order_unlock_lock = schedule_order_unlock_lock + .schedule_order_unlock_lock = schedule_order_unlock_lock, + .schedule_order_lock_start = schedule_order_lock_start, + .schedule_order_lock_wait = schedule_order_lock_wait };
static void thread_set_interest(sched_thread_local_t *thread, diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c index 761f93d3..f5974442 100644 --- a/platform/linux-generic/odp_schedule_scalable.c +++ b/platform/linux-generic/odp_schedule_scalable.c @@ -1186,6 +1186,16 @@ static void schedule_order_unlock_lock(uint32_t unlock_index, schedule_order_lock(lock_index); }
+static void schedule_order_lock_start(uint32_t lock_index) +{ + (void)lock_index; +} + +static void schedule_order_lock_wait(uint32_t lock_index) +{ + schedule_order_lock(lock_index); +} + static void schedule_release_atomic(void) { sched_scalable_thread_state_t *ts; @@ -2096,4 +2106,6 @@ const schedule_api_t schedule_scalable_api = { .schedule_order_lock = schedule_order_lock, .schedule_order_unlock = schedule_order_unlock, .schedule_order_unlock_lock = schedule_order_unlock_lock, + .schedule_order_lock_start = schedule_order_lock_start, + .schedule_order_lock_wait = schedule_order_lock_wait }; diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c index bad47cad..e46ae448 100644 --- a/platform/linux-generic/odp_schedule_sp.c +++ b/platform/linux-generic/odp_schedule_sp.c @@ -829,6 +829,16 @@ static void schedule_order_unlock_lock(uint32_t unlock_index, (void)lock_index; }
+static void schedule_order_lock_start(uint32_t lock_index) +{ + (void)lock_index; +} + +static void schedule_order_lock_wait(uint32_t lock_index) +{ + (void)lock_index; +} + static void order_lock(void) { } @@ -879,5 +889,7 @@ const schedule_api_t schedule_sp_api = { .schedule_group_info = schedule_group_info, .schedule_order_lock = schedule_order_lock, .schedule_order_unlock = schedule_order_unlock, - .schedule_order_unlock_lock = schedule_order_unlock_lock + .schedule_order_unlock_lock = schedule_order_unlock_lock, + .schedule_order_lock_start = schedule_order_lock_start, + .schedule_order_lock_wait = schedule_order_lock_wait };
commit dffd44a745bfb4761d9d3faba41aa8259a903f6b Author: Balasubramanian Manoharan bala.manoharan@linaro.org Date: Mon Feb 5 17:49:26 2018 +0530
api: schedule: add asynchronous order lock
add asynchronous order lock api. schedule order context lock can be acquired asynchronously using two functions start and wait. application can utilize the cycles between these functions to increase performance.
Signed-off-by: Balasubramanian Manoharan bala.manoharan@linaro.org Reviewed-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/include/odp/api/spec/schedule.h b/include/odp/api/spec/schedule.h index d493cc75..bbc74983 100644 --- a/include/odp/api/spec/schedule.h +++ b/include/odp/api/spec/schedule.h @@ -371,6 +371,34 @@ void odp_schedule_order_unlock(uint32_t lock_index); */ void odp_schedule_order_unlock_lock(uint32_t unlock_index, uint32_t lock_index);
+/** Asynchronous ordered context lock + * Request an ordered context lock to be acquired. Starts an ordered context + * lock acquire operation, but does not wait until the lock has been acquired. + * Application can use this call to potentially interleave some processing + * within waiting for this lock. Each start lock call must be paired with a wait + * call that blocks until the lock has been acquired. Locks cannot be acquired + * in nested fashion i.e. each start call must follow a pairing wait and unlock + * calls, before using another lock. + * The same constraints apply as with odp_schedule_order_lock() + * + * @param lock_index Index of the ordered lock in the current context to + * start acquire operation. + * Must be in the range 0..odp_queue_lock_count() - 1. + * + */ +void odp_schedule_order_lock_start(uint32_t lock_index); + +/** Asynchronous ordered context lock wait + * Wait for a previously started lock acquire operation to finish. + * Lock index must match with the previous start call. Ordered lock acquisition + * will be completed during this call. + * + * @param lock_index Index of the ordered lock in the current context to + * complete acquire operation. + * Must be in the range 0..odp_queue_lock_count() - 1. + */ +void odp_schedule_order_lock_wait(uint32_t lock_index); + /** * @} */
commit 0772569e525e28226475a415f5c2e719795917c2 Author: Petri Savolainen petri.savolainen@linaro.org Date: Fri Feb 2 10:45:29 2018 +0200
linux-gen: shm: use global init max memory
Use global init parameter to allow application to use more than 512 MB of shared memory.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/_ishm.c b/platform/linux-generic/_ishm.c index 81d77edc..ab112ace 100644 --- a/platform/linux-generic/_ishm.c +++ b/platform/linux-generic/_ishm.c @@ -258,7 +258,7 @@ static void *alloc_fragment(uintptr_t size, int block_index, intptr_t align, ishm_fragment_t *rem_fragmnt; uintptr_t border;/* possible start of new fragment (next alignement) */ intptr_t left; /* room remaining after, if the segment is allocated */ - uintptr_t remainder = ODP_CONFIG_ISHM_VA_PREALLOC_SZ; + uintptr_t remainder = odp_global_data.shm_max_memory;
/* * search for the best bit, i.e. search for the unallocated fragment @@ -1436,7 +1436,7 @@ int _odp_ishm_cleanup_files(const char *dirpath) return 0; }
-int _odp_ishm_init_global(void) +int _odp_ishm_init_global(const odp_init_t *init) { void *addr; void *spce_addr; @@ -1444,7 +1444,15 @@ int _odp_ishm_init_global(void) uid_t uid; char *hp_dir = odp_global_data.hugepage_info.default_huge_page_dir; uint64_t align; + uint64_t max_memory = ODP_CONFIG_ISHM_VA_PREALLOC_SZ; + uint64_t internal = ODP_CONFIG_ISHM_VA_PREALLOC_SZ / 8;
+ /* user requested memory size + some extra for internal use */ + if (init && init->shm.max_memory) + max_memory = init->shm.max_memory + internal; + + odp_global_data.shm_max_memory = max_memory; + odp_global_data.shm_max_size = max_memory - internal; odp_global_data.main_pid = getpid(); odp_global_data.shm_dir = getenv("ODP_SHM_DIR"); if (odp_global_data.shm_dir) { @@ -1507,7 +1515,7 @@ int _odp_ishm_init_global(void) *reserve the address space for _ODP_ISHM_SINGLE_VA reserved blocks, * only address space! */ - spce_addr = _odp_ishmphy_book_va(ODP_CONFIG_ISHM_VA_PREALLOC_SZ, align); + spce_addr = _odp_ishmphy_book_va(max_memory, align); if (!spce_addr) { ODP_ERR("unable to reserve virtual space\n."); goto init_glob_err3; @@ -1516,7 +1524,7 @@ int _odp_ishm_init_global(void) /* use the first fragment descriptor to describe to whole VA space: */ ishm_ftbl->fragment[0].block_index = -1; ishm_ftbl->fragment[0].start = spce_addr; - ishm_ftbl->fragment[0].len = ODP_CONFIG_ISHM_VA_PREALLOC_SZ; + ishm_ftbl->fragment[0].len = max_memory; ishm_ftbl->fragment[0].prev = NULL; ishm_ftbl->fragment[0].next = NULL; ishm_ftbl->used_fragmnts = &ishm_ftbl->fragment[0]; diff --git a/platform/linux-generic/include/odp_internal.h b/platform/linux-generic/include/odp_internal.h index a21e93c8..444e1163 100644 --- a/platform/linux-generic/include/odp_internal.h +++ b/platform/linux-generic/include/odp_internal.h @@ -44,6 +44,8 @@ typedef struct { struct odp_global_data_s { char *shm_dir; /*< directory for odp mmaped files */ int shm_dir_from_env; /*< overload default with env */ + uint64_t shm_max_memory; + uint64_t shm_max_size; pid_t main_pid; char uid[UID_MAXLEN]; odp_log_func_t log_fn; @@ -129,7 +131,7 @@ int _odp_int_name_tbl_term_global(void); int _odp_fdserver_init_global(void); int _odp_fdserver_term_global(void);
-int _odp_ishm_init_global(void); +int _odp_ishm_init_global(const odp_init_t *init); int _odp_ishm_init_local(void); int _odp_ishm_term_global(void); int _odp_ishm_term_local(void); diff --git a/platform/linux-generic/odp_init.c b/platform/linux-generic/odp_init.c index be75a530..a2d9d52f 100644 --- a/platform/linux-generic/odp_init.c +++ b/platform/linux-generic/odp_init.c @@ -66,7 +66,7 @@ int odp_init_global(odp_instance_t *instance, } stage = SYSINFO_INIT;
- if (_odp_ishm_init_global()) { + if (_odp_ishm_init_global(params)) { ODP_ERR("ODP ishm init failed.\n"); goto init_failed; } diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c index c322c7eb..c9b04dfd 100644 --- a/platform/linux-generic/odp_shared_memory.c +++ b/platform/linux-generic/odp_shared_memory.c @@ -12,6 +12,7 @@ #include <odp/api/shared_memory.h> #include <odp/api/plat/strong_types.h> #include <_ishm_internal.h> +#include <odp_internal.h> #include <string.h>
ODP_STATIC_ASSERT(ODP_CONFIG_SHM_BLOCKS >= ODP_CONFIG_POOLS, @@ -47,7 +48,7 @@ int odp_shm_capability(odp_shm_capability_t *capa) memset(capa, 0, sizeof(odp_shm_capability_t));
capa->max_blocks = ODP_CONFIG_SHM_BLOCKS; - capa->max_size = 0; + capa->max_size = odp_global_data.shm_max_size; capa->max_align = 0;
return 0;
commit 87d2f6df094ad402676149622af4e436b19598af Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Feb 1 15:32:35 2018 +0200
api: init: add shm memory size parameter
Application memory usage may vary a lot. Implementation may need to pre-allocate memory or address space already at init time to be able to guarantee e.g. the same VA address space for all threads. Total SHM memory usage information is needed at global init time, as implementation likely uses the same shared memory for its own global memory allocations.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/include/odp/api/spec/init.h b/include/odp/api/spec/init.h index 21734db2..ee33e7cd 100644 --- a/include/odp/api/spec/init.h +++ b/include/odp/api/spec/init.h @@ -107,23 +107,29 @@ typedef int (*odp_log_func_t)(odp_log_level_t level, const char *fmt, ...); typedef void (*odp_abort_func_t)(void) ODP_NORETURN;
/** - * ODP initialization data + * Global initialization parameters * - * Data that is required to initialize the ODP API with the - * application specific data such as specifying a logging callback, the log - * level etc. + * These parameters may be used at global initialization time to configure and + * optimize ODP implementation to match the intended usage. Application + * specifies maximum resource usage. Implementation may round up resource + * reservations as needed. Initialization function returns a failure if resource + * requirements are too high. Init parameters may be used also to override + * logging and abort functions. * - * @note It is expected that all unassigned members are zero + * Use odp_init_param_init() to initialize the parameters into their default + * values. Unused parameters are left to default values. */ typedef struct odp_init_t { /** Maximum number of worker threads the user will run concurrently. Valid range is from 0 to platform specific maximum. Set both num_worker and num_control to zero for default number of threads. */ int num_worker; + /** Maximum number of control threads the user will run concurrently. Valid range is from 0 to platform specific maximum. Set both num_worker and num_control to zero for default number of threads. */ int num_control; + /** Pointer to bit mask mapping CPUs available to this ODP instance for running worker threads. Initialize to a NULL pointer to use default CPU mapping. @@ -139,6 +145,7 @@ typedef struct odp_init_t { worker masks */ const odp_cpumask_t *worker_cpus; + /** Pointer to bit mask mapping CPUs available to this ODP instance for running control threads. Initialize to a NULL pointer to use default CPU mapping. @@ -150,10 +157,13 @@ typedef struct odp_init_t { worker and control masks do not overlap. 
*/ const odp_cpumask_t *control_cpus; + /** Replacement for the default log fn */ odp_log_func_t log_fn; + /** Replacement for the default abort fn */ odp_abort_func_t abort_fn; + /** Unused features. These are hints to the ODP implementation that * the application will not use any APIs associated with these * features. Implementations may use this information to provide @@ -161,6 +171,16 @@ typedef struct odp_init_t { * that a feature will not be used and it is used anyway. */ odp_feature_t not_used; + + /** Shared memory parameters */ + struct { + /** Maximum memory usage in bytes. This is the maximum + * amount of shared memory that application will reserve + * concurrently. Use 0 when not set. Default value is 0. + */ + uint64_t max_memory; + } shm; + } odp_init_t;
/** @@ -189,7 +209,8 @@ void odp_init_param_init(odp_init_t *param); * system and outputs a handle for it. The handle is used in other calls * (e.g. odp_init_local()) as a reference to the instance. When user provides * configuration parameters, the platform may configure and optimize the - * instance to match user requirements. + * instance to match user requirements. A failure is returned if requirements + * cannot be met. * * Configuration parameters are divided into standard and platform specific * parts. Standard parameters are supported by any ODP platform, where as
commit 189fc5df9551dd998a1091de863843edd6f98b78 Author: Petri Savolainen petri.savolainen@linaro.org Date: Thu Feb 1 14:04:24 2018 +0200
validation: shm: test capa and maximum reservation
Added test which uses capability API, and tries to reserve and use maximum sized block. 100 MB shm memory is assumed to be available to ODP validation tests.
Signed-off-by: Petri Savolainen petri.savolainen@linaro.org Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/validation/api/shmem/shmem.c b/test/validation/api/shmem/shmem.c index bda07011..74eaa32d 100644 --- a/test/validation/api/shmem/shmem.c +++ b/test/validation/api/shmem/shmem.c @@ -22,6 +22,8 @@ #define STRESS_SIZE 32 /* power of 2 and <=256 */ #define STRESS_RANDOM_SZ 5 #define STRESS_ITERATION 5000 +#define MAX_SIZE_TESTED (100 * 1000000UL) +#define MAX_ALIGN_TESTED (1024 * 1024)
typedef enum { STRESS_FREE, /* entry is free and can be allocated */ @@ -212,6 +214,57 @@ void shmem_test_basic(void) CU_ASSERT(0 == odp_shm_free(shm)); }
+/* + * maximum size reservation + */ +static void shmem_test_max_reserve(void) +{ + odp_shm_capability_t capa; + odp_shm_t shm; + uint64_t size, align; + uint8_t *data; + uint64_t i; + + memset(&capa, 0, sizeof(odp_shm_capability_t)); + CU_ASSERT_FATAL(odp_shm_capability(&capa) == 0); + + CU_ASSERT(capa.max_blocks > 0); + + size = capa.max_size; + align = capa.max_align; + + /* Assuming that system has at least MAX_SIZE_TESTED bytes available */ + if (capa.max_size == 0 || capa.max_size > MAX_SIZE_TESTED) + size = MAX_SIZE_TESTED; + + if (capa.max_align == 0 || capa.max_align > MAX_ALIGN_TESTED) + align = MAX_ALIGN_TESTED; + + printf("\n size: %" PRIu64 "\n", size); + printf(" align: %" PRIu64 "\n", align); + + shm = odp_shm_reserve("test_max_reserve", size, align, 0); + CU_ASSERT(shm != ODP_SHM_INVALID); + + data = odp_shm_addr(shm); + CU_ASSERT(data != NULL); + + if (data) { + memset(data, 0xde, size); + for (i = 0; i < size; i++) { + if (data[i] != 0xde) { + printf(" data error i:%" PRIu64 ", data %x" + "\n", i, data[i]); + CU_FAIL("Data error"); + break; + } + } + } + + if (shm != ODP_SHM_INVALID) + CU_ASSERT(odp_shm_free(shm) == 0); +} + /* * thread part for the shmem_test_reserve_after_fork */ @@ -769,6 +822,7 @@ void shmem_test_stress(void)
odp_testinfo_t shmem_suite[] = { ODP_TEST_INFO(shmem_test_basic), + ODP_TEST_INFO(shmem_test_max_reserve), ODP_TEST_INFO(shmem_test_reserve_after_fork), ODP_TEST_INFO(shmem_test_singleva_after_fork), ODP_TEST_INFO(shmem_test_stress),
-----------------------------------------------------------------------
Summary of changes: include/odp/api/spec/init.h | 33 ++++++++++++--- include/odp/api/spec/schedule.h | 28 ++++++++++++ platform/linux-generic/_ishm.c | 16 +++++-- platform/linux-generic/include/odp_internal.h | 4 +- platform/linux-generic/include/odp_schedule_if.h | 6 +++ platform/linux-generic/odp_init.c | 2 +- platform/linux-generic/odp_schedule.c | 14 +++++- platform/linux-generic/odp_schedule_if.c | 11 +++++ platform/linux-generic/odp_schedule_iquery.c | 14 +++++- platform/linux-generic/odp_schedule_scalable.c | 12 ++++++ platform/linux-generic/odp_schedule_sp.c | 14 +++++- platform/linux-generic/odp_shared_memory.c | 3 +- test/validation/api/shmem/shmem.c | 54 ++++++++++++++++++++++++ 13 files changed, 195 insertions(+), 16 deletions(-)
hooks/post-receive