This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, api-next has been updated
       via  525f6e6b3adf629df89d195fbb1959b40ecc8a0d (commit)
       via  d044b11d7010cc328cca986849a1414c1e46fb53 (commit)
       via  9c0ad641faeabbede48fd09b7c91f753186163bd (commit)
       via  d7913a845c72753758121111cc5da381198ba0f6 (commit)
      from  43dd326bf4777a01a0fa75c9c9055376d246e44b (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log -----------------------------------------------------------------
commit 525f6e6b3adf629df89d195fbb1959b40ecc8a0d
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Tue Dec 18 16:37:23 2018 +0200
linux-gen: sched: dummy flow aware implementation
Implement the flow aware scheduling API with the least possible changes. The scheduler does not act on flow IDs, but still synchronizes on queue level. This is functionally correct, but does not provide parallelism between different flows of a queue. So, an application does not benefit from using flows, but it functions correctly. The maximum number of flows per queue is limited to 256 to minimize the number of bytes used in the buffer header.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Dmitry Eremin-Solenikov <dmitry.ereminsolenikov@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h
index c6e1345d..5e0b2bcc 100644
--- a/platform/linux-generic/include/odp_buffer_internal.h
+++ b/platform/linux-generic/include/odp_buffer_internal.h
@@ -54,6 +54,9 @@ ODP_STATIC_ASSERT(ODP_CONFIG_POOLS <= (0xFF + 1), "TOO_MANY_POOLS");
 /* Check that buffer index fit into bit field */
 ODP_STATIC_ASSERT(CONFIG_POOL_MAX_NUM <= (0xFFFFFF + 1), "TOO_LARGE_POOL");
 
+/* Type size limits number of flow IDs supported */
+#define BUF_HDR_MAX_FLOW_ID 255
+
 /* Common buffer header */
 struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
 	/* Combined pool and buffer index */
@@ -94,6 +97,9 @@ struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
 	/* Event type. Maybe different than pool type (crypto compl event) */
 	int8_t event_type;
 
+	/* Event flow id */
+	uint8_t flow_id;
+
 	/* Initial buffer tail pointer */
 	uint8_t *buf_end;
 
@@ -120,6 +126,20 @@ static inline odp_buffer_t buf_from_buf_hdr(odp_buffer_hdr_t *hdr)
 	return (odp_buffer_t)hdr;
 }
 
+static inline uint32_t event_flow_id(odp_event_t ev)
+{
+	odp_buffer_hdr_t *buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)ev;
+
+	return buf_hdr->flow_id;
+}
+
+static inline void event_flow_id_set(odp_event_t ev, uint32_t flow_id)
+{
+	odp_buffer_hdr_t *buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)ev;
+
+	buf_hdr->flow_id = flow_id;
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/platform/linux-generic/odp_event.c b/platform/linux-generic/odp_event.c
index bdde93e1..efcbc1e2 100644
--- a/platform/linux-generic/odp_event.c
+++ b/platform/linux-generic/odp_event.c
@@ -59,17 +59,14 @@ int odp_event_type_multi(const odp_event_t event[], int num,
 	return i;
 }
 
-/* For now ODP generic does not support flow awareness,
- * so all flow ids are zero. */
-uint32_t odp_event_flow_id(odp_event_t event ODP_UNUSED)
+uint32_t odp_event_flow_id(odp_event_t event)
 {
-	return 0;
+	return event_flow_id(event);
 }
 
-void odp_event_flow_id_set(odp_event_t event ODP_UNUSED,
-			   uint32_t flow_id ODP_UNUSED)
+void odp_event_flow_id_set(odp_event_t event, uint32_t flow_id)
 {
-	/* Do nothing */
+	event_flow_id_set(event, flow_id);
 }
 
 void odp_event_free(odp_event_t event)
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index e5a9cfc7..6176c951 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -1579,6 +1579,7 @@ static int schedule_capability(odp_schedule_capability_t *capa)
 	capa->max_prios = schedule_num_prio();
 	capa->max_queues = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
 	capa->max_queue_size = queue_glb->config.max_queue_size;
+	capa->max_flow_id = BUF_HDR_MAX_FLOW_ID;
 
 	return 0;
 }
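
For illustration only (not part of the patch; the helper name is
hypothetical), an application could tag events with flow IDs on top of
this implementation like this:

#include <odp_api.h>

/* Hypothetical helper: tag a buffer event with a flow ID and enqueue it.
 * With this dummy implementation the ID is stored in the buffer header,
 * but scheduling still synchronizes on queue level only. */
static int enqueue_with_flow_id(odp_queue_t queue, odp_buffer_t buf,
				uint32_t flow_id)
{
	odp_event_t ev = odp_buffer_to_event(buf);

	/* flow_id must not exceed the max_flow_id capability
	 * (BUF_HDR_MAX_FLOW_ID, i.e. 255, on linux-generic) */
	odp_event_flow_id_set(ev, flow_id);

	return odp_queue_enq(queue, ev);
}
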
commit d044b11d7010cc328cca986849a1414c1e46fb53
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Tue Dec 18 09:55:12 2018 +0200
validation: sched: add flow aware test case
Move the scheduler config call into the test suite init, as some test cases need a non-default config. The scheduler configuration can be set only once, so all test cases share the config. Flow aware mode is enabled when the capability allows it.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Dmitry Eremin-Solenikov <dmitry.ereminsolenikov@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index 27377580..bdcd7b2d 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -8,6 +8,7 @@
 
 #include <odp_api.h>
 #include "odp_cunit_common.h"
+#include <odp/helper/odph_api.h>
 
 #define MAX_WORKERS_THREADS 32
 #define MAX_ORDERED_LOCKS 2
@@ -24,6 +25,9 @@
 
 #define TEST_QUEUE_SIZE_NUM_EV 50
 
+#define MAX_FLOWS 16
+#define FLOW_TEST_NUM_EV (10 * MAX_FLOWS)
+
 #define GLOBALS_SHM_NAME "test_globals"
 #define MSG_POOL_NAME "msg_pool"
 #define QUEUE_CTX_POOL_NAME "queue_ctx_pool"
@@ -62,6 +66,7 @@ typedef struct {
 	odp_pool_t pool;
 	odp_pool_t queue_ctx_pool;
 	uint32_t max_sched_queue_size;
+	uint64_t num_flows;
 	odp_ticketlock_t lock;
 	odp_spinlock_t atomic_lock;
 	struct {
@@ -1846,6 +1851,32 @@ static int scheduler_suite_init(void)
 	odp_pool_t pool;
 	thread_args_t *args;
 	odp_pool_param_t params;
+	uint64_t num_flows;
+	odp_schedule_capability_t sched_capa;
+	odp_schedule_config_t sched_config;
+
+	if (odp_schedule_capability(&sched_capa)) {
+		printf("odp_schedule_capability() failed\n");
+		return -1;
+	}
+
+	num_flows = 0;
+	odp_schedule_config_init(&sched_config);
+
+	/* Enable flow aware scheduling */
+	if (sched_capa.max_flow_id > 0) {
+		num_flows = MAX_FLOWS;
+		if ((MAX_FLOWS - 1) > sched_capa.max_flow_id)
+			num_flows = sched_capa.max_flow_id + 1;
+
+		sched_config.max_flow_id = num_flows - 1;
+	}
+
+	/* Configure the scheduler. All test cases share the config. */
+	if (odp_schedule_config(&sched_config)) {
+		printf("odp_schedule_config() failed.\n");
+		return -1;
+	}
 
 	odp_pool_param_init(&params);
 	params.buf.size = BUF_SIZE;
@@ -1872,6 +1903,8 @@ static int scheduler_suite_init(void)
 
 	memset(globals, 0, sizeof(test_globals_t));
 
+	globals->num_flows = num_flows;
+
 	globals->num_workers = odp_cpumask_default_worker(&mask, 0);
 	if (globals->num_workers > MAX_WORKERS)
 		globals->num_workers = MAX_WORKERS;
@@ -1975,6 +2008,127 @@ static int scheduler_suite_term(void)
 	return 0;
 }
 
+static int check_flow_aware_support(void)
+{
+	if (globals->num_flows == 0) {
+		printf("\nTest: scheduler_test_flow_aware: SKIPPED\n");
+		return ODP_TEST_INACTIVE;
+	}
+
+	return ODP_TEST_ACTIVE;
+}
+
+static void scheduler_test_flow_aware(void)
+{
+	odp_schedule_capability_t sched_capa;
+	odp_schedule_config_t sched_config;
+	odp_pool_param_t pool_param;
+	odp_pool_t pool;
+	odp_queue_param_t queue_param;
+	odp_queue_t queue, from;
+	uint32_t j, queue_size, num, num_flows, flow_id;
+	odp_buffer_t buf;
+	odp_event_t ev;
+	int i, ret;
+	uint32_t flow_stat[MAX_FLOWS];
+	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
+				      ODP_SCHED_SYNC_ATOMIC,
+				      ODP_SCHED_SYNC_ORDERED};
+
+	/* Test should be skipped when no flows */
+	CU_ASSERT_FATAL(globals->num_flows);
+	CU_ASSERT_FATAL(odp_schedule_capability(&sched_capa) == 0);
+
+	num_flows = globals->num_flows;
+
+	queue_size = FLOW_TEST_NUM_EV;
+	odp_schedule_config_init(&sched_config);
+	if (sched_config.queue_size &&
+	    queue_size > sched_config.queue_size)
+		queue_size = sched_config.queue_size;
+
+	odp_pool_param_init(&pool_param);
+	pool_param.buf.size = 100;
+	pool_param.buf.align = 0;
+	pool_param.buf.num = FLOW_TEST_NUM_EV;
+	pool_param.type = ODP_POOL_BUFFER;
+
+	pool = odp_pool_create("test_flow_aware", &pool_param);
+
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	for (i = 0; i < 3; i++) {
+		memset(flow_stat, 0, sizeof(flow_stat));
+		flow_id = 0;
+
+		odp_queue_param_init(&queue_param);
+		queue_param.type = ODP_QUEUE_TYPE_SCHED;
+		queue_param.sched.prio = odp_schedule_default_prio();
+		queue_param.sched.sync = sync[i];
+		queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+		queue_param.size = queue_size;
+
+		queue = odp_queue_create("test_flow_aware", &queue_param);
+
+		CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+		for (j = 0; j < queue_size; j++) {
+			buf = odp_buffer_alloc(pool);
+			CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+			ev = odp_buffer_to_event(buf);
+
+			odp_event_flow_id_set(ev, flow_id);
+			CU_ASSERT(odp_event_flow_id(ev) == flow_id);
+
+			ret = odp_queue_enq(queue, ev);
+			CU_ASSERT(ret == 0);
+
+			if (ret) {
+				odp_event_free(ev);
+				continue;
+			}
+
+			flow_stat[flow_id]++;
+
+			flow_id++;
+			if (flow_id == num_flows)
+				flow_id = 0;
+		}
+
+		num = 0;
+		for (j = 0; j < 100 * FLOW_TEST_NUM_EV; j++) {
+			ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+
+			if (ev == ODP_EVENT_INVALID)
+				continue;
+
+			CU_ASSERT(from == queue);
+
+			flow_id = odp_event_flow_id(ev);
+			flow_stat[flow_id]--;
+
+			odp_event_free(ev);
+			num++;
+		}
+
+		CU_ASSERT(num == queue_size);
+
+		for (j = 0; j < num_flows; j++) {
+			CU_ASSERT(flow_stat[j] == 0);
+			if (flow_stat[j])
+				printf("flow id %" PRIu32 ", missing %" PRIi32
+				       " events\n", j, flow_stat[j]);
+		}
+
+		drain_queues();
+		CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+	}
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Default scheduler config */
 odp_testinfo_t scheduler_suite[] = {
 	ODP_TEST_INFO(scheduler_test_capa),
 	ODP_TEST_INFO(scheduler_test_wait_time),
@@ -1985,6 +2139,8 @@ odp_testinfo_t scheduler_suite[] = {
 	ODP_TEST_INFO(scheduler_test_groups),
 	ODP_TEST_INFO(scheduler_test_pause_resume),
 	ODP_TEST_INFO(scheduler_test_ordered_lock),
+	ODP_TEST_INFO_CONDITIONAL(scheduler_test_flow_aware,
+				  check_flow_aware_support),
 	ODP_TEST_INFO(scheduler_test_parallel),
 	ODP_TEST_INFO(scheduler_test_atomic),
 	ODP_TEST_INFO(scheduler_test_ordered),
@@ -2025,6 +2181,32 @@ odp_suiteinfo_t scheduler_suites[] = {
 	ODP_SUITE_INFO_NULL,
 };
 
+static int global_init(odp_instance_t *inst)
+{
+	odp_init_t init_param;
+	odph_helper_options_t helper_options;
+
+	if (odph_options(&helper_options)) {
+		fprintf(stderr, "error: odph_options() failed.\n");
+		return -1;
+	}
+
+	odp_init_param_init(&init_param);
+	init_param.mem_model = helper_options.mem_model;
+
+	if (0 != odp_init_global(inst, &init_param, NULL)) {
+		fprintf(stderr, "error: odp_init_global() failed.\n");
+		return -1;
+	}
+
+	if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+		fprintf(stderr, "error: odp_init_local() failed.\n");
+		return -1;
+	}
+
+	return 0;
+}
+
 int main(int argc, char *argv[])
 {
 	int ret;
@@ -2033,6 +2215,7 @@ int main(int argc, char *argv[])
 	if (odp_cunit_parse_options(argc, argv))
 		return -1;
 
+	odp_cunit_register_global_init(global_init);
 	ret = odp_cunit_register(scheduler_suites);
 
 	if (ret == 0)
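
The suite init above boils down to the following pattern. A sketch (not
from the patch; the helper name is hypothetical, and wanted_flows must
be at least 1) of enabling flow aware mode once at startup, clamped to
the reported capability:

#include <stdio.h>
#include <odp_api.h>

/* Hypothetical helper: enable flow aware mode when supported. Must run
 * once, before any other scheduler call or queue creation. */
static int enable_flow_aware(uint32_t wanted_flows)
{
	odp_schedule_capability_t capa;
	odp_schedule_config_t config;

	if (odp_schedule_capability(&capa)) {
		printf("odp_schedule_capability() failed\n");
		return -1;
	}

	odp_schedule_config_init(&config);

	if (capa.max_flow_id > 0) {
		uint32_t num_flows = wanted_flows;

		/* Clamp to capability: flow IDs run from 0 to max_flow_id */
		if (num_flows - 1 > capa.max_flow_id)
			num_flows = capa.max_flow_id + 1;

		config.max_flow_id = num_flows - 1;
	}

	/* odp_schedule_config() may be called only once */
	return odp_schedule_config(&config);
}
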
commit 9c0ad641faeabbede48fd09b7c91f753186163bd
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Tue Dec 18 12:56:10 2018 +0200
linux-gen: sched: check that config has been done
Always check in slow path functions that schedule config has been called. Fast path functions do the check only when debugging is enabled.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Dmitry Eremin-Solenikov <dmitry.ereminsolenikov@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index 48f232e6..e5a9cfc7 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -597,7 +597,10 @@ static int schedule_init_queue(uint32_t queue_index,
 	int i;
 	int prio = prio_level_from_api(sched_param->prio);
 
-	ODP_ASSERT(_odp_schedule_configured);
+	if (_odp_schedule_configured == 0) {
+		ODP_ERR("Scheduler has not been configured\n");
+		return -1;
+	}
 
 	pri_set_queue(queue_index, prio);
 	sched->queue[queue_index].grp = sched_param->group;
diff --git a/platform/linux-generic/odp_schedule_if.c b/platform/linux-generic/odp_schedule_if.c
index cb52f155..ba903e58 100644
--- a/platform/linux-generic/odp_schedule_if.c
+++ b/platform/linux-generic/odp_schedule_if.c
@@ -24,10 +24,7 @@ extern const schedule_api_t schedule_scalable_api;
 
 const schedule_fn_t *sched_fn;
 const schedule_api_t *sched_api;
-
-#ifdef ODP_DEBUG
 int _odp_schedule_configured;
-#endif
 
 uint64_t odp_schedule_wait_time(uint64_t ns)
 {
@@ -51,7 +48,10 @@ int odp_schedule_config(const odp_schedule_config_t *config)
 	int ret;
 	odp_schedule_config_t defconfig;
 
-	ODP_ASSERT(!_odp_schedule_configured);
+	if (_odp_schedule_configured) {
+		ODP_ERR("Scheduler has been configured already\n");
+		return -1;
+	}
 
 	if (!config) {
 		odp_schedule_config_init(&defconfig);
@@ -59,10 +59,9 @@ int odp_schedule_config(const odp_schedule_config_t *config)
 	}
 
 	ret = sched_api->schedule_config(config);
-#ifdef ODP_DEBUG
+
 	if (ret >= 0)
 		_odp_schedule_configured = 1;
-#endif
 
 	return ret;
 }
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index eec88a60..e7b37895 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -375,10 +375,10 @@ static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
 	odp_schedule_group_t group = sched_param->group;
 	int prio = 0;
 
-#ifdef ODP_DEBUG
-	if (!_odp_schedule_configured)
-		ODP_ABORT("Scheduler not configured!\n");
-#endif
+	if (_odp_schedule_configured == 0) {
+		ODP_ERR("Scheduler has not been configured\n");
+		return -1;
+	}
 
 	if (group < 0 || group >= NUM_GROUP)
 		return -1;
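
The resulting convention can be sketched as follows (function names are
hypothetical; ODP_ERR and ODP_ASSERT are linux-generic internal macros):
slow path code fails with an error when the scheduler is unconfigured,
while fast path code keeps an assert that compiles away unless ODP_DEBUG
is enabled:

extern int _odp_schedule_configured;

/* Slow path: always checked at run time */
static int slow_path_setup(void)
{
	if (_odp_schedule_configured == 0) {
		ODP_ERR("Scheduler has not been configured\n");
		return -1;
	}

	/* ... setup work ... */
	return 0;
}

/* Fast path: checked only in debug builds */
static inline void fast_path_op(void)
{
	ODP_ASSERT(_odp_schedule_configured);

	/* ... per event work ... */
}
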
commit d7913a845c72753758121111cc5da381198ba0f6
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Mon Dec 17 16:03:02 2018 +0200
api: sched: max_flow_id capability
Change the max number of flows capability to a max flow ID. This way an implementation can utilize the full 32-bit flow ID space.
Also, note explicitly that odp_schedule_config() must be called only once.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Dmitry Eremin-Solenikov <dmitry.ereminsolenikov@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/include/odp/api/spec/schedule.h b/include/odp/api/spec/schedule.h
index 43292124..fa66f260 100644
--- a/include/odp/api/spec/schedule.h
+++ b/include/odp/api/spec/schedule.h
@@ -271,8 +271,8 @@ void odp_schedule_config_init(odp_schedule_config_t *config);
  *
  * Initialize and configure scheduler with global configuration options
  * to schedule events across different scheduled queues.
- * This function must be called before scheduler is used (any other scheduler
- * function is called except odp_schedule_capability() and
+ * This function must be called only once and before scheduler is used
+ * (any other scheduler function is called except odp_schedule_capability() and
  * odp_schedule_config_init()) or any queues are created (by application itself
  * or by other ODP modules).
  * An application can pass NULL value to use default configuration. It will
diff --git a/include/odp/api/spec/schedule_types.h b/include/odp/api/spec/schedule_types.h
index 3648c64e..2acec0db 100644
--- a/include/odp/api/spec/schedule_types.h
+++ b/include/odp/api/spec/schedule_types.h
@@ -196,12 +196,13 @@ typedef struct odp_schedule_capability_t {
 	 * events. */
 	uint32_t max_queue_size;
 
-	/** Maximum supported flows per queue.
-	 * Specifies the maximum number of flows per queue supported by the
-	 * implementation. A value of 0 indicates flow aware mode is not
-	 * supported.
-	 */
-	uint32_t max_flows;
+	/** Maximum flow ID per queue
+	 *
+	 * Valid flow ID range in flow aware mode of scheduling is from 0 to
+	 * this maximum value. So, maximum number of flows per queue is this
+	 * value plus one. A value of 0 indicates that flow aware mode is not
+	 * supported. */
+	uint32_t max_flow_id;
 
 	/** Lock-free (ODP_NONBLOCKING_LF) queues support.
 	 * The specification is the same as for the blocking implementation. */
@@ -230,21 +231,24 @@ typedef struct odp_schedule_config_t {
 	 */
 	uint32_t queue_size;
 
-	/** Number of flows per queue to be supported. Scheduler enables flow
-	 * aware mode when flow count is configured greater than 1 (up to
-	 * 'max_flows' capability).
+	/** Maximum flow ID per queue
 	 *
-	 * Flows are lightweight entities and events can be assigned to
-	 * specific flows by the application using odp_event_flow_id_set()
-	 * before enqueuing the event into the scheduler. This value is ignored
-	 * unless scheduler supports flow aware mode.
+	 * This value must not exceed 'max_flow_id' capability. Flow aware
+	 * mode of scheduling is enabled when the value is greater than 0.
+	 * The default value is 0.
 	 *
-	 * This number should be less than maximum flow supported by the
-	 * implementation. The default value is zero.
+	 * Application can assign events to specific flows by calling
+	 * odp_event_flow_id_set() before enqueuing events into a scheduled
+	 * queue. When in flow aware mode, the event flow id value affects
+	 * scheduling of the event and synchronization is maintained per flow
+	 * within each queue.
 	 *
-	 * @see odp_schedule_capability_t
+	 * Depending on implementation, there may be many more flows supported
+	 * than queues, as flows are lightweight entities.
+	 *
+	 * @see odp_schedule_capability_t, odp_event_flow_id()
 	 */
-	uint32_t num_flows;
+	uint32_t max_flow_id;
 
 } odp_schedule_config_t;
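
With the renamed field, the number of usable flows per queue is
max_flow_id + 1, and an implementation may report values up to
UINT32_MAX to expose the full 32-bit ID space. A small sketch (not from
the patch) of reading the capability:

#include <stdio.h>
#include <inttypes.h>
#include <odp_api.h>

static void print_flow_support(void)
{
	odp_schedule_capability_t capa;

	if (odp_schedule_capability(&capa))
		return;

	if (capa.max_flow_id == 0) {
		printf("flow aware mode not supported\n");
		return;
	}

	/* Use 64 bits: max_flow_id may be UINT32_MAX */
	printf("up to %" PRIu64 " flows per queue\n",
	       (uint64_t)capa.max_flow_id + 1);
}
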
-----------------------------------------------------------------------
Summary of changes:
 include/odp/api/spec/schedule.h                    |   4 +-
 include/odp/api/spec/schedule_types.h              |  38 +++--
 .../linux-generic/include/odp_buffer_internal.h    |  20 +++
 platform/linux-generic/odp_event.c                 |  11 +-
 platform/linux-generic/odp_schedule_basic.c        |   6 +-
 platform/linux-generic/odp_schedule_if.c           |  11 +-
 platform/linux-generic/odp_schedule_sp.c           |   8 +-
 test/validation/api/scheduler/scheduler.c          | 183 +++++++++++++++++++++
 8 files changed, 244 insertions(+), 37 deletions(-)
hooks/post-receive