This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, master has been updated
       via  8ab5804ea8bd430fd7cd0bd0edae316918b976d7 (commit)
       via  33bd749de8128afbb6b5bcad5ef6bea5a2667178 (commit)
       via  99b863c804fe38bd919d3b9c2f6dad5ddabf1aaa (commit)
       via  8d10e64603e67620084cb92648707550110d7bbe (commit)
       via  72634d86988e33ac38dd1ecda2cc4e8a156307ed (commit)
       via  341d3029f797ebb2bcf0fa6089c3463eea73e1cc (commit)
       via  69717806018f61af84645ff458e3dc69a13d747e (commit)
      from  b78d235b8fe23b423bf6e7a3c65abbfb7efa4af8 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log -----------------------------------------------------------------

commit 8ab5804ea8bd430fd7cd0bd0edae316918b976d7
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Thu Jun 9 15:56:03 2016 +0300
linux-gen: sched: SP scheduler implementation
This is the initial implementation of the strict priority (SP) scheduler. The SP scheduler is not optimized for throughput, but for low-latency processing of high-priority events. The implementation may have limited support for various scheduler features (e.g. ordered queues) and may offer only a small number of priorities, groups, etc.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
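For orientation before the diff: the core of a strict priority scheduler is a set of per-priority FIFOs that are always scanned from the highest priority (0) downwards, so lower priorities are served only when every higher priority is empty. Below is a minimal, single-threaded sketch of that idea with hypothetical names; the real implementation in the diff adds ticketlocks, scheduler groups, per-thread state and pktin polling commands.

    #include <stddef.h>
    #include <stdio.h>

    #define NUM_PRIO 3 /* 0 is the highest priority */

    typedef struct cmd_t {
        struct cmd_t *next;
        int index;
    } cmd_t;

    typedef struct {
        cmd_t *head;
        cmd_t *tail;
    } prio_fifo_t;

    static prio_fifo_t prio_fifo[NUM_PRIO];

    /* Append a command to the FIFO of its priority level */
    static void add_tail(prio_fifo_t *fifo, cmd_t *cmd)
    {
        cmd->next = NULL;
        if (fifo->head == NULL)
            fifo->head = cmd;
        else
            fifo->tail->next = cmd;
        fifo->tail = cmd;
    }

    /* Strict priority selection: take the head of the highest
     * priority non-empty FIFO, or NULL when all are empty. */
    static cmd_t *sched_cmd(void)
    {
        int prio;

        for (prio = 0; prio < NUM_PRIO; prio++) {
            cmd_t *cmd = prio_fifo[prio].head;

            if (cmd != NULL) {
                prio_fifo[prio].head = cmd->next;
                return cmd;
            }
        }
        return NULL;
    }

    int main(void)
    {
        cmd_t lo = { NULL, 1 }, hi = { NULL, 2 };
        cmd_t *first, *second;

        add_tail(&prio_fifo[NUM_PRIO - 1], &lo);
        add_tail(&prio_fifo[0], &hi);

        first  = sched_cmd();
        second = sched_cmd();

        /* Prints "2 1": the high priority command is served first */
        printf("%d %d\n", first->index, second->index);
        return 0;
    }

Note that this policy trades throughput fairness for latency: a busy high-priority queue can starve lower priorities indefinitely, which is exactly the behavior described above.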
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index 9f1ef16..7162c7b 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -5,26 +5,173 @@
  */
 
 #include <string.h>
+#include <odp/api/ticketlock.h>
+#include <odp/api/thread.h>
 #include <odp/api/schedule.h>
 #include <odp_schedule_if.h>
 #include <odp_debug_internal.h>
+#include <odp_align_internal.h>
+#include <odp_config_internal.h>
+
+/* Dummy pool */
+#include <odp/api/pool.h>
+
+#define NUM_QUEUE ODP_CONFIG_QUEUES
+#define NUM_PKTIO ODP_CONFIG_PKTIO_ENTRIES
+#define NUM_PRIO 3
+#define NUM_STATIC_GROUP 3
+#define NUM_GROUP (NUM_STATIC_GROUP + 9)
+#define NUM_PKTIN 32
+#define LOWEST_QUEUE_PRIO (NUM_PRIO - 2)
+#define PKTIN_PRIO (NUM_PRIO - 1)
+#define CMD_QUEUE 0
+#define CMD_PKTIO 1
+#define ROUNDUP_CACHE(x) ODP_CACHE_LINE_SIZE_ROUNDUP(x)
+#define GROUP_ALL ODP_SCHED_GROUP_ALL
+#define GROUP_WORKER ODP_SCHED_GROUP_WORKER
+#define GROUP_CONTROL ODP_SCHED_GROUP_CONTROL
+
+struct sched_cmd_t;
+
+struct sched_cmd_s {
+    struct sched_cmd_t *next;
+    uint32_t index;
+    int type;
+    int prio;
+    int group;
+    int init;
+    int num_pktin;
+    int pktin_idx[NUM_PKTIN];
+};
+
+typedef struct sched_cmd_t {
+    struct sched_cmd_s s;
+    uint8_t pad[ROUNDUP_CACHE(sizeof(struct sched_cmd_s)) -
+                sizeof(struct sched_cmd_s)];
+} sched_cmd_t ODP_ALIGNED_CACHE;
+
+struct prio_queue_s {
+    odp_ticketlock_t lock;
+    sched_cmd_t *head;
+    sched_cmd_t *tail;
+};
+
+typedef struct prio_queue_t {
+    struct prio_queue_s s;
+    uint8_t pad[ROUNDUP_CACHE(sizeof(struct prio_queue_s)) -
+                sizeof(struct prio_queue_s)];
+} prio_queue_t ODP_ALIGNED_CACHE;
+
+struct sched_group_s {
+    odp_ticketlock_t lock;
+
+    struct {
+        char name[ODP_SCHED_GROUP_NAME_LEN + 1];
+        odp_thrmask_t mask;
+        int allocated;
+    } group[NUM_GROUP];
+};
+
+typedef struct sched_group_t {
+    struct sched_group_s s;
+    uint8_t pad[ROUNDUP_CACHE(sizeof(struct sched_group_s)) -
+                sizeof(struct sched_group_s)];
+} sched_group_t ODP_ALIGNED_CACHE;
+
+typedef struct {
+    sched_cmd_t queue_cmd[NUM_QUEUE];
+    sched_cmd_t pktio_cmd[NUM_PKTIO];
+    prio_queue_t prio_queue[NUM_PRIO];
+    sched_group_t sched_group;
+} sched_global_t;
+
+typedef struct {
+    sched_cmd_t *cmd;
+    int pause;
+    int thr_id;
+} sched_local_t;
+
+static sched_global_t sched_global;
+static __thread sched_local_t sched_local;
+
+/* Dummy pool */
+static odp_pool_t dummy_pool;
 
 static int init_global(void)
 {
-    ODP_DBG("Schedule SP init ...\n");
+    int i;
+    odp_pool_param_t params;
+    sched_group_t *sched_group = &sched_global.sched_group;
+
+    ODP_DBG("Using SP scheduler\n");
+
+    memset(&sched_global, 0, sizeof(sched_global_t));
+
+    for (i = 0; i < NUM_QUEUE; i++) {
+        sched_global.queue_cmd[i].s.type = CMD_QUEUE;
+        sched_global.queue_cmd[i].s.index = i;
+    }
+
+    for (i = 0; i < NUM_PKTIO; i++) {
+        sched_global.pktio_cmd[i].s.type = CMD_PKTIO;
+        sched_global.pktio_cmd[i].s.index = i;
+        sched_global.pktio_cmd[i].s.prio = PKTIN_PRIO;
+    }
+
+    for (i = 0; i < NUM_PRIO; i++)
+        odp_ticketlock_init(&sched_global.prio_queue[i].s.lock);
 
-    ODP_ABORT("Not implemented.");
+    odp_ticketlock_init(&sched_group->s.lock);
+
+    strncpy(sched_group->s.group[GROUP_ALL].name, "__group_all",
+            ODP_SCHED_GROUP_NAME_LEN);
+    odp_thrmask_zero(&sched_group->s.group[GROUP_ALL].mask);
+    sched_group->s.group[GROUP_ALL].allocated = 1;
+
+    strncpy(sched_group->s.group[GROUP_WORKER].name, "__group_worker",
+            ODP_SCHED_GROUP_NAME_LEN);
+    odp_thrmask_zero(&sched_group->s.group[GROUP_WORKER].mask);
+    sched_group->s.group[GROUP_WORKER].allocated = 1;
+
+    strncpy(sched_group->s.group[GROUP_CONTROL].name, "__group_control",
+            ODP_SCHED_GROUP_NAME_LEN);
+    odp_thrmask_zero(&sched_group->s.group[GROUP_CONTROL].mask);
+    sched_group->s.group[GROUP_CONTROL].allocated = 1;
+
+    /* REMOVE dummy pool after a bug has been fixed in pool or timer code.
+     * If scheduler does not create a pool, timer validation test fails !!!
+     */
+    odp_pool_param_init(&params);
+    params.buf.size = 48;
+    params.buf.align = 0;
+    params.buf.num = NUM_QUEUE + NUM_PKTIO;
+    params.type = ODP_POOL_BUFFER;
+    dummy_pool = odp_pool_create("dummy_sched_pool", &params);
 
     return 0;
 }
 
 static int init_local(void)
 {
+    memset(&sched_local, 0, sizeof(sched_local_t));
+    sched_local.thr_id = odp_thread_id();
+
     return 0;
 }
 
 static int term_global(void)
 {
+    int qi;
+
+    for (qi = 0; qi < NUM_QUEUE; qi++) {
+        if (sched_global.queue_cmd[qi].s.init) {
+            /* todo: dequeue until empty ? */
+            sched_cb_queue_destroy_finalize(qi);
+        }
+    }
+
+    odp_pool_destroy(dummy_pool);
+
     return 0;
 }
 
@@ -33,15 +180,516 @@ static int term_local(void)
     return 0;
 }
 
+static int thr_add(odp_schedule_group_t group, int thr)
+{
+    sched_group_t *sched_group = &sched_global.sched_group;
+
+    if (group < 0 || group >= NUM_GROUP)
+        return -1;
+
+    odp_ticketlock_lock(&sched_group->s.lock);
+
+    if (!sched_group->s.group[group].allocated) {
+        odp_ticketlock_unlock(&sched_group->s.lock);
+        return -1;
+    }
+
+    odp_thrmask_set(&sched_group->s.group[group].mask, thr);
+
+    odp_ticketlock_unlock(&sched_group->s.lock);
+
+    return 0;
+}
+
+static int thr_rem(odp_schedule_group_t group, int thr)
+{
+    sched_group_t *sched_group = &sched_global.sched_group;
+
+    if (group < 0 || group >= NUM_GROUP)
+        return -1;
+
+    odp_ticketlock_lock(&sched_group->s.lock);
+
+    if (!sched_group->s.group[group].allocated) {
+        odp_ticketlock_unlock(&sched_group->s.lock);
+        return -1;
+    }
+
+    odp_thrmask_clr(&sched_group->s.group[group].mask, thr);
+
+    odp_ticketlock_unlock(&sched_group->s.lock);
+
+    return 0;
+}
+
+static int num_grps(void)
+{
+    return NUM_GROUP - NUM_STATIC_GROUP;
+}
+
+static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
+{
+    sched_group_t *sched_group = &sched_global.sched_group;
+    odp_schedule_group_t group = sched_param->group;
+    int prio = 0;
+
+    if (group < 0 || group >= NUM_GROUP)
+        return -1;
+
+    if (!sched_group->s.group[group].allocated)
+        return -1;
+
+    if (sched_param->prio > 0)
+        prio = LOWEST_QUEUE_PRIO;
+
+    sched_global.queue_cmd[qi].s.prio = prio;
+    sched_global.queue_cmd[qi].s.group = group;
+    sched_global.queue_cmd[qi].s.init = 1;
+
+    return 0;
+}
+
+static void destroy_queue(uint32_t qi)
+{
+    sched_global.queue_cmd[qi].s.prio = 0;
+    sched_global.queue_cmd[qi].s.group = 0;
+    sched_global.queue_cmd[qi].s.init = 0;
+}
+
+static inline void add_tail(sched_cmd_t *cmd)
+{
+    prio_queue_t *prio_queue;
+
+    prio_queue = &sched_global.prio_queue[cmd->s.prio];
+    cmd->s.next = NULL;
+
+    odp_ticketlock_lock(&prio_queue->s.lock);
+
+    if (prio_queue->s.head == NULL)
+        prio_queue->s.head = cmd;
+    else
+        prio_queue->s.tail->s.next = cmd;
+
+    prio_queue->s.tail = cmd;
+
+    odp_ticketlock_unlock(&prio_queue->s.lock);
+}
+
+static inline sched_cmd_t *rem_head(int prio)
+{
+    prio_queue_t *prio_queue;
+    sched_cmd_t *cmd;
+
+    prio_queue = &sched_global.prio_queue[prio];
+
+    odp_ticketlock_lock(&prio_queue->s.lock);
+
+    if (prio_queue->s.head == NULL) {
+        cmd = NULL;
+    } else {
+        sched_group_t *sched_group = &sched_global.sched_group;
+
+        cmd = prio_queue->s.head;
+
+        /* Remove head cmd only if thread belongs to the
+         * scheduler group. Otherwise continue to the next priority
+         * queue.
+         */
+        if (odp_thrmask_isset(&sched_group->s.group[cmd->s.group].mask,
+                              sched_local.thr_id))
+            prio_queue->s.head = cmd->s.next;
+        else
+            cmd = NULL;
+    }
+
+    odp_ticketlock_unlock(&prio_queue->s.lock);
+
+    return cmd;
+}
+
+static int sched_queue(uint32_t qi)
+{
+    sched_cmd_t *cmd;
+
+    cmd = &sched_global.queue_cmd[qi];
+    add_tail(cmd);
+
+    return 0;
+}
+
+static int ord_enq(uint32_t queue_index, void *buf_hdr, int sustain, int *ret)
+{
+    (void)queue_index;
+    (void)buf_hdr;
+    (void)sustain;
+    (void)ret;
+
+    /* didn't consume the events */
+    return 0;
+}
+
+static int ord_enq_multi(uint32_t queue_index, void *buf_hdr[], int num,
+                         int sustain, int *ret)
+{
+    (void)queue_index;
+    (void)buf_hdr;
+    (void)num;
+    (void)sustain;
+    (void)ret;
+
+    /* didn't consume the events */
+    return 0;
+}
+
+static void pktio_start(odp_pktio_t pktio, int num, int pktin_idx[])
+{
+    int pi, i;
+    sched_cmd_t *cmd;
+
+    ODP_DBG("pktio:%" PRIu64 ", %i pktin queues %i\n",
+            odp_pktio_to_u64(pktio), num, pktin_idx[0]);
+
+    pi = odp_pktio_index(pktio);
+    cmd = &sched_global.pktio_cmd[pi];
+
+    if (num > NUM_PKTIN)
+        ODP_ABORT("Supports only %i pktin queues per interface\n",
+                  NUM_PKTIN);
+
+    for (i = 0; i < num; i++)
+        cmd->s.pktin_idx[i] = pktin_idx[i];
+
+    cmd->s.num_pktin = num;
+
+    add_tail(cmd);
+}
+
+static inline sched_cmd_t *sched_cmd(int num_prio)
+{
+    int prio;
+
+    for (prio = 0; prio < num_prio; prio++) {
+        sched_cmd_t *cmd = rem_head(prio);
+
+        if (cmd)
+            return cmd;
+    }
+
+    return NULL;
+}
+
+static uint64_t schedule_wait_time(uint64_t ns)
+{
+    return ns;
+}
+
+static int schedule_multi(odp_queue_t *from, uint64_t wait,
+                          odp_event_t events[], int max_events)
+{
+    (void)max_events;
+    int update_t1 = 1;
+
+    if (sched_local.cmd) {
+        /* Continue scheduling if queue is not empty */
+        if (sched_cb_queue_empty(sched_local.cmd->s.index) == 0)
+            add_tail(sched_local.cmd);
+
+        sched_local.cmd = NULL;
+    }
+
+    if (odp_unlikely(sched_local.pause))
+        return 0;
+
+    while (1) {
+        sched_cmd_t *cmd;
+        uint32_t qi;
+        int num;
+        odp_time_t t1;
+
+        cmd = sched_cmd(NUM_PRIO);
+
+        if (cmd && cmd->s.type == CMD_PKTIO) {
+            sched_cb_pktin_poll(cmd->s.index, cmd->s.num_pktin,
+                                cmd->s.pktin_idx);
+
+            add_tail(cmd);
+            /* run wait parameter checks under */
+            cmd = NULL;
+        }
+
+        if (cmd == NULL) {
+            /* All priority queues are empty */
+            if (wait == ODP_SCHED_NO_WAIT)
+                return 0;
+
+            if (wait == ODP_SCHED_WAIT)
+                continue;
+
+            if (update_t1) {
+                t1 = odp_time_sum(odp_time_local(),
+                                  odp_time_local_from_ns(wait));
+                update_t1 = 0;
+                continue;
+            }
+
+            if (odp_time_cmp(odp_time_local(), t1) < 0)
+                continue;
+
+            return 0;
+        }
+
+        qi = cmd->s.index;
+        num = sched_cb_queue_deq_multi(qi, events, 1);
+
+        if (num > 0) {
+            sched_local.cmd = cmd;
+
+            if (from)
+                *from = sched_cb_queue_handle(qi);
+
+            return num;
+        }
+
+        if (num < 0) {
+            /* Destroyed queue */
+            sched_cb_queue_destroy_finalize(qi);
+            continue;
+        }
+
+        if (num == 0) {
+            /* Remove empty queue from scheduling. A dequeue
+             * operation on an already empty queue moves
+             * it to NOTSCHED state and sched_queue() will
+             * be called on next enqueue.
+             */
+            continue;
+        }
+    }
+}
+
+static odp_event_t schedule(odp_queue_t *from, uint64_t wait)
+{
+    odp_event_t ev;
+
+    if (schedule_multi(from, wait, &ev, 1) > 0)
+        return ev;
+
+    return ODP_EVENT_INVALID;
+}
+
+static void schedule_pause(void)
+{
+    sched_local.pause = 1;
+}
+
+static void schedule_resume(void)
+{
+    sched_local.pause = 0;
+}
+
+static void schedule_release_atomic(void)
+{
+}
+
+static void schedule_release_ordered(void)
+{
+}
+
+static void schedule_prefetch(int num)
+{
+    (void)num;
+}
+
+static int schedule_num_prio(void)
+{
+    /* Lowest priority is used for pktin polling and is internal
+     * to the scheduler */
+    return NUM_PRIO - 1;
+}
+
+static odp_schedule_group_t schedule_group_create(const char *name,
+                                                  const odp_thrmask_t *thrmask)
+{
+    odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+    sched_group_t *sched_group = &sched_global.sched_group;
+    int i;
+
+    odp_ticketlock_lock(&sched_group->s.lock);
+
+    for (i = NUM_STATIC_GROUP; i < NUM_GROUP; i++) {
+        if (!sched_group->s.group[i].allocated) {
+            strncpy(sched_group->s.group[i].name, name,
+                    ODP_SCHED_GROUP_NAME_LEN);
+            odp_thrmask_copy(&sched_group->s.group[i].mask,
+                             thrmask);
+            sched_group->s.group[i].allocated = 1;
+            group = i;
+            break;
+        }
+    }
+
+    odp_ticketlock_unlock(&sched_group->s.lock);
+
+    return group;
+}
+
+static int schedule_group_destroy(odp_schedule_group_t group)
+{
+    sched_group_t *sched_group = &sched_global.sched_group;
+
+    if (group < NUM_STATIC_GROUP || group >= NUM_GROUP)
+        return -1;
+
+    odp_ticketlock_lock(&sched_group->s.lock);
+
+    if (!sched_group->s.group[group].allocated) {
+        odp_ticketlock_unlock(&sched_group->s.lock);
+        return -1;
+    }
+
+    memset(&sched_group->s.group[group], 0,
+           sizeof(sched_group->s.group[0]));
+
+    odp_ticketlock_unlock(&sched_group->s.lock);
+
+    return 0;
+}
+
+static odp_schedule_group_t schedule_group_lookup(const char *name)
+{
+    odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+    sched_group_t *sched_group = &sched_global.sched_group;
+    int i;
+
+    odp_ticketlock_lock(&sched_group->s.lock);
+
+    for (i = NUM_STATIC_GROUP; i < NUM_GROUP; i++) {
+        if (sched_group->s.group[i].allocated &&
+            strcmp(sched_group->s.group[i].name, name) == 0) {
+            group = i;
+            break;
+        }
+    }
+
+    odp_ticketlock_unlock(&sched_group->s.lock);
+    return group;
+}
+
+static int schedule_group_join(odp_schedule_group_t group,
+                               const odp_thrmask_t *thrmask)
+{
+    sched_group_t *sched_group = &sched_global.sched_group;
+
+    if (group < 0 || group >= NUM_GROUP)
+        return -1;
+
+    odp_ticketlock_lock(&sched_group->s.lock);
+
+    if (!sched_group->s.group[group].allocated) {
+        odp_ticketlock_unlock(&sched_group->s.lock);
+        return -1;
+    }
+
+    odp_thrmask_or(&sched_group->s.group[group].mask,
+                   &sched_group->s.group[group].mask,
+                   thrmask);
+
+    odp_ticketlock_unlock(&sched_group->s.lock);
+
+    return 0;
+}
+
+static int schedule_group_leave(odp_schedule_group_t group,
+                                const odp_thrmask_t *thrmask)
+{
+    sched_group_t *sched_group = &sched_global.sched_group;
+    odp_thrmask_t *all = &sched_group->s.group[GROUP_ALL].mask;
+    odp_thrmask_t not;
+
+    if (group < 0 || group >= NUM_GROUP)
+        return -1;
+
+    odp_ticketlock_lock(&sched_group->s.lock);
+
+    if (!sched_group->s.group[group].allocated) {
+        odp_ticketlock_unlock(&sched_group->s.lock);
+        return -1;
+    }
+
+    odp_thrmask_xor(&not, thrmask, all);
+    odp_thrmask_and(&sched_group->s.group[group].mask,
+                    &sched_group->s.group[group].mask,
+                    &not);
+
+    odp_ticketlock_unlock(&sched_group->s.lock);
+
+    return 0;
+}
+
+static int schedule_group_thrmask(odp_schedule_group_t group,
+                                  odp_thrmask_t *thrmask)
+{
+    sched_group_t *sched_group = &sched_global.sched_group;
+
+    if (group < 0 || group >= NUM_GROUP)
+        return -1;
+
+    odp_ticketlock_lock(&sched_group->s.lock);
+
+    if (!sched_group->s.group[group].allocated) {
+        odp_ticketlock_unlock(&sched_group->s.lock);
+        return -1;
+    }
+
+    *thrmask = sched_group->s.group[group].mask;
+
+    odp_ticketlock_unlock(&sched_group->s.lock);
+
+    return 0;
+}
+
+static int schedule_group_info(odp_schedule_group_t group,
+                               odp_schedule_group_info_t *info)
+{
+    sched_group_t *sched_group = &sched_global.sched_group;
+
+    if (group < 0 || group >= NUM_GROUP)
+        return -1;
+
+    odp_ticketlock_lock(&sched_group->s.lock);
+
+    if (!sched_group->s.group[group].allocated) {
+        odp_ticketlock_unlock(&sched_group->s.lock);
+        return -1;
+    }
+
+    info->name = sched_group->s.group[group].name;
+    info->thrmask = sched_group->s.group[group].mask;
+
+    odp_ticketlock_unlock(&sched_group->s.lock);
+
+    return 0;
+}
+
+static void schedule_order_lock(unsigned lock_index)
+{
+    (void)lock_index;
+}
+
+static void schedule_order_unlock(unsigned lock_index)
+{
+    (void)lock_index;
+}
+
 /* Fill in scheduler interface */
 const schedule_fn_t schedule_sp_fn = {
-    .pktio_start   = NULL,
-    .thr_add       = NULL,
-    .thr_rem       = NULL,
-    .num_grps      = NULL,
-    .init_queue    = NULL,
-    .destroy_queue = NULL,
-    .sched_queue   = NULL,
+    .pktio_start   = pktio_start,
+    .thr_add       = thr_add,
+    .thr_rem       = thr_rem,
+    .num_grps      = num_grps,
+    .init_queue    = init_queue,
+    .destroy_queue = destroy_queue,
+    .sched_queue   = sched_queue,
+    .ord_enq       = ord_enq,
+    .ord_enq_multi = ord_enq_multi,
     .init_global   = init_global,
     .term_global   = term_global,
     .init_local    = init_local,
@@ -50,22 +698,22 @@ const schedule_fn_t schedule_sp_fn = {
 
 /* Fill in scheduler API calls */
 const schedule_api_t schedule_sp_api = {
-    .schedule_wait_time       = NULL,
-    .schedule                 = NULL,
-    .schedule_multi           = NULL,
-    .schedule_pause           = NULL,
-    .schedule_resume          = NULL,
-    .schedule_release_atomic  = NULL,
-    .schedule_release_ordered = NULL,
-    .schedule_prefetch        = NULL,
-    .schedule_num_prio        = NULL,
-    .schedule_group_create    = NULL,
-    .schedule_group_destroy   = NULL,
-    .schedule_group_lookup    = NULL,
-    .schedule_group_join      = NULL,
-    .schedule_group_leave     = NULL,
-    .schedule_group_thrmask   = NULL,
-    .schedule_group_info      = NULL,
-    .schedule_order_lock      = NULL,
-    .schedule_order_unlock    = NULL
+    .schedule_wait_time       = schedule_wait_time,
+    .schedule                 = schedule,
+    .schedule_multi           = schedule_multi,
+    .schedule_pause           = schedule_pause,
+    .schedule_resume          = schedule_resume,
+    .schedule_release_atomic  = schedule_release_atomic,
+    .schedule_release_ordered = schedule_release_ordered,
+    .schedule_prefetch        = schedule_prefetch,
+    .schedule_num_prio        = schedule_num_prio,
+    .schedule_group_create    = schedule_group_create,
+    .schedule_group_destroy   = schedule_group_destroy,
+    .schedule_group_lookup    = schedule_group_lookup,
+    .schedule_group_join      = schedule_group_join,
+    .schedule_group_leave     = schedule_group_leave,
+    .schedule_group_thrmask   = schedule_group_thrmask,
+    .schedule_group_info      = schedule_group_info,
+    .schedule_order_lock      = schedule_order_lock,
+    .schedule_order_unlock    = schedule_order_unlock
 };
commit 33bd749de8128afbb6b5bcad5ef6bea5a2667178
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Thu Jun 9 15:56:02 2016 +0300
vald: sched: bug corrections
Corrected bugs found during SP scheduler development. Scheduler context (pause-resume) handling and draining are now consistent across all test cases. Previously, some test cases returned with the scheduler context still locked (they did not pause and drain before exiting the loop). Added CU_ASSERT_FATALs to limit error propagation when the test application could not continue correct operation.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
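The exit pattern the tests now share: pause the scheduler so no new events are handed to this thread, drain whatever the scheduler still returns, then resume so the next test starts from a clean state. A compact sketch against the public ODP API (assuming the odp_api.h umbrella header; the patch realizes this as drain_queues() plus exit_schedule_loop()):

    #include <odp_api.h>

    static int drain_and_exit(void)
    {
        odp_event_t ev;
        uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
        int drained = 0;

        /* Stop receiving new events on this thread */
        odp_schedule_pause();

        /* Free queued and locally cached events, releasing any
         * atomic or ordered scheduler context along the way */
        while ((ev = odp_schedule(NULL, wait)) != ODP_EVENT_INVALID) {
            odp_event_free(ev);
            drained++;
        }

        /* Leave the thread in the default (resumed) scheduling state */
        odp_schedule_resume();

        return drained;
    }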
diff --git a/test/validation/scheduler/scheduler.c b/test/validation/scheduler/scheduler.c
index ca04591..316370d 100644
--- a/test/validation/scheduler/scheduler.c
+++ b/test/validation/scheduler/scheduler.c
@@ -10,19 +10,19 @@
 
 #define MAX_WORKERS_THREADS 32
 #define MAX_ORDERED_LOCKS   2
-#define MSG_POOL_SIZE       (4 * 1024 * 1024)
+#define MSG_POOL_SIZE       (64 * 1024)
 #define QUEUES_PER_PRIO     16
 #define BUF_SIZE            64
-#define TEST_NUM_BUFS       100
+#define BUFS_PER_QUEUE      100
+#define BUFS_PER_QUEUE_EXCL 10000
 #define BURST_BUF_SIZE      4
-#define NUM_BUFS_EXCL       10000
 #define NUM_BUFS_PAUSE      1000
 #define NUM_BUFS_BEFORE_PAUSE 10
+#define NUM_GROUPS          2
 
 #define GLOBALS_SHM_NAME    "test_globals"
 #define MSG_POOL_NAME       "msg_pool"
 #define QUEUE_CTX_POOL_NAME "queue_ctx_pool"
-#define SHM_MSG_POOL_NAME   "shm_msg_pool"
 #define SHM_THR_ARGS_NAME   "shm_thr_args"
 
 #define ONE_Q 1
@@ -47,7 +47,6 @@
 #define CHAOS_DEBUG (CHAOS_NUM_ROUNDS < 1000)
 #define CHAOS_PTR_TO_NDX(p) ((uint64_t)(uint32_t)(uintptr_t)p)
 #define CHAOS_NDX_TO_PTR(n) ((void *)(uintptr_t)n)
-#define CHAOS_WAIT_FAIL     (5 * ODP_TIME_SEC_IN_NS)
 
 #define ODP_WAIT_TOLERANCE (60 * ODP_TIME_MSEC_IN_NS)
 
@@ -63,7 +62,6 @@ typedef struct {
         odp_queue_t handle;
         char name[ODP_QUEUE_NAME_LEN];
     } chaos_q[CHAOS_NUM_QUEUES];
-    odp_atomic_u32_t chaos_pending_event_count;
 } test_globals_t;
 
 typedef struct {
@@ -99,6 +97,20 @@ typedef struct {
 odp_pool_t pool;
 odp_pool_t queue_ctx_pool;
 
+static int drain_queues(void)
+{
+    odp_event_t ev;
+    uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+    int ret = 0;
+
+    while ((ev = odp_schedule(NULL, wait)) != ODP_EVENT_INVALID) {
+        odp_event_free(ev);
+        ret++;
+    }
+
+    return ret;
+}
+
 static int exit_schedule_loop(void)
 {
     odp_event_t ev;
@@ -112,6 +124,8 @@ static int exit_schedule_loop(void)
         ret++;
     }
 
+    odp_schedule_resume();
+
     return ret;
 }
 
@@ -249,8 +263,7 @@ void scheduler_test_groups(void)
 {
     odp_pool_t p;
     odp_pool_param_t params;
-    odp_queue_param_t qp;
-    odp_queue_t queue_grp1, queue_grp2, from;
+    odp_queue_t queue_grp1, queue_grp2;
     odp_buffer_t buf;
     odp_event_t ev;
     uint32_t *u32;
@@ -339,7 +352,6 @@ void scheduler_test_groups(void)
     CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));
 
     /* Now verify scheduler adherence to groups */
-    odp_queue_param_init(&qp);
     odp_pool_param_init(&params);
     params.buf.size = 100;
     params.buf.align = 0;
@@ -351,6 +363,13 @@ void scheduler_test_groups(void)
     CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
 
     for (i = 0; i < 3; i++) {
+        odp_queue_param_t qp;
+        odp_queue_t queue, from;
+        odp_schedule_group_t mygrp[NUM_GROUPS];
+        odp_queue_t queue_grp[NUM_GROUPS];
+        int num = NUM_GROUPS;
+
+        odp_queue_param_init(&qp);
         qp.type       = ODP_QUEUE_TYPE_SCHED;
         qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
         qp.sched.sync = sync[i];
@@ -369,7 +388,9 @@ void scheduler_test_groups(void)
         u32[0] = MAGIC1;
 
         ev = odp_buffer_to_event(buf);
-        if (!(CU_ASSERT(odp_queue_enq(queue_grp1, ev) == 0)))
+        rc = odp_queue_enq(queue_grp1, ev);
+        CU_ASSERT(rc == 0);
+        if (rc)
             odp_buffer_free(buf);
 
         /* Now create and populate a queue in group 2 */
@@ -385,58 +406,70 @@ void scheduler_test_groups(void)
         u32[0] = MAGIC2;
 
         ev = odp_buffer_to_event(buf);
-        if (!(CU_ASSERT(odp_queue_enq(queue_grp2, ev) == 0)))
+        rc = odp_queue_enq(queue_grp2, ev);
+        CU_ASSERT(rc == 0);
+        if (rc)
             odp_buffer_free(buf);
 
-        /* Scheduler should give us the event from Group 2 */
-        ev = odp_schedule(&from, ODP_SCHED_WAIT);
-        CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
-        CU_ASSERT_FATAL(from == queue_grp2);
-
-        buf = odp_buffer_from_event(ev);
-        u32 = odp_buffer_addr(buf);
-
-        CU_ASSERT_FATAL(u32[0] == MAGIC2);
-
-        odp_buffer_free(buf);
-
-        /* Scheduler should not return anything now since we're
-         * not in Group 1 and Queue 2 is empty. Do this several
-         * times to confirm.
-         */
-
-        for (j = 0; j < 10; j++) {
-            ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
-            CU_ASSERT_FATAL(ev == ODP_EVENT_INVALID)
-        }
-
-        /* Now join group 1 and verify we can get the event */
-        rc = odp_schedule_group_join(mygrp1, &mymask);
-        CU_ASSERT_FATAL(rc == 0);
+        /* Swap between two groups. Application should serve both
+         * groups to avoid potential head of line blocking in
+         * scheduler. */
+        mygrp[0]     = mygrp1;
+        mygrp[1]     = mygrp2;
+        queue_grp[0] = queue_grp1;
+        queue_grp[1] = queue_grp2;
+        j = 0;
+
+        /* Ensure that each test run starts from mygrp1 */
+        odp_schedule_group_leave(mygrp1, &mymask);
+        odp_schedule_group_leave(mygrp2, &mymask);
+        odp_schedule_group_join(mygrp1, &mymask);
+
+        while (num) {
+            queue = queue_grp[j];
+            ev    = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+
+            if (ev == ODP_EVENT_INVALID) {
+                /* change group */
+                rc = odp_schedule_group_leave(mygrp[j],
+                                              &mymask);
+                CU_ASSERT_FATAL(rc == 0);
+
+                j = (j + 1) % NUM_GROUPS;
+                rc = odp_schedule_group_join(mygrp[j],
+                                             &mymask);
+                CU_ASSERT_FATAL(rc == 0);
+                continue;
+            }
 
-        /* Tell scheduler we're about to request an event.
-         * Not needed, but a convenient place to test this API.
-         */
-        odp_schedule_prefetch(1);
+            CU_ASSERT_FATAL(from == queue);
 
-        /* Now get the event from Queue 1 */
-        ev = odp_schedule(&from, ODP_SCHED_WAIT);
-        CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
-        CU_ASSERT_FATAL(from == queue_grp1);
+            buf = odp_buffer_from_event(ev);
+            u32 = odp_buffer_addr(buf);
 
-        buf = odp_buffer_from_event(ev);
-        u32 = odp_buffer_addr(buf);
+            if (from == queue_grp1) {
+                /* CU_ASSERT_FATAL needs these brackets */
+                CU_ASSERT_FATAL(u32[0] == MAGIC1);
+            } else {
+                CU_ASSERT_FATAL(u32[0] == MAGIC2);
+            }
 
-        CU_ASSERT_FATAL(u32[0] == MAGIC1);
+            odp_buffer_free(buf);
 
-        odp_buffer_free(buf);
+            /* Tell scheduler we're about to request an event.
+             * Not needed, but a convenient place to test this API.
+             */
+            odp_schedule_prefetch(1);
 
-        /* Leave group 1 for next pass */
-        rc = odp_schedule_group_leave(mygrp1, &mymask);
-        CU_ASSERT_FATAL(rc == 0);
+            num--;
+        }
 
-        /* We must release order before destroying queues */
-        odp_schedule_release_ordered();
+        /* Release scheduler context and leave groups */
+        odp_schedule_group_join(mygrp1, &mymask);
+        odp_schedule_group_join(mygrp2, &mymask);
+        CU_ASSERT(exit_schedule_loop() == 0);
+        odp_schedule_group_leave(mygrp1, &mymask);
+        odp_schedule_group_leave(mygrp2, &mymask);
 
         /* Done with queues for this round */
         CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
@@ -474,10 +507,12 @@ static int chaos_thread(void *arg)
     start_time = odp_time_local();
 
     /* Run the test */
-    wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
-    for (i = 0; i < CHAOS_NUM_ROUNDS * CHAOS_NUM_EVENTS; i++) {
+    wait = odp_schedule_wait_time(5 * ODP_TIME_MSEC_IN_NS);
+    for (i = 0; i < CHAOS_NUM_ROUNDS; i++) {
         ev = odp_schedule(&from, wait);
-        CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+        if (ev == ODP_EVENT_INVALID)
+            continue;
+
         cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
         CU_ASSERT_FATAL(cbuf != NULL);
         if (CHAOS_DEBUG)
@@ -495,33 +530,14 @@ static int chaos_thread(void *arg)
               globals->
               chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle,
               ev);
-        CU_ASSERT(rc == 0);
+        CU_ASSERT_FATAL(rc == 0);
     }
 
     if (CHAOS_DEBUG)
         printf("Thread %d completed %d rounds...terminating\n",
                odp_thread_id(), CHAOS_NUM_EVENTS);
 
-    /* Thread complete--drain locally cached scheduled events */
-    odp_schedule_pause();
-
-    while (odp_atomic_load_u32(&globals->chaos_pending_event_count) > 0) {
-        ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
-        if (ev == ODP_EVENT_INVALID)
-            break;
-        odp_atomic_dec_u32(&globals->chaos_pending_event_count);
-        cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
-        if (CHAOS_DEBUG)
-            printf("Thread %d drained event %" PRIu64
-                   " seq %" PRIu64
-                   " from Q %s\n",
-                   odp_thread_id(), cbuf->evno, cbuf->seqno,
-                   globals->
-                   chaos_q
-                   [CHAOS_PTR_TO_NDX(odp_queue_context(from))].
-                   name);
-        odp_event_free(ev);
-    }
+    exit_schedule_loop();
 
     end_time = odp_time_local();
     diff = odp_time_diff(end_time, start_time);
@@ -529,7 +545,7 @@ static int chaos_thread(void *arg)
         printf("Thread %d ends, elapsed time = %" PRIu64 "us\n",
                odp_thread_id(), odp_time_to_ns(diff) / 1000);
 
-    return CU_get_number_of_failures();
+    return 0;
 }
 
 static void chaos_run(unsigned int qtype)
@@ -539,24 +555,21 @@ static void chaos_run(unsigned int qtype)
     odp_queue_param_t qp;
     odp_buffer_t buf;
     chaos_buf *cbuf;
-    odp_event_t ev;
     test_globals_t *globals;
     thread_args_t *args;
    odp_shm_t shm;
-    odp_queue_t from;
     int i, rc;
-    uint64_t wait;
     odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
                                   ODP_SCHED_SYNC_ATOMIC,
                                   ODP_SCHED_SYNC_ORDERED};
-    const unsigned num_sync = (sizeof(sync) / sizeof(sync[0]));
+    const unsigned num_sync = (sizeof(sync) / sizeof(odp_schedule_sync_t));
     const char *const qtypes[] = {"parallel", "atomic", "ordered"};
 
     /* Set up the scheduling environment */
     shm = odp_shm_lookup(GLOBALS_SHM_NAME);
     CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
     globals = odp_shm_addr(shm);
-    CU_ASSERT_PTR_NOT_NULL_FATAL(shm);
+    CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
 
     shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
     CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
@@ -580,7 +593,7 @@ static void chaos_run(unsigned int qtype)
     qp.sched.group = ODP_SCHED_GROUP_ALL;
 
     for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
-        uint32_t ndx = qtype == num_sync ? i % num_sync : qtype;
+        uint32_t ndx = (qtype == num_sync ? i % num_sync : qtype);
 
         qp.sched.sync = sync[ndx];
         snprintf(globals->chaos_q[i].name,
@@ -598,8 +611,6 @@ static void chaos_run(unsigned int qtype)
     }
 
     /* Now populate the queues with the initial seed elements */
-    odp_atomic_init_u32(&globals->chaos_pending_event_count, 0);
-
     for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
         buf = odp_buffer_alloc(pool);
         CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
@@ -610,7 +621,6 @@ static void chaos_run(unsigned int qtype)
             globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
             odp_buffer_to_event(buf));
         CU_ASSERT_FATAL(rc == 0);
-        odp_atomic_inc_u32(&globals->chaos_pending_event_count);
     }
 
     /* Run the test */
@@ -621,26 +631,8 @@ static void chaos_run(unsigned int qtype)
     printf("Thread %d returning from chaos threads..cleaning up\n",
            odp_thread_id());
 
-    /* Cleanup: Drain queues, free events */
-    wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
-    while (odp_atomic_fetch_dec_u32(
-               &globals->chaos_pending_event_count) > 0) {
-        ev = odp_schedule(&from, wait);
-        CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
-        cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
-        if (CHAOS_DEBUG)
-            printf("Draining event %" PRIu64
-                   " seq %" PRIu64 " from Q %s...\n",
-                   cbuf->evno,
-                   cbuf->seqno,
-                   globals->
-                   chaos_q
-                   [CHAOS_PTR_TO_NDX(odp_queue_context(from))].
-                   name);
-        odp_event_free(ev);
-    }
-
-    odp_schedule_release_ordered();
+    drain_queues();
+    exit_schedule_loop();
 
     for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
         if (CHAOS_DEBUG)
@@ -683,6 +675,7 @@ static int schedule_common_(void *arg)
     buf_contents *bctx, *bctx_cpy;
     odp_pool_t pool;
     int locked;
+    int num;
 
     globals = args->globals;
     sync = args->sync;
@@ -697,7 +690,7 @@ static int schedule_common_(void *arg)
         odp_event_t ev;
         odp_buffer_t buf, buf_cpy;
         odp_queue_t from = ODP_QUEUE_INVALID;
-        int num = 0;
+        num = 0;
 
         odp_ticketlock_lock(&globals->lock);
         if (globals->buf_count == 0) {
@@ -711,6 +704,7 @@ static int schedule_common_(void *arg)
                  ev_cpy[BURST_BUF_SIZE];
             odp_buffer_t buf_cpy[BURST_BUF_SIZE];
             int j;
+
             num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
                                      events, BURST_BUF_SIZE);
             CU_ASSERT(num >= 0);
@@ -764,9 +758,10 @@ static int schedule_common_(void *arg)
                 odp_event_free(events[j]);
         } else {
             ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
-            buf = odp_buffer_from_event(ev);
-            if (buf == ODP_BUFFER_INVALID)
+            if (ev == ODP_EVENT_INVALID)
                 continue;
+
+            buf = odp_buffer_from_event(ev);
             num = 1;
             if (sync == ODP_SCHED_SYNC_ORDERED) {
                 int ndx;
@@ -803,7 +798,7 @@ static int schedule_common_(void *arg)
 
         if (args->enable_excl_atomic) {
             locked = odp_spinlock_trylock(&globals->atomic_lock);
-            CU_ASSERT(locked == 1);
+            CU_ASSERT(locked != 0);
             CU_ASSERT(from != ODP_QUEUE_INVALID);
             if (locked) {
                 int cnt;
@@ -824,6 +819,7 @@ static int schedule_common_(void *arg)
             odp_schedule_release_ordered();
 
         odp_ticketlock_lock(&globals->lock);
+
         globals->buf_count -= num;
 
         if (globals->buf_count < 0) {
@@ -885,7 +881,15 @@ static int schedule_common_(void *arg)
     if (locked)
         odp_ticketlock_unlock(&globals->lock);
 
-    return CU_get_number_of_failures();
+    /* Clear scheduler atomic / ordered context between tests */
+    num = exit_schedule_loop();
+
+    CU_ASSERT(num == 0);
+
+    if (num)
+        printf("\nDROPPED %i events\n\n", num);
+
+    return 0;
 }
 
 static void fill_queues(thread_args_t *args)
@@ -897,6 +901,7 @@ static void fill_queues(thread_args_t *args)
     int buf_count = 0;
     test_globals_t *globals;
     char name[32];
+    int ret;
 
     globals = args->globals;
     sync = args->sync;
@@ -924,7 +929,7 @@ static void fill_queues(thread_args_t *args)
                      "sched_%d_%d_o", i, j);
                 break;
             default:
-                CU_ASSERT(0);
+                CU_ASSERT_FATAL(0);
                 break;
             }
 
@@ -944,7 +949,11 @@ static void fill_queues(thread_args_t *args)
                     odp_buffer_addr(buf);
                 bctx->sequence = qctx->sequence++;
             }
-            if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0)))
+
+            ret = odp_queue_enq(queue, ev);
+            CU_ASSERT_FATAL(ret == 0);
+
+            if (ret)
                 odp_buffer_free(buf);
             else
                 buf_count++;
@@ -1000,17 +1009,16 @@ static void schedule_common(odp_schedule_sync_t sync, int num_queues,
     globals = odp_shm_addr(shm);
     CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
 
+    memset(&args, 0, sizeof(thread_args_t));
     args.globals = globals;
     args.sync = sync;
     args.num_queues = num_queues;
     args.num_prio = num_prio;
-    args.num_bufs = TEST_NUM_BUFS;
+    args.num_bufs = BUFS_PER_QUEUE;
     args.num_workers = 1;
     args.enable_schd_multi = enable_schd_multi;
     args.enable_excl_atomic = 0;  /* Not needed with a single CPU */
 
-    /* resume scheduling in case it was paused */
-    odp_schedule_resume();
     fill_queues(&args);
 
     schedule_common_(&args);
@@ -1041,16 +1049,13 @@ static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
     args->num_queues = num_queues;
     args->num_prio = num_prio;
     if (enable_excl_atomic)
-        args->num_bufs = NUM_BUFS_EXCL;
+        args->num_bufs = BUFS_PER_QUEUE_EXCL;
     else
-        args->num_bufs = TEST_NUM_BUFS;
+        args->num_bufs = BUFS_PER_QUEUE;
     args->num_workers = globals->num_workers;
     args->enable_schd_multi = enable_schd_multi;
     args->enable_excl_atomic = enable_excl_atomic;
 
-    /* disable receive events for main thread */
-    exit_schedule_loop();
-
     fill_queues(args);
 
     /* Create and launch worker threads */
@@ -1262,12 +1267,10 @@ void scheduler_test_pause_resume(void)
     odp_queue_t from;
     int i;
     int local_bufs = 0;
-
-    /* resume scheduling in case it was paused */
-    odp_schedule_resume();
+    int ret;
 
     queue = odp_queue_lookup("sched_0_0_n");
-    CU_ASSERT(queue != ODP_QUEUE_INVALID);
+    CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
 
     pool = odp_pool_lookup(MSG_POOL_NAME);
     CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
@@ -1276,13 +1279,17 @@ void scheduler_test_pause_resume(void)
         buf = odp_buffer_alloc(pool);
         CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
         ev = odp_buffer_to_event(buf);
-        if (odp_queue_enq(queue, ev))
+        ret = odp_queue_enq(queue, ev);
+        CU_ASSERT(ret == 0);
+
+        if (ret)
             odp_buffer_free(buf);
     }
 
     for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
         from = ODP_QUEUE_INVALID;
         ev = odp_schedule(&from, ODP_SCHED_WAIT);
+        CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
         CU_ASSERT(from == queue);
         buf = odp_buffer_from_event(ev);
         odp_buffer_free(buf);
@@ -1312,9 +1319,9 @@ void scheduler_test_pause_resume(void)
         odp_buffer_free(buf);
     }
 
-    CU_ASSERT(exit_schedule_loop() == 0);
+    ret = exit_schedule_loop();
 
-    odp_schedule_resume();
+    CU_ASSERT(ret == 0);
 }
 
 static int create_queues(void)
@@ -1466,7 +1473,7 @@ int scheduler_suite_init(void)
     odp_pool_param_init(&params);
     params.buf.size = BUF_SIZE;
    params.buf.align = 0;
-    params.buf.num = MSG_POOL_SIZE / BUF_SIZE;
+    params.buf.num = MSG_POOL_SIZE;
     params.type = ODP_POOL_BUFFER;
 
     pool = odp_pool_create(MSG_POOL_NAME, &params);
commit 99b863c804fe38bd919d3b9c2f6dad5ddabf1aaa
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Thu Jun 9 15:56:01 2016 +0300
vald: timer: add missing pool param init calls
Added missing pool parameter init calls.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
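The rule being enforced: odp_pool_param_t should be initialized with odp_pool_param_init() before individual fields are set, because a designated initializer (the pattern removed below) only zeroes the fields it does not name, and zero is not guaranteed to be a valid default on every implementation. The resulting idiom, sketched as a hypothetical helper:

    #include <odp_api.h>

    /* Hypothetical helper: create a timeout pool of 'num' timeouts */
    static odp_pool_t make_tmo_pool(const char *name, uint32_t num)
    {
        odp_pool_param_t params;

        /* Fill implementation specific defaults first ... */
        odp_pool_param_init(&params);

        /* ... then override only the fields this pool needs */
        params.type    = ODP_POOL_TIMEOUT;
        params.tmo.num = num;

        return odp_pool_create(name, &params);
    }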
diff --git a/test/validation/timer/timer.c b/test/validation/timer/timer.c
index ef31eba..33eb478 100644
--- a/test/validation/timer/timer.c
+++ b/test/validation/timer/timer.c
@@ -60,14 +60,15 @@ void timer_test_timeout_pool_alloc(void)
     odp_event_t ev;
     int index;
     char wrong_type = 0;
-    odp_pool_param_t params = {
-        .tmo = {
-            .num = num,
-        },
-        .type = ODP_POOL_TIMEOUT,
-    };
+    odp_pool_param_t params;
+
+    odp_pool_param_init(&params);
+    params.type = ODP_POOL_TIMEOUT;
+    params.tmo.num = num;
 
     pool = odp_pool_create("timeout_pool_alloc", &params);
+    CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
     odp_pool_print(pool);
 
     /* Try to allocate num items from the pool */
@@ -100,14 +101,14 @@ void timer_test_timeout_pool_free(void)
 {
     odp_pool_t pool;
     odp_timeout_t tmo;
-    odp_pool_param_t params = {
-        .tmo = {
-            .num = 1,
-        },
-        .type = ODP_POOL_TIMEOUT,
-    };
+    odp_pool_param_t params;
+
+    odp_pool_param_init(&params);
+    params.type = ODP_POOL_TIMEOUT;
+    params.tmo.num = 1;
 
     pool = odp_pool_create("timeout_pool_free", &params);
+    CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
     odp_pool_print(pool);
 
     /* Allocate the only timeout from the pool */
@@ -140,8 +141,10 @@ void timer_test_odp_timer_cancel(void)
     odp_timer_set_t rc;
     uint64_t tick;
 
-    params.tmo.num = 1;
+    odp_pool_param_init(&params);
     params.type = ODP_POOL_TIMEOUT;
+    params.tmo.num = 1;
+
     pool = odp_pool_create("tmo_pool_for_cancel", &params);
 
     if (pool == ODP_POOL_INVALID)
@@ -476,8 +479,10 @@ void timer_test_odp_timer_all(void)
         num_workers = 1;
 
     /* Create timeout pools */
-    params.tmo.num = (NTIMERS + 1) * num_workers;
+    odp_pool_param_init(&params);
     params.type = ODP_POOL_TIMEOUT;
+    params.tmo.num = (NTIMERS + 1) * num_workers;
+
     tbp = odp_pool_create("tmo_pool", &params);
     if (tbp == ODP_POOL_INVALID)
         CU_FAIL_FATAL("Timeout pool create failed");
commit 8d10e64603e67620084cb92648707550110d7bbe
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Thu Jun 9 15:56:00 2016 +0300
linux-gen: pktio: added debug prints
Added a couple of debug prints for easier pktio debugging.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 416a361..8d88aa3 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -677,7 +677,6 @@ int sched_cb_pktin_poll(int pktio_index, int num_queue, int index[])
     odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
     int num, idx;
     pktio_entry_t *entry;
-
     entry = pktio_entry_by_index(pktio_index);
 
     if (odp_unlikely(is_free(entry))) {
@@ -689,8 +688,10 @@ int sched_cb_pktin_poll(int pktio_index, int num_queue, int index[])
     if (odp_unlikely(entry->s.num_in_queue == 0))
         return -1;
 
-    if (entry->s.state != STATE_STARTED)
+    if (entry->s.state != STATE_STARTED) {
+        ODP_DBG("interface not started\n");
         return 0;
+    }
 
     for (idx = 0; idx < num_queue; idx++) {
         queue_entry_t *qentry;
diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c
index 75f6a0a..effad9a 100644
--- a/platform/linux-generic/pktio/loop.c
+++ b/platform/linux-generic/pktio/loop.c
@@ -151,9 +151,13 @@ static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
 
     qentry = queue_to_qentry(pktio_entry->s.pkt_loop.loopq);
     ret = queue_enq_multi(qentry, hdr_tbl, len, 0);
+
     if (ret > 0) {
         pktio_entry->s.stats.out_ucast_pkts += ret;
         pktio_entry->s.stats.out_octets += bytes;
+    } else {
+        ODP_DBG("queue enqueue failed %i\n", ret);
+        return -1;
     }
 
     odp_ticketlock_unlock(&pktio_entry->s.txl);
commit 72634d86988e33ac38dd1ecda2cc4e8a156307ed
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Thu Jun 9 15:55:59 2016 +0300
linux-gen: sched: add sched_cb_queue_empty call
Added a new scheduler interface function, which may be used to control the scheduling of empty queues. The SP scheduler uses it to ensure that empty queues are not stored into its scheduler queues.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
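The return value encodes three cases: -1 for a bad or already destroyed queue, 1 when the queue is empty (atomically moving a SCHED queue to NOTSCHED, so the next enqueue re-schedules it), and 0 when events remain. A sketch of how a scheduler implementation may consume it after serving a queue (queue_cmd_t and requeue() are hypothetical names):

    #include <stdint.h>

    /* From odp_schedule_if.h */
    int sched_cb_queue_empty(uint32_t queue_index);

    typedef struct queue_cmd_t queue_cmd_t; /* hypothetical command type */
    void requeue(queue_cmd_t *cmd);         /* hypothetical: back to prio FIFO */

    static void after_serving(uint32_t queue_index, queue_cmd_t *cmd)
    {
        if (sched_cb_queue_empty(queue_index) == 0)
            requeue(cmd); /* events remain: keep the queue scheduled */

        /* == 1: queue parked as NOTSCHED until the next enqueue;
         *  < 0: queue is gone, drop the command. */
    }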
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index 0d82d84..86a117f 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -71,6 +71,7 @@ int sched_cb_queue_is_atomic(uint32_t queue_index);
 odp_queue_t sched_cb_queue_handle(uint32_t queue_index);
 void sched_cb_queue_destroy_finalize(uint32_t queue_index);
 int sched_cb_queue_deq_multi(uint32_t queue_index, odp_event_t ev[], int num);
+int sched_cb_queue_empty(uint32_t queue_index);
 
 /* API functions */
 typedef struct {
diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c
index e11c912..f3e589e 100644
--- a/platform/linux-generic/odp_queue.c
+++ b/platform/linux-generic/odp_queue.c
@@ -745,3 +745,29 @@ int sched_cb_queue_deq_multi(uint32_t queue_index, odp_event_t ev[], int num)
 
     return ret;
 }
+
+int sched_cb_queue_empty(uint32_t queue_index)
+{
+    queue_entry_t *queue = get_qentry(queue_index);
+    int ret = 0;
+
+    LOCK(&queue->s.lock);
+
+    if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
+        /* Bad queue, or queue has been destroyed. */
+        UNLOCK(&queue->s.lock);
+        return -1;
+    }
+
+    if (queue->s.head == NULL) {
+        /* Already empty queue. Update status. */
+        if (queue->s.status == QUEUE_STATUS_SCHED)
+            queue->s.status = QUEUE_STATUS_NOTSCHED;
+
+        ret = 1;
+    }
+
+    UNLOCK(&queue->s.lock);
+
+    return ret;
+}
commit 341d3029f797ebb2bcf0fa6089c3463eea73e1cc
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Thu Jun 9 15:55:58 2016 +0300
linux-gen: sched: add ordered enq to schedule_fn_t
Added ordered enqueue functions to scheduler interface.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
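The contract used by queue_enq()/queue_enq_multi() below: a hook returns 1 when it consumed the enqueue (with the result stored through *ret) and 0 to fall through to the normal enqueue path. A scheduler without ordered queue support can therefore install no-op stubs, as the SP scheduler does in the first commit above; a minimal sketch:

    #include <stdint.h>

    static int ord_enq(uint32_t queue_index, void *buf_hdr, int sustain,
                       int *ret)
    {
        (void)queue_index;
        (void)buf_hdr;
        (void)sustain;
        (void)ret;

        /* 0 = not consumed: the caller performs the normal enqueue */
        return 0;
    }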
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index 538d792..0d82d84 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -31,6 +31,11 @@ typedef int (*schedule_init_queue_fn_t)(uint32_t queue_index,
                                         );
 typedef void (*schedule_destroy_queue_fn_t)(uint32_t queue_index);
 typedef int (*schedule_sched_queue_fn_t)(uint32_t queue_index);
+typedef int (*schedule_ord_enq_fn_t)(uint32_t queue_index, void *buf_hdr,
+                                     int sustain, int *ret);
+typedef int (*schedule_ord_enq_multi_fn_t)(uint32_t queue_index,
+                                           void *buf_hdr[], int num,
+                                           int sustain, int *ret);
 typedef int (*schedule_init_global_fn_t)(void);
 typedef int (*schedule_term_global_fn_t)(void);
 typedef int (*schedule_init_local_fn_t)(void);
@@ -44,6 +49,8 @@ typedef struct schedule_fn_t {
     schedule_init_queue_fn_t init_queue;
     schedule_destroy_queue_fn_t destroy_queue;
     schedule_sched_queue_fn_t sched_queue;
+    schedule_ord_enq_fn_t ord_enq;
+    schedule_ord_enq_multi_fn_t ord_enq_multi;
     schedule_init_global_fn_t init_global;
     schedule_term_global_fn_t term_global;
     schedule_init_local_fn_t init_local;
diff --git a/platform/linux-generic/include/odp_schedule_ordered_internal.h b/platform/linux-generic/include/odp_schedule_ordered_internal.h
index acd62bc..0ffbe3a 100644
--- a/platform/linux-generic/include/odp_schedule_ordered_internal.h
+++ b/platform/linux-generic/include/odp_schedule_ordered_internal.h
@@ -13,10 +13,9 @@ extern "C" {
 
 #define SUSTAIN_ORDER 1
 
-int schedule_ordered_queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr,
+int schedule_ordered_queue_enq(uint32_t queue_index, void *p_buf_hdr,
                                int sustain, int *ret);
-int schedule_ordered_queue_enq_multi(queue_entry_t *queue,
-                                     odp_buffer_hdr_t *buf_hdr[],
+int schedule_ordered_queue_enq_multi(uint32_t queue_index, void *p_buf_hdr[],
                                      int num, int sustain, int *ret);
 
 #ifdef __cplusplus
diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c
index 58404d9..e11c912 100644
--- a/platform/linux-generic/odp_queue.c
+++ b/platform/linux-generic/odp_queue.c
@@ -397,7 +397,7 @@ int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr, int sustain)
 {
     int ret;
 
-    if (schedule_ordered_queue_enq(queue, buf_hdr, sustain, &ret))
+    if (sched_fn->ord_enq(queue->s.index, buf_hdr, sustain, &ret))
         return ret;
 
     LOCK(&queue->s.lock);
@@ -436,8 +436,8 @@ int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
     tail = buf_hdr[num - 1];
     buf_hdr[num - 1]->next = NULL;
 
-    if (schedule_ordered_queue_enq_multi(queue, buf_hdr, num, sustain,
-                                         &ret))
+    if (sched_fn->ord_enq_multi(queue->s.index, (void **)buf_hdr, num,
+                                sustain, &ret))
         return ret;
 
     /* Handle unordered enqueues */
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index b8d0d2a..3b97909 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -22,6 +22,7 @@
 #include <odp/api/thrmask.h>
 #include <odp_config_internal.h>
 #include <odp_schedule_internal.h>
+#include <odp_schedule_ordered_internal.h>
 #ifdef _ODP_PKTIO_IPC
 #include <odp_pool_internal.h>
 #endif
@@ -956,6 +957,8 @@ const schedule_fn_t schedule_default_fn = {
     .init_queue = schedule_init_queue,
     .destroy_queue = schedule_destroy_queue,
     .sched_queue = schedule_sched_queue,
+    .ord_enq = schedule_ordered_queue_enq,
+    .ord_enq_multi = schedule_ordered_queue_enq_multi,
     .init_global = schedule_init_global,
     .term_global = schedule_term_global,
     .init_local = schedule_init_local,
diff --git a/platform/linux-generic/odp_schedule_ordered.c b/platform/linux-generic/odp_schedule_ordered.c
index 17d3825..8c1dd7e 100644
--- a/platform/linux-generic/odp_schedule_ordered.c
+++ b/platform/linux-generic/odp_schedule_ordered.c
@@ -452,18 +452,19 @@ static int ordered_queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr,
     return 0;
 }
 
-int schedule_ordered_queue_enq(queue_entry_t *queue,
-                               odp_buffer_hdr_t *buf_hdr,
+int schedule_ordered_queue_enq(uint32_t queue_index, void *p_buf_hdr,
                                int sustain, int *ret)
 {
     queue_entry_t *origin_qe;
     uint64_t order;
+    queue_entry_t *qe = get_qentry(queue_index);
+    odp_buffer_hdr_t *buf_hdr = p_buf_hdr;
 
     get_queue_order(&origin_qe, &order, buf_hdr);
 
     /* Handle enqueues from ordered queues separately */
     if (origin_qe) {
-        *ret = ordered_queue_enq(queue, buf_hdr, sustain,
+        *ret = ordered_queue_enq(qe, buf_hdr, sustain,
                                  origin_qe, order);
         return 1;
     }
@@ -471,19 +472,20 @@ int schedule_ordered_queue_enq(queue_entry_t *queue,
     return 0;
 }
 
-int schedule_ordered_queue_enq_multi(queue_entry_t *queue,
-                                     odp_buffer_hdr_t *buf_hdr[],
+int schedule_ordered_queue_enq_multi(uint32_t queue_index, void *p_buf_hdr[],
                                      int num, int sustain, int *ret)
 {
     queue_entry_t *origin_qe;
     uint64_t order;
     int rc;
+    queue_entry_t *qe = get_qentry(queue_index);
+    odp_buffer_hdr_t *buf_hdr = p_buf_hdr[0];
 
     /* Handle ordered enqueues commonly via links */
-    get_queue_order(&origin_qe, &order, buf_hdr[0]);
+    get_queue_order(&origin_qe, &order, buf_hdr);
     if (origin_qe) {
-        buf_hdr[0]->link = buf_hdr[0]->next;
-        rc = ordered_queue_enq(queue, buf_hdr[0], sustain,
+        buf_hdr->link = buf_hdr->next;
+        rc = ordered_queue_enq(qe, buf_hdr, sustain,
                                origin_qe, order);
         *ret = rc == 0 ? num : rc;
         return 1;
commit 69717806018f61af84645ff458e3dc69a13d747e
Author: Petri Savolainen <petri.savolainen@nokia.com>
Date:   Thu Jun 9 15:55:57 2016 +0300
helper: linux: correct pthread join retval check
The helper defined that a pthread exit status of NULL means error, but the join code checked for != NULL. As a result, both NULL and non-NULL statuses were returned on failure.
Now both process and pthread exits are defined to return a non-zero (non-NULL) status on failure.
Signed-off-by: Petri Savolainen <petri.savolainen@nokia.com>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
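With this convention a joiner treats any non-NULL pthread exit status as failure, mirroring the non-zero process exit status. A small self-contained illustration (not the helper code itself):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static void *worker(void *arg)
    {
        int failed = *(int *)arg;

        /* NULL on success, non-NULL pointer-encoded status on failure */
        return (void *)(intptr_t)(failed ? -1 : 0);
    }

    int main(void)
    {
        pthread_t thr;
        void *thread_ret = NULL;
        int fail = 1;

        pthread_create(&thr, NULL, worker, &fail);
        pthread_join(thr, &thread_ret);

        if (thread_ret != NULL)
            printf("worker failed: %p\n", thread_ret);

        return 0;
    }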
diff --git a/helper/linux.c b/helper/linux.c
index 8b9dbe8..a1dbe52 100644
--- a/helper/linux.c
+++ b/helper/linux.c
@@ -261,7 +261,7 @@ static void *odpthread_run_start_routine(void *arg)
         ODPH_ERR("Local init failed\n");
         if (start_args->linuxtype == ODPTHREAD_PROCESS)
             _exit(EXIT_FAILURE);
-        return NULL;
+        return (void *)-1;
     }
 
     ODPH_DBG("helper: ODP %s thread started as linux %s. (pid=%d)\n",
@@ -284,7 +284,7 @@ static void *odpthread_run_start_routine(void *arg)
         _exit(status);
 
     /* threads implementation return void* pointers: cast status to that. */
-    return (void *)(long)status;
+    return (void *)(intptr_t)status;
 }
 
 /*
@@ -426,8 +426,10 @@ int odph_odpthreads_join(odph_odpthread_t *thread_tbl)
     pid_t pid;
     int i = 0;
     int terminated = 0;
-    int status = 0; /* child process return code (!=0 is error) */
-    void *thread_ret; /* "child" thread return code (NULL is error) */
+    /* child process return code (!=0 is error) */
+    int status = 0;
+    /* "child" thread return code (!NULL is error) */
+    void *thread_ret = NULL;
     int ret;
     int retval = 0;
 
@@ -445,8 +447,11 @@ int odph_odpthreads_join(odph_odpthread_t *thread_tbl)
                 retval = -1;
             } else {
                 terminated++;
-                if (thread_ret != NULL)
+                if (thread_ret != NULL) {
+                    ODPH_ERR("Bad exit status cpu #%d %p\n",
+                             thread_tbl[i].cpu, thread_ret);
                     retval = -1;
+                }
             }
             pthread_attr_destroy(&thread_tbl[i].thread.attr);
             break;
diff --git a/helper/test/thread.c b/helper/test/thread.c
index b290753..8268d9f 100644
--- a/helper/test/thread.c
+++ b/helper/test/thread.c
@@ -17,7 +17,7 @@ static void *worker_fn(void *arg TEST_UNUSED)
 
     /* depend on the odp helper to call odp_term_local */
 
-    return 0;
+    return NULL;
 }
 
 /* Create additional dataplane threads */
-----------------------------------------------------------------------
Summary of changes:
 helper/linux.c                                     |  15 +-
 helper/test/thread.c                               |   2 +-
 platform/linux-generic/include/odp_schedule_if.h   |   8 +
 .../include/odp_schedule_ordered_internal.h        |   5 +-
 platform/linux-generic/odp_packet_io.c             |   5 +-
 platform/linux-generic/odp_queue.c                 |  32 +-
 platform/linux-generic/odp_schedule.c              |   3 +
 platform/linux-generic/odp_schedule_ordered.c      |  18 +-
 platform/linux-generic/odp_schedule_sp.c           | 702 ++++++++++++++++++++-
 platform/linux-generic/pktio/loop.c                |   4 +
 test/validation/scheduler/scheduler.c              | 267 ++++----
 test/validation/timer/timer.c                      |  33 +-
 12 files changed, 901 insertions(+), 193 deletions(-)
hooks/post-receive