This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, api-next has been updated
       via  c7718962c6633c80eb71a0400d89c31f11f88045 (commit)
       via  79ce62cf3266b0d1b05acc68be382cd6271b7371 (commit)
       via  39e4ebd39f2d171bbfef5bfee6596033b83c56a0 (commit)
       via  f9e7f458cf7151c75a9145272c62306b96f9ff27 (commit)
      from  b668182d6ea0cb942c2cf43771c618c9457bf146 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log -----------------------------------------------------------------
commit c7718962c6633c80eb71a0400d89c31f11f88045
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Jun 30 17:10:56 2017 +0300

    linux-gen: sched: remove unused sched interface functions

    Removed functions that are no longer used. Also removed an unused
    parameter from the save_context function.

    Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
    Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
    Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
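
For context: after this change the save_context hook receives only a queue
index. A minimal standalone C sketch of the narrowed callback shape
(illustrative only, not ODP code; the table and names below are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_QUEUES 8 /* hypothetical table size */

    /* the hook now takes only the queue index */
    typedef void (*schedule_save_context_fn_t)(uint32_t queue_index);

    static int sync_mode[NUM_QUEUES]; /* hypothetical per-queue state */

    static void save_context(uint32_t queue_index)
    {
            /* everything needed is reachable through the index */
            printf("queue %u: sync mode %d\n",
                   queue_index, sync_mode[queue_index]);
    }

    int main(void)
    {
            schedule_save_context_fn_t fn = save_context;

            fn(3);
            return 0;
    }

Dropping the pointer lets a scheduler keep its per-queue state in its own
tables, indexed by queue index, instead of reaching into the queue entry.
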
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index 5abbb732..b514c88a 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -35,7 +35,7 @@ typedef int (*schedule_term_local_fn_t)(void);
 typedef void (*schedule_order_lock_fn_t)(void);
 typedef void (*schedule_order_unlock_fn_t)(void);
 typedef unsigned (*schedule_max_ordered_locks_fn_t)(void);
-typedef void (*schedule_save_context_fn_t)(uint32_t queue_index, void *ptr);
+typedef void (*schedule_save_context_fn_t)(uint32_t queue_index);

 typedef struct schedule_fn_t {
         int status_sync;
@@ -68,11 +68,6 @@ extern const schedule_fn_t *sched_fn;
 int sched_cb_pktin_poll(int pktio_index, int num_queue, int index[]);
 void sched_cb_pktio_stop_finalize(int pktio_index);
 int sched_cb_num_pktio(void);
-int sched_cb_num_queues(void);
-int sched_cb_queue_prio(uint32_t queue_index);
-int sched_cb_queue_grp(uint32_t queue_index);
-int sched_cb_queue_is_ordered(uint32_t queue_index);
-int sched_cb_queue_is_atomic(uint32_t queue_index);
 odp_queue_t sched_cb_queue_handle(uint32_t queue_index);
 void sched_cb_queue_destroy_finalize(uint32_t queue_index);
 int sched_cb_queue_deq_multi(uint32_t queue_index, odp_event_t ev[], int num);
diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c
index d907779b..4c85027b 100644
--- a/platform/linux-generic/odp_queue.c
+++ b/platform/linux-generic/odp_queue.c
@@ -520,7 +520,7 @@ static inline int deq_multi(queue_t q_int, odp_buffer_hdr_t *buf_hdr[],
         queue->s.tail = NULL;

         if (status_sync && queue->s.type == ODP_QUEUE_TYPE_SCHED)
-                sched_fn->save_context(queue->s.index, queue);
+                sched_fn->save_context(queue->s.index);

         UNLOCK(&queue->s.lock);

@@ -672,25 +672,6 @@ static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
         return 0;
 }

-int sched_cb_num_queues(void)
-{
-        return ODP_CONFIG_QUEUES;
-}
-
-int sched_cb_queue_prio(uint32_t queue_index)
-{
-        queue_entry_t *qe = get_qentry(queue_index);
-
-        return qe->s.param.sched.prio;
-}
-
-int sched_cb_queue_grp(uint32_t queue_index)
-{
-        queue_entry_t *qe = get_qentry(queue_index);
-
-        return qe->s.param.sched.group;
-}
-
 odp_queue_t sched_cb_queue_handle(uint32_t queue_index)
 {
         return queue_from_id(queue_index);
diff --git a/platform/linux-generic/odp_schedule_iquery.c b/platform/linux-generic/odp_schedule_iquery.c
index f315a4f0..9605edc7 100644
--- a/platform/linux-generic/odp_schedule_iquery.c
+++ b/platform/linux-generic/odp_schedule_iquery.c
@@ -1308,10 +1308,8 @@ static inline bool is_ordered_queue(unsigned int queue_index)
         return (sched->queues[queue_index].sync == ODP_SCHED_SYNC_ORDERED);
 }

-static void schedule_save_context(uint32_t queue_index, void *ptr)
+static void schedule_save_context(uint32_t queue_index)
 {
-        (void)ptr;
-
         if (is_atomic_queue(queue_index)) {
                 thread_local.atomic = &sched->availables[queue_index];
         } else if (is_ordered_queue(queue_index)) {

commit 79ce62cf3266b0d1b05acc68be382cd6271b7371
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Jun 30 17:10:55 2017 +0300

    linux-gen: sched: remove most dependencies on qentry

    Moved the ordered queue context structure from the queue internal
    structure into the scheduler. Ordering is a scheduler feature, and
    thus all data and code about ordering should live in the scheduler
    implementation. This removes most dependencies on qentry from the
    scheduler. The remaining dependencies come from the queue interface
    definition, which is not changed in this patch.

    Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
    Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
    Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
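
The ctx/next_ctx pair moved here behaves like a ticket lock. A standalone
analogy, with C11 atomics standing in for odp_atomic_u64_t (an illustrative
sketch, not the ODP implementation):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
            atomic_uint_fast64_t ctx;      /* current ordered context id */
            atomic_uint_fast64_t next_ctx; /* next unallocated context id */
    } order_context_t;

    /* dequeue side: take the next ticket (cf. odp_atomic_fetch_inc_u64) */
    static uint64_t ordered_acquire(order_context_t *oc)
    {
            return atomic_fetch_add(&oc->next_ctx, 1);
    }

    /* release side: wait for our turn, then pass the turn on */
    static void ordered_release(order_context_t *oc, uint64_t ticket)
    {
            /* busy loop to synchronize ordered processing */
            while (atomic_load_explicit(&oc->ctx, memory_order_acquire)
                   != ticket)
                    ; /* odp_cpu_pause() in the real code */

            /* next thread can continue processing */
            atomic_fetch_add_explicit(&oc->ctx, 1, memory_order_release);
    }

    int main(void)
    {
            order_context_t oc;
            uint64_t t0, t1;

            atomic_init(&oc.ctx, 0);
            atomic_init(&oc.next_ctx, 0);

            t0 = ordered_acquire(&oc); /* ticket 0 */
            t1 = ordered_acquire(&oc); /* ticket 1 */

            ordered_release(&oc, t0); /* releasing t1 first would spin */
            ordered_release(&oc, t1);
            printf("released %llu then %llu, in dequeue order\n",
                   (unsigned long long)t0, (unsigned long long)t1);
            return 0;
    }

Because only the scheduler touches this structure, it can live in the
scheduler's own per-queue array (sched->order[queue_index]) rather than in
queue_entry_t.
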
diff --git a/platform/linux-generic/include/odp_queue_internal.h b/platform/linux-generic/include/odp_queue_internal.h
index d79abd23..032dde88 100644
--- a/platform/linux-generic/include/odp_queue_internal.h
+++ b/platform/linux-generic/include/odp_queue_internal.h
@@ -42,13 +42,6 @@ struct queue_entry_s {
         odp_buffer_hdr_t *tail;
         int status;

-        struct {
-                odp_atomic_u64_t ctx; /**< Current ordered context id */
-                odp_atomic_u64_t next_ctx; /**< Next unallocated context id */
-                /** Array of ordered locks */
-                odp_atomic_u64_t lock[CONFIG_QUEUE_MAX_ORD_LOCKS];
-        } ordered ODP_ALIGNED_CACHE;
-
 queue_enq_fn_t enqueue ODP_ALIGNED_CACHE;
 queue_deq_fn_t dequeue;
 queue_enq_multi_fn_t enqueue_multi;
diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c
index 2db95fc6..d907779b 100644
--- a/platform/linux-generic/odp_queue.c
+++ b/platform/linux-generic/odp_queue.c
@@ -57,16 +57,6 @@ static inline odp_queue_t queue_from_id(uint32_t queue_id)
         return _odp_cast_scalar(odp_queue_t, queue_id + 1);
 }

-static inline int queue_is_atomic(queue_entry_t *qe)
-{
-        return qe->s.param.sched.sync == ODP_SCHED_SYNC_ATOMIC;
-}
-
-static inline int queue_is_ordered(queue_entry_t *qe)
-{
-        return qe->s.param.sched.sync == ODP_SCHED_SYNC_ORDERED;
-}
-
 queue_entry_t *get_qentry(uint32_t queue_id)
 {
         return &queue_tbl->queue[queue_id];
@@ -278,13 +268,6 @@ static int queue_destroy(odp_queue_t handle)
                 ODP_ERR("queue \"%s\" not empty\n", queue->s.name);
                 return -1;
         }
-        if (queue_is_ordered(queue) &&
-            odp_atomic_load_u64(&queue->s.ordered.ctx) !=
-            odp_atomic_load_u64(&queue->s.ordered.next_ctx)) {
-                UNLOCK(&queue->s.lock);
-                ODP_ERR("queue \"%s\" reorder incomplete\n", queue->s.name);
-                return -1;
-        }

         switch (queue->s.status) {
         case QUEUE_STATUS_READY:
@@ -610,20 +593,9 @@ static int queue_init(queue_entry_t *queue, const char *name,
         if (queue->s.param.sched.lock_count > sched_fn->max_ordered_locks())
                 return -1;

-        if (param->type == ODP_QUEUE_TYPE_SCHED) {
+        if (param->type == ODP_QUEUE_TYPE_SCHED)
                 queue->s.param.deq_mode = ODP_QUEUE_OP_DISABLED;

-                if (param->sched.sync == ODP_SCHED_SYNC_ORDERED) {
-                        unsigned i;
-
-                        odp_atomic_init_u64(&queue->s.ordered.ctx, 0);
-                        odp_atomic_init_u64(&queue->s.ordered.next_ctx, 0);
-
-                        for (i = 0; i < queue->s.param.sched.lock_count; i++)
-                                odp_atomic_init_u64(&queue->s.ordered.lock[i],
-                                                    0);
-                }
-        }
         queue->s.type = queue->s.param.type;

         queue->s.enqueue = queue_int_enq;
@@ -719,16 +691,6 @@ int sched_cb_queue_grp(uint32_t queue_index)
         return qe->s.param.sched.group;
 }

-int sched_cb_queue_is_ordered(uint32_t queue_index)
-{
-        return queue_is_ordered(get_qentry(queue_index));
-}
-
-int sched_cb_queue_is_atomic(uint32_t queue_index)
-{
-        return queue_is_atomic(get_qentry(queue_index));
-}
-
 odp_queue_t sched_cb_queue_handle(uint32_t queue_index)
 {
         return queue_from_id(queue_index);
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index 53670a71..8af27673 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -65,8 +65,11 @@ ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
 /* Maximum number of pktio poll commands */
 #define NUM_PKTIO_CMD (MAX_PKTIN * NUM_PKTIO)
+/* Not a valid index */
+#define NULL_INDEX ((uint32_t)-1)
+
 /* Not a valid poll command */
-#define PKTIO_CMD_INVALID ((uint32_t)-1)
+#define PKTIO_CMD_INVALID NULL_INDEX

 /* Pktio command is free */
 #define PKTIO_CMD_FREE PKTIO_CMD_INVALID
@@ -90,7 +93,7 @@ ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
 #define PRIO_QUEUE_MASK (PRIO_QUEUE_RING_SIZE - 1)

 /* Priority queue empty, not a valid queue index. */
-#define PRIO_QUEUE_EMPTY ((uint32_t)-1)
+#define PRIO_QUEUE_EMPTY NULL_INDEX

 /* For best performance, the number of queues should be a power of two. */
 ODP_STATIC_ASSERT(CHECK_IS_POWER2(ODP_CONFIG_QUEUES),
@@ -127,7 +130,7 @@ ODP_STATIC_ASSERT((8 * sizeof(pri_mask_t)) >= QUEUES_PER_PRIO,
 /* Storage for stashed enqueue operation arguments */
 typedef struct {
         odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
-        queue_entry_t *queue;
+        uint32_t queue_index;
         int num;
 } ordered_stash_t;

@@ -152,7 +155,8 @@ typedef struct {
         odp_queue_t queue;
         odp_event_t ev_stash[MAX_DEQ];
         struct {
-                queue_entry_t *src_queue; /**< Source queue entry */
+                /* Source queue index */
+                uint32_t src_queue;
                 uint64_t ctx; /**< Ordered context id */
                 int stash_num; /**< Number of stashed enqueue operations */
                 uint8_t in_order; /**< Order status */
@@ -197,6 +201,19 @@ typedef struct {
         uint32_t cmd_index;
 } pktio_cmd_t;

+/* Order context of a queue */
+typedef struct {
+        /* Current ordered context id */
+        odp_atomic_u64_t ctx ODP_ALIGNED_CACHE;
+
+        /* Next unallocated context id */
+        odp_atomic_u64_t next_ctx;
+
+        /* Array of ordered locks */
+        odp_atomic_u64_t lock[CONFIG_QUEUE_MAX_ORD_LOCKS];
+
+} order_context_t ODP_ALIGNED_CACHE;
+
 typedef struct {
         pri_mask_t pri_mask[NUM_PRIO];
         odp_spinlock_t mask_lock;
@@ -230,6 +247,8 @@ typedef struct {
                 int grp;
                 int prio;
                 int queue_per_prio;
+                int sync;
+                unsigned order_lock_count;
         } queue[ODP_CONFIG_QUEUES];

         struct {
@@ -237,6 +256,8 @@ typedef struct {
                 int num_cmd;
         } pktio[NUM_PKTIO];

+        order_context_t order[ODP_CONFIG_QUEUES];
+
 } sched_global_t;

 /* Global scheduler context */
@@ -259,6 +280,7 @@ static void sched_local_init(void)
         sched_local.thr = odp_thread_id();
         sched_local.queue = ODP_QUEUE_INVALID;
         sched_local.queue_index = PRIO_QUEUE_EMPTY;
+        sched_local.ordered.src_queue = NULL_INDEX;

         id = sched_local.thr & (QUEUES_PER_PRIO - 1);

@@ -488,16 +510,35 @@ static void pri_clr_queue(uint32_t queue_index, int prio)
 static int schedule_init_queue(uint32_t queue_index,
                                const odp_schedule_param_t *sched_param)
 {
+        int i;
         int prio = sched_param->prio;

         pri_set_queue(queue_index, prio);
         sched->queue[queue_index].grp = sched_param->group;
         sched->queue[queue_index].prio = prio;
         sched->queue[queue_index].queue_per_prio = queue_per_prio(queue_index);
+        sched->queue[queue_index].sync = sched_param->sync;
+        sched->queue[queue_index].order_lock_count = sched_param->lock_count;
+
+        odp_atomic_init_u64(&sched->order[queue_index].ctx, 0);
+        odp_atomic_init_u64(&sched->order[queue_index].next_ctx, 0);
+
+        for (i = 0; i < CONFIG_QUEUE_MAX_ORD_LOCKS; i++)
+                odp_atomic_init_u64(&sched->order[queue_index].lock[i], 0);

         return 0;
 }

+static inline int queue_is_atomic(uint32_t queue_index)
+{
+        return sched->queue[queue_index].sync == ODP_SCHED_SYNC_ATOMIC;
+}
+
+static inline int queue_is_ordered(uint32_t queue_index)
+{
+        return sched->queue[queue_index].sync == ODP_SCHED_SYNC_ORDERED;
+}
+
 static void schedule_destroy_queue(uint32_t queue_index)
 {
         int prio = sched->queue[queue_index].prio;
@@ -506,6 +547,11 @@ static void schedule_destroy_queue(uint32_t queue_index)
         sched->queue[queue_index].grp = 0;
         sched->queue[queue_index].prio = 0;
         sched->queue[queue_index].queue_per_prio = 0;
+
+        if (queue_is_ordered(queue_index) &&
+            odp_atomic_load_u64(&sched->order[queue_index].ctx) !=
+            odp_atomic_load_u64(&sched->order[queue_index].next_ctx))
+                ODP_ERR("queue reorder incomplete\n");
 }

 static int poll_cmd_queue_idx(int pktio_index, int pktin_idx)
@@ -606,20 +652,20 @@ static void schedule_release_atomic(void)
         }
 }

-static inline int ordered_own_turn(queue_entry_t *queue)
+static inline int ordered_own_turn(uint32_t queue_index)
 {
         uint64_t ctx;

-        ctx = odp_atomic_load_acq_u64(&queue->s.ordered.ctx);
+        ctx = odp_atomic_load_acq_u64(&sched->order[queue_index].ctx);

         return ctx == sched_local.ordered.ctx;
 }

-static inline void wait_for_order(queue_entry_t *queue)
+static inline void wait_for_order(uint32_t queue_index)
 {
         /* Busy loop to synchronize ordered processing */
         while (1) {
-                if (ordered_own_turn(queue))
+                if (ordered_own_turn(queue_index))
                         break;
                 odp_cpu_pause();
         }
@@ -635,52 +681,54 @@ static inline void ordered_stash_release(void)
         int i;

         for (i = 0; i < sched_local.ordered.stash_num; i++) {
-                queue_entry_t *queue;
+                queue_entry_t *queue_entry;
+                uint32_t queue_index;
                 odp_buffer_hdr_t **buf_hdr;
                 int num;

-                queue = sched_local.ordered.stash[i].queue;
+                queue_index = sched_local.ordered.stash[i].queue_index;
+                queue_entry = get_qentry(queue_index);
                 buf_hdr = sched_local.ordered.stash[i].buf_hdr;
                 num = sched_local.ordered.stash[i].num;

-                queue_fn->enq_multi(qentry_to_int(queue), buf_hdr, num);
+                queue_fn->enq_multi(qentry_to_int(queue_entry), buf_hdr, num);
         }
         sched_local.ordered.stash_num = 0;
 }

 static inline void release_ordered(void)
 {
+        uint32_t qi;
         unsigned i;
-        queue_entry_t *queue;

-        queue = sched_local.ordered.src_queue;
+        qi = sched_local.ordered.src_queue;

-        wait_for_order(queue);
+        wait_for_order(qi);

         /* Release all ordered locks */
-        for (i = 0; i < queue->s.param.sched.lock_count; i++) {
+        for (i = 0; i < sched->queue[qi].order_lock_count; i++) {
                 if (!sched_local.ordered.lock_called.u8[i])
-                        odp_atomic_store_rel_u64(&queue->s.ordered.lock[i],
+                        odp_atomic_store_rel_u64(&sched->order[qi].lock[i],
                                                  sched_local.ordered.ctx + 1);
         }

         sched_local.ordered.lock_called.all = 0;
-        sched_local.ordered.src_queue = NULL;
+        sched_local.ordered.src_queue = NULL_INDEX;
         sched_local.ordered.in_order = 0;

         ordered_stash_release();

         /* Next thread can continue processing */
-        odp_atomic_add_rel_u64(&queue->s.ordered.ctx, 1);
+        odp_atomic_add_rel_u64(&sched->order[qi].ctx, 1);
 }

 static void schedule_release_ordered(void)
 {
-        queue_entry_t *queue;
+        uint32_t queue_index;

-        queue = sched_local.ordered.src_queue;
+        queue_index = sched_local.ordered.src_queue;

-        if (odp_unlikely(!queue || sched_local.num))
+        if (odp_unlikely((queue_index == NULL_INDEX) || sched_local.num))
                 return;

         release_ordered();
@@ -688,7 +736,7 @@ static void schedule_release_ordered(void)

 static inline void schedule_release_context(void)
 {
-        if (sched_local.ordered.src_queue != NULL)
+        if (sched_local.ordered.src_queue != NULL_INDEX)
                 release_ordered();
         else
                 schedule_release_atomic();
@@ -715,9 +763,9 @@ static int schedule_ord_enq_multi(queue_t q_int, void *buf_hdr[],
         int i;
         uint32_t stash_num = sched_local.ordered.stash_num;
         queue_entry_t *dst_queue = qentry_from_int(q_int);
-        queue_entry_t *src_queue = sched_local.ordered.src_queue;
+        uint32_t src_queue = sched_local.ordered.src_queue;

-        if (!sched_local.ordered.src_queue || sched_local.ordered.in_order)
+        if ((src_queue == NULL_INDEX) || sched_local.ordered.in_order)
                 return 0;

         if (ordered_own_turn(src_queue)) {
@@ -740,7 +788,7 @@ static int schedule_ord_enq_multi(queue_t q_int, void *buf_hdr[],
                 return 0;
         }

-        sched_local.ordered.stash[stash_num].queue = dst_queue;
+        sched_local.ordered.stash[stash_num].queue_index = dst_queue->s.index;
         sched_local.ordered.stash[stash_num].num = num;
         for (i = 0; i < num; i++)
                 sched_local.ordered.stash[stash_num].buf_hdr[i] = buf_hdr[i];
@@ -803,7 +851,7 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
                             prio > ODP_SCHED_PRIO_DEFAULT))
                                 max_deq = MAX_DEQ / 2;

-                        ordered = sched_cb_queue_is_ordered(qi);
+                        ordered = queue_is_ordered(qi);

                         /* Do not cache ordered events locally to improve
                          * parallelism. Ordered context can only be released
@@ -835,21 +883,18 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],

                         if (ordered) {
                                 uint64_t ctx;
-                                queue_entry_t *queue;
                                 odp_atomic_u64_t *next_ctx;

-                                queue = get_qentry(qi);
-                                next_ctx = &queue->s.ordered.next_ctx;
-
+                                next_ctx = &sched->order[qi].next_ctx;
                                 ctx = odp_atomic_fetch_inc_u64(next_ctx);

                                 sched_local.ordered.ctx = ctx;
-                                sched_local.ordered.src_queue = queue;
+                                sched_local.ordered.src_queue = qi;

                                 /* Continue scheduling ordered queues */
                                 ring_enq(ring, PRIO_QUEUE_MASK, qi);

-                        } else if (sched_cb_queue_is_atomic(qi)) {
+                        } else if (queue_is_atomic(qi)) {
                                 /* Hold queue during atomic access */
                                 sched_local.queue_index = qi;
                         } else {
@@ -1041,14 +1086,14 @@ static int schedule_multi(odp_queue_t *out_queue, uint64_t wait,

 static inline void order_lock(void)
 {
-        queue_entry_t *queue;
+        uint32_t queue_index;

-        queue = sched_local.ordered.src_queue;
+        queue_index = sched_local.ordered.src_queue;

-        if (!queue)
+        if (queue_index == NULL_INDEX)
                 return;

-        wait_for_order(queue);
+        wait_for_order(queue_index);
 }

 static void order_unlock(void)
@@ -1058,14 +1103,15 @@ static void order_unlock(void)
 static void schedule_order_lock(unsigned lock_index)
 {
         odp_atomic_u64_t *ord_lock;
-        queue_entry_t *queue;
+        uint32_t queue_index;

-        queue = sched_local.ordered.src_queue;
+        queue_index = sched_local.ordered.src_queue;

-        ODP_ASSERT(queue && lock_index <= queue->s.param.sched.lock_count &&
+        ODP_ASSERT(queue_index != NULL_INDEX &&
+                   lock_index <= sched->queue[queue_index].order_lock_count &&
                    !sched_local.ordered.lock_called.u8[lock_index]);

-        ord_lock = &queue->s.ordered.lock[lock_index];
+        ord_lock = &sched->order[queue_index].lock[lock_index];

         /* Busy loop to synchronize ordered processing */
         while (1) {
@@ -1084,13 +1130,14 @@ static void schedule_order_lock(unsigned lock_index)
 static void schedule_order_unlock(unsigned lock_index)
 {
         odp_atomic_u64_t *ord_lock;
-        queue_entry_t *queue;
+        uint32_t queue_index;

-        queue = sched_local.ordered.src_queue;
+        queue_index = sched_local.ordered.src_queue;

-        ODP_ASSERT(queue && lock_index <= queue->s.param.sched.lock_count);
+        ODP_ASSERT(queue_index != NULL_INDEX &&
+                   lock_index <= sched->queue[queue_index].order_lock_count);

-        ord_lock = &queue->s.ordered.lock[lock_index];
+        ord_lock = &sched->order[queue_index].lock[lock_index];

         ODP_ASSERT(sched_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
diff --git a/platform/linux-generic/odp_schedule_iquery.c b/platform/linux-generic/odp_schedule_iquery.c
index 8d8dcc29..f315a4f0 100644
--- a/platform/linux-generic/odp_schedule_iquery.c
+++ b/platform/linux-generic/odp_schedule_iquery.c
@@ -71,6 +71,8 @@ typedef struct {
 /* Maximum number of pktio poll commands */
 #define NUM_PKTIO_CMD (MAX_PKTIN * NUM_PKTIO)

+/* Not a valid index */
+#define NULL_INDEX ((uint32_t)-1)
 /* Pktio command is free */
 #define PKTIO_CMD_FREE ((uint32_t)-1)

@@ -117,6 +119,19 @@ typedef struct {
 /* Forward declaration */
 typedef struct sched_thread_local sched_thread_local_t;

+/* Order context of a queue */
+typedef struct {
+        /* Current ordered context id */
+        odp_atomic_u64_t ctx ODP_ALIGNED_CACHE;
+
+        /* Next unallocated context id */
+        odp_atomic_u64_t next_ctx;
+
+        /* Array of ordered locks */
+        odp_atomic_u64_t lock[CONFIG_QUEUE_MAX_ORD_LOCKS];
+
+} order_context_t ODP_ALIGNED_CACHE;
+
 typedef struct {
         odp_shm_t selfie;

@@ -139,6 +154,8 @@ typedef struct {

         /* Quick reference to per thread context */
         sched_thread_local_t *threads[ODP_THREAD_COUNT_MAX];
+
+        order_context_t order[ODP_CONFIG_QUEUES];
 } sched_global_t;

 /* Per thread events cache */
@@ -154,7 +171,7 @@ typedef struct {
 /* Storage for stashed enqueue operation arguments */
 typedef struct {
         odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
-        queue_entry_t *queue;
+        uint32_t queue_index;
         int num;
 } ordered_stash_t;

@@ -195,7 +212,8 @@ struct sched_thread_local {
         sparse_bitmap_iterator_t iterators[NUM_SCHED_PRIO];

         struct {
-                queue_entry_t *src_queue; /**< Source queue entry */
+                /* Source queue index */
+                uint32_t src_queue;
                 uint64_t ctx; /**< Ordered context id */
                 int stash_num; /**< Number of stashed enqueue operations */
                 uint8_t in_order; /**< Order status */
@@ -314,6 +332,7 @@ static void sched_thread_local_reset(void)

         thread_local.thread = odp_thread_id();
         thread_local.cache.queue = ODP_QUEUE_INVALID;
+        thread_local.ordered.src_queue = NULL_INDEX;

         odp_rwlock_init(&thread_local.lock);

@@ -395,7 +414,7 @@ static int schedule_term_local(void)
 static int init_sched_queue(uint32_t queue_index,
                             const odp_schedule_param_t *sched_param)
 {
-        int prio, group, thread;
+        int prio, group, thread, i;
         sched_prio_t *P;
         sched_group_t *G;
         sched_thread_local_t *local;
@@ -428,6 +447,12 @@ static int init_sched_queue(uint32_t queue_index,
         memcpy(&sched->queues[queue_index],
                sched_param, sizeof(odp_schedule_param_t));

+        odp_atomic_init_u64(&sched->order[queue_index].ctx, 0);
+        odp_atomic_init_u64(&sched->order[queue_index].next_ctx, 0);
+
+        for (i = 0; i < CONFIG_QUEUE_MAX_ORD_LOCKS; i++)
+                odp_atomic_init_u64(&sched->order[queue_index].lock[i], 0);
+
         /* Update all threads in this schedule group to
          * start check this queue index upon scheduling. */
@@ -502,6 +527,11 @@ static void destroy_sched_queue(uint32_t queue_index)

         __destroy_sched_queue(G, queue_index);
         odp_rwlock_write_unlock(&G->lock);
+
+        if (sched->queues[queue_index].sync == ODP_SCHED_SYNC_ORDERED &&
+            odp_atomic_load_u64(&sched->order[queue_index].ctx) !=
+            odp_atomic_load_u64(&sched->order[queue_index].next_ctx))
+                ODP_ERR("queue reorder incomplete\n");
 }

 static int pktio_cmd_queue_hash(int pktio, int pktin)
@@ -1070,20 +1100,20 @@ static void schedule_release_atomic(void)
         }
 }

-static inline int ordered_own_turn(queue_entry_t *queue)
+static inline int ordered_own_turn(uint32_t queue_index)
 {
         uint64_t ctx;

-        ctx = odp_atomic_load_acq_u64(&queue->s.ordered.ctx);
+        ctx = odp_atomic_load_acq_u64(&sched->order[queue_index].ctx);

         return ctx == thread_local.ordered.ctx;
 }

-static inline void wait_for_order(queue_entry_t *queue)
+static inline void wait_for_order(uint32_t queue_index)
 {
         /* Busy loop to synchronize ordered processing */
         while (1) {
-                if (ordered_own_turn(queue))
+                if (ordered_own_turn(queue_index))
                         break;
                 odp_cpu_pause();
         }
@@ -1099,52 +1129,55 @@ static inline void ordered_stash_release(void)
         int i;

         for (i = 0; i < thread_local.ordered.stash_num; i++) {
-                queue_entry_t *queue;
+                queue_entry_t *queue_entry;
+                uint32_t queue_index;
                 odp_buffer_hdr_t **buf_hdr;
                 int num;

-                queue = thread_local.ordered.stash[i].queue;
+                queue_index = thread_local.ordered.stash[i].queue_index;
+                queue_entry = get_qentry(queue_index);
                 buf_hdr = thread_local.ordered.stash[i].buf_hdr;
                 num = thread_local.ordered.stash[i].num;

-                queue_fn->enq_multi(qentry_to_int(queue), buf_hdr, num);
+                queue_fn->enq_multi(qentry_to_int(queue_entry), buf_hdr, num);
         }
         thread_local.ordered.stash_num = 0;
 }

 static inline void release_ordered(void)
 {
+        uint32_t qi;
         unsigned i;
-        queue_entry_t *queue;

-        queue = thread_local.ordered.src_queue;
+        qi = thread_local.ordered.src_queue;

-        wait_for_order(queue);
+        wait_for_order(qi);

         /* Release all ordered locks */
-        for (i = 0; i < queue->s.param.sched.lock_count; i++) {
+        for (i = 0; i < sched->queues[qi].lock_count; i++) {
                 if (!thread_local.ordered.lock_called.u8[i])
-                        odp_atomic_store_rel_u64(&queue->s.ordered.lock[i],
+                        odp_atomic_store_rel_u64(&sched->order[qi].lock[i],
                                                  thread_local.ordered.ctx + 1);
         }

         thread_local.ordered.lock_called.all = 0;
-        thread_local.ordered.src_queue = NULL;
+        thread_local.ordered.src_queue = NULL_INDEX;
         thread_local.ordered.in_order = 0;

         ordered_stash_release();

         /* Next thread can continue processing */
-        odp_atomic_add_rel_u64(&queue->s.ordered.ctx, 1);
+        odp_atomic_add_rel_u64(&sched->order[qi].ctx, 1);
 }

 static void schedule_release_ordered(void)
 {
-        queue_entry_t *queue;
+        uint32_t queue_index;

-        queue = thread_local.ordered.src_queue;
+        queue_index = thread_local.ordered.src_queue;

-        if (odp_unlikely(!queue || thread_local.cache.count))
+        if (odp_unlikely((queue_index == NULL_INDEX) ||
+                         thread_local.cache.count))
                 return;

         release_ordered();
@@ -1152,7 +1185,7 @@ static void schedule_release_ordered(void)

 static inline void schedule_release_context(void)
 {
-        if (thread_local.ordered.src_queue != NULL)
+        if (thread_local.ordered.src_queue != NULL_INDEX)
                 release_ordered();
         else
                 schedule_release_atomic();
@@ -1164,9 +1197,9 @@ static int schedule_ord_enq_multi(queue_t q_int, void *buf_hdr[],
         int i;
         uint32_t stash_num = thread_local.ordered.stash_num;
         queue_entry_t *dst_queue = qentry_from_int(q_int);
-        queue_entry_t *src_queue = thread_local.ordered.src_queue;
+        uint32_t src_queue = thread_local.ordered.src_queue;

-        if (!thread_local.ordered.src_queue || thread_local.ordered.in_order)
+        if ((src_queue == NULL_INDEX) || thread_local.ordered.in_order)
                 return 0;

         if (ordered_own_turn(src_queue)) {
@@ -1189,7 +1222,7 @@ static int schedule_ord_enq_multi(queue_t q_int, void *buf_hdr[],
                 return 0;
         }

-        thread_local.ordered.stash[stash_num].queue = dst_queue;
+        thread_local.ordered.stash[stash_num].queue_index = dst_queue->s.index;
         thread_local.ordered.stash[stash_num].num = num;
         for (i = 0; i < num; i++)
                 thread_local.ordered.stash[stash_num].buf_hdr[i] = buf_hdr[i];
@@ -1202,14 +1235,14 @@ static int schedule_ord_enq_multi(queue_t q_int, void *buf_hdr[],

 static void order_lock(void)
 {
-        queue_entry_t *queue;
+        uint32_t queue_index;

-        queue = thread_local.ordered.src_queue;
+        queue_index = thread_local.ordered.src_queue;

-        if (!queue)
+        if (queue_index == NULL_INDEX)
                 return;

-        wait_for_order(queue);
+        wait_for_order(queue_index);
 }

 static void order_unlock(void)
@@ -1219,14 +1252,15 @@ static void order_unlock(void)
 static void schedule_order_lock(unsigned lock_index)
 {
         odp_atomic_u64_t *ord_lock;
-        queue_entry_t *queue;
+        uint32_t queue_index;

-        queue = thread_local.ordered.src_queue;
+        queue_index = thread_local.ordered.src_queue;

-        ODP_ASSERT(queue && lock_index <= queue->s.param.sched.lock_count &&
+        ODP_ASSERT(queue_index != NULL_INDEX &&
+                   lock_index <= sched->queues[queue_index].lock_count &&
                    !thread_local.ordered.lock_called.u8[lock_index]);

-        ord_lock = &queue->s.ordered.lock[lock_index];
+        ord_lock = &sched->order[queue_index].lock[lock_index];

         /* Busy loop to synchronize ordered processing */
         while (1) {
@@ -1245,13 +1279,14 @@ static void schedule_order_lock(unsigned lock_index)
 static void schedule_order_unlock(unsigned lock_index)
 {
         odp_atomic_u64_t *ord_lock;
-        queue_entry_t *queue;
+        uint32_t queue_index;

-        queue = thread_local.ordered.src_queue;
+        queue_index = thread_local.ordered.src_queue;

-        ODP_ASSERT(queue && lock_index <= queue->s.param.sched.lock_count);
+        ODP_ASSERT(queue_index != NULL_INDEX &&
+                   lock_index <= sched->queues[queue_index].lock_count);

-        ord_lock = &queue->s.ordered.lock[lock_index];
+        ord_lock = &sched->order[queue_index].lock[lock_index];

         ODP_ASSERT(thread_local.ordered.ctx == odp_atomic_load_u64(ord_lock));

@@ -1275,7 +1310,7 @@ static inline bool is_ordered_queue(unsigned int queue_index)

 static void schedule_save_context(uint32_t queue_index, void *ptr)
 {
-        queue_entry_t *queue = ptr;
+        (void)ptr;

         if (is_atomic_queue(queue_index)) {
                 thread_local.atomic = &sched->availables[queue_index];
@@ -1283,11 +1318,11 @@ static void schedule_save_context(uint32_t queue_index, void *ptr)
                 uint64_t ctx;
                 odp_atomic_u64_t *next_ctx;

-                next_ctx = &queue->s.ordered.next_ctx;
+                next_ctx = &sched->order[queue_index].next_ctx;
                 ctx = odp_atomic_fetch_inc_u64(next_ctx);

                 thread_local.ordered.ctx = ctx;
-                thread_local.ordered.src_queue = queue;
+                thread_local.ordered.src_queue = queue_index;
         }
 }

commit 39e4ebd39f2d171bbfef5bfee6596033b83c56a0
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Jun 30 17:10:54 2017 +0300

    linux-gen: sched: use config max ordered locks

    Use the config file value for the number of ordered locks everywhere.

    Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
    Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
    Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
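
The effect is that one config value both sizes the ordered lock arrays and is
reported through max_ordered_locks(). A small sketch of the pattern
(illustrative only, not ODP code):

    #include <stdint.h>
    #include <stdio.h>

    /* single source of truth, as in odp_config_internal.h */
    #define CONFIG_QUEUE_MAX_ORD_LOCKS 2

    typedef struct {
            uint64_t lock[CONFIG_QUEUE_MAX_ORD_LOCKS]; /* sized by config */
    } ordered_locks_t;

    static unsigned max_ordered_locks(void)
    {
            /* report the same value the arrays are sized with */
            return CONFIG_QUEUE_MAX_ORD_LOCKS;
    }

    int main(void)
    {
            ordered_locks_t locks = { { 0 } };

            printf("%u ordered locks, array of %zu entries\n",
                   max_ordered_locks(),
                   sizeof(locks.lock) / sizeof(locks.lock[0]));
            return 0;
    }

With a single constant, the per-scheduler MAX_ORDERED_LOCKS_PER_QUEUE copies
and the static asserts that kept them consistent are no longer needed.
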
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h
index 3cff0045..469396df 100644
--- a/platform/linux-generic/include/odp_config_internal.h
+++ b/platform/linux-generic/include/odp_config_internal.h
@@ -27,7 +27,7 @@
 /*
  * Maximum number of ordered locks per queue
  */
-#define CONFIG_QUEUE_MAX_ORD_LOCKS 4
+#define CONFIG_QUEUE_MAX_ORD_LOCKS 2

 /*
  * Maximum number of packet IO resources
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index 69de7ac0..53670a71 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -121,12 +121,6 @@ ODP_STATIC_ASSERT((8 * sizeof(pri_mask_t)) >= QUEUES_PER_PRIO,
 /* Maximum number of dequeues */
 #define MAX_DEQ CONFIG_BURST_SIZE

-/* Maximum number of ordered locks per queue */
-#define MAX_ORDERED_LOCKS_PER_QUEUE 2
-
-ODP_STATIC_ASSERT(MAX_ORDERED_LOCKS_PER_QUEUE <= CONFIG_QUEUE_MAX_ORD_LOCKS,
-                  "Too_many_ordered_locks");
-
 /* Ordered stash size */
 #define MAX_ORDERED_STASH 512

@@ -449,7 +443,7 @@ static inline int grp_update_tbl(void)

 static unsigned schedule_max_ordered_locks(void)
 {
-        return MAX_ORDERED_LOCKS_PER_QUEUE;
+        return CONFIG_QUEUE_MAX_ORD_LOCKS;
 }

 static inline int queue_per_prio(uint32_t queue_index)
diff --git a/platform/linux-generic/odp_schedule_iquery.c b/platform/linux-generic/odp_schedule_iquery.c
index 75f56e63..8d8dcc29 100644
--- a/platform/linux-generic/odp_schedule_iquery.c
+++ b/platform/linux-generic/odp_schedule_iquery.c
@@ -148,12 +148,6 @@ typedef struct {
         odp_event_t stash[MAX_DEQ], *top;
 } event_cache_t;

-/* Maximum number of ordered locks per queue */
-#define MAX_ORDERED_LOCKS_PER_QUEUE 2
-
-ODP_STATIC_ASSERT(MAX_ORDERED_LOCKS_PER_QUEUE <= CONFIG_QUEUE_MAX_ORD_LOCKS,
-                  "Too_many_ordered_locks");
-
 /* Ordered stash size */
 #define MAX_ORDERED_STASH 512

@@ -1266,7 +1260,7 @@ static void schedule_order_unlock(unsigned lock_index)

 static unsigned schedule_max_ordered_locks(void)
 {
-        return MAX_ORDERED_LOCKS_PER_QUEUE;
+        return CONFIG_QUEUE_MAX_ORD_LOCKS;
 }

 static inline bool is_atomic_queue(unsigned int queue_index)

commit f9e7f458cf7151c75a9145272c62306b96f9ff27
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Jun 30 17:10:53 2017 +0300

    linux-gen: sched: remove schedule interface dependency on qentry

    Do not use the queue internal type in the schedule interface.

    Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
    Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
    Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
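
The new status_sync flag lets the queue fast path skip hooks that only some
schedulers implement. A standalone sketch of the dispatch pattern (the
dequeue_path() helper is hypothetical; the flag and callback names follow the
patch):

    #include <stdint.h>
    #include <stdio.h>

    typedef void (*schedule_save_context_fn_t)(uint32_t queue_index,
                                               void *ptr);

    typedef struct {
            int status_sync; /* set only by schedulers needing the hooks */
            schedule_save_context_fn_t save_context; /* may be NULL */
    } schedule_fn_t;

    static void iquery_save_context(uint32_t queue_index, void *ptr)
    {
            (void)ptr;
            printf("saved context of queue %u\n", queue_index);
    }

    static const schedule_fn_t schedule_default_fn = { 0, NULL };
    static const schedule_fn_t schedule_iquery_fn = {
            1, iquery_save_context
    };

    /* hypothetical stand-in for the deq_multi() fast path */
    static void dequeue_path(const schedule_fn_t *fn, uint32_t queue_index)
    {
            if (fn->status_sync) /* one flag test guards all such hooks */
                    fn->save_context(queue_index, NULL);
    }

    int main(void)
    {
            dequeue_path(&schedule_default_fn, 3); /* hook skipped */
            dequeue_path(&schedule_iquery_fn, 3);  /* hook invoked */
            return 0;
    }

Guarding with the flag also lets the default and sp schedulers set
unsched_queue and save_context to NULL instead of carrying empty stubs.
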
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index 5877a1cd..5abbb732 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -35,9 +35,10 @@ typedef int (*schedule_term_local_fn_t)(void);
 typedef void (*schedule_order_lock_fn_t)(void);
 typedef void (*schedule_order_unlock_fn_t)(void);
 typedef unsigned (*schedule_max_ordered_locks_fn_t)(void);
-typedef void (*schedule_save_context_fn_t)(queue_entry_t *queue);
+typedef void (*schedule_save_context_fn_t)(uint32_t queue_index, void *ptr);

 typedef struct schedule_fn_t {
+        int status_sync;
         schedule_pktio_start_fn_t pktio_start;
         schedule_thr_add_fn_t thr_add;
         schedule_thr_rem_fn_t thr_rem;
@@ -45,7 +46,6 @@ typedef struct schedule_fn_t {
         schedule_init_queue_fn_t init_queue;
         schedule_destroy_queue_fn_t destroy_queue;
         schedule_sched_queue_fn_t sched_queue;
-        schedule_unsched_queue_fn_t unsched_queue;
         schedule_ord_enq_multi_fn_t ord_enq_multi;
         schedule_init_global_fn_t init_global;
         schedule_term_global_fn_t term_global;
@@ -54,7 +54,11 @@ typedef struct schedule_fn_t {
         schedule_order_lock_fn_t order_lock;
         schedule_order_unlock_fn_t order_unlock;
         schedule_max_ordered_locks_fn_t max_ordered_locks;
+
+        /* Called only when status_sync is set */
+        schedule_unsched_queue_fn_t unsched_queue;
         schedule_save_context_fn_t save_context;
+
 } schedule_fn_t;

 /* Interface towards the scheduler */
diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c
index 19945584..2db95fc6 100644
--- a/platform/linux-generic/odp_queue.c
+++ b/platform/linux-generic/odp_queue.c
@@ -474,6 +474,7 @@ static inline int deq_multi(queue_t q_int, odp_buffer_hdr_t *buf_hdr[],
         int i, j;
         queue_entry_t *queue;
         int updated = 0;
+        int status_sync = sched_fn->status_sync;

         queue = qentry_from_int(q_int);
         LOCK(&queue->s.lock);
@@ -490,7 +491,9 @@ static inline int deq_multi(queue_t q_int, odp_buffer_hdr_t *buf_hdr[],
                 /* Already empty queue */
                 if (queue->s.status == QUEUE_STATUS_SCHED) {
                         queue->s.status = QUEUE_STATUS_NOTSCHED;
-                        sched_fn->unsched_queue(queue->s.index);
+
+                        if (status_sync)
+                                sched_fn->unsched_queue(queue->s.index);
                 }

                 UNLOCK(&queue->s.lock);
@@ -533,8 +536,8 @@ static inline int deq_multi(queue_t q_int, odp_buffer_hdr_t *buf_hdr[],
         if (hdr == NULL)
                 queue->s.tail = NULL;

-        if (queue->s.type == ODP_QUEUE_TYPE_SCHED)
-                sched_fn->save_context(queue);
+        if (status_sync && queue->s.type == ODP_QUEUE_TYPE_SCHED)
+                sched_fn->save_context(queue->s.index, queue);

         UNLOCK(&queue->s.lock);

diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index 814746c7..69de7ac0 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -22,9 +22,11 @@
 #include <odp/api/sync.h>
 #include <odp/api/packet_io.h>
 #include <odp_ring_internal.h>
-#include <odp_queue_internal.h>
 #include <odp_timer_internal.h>

+/* Should remove this dependency */
+#include <odp_queue_internal.h>
+
 /* Number of priority levels */
 #define NUM_PRIO 8

@@ -1340,22 +1342,14 @@ static int schedule_sched_queue(uint32_t queue_index)
         return 0;
 }

-static int schedule_unsched_queue(uint32_t queue_index ODP_UNUSED)
-{
-        return 0;
-}
-
 static int schedule_num_grps(void)
 {
         return NUM_SCHED_GRPS;
 }

-static void schedule_save_context(queue_entry_t *queue ODP_UNUSED)
-{
-}
-
 /* Fill in scheduler interface */
 const schedule_fn_t schedule_default_fn = {
+        .status_sync = 0,
         .pktio_start = schedule_pktio_start,
         .thr_add = schedule_thr_add,
         .thr_rem = schedule_thr_rem,
@@ -1363,7 +1357,6 @@ const schedule_fn_t schedule_default_fn = {
         .init_queue = schedule_init_queue,
         .destroy_queue = schedule_destroy_queue,
         .sched_queue = schedule_sched_queue,
-        .unsched_queue = schedule_unsched_queue,
         .ord_enq_multi = schedule_ord_enq_multi,
         .init_global = schedule_init_global,
         .term_global = schedule_term_global,
@@ -1372,7 +1365,8 @@ const schedule_fn_t schedule_default_fn = {
         .order_lock = order_lock,
         .order_unlock = order_unlock,
         .max_ordered_locks = schedule_max_ordered_locks,
-        .save_context = schedule_save_context
+        .unsched_queue = NULL,
+        .save_context = NULL
 };

 /* Fill in scheduler API calls */
diff --git a/platform/linux-generic/odp_schedule_iquery.c b/platform/linux-generic/odp_schedule_iquery.c
index b33ba7cd..75f56e63 100644
--- a/platform/linux-generic/odp_schedule_iquery.c
+++ b/platform/linux-generic/odp_schedule_iquery.c
@@ -12,7 +12,6 @@
 #include <odp_internal.h>
 #include <odp_debug_internal.h>
 #include <odp_ring_internal.h>
-#include <odp_queue_internal.h>
 #include <odp_buffer_internal.h>
 #include <odp_bitmap_internal.h>
 #include <odp/api/thread.h>
@@ -25,6 +24,9 @@
 #include <odp_config_internal.h>
 #include <odp_timer_internal.h>

+/* Should remove this dependency */
+#include <odp_queue_internal.h>
+
 /* Number of priority levels */
 #define NUM_SCHED_PRIO 8

@@ -1267,11 +1269,23 @@ static unsigned schedule_max_ordered_locks(void)
         return MAX_ORDERED_LOCKS_PER_QUEUE;
 }

-static void schedule_save_context(queue_entry_t *queue)
+static inline bool is_atomic_queue(unsigned int queue_index)
+{
+        return (sched->queues[queue_index].sync == ODP_SCHED_SYNC_ATOMIC);
+}
+
+static inline bool is_ordered_queue(unsigned int queue_index)
+{
+        return (sched->queues[queue_index].sync == ODP_SCHED_SYNC_ORDERED);
+}
+
+static void schedule_save_context(uint32_t queue_index, void *ptr)
 {
-        if (queue->s.param.sched.sync == ODP_SCHED_SYNC_ATOMIC) {
-                thread_local.atomic = &sched->availables[queue->s.index];
-        } else if (queue->s.param.sched.sync == ODP_SCHED_SYNC_ORDERED) {
+        queue_entry_t *queue = ptr;
+
+        if (is_atomic_queue(queue_index)) {
+                thread_local.atomic = &sched->availables[queue_index];
+        } else if (is_ordered_queue(queue_index)) {
                 uint64_t ctx;
                 odp_atomic_u64_t *next_ctx;

@@ -1285,6 +1299,7 @@ static void schedule_save_context(queue_entry_t *queue)

 /* Fill in scheduler interface */
 const schedule_fn_t schedule_iquery_fn = {
+        .status_sync = 1,
         .pktio_start = schedule_pktio_start,
         .thr_add = group_add_thread,
         .thr_rem = group_remove_thread,
@@ -1292,7 +1307,6 @@ const schedule_fn_t schedule_iquery_fn = {
         .init_queue = init_sched_queue,
         .destroy_queue = destroy_sched_queue,
         .sched_queue = schedule_sched_queue,
-        .unsched_queue = schedule_unsched_queue,
         .ord_enq_multi = schedule_ord_enq_multi,
         .init_global = schedule_init_global,
         .term_global = schedule_term_global,
@@ -1301,7 +1315,8 @@ const schedule_fn_t schedule_iquery_fn = {
         .order_lock = order_lock,
         .order_unlock = order_unlock,
         .max_ordered_locks = schedule_max_ordered_locks,
-        .save_context = schedule_save_context,
+        .unsched_queue = schedule_unsched_queue,
+        .save_context = schedule_save_context
 };

 /* Fill in scheduler API calls */
@@ -1428,18 +1443,6 @@ static void thread_clear_interests(sched_thread_local_t *thread,
         }
 }

-static inline bool is_atomic_queue(unsigned int queue_index)
-{
-        return (sched->queues[queue_index].sync
-                == ODP_SCHED_SYNC_ATOMIC);
-}
-
-static inline bool is_ordered_queue(unsigned int queue_index)
-{
-        return (sched->queues[queue_index].sync
-                == ODP_SCHED_SYNC_ORDERED);
-}
-
 static inline bool compete_atomic_queue(unsigned int queue_index)
 {
         bool expected = sched->availables[queue_index];
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index 14b7aa88..9829acc5 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -410,11 +410,6 @@ static int sched_queue(uint32_t qi)
         return 0;
 }

-static int unsched_queue(uint32_t qi ODP_UNUSED)
-{
-        return 0;
-}
-
 static int ord_enq_multi(queue_t q_int, void *buf_hdr[], int num,
                          int *ret)
 {
@@ -830,12 +825,9 @@ static void order_unlock(void)
 {
 }

-static void save_context(queue_entry_t *queue ODP_UNUSED)
-{
-}
-
 /* Fill in scheduler interface */
 const schedule_fn_t schedule_sp_fn = {
+        .status_sync = 0,
         .pktio_start = pktio_start,
         .thr_add = thr_add,
         .thr_rem = thr_rem,
@@ -843,16 +835,16 @@ const schedule_fn_t schedule_sp_fn = {
         .init_queue = init_queue,
         .destroy_queue = destroy_queue,
         .sched_queue = sched_queue,
-        .unsched_queue = unsched_queue,
         .ord_enq_multi = ord_enq_multi,
         .init_global = init_global,
         .term_global = term_global,
         .init_local = init_local,
         .term_local = term_local,
-        .order_lock =  order_lock,
-        .order_unlock = order_unlock,
+        .order_lock = order_lock,
+        .order_unlock = order_unlock,
         .max_ordered_locks = max_ordered_locks,
-        .save_context = save_context
+        .unsched_queue = NULL,
+        .save_context = NULL
 };

 /* Fill in scheduler API calls */
-----------------------------------------------------------------------

Summary of changes:
 .../linux-generic/include/odp_config_internal.h    |   2 +-
 .../linux-generic/include/odp_queue_internal.h     |   7 -
 platform/linux-generic/include/odp_schedule_if.h   |  13 +-
 platform/linux-generic/odp_queue.c                 |  68 +--------
 platform/linux-generic/odp_schedule.c              | 161 +++++++++++++--------
 platform/linux-generic/odp_schedule_iquery.c       | 158 ++++++++++++--------
 platform/linux-generic/odp_schedule_sp.c           |  18 +--
 7 files changed, 211 insertions(+), 216 deletions(-)
hooks/post-receive