This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, master has been updated via 6104182f253161f17fd3ca5a377ed1bac62e366e (commit) via 0142d42744e606257c083ec0ac0077e0cd554e21 (commit) via 2b2ff3f281e17d19a3a7a00a3c8ca5460751cd6f (commit) from e454f36a4a7c70dafd87eab03b3ba740dc10860a (commit)
Those revisions listed above that are new to this repository have not appeared in any other notification email, so we list those revisions in full below.
- Log ----------------------------------------------------------------- commit 6104182f253161f17fd3ca5a377ed1bac62e366e Author: Petri Savolainen petri.savolainen@nokia.com Date: Thu Sep 15 12:50:41 2016 +0300
test: l2fwd: add missing output event queue read
Non-sched modes were missing an update of the output event queue handle in the case of multiple pktios per thread.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/common_plat/performance/odp_l2fwd.c b/test/common_plat/performance/odp_l2fwd.c index 90e3b88..651ed10 100644 --- a/test/common_plat/performance/odp_l2fwd.c +++ b/test/common_plat/performance/odp_l2fwd.c @@ -425,6 +425,9 @@ static int run_worker_plain_queue_mode(void *arg) dst_idx = thr_args->pktio[pktio].tx_idx; queue = thr_args->pktio[pktio].rx_queue; pktout = thr_args->pktio[pktio].pktout; + if (odp_unlikely(use_event_queue)) + tx_queue = thr_args->pktio[pktio].tx_queue; + pktio++; if (pktio == num_pktio) pktio = 0; @@ -522,6 +525,9 @@ static int run_worker_direct_mode(void *arg) dst_idx = thr_args->pktio[pktio].tx_idx; pktin = thr_args->pktio[pktio].pktin; pktout = thr_args->pktio[pktio].pktout; + if (odp_unlikely(use_event_queue)) + tx_queue = thr_args->pktio[pktio].tx_queue; + pktio++; if (pktio == num_pktio) pktio = 0;
commit 0142d42744e606257c083ec0ac0077e0cd554e21 Author: Petri Savolainen petri.savolainen@nokia.com Date: Thu Sep 15 12:50:40 2016 +0300
test: l2fwd: lockless output queues in sched mode
The application didn't request lockless (MT_UNSAFE) output queues in scheduler mode, even when the pktio had enough queues (one tx queue per worker). Updated the tx-side queue configuration code to utilize lockless queues also in sched mode. Also updated the queue binding printout, so that it's easy to check which queues workers are using (only tx in sched mode).
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/common_plat/performance/odp_l2fwd.c b/test/common_plat/performance/odp_l2fwd.c index a656547..90e3b88 100644 --- a/test/common_plat/performance/odp_l2fwd.c +++ b/test/common_plat/performance/odp_l2fwd.c @@ -104,6 +104,7 @@ typedef struct { int dst_change; /**< Change destination eth addresses */ int src_change; /**< Change source eth addresses */ int error_check; /**< Check packet errors */ + int sched_mode; /**< Scheduler mode */ } appl_args_t;
static int exit_threads; /**< Break workers loop if set to 1 */ @@ -132,8 +133,6 @@ typedef struct thread_args_t { int num_pktio;
struct { - odp_pktio_t rx_pktio; - odp_pktio_t tx_pktio; odp_pktin_queue_t pktin; odp_pktout_queue_t pktout; odp_queue_t rx_queue; @@ -290,8 +289,8 @@ static int run_worker_sched_mode(void *arg) int pkts; int thr; int dst_idx; - int thr_idx; int i; + int pktio, num_pktio; odp_pktout_queue_t pktout[MAX_PKTIOS]; odp_queue_t tx_queue[MAX_PKTIOS]; thread_args_t *thr_args = arg; @@ -300,24 +299,17 @@ static int run_worker_sched_mode(void *arg) pktin_mode_t in_mode = gbl_args->appl.in_mode;
thr = odp_thread_id(); - thr_idx = thr_args->thr_idx;
- memset(pktout, 0, sizeof(pktout)); + num_pktio = thr_args->num_pktio;
- for (i = 0; i < MAX_PKTIOS; i++) - tx_queue[i] = ODP_QUEUE_INVALID; + if (num_pktio > MAX_PKTIOS) { + LOG_ERR("Too many pktios %i\n", num_pktio); + return -1; + }
- for (i = 0; i < gbl_args->appl.if_count; i++) { - if (gbl_args->pktios[i].num_tx_queue == - gbl_args->appl.num_workers) { - pktout[i] = gbl_args->pktios[i].pktout[thr_idx]; - tx_queue[i] = gbl_args->pktios[i].tx_q[thr_idx]; - } else if (gbl_args->pktios[i].num_tx_queue == 1) { - pktout[i] = gbl_args->pktios[i].pktout[0]; - tx_queue[i] = gbl_args->pktios[i].tx_q[0]; - } else { - LOG_ABORT("Bad number of output queues %i\n", i); - } + for (pktio = 0; pktio < num_pktio; pktio++) { + tx_queue[pktio] = thr_args->pktio[pktio].tx_queue; + pktout[pktio] = thr_args->pktio[pktio].pktout; }
printf("[%02i] PKTIN_SCHED_%s, %s\n", thr, @@ -605,13 +597,12 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_pktio_op_mode_t mode_rx; odp_pktio_op_mode_t mode_tx; pktin_mode_t in_mode = gbl_args->appl.in_mode; - int num_tx_shared;
odp_pktio_param_init(&pktio_param);
- if (gbl_args->appl.in_mode == PLAIN_QUEUE) + if (in_mode == PLAIN_QUEUE) pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE; - else if (gbl_args->appl.in_mode != DIRECT_RECV) /* pktin_mode SCHED_* */ + else if (in_mode != DIRECT_RECV) /* pktin_mode SCHED_* */ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
if (gbl_args->appl.out_mode != PKTOUT_DIRECT) @@ -634,11 +625,12 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_pktin_queue_param_init(&pktin_param); odp_pktout_queue_param_init(&pktout_param);
- if (sched_mode(in_mode)) { - num_tx_shared = 1; - mode_tx = ODP_PKTIO_OP_MT; - mode_rx = ODP_PKTIO_OP_MT; + /* By default use a queue per worker. Sched mode ignores rx side + * setting. */ + mode_rx = ODP_PKTIO_OP_MT_UNSAFE; + mode_tx = ODP_PKTIO_OP_MT_UNSAFE;
+ if (gbl_args->appl.sched_mode) { if (gbl_args->appl.in_mode == SCHED_ATOMIC) sync_mode = ODP_SCHED_SYNC_ATOMIC; else if (gbl_args->appl.in_mode == SCHED_ORDERED) @@ -649,10 +641,6 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, pktin_param.queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT; pktin_param.queue_param.sched.sync = sync_mode; pktin_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL; - } else { - num_tx_shared = capa.max_output_queues; - mode_tx = ODP_PKTIO_OP_MT_UNSAFE; - mode_rx = ODP_PKTIO_OP_MT_UNSAFE; }
if (num_rx > (int)capa.max_input_queues) { @@ -664,8 +652,8 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
if (num_tx > (int)capa.max_output_queues) { printf("Sharing %i output queues between %i workers\n", - num_tx_shared, num_tx); - num_tx = num_tx_shared; + capa.max_output_queues, num_tx); + num_tx = capa.max_output_queues; mode_tx = ODP_PKTIO_OP_MT; }
@@ -795,31 +783,10 @@ static int print_speed_stats(int num_workers, stats_t *thr_stats,
static void print_port_mapping(void) { - int if_count, num_workers; - int thr, pktio; + int if_count; + int pktio;
if_count = gbl_args->appl.if_count; - num_workers = gbl_args->appl.num_workers; - - printf("\nWorker mapping table (port[queue])\n--------------------\n"); - - for (thr = 0; thr < num_workers; thr++) { - int rx_idx, tx_idx; - int rx_queue_idx, tx_queue_idx; - thread_args_t *thr_args = &gbl_args->thread[thr]; - int num = thr_args->num_pktio; - - printf("Worker %i\n", thr); - - for (pktio = 0; pktio < num; pktio++) { - rx_idx = thr_args->pktio[pktio].rx_idx; - tx_idx = thr_args->pktio[pktio].tx_idx; - rx_queue_idx = thr_args->pktio[pktio].rx_queue_idx; - tx_queue_idx = thr_args->pktio[pktio].tx_queue_idx; - printf(" %i[%i] -> %i[%i]\n", - rx_idx, rx_queue_idx, tx_idx, tx_queue_idx); - } - }
printf("\nPort config\n--------------------\n");
@@ -875,51 +842,77 @@ static int find_dest_port(int port) static void bind_workers(void) { int if_count, num_workers; - int rx_idx, tx_idx, thr, pktio; + int rx_idx, tx_idx, thr, pktio, i; thread_args_t *thr_args;
if_count = gbl_args->appl.if_count; num_workers = gbl_args->appl.num_workers;
- /* initialize port forwarding table */ - for (rx_idx = 0; rx_idx < if_count; rx_idx++) - gbl_args->dst_port[rx_idx] = find_dest_port(rx_idx); - - if (if_count > num_workers) { - thr = 0; - - for (rx_idx = 0; rx_idx < if_count; rx_idx++) { - thr_args = &gbl_args->thread[thr]; - pktio = thr_args->num_pktio; - tx_idx = gbl_args->dst_port[rx_idx]; - thr_args->pktio[pktio].rx_idx = rx_idx; - thr_args->pktio[pktio].tx_idx = tx_idx; - thr_args->num_pktio++; - - gbl_args->pktios[rx_idx].num_rx_thr++; - gbl_args->pktios[tx_idx].num_tx_thr++; - - thr++; - if (thr >= num_workers) - thr = 0; + if (gbl_args->appl.sched_mode) { + /* all threads receive and send on all pktios */ + for (i = 0; i < if_count; i++) { + gbl_args->pktios[i].num_rx_thr = num_workers; + gbl_args->pktios[i].num_tx_thr = num_workers; } - } else { - rx_idx = 0;
for (thr = 0; thr < num_workers; thr++) { thr_args = &gbl_args->thread[thr]; - pktio = thr_args->num_pktio; - tx_idx = gbl_args->dst_port[rx_idx]; - thr_args->pktio[pktio].rx_idx = rx_idx; - thr_args->pktio[pktio].tx_idx = tx_idx; - thr_args->num_pktio++; - - gbl_args->pktios[rx_idx].num_rx_thr++; - gbl_args->pktios[tx_idx].num_tx_thr++; - - rx_idx++; - if (rx_idx >= if_count) - rx_idx = 0; + thr_args->num_pktio = if_count; + + /* In sched mode, pktios are not cross connected with + * local pktio indexes */ + for (i = 0; i < if_count; i++) { + thr_args->pktio[i].rx_idx = i; + thr_args->pktio[i].tx_idx = i; + } + } + } else { + /* initialize port forwarding table */ + for (rx_idx = 0; rx_idx < if_count; rx_idx++) + gbl_args->dst_port[rx_idx] = find_dest_port(rx_idx); + + if (if_count > num_workers) { + /* Less workers than pktios. Assign single worker per + * pktio. */ + thr = 0; + + for (rx_idx = 0; rx_idx < if_count; rx_idx++) { + thr_args = &gbl_args->thread[thr]; + pktio = thr_args->num_pktio; + /* Cross connect rx to tx */ + tx_idx = gbl_args->dst_port[rx_idx]; + thr_args->pktio[pktio].rx_idx = rx_idx; + thr_args->pktio[pktio].tx_idx = tx_idx; + thr_args->num_pktio++; + + gbl_args->pktios[rx_idx].num_rx_thr++; + gbl_args->pktios[tx_idx].num_tx_thr++; + + thr++; + if (thr >= num_workers) + thr = 0; + } + } else { + /* More workers than pktios. Assign at least one worker + * per pktio. 
*/ + rx_idx = 0; + + for (thr = 0; thr < num_workers; thr++) { + thr_args = &gbl_args->thread[thr]; + pktio = thr_args->num_pktio; + /* Cross connect rx to tx */ + tx_idx = gbl_args->dst_port[rx_idx]; + thr_args->pktio[pktio].rx_idx = rx_idx; + thr_args->pktio[pktio].tx_idx = tx_idx; + thr_args->num_pktio++; + + gbl_args->pktios[rx_idx].num_rx_thr++; + gbl_args->pktios[tx_idx].num_tx_thr++; + + rx_idx++; + if (rx_idx >= if_count) + rx_idx = 0; + } } } } @@ -930,37 +923,44 @@ static void bind_workers(void) static void bind_queues(void) { int num_workers; - int thr, pktio; + int thr, i;
num_workers = gbl_args->appl.num_workers;
+ printf("\nQueue binding (indexes)\n-----------------------\n"); + for (thr = 0; thr < num_workers; thr++) { int rx_idx, tx_idx; thread_args_t *thr_args = &gbl_args->thread[thr]; int num = thr_args->num_pktio;
- for (pktio = 0; pktio < num; pktio++) { + printf("worker %i\n", thr); + + for (i = 0; i < num; i++) { int rx_queue, tx_queue;
- rx_idx = thr_args->pktio[pktio].rx_idx; - tx_idx = thr_args->pktio[pktio].tx_idx; + rx_idx = thr_args->pktio[i].rx_idx; + tx_idx = thr_args->pktio[i].tx_idx; rx_queue = gbl_args->pktios[rx_idx].next_rx_queue; tx_queue = gbl_args->pktios[tx_idx].next_tx_queue;
- thr_args->pktio[pktio].rx_queue_idx = rx_queue; - thr_args->pktio[pktio].tx_queue_idx = tx_queue; - thr_args->pktio[pktio].pktin = + thr_args->pktio[i].rx_queue_idx = rx_queue; + thr_args->pktio[i].tx_queue_idx = tx_queue; + thr_args->pktio[i].pktin = gbl_args->pktios[rx_idx].pktin[rx_queue]; - thr_args->pktio[pktio].pktout = - gbl_args->pktios[tx_idx].pktout[tx_queue]; - thr_args->pktio[pktio].rx_queue = + thr_args->pktio[i].rx_queue = gbl_args->pktios[rx_idx].rx_q[rx_queue]; - thr_args->pktio[pktio].tx_queue = + thr_args->pktio[i].pktout = + gbl_args->pktios[tx_idx].pktout[tx_queue]; + thr_args->pktio[i].tx_queue = gbl_args->pktios[tx_idx].tx_q[tx_queue]; - thr_args->pktio[pktio].rx_pktio = - gbl_args->pktios[rx_idx].pktio; - thr_args->pktio[pktio].tx_pktio = - gbl_args->pktios[tx_idx].pktio; + + if (!gbl_args->appl.sched_mode) + printf(" rx: pktio %i, queue %i\n", + rx_idx, rx_queue); + + printf(" tx: pktio %i, queue %i\n", + tx_idx, tx_queue);
rx_queue++; tx_queue++; @@ -974,6 +974,8 @@ static void bind_queues(void) gbl_args->pktios[tx_idx].next_tx_queue = tx_queue; } } + + printf("\n"); }
static void init_port_lookup_tbl(void) @@ -1340,6 +1342,9 @@ int main(int argc, char *argv[]) /* Parse and store the application arguments */ parse_args(argc, argv, &gbl_args->appl);
+ if (sched_mode(gbl_args->appl.in_mode)) + gbl_args->appl.sched_mode = 1; + /* Print both system and application information */ print_info(NO_PATH(argv[0]), &gbl_args->appl);
@@ -1392,8 +1397,7 @@ int main(int argc, char *argv[]) num_rx = num_workers; num_tx = num_workers;
- if (gbl_args->appl.in_mode == DIRECT_RECV || - gbl_args->appl.in_mode == PLAIN_QUEUE) { + if (!gbl_args->appl.sched_mode) { /* A queue per assigned worker */ num_rx = gbl_args->pktios[i].num_rx_thr; num_tx = gbl_args->pktios[i].num_tx_thr; @@ -1431,8 +1435,7 @@ int main(int argc, char *argv[])
init_port_lookup_tbl();
- if (gbl_args->appl.in_mode == DIRECT_RECV || - gbl_args->appl.in_mode == PLAIN_QUEUE) + if (!gbl_args->appl.sched_mode) print_port_mapping();
memset(thread_tbl, 0, sizeof(thread_tbl));
commit 2b2ff3f281e17d19a3a7a00a3c8ca5460751cd6f Author: Petri Savolainen petri.savolainen@nokia.com Date: Thu Sep 15 12:50:39 2016 +0300
test: l2fwd: lookup table for sched mode
Replaced linear destination port search in scheduler mode with a lookup table. Table index is API provided pktio_index and data is application port index.
Signed-off-by: Petri Savolainen petri.savolainen@nokia.com Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/test/common_plat/performance/odp_l2fwd.c b/test/common_plat/performance/odp_l2fwd.c index 4020dea..a656547 100644 --- a/test/common_plat/performance/odp_l2fwd.c +++ b/test/common_plat/performance/odp_l2fwd.c @@ -54,6 +54,9 @@ /** Maximum number of pktio interfaces */ #define MAX_PKTIOS 8
+/** Maximum pktio index table size */ +#define MAX_PKTIO_INDEXES 1024 + /** * Packet input mode */ @@ -158,7 +161,7 @@ typedef struct { odph_ethaddr_t port_eth_addr[MAX_PKTIOS]; /** Table of dst ethernet addresses */ odph_ethaddr_t dst_eth_addr[MAX_PKTIOS]; - /** Table of dst ports */ + /** Table of dst ports. This is used by non-sched modes. */ int dst_port[MAX_PKTIOS]; /** Table of pktio handles */ struct { @@ -174,6 +177,12 @@ typedef struct { int next_rx_queue; int next_tx_queue; } pktios[MAX_PKTIOS]; + + /** Destination port lookup table. + * Table index is pktio_index of the API. This is used by the sched + * mode. */ + uint8_t dst_port_from_idx[MAX_PKTIO_INDEXES]; + } args_t;
/** Global pointer to args */ @@ -182,29 +191,6 @@ static args_t *gbl_args; static odp_barrier_t barrier;
/** - * Lookup the destination port for a given packet - * - * @param pkt ODP packet handle - */ -static inline int lookup_dest_port(odp_packet_t pkt) -{ - int i, src_idx; - odp_pktio_t pktio_src; - - pktio_src = odp_packet_input(pkt); - - for (src_idx = -1, i = 0; gbl_args->pktios[i].pktio - != ODP_PKTIO_INVALID; ++i) - if (gbl_args->pktios[i].pktio == pktio_src) - src_idx = i; - - if (src_idx == -1) - LOG_ABORT("Failed to determine pktio input\n"); - - return gbl_args->dst_port[src_idx]; -} - -/** * Drop packets which input parsing marked as containing errors. * * Frees packets with error and modifies pkt_tbl[] to only contain packets with @@ -301,8 +287,6 @@ static inline int event_queue_send(odp_queue_t queue, odp_packet_t *pkt_tbl, */ static int run_worker_sched_mode(void *arg) { - odp_event_t ev_tbl[MAX_PKT_BURST]; - odp_packet_t pkt_tbl[MAX_PKT_BURST]; int pkts; int thr; int dst_idx; @@ -345,8 +329,11 @@ static int run_worker_sched_mode(void *arg)
/* Loop packets */ while (!exit_threads) { + odp_event_t ev_tbl[MAX_PKT_BURST]; + odp_packet_t pkt_tbl[MAX_PKT_BURST]; int sent; unsigned tx_drops; + int src_idx;
pkts = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT, ev_tbl, MAX_PKT_BURST); @@ -373,7 +360,8 @@ static int run_worker_sched_mode(void *arg) }
/* packets from the same queue are from the same interface */ - dst_idx = lookup_dest_port(pkt_tbl[0]); + src_idx = odp_packet_input_index(pkt_tbl[0]); + dst_idx = gbl_args->dst_port_from_idx[src_idx]; fill_eth_addrs(pkt_tbl, pkts, dst_idx);
if (odp_unlikely(use_event_queue)) @@ -988,6 +976,26 @@ static void bind_queues(void) } }
+static void init_port_lookup_tbl(void) +{ + int rx_idx, if_count; + + if_count = gbl_args->appl.if_count; + + for (rx_idx = 0; rx_idx < if_count; rx_idx++) { + odp_pktio_t pktio = gbl_args->pktios[rx_idx].pktio; + int pktio_idx = odp_pktio_index(pktio); + int dst_port = find_dest_port(rx_idx); + + if (pktio_idx < 0 || pktio_idx >= MAX_PKTIO_INDEXES) { + LOG_ERR("Bad pktio index %i\n", pktio_idx); + exit(EXIT_FAILURE); + } + + gbl_args->dst_port_from_idx[pktio_idx] = dst_port; + } +} + /** * Prinf usage information */ @@ -1370,6 +1378,10 @@ int main(int argc, char *argv[]) } odp_pool_print(pool);
+ if (odp_pktio_max_index() >= MAX_PKTIO_INDEXES) + LOG_DBG("Warning: max pktio index (%u) is too large\n", + odp_pktio_max_index()); + bind_workers();
for (i = 0; i < if_count; ++i) { @@ -1417,6 +1429,8 @@ int main(int argc, char *argv[])
bind_queues();
+ init_port_lookup_tbl(); + if (gbl_args->appl.in_mode == DIRECT_RECV || gbl_args->appl.in_mode == PLAIN_QUEUE) print_port_mapping();
-----------------------------------------------------------------------
Summary of changes: test/common_plat/performance/odp_l2fwd.c | 295 +++++++++++++++++-------------- 1 file changed, 159 insertions(+), 136 deletions(-)
hooks/post-receive