This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, api-next has been updated
      via  f21b2d23874d517e147a3e096e99aa272b99dbe4 (commit)
      via  3ffa326e2dbdcfcc631c672bc5f443f4ca8fe911 (commit)
      via  66bc236b2059f554c378b90d5291ca4a8aecf5b4 (commit)
      via  22a659377f3b181e351428708708971c819582f7 (commit)
      via  458bc7ddf290806f0a95c76da6f6d4f3c8a07737 (commit)
      via  12229e66492f7947c180cf540aecd53096431bc8 (commit)
      via  7378c0ddeb8c34a38d0021230a65bccfc21a6f1b (commit)
     from  2961df923de0b3034c09a288b28394c1a2ea1a9d (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log -----------------------------------------------------------------
commit f21b2d23874d517e147a3e096e99aa272b99dbe4
Author: Yi He <yi.he@linaro.org>
Date:   Fri Dec 23 02:32:09 2016 +0000
linux-gen: sched: fix SP scheduler hang in process mode
The SP scheduler hangs in the process mode performance test because its global data structure was not created in a shared memory region.
Signed-off-by: Yi He <yi.he@linaro.org>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
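In process mode each worker runs as a separate process, so a static global lives in per-process memory and one worker's writes are invisible to the others; the fix moves the scheduler's global state into an odp_shm_t region instead. A condensed sketch of the pattern (identifiers taken from the diff below; struct contents abbreviated, and ODP_CACHE_LINE_SIZE is assumed to come in via the usual platform align headers):

#include <string.h>
#include <odp/api/shared_memory.h>

typedef struct {
	/* ... commands, priority queues, group state shared by workers ... */
	odp_shm_t shm; /* handle kept so term_global() can free the region */
} sched_global_t;

/* A pointer into shared memory replaces the former static instance */
static sched_global_t *sched_global;

static int init_global(void)
{
	odp_shm_t shm = odp_shm_reserve("sp_scheduler",
					sizeof(sched_global_t),
					ODP_CACHE_LINE_SIZE, 0);

	sched_global = odp_shm_addr(shm);
	if (sched_global == NULL)
		return -1;

	memset(sched_global, 0, sizeof(sched_global_t));
	sched_global->shm = shm;
	return 0;
}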
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index a592e5c..0fd4d87 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -9,6 +9,7 @@
 #include <odp/api/thread.h>
 #include <odp/api/time.h>
 #include <odp/api/schedule.h>
+#include <odp/api/shared_memory.h>
 #include <odp_schedule_if.h>
 #include <odp_debug_internal.h>
 #include <odp_align_internal.h>
@@ -107,6 +108,7 @@ typedef struct {
 	sched_cmd_t pktio_cmd[NUM_PKTIO];
 	prio_queue_t prio_queue[NUM_GROUP][NUM_PRIO];
 	sched_group_t sched_group;
+	odp_shm_t shm;
 } sched_global_t;
 
 typedef struct {
@@ -118,7 +120,7 @@ typedef struct {
 	int group[NUM_GROUP];
 } sched_local_t;
 
-static sched_global_t sched_global;
+static sched_global_t *sched_global;
 static __thread sched_local_t sched_local;
 
 static inline uint32_t index_to_ring_idx(int pktio, uint32_t index)
@@ -144,30 +146,44 @@ static inline uint32_t index_from_ring_idx(uint32_t *index, uint32_t ring_idx)
 static int init_global(void)
 {
 	int i, j;
-	sched_group_t *sched_group = &sched_global.sched_group;
+	odp_shm_t shm;
+	sched_group_t *sched_group = NULL;
 
 	ODP_DBG("Using SP scheduler\n");
 
-	memset(&sched_global, 0, sizeof(sched_global_t));
+	shm = odp_shm_reserve("sp_scheduler",
+			      sizeof(sched_global_t),
+			      ODP_CACHE_LINE_SIZE, 0);
+
+	sched_global = odp_shm_addr(shm);
+
+	if (sched_global == NULL) {
+		ODP_ERR("Schedule init: Shm reserve failed.\n");
+		return -1;
+	}
+
+	memset(sched_global, 0, sizeof(sched_global_t));
+	sched_global->shm = shm;
 
 	for (i = 0; i < NUM_QUEUE; i++) {
-		sched_global.queue_cmd[i].s.type = CMD_QUEUE;
-		sched_global.queue_cmd[i].s.index = i;
-		sched_global.queue_cmd[i].s.ring_idx = index_to_ring_idx(0, i);
+		sched_global->queue_cmd[i].s.type = CMD_QUEUE;
+		sched_global->queue_cmd[i].s.index = i;
+		sched_global->queue_cmd[i].s.ring_idx = index_to_ring_idx(0, i);
 	}
 
 	for (i = 0; i < NUM_PKTIO; i++) {
-		sched_global.pktio_cmd[i].s.type = CMD_PKTIO;
-		sched_global.pktio_cmd[i].s.index = i;
-		sched_global.pktio_cmd[i].s.ring_idx = index_to_ring_idx(1, i);
-		sched_global.pktio_cmd[i].s.prio = PKTIN_PRIO;
-		sched_global.pktio_cmd[i].s.group = GROUP_PKTIN;
+		sched_global->pktio_cmd[i].s.type = CMD_PKTIO;
+		sched_global->pktio_cmd[i].s.index = i;
+		sched_global->pktio_cmd[i].s.ring_idx = index_to_ring_idx(1, i);
+		sched_global->pktio_cmd[i].s.prio = PKTIN_PRIO;
+		sched_global->pktio_cmd[i].s.group = GROUP_PKTIN;
 	}
 
 	for (i = 0; i < NUM_GROUP; i++)
 		for (j = 0; j < NUM_PRIO; j++)
-			ring_init(&sched_global.prio_queue[i][j].ring);
+			ring_init(&sched_global->prio_queue[i][j].ring);
 
+	sched_group = &sched_global->sched_group;
 	odp_ticketlock_init(&sched_group->s.lock);
 
 	for (i = 0; i < NUM_THREAD; i++)
@@ -201,16 +217,22 @@ static int init_local(void)
 
 static int term_global(void)
 {
-	int qi;
+	int qi, ret = 0;
 
 	for (qi = 0; qi < NUM_QUEUE; qi++) {
-		if (sched_global.queue_cmd[qi].s.init) {
+		if (sched_global->queue_cmd[qi].s.init) {
 			/* todo: dequeue until empty ? */
 			sched_cb_queue_destroy_finalize(qi);
 		}
 	}
 
-	return 0;
+	ret = odp_shm_free(sched_global->shm);
+	if (ret < 0) {
+		ODP_ERR("Shm free failed for sp_scheduler");
+		ret = -1;
+	}
+
+	return ret;
 }
 
 static int term_local(void)
@@ -266,7 +288,7 @@ static void remove_group(sched_group_t *sched_group, int thr, int group)
 
 static int thr_add(odp_schedule_group_t group, int thr)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < 0 || group >= NUM_GROUP)
 		return -1;
@@ -291,7 +313,7 @@ static int thr_add(odp_schedule_group_t group, int thr)
 
 static int thr_rem(odp_schedule_group_t group, int thr)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < 0 || group >= NUM_GROUP)
 		return -1;
@@ -319,7 +341,7 @@ static int num_grps(void)
 
 static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 	odp_schedule_group_t group = sched_param->group;
 	int prio = 0;
 
@@ -332,18 +354,18 @@ static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
 	if (sched_param->prio > 0)
 		prio = LOWEST_QUEUE_PRIO;
 
-	sched_global.queue_cmd[qi].s.prio = prio;
-	sched_global.queue_cmd[qi].s.group = group;
-	sched_global.queue_cmd[qi].s.init = 1;
+	sched_global->queue_cmd[qi].s.prio = prio;
+	sched_global->queue_cmd[qi].s.group = group;
+	sched_global->queue_cmd[qi].s.init = 1;
 
 	return 0;
 }
 
 static void destroy_queue(uint32_t qi)
 {
-	sched_global.queue_cmd[qi].s.prio = 0;
-	sched_global.queue_cmd[qi].s.group = 0;
-	sched_global.queue_cmd[qi].s.init = 0;
+	sched_global->queue_cmd[qi].s.prio = 0;
+	sched_global->queue_cmd[qi].s.group = 0;
+	sched_global->queue_cmd[qi].s.init = 0;
 }
 
 static inline void add_tail(sched_cmd_t *cmd)
@@ -353,8 +375,7 @@ static inline void add_tail(sched_cmd_t *cmd)
 	int prio = cmd->s.prio;
 	uint32_t idx = cmd->s.ring_idx;
 
-	prio_queue = &sched_global.prio_queue[group][prio];
-
+	prio_queue = &sched_global->prio_queue[group][prio];
 	ring_enq(&prio_queue->ring, RING_MASK, idx);
 }
 
@@ -364,8 +385,7 @@ static inline sched_cmd_t *rem_head(int group, int prio)
 	uint32_t ring_idx, index;
 	int pktio;
 
-	prio_queue = &sched_global.prio_queue[group][prio];
-
+	prio_queue = &sched_global->prio_queue[group][prio];
 	ring_idx = ring_deq(&prio_queue->ring, RING_MASK);
 
 	if (ring_idx == RING_EMPTY)
@@ -374,16 +394,16 @@ static inline sched_cmd_t *rem_head(int group, int prio)
 	pktio = index_from_ring_idx(&index, ring_idx);
 
 	if (pktio)
-		return &sched_global.pktio_cmd[index];
+		return &sched_global->pktio_cmd[index];
 
-	return &sched_global.queue_cmd[index];
+	return &sched_global->queue_cmd[index];
 }
 
 static int sched_queue(uint32_t qi)
 {
 	sched_cmd_t *cmd;
 
-	cmd = &sched_global.queue_cmd[qi];
+	cmd = &sched_global->queue_cmd[qi];
 	add_tail(cmd);
 
 	return 0;
@@ -414,7 +434,7 @@ static void pktio_start(int pktio_index, int num, int pktin_idx[])
 	ODP_DBG("pktio index: %i, %i pktin queues %i\n",
 		pktio_index, num, pktin_idx[0]);
 
-	cmd = &sched_global.pktio_cmd[pktio_index];
+	cmd = &sched_global->pktio_cmd[pktio_index];
 
 	if (num > NUM_PKTIN)
 		ODP_ABORT("Supports only %i pktin queues per interface\n",
@@ -432,7 +452,7 @@ static inline sched_cmd_t *sched_cmd(void)
 {
 	int prio, i;
 	int thr = sched_local.thr_id;
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 	thr_group_t *thr_group = &sched_group->s.thr[thr];
 	uint32_t gen_cnt;
 
@@ -606,7 +626,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
 					const odp_thrmask_t *thrmask)
 {
 	odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 	int i;
 
 	odp_ticketlock_lock(&sched_group->s.lock);
@@ -637,7 +657,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
 
 static int schedule_group_destroy(odp_schedule_group_t group)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < NUM_STATIC_GROUP || group >= NUM_GROUP)
 		return -1;
@@ -660,7 +680,7 @@ static int schedule_group_destroy(odp_schedule_group_t group)
 static odp_schedule_group_t schedule_group_lookup(const char *name)
 {
 	odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 	int i;
 
 	odp_ticketlock_lock(&sched_group->s.lock);
@@ -681,7 +701,7 @@ static int schedule_group_join(odp_schedule_group_t group,
 			       const odp_thrmask_t *thrmask)
 {
 	int thr;
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < 0 || group >= NUM_GROUP)
 		return -1;
@@ -713,7 +733,7 @@ static int schedule_group_leave(odp_schedule_group_t group,
 				const odp_thrmask_t *thrmask)
 {
 	int thr;
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 	odp_thrmask_t *all = &sched_group->s.group[GROUP_ALL].mask;
 	odp_thrmask_t not;
 
@@ -747,7 +767,7 @@ static int schedule_group_leave(odp_schedule_group_t group,
 static int schedule_group_thrmask(odp_schedule_group_t group,
 				  odp_thrmask_t *thrmask)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < 0 || group >= NUM_GROUP)
 		return -1;
@@ -769,7 +789,7 @@ static int schedule_group_thrmask(odp_schedule_group_t group,
 static int schedule_group_info(odp_schedule_group_t group,
 			       odp_schedule_group_info_t *info)
 {
-	sched_group_t *sched_group = &sched_global.sched_group;
+	sched_group_t *sched_group = &sched_global->sched_group;
 
 	if (group < 0 || group >= NUM_GROUP)
 		return -1;
commit 3ffa326e2dbdcfcc631c672bc5f443f4ca8fe911
Merge: 2961df9 66bc236
Author: Maxim Uvarov <maxim.uvarov@linaro.org>
Date:   Thu Feb 9 18:56:42 2017 +0300
Merge branch 'master' into api-next
commit 66bc236b2059f554c378b90d5291ca4a8aecf5b4
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Wed Feb 8 14:19:41 2017 +0200
test: l2fwd: use packet_data
l2fwd can assume that all packets are Ethernet, and odp_packet_data() points to the first byte of the frame directly after packet input.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
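The reasoning, condensed: odp_packet_data() returns the start of the packet data, which directly after input is the first byte of the received frame, so when all packets are known to be Ethernet the header can be rewritten without the odp_packet_has_eth() check or odp_packet_l2_ptr() lookup. A sketch, valid only under that assumption:

/* Safe only while the data pointer still points at the frame start,
 * i.e. no headroom push/pull since packet input */
odph_ethhdr_t *eth = odp_packet_data(pkt);

if (gbl_args->appl.src_change)
	eth->src = gbl_args->port_eth_addr[dst_port]; /* rewrite src MAC */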
diff --git a/test/common_plat/performance/odp_l2fwd.c b/test/common_plat/performance/odp_l2fwd.c
index 9864c64..8f5c5e1 100644
--- a/test/common_plat/performance/odp_l2fwd.c
+++ b/test/common_plat/performance/odp_l2fwd.c
@@ -241,15 +241,13 @@ static inline void fill_eth_addrs(odp_packet_t pkt_tbl[],
 
 		odp_packet_prefetch(pkt, 0, ODPH_ETHHDR_LEN);
 
-		if (odp_packet_has_eth(pkt)) {
-			eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+		eth = odp_packet_data(pkt);
 
-			if (gbl_args->appl.src_change)
-				eth->src = gbl_args->port_eth_addr[dst_port];
+		if (gbl_args->appl.src_change)
+			eth->src = gbl_args->port_eth_addr[dst_port];
 
-			if (gbl_args->appl.dst_change)
-				eth->dst = gbl_args->dst_eth_addr[dst_port];
-		}
+		if (gbl_args->appl.dst_change)
+			eth->dst = gbl_args->dst_eth_addr[dst_port];
 	}
 }
commit 22a659377f3b181e351428708708971c819582f7
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Wed Feb 8 14:19:40 2017 +0200
test: l2fwd script: limit number of generator cpus
During 'make check', run the generator on up to four CPUs. Running the generator on all CPUs was overkill compared to l2fwd on two CPUs. The generator and l2fwd still share CPUs (on odp-linux), as cpumask_default_worker() returns the same cpumask for both.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/test/common_plat/performance/odp_l2fwd_run.sh b/test/common_plat/performance/odp_l2fwd_run.sh
index 757cf53..dd42ede 100755
--- a/test/common_plat/performance/odp_l2fwd_run.sh
+++ b/test/common_plat/performance/odp_l2fwd_run.sh
@@ -66,12 +66,14 @@ run_l2fwd()
 		exit 1
 	fi
 
-	#@todo: limit odp_generator to cores
-	#https://bugs.linaro.org/show_bug.cgi?id=1398
+	# Max 4 workers
+	# @todo: ensure that generator and l2fwd workers are not allocated to
+	# the same CPUs
 	(odp_generator${EXEEXT} --interval $FLOOD_MODE -I $IF0 \
 			--srcip 192.168.0.1 --dstip 192.168.0.2 \
-			-m u 2>&1 > /dev/null) \
+			-m u -w 4 2>&1 > /dev/null) \
 			2>&1 > /dev/null &
+	GEN_PID=$!
 
 	# this just turns off output buffering so that you still get periodic
@@ -82,6 +84,8 @@ run_l2fwd()
 		STDBUF=
 	fi
 	LOG=odp_l2fwd_tmp.log
+
+	# Max 2 workers
 	$STDBUF odp_l2fwd${EXEEXT} -i $IF1,$IF2 -m 0 -t 30 -c 2 | tee $LOG
 	ret=$?
commit 458bc7ddf290806f0a95c76da6f6d4f3c8a07737
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Wed Feb 8 14:19:39 2017 +0200
test: generator: various improvements
The user may select either the number of worker threads (-w) or a cpumask (-c), but not both, to limit the number of worker threads.

Increased the pool size, since many threads can empty the pool quickly, causing some threads to give up (the first time they see the pool empty).
Added EXAMPLE_ERR() before abort() to make debugging easier.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-and-tested-by: Bogdan Pricope <bogdan.pricope@linaro.org>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
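The new worker selection in main(), condensed (names from the patch; the final else branch is an assumption here, since the hunk below does not show the default path in full):

int num_workers = MAX_WORKERS;
odp_cpumask_t cpumask;

if (args->appl.num_workers) {
	/* -w option: number of workers */
	num_workers = odp_cpumask_default_worker(&cpumask,
						 args->appl.num_workers);
} else if (args->appl.mask) {
	/* -c option: cpumask string */
	odp_cpumask_from_str(&cpumask, args->appl.mask);
	num_workers = odp_cpumask_count(&cpumask);
} else {
	/* assumed default: let ODP pick up to MAX_WORKERS worker cpus */
	num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
}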
diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c
index 6ac8f2d..8062d87 100644
--- a/example/generator/odp_generator.c
+++ b/example/generator/odp_generator.c
@@ -22,10 +22,10 @@
 
 #include <odp/helper/odph_api.h>
 
-#define MAX_WORKERS 32 /**< max number of works */
-#define SHM_PKT_POOL_SIZE (512*2048) /**< pkt pool size */
-#define SHM_PKT_POOL_BUF_SIZE 1856 /**< pkt pool buf size */
-#define DEFAULT_PKT_INTERVAL 1000 /**< interval btw each pkt */
+#define MAX_WORKERS 32 /* Max number of workers */
+#define POOL_NUM_PKT 2048 /* Number of packets in packet pool */
+#define POOL_PKT_LEN 1856 /* Max packet length */
+#define DEFAULT_PKT_INTERVAL 1000 /* Interval between each packet */
 
 #define APPL_MODE_UDP 0 /**< UDP mode */
 #define APPL_MODE_PING 1 /**< ping mode */
@@ -41,7 +41,7 @@
  * Parsed command line application arguments
  */
 typedef struct {
-	int cpu_count; /**< system CPU count */
+	int num_workers; /**< Number of worker thread */
 	const char *mask; /**< CPU mask */
 	int if_count; /**< Number of interfaces to be used */
 	char **if_names; /**< Array of pointers to interface names */
@@ -383,14 +383,17 @@ static int gen_send_thread(void *arg)
 		    (unsigned int)args->appl.number)
 			break;
 
+		pkt = ODP_PACKET_INVALID;
+
 		if (args->appl.mode == APPL_MODE_UDP)
 			pkt = pack_udp_pkt(thr_args->pool);
 		else if (args->appl.mode == APPL_MODE_PING)
 			pkt = pack_icmp_pkt(thr_args->pool);
-		else
-			pkt = ODP_PACKET_INVALID;
 
-		if (!odp_packet_is_valid(pkt)) {
+		if (pkt == ODP_PACKET_INVALID) {
+			/* Thread gives up as soon as it sees the pool empty.
+			 * Depending on pool size and transmit latency, it may
+			 * be normal that pool gets empty sometimes. */
 			EXAMPLE_ERR(" [%2i] alloc_single failed\n", thr);
 			break;
 		}
@@ -671,13 +674,17 @@ int main(int argc, char *argv[])
 	/* Print both system and application information */
 	print_info(NO_PATH(argv[0]), &args->appl);
 
-	/* Default to system CPU count unless user specified */
+	/* Default to max number of workers, unless user specified number of
+	 * workers or cpumask */
 	num_workers = MAX_WORKERS;
-	if (args->appl.cpu_count)
-		num_workers = args->appl.cpu_count;
-	num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
-	if (args->appl.mask) {
+
+	if (args->appl.num_workers) {
+		/* -w option: number of workers */
+		num_workers = args->appl.num_workers;
+		num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
+	} else if (args->appl.mask) {
+		/* -c option: cpumask */
 		odp_cpumask_from_str(&cpumask, args->appl.mask);
 		num_workers = odp_cpumask_count(&cpumask);
 	}
@@ -700,9 +707,9 @@ int main(int argc, char *argv[])
 
 	/* Create packet pool */
 	odp_pool_param_init(&params);
-	params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
-	params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
-	params.pkt.num = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE;
+	params.pkt.seg_len = POOL_PKT_LEN;
+	params.pkt.len = POOL_PKT_LEN;
+	params.pkt.num = POOL_NUM_PKT;
 	params.type = ODP_POOL_PACKET;
 
 	pool = odp_pool_create("packet_pool", &params);
@@ -763,18 +770,24 @@ int main(int argc, char *argv[])
 		odp_cpumask_set(&cpu_mask, cpu_first);
 
 		tq = odp_queue_create("", NULL);
-		if (tq == ODP_QUEUE_INVALID)
+		if (tq == ODP_QUEUE_INVALID) {
+			EXAMPLE_ERR("queue_create failed\n");
 			abort();
+		}
 		args->thread[1].pktio_dev = args->appl.if_names[0];
 		args->thread[1].pool = pool;
 		args->thread[1].tp = tp;
 		args->thread[1].tq = tq;
 		args->thread[1].tim = odp_timer_alloc(tp, tq, NULL);
-		if (args->thread[1].tim == ODP_TIMER_INVALID)
+		if (args->thread[1].tim == ODP_TIMER_INVALID) {
+			EXAMPLE_ERR("timer_alloc failed\n");
 			abort();
+		}
 		args->thread[1].tmo_ev = odp_timeout_alloc(tmop);
-		if (args->thread[1].tmo_ev == ODP_TIMEOUT_INVALID)
+		if (args->thread[1].tmo_ev == ODP_TIMEOUT_INVALID) {
+			EXAMPLE_ERR("timeout_alloc failed\n");
 			abort();
+		}
 		args->thread[1].mode = args->appl.mode;
 
 		memset(&thr_params, 0, sizeof(thr_params));
@@ -786,18 +799,24 @@ int main(int argc, char *argv[])
 		odph_odpthreads_create(&thread_tbl[1], &cpu_mask, &thr_params);
 
 		tq = odp_queue_create("", NULL);
-		if (tq == ODP_QUEUE_INVALID)
+		if (tq == ODP_QUEUE_INVALID) {
+			EXAMPLE_ERR("queue_create failed\n");
 			abort();
+		}
 		args->thread[0].pktio_dev = args->appl.if_names[0];
 		args->thread[0].pool = pool;
 		args->thread[0].tp = tp;
 		args->thread[0].tq = tq;
 		args->thread[0].tim = odp_timer_alloc(tp, tq, NULL);
-		if (args->thread[0].tim == ODP_TIMER_INVALID)
+		if (args->thread[0].tim == ODP_TIMER_INVALID) {
+			EXAMPLE_ERR("timer_alloc failed\n");
 			abort();
+		}
 		args->thread[0].tmo_ev = odp_timeout_alloc(tmop);
-		if (args->thread[0].tmo_ev == ODP_TIMEOUT_INVALID)
+		if (args->thread[0].tmo_ev == ODP_TIMEOUT_INVALID) {
+			EXAMPLE_ERR("timeout_alloc failed\n");
 			abort();
+		}
 		args->thread[0].mode = args->appl.mode;
 		cpu_next = odp_cpumask_next(&cpumask, cpu_first);
 		odp_cpumask_zero(&cpu_mask);
@@ -819,17 +838,23 @@ int main(int argc, char *argv[])
 
 			args->thread[i].pktio_dev = args->appl.if_names[if_idx];
 			tq = odp_queue_create("", NULL);
-			if (tq == ODP_QUEUE_INVALID)
+			if (tq == ODP_QUEUE_INVALID) {
+				EXAMPLE_ERR("queue_create failed\n");
 				abort();
+			}
 			args->thread[i].pool = pool;
 			args->thread[i].tp = tp;
 			args->thread[i].tq = tq;
 			args->thread[i].tim = odp_timer_alloc(tp, tq, NULL);
-			if (args->thread[i].tim == ODP_TIMER_INVALID)
+			if (args->thread[i].tim == ODP_TIMER_INVALID) {
+				EXAMPLE_ERR("timer_alloc failed\n");
 				abort();
+			}
 			args->thread[i].tmo_ev = odp_timeout_alloc(tmop);
-			if (args->thread[i].tmo_ev == ODP_TIMEOUT_INVALID)
+			if (args->thread[i].tmo_ev == ODP_TIMEOUT_INVALID) {
+				EXAMPLE_ERR("timeout_alloc failed\n");
 				abort();
+			}
 			args->thread[i].mode = args->appl.mode;
 
 			if (args->appl.mode == APPL_MODE_UDP) {
@@ -955,7 +980,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
 
 		switch (opt) {
 		case 'w':
-			appl_args->cpu_count = atoi(optarg);
+			appl_args->num_workers = atoi(optarg);
 			break;
 		case 'c':
 			appl_args->mask = optarg;
commit 12229e66492f7947c180cf540aecd53096431bc8
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Wed Feb 8 14:19:38 2017 +0200
validation: packet: print reason for suite init failure
Knowing the reason for suite init function failure helps in debugging.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/test/common_plat/validation/api/packet/packet.c b/test/common_plat/validation/api/packet/packet.c
index fa5206f..e3d28f6 100644
--- a/test/common_plat/validation/api/packet/packet.c
+++ b/test/common_plat/validation/api/packet/packet.c
@@ -110,8 +110,10 @@ int packet_suite_init(void)
 	uint8_t data = 0;
 	uint32_t i;
 
-	if (odp_pool_capability(&capa) < 0)
+	if (odp_pool_capability(&capa) < 0) {
+		printf("pool_capability failed\n");
 		return -1;
+	}
 
 	/* Pick a typical packet size and decrement it to the single segment
 	 * limit if needed (min_seg_len maybe equal to max_len
@@ -136,14 +138,17 @@ int packet_suite_init(void)
 	params.pkt.uarea_size = sizeof(struct udata_struct);
 
 	packet_pool = odp_pool_create("packet_pool", &params);
-	if (packet_pool == ODP_POOL_INVALID)
+	if (packet_pool == ODP_POOL_INVALID) {
+		printf("pool_create failed: 1\n");
 		return -1;
+	}
 
 	params.pkt.uarea_size = 0;
 	packet_pool_no_uarea = odp_pool_create("packet_pool_no_uarea",
 					       &params);
 	if (packet_pool_no_uarea == ODP_POOL_INVALID) {
 		odp_pool_destroy(packet_pool);
+		printf("pool_create failed: 2\n");
 		return -1;
 	}
 
@@ -154,6 +159,7 @@ int packet_suite_init(void)
 	if (packet_pool_double_uarea == ODP_POOL_INVALID) {
 		odp_pool_destroy(packet_pool_no_uarea);
 		odp_pool_destroy(packet_pool);
+		printf("pool_create failed: 3\n");
 		return -1;
 	}
 
@@ -174,8 +180,10 @@ int packet_suite_init(void)
 	} while (segmented_test_packet == ODP_PACKET_INVALID);
 
 	if (odp_packet_is_valid(test_packet) == 0 ||
-	    odp_packet_is_valid(segmented_test_packet) == 0)
+	    odp_packet_is_valid(segmented_test_packet) == 0) {
+		printf("packet_is_valid failed\n");
 		return -1;
+	}
 
 	segmentation_supported = odp_packet_is_segmented(segmented_test_packet);
 
@@ -187,16 +195,21 @@ int packet_suite_init(void)
 
 	udat = odp_packet_user_area(test_packet);
 	udat_size = odp_packet_user_area_size(test_packet);
-	if (!udat || udat_size != sizeof(struct udata_struct))
+	if (!udat || udat_size != sizeof(struct udata_struct)) {
+		printf("packet_user_area failed: 1\n");
 		return -1;
+	}
 
 	odp_pool_print(packet_pool);
 	memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
 
 	udat = odp_packet_user_area(segmented_test_packet);
 	udat_size = odp_packet_user_area_size(segmented_test_packet);
-	if (udat == NULL || udat_size != sizeof(struct udata_struct))
+	if (udat == NULL || udat_size != sizeof(struct udata_struct)) {
+		printf("packet_user_area failed: 2\n");
 		return -1;
+	}
+	memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
 
 	return 0;
commit 7378c0ddeb8c34a38d0021230a65bccfc21a6f1b
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Wed Feb 8 14:19:37 2017 +0200
test: l2fwd: add pktio driver print out
Print out the pktio driver name at startup. The driver name (e.g. dpdk or netmap) helps in checking that the correct pktio device was started.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
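The mechanism, condensed from the diff below: odp_pktio_info() fills an odp_pktio_info_t whose drv_name field names the driver that odp_pktio_open() bound to the device, which is what the new printout reports.

odp_pktio_info_t info;

if (odp_pktio_info(pktio, &info)) {
	LOG_ERR("Error: pktio info failed %s\n", dev);
	return -1;
}

printf("created pktio %" PRIu64 ", dev: %s, drv: %s\n",
       odp_pktio_to_u64(pktio), dev, info.drv_name);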
diff --git a/test/common_plat/performance/odp_l2fwd.c b/test/common_plat/performance/odp_l2fwd.c
index 54dc4cf..9864c64 100644
--- a/test/common_plat/performance/odp_l2fwd.c
+++ b/test/common_plat/performance/odp_l2fwd.c
@@ -603,6 +603,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
 	odp_pktio_op_mode_t mode_rx;
 	odp_pktio_op_mode_t mode_tx;
 	pktin_mode_t in_mode = gbl_args->appl.in_mode;
+	odp_pktio_info_t info;
 
 	odp_pktio_param_init(&pktio_param);
 
@@ -620,8 +621,13 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
 		return -1;
 	}
 
-	printf("created pktio %" PRIu64 " (%s)\n",
-	       odp_pktio_to_u64(pktio), dev);
+	if (odp_pktio_info(pktio, &info)) {
+		LOG_ERR("Error: pktio info failed %s\n", dev);
+		return -1;
+	}
+
+	printf("created pktio %" PRIu64 ", dev: %s, drv: %s\n",
+	       odp_pktio_to_u64(pktio), dev, info.drv_name);
 
 	if (odp_pktio_capability(pktio, &capa)) {
 		LOG_ERR("Error: capability query failed %s\n", dev);
-----------------------------------------------------------------------
Summary of changes:
 example/generator/odp_generator.c               |  77 ++++++++++++------
 platform/linux-generic/odp_schedule_sp.c        | 100 ++++++++++++++----------
 test/common_plat/performance/odp_l2fwd.c        |  22 +++---
 test/common_plat/performance/odp_l2fwd_run.sh   |  10 ++-
 test/common_plat/validation/api/packet/packet.c |  23 ++++--
 5 files changed, 149 insertions(+), 83 deletions(-)
hooks/post-receive