This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, master has been updated
       via  025af44ec6fec843d848215670bb54f578f66e52 (commit)
       via  8e52887f394b2b42b9ce6511b379ed32328403e3 (commit)
       via  2efa9f9e48d0a64c56c995e7d2a59ccc546d94f5 (commit)
       via  c1ad4f948f12f3748d76af679922236d5cf1b61e (commit)
       via  f737af0943b09f756e3b40290ad2e0c8f8101f00 (commit)
       via  0307f9337d1061bbd9afc415a592fcb3c8c57c21 (commit)
       via  f13f74ec048ecf2252d6fef68611e813bc294305 (commit)
       via  f294fd186fa42b9c5dfa33820c3992230708a2ea (commit)
       via  e0934ef22cb60d1b27766c1dfea61afc93109e8b (commit)
       via  5361c69b7a4ecd50c1b0c30af6d1f08fea28718e (commit)
      from  84d072b5c7327437cae7a13333dd35ab9777ce14 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log -----------------------------------------------------------------
commit 025af44ec6fec843d848215670bb54f578f66e52
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Wed Feb 21 12:20:06 2018 +0200
test: l2fwd: increase num pkt and honour pool capability
Increase the number of packets to 16k, as 8k packets limits throughput in 40 Gbit testing. Also limit the packet count and length to the pool capability maximums when needed.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
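Note: the capability-clamping pattern this patch applies generalizes to any ODP packet pool setup. A minimal sketch of the idea (the helper name is illustrative, error handling reduced to an invalid handle; the zero checks mirror the patch, which treats a zero capability value as "no limit"):

    #include <odp_api.h>

    static odp_pool_t create_clamped_pkt_pool(uint32_t pkt_len, uint32_t pkt_num)
    {
        odp_pool_capability_t capa;
        odp_pool_param_t params;

        if (odp_pool_capability(&capa))
            return ODP_POOL_INVALID;

        /* Clamp requested values to what the implementation supports */
        if (capa.pkt.max_len && pkt_len > capa.pkt.max_len)
            pkt_len = capa.pkt.max_len;

        if (capa.pkt.max_num && pkt_num > capa.pkt.max_num)
            pkt_num = capa.pkt.max_num;

        odp_pool_param_init(&params);
        params.pkt.seg_len = pkt_len;
        params.pkt.len     = pkt_len;
        params.pkt.num     = pkt_num;
        params.type        = ODP_POOL_PACKET;

        return odp_pool_create("packet pool", &params);
    }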
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index 3ae47173..2859000d 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -28,10 +28,10 @@
 #define MAX_WORKERS 32

 /* Size of the shared memory block */
-#define SHM_PKT_POOL_SIZE 8192
+#define POOL_PKT_NUM (16 * 1024)

 /* Buffer size of the packet pool buffer */
-#define SHM_PKT_POOL_BUF_SIZE 1856
+#define POOL_PKT_LEN 1536

 /* Maximum number of packet in a burst */
 #define MAX_PKT_BURST 32

@@ -663,7 +663,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
     odp_pktio_t pktio;
     odp_pktio_param_t pktio_param;
     odp_schedule_sync_t sync_mode;
-    odp_pktio_capability_t capa;
+    odp_pktio_capability_t pktio_capa;
     odp_pktio_config_t config;
     odp_pktin_queue_param_t pktin_param;
     odp_pktout_queue_param_t pktout_param;

@@ -699,8 +699,8 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
     if (gbl_args->appl.verbose)
         odp_pktio_print(pktio);

-    if (odp_pktio_capability(pktio, &capa)) {
-        LOG_ERR("Error: capability query failed %s\n", dev);
+    if (odp_pktio_capability(pktio, &pktio_capa)) {
+        LOG_ERR("Error: pktio capability query failed %s\n", dev);
         return -1;
     }

@@ -739,17 +739,17 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
         pktin_param.queue_param.sched.group = group;
     }

-    if (num_rx > (int)capa.max_input_queues) {
+    if (num_rx > (int)pktio_capa.max_input_queues) {
         printf("Sharing %i input queues between %i workers\n",
-               capa.max_input_queues, num_rx);
-        num_rx = capa.max_input_queues;
+               pktio_capa.max_input_queues, num_rx);
+        num_rx = pktio_capa.max_input_queues;
         mode_rx = ODP_PKTIO_OP_MT;
     }

-    if (num_tx > (int)capa.max_output_queues) {
+    if (num_tx > (int)pktio_capa.max_output_queues) {
         printf("Sharing %i output queues between %i workers\n",
-               capa.max_output_queues, num_tx);
-        num_tx = capa.max_output_queues;
+               pktio_capa.max_output_queues, num_tx);
+        num_tx = pktio_capa.max_output_queues;
         mode_tx = ODP_PKTIO_OP_MT;
     }

@@ -1446,6 +1446,8 @@ int main(int argc, char *argv[])
     int num_groups;
     odp_schedule_group_t group[MAX_PKTIOS];
     odp_init_t init;
+    odp_pool_capability_t pool_capa;
+    uint32_t pkt_len, pkt_num;

     odp_init_param_init(&init);

@@ -1525,11 +1527,25 @@ int main(int argc, char *argv[])
         exit(EXIT_FAILURE);
     }

+    if (odp_pool_capability(&pool_capa)) {
+        LOG_ERR("Error: pool capability failed\n");
+        return -1;
+    }
+
+    pkt_len = POOL_PKT_LEN;
+    pkt_num = POOL_PKT_NUM;
+
+    if (pool_capa.pkt.max_len && pkt_len > pool_capa.pkt.max_len)
+        pkt_len = pool_capa.pkt.max_len;
+
+    if (pool_capa.pkt.max_num && pkt_num > pool_capa.pkt.max_num)
+        pkt_num = pool_capa.pkt.max_num;
+
     /* Create packet pool */
     odp_pool_param_init(&params);
-    params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
-    params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
-    params.pkt.num = SHM_PKT_POOL_SIZE;
+    params.pkt.seg_len = pkt_len;
+    params.pkt.len = pkt_len;
+    params.pkt.num = pkt_num;
     params.type = ODP_POOL_PACKET;

     pool = odp_pool_create("packet pool", &params);
commit 8e52887f394b2b42b9ce6511b379ed32328403e3
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Wed Feb 21 12:04:19 2018 +0200
test: l2fwd: remove unnecessary doxygen tags
Doxygen documentation is not generated from this file, so remove the unnecessary and incomplete doxygen tags.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index 26051c2d..3ae47173 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -6,13 +6,7 @@

 #include "config.h"

-/**
- * @file
- *
- * @example odp_l2fwd.c  ODP basic forwarding application
- */
-
-/** enable strtok */
+/* enable strtok */
 #ifndef _GNU_SOURCE
 #define _GNU_SOURCE
 #endif

@@ -30,38 +24,28 @@
 #include <odp_api.h>
 #include <odp/helper/odph_api.h>

-/** @def MAX_WORKERS
- * @brief Maximum number of worker threads
- */
+/* Maximum number of worker threads */
 #define MAX_WORKERS 32

-/** @def SHM_PKT_POOL_SIZE
- * @brief Size of the shared memory block
- */
+/* Size of the shared memory block */
 #define SHM_PKT_POOL_SIZE 8192

-/** @def SHM_PKT_POOL_BUF_SIZE
- * @brief Buffer size of the packet pool buffer
- */
+/* Buffer size of the packet pool buffer */
 #define SHM_PKT_POOL_BUF_SIZE 1856

-/** @def MAX_PKT_BURST
- * @brief Maximum number of packet in a burst
- */
+/* Maximum number of packet in a burst */
 #define MAX_PKT_BURST 32

-/** Maximum number of pktio queues per interface */
+/* Maximum number of pktio queues per interface */
 #define MAX_QUEUES 32

-/** Maximum number of pktio interfaces */
+/* Maximum number of pktio interfaces */
 #define MAX_PKTIOS 8

-/** Maximum pktio index table size */
+/* Maximum pktio index table size */
 #define MAX_PKTIO_INDEXES 1024

-/**
- * Packet input mode
- */
+/* Packet input mode */
 typedef enum pktin_mode_t {
     DIRECT_RECV,
     PLAIN_QUEUE,

@@ -70,9 +54,7 @@ typedef enum pktin_mode_t {
     SCHED_ORDERED,
 } pktin_mode_t;

-/**
- * Packet output modes
- */
+/* Packet output modes */
 typedef enum pktout_mode_t {
     PKTOUT_DIRECT,
     PKTOUT_QUEUE

@@ -85,60 +67,56 @@ static inline int sched_mode(pktin_mode_t in_mode)
            (in_mode == SCHED_ORDERED);
 }

-/** Get rid of path in filename - only for unix-type paths using '/' */
+/* Get rid of path in filename - only for unix-type paths using '/' */
 #define NO_PATH(file_name) (strrchr((file_name), '/') ? \
                             strrchr((file_name), '/') + 1 : (file_name))

-/**
+/*
  * Parsed command line application arguments
  */
 typedef struct {
-    int extra_check;        /**< Some extra checks have been enabled */
+    int extra_check;        /* Some extra checks have been enabled */
     int cpu_count;
-    int if_count;           /**< Number of interfaces to be used */
-    int addr_count;         /**< Number of dst addresses to be used */
-    int num_workers;        /**< Number of worker threads */
-    char **if_names;        /**< Array of pointers to interface names */
-    odph_ethaddr_t addrs[MAX_PKTIOS]; /**< Array of dst addresses */
-    pktin_mode_t in_mode;   /**< Packet input mode */
-    pktout_mode_t out_mode; /**< Packet output mode */
-    int time;               /**< Time in seconds to run. */
-    int accuracy;           /**< Number of seconds to get and print statistics */
-    char *if_str;           /**< Storage for interface names */
-    int dst_change;         /**< Change destination eth addresses */
-    int src_change;         /**< Change source eth addresses */
-    int error_check;        /**< Check packet errors */
-    int chksum;             /**< Checksum offload */
-    int sched_mode;         /**< Scheduler mode */
-    int num_groups;         /**< Number of scheduling groups */
-    int verbose;            /**< Verbose output */
+    int if_count;           /* Number of interfaces to be used */
+    int addr_count;         /* Number of dst addresses to be used */
+    int num_workers;        /* Number of worker threads */
+    char **if_names;        /* Array of pointers to interface names */
+    odph_ethaddr_t addrs[MAX_PKTIOS]; /* Array of dst addresses */
+    pktin_mode_t in_mode;   /* Packet input mode */
+    pktout_mode_t out_mode; /* Packet output mode */
+    int time;               /* Time in seconds to run. */
+    int accuracy;           /* Number of seconds to get and print stats */
+    char *if_str;           /* Storage for interface names */
+    int dst_change;         /* Change destination eth addresses */
+    int src_change;         /* Change source eth addresses */
+    int error_check;        /* Check packet errors */
+    int chksum;             /* Checksum offload */
+    int sched_mode;         /* Scheduler mode */
+    int num_groups;         /* Number of scheduling groups */
+    int verbose;            /* Verbose output */
 } appl_args_t;

-static int exit_threads;    /**< Break workers loop if set to 1 */
+static int exit_threads;    /* Break workers loop if set to 1 */

 static void sig_handler(int signo ODP_UNUSED)
 {
     exit_threads = 1;
 }

-/**
- * Statistics
- */
+/* Statistics */
 typedef union ODP_ALIGNED_CACHE {
     struct {
-        /** Number of forwarded packets */
+        /* Number of forwarded packets */
         uint64_t packets;
-        /** Packets dropped due to receive error */
+        /* Packets dropped due to receive error */
         uint64_t rx_drops;
-        /** Packets dropped due to transmit error */
+        /* Packets dropped due to transmit error */
         uint64_t tx_drops;
     } s;

     uint8_t padding[ODP_CACHE_LINE_SIZE];
 } stats_t;

-/**
- * Thread specific data
- */
+/* Thread specific data */
 typedef struct thread_args_t {
     stats_t stats;

@@ -161,24 +139,24 @@ typedef struct thread_args_t {
     int num_groups;
 } thread_args_t;

-/**
+/*
  * Grouping of all global data
  */
 typedef struct {
-    /** Thread specific arguments */
+    /* Thread specific arguments */
     thread_args_t thread[MAX_WORKERS];
-    /** Barriers to synchronize main and workers */
+    /* Barriers to synchronize main and workers */
     odp_barrier_t init_barrier;
     odp_barrier_t term_barrier;
-    /** Application (parsed) arguments */
+    /* Application (parsed) arguments */
     appl_args_t appl;
-    /** Table of port ethernet addresses */
+    /* Table of port ethernet addresses */
     odph_ethaddr_t port_eth_addr[MAX_PKTIOS];
-    /** Table of dst ethernet addresses */
+    /* Table of dst ethernet addresses */
     odph_ethaddr_t dst_eth_addr[MAX_PKTIOS];
-    /** Table of dst ports. This is used by non-sched modes. */
+    /* Table of dst ports. This is used by non-sched modes. */
     int dst_port[MAX_PKTIOS];
-    /** Table of pktio handles */
+    /* Table of pktio handles */
     struct {
         odp_pktio_t pktio;
         odp_pktin_queue_t pktin[MAX_QUEUES];

@@ -193,26 +171,26 @@ typedef struct {
         int next_tx_queue;
     } pktios[MAX_PKTIOS];

-    /** Destination port lookup table.
-     *  Table index is pktio_index of the API. This is used by the sched
-     *  mode. */
+    /* Destination port lookup table.
+     * Table index is pktio_index of the API. This is used by the sched
+     * mode. */
     uint8_t dst_port_from_idx[MAX_PKTIO_INDEXES];

 } args_t;

-/** Global pointer to args */
+/* Global pointer to args */
 static args_t *gbl_args;

-/**
+/*
  * Drop packets which input parsing marked as containing errors.
  *
  * Frees packets with error and modifies pkt_tbl[] to only contain packets with
  * no detected errors.
  *
- * @param pkt_tbl  Array of packets
- * @param num      Number of packets in pkt_tbl[]
+ * pkt_tbl  Array of packets
+ * num      Number of packets in pkt_tbl[]
  *
- * @return Number of packets dropped
+ * Returns number of packets dropped
  */
 static inline int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
 {

@@ -234,12 +212,12 @@ static inline int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
     return dropped;
 }

-/**
+/*
  * Fill packets' eth addresses according to the destination port
  *
- * @param pkt_tbl   Array of packets
- * @param num       Number of packets in the array
- * @param dst_port  Destination port
+ * pkt_tbl   Array of packets
+ * num       Number of packets in the array
+ * dst_port  Destination port
  */
 static inline void fill_eth_addrs(odp_packet_t pkt_tbl[],
                                   unsigned num, int dst_port)

@@ -301,10 +279,10 @@ static inline void chksum_insert(odp_packet_t *pkt_tbl, int pkts)
     }
 }

-/**
+/*
  * Packet IO worker thread using scheduled queues
  *
- * @param arg  thread arguments of type 'thread_args_t *'
+ * arg  thread arguments of type 'thread_args_t *'
  */
 static int run_worker_sched_mode(void *arg)
 {

@@ -441,10 +419,10 @@ static int run_worker_sched_mode(void *arg)
     return 0;
 }

-/**
+/*
  * Packet IO worker thread using plain queues
  *
- * @param arg  thread arguments of type 'thread_args_t *'
+ * arg  thread arguments of type 'thread_args_t *'
  */
 static int run_worker_plain_queue_mode(void *arg)
 {

@@ -568,10 +546,10 @@ static int run_worker_plain_queue_mode(void *arg)
     return 0;
 }

-/**
+/*
  * Packet IO worker thread accessing IO resources directly
  *
- * @param arg  thread arguments of type 'thread_args_t *'
+ * arg  thread arguments of type 'thread_args_t *'
  */
 static int run_worker_direct_mode(void *arg)
 {

@@ -670,15 +648,14 @@ static int run_worker_direct_mode(void *arg)
     return 0;
 }

-/**
+/*
  * Create a pktio handle, optionally associating a default input queue.
  *
- * @param dev    Name of device to open
- * @param index  Pktio index
- * @param pool   Pool to associate with device for packet RX/TX
+ * dev    Name of device to open
+ * index  Pktio index
+ * pool   Pool to associate with device for packet RX/TX
  *
- * @retval 0 on success
- * @retval -1 on failure
+ * Returns 0 on success, -1 on failure
  */
 static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
                         odp_pool_t pool, odp_schedule_group_t group)

@@ -837,14 +814,13 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
     return 0;
 }

-/**
- * Print statistics
- *
- * @param num_workers  Number of worker threads
- * @param thr_stats    Pointers to stats storage
- * @param duration     Number of seconds to loop in
- * @param timeout      Number of seconds for stats calculation
+/*
+ * Print statistics
  *
+ * num_workers  Number of worker threads
+ * thr_stats    Pointers to stats storage
+ * duration     Number of seconds to loop in
+ * timeout      Number of seconds for stats calculation
  */
 static int print_speed_stats(int num_workers, stats_t **thr_stats,
                              int duration, int timeout)

@@ -926,10 +902,10 @@ static void print_port_mapping(void)
     printf("\n");
 }

-/**
+/*
  * Find the destination port for a given input port
  *
- * @param port  Input port index
+ * port  Input port index
  */
 static int find_dest_port(int port)
 {

@@ -1117,7 +1093,7 @@ static void init_port_lookup_tbl(void)
     }
 }

-/**
+/*
  * Prinf usage information
  */
 static void usage(char *progname)

@@ -1172,12 +1148,12 @@ static void usage(char *progname)
            );

-/**
+/*
  * Parse and store the command line arguments
  *
- * @param argc       argument count
- * @param argv[]     argument vector
- * @param appl_args  Store application arguments here
+ * argc       argument count
+ * argv[]     argument vector
+ * appl_args  Store application arguments here
  */
 static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
 {

@@ -1377,7 +1353,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
     optind = 1;    /* reset 'extern optind' from the getopt lib */
 }

-/**
+/*
  * Print system and application info
  */
 static void print_info(char *progname, appl_args_t *appl_args)

@@ -1447,8 +1423,8 @@ static void create_groups(int num, odp_schedule_group_t *group)
     }
 }

-/**
- * ODP L2 forwarding main function
+/*
+ * L2 forwarding main function
  */
 int main(int argc, char *argv[])
 {
commit 2efa9f9e48d0a64c56c995e7d2a59ccc546d94f5
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Wed Feb 21 10:40:02 2018 +0200
linux-gen: queue: simplify lock macro usage
Use queue entry pointer as macro parameter instead of lock pointer.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c
index 9e99994d..e4f6fd82 100644
--- a/platform/linux-generic/odp_queue_basic.c
+++ b/platform/linux-generic/odp_queue_basic.c
@@ -30,9 +30,9 @@
 #define NUM_INTERNAL_QUEUES 64

 #include <odp/api/plat/ticketlock_inlines.h>
-#define LOCK(a)      _odp_ticketlock_lock(a)
-#define UNLOCK(a)    _odp_ticketlock_unlock(a)
-#define LOCK_INIT(a) odp_ticketlock_init(a)
+#define LOCK(queue_ptr)      _odp_ticketlock_lock(&((queue_ptr)->s.lock))
+#define UNLOCK(queue_ptr)    _odp_ticketlock_unlock(&((queue_ptr)->s.lock))
+#define LOCK_INIT(queue_ptr) odp_ticketlock_init(&((queue_ptr)->s.lock))

 #include <string.h>
 #include <inttypes.h>

@@ -91,7 +91,7 @@ static int queue_init_global(void)
     for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
         /* init locks */
         queue_entry_t *queue = get_qentry(i);
-        LOCK_INIT(&queue->s.lock);
+        LOCK_INIT(queue);
         queue->s.index = i;
         queue->s.handle = queue_from_index(i);
     }

@@ -130,12 +130,12 @@ static int queue_term_global(void)
     for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
         queue = &queue_glb->queue[i];
-        LOCK(&queue->s.lock);
+        LOCK(queue);
         if (queue->s.status != QUEUE_STATUS_FREE) {
             ODP_ERR("Not destroyed queue: %s\n", queue->s.name);
             rc = -1;
         }
-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
     }

     queue_lf_term_global();

@@ -231,10 +231,10 @@ static odp_queue_t queue_create(const char *name,
         if (queue->s.status != QUEUE_STATUS_FREE)
             continue;

-        LOCK(&queue->s.lock);
+        LOCK(queue);
         if (queue->s.status == QUEUE_STATUS_FREE) {
             if (queue_init(queue, name, param)) {
-                UNLOCK(&queue->s.lock);
+                UNLOCK(queue);
                 return ODP_QUEUE_INVALID;
             }

@@ -246,7 +246,7 @@ static odp_queue_t queue_create(const char *name,
                 queue_lf = queue_lf_create(queue);

                 if (queue_lf == NULL) {
-                    UNLOCK(&queue->s.lock);
+                    UNLOCK(queue);
                     return ODP_QUEUE_INVALID;
                 }
                 queue->s.queue_lf = queue_lf;

@@ -265,10 +265,10 @@ static odp_queue_t queue_create(const char *name,
             queue->s.status = QUEUE_STATUS_READY;

             handle = queue->s.handle;
-            UNLOCK(&queue->s.lock);
+            UNLOCK(queue);
             break;
         }
-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
     }

     if (handle == ODP_QUEUE_INVALID)

@@ -290,13 +290,13 @@ void sched_cb_queue_destroy_finalize(uint32_t queue_index)
 {
     queue_entry_t *queue = get_qentry(queue_index);

-    LOCK(&queue->s.lock);
+    LOCK(queue);

     if (queue->s.status == QUEUE_STATUS_DESTROYED) {
         queue->s.status = QUEUE_STATUS_FREE;
         sched_fn->destroy_queue(queue_index);
     }
-    UNLOCK(&queue->s.lock);
+    UNLOCK(queue);
 }

 static int queue_destroy(odp_queue_t handle)

@@ -307,19 +307,19 @@ static int queue_destroy(odp_queue_t handle)
     if (handle == ODP_QUEUE_INVALID)
         return -1;

-    LOCK(&queue->s.lock);
+    LOCK(queue);
     if (queue->s.status == QUEUE_STATUS_FREE) {
-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
         ODP_ERR("queue \"%s\" already free\n", queue->s.name);
         return -1;
     }
     if (queue->s.status == QUEUE_STATUS_DESTROYED) {
-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
         ODP_ERR("queue \"%s\" already destroyed\n", queue->s.name);
         return -1;
     }
     if (ring_st_is_empty(&queue->s.ring_st) == 0) {
-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
        ODP_ERR("queue \"%s\" not empty\n", queue->s.name);
         return -1;
     }

@@ -343,7 +343,7 @@ static int queue_destroy(odp_queue_t handle)
     if (queue->s.param.nonblocking == ODP_NONBLOCKING_LF)
         queue_lf_destroy(queue->s.queue_lf);

-    UNLOCK(&queue->s.lock);
+    UNLOCK(queue);

     return 0;
 }

@@ -373,13 +373,13 @@ static odp_queue_t queue_lookup(const char *name)
             queue->s.status == QUEUE_STATUS_DESTROYED)
             continue;

-        LOCK(&queue->s.lock);
+        LOCK(queue);
         if (strcmp(name, queue->s.name) == 0) {
             /* found it */
-            UNLOCK(&queue->s.lock);
+            UNLOCK(queue);
             return queue->s.handle;
         }
-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
     }

     return ODP_QUEUE_INVALID;

@@ -423,10 +423,10 @@ static inline int enq_multi(queue_t q_int, odp_buffer_hdr_t *buf_hdr[],
     buffer_index_from_buf(buf_idx, buf_hdr, num);

-    LOCK(&queue->s.lock);
+    LOCK(queue);

     if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
         ODP_ERR("Bad queue status\n");
         return -1;
     }

@@ -434,7 +434,7 @@ static inline int enq_multi(queue_t q_int, odp_buffer_hdr_t *buf_hdr[],
     num_enq = ring_st_enq_multi(ring_st, buf_idx, num);

     if (odp_unlikely(num_enq == 0)) {
-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
         return 0;
     }

@@ -443,7 +443,7 @@ static inline int enq_multi(queue_t q_int, odp_buffer_hdr_t *buf_hdr[],
         sched = 1;
     }

-    UNLOCK(&queue->s.lock);
+    UNLOCK(queue);

     /* Add queue to scheduling */
     if (sched && sched_fn->sched_queue(queue->s.index))

@@ -502,12 +502,12 @@ static inline int deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
     ring_st = &queue->s.ring_st;

-    LOCK(&queue->s.lock);
+    LOCK(queue);

     if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
         /* Bad queue, or queue has been destroyed.
          * Scheduler finalizes queue destroy after this. */
-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
         return -1;
     }

@@ -522,7 +522,7 @@ static inline int deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
         sched_fn->unsched_queue(queue->s.index);
     }

-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
         return 0;
     }

@@ -530,7 +530,7 @@ static inline int deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
     if (status_sync && queue->s.type == ODP_QUEUE_TYPE_SCHED)
         sched_fn->save_context(queue->s.index);

-    UNLOCK(&queue->s.lock);
+    UNLOCK(queue);

     buffer_index_to_buf(buf_hdr, buf_idx, num_deq);

@@ -643,12 +643,12 @@ static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
     queue = get_qentry(queue_id);

-    LOCK(&queue->s.lock);
+    LOCK(queue);
     status = queue->s.status;

     if (odp_unlikely(status == QUEUE_STATUS_FREE ||
                      status == QUEUE_STATUS_DESTROYED)) {
-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
         ODP_ERR("Invalid queue status:%d\n", status);
         return -1;
     }

@@ -656,7 +656,7 @@ static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
     info->name = queue->s.name;
     info->param = queue->s.param;

-    UNLOCK(&queue->s.lock);
+    UNLOCK(queue);

     return 0;
 }

@@ -673,11 +673,11 @@ int sched_cb_queue_empty(uint32_t queue_index)
     queue_entry_t *queue = get_qentry(queue_index);
     int ret = 0;

-    LOCK(&queue->s.lock);
+    LOCK(queue);

     if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
         /* Bad queue, or queue has been destroyed. */
-        UNLOCK(&queue->s.lock);
+        UNLOCK(queue);
         return -1;
     }

@@ -689,7 +689,7 @@ int sched_cb_queue_empty(uint32_t queue_index)
         ret = 1;
     }

-    UNLOCK(&queue->s.lock);
+    UNLOCK(queue);

     return ret;
 }
commit c1ad4f948f12f3748d76af679922236d5cf1b61e
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Tue Feb 20 15:24:26 2018 +0200
linux-gen: config: increase max burst size
Burst size can now be increased without overhead, as buffer header size is no longer tied to it. Increasing the burst size from 16 to 32 improves e.g. l2fwd throughput by more than 10%.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h
index 6fcb1cc0..d579381e 100644
--- a/platform/linux-generic/include/odp_config_internal.h
+++ b/platform/linux-generic/include/odp_config_internal.h
@@ -144,7 +144,7 @@ extern "C" {
  * This controls the burst size on various enqueue, dequeue, etc calls. Large
  * burst size improves throughput, but may degrade QoS (increase latency).
  */
-#define CONFIG_BURST_SIZE 16
+#define CONFIG_BURST_SIZE 32

 /*
  * Maximum number of events in a pool
commit f737af0943b09f756e3b40290ad2e0c8f8101f00
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Tue Feb 20 13:19:12 2018 +0200
linux-gen: queue: ring based queue implementation
Change from a linked list of bursts to a ring implementation. Queues now have a maximum size, but the code is simpler and performance is a bit better. This also helps a potential future step of implementing queues with a lockless ring.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
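Note: in this style of ring (the implementation itself is in commit e0934ef later in this mail), head and tail are free-running 32-bit counters and a power-of-two mask maps them to array slots; unsigned wraparound keeps the fill-level math correct. A standalone sketch of the index arithmetic, with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t size = 8, mask = size - 1; /* size must be a power of two */
        uint32_t head = 6, tail = 9;        /* free-running counters */

        /* Fill level is the counter difference; unsigned arithmetic
         * keeps this correct even after the counters overflow. */
        printf("used slots: %u\n", tail - head);          /* 3 */
        printf("free slots: %u\n", size - (tail - head)); /* 5 */
        printf("head slot:  %u\n", head & mask);          /* 6 */
        printf("tail slot:  %u\n", tail & mask);          /* 1, wrapped */
        return 0;
    }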
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h
index bd90ee15..e50dd604 100644
--- a/platform/linux-generic/include/odp_buffer_internal.h
+++ b/platform/linux-generic/include/odp_buffer_internal.h
@@ -33,19 +33,31 @@ extern "C" {
 #include <odp_schedule_if.h>
 #include <stddef.h>

-#define BUFFER_BURST_SIZE CONFIG_BURST_SIZE
-
 typedef struct seg_entry_t {
     void     *hdr;
     uint8_t  *data;
     uint32_t  len;
 } seg_entry_t;

+typedef union buffer_index_t {
+    uint32_t u32;
+
+    struct {
+        uint32_t pool   :8;
+        uint32_t buffer :24;
+    };
+} buffer_index_t;
+
+/* Check that pool index fit into bit field */
+ODP_STATIC_ASSERT(ODP_CONFIG_POOLS <= (0xFF + 1), "TOO_MANY_POOLS");
+
+/* Check that buffer index fit into bit field */
+ODP_STATIC_ASSERT(CONFIG_POOL_MAX_NUM <= (0xFFFFFF + 1), "TOO_LARGE_POOL");
+
 /* Common buffer header */
 struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
-
-    /* Buffer index in the pool */
-    uint32_t index;
+    /* Combined pool and buffer index */
+    buffer_index_t index;

     /* Total segment count */
     uint16_t segcount;

@@ -73,16 +85,6 @@ struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
     /* Segments */
     seg_entry_t seg[CONFIG_PACKET_SEGS_PER_HDR];

-    /* Burst counts */
-    uint8_t burst_num;
-    uint8_t burst_first;
-
-    /* Next buf in a list */
-    struct odp_buffer_hdr_t *next;
-
-    /* Burst table */
-    struct odp_buffer_hdr_t *burst[BUFFER_BURST_SIZE];
-
     /* --- Mostly read only data --- */
     const void *user_ptr;

@@ -109,8 +111,6 @@ struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
 ODP_STATIC_ASSERT(CONFIG_PACKET_SEGS_PER_HDR < 256,
                   "CONFIG_PACKET_SEGS_PER_HDR_TOO_LARGE");

-ODP_STATIC_ASSERT(BUFFER_BURST_SIZE < 256, "BUFFER_BURST_SIZE_TOO_LARGE");
-
 #ifdef __cplusplus
 }
 #endif

diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index faa019f4..e3de2b65 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -104,6 +104,34 @@ static inline odp_buffer_hdr_t *buf_hdl_to_hdr(odp_buffer_t buf)
     return (odp_buffer_hdr_t *)(uintptr_t)buf;
 }

+static inline odp_buffer_hdr_t *buf_hdr_from_index(pool_t *pool,
+                                                   uint32_t buffer_idx)
+{
+    uint64_t block_offset;
+    odp_buffer_hdr_t *buf_hdr;
+
+    block_offset = buffer_idx * (uint64_t)pool->block_size;
+
+    /* clang requires cast to uintptr_t */
+    buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)&pool->base_addr[block_offset];
+
+    return buf_hdr;
+}
+
+static inline odp_buffer_hdr_t *buf_hdr_from_index_u32(uint32_t u32)
+{
+    buffer_index_t index;
+    uint32_t pool_idx, buffer_idx;
+    pool_t *pool;
+
+    index.u32 = u32;
+    pool_idx = index.pool;
+    buffer_idx = index.buffer;
+    pool = pool_entry(pool_idx);
+
+    return buf_hdr_from_index(pool, buffer_idx);
+}
+
 int buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int num);
 void buffer_free_multi(odp_buffer_hdr_t *buf_hdr[], int num_free);

diff --git a/platform/linux-generic/include/odp_queue_internal.h b/platform/linux-generic/include/odp_queue_internal.h
index 8215a818..0540bf72 100644
--- a/platform/linux-generic/include/odp_queue_internal.h
+++ b/platform/linux-generic/include/odp_queue_internal.h
@@ -29,6 +29,7 @@ extern "C" {
 #include <odp/api/hints.h>
 #include <odp/api/ticketlock.h>
 #include <odp_config_internal.h>
+#include <odp_ring_st_internal.h>

 #define QUEUE_STATUS_FREE         0
 #define QUEUE_STATUS_DESTROYED    1

@@ -38,9 +39,7 @@ extern "C" {
 struct queue_entry_s {
     odp_ticketlock_t ODP_ALIGNED_CACHE lock;
-
-    odp_buffer_hdr_t *head;
-    odp_buffer_hdr_t *tail;
+    ring_st_t ring_st;
     int status;

     queue_enq_fn_t ODP_ALIGNED_CACHE enqueue;

diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index 22df7d67..e24aa0c7 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -1819,7 +1819,8 @@ void odp_packet_print_data(odp_packet_t pkt, uint32_t offset,
     len += snprintf(&str[len], n - len,
                     "  pool index    %" PRIu32 "\n", pool->pool_idx);
     len += snprintf(&str[len], n - len,
-                    "  buf index     %" PRIu32 "\n", hdr->buf_hdr.index);
+                    "  buf index     %" PRIu32 "\n",
+                    hdr->buf_hdr.index.buffer);
     len += snprintf(&str[len], n - len,
                     "  segcount      %" PRIu16 "\n", hdr->buf_hdr.segcount);
     len += snprintf(&str[len], n - len,

diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index 687edb8f..998fc649 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -80,20 +80,6 @@ static inline pool_t *pool_from_buf(odp_buffer_t buf)
     return buf_hdr->pool_ptr;
 }

-static inline odp_buffer_hdr_t *buf_hdr_from_index(pool_t *pool,
-                                                   uint32_t buffer_idx)
-{
-    uint64_t block_offset;
-    odp_buffer_hdr_t *buf_hdr;
-
-    block_offset = buffer_idx * (uint64_t)pool->block_size;
-
-    /* clang requires cast to uintptr_t */
-    buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)&pool->base_addr[block_offset];
-
-    return buf_hdr;
-}
-
 int odp_pool_init_global(void)
 {
     uint32_t i;

@@ -296,7 +282,9 @@ static void init_buffers(pool_t *pool)
         memset(buf_hdr, 0, (uintptr_t)data - (uintptr_t)buf_hdr);

         /* Initialize buffer metadata */
-        buf_hdr->index = i;
+        buf_hdr->index.u32 = 0;
+        buf_hdr->index.pool = pool->pool_idx;
+        buf_hdr->index.buffer = i;
         buf_hdr->type = type;
         buf_hdr->event_type = type;
         buf_hdr->pool_ptr = pool;

@@ -785,7 +773,7 @@ static inline void buffer_free_to_pool(pool_t *pool,
     ring = &pool->ring->hdr;
     mask = pool->ring_mask;
     for (i = 0; i < num; i++)
-        buf_index[i] = buf_hdr[i]->index;
+        buf_index[i] = buf_hdr[i]->index.buffer;

     ring_enq_multi(ring, mask, buf_index, num);

@@ -825,7 +813,7 @@ static inline void buffer_free_to_pool(pool_t *pool,
     }

     for (i = 0; i < num; i++)
-        cache->buf_index[cache_num + i] = buf_hdr[i]->index;
+        cache->buf_index[cache_num + i] = buf_hdr[i]->index.buffer;

     cache->num = cache_num + num;
 }

diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c
index 2801b220..9e99994d 100644
--- a/platform/linux-generic/odp_queue_basic.c
+++ b/platform/linux-generic/odp_queue_basic.c
@@ -40,9 +40,14 @@ static int queue_init(queue_entry_t *queue, const char *name,
                       const odp_queue_param_t *param);

+typedef struct ODP_ALIGNED_CACHE {
+    /* Storage space for ring data */
+    uint32_t data[CONFIG_QUEUE_SIZE];
+} ring_data_t;
+
 typedef struct queue_global_t {
     queue_entry_t   queue[ODP_CONFIG_QUEUES];
-
+    ring_data_t     ring_data[ODP_CONFIG_QUEUES];
     uint32_t        queue_lf_num;
     uint32_t        queue_lf_size;
     queue_lf_func_t queue_lf_func;

@@ -154,9 +159,11 @@ static int queue_capability(odp_queue_capability_t *capa)
     capa->max_sched_groups = sched_fn->num_grps();
     capa->sched_prios      = odp_schedule_num_prio();
     capa->plain.max_num    = capa->max_queues;
-    capa->sched.max_num    = capa->max_queues;
+    capa->plain.max_size   = CONFIG_QUEUE_SIZE;
     capa->plain.lockfree.max_num  = queue_glb->queue_lf_num;
     capa->plain.lockfree.max_size = queue_glb->queue_lf_size;
+    capa->sched.max_num    = capa->max_queues;
+    capa->sched.max_size   = CONFIG_QUEUE_SIZE;

     return 0;
 }

@@ -204,6 +211,20 @@ static odp_queue_t queue_create(const char *name,
         param = &default_param;
     }

+    if (param->nonblocking == ODP_BLOCKING) {
+        if (param->size > CONFIG_QUEUE_SIZE)
+            return ODP_QUEUE_INVALID;
+    } else if (param->nonblocking == ODP_NONBLOCKING_LF) {
+        /* Only plain type lock-free queues supported */
+        if (param->type != ODP_QUEUE_TYPE_PLAIN)
+            return ODP_QUEUE_INVALID;
+        if (param->size > queue_glb->queue_lf_size)
+            return ODP_QUEUE_INVALID;
+    } else {
+        /* Wait-free queues not supported */
+        return ODP_QUEUE_INVALID;
+    }
+
     for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
         queue = &queue_glb->queue[i];

@@ -297,7 +318,7 @@ static int queue_destroy(odp_queue_t handle)
         ODP_ERR("queue \"%s\" already destroyed\n", queue->s.name);
         return -1;
     }
-    if (queue->s.head != NULL) {
+    if (ring_st_is_empty(&queue->s.ring_st) == 0) {
         UNLOCK(&queue->s.lock);
         ODP_ERR("queue \"%s\" not empty\n", queue->s.name);
         return -1;
     }

@@ -364,81 +385,71 @@ static odp_queue_t queue_lookup(const char *name)
     return ODP_QUEUE_INVALID;
 }

+static inline void buffer_index_from_buf(uint32_t buffer_index[],
+                                         odp_buffer_hdr_t *buf_hdr[], int num)
+{
+    int i;
+
+    for (i = 0; i < num; i++)
+        buffer_index[i] = buf_hdr[i]->index.u32;
+}
+
+static inline void buffer_index_to_buf(odp_buffer_hdr_t *buf_hdr[],
+                                       uint32_t buffer_index[], int num)
+{
+    int i;
+
+    for (i = 0; i < num; i++) {
+        buf_hdr[i] = buf_hdr_from_index_u32(buffer_index[i]);
+        odp_prefetch(buf_hdr[i]);
+    }
+}
+
 static inline int enq_multi(queue_t q_int, odp_buffer_hdr_t *buf_hdr[],
                             int num)
 {
     int sched = 0;
-    int i, ret;
+    int ret;
     queue_entry_t *queue;
-    odp_buffer_hdr_t *hdr, *tail, *next_hdr;
+    int num_enq;
+    ring_st_t *ring_st;
+    uint32_t buf_idx[num];

     queue = qentry_from_int(q_int);
+    ring_st = &queue->s.ring_st;
+
     if (sched_fn->ord_enq_multi(q_int, (void **)buf_hdr, num, &ret))
         return ret;

-    /* Optimize the common case of single enqueue */
-    if (num == 1) {
-        tail = buf_hdr[0];
-        hdr  = tail;
-        hdr->burst_num = 0;
-        hdr->next = NULL;
-    } else {
-        int next;
-
-        /* Start from the last buffer header */
-        tail = buf_hdr[num - 1];
-        hdr  = tail;
-        hdr->next = NULL;
-        next = num - 2;
-
-        while (1) {
-            /* Build a burst. The buffer header carrying
-             * a burst is the last buffer of the burst. */
-            for (i = 0; next >= 0 && i < BUFFER_BURST_SIZE;
-                 i++, next--)
-                hdr->burst[BUFFER_BURST_SIZE - 1 - i] =
-                    buf_hdr[next];
-
-            hdr->burst_num   = i;
-            hdr->burst_first = BUFFER_BURST_SIZE - i;
-
-            if (odp_likely(next < 0))
-                break;
-
-            /* Get another header and link it */
-            next_hdr  = hdr;
-            hdr       = buf_hdr[next];
-            hdr->next = next_hdr;
-            next--;
-        }
-    }
+    buffer_index_from_buf(buf_idx, buf_hdr, num);

     LOCK(&queue->s.lock);
+
     if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
         UNLOCK(&queue->s.lock);
         ODP_ERR("Bad queue status\n");
         return -1;
     }

-    /* Empty queue */
-    if (queue->s.head == NULL)
-        queue->s.head = hdr;
-    else
-        queue->s.tail->next = hdr;
+    num_enq = ring_st_enq_multi(ring_st, buf_idx, num);

-    queue->s.tail = tail;
+    if (odp_unlikely(num_enq == 0)) {
+        UNLOCK(&queue->s.lock);
+        return 0;
+    }

     if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
         queue->s.status = QUEUE_STATUS_SCHED;
-        sched = 1; /* retval: schedule queue */
+        sched = 1;
     }
+
     UNLOCK(&queue->s.lock);

     /* Add queue to scheduling */
     if (sched && sched_fn->sched_queue(queue->s.index))
         ODP_ABORT("schedule_queue failed\n");

-    return num; /* All events enqueued */
+    return num_enq;
 }

 static int queue_int_enq_multi(queue_t q_int, odp_buffer_hdr_t *buf_hdr[],

@@ -484,12 +495,15 @@ static int queue_enq(odp_queue_t handle, odp_event_t ev)
 static inline int deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
                             int num)
 {
-    odp_buffer_hdr_t *hdr, *next;
-    int i, j;
-    int updated = 0;
     int status_sync = sched_fn->status_sync;
+    int num_deq;
+    ring_st_t *ring_st;
+    uint32_t buf_idx[num];
+
+    ring_st = &queue->s.ring_st;

     LOCK(&queue->s.lock);
+
     if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
         /* Bad queue, or queue has been destroyed.
         * Scheduler finalizes queue destroy after this. */

@@ -497,9 +511,9 @@ static inline int deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
         return -1;
     }

-    hdr = queue->s.head;
+    num_deq = ring_st_deq_multi(ring_st, buf_idx, num);

-    if (hdr == NULL) {
+    if (num_deq == 0) {
         /* Already empty queue */
         if (queue->s.status == QUEUE_STATUS_SCHED) {
             queue->s.status = QUEUE_STATUS_NOTSCHED;

@@ -509,51 +523,18 @@ static inline int deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
         }

         UNLOCK(&queue->s.lock);
-        return 0;
-    }
-
-    for (i = 0; i < num && hdr; ) {
-        int burst_num = hdr->burst_num;
-        int first     = hdr->burst_first;
-
-        /* First, get bursted buffers */
-        for (j = 0; j < burst_num && i < num; j++, i++) {
-            buf_hdr[i] = hdr->burst[first + j];
-            odp_prefetch(buf_hdr[i]);
-        }
-
-        if (burst_num) {
-            hdr->burst_num   = burst_num - j;
-            hdr->burst_first = first + j;
-        }
-
-        if (i == num)
-            break;
-
-        /* When burst is empty, consume the current buffer header and
-         * move to the next header */
-        buf_hdr[i] = hdr;
-        next       = hdr->next;
-        hdr->next  = NULL;
-        hdr        = next;
-        updated++;
-        i++;
+        return 0;
     }

-    /* Write head only if updated */
-    if (updated)
-        queue->s.head = hdr;
-
-    /* Queue is empty */
-    if (hdr == NULL)
-        queue->s.tail = NULL;
-
     if (status_sync && queue->s.type == ODP_QUEUE_TYPE_SCHED)
         sched_fn->save_context(queue->s.index);

     UNLOCK(&queue->s.lock);

-    return i;
+    buffer_index_to_buf(buf_hdr, buf_idx, num_deq);
+
+    return num_deq;
 }

 static int queue_int_deq_multi(queue_t q_int, odp_buffer_hdr_t *buf_hdr[],

@@ -622,8 +603,9 @@ static int queue_init(queue_entry_t *queue, const char *name,
     queue->s.pktin = PKTIN_INVALID;
     queue->s.pktout = PKTOUT_INVALID;

-    queue->s.head = NULL;
-    queue->s.tail = NULL;
+    ring_st_init(&queue->s.ring_st,
+                 queue_glb->ring_data[queue->s.index].data,
+                 CONFIG_QUEUE_SIZE);

     return 0;
 }

@@ -699,7 +681,7 @@ int sched_cb_queue_empty(uint32_t queue_index)
         return -1;
     }

-    if (queue->s.head == NULL) {
+    if (ring_st_is_empty(&queue->s.ring_st)) {
         /* Already empty queue. Update status. */
         if (queue->s.status == QUEUE_STATUS_SCHED)
             queue->s.status = QUEUE_STATUS_NOTSCHED;
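Note: a standalone illustration of the buffer_index_t round trip introduced above (the union is copied from the patch; the values are arbitrary examples that fit the 8- and 24-bit fields):

    #include <assert.h>
    #include <stdint.h>

    typedef union buffer_index_t {
        uint32_t u32;

        struct {
            uint32_t pool   :8;
            uint32_t buffer :24;
        };
    } buffer_index_t;

    int main(void)
    {
        buffer_index_t in, out;

        in.u32 = 0;
        in.pool = 5;        /* fits in 8 bits */
        in.buffer = 123456; /* fits in 24 bits */

        /* Only the single u32 travels through the queue ring; pool and
         * buffer indices are unpacked again on dequeue. */
        out.u32 = in.u32;

        assert(out.pool == 5 && out.buffer == 123456);
        return 0;
    }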
commit 0307f9337d1061bbd9afc415a592fcb3c8c57c21
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Tue Feb 20 10:54:46 2018 +0200
test: pktio_ordered: honour max queue size
Limit the maximum number of events to the maximum pool and queue capabilities when needed. The pool should not be larger than the queues; otherwise the test run in the validation suite suffers from queue enqueue failures (queue full), and sequence number checking fails as a result.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
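Note: the sizing logic of this patch, isolated into a standalone program. The capability values here are hypothetical placeholders for what odp_pool_capability() / odp_queue_capability() would report:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_NUM_PKT (8 * 1024)

    /* Hypothetical capability values for illustration */
    static const uint32_t pool_max_num = 4096;   /* pool_capa.pkt.max_num */
    static const uint32_t sched_max_size = 2048; /* queue_capa.sched.max_size */

    int main(void)
    {
        uint32_t pool_size = MAX_NUM_PKT;
        uint32_t queue_size = MAX_NUM_PKT;

        /* Zero capability values mean "no limit" */
        if (pool_max_num && pool_max_num < MAX_NUM_PKT)
            pool_size = pool_max_num;

        if (sched_max_size && sched_max_size < MAX_NUM_PKT)
            queue_size = sched_max_size;

        /* In the worst case every packet of the pool sits in a single
         * queue, so the pool must not exceed the queue size. */
        if (pool_size > queue_size)
            pool_size = queue_size;

        printf("pool %u, queue %u\n", pool_size, queue_size); /* 2048, 2048 */
        return 0;
    }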
diff --git a/test/performance/odp_pktio_ordered.c b/test/performance/odp_pktio_ordered.c
index af2e9574..5647b1eb 100644
--- a/test/performance/odp_pktio_ordered.c
+++ b/test/performance/odp_pktio_ordered.c
@@ -75,12 +75,12 @@

 #define JHASH_GOLDEN_RATIO 0x9e3779b9

+/* Maximum pool and queue size */
+#define MAX_NUM_PKT (8 * 1024)
+
 /** Maximum number of worker threads */
 #define MAX_WORKERS 64

-/** Number of packet buffers in the memory pool */
-#define PKT_POOL_SIZE 8192
-
 /** Buffer size of the packet pool buffer in bytes*/
 #define PKT_POOL_BUF_SIZE 1856

@@ -93,9 +93,6 @@
 /** Maximum number of pktio queues per interface */
 #define MAX_QUEUES 32

-/** Seems to need at least 8192 elements per queue */
-#define QUEUE_SIZE 8192
-
 /** Maximum number of pktio interfaces */
 #define MAX_PKTIOS 8

@@ -1074,7 +1071,8 @@ int main(int argc, char *argv[])
     odp_pool_t pool;
     odp_pool_param_t params;
     odp_shm_t shm;
-    odp_queue_capability_t capa;
+    odp_queue_capability_t queue_capa;
+    odp_pool_capability_t pool_capa;
     odph_ethaddr_t new_addr;
     odph_odpthread_t thread_tbl[MAX_WORKERS];
     stats_t *stats;

@@ -1085,6 +1083,7 @@ int main(int argc, char *argv[])
     int ret;
     int num_workers;
     int in_mode;
+    uint32_t queue_size, pool_size;

     /* Init ODP before calling anything else */
     if (odp_init_global(&instance, NULL, NULL)) {

@@ -1098,6 +1097,16 @@ int main(int argc, char *argv[])
         exit(EXIT_FAILURE);
     }

+    if (odp_queue_capability(&queue_capa)) {
+        LOG_ERR("Error: Queue capa failed\n");
+        exit(EXIT_FAILURE);
+    }
+
+    if (odp_pool_capability(&pool_capa)) {
+        LOG_ERR("Error: Pool capa failed\n");
+        exit(EXIT_FAILURE);
+    }
+
     /* Reserve memory for args from shared mem */
     shm = odp_shm_reserve("shm_args", sizeof(args_t),
                           ODP_CACHE_LINE_SIZE, 0);

@@ -1121,8 +1130,7 @@ int main(int argc, char *argv[])
     if (gbl_args->appl.in_mode == SCHED_ORDERED) {
         /* At least one ordered lock required */
-        odp_queue_capability(&capa);
-        if (capa.max_ordered_locks < 1) {
+        if (queue_capa.max_ordered_locks < 1) {
             LOG_ERR("Error: Ordered locks not available.\n");
             exit(EXIT_FAILURE);
         }

@@ -1145,11 +1153,25 @@ int main(int argc, char *argv[])
     printf("First CPU: %i\n", odp_cpumask_first(&cpumask));
     printf("CPU mask:  %s\n\n", cpumaskstr);

+    pool_size = MAX_NUM_PKT;
+    if (pool_capa.pkt.max_num && pool_capa.pkt.max_num < MAX_NUM_PKT)
+        pool_size = pool_capa.pkt.max_num;
+
+    queue_size = MAX_NUM_PKT;
+    if (queue_capa.sched.max_size &&
+        queue_capa.sched.max_size < MAX_NUM_PKT)
+        queue_size = queue_capa.sched.max_size;
+
+    /* Pool should not be larger than queue, otherwise queue enqueues at
+     * packet input may fail. */
+    if (pool_size > queue_size)
+        pool_size = queue_size;
+
     /* Create packet pool */
     odp_pool_param_init(&params);
     params.pkt.seg_len = PKT_POOL_BUF_SIZE;
     params.pkt.len = PKT_POOL_BUF_SIZE;
-    params.pkt.num = PKT_POOL_SIZE;
+    params.pkt.num = pool_size;
     params.pkt.uarea_size = PKT_UAREA_SIZE;
     params.type = ODP_POOL_PACKET;

@@ -1225,7 +1247,7 @@ int main(int argc, char *argv[])
     qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
     qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
     qparam.sched.group = ODP_SCHED_GROUP_ALL;
-    qparam.size = QUEUE_SIZE;
+    qparam.size = queue_size;

     gbl_args->flow_qcontext[i][j].idx = i;
     gbl_args->flow_qcontext[i][j].input_queue = 0;
commit f13f74ec048ecf2252d6fef68611e813bc294305
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Tue Feb 20 10:07:11 2018 +0200
validation: sched: honour max queue size
When needed, scale down the atomic queue size requirement to the maximum queue size capability.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index 098c03a0..d5783b4d 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -57,6 +57,7 @@ typedef struct {
     odp_barrier_t barrier;
     int buf_count;
     int buf_count_cpy;
+    uint32_t max_sched_queue_size;
     odp_ticketlock_t lock;
     odp_spinlock_t atomic_lock;
     struct {

@@ -1060,7 +1061,7 @@ static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
     args->num_queues = num_queues;
     args->num_prio = num_prio;
     if (enable_excl_atomic)
-        args->num_bufs = BUFS_PER_QUEUE_EXCL;
+        args->num_bufs = globals->max_sched_queue_size;
     else
         args->num_bufs = BUFS_PER_QUEUE;
     args->num_workers = globals->num_workers;

@@ -1405,7 +1406,7 @@ static void scheduler_test_ordered_lock(void)
     CU_ASSERT(ret == 0);
 }

-static int create_queues(void)
+static int create_queues(test_globals_t *globals)
 {
     int i, j, prios, rc;
     odp_queue_capability_t capa;

@@ -1427,6 +1428,12 @@ static int create_queues(void)
               capa.max_ordered_locks);
     }

+    globals->max_sched_queue_size = BUFS_PER_QUEUE_EXCL;
+    if (capa.sched.max_size && capa.sched.max_size < BUFS_PER_QUEUE_EXCL) {
+        printf("Max sched queue size %u\n", capa.sched.max_size);
+        globals->max_sched_queue_size = capa.sched.max_size;
+    }
+
     prios = odp_schedule_num_prio();
     odp_pool_param_init(&params);
     params.buf.size = sizeof(queue_context);

@@ -1455,17 +1462,17 @@ static int create_queues(void)
         q = odp_queue_create(name, &p);

         if (q == ODP_QUEUE_INVALID) {
-            printf("Schedule queue create failed.\n");
+            printf("Parallel queue create failed.\n");
             return -1;
         }

         snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
         p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
-        p.size = BUFS_PER_QUEUE_EXCL;
+        p.size = globals->max_sched_queue_size;
         q = odp_queue_create(name, &p);

         if (q == ODP_QUEUE_INVALID) {
-            printf("Schedule queue create failed.\n");
+            printf("Atomic queue create failed.\n");
             return -1;
         }

@@ -1501,7 +1508,7 @@ static int create_queues(void)
         q = odp_queue_create(name, &p);

         if (q == ODP_QUEUE_INVALID) {
-            printf("Schedule queue create failed.\n");
+            printf("Ordered queue create failed.\n");
             return -1;
         }
         if (odp_queue_lock_count(q) !=

@@ -1598,7 +1605,7 @@ static int scheduler_suite_init(void)
     odp_ticketlock_init(&globals->lock);
     odp_spinlock_init(&globals->atomic_lock);

-    if (create_queues() != 0)
+    if (create_queues(globals) != 0)
         return -1;

     return 0;
commit f294fd186fa42b9c5dfa33820c3992230708a2ea
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Tue Feb 20 09:53:24 2018 +0200
helper: cuckoo: check pool and queue limits
Use capabilities to check that the pool and queue can hold enough events. Also lower the test case resource requirements.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/helper/cuckootable.c b/helper/cuckootable.c
index 1dc43b57..febfa28e 100644
--- a/helper/cuckootable.c
+++ b/helper/cuckootable.c
@@ -184,6 +184,8 @@ odph_cuckoo_table_create(
     odp_queue_t queue;
     odp_queue_param_t qparam;
+    odp_queue_capability_t qcapa;
+    odp_pool_capability_t pcapa;

     char pool_name[ODPH_TABLE_NAME_LEN + 3],
          queue_name[ODPH_TABLE_NAME_LEN + 3];

@@ -191,6 +193,26 @@
     uint32_t impl_size, kv_entry_size,
              bucket_num, bucket_size;

+    if (odp_queue_capability(&qcapa)) {
+        ODPH_DBG("queue capa failed\n");
+        return NULL;
+    }
+
+    if (qcapa.plain.max_size && qcapa.plain.max_size < capacity) {
+        ODPH_DBG("queue max_size too small\n");
+        return NULL;
+    }
+
+    if (odp_pool_capability(&pcapa)) {
+        ODPH_DBG("pool capa failed\n");
+        return NULL;
+    }
+
+    if (pcapa.buf.max_num && pcapa.buf.max_num < capacity) {
+        ODPH_DBG("pool max_num too small\n");
+        return NULL;
+    }
+
     /* Check for valid parameters */
     if ((capacity > HASH_ENTRIES_MAX) ||

diff --git a/helper/test/cuckootable.c b/helper/test/cuckootable.c
index 7798f94e..3afa490a 100644
--- a/helper/test/cuckootable.c
+++ b/helper/test/cuckootable.c
@@ -429,7 +429,7 @@ static int test_creation_with_bad_parameters(void)
     return 0;
 }

-#define PERFORMANCE_CAPACITY 1000000
+#define PERFORMANCE_CAPACITY 4000

 /*
  * Test the performance of cuckoo hash table.
commit e0934ef22cb60d1b27766c1dfea61afc93109e8b
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Feb 16 17:01:41 2018 +0200
linux-gen: ring_st: ring for single thread usage
This ring can be used as a simple FIFO when enqueue/dequeue synchronization is not needed, or when synchronization is already provided by an upper layer.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index b6e19555..c35c0bfe 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -111,9 +111,10 @@ noinst_HEADERS = \
           include/odp_posix_extensions.h \
           include/odp_queue_internal.h \
           include/odp_queue_scalable_internal.h \
-          include/odp_ring_internal.h \
           include/odp_queue_if.h \
           include/odp_queue_lf.h \
+          include/odp_ring_internal.h \
+          include/odp_ring_st_internal.h \
           include/odp_schedule_if.h \
           include/odp_schedule_scalable.h \
           include/odp_schedule_scalable_config.h \

diff --git a/platform/linux-generic/include/odp_ring_st_internal.h b/platform/linux-generic/include/odp_ring_st_internal.h
new file mode 100644
index 00000000..5fb37d4e
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_st_internal.h
@@ -0,0 +1,109 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_ST_INTERNAL_H_
+#define ODP_RING_ST_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/hints.h>
+#include <odp_align_internal.h>
+
+/* Basic ring for single thread usage. Operations must be synchronized by using
+ * locks (or other means), when multiple threads use the same ring. */
+typedef struct {
+    uint32_t head;
+    uint32_t tail;
+    uint32_t mask;
+    uint32_t *data;
+
+} ring_st_t;
+
+/* Initialize ring. Ring size must be a power of two. */
+static inline void ring_st_init(ring_st_t *ring, uint32_t *data, uint32_t size)
+{
+    ring->head = 0;
+    ring->tail = 0;
+    ring->mask = size - 1;
+    ring->data = data;
+}
+
+/* Dequeue data from the ring head. Max_num is smaller than ring size. */
+static inline uint32_t ring_st_deq_multi(ring_st_t *ring, uint32_t data[],
+                                         uint32_t max_num)
+{
+    uint32_t head, tail, mask, idx;
+    uint32_t num, i;
+
+    head = ring->head;
+    tail = ring->tail;
+    mask = ring->mask;
+    num  = tail - head;
+
+    /* Empty */
+    if (num == 0)
+        return 0;
+
+    if (num > max_num)
+        num = max_num;
+
+    idx = head & mask;
+
+    for (i = 0; i < num; i++) {
+        data[i] = ring->data[idx];
+        idx = (idx + 1) & mask;
+    }
+
+    ring->head = head + num;
+
+    return num;
+}
+
+/* Enqueue data into the ring tail. Num_data is smaller than ring size. */
+static inline uint32_t ring_st_enq_multi(ring_st_t *ring, const uint32_t data[],
+                                         uint32_t num_data)
+{
+    uint32_t head, tail, mask, size, idx;
+    uint32_t num, i;
+
+    head = ring->head;
+    tail = ring->tail;
+    mask = ring->mask;
+    size = mask + 1;
+    num  = size - (tail - head);
+
+    /* Full */
+    if (num == 0)
+        return 0;
+
+    if (num > num_data)
+        num = num_data;
+
+    idx = tail & mask;
+
+    for (i = 0; i < num; i++) {
+        ring->data[idx] = data[i];
+        idx = (idx + 1) & mask;
+    }
+
+    ring->tail = tail + num;
+
+    return num;
+}
+
+/* Check if ring is empty */
+static inline int ring_st_is_empty(ring_st_t *ring)
+{
+    return ring->head == ring->tail;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
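Note: a usage sketch of the new API (assuming the internal header is on the include path; the backing array size must be a power of two):

    #include <stdint.h>
    #include <stdio.h>
    #include <odp_ring_st_internal.h>

    int main(void)
    {
        uint32_t storage[8]; /* power-of-two sized backing array */
        uint32_t in[3] = { 10, 20, 30 };
        uint32_t out[3];
        uint32_t num_enq, num_deq;
        ring_st_t ring;

        ring_st_init(&ring, storage, 8);

        num_enq = ring_st_enq_multi(&ring, in, 3);  /* 3 */
        num_deq = ring_st_deq_multi(&ring, out, 3); /* 3, FIFO order */

        printf("enq %u deq %u first %u\n", num_enq, num_deq, out[0]); /* 3 3 10 */
        printf("empty: %d\n", ring_st_is_empty(&ring));               /* 1 */
        return 0;
    }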
commit 5361c69b7a4ecd50c1b0c30af6d1f08fea28718e
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Feb 16 15:42:58 2018 +0200
linux-gen: queue: inline queue from index conversion
Inline the queue-index-to-queue-handle conversion function.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/platform/linux-generic/include/odp_queue_internal.h b/platform/linux-generic/include/odp_queue_internal.h
index b14f3ea8..8215a818 100644
--- a/platform/linux-generic/include/odp_queue_internal.h
+++ b/platform/linux-generic/include/odp_queue_internal.h
@@ -63,11 +63,16 @@ union queue_entry_u {
     uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct queue_entry_s))];
 };

-static inline uint32_t queue_to_id(odp_queue_t handle)
+static inline uint32_t queue_to_index(odp_queue_t handle)
 {
     return _odp_typeval(handle) - 1;
 }

+static inline odp_queue_t queue_from_index(uint32_t queue_id)
+{
+    return _odp_cast_scalar(odp_queue_t, queue_id + 1);
+}
+
 static inline queue_entry_t *qentry_from_int(queue_t q_int)
 {
     return (queue_entry_t *)(void *)(q_int);

diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c
index 0c0a11ec..2801b220 100644
--- a/platform/linux-generic/odp_queue_basic.c
+++ b/platform/linux-generic/odp_queue_basic.c
@@ -51,26 +51,16 @@ typedef struct queue_global_t {
 static queue_global_t *queue_glb;

-static
-queue_entry_t *get_qentry(uint32_t queue_id);
-
-static inline queue_entry_t *handle_to_qentry(odp_queue_t handle)
+static inline queue_entry_t *get_qentry(uint32_t queue_id)
 {
-    uint32_t queue_id;
-
-    queue_id = queue_to_id(handle);
-    return get_qentry(queue_id);
+    return &queue_glb->queue[queue_id];
 }

-static inline odp_queue_t queue_from_id(uint32_t queue_id)
+static inline queue_entry_t *handle_to_qentry(odp_queue_t handle)
 {
-    return _odp_cast_scalar(odp_queue_t, queue_id + 1);
-}
+    uint32_t queue_id = queue_to_index(handle);

-static
-queue_entry_t *get_qentry(uint32_t queue_id)
-{
-    return &queue_glb->queue[queue_id];
+    return get_qentry(queue_id);
 }

 static int queue_init_global(void)

@@ -98,7 +88,7 @@ static int queue_init_global(void)
     queue_entry_t *queue = get_qentry(i);
     LOCK_INIT(&queue->s.lock);
     queue->s.index = i;
-    queue->s.handle = queue_from_id(i);
+    queue->s.handle = queue_from_index(i);
 }

     lf_func = &queue_glb->queue_lf_func;

@@ -661,7 +651,7 @@ static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
         return -1;
     }

-    queue_id = queue_to_id(handle);
+    queue_id = queue_to_index(handle);

     if (odp_unlikely(queue_id >= ODP_CONFIG_QUEUES)) {
         ODP_ERR("Invalid queue handle:%" PRIu64 "\n",

@@ -689,11 +679,6 @@ static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
     return 0;
 }

-odp_queue_t sched_cb_queue_handle(uint32_t queue_index)
-{
-    return queue_from_id(queue_index);
-}
-
 int sched_cb_queue_deq_multi(uint32_t queue_index, odp_event_t ev[], int num)
 {
     queue_entry_t *qe = get_qentry(queue_index);

diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index 7195eede..e5e1fe60 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -25,10 +25,7 @@
 #include <odp/api/packet_io.h>
 #include <odp_ring_internal.h>
 #include <odp_timer_internal.h>
-
-/* Should remove this dependency */
 #include <odp_queue_internal.h>
-#include <odp_timer_internal.h>

 /* Number of priority levels */
 #define NUM_PRIO 8

@@ -876,7 +873,7 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
             continue;
         }

-        handle = sched_cb_queue_handle(qi);
+        handle = queue_from_index(qi);
         sched_local.num = num;
         sched_local.index = 0;
         sched_local.queue = handle;

diff --git a/platform/linux-generic/odp_schedule_iquery.c b/platform/linux-generic/odp_schedule_iquery.c
index 3ce85394..ea62c364 100644
--- a/platform/linux-generic/odp_schedule_iquery.c
+++ b/platform/linux-generic/odp_schedule_iquery.c
@@ -1541,7 +1541,7 @@ static inline int consume_queue(int prio, unsigned int queue_index)
     cache->top = &cache->stash[0];
     cache->count = count;
-    cache->queue = sched_cb_queue_handle(queue_index);
+    cache->queue = queue_from_index(queue_index);
     return count;
 }

diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index e46ae448..007d673f 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -18,6 +18,7 @@
 #include <odp_config_internal.h>
 #include <odp_ring_internal.h>
 #include <odp_timer_internal.h>
+#include <odp_queue_internal.h>

 #define NUM_THREAD        ODP_THREAD_COUNT_MAX
 #define NUM_QUEUE         ODP_CONFIG_QUEUES

@@ -564,7 +565,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
     sched_local.cmd = cmd;

     if (from)
-        *from = sched_cb_queue_handle(qi);
+        *from = queue_from_index(qi);

     return num;
 }
-----------------------------------------------------------------------
Summary of changes:
 helper/cuckootable.c                               |  22 ++
 helper/test/cuckootable.c                          |   2 +-
 platform/linux-generic/Makefile.am                 |   3 +-
 .../linux-generic/include/odp_buffer_internal.h    |  34 +--
 .../linux-generic/include/odp_config_internal.h    |   2 +-
 platform/linux-generic/include/odp_pool_internal.h |  28 +++
 .../linux-generic/include/odp_queue_internal.h     |  12 +-
 .../linux-generic/include/odp_ring_st_internal.h   | 109 +++++++++
 platform/linux-generic/odp_packet.c                |   3 +-
 platform/linux-generic/odp_pool.c                  |  22 +-
 platform/linux-generic/odp_queue_basic.c           | 267 +++++++++------------
 platform/linux-generic/odp_schedule_basic.c        |   5 +-
 platform/linux-generic/odp_schedule_iquery.c       |   2 +-
 platform/linux-generic/odp_schedule_sp.c           |   3 +-
 test/performance/odp_l2fwd.c                       | 234 +++++++++---------
 test/performance/odp_pktio_ordered.c               |  44 +++-
 test/validation/api/scheduler/scheduler.c          |  21 +-
 17 files changed, 476 insertions(+), 337 deletions(-)
 create mode 100644 platform/linux-generic/include/odp_ring_st_internal.h
hooks/post-receive