1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2d30ea906Sjfb8856606 * Copyright(c) 2016-2017 Intel Corporation
32bfe3f2eSlogwang */
42bfe3f2eSlogwang
52bfe3f2eSlogwang #include <inttypes.h>
62bfe3f2eSlogwang #include <string.h>
72bfe3f2eSlogwang
82bfe3f2eSlogwang #include <rte_bus_vdev.h>
92bfe3f2eSlogwang #include <rte_kvargs.h>
102bfe3f2eSlogwang #include <rte_ring.h>
112bfe3f2eSlogwang #include <rte_errno.h>
122bfe3f2eSlogwang #include <rte_event_ring.h>
132bfe3f2eSlogwang #include <rte_service_component.h>
142bfe3f2eSlogwang
152bfe3f2eSlogwang #include "sw_evdev.h"
16d30ea906Sjfb8856606 #include "iq_chunk.h"
17*2d9fd380Sjfb8856606 #include "event_ring.h"
182bfe3f2eSlogwang
192bfe3f2eSlogwang #define EVENTDEV_NAME_SW_PMD event_sw
202bfe3f2eSlogwang #define NUMA_NODE_ARG "numa_node"
212bfe3f2eSlogwang #define SCHED_QUANTA_ARG "sched_quanta"
222bfe3f2eSlogwang #define CREDIT_QUANTA_ARG "credit_quanta"
23*2d9fd380Sjfb8856606 #define MIN_BURST_SIZE_ARG "min_burst"
24*2d9fd380Sjfb8856606 #define DEQ_BURST_SIZE_ARG "deq_burst"
25*2d9fd380Sjfb8856606 #define REFIL_ONCE_ARG "refill_once"
262bfe3f2eSlogwang
272bfe3f2eSlogwang static void
282bfe3f2eSlogwang sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
292bfe3f2eSlogwang
/* Link queues to a port: eventdev port_link callback.
 *
 * Returns the number of queues successfully linked. On the first queue
 * that cannot be linked the loop stops with rte_errno set to EDQUOT.
 * Per-link priorities are not supported by this PMD and are ignored.
 */
static int
sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t num)
{
	struct sw_port *p = port;
	struct sw_evdev *sw = sw_pmd_priv(dev);
	int i;

	RTE_SET_USED(priorities);
	for (i = 0; i < num; i++) {
		struct sw_qid *q = &sw->qids[queues[i]];
		unsigned int j;

		/* check for qid map overflow */
		if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
			rte_errno = EDQUOT;
			break;
		}

		/* a port already carrying a directed flow takes nothing else */
		if (p->is_directed && p->num_qids_mapped > 0) {
			rte_errno = EDQUOT;
			break;
		}

		/* scan the qid's CQ map for an existing entry for this port */
		for (j = 0; j < q->cq_num_mapped_cqs; j++) {
			if (q->cq_map[j] == p->id)
				break;
		}

		/* check if port is already linked */
		if (j < q->cq_num_mapped_cqs)
			continue;

		if (q->type == SW_SCHED_TYPE_DIRECT) {
			/* check directed qids only map to one port */
			if (p->num_qids_mapped > 0) {
				rte_errno = EDQUOT;
				break;
			}
			/* check port only takes a directed flow */
			if (num > 1) {
				rte_errno = EDQUOT;
				break;
			}

			p->is_directed = 1;
			p->num_qids_mapped = 1;
		} else if (q->type == RTE_SCHED_TYPE_ORDERED) {
			p->num_ordered_qids++;
			p->num_qids_mapped++;
		} else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
				q->type == RTE_SCHED_TYPE_PARALLEL) {
			p->num_qids_mapped++;
		}

		/* Write the new map slot first; the write barrier orders the
		 * slot store before the count increment, so a reader of
		 * cq_num_mapped_cqs never observes an uninitialized slot.
		 */
		q->cq_map[q->cq_num_mapped_cqs] = p->id;
		rte_smp_wmb();
		q->cq_num_mapped_cqs++;
	}
	return i;
}
912bfe3f2eSlogwang
/* Unlink queues from a port: eventdev port_unlink callback.
 *
 * Removes this port's entry from each listed qid's CQ map via a
 * swap-with-last removal. Returns the number of links removed and adds
 * that count to p->unlinks_in_progress (presumably decremented elsewhere
 * once the unlinks take effect; queried via sw_port_unlinks_in_progress()
 * -- confirm against the scheduler code).
 */
static int
sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
		uint16_t nb_unlinks)
{
	struct sw_port *p = port;
	struct sw_evdev *sw = sw_pmd_priv(dev);
	unsigned int i, j;

	int unlinked = 0;
	for (i = 0; i < nb_unlinks; i++) {
		struct sw_qid *q = &sw->qids[queues[i]];
		for (j = 0; j < q->cq_num_mapped_cqs; j++) {
			if (q->cq_map[j] == p->id) {
				/* Swap the last map entry into this slot; the
				 * barrier orders the slot store before the
				 * count decrement.
				 */
				q->cq_map[j] =
					q->cq_map[q->cq_num_mapped_cqs - 1];
				rte_smp_wmb();
				q->cq_num_mapped_cqs--;
				unlinked++;

				p->num_qids_mapped--;

				if (q->type == RTE_SCHED_TYPE_ORDERED)
					p->num_ordered_qids--;

				/* NOTE(review): continue moves past the
				 * swapped-in entry without re-checking it;
				 * this assumes a port appears at most once
				 * per qid map -- verify against sw_port_link.
				 */
				continue;
			}
		}
	}

	p->unlinks_in_progress += unlinked;
	rte_smp_mb();

	return unlinked;
}
1262bfe3f2eSlogwang
1272bfe3f2eSlogwang static int
sw_port_unlinks_in_progress(struct rte_eventdev * dev,void * port)128d30ea906Sjfb8856606 sw_port_unlinks_in_progress(struct rte_eventdev *dev, void *port)
129d30ea906Sjfb8856606 {
130d30ea906Sjfb8856606 RTE_SET_USED(dev);
131d30ea906Sjfb8856606 struct sw_port *p = port;
132d30ea906Sjfb8856606 return p->unlinks_in_progress;
133d30ea906Sjfb8856606 }
134d30ea906Sjfb8856606
135d30ea906Sjfb8856606 static int
sw_port_setup(struct rte_eventdev * dev,uint8_t port_id,const struct rte_event_port_conf * conf)1362bfe3f2eSlogwang sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
1372bfe3f2eSlogwang const struct rte_event_port_conf *conf)
1382bfe3f2eSlogwang {
1392bfe3f2eSlogwang struct sw_evdev *sw = sw_pmd_priv(dev);
1402bfe3f2eSlogwang struct sw_port *p = &sw->ports[port_id];
1412bfe3f2eSlogwang char buf[RTE_RING_NAMESIZE];
1422bfe3f2eSlogwang unsigned int i;
1432bfe3f2eSlogwang
1442bfe3f2eSlogwang struct rte_event_dev_info info;
1452bfe3f2eSlogwang sw_info_get(dev, &info);
1462bfe3f2eSlogwang
1472bfe3f2eSlogwang /* detect re-configuring and return credits to instance if needed */
1482bfe3f2eSlogwang if (p->initialized) {
1492bfe3f2eSlogwang /* taking credits from pool is done one quanta at a time, and
1502bfe3f2eSlogwang * credits may be spend (counted in p->inflights) or still
1512bfe3f2eSlogwang * available in the port (p->inflight_credits). We must return
1522bfe3f2eSlogwang * the sum to no leak credits
1532bfe3f2eSlogwang */
1542bfe3f2eSlogwang int possible_inflights = p->inflight_credits + p->inflights;
1552bfe3f2eSlogwang rte_atomic32_sub(&sw->inflights, possible_inflights);
1562bfe3f2eSlogwang }
1572bfe3f2eSlogwang
1582bfe3f2eSlogwang *p = (struct sw_port){0}; /* zero entire structure */
1592bfe3f2eSlogwang p->id = port_id;
1602bfe3f2eSlogwang p->sw = sw;
1612bfe3f2eSlogwang
1622bfe3f2eSlogwang /* check to see if rings exists - port_setup() can be called multiple
1632bfe3f2eSlogwang * times legally (assuming device is stopped). If ring exists, free it
1642bfe3f2eSlogwang * to so it gets re-created with the correct size
1652bfe3f2eSlogwang */
1662bfe3f2eSlogwang snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
1672bfe3f2eSlogwang port_id, "rx_worker_ring");
1682bfe3f2eSlogwang struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
1692bfe3f2eSlogwang if (existing_ring)
1702bfe3f2eSlogwang rte_event_ring_free(existing_ring);
1712bfe3f2eSlogwang
1722bfe3f2eSlogwang p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
1732bfe3f2eSlogwang dev->data->socket_id,
1742bfe3f2eSlogwang RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
1752bfe3f2eSlogwang if (p->rx_worker_ring == NULL) {
1762bfe3f2eSlogwang SW_LOG_ERR("Error creating RX worker ring for port %d\n",
1772bfe3f2eSlogwang port_id);
1782bfe3f2eSlogwang return -1;
1792bfe3f2eSlogwang }
1802bfe3f2eSlogwang
1812bfe3f2eSlogwang p->inflight_max = conf->new_event_threshold;
182*2d9fd380Sjfb8856606 p->implicit_release = !(conf->event_port_cfg &
183*2d9fd380Sjfb8856606 RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
1842bfe3f2eSlogwang
1852bfe3f2eSlogwang /* check if ring exists, same as rx_worker above */
1862bfe3f2eSlogwang snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
1872bfe3f2eSlogwang port_id, "cq_worker_ring");
1882bfe3f2eSlogwang existing_ring = rte_event_ring_lookup(buf);
1892bfe3f2eSlogwang if (existing_ring)
1902bfe3f2eSlogwang rte_event_ring_free(existing_ring);
1912bfe3f2eSlogwang
1922bfe3f2eSlogwang p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
1932bfe3f2eSlogwang dev->data->socket_id,
1942bfe3f2eSlogwang RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
1952bfe3f2eSlogwang if (p->cq_worker_ring == NULL) {
1962bfe3f2eSlogwang rte_event_ring_free(p->rx_worker_ring);
1972bfe3f2eSlogwang SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
1982bfe3f2eSlogwang port_id);
1992bfe3f2eSlogwang return -1;
2002bfe3f2eSlogwang }
2012bfe3f2eSlogwang sw->cq_ring_space[port_id] = conf->dequeue_depth;
2022bfe3f2eSlogwang
2032bfe3f2eSlogwang /* set hist list contents to empty */
2042bfe3f2eSlogwang for (i = 0; i < SW_PORT_HIST_LIST; i++) {
2052bfe3f2eSlogwang p->hist_list[i].fid = -1;
2062bfe3f2eSlogwang p->hist_list[i].qid = -1;
2072bfe3f2eSlogwang }
2082bfe3f2eSlogwang dev->data->ports[port_id] = p;
2092bfe3f2eSlogwang
2102bfe3f2eSlogwang rte_smp_wmb();
2112bfe3f2eSlogwang p->initialized = 1;
2122bfe3f2eSlogwang return 0;
2132bfe3f2eSlogwang }
2142bfe3f2eSlogwang
2152bfe3f2eSlogwang static void
sw_port_release(void * port)2162bfe3f2eSlogwang sw_port_release(void *port)
2172bfe3f2eSlogwang {
2182bfe3f2eSlogwang struct sw_port *p = (void *)port;
2192bfe3f2eSlogwang if (p == NULL)
2202bfe3f2eSlogwang return;
2212bfe3f2eSlogwang
2222bfe3f2eSlogwang rte_event_ring_free(p->rx_worker_ring);
2232bfe3f2eSlogwang rte_event_ring_free(p->cq_worker_ring);
2242bfe3f2eSlogwang memset(p, 0, sizeof(*p));
2252bfe3f2eSlogwang }
2262bfe3f2eSlogwang
2272bfe3f2eSlogwang static int32_t
qid_init(struct sw_evdev * sw,unsigned int idx,int type,const struct rte_event_queue_conf * queue_conf)2282bfe3f2eSlogwang qid_init(struct sw_evdev *sw, unsigned int idx, int type,
2292bfe3f2eSlogwang const struct rte_event_queue_conf *queue_conf)
2302bfe3f2eSlogwang {
2312bfe3f2eSlogwang unsigned int i;
2322bfe3f2eSlogwang int dev_id = sw->data->dev_id;
2332bfe3f2eSlogwang int socket_id = sw->data->socket_id;
234d30ea906Sjfb8856606 char buf[IQ_ROB_NAMESIZE];
2352bfe3f2eSlogwang struct sw_qid *qid = &sw->qids[idx];
2362bfe3f2eSlogwang
2372bfe3f2eSlogwang /* Initialize the FID structures to no pinning (-1), and zero packets */
2382bfe3f2eSlogwang const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
2392bfe3f2eSlogwang for (i = 0; i < RTE_DIM(qid->fids); i++)
2402bfe3f2eSlogwang qid->fids[i] = fid;
2412bfe3f2eSlogwang
2422bfe3f2eSlogwang qid->id = idx;
2432bfe3f2eSlogwang qid->type = type;
2442bfe3f2eSlogwang qid->priority = queue_conf->priority;
2452bfe3f2eSlogwang
2462bfe3f2eSlogwang if (qid->type == RTE_SCHED_TYPE_ORDERED) {
2472bfe3f2eSlogwang uint32_t window_size;
2482bfe3f2eSlogwang
2492bfe3f2eSlogwang /* rte_ring and window_size_mask require require window_size to
2502bfe3f2eSlogwang * be a power-of-2.
2512bfe3f2eSlogwang */
2522bfe3f2eSlogwang window_size = rte_align32pow2(
2532bfe3f2eSlogwang queue_conf->nb_atomic_order_sequences);
2542bfe3f2eSlogwang
2552bfe3f2eSlogwang qid->window_size = window_size - 1;
2562bfe3f2eSlogwang
2572bfe3f2eSlogwang if (!window_size) {
2582bfe3f2eSlogwang SW_LOG_DBG(
2592bfe3f2eSlogwang "invalid reorder_window_size for ordered queue\n"
2602bfe3f2eSlogwang );
2612bfe3f2eSlogwang goto cleanup;
2622bfe3f2eSlogwang }
2632bfe3f2eSlogwang
2642bfe3f2eSlogwang snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
2652bfe3f2eSlogwang qid->reorder_buffer = rte_zmalloc_socket(buf,
2662bfe3f2eSlogwang window_size * sizeof(qid->reorder_buffer[0]),
2672bfe3f2eSlogwang 0, socket_id);
2682bfe3f2eSlogwang if (!qid->reorder_buffer) {
2692bfe3f2eSlogwang SW_LOG_DBG("reorder_buffer malloc failed\n");
2702bfe3f2eSlogwang goto cleanup;
2712bfe3f2eSlogwang }
2722bfe3f2eSlogwang
2732bfe3f2eSlogwang memset(&qid->reorder_buffer[0],
2742bfe3f2eSlogwang 0,
2752bfe3f2eSlogwang window_size * sizeof(qid->reorder_buffer[0]));
2762bfe3f2eSlogwang
277*2d9fd380Sjfb8856606 qid->reorder_buffer_freelist = rob_ring_create(window_size,
278*2d9fd380Sjfb8856606 socket_id);
2792bfe3f2eSlogwang if (!qid->reorder_buffer_freelist) {
2802bfe3f2eSlogwang SW_LOG_DBG("freelist ring create failed");
2812bfe3f2eSlogwang goto cleanup;
2822bfe3f2eSlogwang }
2832bfe3f2eSlogwang
2842bfe3f2eSlogwang /* Populate the freelist with reorder buffer entries. Enqueue
2852bfe3f2eSlogwang * 'window_size - 1' entries because the rte_ring holds only
2862bfe3f2eSlogwang * that many.
2872bfe3f2eSlogwang */
2882bfe3f2eSlogwang for (i = 0; i < window_size - 1; i++) {
289*2d9fd380Sjfb8856606 if (rob_ring_enqueue(qid->reorder_buffer_freelist,
290*2d9fd380Sjfb8856606 &qid->reorder_buffer[i]) != 1)
2912bfe3f2eSlogwang goto cleanup;
2922bfe3f2eSlogwang }
2932bfe3f2eSlogwang
2942bfe3f2eSlogwang qid->reorder_buffer_index = 0;
2952bfe3f2eSlogwang qid->cq_next_tx = 0;
2962bfe3f2eSlogwang }
2972bfe3f2eSlogwang
2982bfe3f2eSlogwang qid->initialized = 1;
2992bfe3f2eSlogwang
3002bfe3f2eSlogwang return 0;
3012bfe3f2eSlogwang
3022bfe3f2eSlogwang cleanup:
3032bfe3f2eSlogwang if (qid->reorder_buffer) {
3042bfe3f2eSlogwang rte_free(qid->reorder_buffer);
3052bfe3f2eSlogwang qid->reorder_buffer = NULL;
3062bfe3f2eSlogwang }
3072bfe3f2eSlogwang
3082bfe3f2eSlogwang if (qid->reorder_buffer_freelist) {
309*2d9fd380Sjfb8856606 rob_ring_free(qid->reorder_buffer_freelist);
3102bfe3f2eSlogwang qid->reorder_buffer_freelist = NULL;
3112bfe3f2eSlogwang }
3122bfe3f2eSlogwang
3132bfe3f2eSlogwang return -EINVAL;
3142bfe3f2eSlogwang }
3152bfe3f2eSlogwang
3162bfe3f2eSlogwang static void
sw_queue_release(struct rte_eventdev * dev,uint8_t id)3172bfe3f2eSlogwang sw_queue_release(struct rte_eventdev *dev, uint8_t id)
3182bfe3f2eSlogwang {
3192bfe3f2eSlogwang struct sw_evdev *sw = sw_pmd_priv(dev);
3202bfe3f2eSlogwang struct sw_qid *qid = &sw->qids[id];
3212bfe3f2eSlogwang
3222bfe3f2eSlogwang if (qid->type == RTE_SCHED_TYPE_ORDERED) {
3232bfe3f2eSlogwang rte_free(qid->reorder_buffer);
324*2d9fd380Sjfb8856606 rob_ring_free(qid->reorder_buffer_freelist);
3252bfe3f2eSlogwang }
3262bfe3f2eSlogwang memset(qid, 0, sizeof(*qid));
3272bfe3f2eSlogwang }
3282bfe3f2eSlogwang
3292bfe3f2eSlogwang static int
sw_queue_setup(struct rte_eventdev * dev,uint8_t queue_id,const struct rte_event_queue_conf * conf)3302bfe3f2eSlogwang sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
3312bfe3f2eSlogwang const struct rte_event_queue_conf *conf)
3322bfe3f2eSlogwang {
3332bfe3f2eSlogwang int type;
3342bfe3f2eSlogwang
3352bfe3f2eSlogwang type = conf->schedule_type;
3362bfe3f2eSlogwang
3372bfe3f2eSlogwang if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
3382bfe3f2eSlogwang type = SW_SCHED_TYPE_DIRECT;
3392bfe3f2eSlogwang } else if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
3402bfe3f2eSlogwang & conf->event_queue_cfg) {
3412bfe3f2eSlogwang SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
3422bfe3f2eSlogwang return -ENOTSUP;
3432bfe3f2eSlogwang }
3442bfe3f2eSlogwang
3452bfe3f2eSlogwang struct sw_evdev *sw = sw_pmd_priv(dev);
3462bfe3f2eSlogwang
3472bfe3f2eSlogwang if (sw->qids[queue_id].initialized)
3482bfe3f2eSlogwang sw_queue_release(dev, queue_id);
3492bfe3f2eSlogwang
3502bfe3f2eSlogwang return qid_init(sw, queue_id, type, conf);
3512bfe3f2eSlogwang }
3522bfe3f2eSlogwang
3532bfe3f2eSlogwang static void
sw_init_qid_iqs(struct sw_evdev * sw)354d30ea906Sjfb8856606 sw_init_qid_iqs(struct sw_evdev *sw)
355d30ea906Sjfb8856606 {
356d30ea906Sjfb8856606 int i, j;
357d30ea906Sjfb8856606
358d30ea906Sjfb8856606 /* Initialize the IQ memory of all configured qids */
359d30ea906Sjfb8856606 for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
360d30ea906Sjfb8856606 struct sw_qid *qid = &sw->qids[i];
361d30ea906Sjfb8856606
362d30ea906Sjfb8856606 if (!qid->initialized)
363d30ea906Sjfb8856606 continue;
364d30ea906Sjfb8856606
365d30ea906Sjfb8856606 for (j = 0; j < SW_IQS_MAX; j++)
366d30ea906Sjfb8856606 iq_init(sw, &qid->iq[j]);
367d30ea906Sjfb8856606 }
368d30ea906Sjfb8856606 }
369d30ea906Sjfb8856606
370d30ea906Sjfb8856606 static int
sw_qids_empty(struct sw_evdev * sw)371d30ea906Sjfb8856606 sw_qids_empty(struct sw_evdev *sw)
372d30ea906Sjfb8856606 {
373d30ea906Sjfb8856606 unsigned int i, j;
374d30ea906Sjfb8856606
375d30ea906Sjfb8856606 for (i = 0; i < sw->qid_count; i++) {
376d30ea906Sjfb8856606 for (j = 0; j < SW_IQS_MAX; j++) {
377d30ea906Sjfb8856606 if (iq_count(&sw->qids[i].iq[j]))
378d30ea906Sjfb8856606 return 0;
379d30ea906Sjfb8856606 }
380d30ea906Sjfb8856606 }
381d30ea906Sjfb8856606
382d30ea906Sjfb8856606 return 1;
383d30ea906Sjfb8856606 }
384d30ea906Sjfb8856606
385d30ea906Sjfb8856606 static int
sw_ports_empty(struct sw_evdev * sw)386d30ea906Sjfb8856606 sw_ports_empty(struct sw_evdev *sw)
387d30ea906Sjfb8856606 {
388d30ea906Sjfb8856606 unsigned int i;
389d30ea906Sjfb8856606
390d30ea906Sjfb8856606 for (i = 0; i < sw->port_count; i++) {
391d30ea906Sjfb8856606 if ((rte_event_ring_count(sw->ports[i].rx_worker_ring)) ||
392d30ea906Sjfb8856606 rte_event_ring_count(sw->ports[i].cq_worker_ring))
393d30ea906Sjfb8856606 return 0;
394d30ea906Sjfb8856606 }
395d30ea906Sjfb8856606
396d30ea906Sjfb8856606 return 1;
397d30ea906Sjfb8856606 }
398d30ea906Sjfb8856606
399d30ea906Sjfb8856606 static void
sw_drain_ports(struct rte_eventdev * dev)400d30ea906Sjfb8856606 sw_drain_ports(struct rte_eventdev *dev)
401d30ea906Sjfb8856606 {
402d30ea906Sjfb8856606 struct sw_evdev *sw = sw_pmd_priv(dev);
403d30ea906Sjfb8856606 eventdev_stop_flush_t flush;
404d30ea906Sjfb8856606 unsigned int i;
405d30ea906Sjfb8856606 uint8_t dev_id;
406d30ea906Sjfb8856606 void *arg;
407d30ea906Sjfb8856606
408d30ea906Sjfb8856606 flush = dev->dev_ops->dev_stop_flush;
409d30ea906Sjfb8856606 dev_id = dev->data->dev_id;
410d30ea906Sjfb8856606 arg = dev->data->dev_stop_flush_arg;
411d30ea906Sjfb8856606
412d30ea906Sjfb8856606 for (i = 0; i < sw->port_count; i++) {
413d30ea906Sjfb8856606 struct rte_event ev;
414d30ea906Sjfb8856606
415d30ea906Sjfb8856606 while (rte_event_dequeue_burst(dev_id, i, &ev, 1, 0)) {
416d30ea906Sjfb8856606 if (flush)
417d30ea906Sjfb8856606 flush(dev_id, ev, arg);
418d30ea906Sjfb8856606
419d30ea906Sjfb8856606 ev.op = RTE_EVENT_OP_RELEASE;
420d30ea906Sjfb8856606 rte_event_enqueue_burst(dev_id, i, &ev, 1);
421d30ea906Sjfb8856606 }
422d30ea906Sjfb8856606 }
423d30ea906Sjfb8856606 }
424d30ea906Sjfb8856606
425d30ea906Sjfb8856606 static void
sw_drain_queue(struct rte_eventdev * dev,struct sw_iq * iq)426d30ea906Sjfb8856606 sw_drain_queue(struct rte_eventdev *dev, struct sw_iq *iq)
427d30ea906Sjfb8856606 {
428d30ea906Sjfb8856606 struct sw_evdev *sw = sw_pmd_priv(dev);
429d30ea906Sjfb8856606 eventdev_stop_flush_t flush;
430d30ea906Sjfb8856606 uint8_t dev_id;
431d30ea906Sjfb8856606 void *arg;
432d30ea906Sjfb8856606
433d30ea906Sjfb8856606 flush = dev->dev_ops->dev_stop_flush;
434d30ea906Sjfb8856606 dev_id = dev->data->dev_id;
435d30ea906Sjfb8856606 arg = dev->data->dev_stop_flush_arg;
436d30ea906Sjfb8856606
437d30ea906Sjfb8856606 while (iq_count(iq) > 0) {
438d30ea906Sjfb8856606 struct rte_event ev;
439d30ea906Sjfb8856606
440d30ea906Sjfb8856606 iq_dequeue_burst(sw, iq, &ev, 1);
441d30ea906Sjfb8856606
442d30ea906Sjfb8856606 if (flush)
443d30ea906Sjfb8856606 flush(dev_id, ev, arg);
444d30ea906Sjfb8856606 }
445d30ea906Sjfb8856606 }
446d30ea906Sjfb8856606
447d30ea906Sjfb8856606 static void
sw_drain_queues(struct rte_eventdev * dev)448d30ea906Sjfb8856606 sw_drain_queues(struct rte_eventdev *dev)
449d30ea906Sjfb8856606 {
450d30ea906Sjfb8856606 struct sw_evdev *sw = sw_pmd_priv(dev);
451d30ea906Sjfb8856606 unsigned int i, j;
452d30ea906Sjfb8856606
453d30ea906Sjfb8856606 for (i = 0; i < sw->qid_count; i++) {
454d30ea906Sjfb8856606 for (j = 0; j < SW_IQS_MAX; j++)
455d30ea906Sjfb8856606 sw_drain_queue(dev, &sw->qids[i].iq[j]);
456d30ea906Sjfb8856606 }
457d30ea906Sjfb8856606 }
458d30ea906Sjfb8856606
459d30ea906Sjfb8856606 static void
sw_clean_qid_iqs(struct rte_eventdev * dev)460d30ea906Sjfb8856606 sw_clean_qid_iqs(struct rte_eventdev *dev)
461d30ea906Sjfb8856606 {
462d30ea906Sjfb8856606 struct sw_evdev *sw = sw_pmd_priv(dev);
463d30ea906Sjfb8856606 int i, j;
464d30ea906Sjfb8856606
465d30ea906Sjfb8856606 /* Release the IQ memory of all configured qids */
466d30ea906Sjfb8856606 for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
467d30ea906Sjfb8856606 struct sw_qid *qid = &sw->qids[i];
468d30ea906Sjfb8856606
469d30ea906Sjfb8856606 for (j = 0; j < SW_IQS_MAX; j++) {
470d30ea906Sjfb8856606 if (!qid->iq[j].head)
471d30ea906Sjfb8856606 continue;
472d30ea906Sjfb8856606 iq_free_chunk_list(sw, qid->iq[j].head);
473d30ea906Sjfb8856606 qid->iq[j].head = NULL;
474d30ea906Sjfb8856606 }
475d30ea906Sjfb8856606 }
476d30ea906Sjfb8856606 }
477d30ea906Sjfb8856606
478d30ea906Sjfb8856606 static void
sw_queue_def_conf(struct rte_eventdev * dev,uint8_t queue_id,struct rte_event_queue_conf * conf)4792bfe3f2eSlogwang sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
4802bfe3f2eSlogwang struct rte_event_queue_conf *conf)
4812bfe3f2eSlogwang {
4822bfe3f2eSlogwang RTE_SET_USED(dev);
4832bfe3f2eSlogwang RTE_SET_USED(queue_id);
4842bfe3f2eSlogwang
4852bfe3f2eSlogwang static const struct rte_event_queue_conf default_conf = {
4862bfe3f2eSlogwang .nb_atomic_flows = 4096,
4872bfe3f2eSlogwang .nb_atomic_order_sequences = 1,
4882bfe3f2eSlogwang .schedule_type = RTE_SCHED_TYPE_ATOMIC,
4892bfe3f2eSlogwang .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
4902bfe3f2eSlogwang };
4912bfe3f2eSlogwang
4922bfe3f2eSlogwang *conf = default_conf;
4932bfe3f2eSlogwang }
4942bfe3f2eSlogwang
4952bfe3f2eSlogwang static void
sw_port_def_conf(struct rte_eventdev * dev,uint8_t port_id,struct rte_event_port_conf * port_conf)4962bfe3f2eSlogwang sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
4972bfe3f2eSlogwang struct rte_event_port_conf *port_conf)
4982bfe3f2eSlogwang {
4992bfe3f2eSlogwang RTE_SET_USED(dev);
5002bfe3f2eSlogwang RTE_SET_USED(port_id);
5012bfe3f2eSlogwang
5022bfe3f2eSlogwang port_conf->new_event_threshold = 1024;
5032bfe3f2eSlogwang port_conf->dequeue_depth = 16;
5042bfe3f2eSlogwang port_conf->enqueue_depth = 16;
505*2d9fd380Sjfb8856606 port_conf->event_port_cfg = 0;
5062bfe3f2eSlogwang }
5072bfe3f2eSlogwang
5082bfe3f2eSlogwang static int
sw_dev_configure(const struct rte_eventdev * dev)5092bfe3f2eSlogwang sw_dev_configure(const struct rte_eventdev *dev)
5102bfe3f2eSlogwang {
5112bfe3f2eSlogwang struct sw_evdev *sw = sw_pmd_priv(dev);
5122bfe3f2eSlogwang const struct rte_eventdev_data *data = dev->data;
5132bfe3f2eSlogwang const struct rte_event_dev_config *conf = &data->dev_conf;
514d30ea906Sjfb8856606 int num_chunks, i;
5152bfe3f2eSlogwang
5162bfe3f2eSlogwang sw->qid_count = conf->nb_event_queues;
5172bfe3f2eSlogwang sw->port_count = conf->nb_event_ports;
5182bfe3f2eSlogwang sw->nb_events_limit = conf->nb_events_limit;
5192bfe3f2eSlogwang rte_atomic32_set(&sw->inflights, 0);
5202bfe3f2eSlogwang
521d30ea906Sjfb8856606 /* Number of chunks sized for worst-case spread of events across IQs */
522d30ea906Sjfb8856606 num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
523d30ea906Sjfb8856606 sw->qid_count*SW_IQS_MAX*2;
524d30ea906Sjfb8856606
525d30ea906Sjfb8856606 /* If this is a reconfiguration, free the previous IQ allocation. All
526d30ea906Sjfb8856606 * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
527d30ea906Sjfb8856606 * will be reinitialized in sw_start().
528d30ea906Sjfb8856606 */
529d30ea906Sjfb8856606 if (sw->chunks)
530d30ea906Sjfb8856606 rte_free(sw->chunks);
531d30ea906Sjfb8856606
532d30ea906Sjfb8856606 sw->chunks = rte_malloc_socket(NULL,
533d30ea906Sjfb8856606 sizeof(struct sw_queue_chunk) *
534d30ea906Sjfb8856606 num_chunks,
535d30ea906Sjfb8856606 0,
536d30ea906Sjfb8856606 sw->data->socket_id);
537d30ea906Sjfb8856606 if (!sw->chunks)
538d30ea906Sjfb8856606 return -ENOMEM;
539d30ea906Sjfb8856606
540d30ea906Sjfb8856606 sw->chunk_list_head = NULL;
541d30ea906Sjfb8856606 for (i = 0; i < num_chunks; i++)
542d30ea906Sjfb8856606 iq_free_chunk(sw, &sw->chunks[i]);
543d30ea906Sjfb8856606
5442bfe3f2eSlogwang if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
5452bfe3f2eSlogwang return -ENOTSUP;
5462bfe3f2eSlogwang
5472bfe3f2eSlogwang return 0;
5482bfe3f2eSlogwang }
5492bfe3f2eSlogwang
5502bfe3f2eSlogwang struct rte_eth_dev;
5512bfe3f2eSlogwang
5522bfe3f2eSlogwang static int
sw_eth_rx_adapter_caps_get(const struct rte_eventdev * dev,const struct rte_eth_dev * eth_dev,uint32_t * caps)5532bfe3f2eSlogwang sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
5542bfe3f2eSlogwang const struct rte_eth_dev *eth_dev,
5552bfe3f2eSlogwang uint32_t *caps)
5562bfe3f2eSlogwang {
5572bfe3f2eSlogwang RTE_SET_USED(dev);
5582bfe3f2eSlogwang RTE_SET_USED(eth_dev);
5592bfe3f2eSlogwang *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
5602bfe3f2eSlogwang return 0;
5612bfe3f2eSlogwang }
5622bfe3f2eSlogwang
563d30ea906Sjfb8856606 static int
sw_timer_adapter_caps_get(const struct rte_eventdev * dev,uint64_t flags,uint32_t * caps,const struct rte_event_timer_adapter_ops ** ops)564d30ea906Sjfb8856606 sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
565d30ea906Sjfb8856606 uint64_t flags,
566d30ea906Sjfb8856606 uint32_t *caps,
567d30ea906Sjfb8856606 const struct rte_event_timer_adapter_ops **ops)
568d30ea906Sjfb8856606 {
569d30ea906Sjfb8856606 RTE_SET_USED(dev);
570d30ea906Sjfb8856606 RTE_SET_USED(flags);
571d30ea906Sjfb8856606 *caps = 0;
572d30ea906Sjfb8856606
573d30ea906Sjfb8856606 /* Use default SW ops */
574d30ea906Sjfb8856606 *ops = NULL;
575d30ea906Sjfb8856606
576d30ea906Sjfb8856606 return 0;
577d30ea906Sjfb8856606 }
578d30ea906Sjfb8856606
579d30ea906Sjfb8856606 static int
sw_crypto_adapter_caps_get(const struct rte_eventdev * dev,const struct rte_cryptodev * cdev,uint32_t * caps)580d30ea906Sjfb8856606 sw_crypto_adapter_caps_get(const struct rte_eventdev *dev,
581d30ea906Sjfb8856606 const struct rte_cryptodev *cdev,
582d30ea906Sjfb8856606 uint32_t *caps)
583d30ea906Sjfb8856606 {
584d30ea906Sjfb8856606 RTE_SET_USED(dev);
585d30ea906Sjfb8856606 RTE_SET_USED(cdev);
586d30ea906Sjfb8856606 *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
587d30ea906Sjfb8856606 return 0;
588d30ea906Sjfb8856606 }
589d30ea906Sjfb8856606
5902bfe3f2eSlogwang static void
sw_info_get(struct rte_eventdev * dev,struct rte_event_dev_info * info)5912bfe3f2eSlogwang sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
5922bfe3f2eSlogwang {
5932bfe3f2eSlogwang RTE_SET_USED(dev);
5942bfe3f2eSlogwang
5952bfe3f2eSlogwang static const struct rte_event_dev_info evdev_sw_info = {
5962bfe3f2eSlogwang .driver_name = SW_PMD_NAME,
5972bfe3f2eSlogwang .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
5982bfe3f2eSlogwang .max_event_queue_flows = SW_QID_NUM_FIDS,
5992bfe3f2eSlogwang .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
6002bfe3f2eSlogwang .max_event_priority_levels = SW_IQS_MAX,
6012bfe3f2eSlogwang .max_event_ports = SW_PORTS_MAX,
6022bfe3f2eSlogwang .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
6032bfe3f2eSlogwang .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
6042bfe3f2eSlogwang .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
605d30ea906Sjfb8856606 .event_dev_cap = (
606d30ea906Sjfb8856606 RTE_EVENT_DEV_CAP_QUEUE_QOS |
6072bfe3f2eSlogwang RTE_EVENT_DEV_CAP_BURST_MODE |
608d30ea906Sjfb8856606 RTE_EVENT_DEV_CAP_EVENT_QOS |
609d30ea906Sjfb8856606 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
610d30ea906Sjfb8856606 RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
611d30ea906Sjfb8856606 RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
612*2d9fd380Sjfb8856606 RTE_EVENT_DEV_CAP_NONSEQ_MODE |
613*2d9fd380Sjfb8856606 RTE_EVENT_DEV_CAP_CARRY_FLOW_ID),
6142bfe3f2eSlogwang };
6152bfe3f2eSlogwang
6162bfe3f2eSlogwang *info = evdev_sw_info;
6172bfe3f2eSlogwang }
6182bfe3f2eSlogwang
6192bfe3f2eSlogwang static void
sw_dump(struct rte_eventdev * dev,FILE * f)6202bfe3f2eSlogwang sw_dump(struct rte_eventdev *dev, FILE *f)
6212bfe3f2eSlogwang {
6222bfe3f2eSlogwang const struct sw_evdev *sw = sw_pmd_priv(dev);
6232bfe3f2eSlogwang
6242bfe3f2eSlogwang static const char * const q_type_strings[] = {
6252bfe3f2eSlogwang "Ordered", "Atomic", "Parallel", "Directed"
6262bfe3f2eSlogwang };
6272bfe3f2eSlogwang uint32_t i;
6282bfe3f2eSlogwang fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name",
6292bfe3f2eSlogwang sw->port_count, sw->qid_count);
6302bfe3f2eSlogwang
6312bfe3f2eSlogwang fprintf(f, "\trx %"PRIu64"\n\tdrop %"PRIu64"\n\ttx %"PRIu64"\n",
6322bfe3f2eSlogwang sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts);
6332bfe3f2eSlogwang fprintf(f, "\tsched calls: %"PRIu64"\n", sw->sched_called);
6342bfe3f2eSlogwang fprintf(f, "\tsched cq/qid call: %"PRIu64"\n", sw->sched_cq_qid_called);
6352bfe3f2eSlogwang fprintf(f, "\tsched no IQ enq: %"PRIu64"\n", sw->sched_no_iq_enqueues);
6362bfe3f2eSlogwang fprintf(f, "\tsched no CQ enq: %"PRIu64"\n", sw->sched_no_cq_enqueues);
6372bfe3f2eSlogwang uint32_t inflights = rte_atomic32_read(&sw->inflights);
6382bfe3f2eSlogwang uint32_t credits = sw->nb_events_limit - inflights;
6392bfe3f2eSlogwang fprintf(f, "\tinflight %d, credits: %d\n", inflights, credits);
6402bfe3f2eSlogwang
6412bfe3f2eSlogwang #define COL_RED "\x1b[31m"
6422bfe3f2eSlogwang #define COL_RESET "\x1b[0m"
6432bfe3f2eSlogwang
6442bfe3f2eSlogwang for (i = 0; i < sw->port_count; i++) {
6452bfe3f2eSlogwang int max, j;
6462bfe3f2eSlogwang const struct sw_port *p = &sw->ports[i];
6472bfe3f2eSlogwang if (!p->initialized) {
6482bfe3f2eSlogwang fprintf(f, " %sPort %d not initialized.%s\n",
6492bfe3f2eSlogwang COL_RED, i, COL_RESET);
6502bfe3f2eSlogwang continue;
6512bfe3f2eSlogwang }
6522bfe3f2eSlogwang fprintf(f, " Port %d %s\n", i,
6532bfe3f2eSlogwang p->is_directed ? " (SingleCons)" : "");
6542bfe3f2eSlogwang fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64
6552bfe3f2eSlogwang "\t%sinflight %d%s\n", sw->ports[i].stats.rx_pkts,
6562bfe3f2eSlogwang sw->ports[i].stats.rx_dropped,
6572bfe3f2eSlogwang sw->ports[i].stats.tx_pkts,
6582bfe3f2eSlogwang (p->inflights == p->inflight_max) ?
6592bfe3f2eSlogwang COL_RED : COL_RESET,
6602bfe3f2eSlogwang sw->ports[i].inflights, COL_RESET);
6612bfe3f2eSlogwang
6622bfe3f2eSlogwang fprintf(f, "\tMax New: %u"
6632bfe3f2eSlogwang "\tAvg cycles PP: %"PRIu64"\tCredits: %u\n",
6642bfe3f2eSlogwang sw->ports[i].inflight_max,
6652bfe3f2eSlogwang sw->ports[i].avg_pkt_ticks,
6662bfe3f2eSlogwang sw->ports[i].inflight_credits);
6672bfe3f2eSlogwang fprintf(f, "\tReceive burst distribution:\n");
6682bfe3f2eSlogwang float zp_percent = p->zero_polls * 100.0 / p->total_polls;
6692bfe3f2eSlogwang fprintf(f, zp_percent < 10 ? "\t\t0:%.02f%% " : "\t\t0:%.0f%% ",
6702bfe3f2eSlogwang zp_percent);
6712bfe3f2eSlogwang for (max = (int)RTE_DIM(p->poll_buckets); max-- > 0;)
6722bfe3f2eSlogwang if (p->poll_buckets[max] != 0)
6732bfe3f2eSlogwang break;
6742bfe3f2eSlogwang for (j = 0; j <= max; j++) {
6752bfe3f2eSlogwang if (p->poll_buckets[j] != 0) {
6762bfe3f2eSlogwang float poll_pc = p->poll_buckets[j] * 100.0 /
6772bfe3f2eSlogwang p->total_polls;
6782bfe3f2eSlogwang fprintf(f, "%u-%u:%.02f%% ",
6792bfe3f2eSlogwang ((j << SW_DEQ_STAT_BUCKET_SHIFT) + 1),
6802bfe3f2eSlogwang ((j+1) << SW_DEQ_STAT_BUCKET_SHIFT),
6812bfe3f2eSlogwang poll_pc);
6822bfe3f2eSlogwang }
6832bfe3f2eSlogwang }
6842bfe3f2eSlogwang fprintf(f, "\n");
6852bfe3f2eSlogwang
6862bfe3f2eSlogwang if (p->rx_worker_ring) {
6872bfe3f2eSlogwang uint64_t used = rte_event_ring_count(p->rx_worker_ring);
6882bfe3f2eSlogwang uint64_t space = rte_event_ring_free_count(
6892bfe3f2eSlogwang p->rx_worker_ring);
6902bfe3f2eSlogwang const char *col = (space == 0) ? COL_RED : COL_RESET;
6912bfe3f2eSlogwang fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
6922bfe3f2eSlogwang PRIu64 COL_RESET"\n", col, used, space);
6932bfe3f2eSlogwang } else
6942bfe3f2eSlogwang fprintf(f, "\trx ring not initialized.\n");
6952bfe3f2eSlogwang
6962bfe3f2eSlogwang if (p->cq_worker_ring) {
6972bfe3f2eSlogwang uint64_t used = rte_event_ring_count(p->cq_worker_ring);
6982bfe3f2eSlogwang uint64_t space = rte_event_ring_free_count(
6992bfe3f2eSlogwang p->cq_worker_ring);
7002bfe3f2eSlogwang const char *col = (space == 0) ? COL_RED : COL_RESET;
7012bfe3f2eSlogwang fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
7022bfe3f2eSlogwang PRIu64 COL_RESET"\n", col, used, space);
7032bfe3f2eSlogwang } else
7042bfe3f2eSlogwang fprintf(f, "\tcq ring not initialized.\n");
7052bfe3f2eSlogwang }
7062bfe3f2eSlogwang
7072bfe3f2eSlogwang for (i = 0; i < sw->qid_count; i++) {
7082bfe3f2eSlogwang const struct sw_qid *qid = &sw->qids[i];
7092bfe3f2eSlogwang if (!qid->initialized) {
7102bfe3f2eSlogwang fprintf(f, " %sQueue %d not initialized.%s\n",
7112bfe3f2eSlogwang COL_RED, i, COL_RESET);
7122bfe3f2eSlogwang continue;
7132bfe3f2eSlogwang }
7142bfe3f2eSlogwang int affinities_per_port[SW_PORTS_MAX] = {0};
7152bfe3f2eSlogwang uint32_t inflights = 0;
7162bfe3f2eSlogwang
7172bfe3f2eSlogwang fprintf(f, " Queue %d (%s)\n", i, q_type_strings[qid->type]);
7182bfe3f2eSlogwang fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64"\n",
7192bfe3f2eSlogwang qid->stats.rx_pkts, qid->stats.rx_dropped,
7202bfe3f2eSlogwang qid->stats.tx_pkts);
7212bfe3f2eSlogwang if (qid->type == RTE_SCHED_TYPE_ORDERED) {
722*2d9fd380Sjfb8856606 struct rob_ring *rob_buf_free =
7232bfe3f2eSlogwang qid->reorder_buffer_freelist;
7242bfe3f2eSlogwang if (rob_buf_free)
7252bfe3f2eSlogwang fprintf(f, "\tReorder entries in use: %u\n",
726*2d9fd380Sjfb8856606 rob_ring_free_count(rob_buf_free));
7272bfe3f2eSlogwang else
7282bfe3f2eSlogwang fprintf(f,
7292bfe3f2eSlogwang "\tReorder buffer not initialized\n");
7302bfe3f2eSlogwang }
7312bfe3f2eSlogwang
7322bfe3f2eSlogwang uint32_t flow;
7332bfe3f2eSlogwang for (flow = 0; flow < RTE_DIM(qid->fids); flow++)
7342bfe3f2eSlogwang if (qid->fids[flow].cq != -1) {
7352bfe3f2eSlogwang affinities_per_port[qid->fids[flow].cq]++;
7362bfe3f2eSlogwang inflights += qid->fids[flow].pcount;
7372bfe3f2eSlogwang }
7382bfe3f2eSlogwang
7392bfe3f2eSlogwang uint32_t port;
7402bfe3f2eSlogwang fprintf(f, "\tPer Port Stats:\n");
7412bfe3f2eSlogwang for (port = 0; port < sw->port_count; port++) {
7422bfe3f2eSlogwang fprintf(f, "\t Port %d: Pkts: %"PRIu64, port,
7432bfe3f2eSlogwang qid->to_port[port]);
7442bfe3f2eSlogwang fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
7452bfe3f2eSlogwang }
7462bfe3f2eSlogwang
7472bfe3f2eSlogwang uint32_t iq;
7482bfe3f2eSlogwang uint32_t iq_printed = 0;
7492bfe3f2eSlogwang for (iq = 0; iq < SW_IQS_MAX; iq++) {
750d30ea906Sjfb8856606 if (!qid->iq[iq].head) {
7512bfe3f2eSlogwang fprintf(f, "\tiq %d is not initialized.\n", iq);
7522bfe3f2eSlogwang iq_printed = 1;
7532bfe3f2eSlogwang continue;
7542bfe3f2eSlogwang }
755d30ea906Sjfb8856606 uint32_t used = iq_count(&qid->iq[iq]);
756d30ea906Sjfb8856606 const char *col = COL_RESET;
7572bfe3f2eSlogwang if (used > 0) {
758d30ea906Sjfb8856606 fprintf(f, "\t%siq %d: Used %d"
759d30ea906Sjfb8856606 COL_RESET"\n", col, iq, used);
7602bfe3f2eSlogwang iq_printed = 1;
7612bfe3f2eSlogwang }
7622bfe3f2eSlogwang }
7632bfe3f2eSlogwang if (iq_printed == 0)
7642bfe3f2eSlogwang fprintf(f, "\t-- iqs empty --\n");
7652bfe3f2eSlogwang }
7662bfe3f2eSlogwang }
7672bfe3f2eSlogwang
7682bfe3f2eSlogwang static int
sw_start(struct rte_eventdev * dev)7692bfe3f2eSlogwang sw_start(struct rte_eventdev *dev)
7702bfe3f2eSlogwang {
7712bfe3f2eSlogwang unsigned int i, j;
7722bfe3f2eSlogwang struct sw_evdev *sw = sw_pmd_priv(dev);
7732bfe3f2eSlogwang
7742bfe3f2eSlogwang rte_service_component_runstate_set(sw->service_id, 1);
7752bfe3f2eSlogwang
7762bfe3f2eSlogwang /* check a service core is mapped to this service */
7772bfe3f2eSlogwang if (!rte_service_runstate_get(sw->service_id)) {
7782bfe3f2eSlogwang SW_LOG_ERR("Warning: No Service core enabled on service %s\n",
7792bfe3f2eSlogwang sw->service_name);
7802bfe3f2eSlogwang return -ENOENT;
7812bfe3f2eSlogwang }
7822bfe3f2eSlogwang
7832bfe3f2eSlogwang /* check all ports are set up */
7842bfe3f2eSlogwang for (i = 0; i < sw->port_count; i++)
7852bfe3f2eSlogwang if (sw->ports[i].rx_worker_ring == NULL) {
7862bfe3f2eSlogwang SW_LOG_ERR("Port %d not configured\n", i);
7872bfe3f2eSlogwang return -ESTALE;
7882bfe3f2eSlogwang }
7892bfe3f2eSlogwang
7902bfe3f2eSlogwang /* check all queues are configured and mapped to ports*/
7912bfe3f2eSlogwang for (i = 0; i < sw->qid_count; i++)
792d30ea906Sjfb8856606 if (!sw->qids[i].initialized ||
7932bfe3f2eSlogwang sw->qids[i].cq_num_mapped_cqs == 0) {
7942bfe3f2eSlogwang SW_LOG_ERR("Queue %d not configured\n", i);
7952bfe3f2eSlogwang return -ENOLINK;
7962bfe3f2eSlogwang }
7972bfe3f2eSlogwang
7982bfe3f2eSlogwang /* build up our prioritized array of qids */
7992bfe3f2eSlogwang /* We don't use qsort here, as if all/multiple entries have the same
8002bfe3f2eSlogwang * priority, the result is non-deterministic. From "man 3 qsort":
8012bfe3f2eSlogwang * "If two members compare as equal, their order in the sorted
8022bfe3f2eSlogwang * array is undefined."
8032bfe3f2eSlogwang */
8042bfe3f2eSlogwang uint32_t qidx = 0;
8052bfe3f2eSlogwang for (j = 0; j <= RTE_EVENT_DEV_PRIORITY_LOWEST; j++) {
8062bfe3f2eSlogwang for (i = 0; i < sw->qid_count; i++) {
8072bfe3f2eSlogwang if (sw->qids[i].priority == j) {
8082bfe3f2eSlogwang sw->qids_prioritized[qidx] = &sw->qids[i];
8092bfe3f2eSlogwang qidx++;
8102bfe3f2eSlogwang }
8112bfe3f2eSlogwang }
8122bfe3f2eSlogwang }
8132bfe3f2eSlogwang
814d30ea906Sjfb8856606 sw_init_qid_iqs(sw);
815d30ea906Sjfb8856606
8162bfe3f2eSlogwang if (sw_xstats_init(sw) < 0)
8172bfe3f2eSlogwang return -EINVAL;
8182bfe3f2eSlogwang
8192bfe3f2eSlogwang rte_smp_wmb();
8202bfe3f2eSlogwang sw->started = 1;
8212bfe3f2eSlogwang
8222bfe3f2eSlogwang return 0;
8232bfe3f2eSlogwang }
8242bfe3f2eSlogwang
/* Stop the device: quiesce the scheduler service, flush every in-flight
 * event out of the queues and ports, then tear down xstats and clear the
 * started flag. The original service runstate is restored at the end so
 * a later sw_start() sees the service as before.
 */
static void
sw_stop(struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	int32_t runstate;

	/* Stop the scheduler if it's running */
	runstate = rte_service_runstate_get(sw->service_id);
	if (runstate == 1)
		rte_service_runstate_set(sw->service_id, 0);

	/* wait for any in-progress scheduler iteration to finish */
	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	/* Flush all events out of the device */
	while (!(sw_qids_empty(sw) && sw_ports_empty(sw))) {
		sw_event_schedule(dev);
		sw_drain_ports(dev);
		sw_drain_queues(dev);
	}

	sw_clean_qid_iqs(dev);
	sw_xstats_uninit(sw);
	sw->started = 0;
	/* ensure started == 0 is visible before restoring the runstate */
	rte_smp_wmb();

	/* restore the service runstate captured on entry */
	if (runstate == 1)
		rte_service_runstate_set(sw->service_id, 1);
}
8542bfe3f2eSlogwang
8552bfe3f2eSlogwang static int
sw_close(struct rte_eventdev * dev)8562bfe3f2eSlogwang sw_close(struct rte_eventdev *dev)
8572bfe3f2eSlogwang {
8582bfe3f2eSlogwang struct sw_evdev *sw = sw_pmd_priv(dev);
8592bfe3f2eSlogwang uint32_t i;
8602bfe3f2eSlogwang
8612bfe3f2eSlogwang for (i = 0; i < sw->qid_count; i++)
8622bfe3f2eSlogwang sw_queue_release(dev, i);
8632bfe3f2eSlogwang sw->qid_count = 0;
8642bfe3f2eSlogwang
8652bfe3f2eSlogwang for (i = 0; i < sw->port_count; i++)
8662bfe3f2eSlogwang sw_port_release(&sw->ports[i]);
8672bfe3f2eSlogwang sw->port_count = 0;
8682bfe3f2eSlogwang
8692bfe3f2eSlogwang memset(&sw->stats, 0, sizeof(sw->stats));
8702bfe3f2eSlogwang sw->sched_called = 0;
8712bfe3f2eSlogwang sw->sched_no_iq_enqueues = 0;
8722bfe3f2eSlogwang sw->sched_no_cq_enqueues = 0;
8732bfe3f2eSlogwang sw->sched_cq_qid_called = 0;
8742bfe3f2eSlogwang
8752bfe3f2eSlogwang return 0;
8762bfe3f2eSlogwang }
8772bfe3f2eSlogwang
8782bfe3f2eSlogwang static int
assign_numa_node(const char * key __rte_unused,const char * value,void * opaque)8792bfe3f2eSlogwang assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
8802bfe3f2eSlogwang {
8812bfe3f2eSlogwang int *socket_id = opaque;
8822bfe3f2eSlogwang *socket_id = atoi(value);
8832bfe3f2eSlogwang if (*socket_id >= RTE_MAX_NUMA_NODES)
8842bfe3f2eSlogwang return -1;
8852bfe3f2eSlogwang return 0;
8862bfe3f2eSlogwang }
8872bfe3f2eSlogwang
8882bfe3f2eSlogwang static int
set_sched_quanta(const char * key __rte_unused,const char * value,void * opaque)8892bfe3f2eSlogwang set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
8902bfe3f2eSlogwang {
8912bfe3f2eSlogwang int *quanta = opaque;
8922bfe3f2eSlogwang *quanta = atoi(value);
8932bfe3f2eSlogwang if (*quanta < 0 || *quanta >= 4096)
8942bfe3f2eSlogwang return -1;
8952bfe3f2eSlogwang return 0;
8962bfe3f2eSlogwang }
8972bfe3f2eSlogwang
8982bfe3f2eSlogwang static int
set_credit_quanta(const char * key __rte_unused,const char * value,void * opaque)8992bfe3f2eSlogwang set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
9002bfe3f2eSlogwang {
9012bfe3f2eSlogwang int *credit = opaque;
9022bfe3f2eSlogwang *credit = atoi(value);
9032bfe3f2eSlogwang if (*credit < 0 || *credit >= 128)
9042bfe3f2eSlogwang return -1;
9052bfe3f2eSlogwang return 0;
9062bfe3f2eSlogwang }
9072bfe3f2eSlogwang
908*2d9fd380Sjfb8856606 static int
set_deq_burst_sz(const char * key __rte_unused,const char * value,void * opaque)909*2d9fd380Sjfb8856606 set_deq_burst_sz(const char *key __rte_unused, const char *value, void *opaque)
910*2d9fd380Sjfb8856606 {
911*2d9fd380Sjfb8856606 int *deq_burst_sz = opaque;
912*2d9fd380Sjfb8856606 *deq_burst_sz = atoi(value);
913*2d9fd380Sjfb8856606 if (*deq_burst_sz < 0 || *deq_burst_sz > SCHED_DEQUEUE_MAX_BURST_SIZE)
914*2d9fd380Sjfb8856606 return -1;
915*2d9fd380Sjfb8856606 return 0;
916*2d9fd380Sjfb8856606 }
917*2d9fd380Sjfb8856606
918*2d9fd380Sjfb8856606 static int
set_min_burst_sz(const char * key __rte_unused,const char * value,void * opaque)919*2d9fd380Sjfb8856606 set_min_burst_sz(const char *key __rte_unused, const char *value, void *opaque)
920*2d9fd380Sjfb8856606 {
921*2d9fd380Sjfb8856606 int *min_burst_sz = opaque;
922*2d9fd380Sjfb8856606 *min_burst_sz = atoi(value);
923*2d9fd380Sjfb8856606 if (*min_burst_sz < 0 || *min_burst_sz > SCHED_DEQUEUE_MAX_BURST_SIZE)
924*2d9fd380Sjfb8856606 return -1;
925*2d9fd380Sjfb8856606 return 0;
926*2d9fd380Sjfb8856606 }
927*2d9fd380Sjfb8856606
928*2d9fd380Sjfb8856606 static int
set_refill_once(const char * key __rte_unused,const char * value,void * opaque)929*2d9fd380Sjfb8856606 set_refill_once(const char *key __rte_unused, const char *value, void *opaque)
930*2d9fd380Sjfb8856606 {
931*2d9fd380Sjfb8856606 int *refill_once_per_call = opaque;
932*2d9fd380Sjfb8856606 *refill_once_per_call = atoi(value);
933*2d9fd380Sjfb8856606 if (*refill_once_per_call < 0 || *refill_once_per_call > 1)
934*2d9fd380Sjfb8856606 return -1;
935*2d9fd380Sjfb8856606 return 0;
936*2d9fd380Sjfb8856606 }
9372bfe3f2eSlogwang
/* EAL service callback: run one scheduler iteration for the device
 * passed as the callback userdata. Always returns 0.
 */
static int32_t sw_sched_service_func(void *args)
{
	sw_event_schedule((struct rte_eventdev *)args);
	return 0;
}
9442bfe3f2eSlogwang
/* Probe/create an event_sw vdev instance.
 *
 * Parses the optional vdev arguments (numa_node, sched_quanta,
 * credit_quanta, min_burst, deq_burst, refill_once), allocates the
 * eventdev on the requested socket, wires up the ops table and the
 * enqueue/dequeue entry points, and registers the scheduler as an EAL
 * service component.
 *
 * Returns 0 on success or a negative errno-style value on failure.
 */
static int
sw_probe(struct rte_vdev_device *vdev)
{
	/* ops table shared by every event_sw instance (hence static) */
	static struct rte_eventdev_ops evdev_sw_ops = {
			.dev_configure = sw_dev_configure,
			.dev_infos_get = sw_info_get,
			.dev_close = sw_close,
			.dev_start = sw_start,
			.dev_stop = sw_stop,
			.dump = sw_dump,

			.queue_def_conf = sw_queue_def_conf,
			.queue_setup = sw_queue_setup,
			.queue_release = sw_queue_release,
			.port_def_conf = sw_port_def_conf,
			.port_setup = sw_port_setup,
			.port_release = sw_port_release,
			.port_link = sw_port_link,
			.port_unlink = sw_port_unlink,
			.port_unlinks_in_progress = sw_port_unlinks_in_progress,

			.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,

			.timer_adapter_caps_get = sw_timer_adapter_caps_get,

			.crypto_adapter_caps_get = sw_crypto_adapter_caps_get,

			.xstats_get = sw_xstats_get,
			.xstats_get_names = sw_xstats_get_names,
			.xstats_get_by_name = sw_xstats_get_by_name,
			.xstats_reset = sw_xstats_reset,

			.dev_selftest = test_sw_eventdev,
	};

	/* accepted vdev argument keys; NULL-terminated for rte_kvargs_parse */
	static const char *const args[] = {
		NUMA_NODE_ARG,
		SCHED_QUANTA_ARG,
		CREDIT_QUANTA_ARG,
		MIN_BURST_SIZE_ARG,
		DEQ_BURST_SIZE_ARG,
		REFIL_ONCE_ARG,
		NULL
	};
	const char *name;
	const char *params;
	struct rte_eventdev *dev;
	struct sw_evdev *sw;
	/* defaults used when the corresponding vdev argument is absent */
	int socket_id = rte_socket_id();
	int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
	int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
	int min_burst_size = 1;
	int deq_burst_size = SCHED_DEQUEUE_DEFAULT_BURST_SIZE;
	int refill_once = 0;

	name = rte_vdev_device_name(vdev);
	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			/* unparsable args are ignored, not fatal */
			SW_LOG_INFO(
				"Ignoring unsupported parameters when creating device '%s'\n",
				name);
		} else {
			/* each recognized key overrides its default above;
			 * any parse error aborts the probe
			 */
			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
					assign_numa_node, &socket_id);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing numa node parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
					set_sched_quanta, &sched_quanta);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing sched quanta parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
					set_credit_quanta, &credit_quanta);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing credit quanta parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, MIN_BURST_SIZE_ARG,
					set_min_burst_sz, &min_burst_size);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing minimum burst size parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DEQ_BURST_SIZE_ARG,
					set_deq_burst_sz, &deq_burst_size);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing dequeue burst size parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, REFIL_ONCE_ARG,
					set_refill_once, &refill_once);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing refill once per call switch",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			rte_kvargs_free(kvlist);
		}
	}

	SW_LOG_INFO(
			"Creating eventdev sw device %s, numa_node=%d, "
			"sched_quanta=%d, credit_quanta=%d "
			"min_burst=%d, deq_burst=%d, refill_once=%d\n",
			name, socket_id, sched_quanta, credit_quanta,
			min_burst_size, deq_burst_size, refill_once);

	dev = rte_event_pmd_vdev_init(name,
			sizeof(struct sw_evdev), socket_id);
	if (dev == NULL) {
		SW_LOG_ERR("eventdev vdev init() failed");
		return -EFAULT;
	}
	dev->dev_ops = &evdev_sw_ops;
	dev->enqueue = sw_event_enqueue;
	dev->enqueue_burst = sw_event_enqueue_burst;
	/* new/forward bursts share the generic burst enqueue path */
	dev->enqueue_new_burst = sw_event_enqueue_burst;
	dev->enqueue_forward_burst = sw_event_enqueue_burst;
	dev->dequeue = sw_event_dequeue;
	dev->dequeue_burst = sw_event_dequeue_burst;

	/* secondary processes only attach; private data is set up below
	 * by the primary process
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	sw = dev->data->dev_private;
	sw->data = dev->data;

	/* copy values passed from vdev command line to instance */
	sw->credit_update_quanta = credit_quanta;
	sw->sched_quanta = sched_quanta;
	sw->sched_min_burst_size = min_burst_size;
	sw->sched_deq_burst_size = deq_burst_size;
	sw->refill_once_per_iter = refill_once;

	/* register the scheduler as a service with EAL so a service core
	 * (or the application) can drive sw_event_schedule()
	 */
	struct rte_service_spec service;
	memset(&service, 0, sizeof(struct rte_service_spec));
	snprintf(service.name, sizeof(service.name), "%s_service", name);
	snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
			name);
	service.socket_id = socket_id;
	service.callback = sw_sched_service_func;
	service.callback_userdata = (void *)dev;

	int32_t ret = rte_service_component_register(&service, &sw->service_id);
	if (ret) {
		SW_LOG_ERR("service register() failed");
		return -ENOEXEC;
	}

	dev->data->service_inited = 1;
	dev->data->service_id = sw->service_id;

	return 0;
}
11292bfe3f2eSlogwang
11302bfe3f2eSlogwang static int
sw_remove(struct rte_vdev_device * vdev)11312bfe3f2eSlogwang sw_remove(struct rte_vdev_device *vdev)
11322bfe3f2eSlogwang {
11332bfe3f2eSlogwang const char *name;
11342bfe3f2eSlogwang
11352bfe3f2eSlogwang name = rte_vdev_device_name(vdev);
11362bfe3f2eSlogwang if (name == NULL)
11372bfe3f2eSlogwang return -EINVAL;
11382bfe3f2eSlogwang
11392bfe3f2eSlogwang SW_LOG_INFO("Closing eventdev sw device %s\n", name);
11402bfe3f2eSlogwang
11412bfe3f2eSlogwang return rte_event_pmd_vdev_uninit(name);
11422bfe3f2eSlogwang }
11432bfe3f2eSlogwang
/* Virtual device driver hooks for the software eventdev PMD */
static struct rte_vdev_driver evdev_sw_pmd_drv = {
	.probe = sw_probe,
	.remove = sw_remove
};

/* Register the driver, advertise its accepted vdev parameters, and
 * register the PMD's log type (default level NOTICE).
 */
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
		SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>"
		MIN_BURST_SIZE_ARG "=<int>" DEQ_BURST_SIZE_ARG "=<int>"
		REFIL_ONCE_ARG "=<int>");
RTE_LOG_REGISTER(eventdev_sw_log_level, pmd.event.sw, NOTICE);