/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_mbuf.h>

#include "ethdev_rx_priv.h"
#include "node_private.h"

/* Per-process list of (node id -> port/queue) bindings, filled in by the
 * control path and consumed by ethdev_rx_node_init().
 */
static struct ethdev_rx_node_main ethdev_rx_main;

/*
 * Core RX work for one invocation of the "ethdev_rx" source node.
 *
 * Pulls up to RTE_GRAPH_BURST_SIZE packets from the (port, queue) pair
 * recorded in @ctx directly into node->objs, then hands the whole burst
 * to the next node selected by ctx->cls_next.
 *
 * @param graph
 *   Graph the node belongs to (needed for the stream move).
 * @param node
 *   This node; node->objs is used as the rte_eth_rx_burst() output array.
 * @param ctx
 *   Node context carrying port_id, queue_id and the next-edge index.
 * @return
 *   Number of packets received (0 if the queue was empty).
 */
static __rte_always_inline uint16_t
ethdev_rx_node_process_inline(struct rte_graph *graph, struct rte_node *node,
			      ethdev_rx_node_ctx_t *ctx)
{
	uint16_t count, next_index;
	uint16_t port, queue;

	port = ctx->port_id;
	queue = ctx->queue_id;
	next_index = ctx->cls_next;

	/* Get pkts from port */
	count = rte_eth_rx_burst(port, queue, (struct rte_mbuf **)node->objs,
				 RTE_GRAPH_BURST_SIZE);

	if (!count)
		return 0;
	node->idx = count;
	/* Enqueue to next node */
	rte_node_next_stream_move(graph, node, next_index);

	return count;
}
Richardson 41*99a2dd95SBruce Richardson static __rte_always_inline uint16_t 42*99a2dd95SBruce Richardson ethdev_rx_node_process(struct rte_graph *graph, struct rte_node *node, 43*99a2dd95SBruce Richardson void **objs, uint16_t cnt) 44*99a2dd95SBruce Richardson { 45*99a2dd95SBruce Richardson ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx; 46*99a2dd95SBruce Richardson uint16_t n_pkts = 0; 47*99a2dd95SBruce Richardson 48*99a2dd95SBruce Richardson RTE_SET_USED(objs); 49*99a2dd95SBruce Richardson RTE_SET_USED(cnt); 50*99a2dd95SBruce Richardson 51*99a2dd95SBruce Richardson n_pkts = ethdev_rx_node_process_inline(graph, node, ctx); 52*99a2dd95SBruce Richardson return n_pkts; 53*99a2dd95SBruce Richardson } 54*99a2dd95SBruce Richardson 55*99a2dd95SBruce Richardson static inline uint32_t 56*99a2dd95SBruce Richardson l3_ptype(uint16_t etype, uint32_t ptype) 57*99a2dd95SBruce Richardson { 58*99a2dd95SBruce Richardson ptype = ptype & ~RTE_PTYPE_L3_MASK; 59*99a2dd95SBruce Richardson if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) 60*99a2dd95SBruce Richardson ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; 61*99a2dd95SBruce Richardson else if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) 62*99a2dd95SBruce Richardson ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; 63*99a2dd95SBruce Richardson return ptype; 64*99a2dd95SBruce Richardson } 65*99a2dd95SBruce Richardson 66*99a2dd95SBruce Richardson /* Callback for soft ptype parsing */ 67*99a2dd95SBruce Richardson static uint16_t 68*99a2dd95SBruce Richardson eth_pkt_parse_cb(uint16_t port, uint16_t queue, struct rte_mbuf **mbufs, 69*99a2dd95SBruce Richardson uint16_t nb_pkts, uint16_t max_pkts, void *user_param) 70*99a2dd95SBruce Richardson { 71*99a2dd95SBruce Richardson struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3; 72*99a2dd95SBruce Richardson struct rte_ether_hdr *eth_hdr; 73*99a2dd95SBruce Richardson uint16_t etype, n_left; 74*99a2dd95SBruce Richardson struct rte_mbuf **pkts; 75*99a2dd95SBruce Richardson 76*99a2dd95SBruce 
Richardson RTE_SET_USED(port); 77*99a2dd95SBruce Richardson RTE_SET_USED(queue); 78*99a2dd95SBruce Richardson RTE_SET_USED(max_pkts); 79*99a2dd95SBruce Richardson RTE_SET_USED(user_param); 80*99a2dd95SBruce Richardson 81*99a2dd95SBruce Richardson pkts = mbufs; 82*99a2dd95SBruce Richardson n_left = nb_pkts; 83*99a2dd95SBruce Richardson while (n_left >= 12) { 84*99a2dd95SBruce Richardson 85*99a2dd95SBruce Richardson /* Prefetch next-next mbufs */ 86*99a2dd95SBruce Richardson rte_prefetch0(pkts[8]); 87*99a2dd95SBruce Richardson rte_prefetch0(pkts[9]); 88*99a2dd95SBruce Richardson rte_prefetch0(pkts[10]); 89*99a2dd95SBruce Richardson rte_prefetch0(pkts[11]); 90*99a2dd95SBruce Richardson 91*99a2dd95SBruce Richardson /* Prefetch next mbuf data */ 92*99a2dd95SBruce Richardson rte_prefetch0( 93*99a2dd95SBruce Richardson rte_pktmbuf_mtod(pkts[4], struct rte_ether_hdr *)); 94*99a2dd95SBruce Richardson rte_prefetch0( 95*99a2dd95SBruce Richardson rte_pktmbuf_mtod(pkts[5], struct rte_ether_hdr *)); 96*99a2dd95SBruce Richardson rte_prefetch0( 97*99a2dd95SBruce Richardson rte_pktmbuf_mtod(pkts[6], struct rte_ether_hdr *)); 98*99a2dd95SBruce Richardson rte_prefetch0( 99*99a2dd95SBruce Richardson rte_pktmbuf_mtod(pkts[7], struct rte_ether_hdr *)); 100*99a2dd95SBruce Richardson 101*99a2dd95SBruce Richardson mbuf0 = pkts[0]; 102*99a2dd95SBruce Richardson mbuf1 = pkts[1]; 103*99a2dd95SBruce Richardson mbuf2 = pkts[2]; 104*99a2dd95SBruce Richardson mbuf3 = pkts[3]; 105*99a2dd95SBruce Richardson pkts += 4; 106*99a2dd95SBruce Richardson n_left -= 4; 107*99a2dd95SBruce Richardson 108*99a2dd95SBruce Richardson /* Extract ptype of mbuf0 */ 109*99a2dd95SBruce Richardson eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *); 110*99a2dd95SBruce Richardson etype = eth_hdr->ether_type; 111*99a2dd95SBruce Richardson mbuf0->packet_type = l3_ptype(etype, 0); 112*99a2dd95SBruce Richardson 113*99a2dd95SBruce Richardson /* Extract ptype of mbuf1 */ 114*99a2dd95SBruce Richardson eth_hdr = 
rte_pktmbuf_mtod(mbuf1, struct rte_ether_hdr *); 115*99a2dd95SBruce Richardson etype = eth_hdr->ether_type; 116*99a2dd95SBruce Richardson mbuf1->packet_type = l3_ptype(etype, 0); 117*99a2dd95SBruce Richardson 118*99a2dd95SBruce Richardson /* Extract ptype of mbuf2 */ 119*99a2dd95SBruce Richardson eth_hdr = rte_pktmbuf_mtod(mbuf2, struct rte_ether_hdr *); 120*99a2dd95SBruce Richardson etype = eth_hdr->ether_type; 121*99a2dd95SBruce Richardson mbuf2->packet_type = l3_ptype(etype, 0); 122*99a2dd95SBruce Richardson 123*99a2dd95SBruce Richardson /* Extract ptype of mbuf3 */ 124*99a2dd95SBruce Richardson eth_hdr = rte_pktmbuf_mtod(mbuf3, struct rte_ether_hdr *); 125*99a2dd95SBruce Richardson etype = eth_hdr->ether_type; 126*99a2dd95SBruce Richardson mbuf3->packet_type = l3_ptype(etype, 0); 127*99a2dd95SBruce Richardson } 128*99a2dd95SBruce Richardson 129*99a2dd95SBruce Richardson while (n_left > 0) { 130*99a2dd95SBruce Richardson mbuf0 = pkts[0]; 131*99a2dd95SBruce Richardson 132*99a2dd95SBruce Richardson pkts += 1; 133*99a2dd95SBruce Richardson n_left -= 1; 134*99a2dd95SBruce Richardson 135*99a2dd95SBruce Richardson /* Extract ptype of mbuf0 */ 136*99a2dd95SBruce Richardson eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *); 137*99a2dd95SBruce Richardson etype = eth_hdr->ether_type; 138*99a2dd95SBruce Richardson mbuf0->packet_type = l3_ptype(etype, 0); 139*99a2dd95SBruce Richardson } 140*99a2dd95SBruce Richardson 141*99a2dd95SBruce Richardson return nb_pkts; 142*99a2dd95SBruce Richardson } 143*99a2dd95SBruce Richardson 144*99a2dd95SBruce Richardson #define MAX_PTYPES 16 145*99a2dd95SBruce Richardson static int 146*99a2dd95SBruce Richardson ethdev_ptype_setup(uint16_t port, uint16_t queue) 147*99a2dd95SBruce Richardson { 148*99a2dd95SBruce Richardson uint8_t l3_ipv4 = 0, l3_ipv6 = 0; 149*99a2dd95SBruce Richardson uint32_t ptypes[MAX_PTYPES]; 150*99a2dd95SBruce Richardson int i, rc; 151*99a2dd95SBruce Richardson 152*99a2dd95SBruce Richardson /* Check IPv4 & IPv6 
ptype support */ 153*99a2dd95SBruce Richardson rc = rte_eth_dev_get_supported_ptypes(port, RTE_PTYPE_L3_MASK, ptypes, 154*99a2dd95SBruce Richardson MAX_PTYPES); 155*99a2dd95SBruce Richardson for (i = 0; i < rc; i++) { 156*99a2dd95SBruce Richardson if (ptypes[i] & RTE_PTYPE_L3_IPV4) 157*99a2dd95SBruce Richardson l3_ipv4 = 1; 158*99a2dd95SBruce Richardson if (ptypes[i] & RTE_PTYPE_L3_IPV6) 159*99a2dd95SBruce Richardson l3_ipv6 = 1; 160*99a2dd95SBruce Richardson } 161*99a2dd95SBruce Richardson 162*99a2dd95SBruce Richardson if (!l3_ipv4 || !l3_ipv6) { 163*99a2dd95SBruce Richardson node_info("ethdev_rx", 164*99a2dd95SBruce Richardson "Enabling ptype callback for required ptypes on port %u\n", 165*99a2dd95SBruce Richardson port); 166*99a2dd95SBruce Richardson 167*99a2dd95SBruce Richardson if (!rte_eth_add_rx_callback(port, queue, eth_pkt_parse_cb, 168*99a2dd95SBruce Richardson NULL)) { 169*99a2dd95SBruce Richardson node_err("ethdev_rx", 170*99a2dd95SBruce Richardson "Failed to add rx ptype cb: port=%d, queue=%d\n", 171*99a2dd95SBruce Richardson port, queue); 172*99a2dd95SBruce Richardson return -EINVAL; 173*99a2dd95SBruce Richardson } 174*99a2dd95SBruce Richardson } 175*99a2dd95SBruce Richardson 176*99a2dd95SBruce Richardson return 0; 177*99a2dd95SBruce Richardson } 178*99a2dd95SBruce Richardson 179*99a2dd95SBruce Richardson static int 180*99a2dd95SBruce Richardson ethdev_rx_node_init(const struct rte_graph *graph, struct rte_node *node) 181*99a2dd95SBruce Richardson { 182*99a2dd95SBruce Richardson ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx; 183*99a2dd95SBruce Richardson ethdev_rx_node_elem_t *elem = ethdev_rx_main.head; 184*99a2dd95SBruce Richardson 185*99a2dd95SBruce Richardson RTE_SET_USED(graph); 186*99a2dd95SBruce Richardson 187*99a2dd95SBruce Richardson while (elem) { 188*99a2dd95SBruce Richardson if (elem->nid == node->id) { 189*99a2dd95SBruce Richardson /* Update node specific context */ 190*99a2dd95SBruce Richardson memcpy(ctx, &elem->ctx, 
sizeof(ethdev_rx_node_ctx_t)); 191*99a2dd95SBruce Richardson break; 192*99a2dd95SBruce Richardson } 193*99a2dd95SBruce Richardson elem = elem->next; 194*99a2dd95SBruce Richardson } 195*99a2dd95SBruce Richardson 196*99a2dd95SBruce Richardson RTE_VERIFY(elem != NULL); 197*99a2dd95SBruce Richardson 198*99a2dd95SBruce Richardson ctx->cls_next = ETHDEV_RX_NEXT_PKT_CLS; 199*99a2dd95SBruce Richardson 200*99a2dd95SBruce Richardson /* Check and setup ptype */ 201*99a2dd95SBruce Richardson return ethdev_ptype_setup(ctx->port_id, ctx->queue_id); 202*99a2dd95SBruce Richardson } 203*99a2dd95SBruce Richardson 204*99a2dd95SBruce Richardson struct ethdev_rx_node_main * 205*99a2dd95SBruce Richardson ethdev_rx_get_node_data_get(void) 206*99a2dd95SBruce Richardson { 207*99a2dd95SBruce Richardson return ðdev_rx_main; 208*99a2dd95SBruce Richardson } 209*99a2dd95SBruce Richardson 210*99a2dd95SBruce Richardson static struct rte_node_register ethdev_rx_node_base = { 211*99a2dd95SBruce Richardson .process = ethdev_rx_node_process, 212*99a2dd95SBruce Richardson .flags = RTE_NODE_SOURCE_F, 213*99a2dd95SBruce Richardson .name = "ethdev_rx", 214*99a2dd95SBruce Richardson 215*99a2dd95SBruce Richardson .init = ethdev_rx_node_init, 216*99a2dd95SBruce Richardson 217*99a2dd95SBruce Richardson .nb_edges = ETHDEV_RX_NEXT_MAX, 218*99a2dd95SBruce Richardson .next_nodes = { 219*99a2dd95SBruce Richardson /* Default pkt classification node */ 220*99a2dd95SBruce Richardson [ETHDEV_RX_NEXT_PKT_CLS] = "pkt_cls", 221*99a2dd95SBruce Richardson [ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup", 222*99a2dd95SBruce Richardson }, 223*99a2dd95SBruce Richardson }; 224*99a2dd95SBruce Richardson 225*99a2dd95SBruce Richardson struct rte_node_register * 226*99a2dd95SBruce Richardson ethdev_rx_node_get(void) 227*99a2dd95SBruce Richardson { 228*99a2dd95SBruce Richardson return ðdev_rx_node_base; 229*99a2dd95SBruce Richardson } 230*99a2dd95SBruce Richardson 231*99a2dd95SBruce Richardson RTE_NODE_REGISTER(ethdev_rx_node_base); 232