/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#ifndef __L3FWD_EM_SEQUENTIAL_H__
#define __L3FWD_EM_SEQUENTIAL_H__

/**
 * @file
 * This is an optional implementation of packet classification in the
 * Exact-Match path using a sequential (one packet at a time)
 * classification method.
 * While the multi hash lookup seems to provide better performance, this
 * sequential path is disabled by default and can be enabled with the
 * NO_HASH_LOOKUP_MULTI global define at compilation time.
 */
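
/*
 * Illustrative sketch only (assumed here, not copied from l3fwd_em.c): the
 * build-time selection between this sequential path and the multi hash
 * lookup path would look roughly like:
 *
 *   #if defined(NO_HASH_LOOKUP_MULTI)
 *   #include "l3fwd_em_sequential.h"
 *   #else
 *   #include "l3fwd_em_hlm.h"   /- multi-lookup header name assumed -/
 *   #endif
 */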

#if defined RTE_ARCH_X86
#include "l3fwd_sse.h"
#elif defined __ARM_NEON
#include "l3fwd_neon.h"
#endif

static __rte_always_inline uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
		uint16_t portid)
{
	uint16_t next_hop;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	uint32_t tcp_or_udp;
	uint32_t l3_ptypes;

	tcp_or_udp = pkt->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
	l3_ptypes = pkt->packet_type & RTE_PTYPE_L3_MASK;

	if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {

		/* Handle IPv4 headers. */
		ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));

		next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
				qconf->ipv4_lookup_struct);

		/* Fall back to the input port on an invalid or disabled port. */
		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;

	} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {

		/* Handle IPv6 headers. */
		ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *,
				sizeof(struct rte_ether_hdr));

		next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,
				qconf->ipv6_lookup_struct);

		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;

	}

	return portid;
}

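/*
 * Note: em_get_ipv4_dst_port() and em_get_ipv6_dst_port() are provided by
 * the exact-match lookup code elsewhere in l3fwd. As a hedged sketch only
 * (an assumed shape, not the actual l3fwd implementation), such a helper
 * amounts to an rte_hash lookup on the packet's masked 5-tuple:
 *
 *   int32_t ret = rte_hash_lookup(lookup_struct, (const void *)&key);
 *   return (ret < 0) ? portid : route_out_if[ret];
 *
 * where 'key' and 'route_out_if' are hypothetical names for the 5-tuple
 * key and the entry-to-output-port table.
 */
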
/*
 * Buffer-optimized handling of packets, invoked
 * from main_loop.
 */
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
		uint16_t portid, struct lcore_conf *qconf)
{
	int32_t i, j;
	uint16_t dst_port[MAX_PKT_BURST];

	if (nb_rx > 0) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[0],
					       struct rte_ether_hdr *) + 1);
	}

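	/*
	 * Classify packet j while prefetching the header of packet
	 * i = j + 1, so the next lookup finds its data already in cache.
	 */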
	for (i = 1, j = 0; j < nb_rx; i++, j++) {
		if (i < nb_rx) {
			rte_prefetch0(rte_pktmbuf_mtod(
					pkts_burst[i],
					struct rte_ether_hdr *) + 1);
		}
		dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
	}

	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
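
/*
 * Usage sketch (assumed, not copied from the l3fwd RX loop): packets are
 * received in a burst and handed over unchanged; 'portid' and 'queueid'
 * stand for the polled port and queue.
 *
 *   nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, MAX_PKT_BURST);
 *   if (nb_rx != 0)
 *       l3fwd_em_send_packets(nb_rx, pkts_burst, portid, qconf);
 */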

/*
 * Buffer-optimized handling of events, invoked
 * from main_loop.
 */
static inline void
l3fwd_em_process_events(int nb_rx, struct rte_event **events,
		struct lcore_conf *qconf)
{
	int32_t i, j;

	rte_prefetch0(rte_pktmbuf_mtod(events[0]->mbuf,
				       struct rte_ether_hdr *) + 1);

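	/*
	 * Same one-ahead pattern as above: process event j while prefetching
	 * the mbuf header referenced by event i = j + 1.
	 */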
	for (i = 1, j = 0; j < nb_rx; i++, j++) {
		struct rte_mbuf *mbuf = events[j]->mbuf;

		if (i < nb_rx) {
			rte_prefetch0(rte_pktmbuf_mtod(
					events[i]->mbuf,
					struct rte_ether_hdr *) + 1);
		}
		mbuf->port = em_get_dst_port(qconf, mbuf, mbuf->port);
		process_packet(mbuf, &mbuf->port);
	}
}
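
/*
 * Usage sketch (assumed, not copied from the l3fwd event loop): events are
 * dequeued in a burst and passed by pointer; 'event_dev_id' and
 * 'event_port_id' are placeholders. Callers must pass at least one event,
 * since events[0] is prefetched unconditionally above.
 *
 *   struct rte_event ev[MAX_PKT_BURST];
 *   struct rte_event *ev_ptrs[MAX_PKT_BURST];
 *   uint16_t nb_deq, k;
 *
 *   nb_deq = rte_event_dequeue_burst(event_dev_id, event_port_id, ev,
 *                                    MAX_PKT_BURST, 0);
 *   for (k = 0; k < nb_deq; k++)
 *       ev_ptrs[k] = &ev[k];
 *   if (nb_deq != 0)
 *       l3fwd_em_process_events(nb_deq, ev_ptrs, qconf);
 */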
#endif /* __L3FWD_EM_SEQUENTIAL_H__ */