/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#ifndef __L3FWD_EM_H__
#define __L3FWD_EM_H__

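/*
 * Exact-match (EM) forwarding helpers for the l3fwd example: per-packet
 * IPv4/IPv6 handling plus the non-optimized burst and event loops.
 *
 * Usage sketch for the burst path (illustrative only; assumes the main
 * loop polls one RX queue into pkts_burst[]):
 *
 *	nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
 *				 MAX_PKT_BURST);
 *	l3fwd_em_no_opt_send_packets(nb_rx, pkts_burst, portid, qconf);
 */
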
static __rte_always_inline uint16_t
l3fwd_em_handle_ipv4(struct rte_mbuf *m, uint16_t portid,
		struct rte_ether_hdr *eth_hdr, struct lcore_conf *qconf)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	uint16_t dst_port;

	/* Handle IPv4 headers. */
	ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
			sizeof(struct rte_ether_hdr));

#ifdef DO_RFC_1812_CHECKS
	/* Check to make sure the packet is valid (RFC1812) */
	if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
		rte_pktmbuf_free(m);
		return BAD_PORT;
	}
#endif
	dst_port = em_get_ipv4_dst_port(ipv4_hdr, portid,
			qconf->ipv4_lookup_struct);

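	/*
	 * Fall back to the RX port if the lookup returned an invalid
	 * port or one that is not in enabled_port_mask.
	 */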
	if (dst_port >= RTE_MAX_ETHPORTS ||
	    (enabled_port_mask & 1 << dst_port) == 0)
		dst_port = portid;

#ifdef DO_RFC_1812_CHECKS
	/* Update time to live and header checksum */
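	/*
	 * Fast incremental update: TTL sits in the high-order byte of
	 * its 16-bit header word, so decrementing it removes 0x0100
	 * from the ones'-complement sum; on a little-endian host the
	 * ++ below adds 0x0100 (in network order) back to the stored
	 * checksum, skipping full RFC 1624 carry handling.
	 */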
	--(ipv4_hdr->time_to_live);
	++(ipv4_hdr->hdr_checksum);
#endif
	/* dst addr */
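	/*
	 * The 8-byte store writes the 6-byte destination MAC plus the
	 * first two bytes of the source MAC, which the copy below then
	 * overwrites.
	 */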
	*(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[dst_port];

	/* src addr */
	rte_ether_addr_copy(&ports_eth_addr[dst_port],
			&eth_hdr->s_addr);

	return dst_port;
}

static __rte_always_inline uint16_t
l3fwd_em_handle_ipv6(struct rte_mbuf *m, uint16_t portid,
		struct rte_ether_hdr *eth_hdr, struct lcore_conf *qconf)
{
	/* Handle IPv6 headers. */
	struct rte_ipv6_hdr *ipv6_hdr;
	uint16_t dst_port;

	ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
			sizeof(struct rte_ether_hdr));

	dst_port = em_get_ipv6_dst_port(ipv6_hdr, portid,
			qconf->ipv6_lookup_struct);

	if (dst_port >= RTE_MAX_ETHPORTS ||
	    (enabled_port_mask & 1 << dst_port) == 0)
		dst_port = portid;

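	/*
	 * Same MAC rewrite trick as in the IPv4 handler: the 8-byte
	 * store spills into the source MAC, fixed up by the copy below.
	 */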
	/* dst addr */
	*(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[dst_port];

	/* src addr */
	rte_ether_addr_copy(&ports_eth_addr[dst_port],
			&eth_hdr->s_addr);

	return dst_port;
}

static __rte_always_inline void
l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,
		struct lcore_conf *qconf)
{
	struct rte_ether_hdr *eth_hdr;
	uint16_t dst_port;
	uint32_t tcp_or_udp;
	uint32_t l3_ptypes;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	tcp_or_udp = m->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
	l3_ptypes = m->packet_type & RTE_PTYPE_L3_MASK;

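	/*
	 * Branch on the ptype classification (set by the NIC or by
	 * software ptype parsing): only TCP/UDP over IPv4 or IPv6 is
	 * forwarded, everything else is dropped.
	 */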
	if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
		dst_port = l3fwd_em_handle_ipv4(m, portid, eth_hdr, qconf);
		send_single_packet(qconf, m, dst_port);
	} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
		dst_port = l3fwd_em_handle_ipv6(m, portid, eth_hdr, qconf);
		send_single_packet(qconf, m, dst_port);
	} else {
		/* Free the mbuf that contains non-IPV4/IPV6 packet */
		rte_pktmbuf_free(m);
	}
}

static __rte_always_inline void
l3fwd_em_simple_process(struct rte_mbuf *m, struct lcore_conf *qconf)
{
	struct rte_ether_hdr *eth_hdr;
	uint32_t tcp_or_udp;
	uint32_t l3_ptypes;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	tcp_or_udp = m->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
	l3_ptypes = m->packet_type & RTE_PTYPE_L3_MASK;

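	/*
	 * Event path: the chosen destination is returned through
	 * m->port, which the eventdev TX stage later transmits from
	 * (BAD_PORT marks the packet for dropping).
	 */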
	if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4))
		m->port = l3fwd_em_handle_ipv4(m, m->port, eth_hdr, qconf);
	else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6))
		m->port = l3fwd_em_handle_ipv6(m, m->port, eth_hdr, qconf);
	else
		m->port = BAD_PORT;
}

/*
 * Non-optimized handling of a burst of packets, invoked
 * from main_loop.
 */
static inline void
l3fwd_em_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
		uint16_t portid, struct lcore_conf *qconf)
{
	int32_t j;

	/* Prefetch first packets */
	for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++)
		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j], void *));

	/*
	 * Software pipelining: prefetch the packet PREFETCH_OFFSET
	 * ahead while forwarding the already-prefetched ones.
	 */
	for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
				j + PREFETCH_OFFSET], void *));
		l3fwd_em_simple_forward(pkts_burst[j], portid, qconf);
	}

	/* Forward remaining prefetched packets */
	for (; j < nb_rx; j++)
		l3fwd_em_simple_forward(pkts_burst[j], portid, qconf);
}

/*
 * Non-optimized handling of a burst of events, invoked
 * from main_loop.
 */
static inline void
l3fwd_em_no_opt_process_events(int nb_rx, struct rte_event **events,
		struct lcore_conf *qconf)
{
	int32_t j;

	/* Prefetch first packets */
	for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++)
		rte_prefetch0(rte_pktmbuf_mtod(events[j]->mbuf, void *));

	/*
	 * Software pipelining: prefetch the mbuf PREFETCH_OFFSET
	 * ahead while processing the already-prefetched events.
	 */
	for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
		rte_prefetch0(rte_pktmbuf_mtod(events[
				j + PREFETCH_OFFSET]->mbuf, void *));
		l3fwd_em_simple_process(events[j]->mbuf, qconf);
	}

	/* Process remaining prefetched packets */
	for (; j < nb_rx; j++)
		l3fwd_em_simple_process(events[j]->mbuf, qconf);
}

#endif /* __L3FWD_EM_H__ */