/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <stdbool.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "l3fwd.h"
#include "l3fwd_event.h"

struct ipv4_l3fwd_lpm_route {
	uint32_t ip;
	uint8_t depth;
	uint8_t if_out;
};

struct ipv6_l3fwd_lpm_route {
	uint8_t ip[16];
	uint8_t depth;
	uint8_t if_out;
};

/* 198.18.0.0/16 is set aside for RFC 2544 benchmarking (RFC 5735). */
static const struct ipv4_l3fwd_lpm_route ipv4_l3fwd_lpm_route_array[] = {
	{RTE_IPV4(198, 18, 0, 0), 24, 0},
	{RTE_IPV4(198, 18, 1, 0), 24, 1},
	{RTE_IPV4(198, 18, 2, 0), 24, 2},
	{RTE_IPV4(198, 18, 3, 0), 24, 3},
	{RTE_IPV4(198, 18, 4, 0), 24, 4},
	{RTE_IPV4(198, 18, 5, 0), 24, 5},
	{RTE_IPV4(198, 18, 6, 0), 24, 6},
	{RTE_IPV4(198, 18, 7, 0), 24, 7},
};

/* 2001:0200::/48 is the IANA-reserved range for IPv6 benchmarking (RFC 5180). */
static const struct ipv6_l3fwd_lpm_route ipv6_l3fwd_lpm_route_array[] = {
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 48, 0},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0}, 48, 1},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0}, 48, 2},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0}, 48, 3},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0}, 48, 4},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0}, 48, 5},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0}, 48, 6},
	{{32, 1, 2, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0}, 48, 7},
};

#define IPV4_L3FWD_LPM_MAX_RULES	1024
#define IPV4_L3FWD_LPM_NUMBER_TBL8S	(1 << 8)
#define IPV6_L3FWD_LPM_MAX_RULES	1024
#define IPV6_L3FWD_LPM_NUMBER_TBL8S	(1 << 16)

static struct rte_lpm *ipv4_l3fwd_lpm_lookup_struct[NB_SOCKETS];
static struct rte_lpm6 *ipv6_l3fwd_lpm_lookup_struct[NB_SOCKETS];

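/*
 * Look up the forwarding port for an IPv4 destination address in the LPM
 * table; fall back to the given port ID when no route matches.
 */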
static inline uint16_t
lpm_get_ipv4_dst_port(const struct rte_ipv4_hdr *ipv4_hdr,
		      uint16_t portid,
		      struct rte_lpm *ipv4_l3fwd_lookup_struct)
{
	uint32_t dst_ip = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
	uint32_t next_hop;

	if (rte_lpm_lookup(ipv4_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
		return next_hop;
	else
		return portid;
}

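/*
 * IPv6 counterpart of lpm_get_ipv4_dst_port(): look up the destination
 * address in the LPM6 table, falling back to the given port ID on a miss.
 */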
static inline uint16_t
lpm_get_ipv6_dst_port(const struct rte_ipv6_hdr *ipv6_hdr,
		      uint16_t portid,
		      struct rte_lpm6 *ipv6_l3fwd_lookup_struct)
{
	const uint8_t *dst_ip = ipv6_hdr->dst_addr;
	uint32_t next_hop;

	if (rte_lpm6_lookup(ipv6_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
		return next_hop;
	else
		return portid;
}

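/*
 * Resolve the destination port for one packet: dispatch on the mbuf packet
 * type and do an IPv4 or IPv6 LPM lookup. Non-IP packets keep the RX port.
 */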
static __rte_always_inline uint16_t
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
		uint16_t portid)
{
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ether_hdr *eth_hdr;

	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {

		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

		return lpm_get_ipv4_dst_port(ipv4_hdr, portid,
					     qconf->ipv4_lookup_struct);
	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {

		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		return lpm_get_ipv6_dst_port(ipv6_hdr, portid,
					     qconf->ipv6_lookup_struct);
	}

	return portid;
}

/*
 * Variant of lpm_get_dst_port() for packets whose IPv4 destination address
 * has already been extracted. For IPv6 packets the destination address is
 * read directly from the packet header and dst_ipv4 is ignored.
 */
static __rte_always_inline uint16_t
lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
	uint32_t dst_ipv4, uint16_t portid)
{
	uint32_t next_hop;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_ether_hdr *eth_hdr;

	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
		return (uint16_t) ((rte_lpm_lookup(qconf->ipv4_lookup_struct,
				dst_ipv4, &next_hop) == 0)
				? next_hop : portid);

	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {

		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
				ipv6_hdr->dst_addr, &next_hop) == 0)
				? next_hop : portid);

	}

	return portid;
}

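/*
 * Pull in the architecture-specific burst send path (SSE, NEON or Altivec)
 * when one is available; otherwise fall back to the generic scalar path.
 */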
#if defined(RTE_ARCH_X86)
#include "l3fwd_lpm_sse.h"
#elif defined __ARM_NEON
#include "l3fwd_lpm_neon.h"
#elif defined(RTE_ARCH_PPC_64)
#include "l3fwd_lpm_altivec.h"
#else
#include "l3fwd_lpm.h"
#endif

/* main processing loop */
int
lpm_main_loop(__rte_unused void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, nb_rx;
	uint16_t portid;
	uint8_t queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD,
			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (!force_quit) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			for (i = 0; i < qconf->n_tx_port; ++i) {
				portid = qconf->tx_port_id[i];
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
				MAX_PKT_BURST);
			if (nb_rx == 0)
				continue;

#if defined RTE_ARCH_X86 || defined __ARM_NEON \
			 || defined RTE_ARCH_PPC_64
			l3fwd_lpm_send_packets(nb_rx, pkts_burst,
						portid, qconf);
#else
			l3fwd_lpm_no_opt_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#endif /* X86 */
		}
	}

	return 0;
}

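/*
 * Forward a single event-mode packet: resolve the destination port and
 * rewrite the Ethernet header (with optional RFC1812 IPv4 validation and
 * TTL/checksum update on the scalar path). Returns the resolved port, or
 * BAD_PORT if the packet should be dropped.
 */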
static __rte_always_inline uint16_t
lpm_process_event_pkt(const struct lcore_conf *lconf, struct rte_mbuf *mbuf)
{
	mbuf->port = lpm_get_dst_port(lconf, mbuf, mbuf->port);

#if defined RTE_ARCH_X86 || defined __ARM_NEON \
	|| defined RTE_ARCH_PPC_64
	process_packet(mbuf, &mbuf->port);
#else

	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf,
			struct rte_ether_hdr *);
#ifdef DO_RFC_1812_CHECKS
	struct rte_ipv4_hdr *ipv4_hdr;
	if (RTE_ETH_IS_IPV4_HDR(mbuf->packet_type)) {
		/* Handle IPv4 headers. */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf,
				struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));

		if (is_valid_ipv4_pkt(ipv4_hdr, mbuf->pkt_len)
				< 0) {
			mbuf->port = BAD_PORT;
			return mbuf->port;
		}
		/* Update time to live and header checksum */
		--(ipv4_hdr->time_to_live);
		++(ipv4_hdr->hdr_checksum);
	}
#endif
	/* dst addr */
	*(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[mbuf->port];

	/* src addr */
	rte_ether_addr_copy(&ports_eth_addr[mbuf->port],
			&eth_hdr->s_addr);
#endif
	return mbuf->port;
}

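/*
 * Event-mode worker loop, single-event variant: dequeue one event at a time,
 * forward the mbuf, then either enqueue it to the TX event queue or hand it
 * directly to the ethdev TX adapter, depending on the flags.
 */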
static __rte_always_inline void
lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
		evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	struct rte_event ev;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();
	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
	while (!force_quit) {
		if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
			continue;

		if (lpm_process_event_pkt(lconf, ev.mbuf) == BAD_PORT) {
			rte_pktmbuf_free(ev.mbuf);
			continue;
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			ev.queue_id = tx_q_id;
			ev.op = RTE_EVENT_OP_FORWARD;
			/* retry until the event is accepted */
			while (!rte_event_enqueue_burst(event_d_id, event_p_id,
						&ev, 1) && !force_quit)
				;
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
			while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
						event_p_id, &ev, 1, 0) &&
					!force_quit)
				;
		}
	}
}

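/*
 * Event-mode worker loop, burst variant: dequeue up to deq_depth events,
 * process each mbuf, then enqueue the whole burst, retrying until every
 * event is accepted or a quit is requested.
 */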
static __rte_always_inline void
lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
		evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint16_t deq_len = evt_rsrc->deq_depth;
	struct rte_event events[MAX_PKT_BURST];
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	int i, nb_enq, nb_deq;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();

	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);

	while (!force_quit) {
		/* Read events from RX queues */
		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,
				events, deq_len, 0);
		if (nb_deq == 0) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_deq; i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			if (flags & L3FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(events[i].mbuf,
								 0);

			lpm_process_event_pkt(lconf, events[i].mbuf);
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
					events, nb_deq);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_enqueue_burst(event_d_id,
						event_p_id, events + nb_enq,
						nb_deq - nb_enq);
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			nb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id,
					event_p_id, events, nb_deq, 0);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_eth_tx_adapter_enqueue(
						event_d_id, event_p_id,
						events + nb_enq,
						nb_deq - nb_enq, 0);
		}
	}
}

static __rte_always_inline void
lpm_event_loop(struct l3fwd_event_resources *evt_rsrc,
		 const uint8_t flags)
{
	if (flags & L3FWD_EVENT_SINGLE)
		lpm_event_loop_single(evt_rsrc, flags);
	if (flags & L3FWD_EVENT_BURST)
		lpm_event_loop_burst(evt_rsrc, flags);
}

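/*
 * Event-mode worker entry points: one per combination of TX mode (direct to
 * the TX adapter vs. forwarded through a TX event queue) and dequeue mode
 * (single event vs. burst).
 */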
int __rte_noinline
lpm_event_main_loop_tx_d(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_SINGLE);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_d_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_BURST);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_SINGLE);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_BURST);
	return 0;
}

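/*
 * Create the per-socket IPv4 and IPv6 LPM tables and populate them with the
 * static benchmark routes defined above, skipping routes whose output port
 * is not in enabled_port_mask.
 */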
void
setup_lpm(const int socketid)
{
	struct rte_lpm6_config config;
	struct rte_lpm_config config_ipv4;
	unsigned i;
	int ret;
	char s[64];
	char abuf[INET6_ADDRSTRLEN];

	/* create the LPM table */
	config_ipv4.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
	config_ipv4.number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S;
	config_ipv4.flags = 0;
	snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
	ipv4_l3fwd_lpm_lookup_struct[socketid] =
			rte_lpm_create(s, socketid, &config_ipv4);
	if (ipv4_l3fwd_lpm_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd LPM table on socket %d\n",
			socketid);

	/* populate the LPM table */
	for (i = 0; i < RTE_DIM(ipv4_l3fwd_lpm_route_array); i++) {
		struct in_addr in;

		/* skip unused ports */
		if ((1 << ipv4_l3fwd_lpm_route_array[i].if_out &
				enabled_port_mask) == 0)
			continue;

		ret = rte_lpm_add(ipv4_l3fwd_lpm_lookup_struct[socketid],
			ipv4_l3fwd_lpm_route_array[i].ip,
			ipv4_l3fwd_lpm_route_array[i].depth,
			ipv4_l3fwd_lpm_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		in.s_addr = htonl(ipv4_l3fwd_lpm_route_array[i].ip);
		printf("LPM: Adding route %s / %d (%d)\n",
		       inet_ntop(AF_INET, &in, abuf, sizeof(abuf)),
		       ipv4_l3fwd_lpm_route_array[i].depth,
		       ipv4_l3fwd_lpm_route_array[i].if_out);
	}

	/* create the LPM6 table */
	snprintf(s, sizeof(s), "IPV6_L3FWD_LPM_%d", socketid);

	config.max_rules = IPV6_L3FWD_LPM_MAX_RULES;
	config.number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S;
	config.flags = 0;
	ipv6_l3fwd_lpm_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
				&config);
	if (ipv6_l3fwd_lpm_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd LPM6 table on socket %d\n",
			socketid);

	/* populate the LPM6 table */
	for (i = 0; i < RTE_DIM(ipv6_l3fwd_lpm_route_array); i++) {

		/* skip unused ports */
		if ((1 << ipv6_l3fwd_lpm_route_array[i].if_out &
				enabled_port_mask) == 0)
			continue;

		ret = rte_lpm6_add(ipv6_l3fwd_lpm_lookup_struct[socketid],
			ipv6_l3fwd_lpm_route_array[i].ip,
			ipv6_l3fwd_lpm_route_array[i].depth,
			ipv6_l3fwd_lpm_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd LPM6 table on socket %d\n",
				i, socketid);
		}

		printf("LPM: Adding route %s / %d (%d)\n",
		       inet_ntop(AF_INET6, ipv6_l3fwd_lpm_route_array[i].ip,
				 abuf, sizeof(abuf)),
		       ipv6_l3fwd_lpm_route_array[i].depth,
		       ipv6_l3fwd_lpm_route_array[i].if_out);
	}
}

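/*
 * Check whether the port can classify IPv4 and IPv6 L3 headers in hardware.
 * Returns 1 when both packet types are reported as supported; otherwise
 * returns 0 and the software parse callback below can be used instead.
 */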
int
lpm_check_ptype(int portid)
{
	int i, ret;
	int ptype_l3_ipv4 = 0, ptype_l3_ipv6 = 0;
	uint32_t ptype_mask = RTE_PTYPE_L3_MASK;

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
	if (ret <= 0)
		return 0;

	uint32_t ptypes[ret];

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
	for (i = 0; i < ret; ++i) {
		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
			ptype_l3_ipv4 = 1;
		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
			ptype_l3_ipv6 = 1;
	}

	if (ptype_l3_ipv4 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);

	if (ptype_l3_ipv6 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);

	if (ptype_l3_ipv4 && ptype_l3_ipv6)
		return 1;

	return 0;
}

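/* Derive the mbuf packet type from the Ethernet header in software. */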
static inline void
lpm_parse_ptype(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth_hdr;
	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
	uint16_t ether_type;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	ether_type = eth_hdr->ether_type;
	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
		packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;

	m->packet_type = packet_type;
}

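/*
 * RX callback that applies lpm_parse_ptype() to every packet in the burst,
 * prefetching the next Ethernet header while the current one is parsed.
 */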
uint16_t
lpm_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
		   struct rte_mbuf *pkts[], uint16_t nb_pkts,
		   uint16_t max_pkts __rte_unused,
		   void *user_param __rte_unused)
{
	unsigned int i;

	if (unlikely(nb_pkts == 0))
		return nb_pkts;
	rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct rte_ether_hdr *));
	for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
				struct rte_ether_hdr *));
		lpm_parse_ptype(pkts[i]);
	}
	lpm_parse_ptype(pkts[i]);

	return nb_pkts;
}

/* Return ipv4/ipv6 lpm fwd lookup struct. */
void *
lpm_get_ipv4_l3fwd_lookup_struct(const int socketid)
{
	return ipv4_l3fwd_lpm_lookup_struct[socketid];
}

void *
lpm_get_ipv6_l3fwd_lookup_struct(const int socketid)
{
	return ipv6_l3fwd_lpm_lookup_struct[socketid];
}