1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <unistd.h>
9 #include <stdint.h>
10 #include <stdarg.h>
11 #include <inttypes.h>
12 #include <sys/queue.h>
13 #include <errno.h>
14 #include <netinet/ip.h>
15 
16 #include <rte_common.h>
17 #include <rte_memory.h>
18 #include <rte_eal.h>
19 #include <rte_launch.h>
20 #include <rte_per_lcore.h>
21 #include <rte_lcore.h>
22 #include <rte_branch_prediction.h>
23 #include <rte_atomic.h>
24 #include <rte_ring.h>
25 #include <rte_log.h>
26 #include <rte_debug.h>
27 #include <rte_mempool.h>
28 #include <rte_memcpy.h>
29 #include <rte_mbuf.h>
30 #include <rte_ether.h>
31 #include <rte_interrupts.h>
32 #include <rte_ethdev.h>
33 #include <rte_byteorder.h>
34 #include <rte_malloc.h>
35 #include <rte_string_fns.h>
36 #include <rte_efd.h>
37 #include <rte_ip.h>
38 
39 #include "common.h"
40 #include "args.h"
41 #include "init.h"
42 
43 /*
44  * When doing reads from the NIC or the node queues,
45  * use this batch size
46  */
47 #define PACKET_READ_SIZE 32
48 
49 /*
50  * Local buffers to put packets in, used to send packets in bursts to the
51  * nodes
52  */
struct node_rx_buf {
	struct rte_mbuf *buffer[PACKET_READ_SIZE]; /* staged mbufs awaiting bulk enqueue */
	uint16_t count; /* number of valid entries in buffer[] */
};

/* Counters for the EFD-based flow distribution, reported by the stats display */
struct efd_stats {
	uint64_t distributed; /* packets forwarded to a node after a successful EFD lookup */
	uint64_t drop; /* packets dropped because the EFD lookup gave an out-of-range node */
} flow_dist_stats;

/* One buffer per node rx queue - dynamically allocate array */
static struct node_rx_buf *cl_rx_buf;
65 
66 static const char *
67 get_printable_mac_addr(uint16_t port)
68 {
69 	static const char err_address[] = "00:00:00:00:00:00";
70 	static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
71 	struct ether_addr mac;
72 
73 	if (unlikely(port >= RTE_MAX_ETHPORTS))
74 		return err_address;
75 	if (unlikely(addresses[port][0] == '\0')) {
76 		rte_eth_macaddr_get(port, &mac);
77 		snprintf(addresses[port], sizeof(addresses[port]),
78 				"%02x:%02x:%02x:%02x:%02x:%02x\n",
79 				mac.addr_bytes[0], mac.addr_bytes[1],
80 				mac.addr_bytes[2], mac.addr_bytes[3],
81 				mac.addr_bytes[4], mac.addr_bytes[5]);
82 	}
83 	return addresses[port];
84 }
85 
86 /*
87  * This function displays the recorded statistics for each port
88  * and for each node. It uses ANSI terminal codes to clear
89  * screen when called. It is called from a single non-master
90  * thread in the server process, when the process is run with more
91  * than one lcore enabled.
92  */
93 static void
94 do_stats_display(void)
95 {
96 	unsigned int i, j;
97 	const char clr[] = {27, '[', '2', 'J', '\0'};
98 	const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0'};
99 	uint64_t port_tx[RTE_MAX_ETHPORTS], port_tx_drop[RTE_MAX_ETHPORTS];
100 	uint64_t node_tx[MAX_NODES], node_tx_drop[MAX_NODES];
101 
102 	/* to get TX stats, we need to do some summing calculations */
103 	memset(port_tx, 0, sizeof(port_tx));
104 	memset(port_tx_drop, 0, sizeof(port_tx_drop));
105 	memset(node_tx, 0, sizeof(node_tx));
106 	memset(node_tx_drop, 0, sizeof(node_tx_drop));
107 
108 	for (i = 0; i < num_nodes; i++) {
109 		const struct tx_stats *tx = &info->tx_stats[i];
110 
111 		for (j = 0; j < info->num_ports; j++) {
112 			const uint64_t tx_val = tx->tx[info->id[j]];
113 			const uint64_t drop_val = tx->tx_drop[info->id[j]];
114 
115 			port_tx[j] += tx_val;
116 			port_tx_drop[j] += drop_val;
117 			node_tx[i] += tx_val;
118 			node_tx_drop[i] += drop_val;
119 		}
120 	}
121 
122 	/* Clear screen and move to top left */
123 	printf("%s%s", clr, topLeft);
124 
125 	printf("PORTS\n");
126 	printf("-----\n");
127 	for (i = 0; i < info->num_ports; i++)
128 		printf("Port %u: '%s'\t", (unsigned int)info->id[i],
129 				get_printable_mac_addr(info->id[i]));
130 	printf("\n\n");
131 	for (i = 0; i < info->num_ports; i++) {
132 		printf("Port %u - rx: %9"PRIu64"\t"
133 				"tx: %9"PRIu64"\n",
134 				(unsigned int)info->id[i], info->rx_stats.rx[i],
135 				port_tx[i]);
136 	}
137 
138 	printf("\nSERVER\n");
139 	printf("-----\n");
140 	printf("distributed: %9"PRIu64", drop: %9"PRIu64"\n",
141 			flow_dist_stats.distributed, flow_dist_stats.drop);
142 
143 	printf("\nNODES\n");
144 	printf("-------\n");
145 	for (i = 0; i < num_nodes; i++) {
146 		const unsigned long long rx = nodes[i].stats.rx;
147 		const unsigned long long rx_drop = nodes[i].stats.rx_drop;
148 		const struct filter_stats *filter = &info->filter_stats[i];
149 
150 		printf("Node %2u - rx: %9llu, rx_drop: %9llu\n"
151 				"            tx: %9"PRIu64", tx_drop: %9"PRIu64"\n"
152 				"            filter_passed: %9"PRIu64", "
153 				"filter_drop: %9"PRIu64"\n",
154 				i, rx, rx_drop, node_tx[i], node_tx_drop[i],
155 				filter->passed, filter->drop);
156 	}
157 
158 	printf("\n");
159 }
160 
161 /*
162  * The function called from each non-master lcore used by the process.
163  * The test_and_set function is used to randomly pick a single lcore on which
164  * the code to display the statistics will run. Otherwise, the code just
165  * repeatedly sleeps.
166  */
167 static int
168 sleep_lcore(__attribute__((unused)) void *dummy)
169 {
170 	/* Used to pick a display thread - static, so zero-initialised */
171 	static rte_atomic32_t display_stats;
172 
173 	/* Only one core should display stats */
174 	if (rte_atomic32_test_and_set(&display_stats)) {
175 		const unsigned int sleeptime = 1;
176 
177 		printf("Core %u displaying statistics\n", rte_lcore_id());
178 
179 		/* Longer initial pause so above printf is seen */
180 		sleep(sleeptime * 3);
181 
182 		/* Loop forever: sleep always returns 0 or <= param */
183 		while (sleep(sleeptime) <= sleeptime)
184 			do_stats_display();
185 	}
186 	return 0;
187 }
188 
189 /*
190  * Function to set all the node statistic values to zero.
191  * Called at program startup.
192  */
193 static void
194 clear_stats(void)
195 {
196 	unsigned int i;
197 
198 	for (i = 0; i < num_nodes; i++)
199 		nodes[i].stats.rx = nodes[i].stats.rx_drop = 0;
200 }
201 
202 /*
203  * send a burst of traffic to a node, assuming there are packets
204  * available to be sent to this node
205  */
206 static void
207 flush_rx_queue(uint16_t node)
208 {
209 	uint16_t j;
210 	struct node *cl;
211 
212 	if (cl_rx_buf[node].count == 0)
213 		return;
214 
215 	cl = &nodes[node];
216 	if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
217 			cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
218 		for (j = 0; j < cl_rx_buf[node].count; j++)
219 			rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
220 		cl->stats.rx_drop += cl_rx_buf[node].count;
221 	} else
222 		cl->stats.rx += cl_rx_buf[node].count;
223 
224 	cl_rx_buf[node].count = 0;
225 }
226 
227 /*
228  * marks a packet down to be sent to a particular node process
229  */
230 static inline void
231 enqueue_rx_packet(uint8_t node, struct rte_mbuf *buf)
232 {
233 	cl_rx_buf[node].buffer[cl_rx_buf[node].count++] = buf;
234 }
235 
236 /*
237  * This function takes a group of packets and routes them
238  * individually to the node process. Very simply round-robins the packets
239  * without checking any of the packet contents.
240  */
static void
process_packets(uint32_t port_num __rte_unused, struct rte_mbuf *pkts[],
		uint16_t rx_count, unsigned int socket_id)
{
	uint16_t i;
	uint8_t node;
	efd_value_t data[RTE_EFD_BURST_MAX];
	const void *key_ptrs[RTE_EFD_BURST_MAX];

	struct ipv4_hdr *ipv4_hdr;
	uint32_t ipv4_dst_ip[RTE_EFD_BURST_MAX];

	/* Gather each packet's IPv4 destination address as the lookup key.
	 * The addresses must be copied into ipv4_dst_ip[] so key_ptrs[]
	 * points at stable, aligned storage for the bulk lookup below.
	 * NOTE(review): assumes every frame is Ethernet + IPv4 - a non-IPv4
	 * frame would be keyed on garbage header bytes; confirm that input
	 * is filtered upstream. */
	for (i = 0; i < rx_count; i++) {
		/* Handle IPv4 header.*/
		ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct ipv4_hdr *,
				sizeof(struct ether_hdr));
		ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
		key_ptrs[i] = (void *)&ipv4_dst_ip[i];
	}

	/* One bulk EFD lookup maps all keys to candidate node ids.
	 * rx_count is at most PACKET_READ_SIZE, so the RTE_EFD_BURST_MAX
	 * arrays are large enough. */
	rte_efd_lookup_bulk(efd_table, socket_id, rx_count,
				(const void **) key_ptrs, data);
	for (i = 0; i < rx_count; i++) {
		node = (uint8_t) ((uintptr_t)data[i]);

		if (node >= num_nodes) {
			/*
			 * Node is out of range, which means that
			 * flow has not been inserted
			 */
			flow_dist_stats.drop++;
			rte_pktmbuf_free(pkts[i]);
		} else {
			flow_dist_stats.distributed++;
			enqueue_rx_packet(node, pkts[i]);
		}
	}

	/* Push all staged bursts out to their nodes before returning */
	for (i = 0; i < num_nodes; i++)
		flush_rx_queue(i);
}
282 
283 /*
284  * Function called by the master lcore of the DPDK process.
285  */
286 static void
287 do_packet_forwarding(void)
288 {
289 	unsigned int port_num = 0; /* indexes the port[] array */
290 	unsigned int socket_id = rte_socket_id();
291 
292 	for (;;) {
293 		struct rte_mbuf *buf[PACKET_READ_SIZE];
294 		uint16_t rx_count;
295 
296 		/* read a port */
297 		rx_count = rte_eth_rx_burst(info->id[port_num], 0,
298 				buf, PACKET_READ_SIZE);
299 		info->rx_stats.rx[port_num] += rx_count;
300 
301 		/* Now process the NIC packets read */
302 		if (likely(rx_count > 0))
303 			process_packets(port_num, buf, rx_count, socket_id);
304 
305 		/* move to next port */
306 		if (++port_num == info->num_ports)
307 			port_num = 0;
308 	}
309 }
310 
311 int
312 main(int argc, char *argv[])
313 {
314 	/* initialise the system */
315 	if (init(argc, argv) < 0)
316 		return -1;
317 	RTE_LOG(INFO, APP, "Finished Process Init.\n");
318 
319 	cl_rx_buf = calloc(num_nodes, sizeof(cl_rx_buf[0]));
320 
321 	/* clear statistics */
322 	clear_stats();
323 
324 	/* put all other cores to sleep bar master */
325 	rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MASTER);
326 
327 	do_packet_forwarding();
328 	return 0;
329 }
330