xref: /dpdk/examples/server_node_efd/server/main.c (revision ecaed092)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <errno.h>
#include <netinet/ip.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_byteorder.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_atomic.h>
#include <rte_ring.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_mempool.h>
#include <rte_memcpy.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_efd.h>
#include <rte_ip.h>

#include "common.h"
#include "args.h"
#include "init.h"

/*
 * When doing reads from the NIC or the node queues,
 * use this batch size
 */
#define PACKET_READ_SIZE 32

/*
 * Local buffers to put packets in, used to send packets in bursts to the
 * nodes
 */
struct node_rx_buf {
	struct rte_mbuf *buffer[PACKET_READ_SIZE];
	uint16_t count;
};

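/* Counters for packets distributed to nodes vs. dropped on EFD lookup miss */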
struct efd_stats {
	uint64_t distributed;
	uint64_t drop;
} flow_dist_stats;

/* One buffer per node rx queue - dynamically allocate array */
static struct node_rx_buf *cl_rx_buf;

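/*
 * Return a printable MAC address string for the given port.
 * The string is formatted on first use and cached for later calls.
 */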
static const char *
get_printable_mac_addr(uint8_t port)
{
	static const char err_address[] = "00:00:00:00:00:00";
	static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
	struct ether_addr mac;

	if (unlikely(port >= RTE_MAX_ETHPORTS))
		return err_address;
	if (unlikely(addresses[port][0] == '\0')) {
		rte_eth_macaddr_get(port, &mac);
		snprintf(addresses[port], sizeof(addresses[port]),
				"%02x:%02x:%02x:%02x:%02x:%02x",
				mac.addr_bytes[0], mac.addr_bytes[1],
				mac.addr_bytes[2], mac.addr_bytes[3],
				mac.addr_bytes[4], mac.addr_bytes[5]);
	}
	return addresses[port];
}

/*
 * This function displays the recorded statistics for each port
 * and for each node. It uses ANSI terminal codes to clear
 * screen when called. It is called from a single non-master
 * thread in the server process, when the process is run with more
 * than one lcore enabled.
 */
static void
do_stats_display(void)
{
	unsigned int i, j;
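	/* ANSI escape sequences: clear screen and move cursor to top-left */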
	const char clr[] = {27, '[', '2', 'J', '\0'};
	const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0'};
	uint64_t port_tx[RTE_MAX_ETHPORTS], port_tx_drop[RTE_MAX_ETHPORTS];
	uint64_t node_tx[MAX_NODES], node_tx_drop[MAX_NODES];

	/* to get TX stats, we need to do some summing calculations */
	memset(port_tx, 0, sizeof(port_tx));
	memset(port_tx_drop, 0, sizeof(port_tx_drop));
	memset(node_tx, 0, sizeof(node_tx));
	memset(node_tx_drop, 0, sizeof(node_tx_drop));

	for (i = 0; i < num_nodes; i++) {
		const struct tx_stats *tx = &info->tx_stats[i];

		for (j = 0; j < info->num_ports; j++) {
			const uint64_t tx_val = tx->tx[info->id[j]];
			const uint64_t drop_val = tx->tx_drop[info->id[j]];

			port_tx[j] += tx_val;
			port_tx_drop[j] += drop_val;
			node_tx[i] += tx_val;
			node_tx_drop[i] += drop_val;
		}
	}

	/* Clear screen and move to top left */
	printf("%s%s", clr, topLeft);

	printf("PORTS\n");
	printf("-----\n");
	for (i = 0; i < info->num_ports; i++)
		printf("Port %u: '%s'\t", (unsigned int)info->id[i],
				get_printable_mac_addr(info->id[i]));
	printf("\n\n");
	for (i = 0; i < info->num_ports; i++) {
		printf("Port %u - rx: %9"PRIu64"\t"
				"tx: %9"PRIu64"\n",
				(unsigned int)info->id[i], info->rx_stats.rx[i],
				port_tx[i]);
	}

	printf("\nSERVER\n");
	printf("-----\n");
	printf("distributed: %9"PRIu64", drop: %9"PRIu64"\n",
			flow_dist_stats.distributed, flow_dist_stats.drop);

	printf("\nNODES\n");
	printf("-------\n");
	for (i = 0; i < num_nodes; i++) {
		const unsigned long long rx = nodes[i].stats.rx;
		const unsigned long long rx_drop = nodes[i].stats.rx_drop;
		const struct filter_stats *filter = &info->filter_stats[i];

		printf("Node %2u - rx: %9llu, rx_drop: %9llu\n"
				"            tx: %9"PRIu64", tx_drop: %9"PRIu64"\n"
				"            filter_passed: %9"PRIu64", "
				"filter_drop: %9"PRIu64"\n",
				i, rx, rx_drop, node_tx[i], node_tx_drop[i],
				filter->passed, filter->drop);
	}

	printf("\n");
}

/*
 * The function called from each non-master lcore used by the process.
 * The test_and_set function is used to pick a single lcore (the first one
 * to arrive here) on which the code to display the statistics will run.
 * Otherwise, the code just repeatedly sleeps.
 */
static int
sleep_lcore(__attribute__((unused)) void *dummy)
{
	/* Used to pick a display thread - static, so zero-initialised */
	static rte_atomic32_t display_stats;

	/* Only one core should display stats */
	if (rte_atomic32_test_and_set(&display_stats)) {
		const unsigned int sleeptime = 1;

		printf("Core %u displaying statistics\n", rte_lcore_id());

		/* Longer initial pause so above printf is seen */
		sleep(sleeptime * 3);

		/* Loop forever: sleep() returns the seconds remaining if
		 * interrupted, which is always <= the requested time */
		while (sleep(sleeptime) <= sleeptime)
			do_stats_display();
	}
	return 0;
}

/*
 * Function to set all the node statistic values to zero.
 * Called at program startup.
 */
static void
clear_stats(void)
{
	unsigned int i;

	for (i = 0; i < num_nodes; i++)
		nodes[i].stats.rx = nodes[i].stats.rx_drop = 0;
}

/*
 * send a burst of traffic to a node, assuming there are packets
 * available to be sent to this node
 */
static void
flush_rx_queue(uint16_t node)
{
	uint16_t j;
	struct node *cl;

	if (cl_rx_buf[node].count == 0)
		return;

	cl = &nodes[node];
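	/* Bulk enqueue is all-or-nothing: if the ring cannot take the whole
	 * burst, nothing is enqueued and every mbuf must be freed here */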
	if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
			cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count) {
		for (j = 0; j < cl_rx_buf[node].count; j++)
			rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
		cl->stats.rx_drop += cl_rx_buf[node].count;
	} else
		cl->stats.rx += cl_rx_buf[node].count;

	cl_rx_buf[node].count = 0;
}

/*
 * marks a packet down to be sent to a particular node process
 */
static inline void
enqueue_rx_packet(uint8_t node, struct rte_mbuf *buf)
{
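	/* No bounds check is needed: at most PACKET_READ_SIZE mbufs are
	 * buffered before process_packets() flushes every node queue */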
	cl_rx_buf[node].buffer[cl_rx_buf[node].count++] = buf;
}

/*
 * This function takes a group of packets and routes them
 * individually to the node process. The target node for each packet is
 * selected by looking up its IPv4 destination address in the EFD table.
 */
static void
process_packets(uint32_t port_num __rte_unused, struct rte_mbuf *pkts[],
		uint16_t rx_count, unsigned int socket_id)
{
	uint16_t i;
	uint8_t node;
	efd_value_t data[RTE_EFD_BURST_MAX];
	const void *key_ptrs[RTE_EFD_BURST_MAX];

	struct ipv4_hdr *ipv4_hdr;
	uint32_t ipv4_dst_ip[RTE_EFD_BURST_MAX];

	for (i = 0; i < rx_count; i++) {
		/* Extract the IPv4 destination address to use as the EFD key */
		ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct ipv4_hdr *,
				sizeof(struct ether_hdr));
		ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
		key_ptrs[i] = (void *)&ipv4_dst_ip[i];
	}

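	/* One bulk EFD lookup for the whole burst; each returned value is the
	 * id of the node to which the flow was assigned */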
	rte_efd_lookup_bulk(efd_table, socket_id, rx_count,
				(const void **) key_ptrs, data);
	for (i = 0; i < rx_count; i++) {
		node = (uint8_t) ((uintptr_t)data[i]);

		if (node >= num_nodes) {
			/*
			 * Node is out of range, which means that
			 * flow has not been inserted
			 */
			flow_dist_stats.drop++;
			rte_pktmbuf_free(pkts[i]);
		} else {
			flow_dist_stats.distributed++;
			enqueue_rx_packet(node, pkts[i]);
		}
	}

	for (i = 0; i < num_nodes; i++)
		flush_rx_queue(i);
}

/*
 * Function called by the master lcore of the DPDK process.
 */
static void
do_packet_forwarding(void)
{
	unsigned int port_num = 0; /* indexes the port[] array */
	unsigned int socket_id = rte_socket_id();

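	/* Main forwarding loop: poll RX queue 0 of each port in turn */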
	for (;;) {
		struct rte_mbuf *buf[PACKET_READ_SIZE];
		uint16_t rx_count;

		/* read a port */
		rx_count = rte_eth_rx_burst(info->id[port_num], 0,
				buf, PACKET_READ_SIZE);
		info->rx_stats.rx[port_num] += rx_count;

		/* Now process the NIC packets read */
		if (likely(rx_count > 0))
			process_packets(port_num, buf, rx_count, socket_id);

		/* move to next port */
		if (++port_num == info->num_ports)
			port_num = 0;
	}
}

int
main(int argc, char *argv[])
{
	/* initialise the system */
	if (init(argc, argv) < 0)
		return -1;
	RTE_LOG(INFO, APP, "Finished Process Init.\n");

	cl_rx_buf = calloc(num_nodes, sizeof(cl_rx_buf[0]));
	if (cl_rx_buf == NULL)
		rte_exit(EXIT_FAILURE, "Cannot allocate node rx buffers\n");

	/* clear statistics */
	clear_stats();

	/* put all other cores to sleep except the master */
	rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MASTER);

	do_packet_forwarding();
	return 0;
}