xref: /dpdk/examples/server_node_efd/node/node.c (revision 11541c5c)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <getopt.h>
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_ring.h>
#include <rte_debug.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_ip.h>

#include "common.h"

/* Number of packets to attempt to read from queue */
#define PKT_READ_SIZE  ((uint16_t)32)

/*
 * Our node id number - tells us which RX queue to read and which NIC TX
 * queue to write to.
 */
static uint8_t node_id;

/* Number of packets a TX buffer can hold before it is flushed to the NIC */
#define MBQ_CAPACITY 32

/* maps input ports to output ports for packets */
static uint16_t output_ports[RTE_MAX_ETHPORTS];

/* buffers up a set of packets that are ready to send */
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

/* shared data from the server; we update statistics here */
static struct tx_stats *tx_stats;

static struct filter_stats *filter_stats;
/*
 * Print a usage message.
 */
static void
usage(const char *progname)
{
	printf("Usage: %s [EAL args] -- -n <node_id>\n\n", progname);
}
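
/*
 * Illustrative usage (not part of the original source): the node must run as
 * a DPDK secondary process, started after the server has created the shared
 * rings, mempool and memzone it looks up below, so a typical invocation looks
 * something like
 *
 *   <node binary> [EAL args] --proc-type=secondary -- -n 0
 *
 * where 0 is this node's id.
 */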

/*
 * Convert the node id number from a string to an int.
 */
static int
parse_node_num(const char *node)
{
	char *end = NULL;
	unsigned long temp;

	if (node == NULL || *node == '\0')
		return -1;

	temp = strtoul(node, &end, 10);
	if (end == NULL || *end != '\0')
		return -1;

	node_id = (uint8_t)temp;
	return 0;
}

/*
 * Parse the application arguments to the node app.
 */
static int
parse_app_args(int argc, char *argv[])
{
	int option_index, opt;
	char **argvopt = argv;
	const char *progname = NULL;
	static struct option lgopts[] = { /* no long options */
		{NULL, 0, 0, 0}
	};
	progname = argv[0];

	while ((opt = getopt_long(argc, argvopt, "n:", lgopts,
		&option_index)) != EOF) {
		switch (opt) {
		case 'n':
			if (parse_node_num(optarg) != 0) {
				usage(progname);
				return -1;
			}
			break;
		default:
			usage(progname);
			return -1;
		}
	}
	return 0;
}

/*
 * TX buffer error callback
 */
static void
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
		void *userdata)
{
	int i;
	uint16_t port_id = (uintptr_t)userdata;

	tx_stats->tx_drop[port_id] += count;

	/* free the mbufs that failed to transmit */
	for (i = 0; i < count; i++)
		rte_pktmbuf_free(unsent[i]);
}

static void
configure_tx_buffer(uint16_t port_id, uint16_t size)
{
	int ret;

	/* Initialize TX buffers */
	tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(size), 0,
			rte_eth_dev_socket_id(port_id));
	if (tx_buffer[port_id] == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot allocate buffer for tx on port %u\n", port_id);

	rte_eth_tx_buffer_init(tx_buffer[port_id], size);

	ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
			flush_tx_error_callback, (void *)(intptr_t)port_id);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"Cannot set error callback for tx buffer on port %u\n",
			port_id);
}
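
/*
 * Note on the buffered TX path used above: packets handed to
 * rte_eth_tx_buffer() accumulate in these per-port buffers until
 * MBQ_CAPACITY packets are queued or rte_eth_tx_buffer_flush() is called;
 * any packets the NIC cannot accept are passed to flush_tx_error_callback(),
 * which frees them and accounts them as drops.
 */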

/*
 * Set up output ports so that all traffic on a port gets sent out
 * its paired port. Index using actual port numbers since that is
 * what comes in the mbuf structure.
 */
static void
configure_output_ports(const struct shared_info *info)
{
	int i;

	if (info->num_ports > RTE_MAX_ETHPORTS)
		rte_exit(EXIT_FAILURE, "Too many Ethernet ports. "
				"RTE_MAX_ETHPORTS = %u\n",
				(unsigned int)RTE_MAX_ETHPORTS);
	for (i = 0; i < info->num_ports - 1; i += 2) {
		uint8_t p1 = info->id[i];
		uint8_t p2 = info->id[i+1];

		output_ports[p1] = p2;
		output_ports[p2] = p1;

		configure_tx_buffer(p1, MBQ_CAPACITY);
		configure_tx_buffer(p2, MBQ_CAPACITY);
	}
}
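
/*
 * For illustration: ports are paired in the order the server reports them,
 * so with ports 0, 1, 2 and 3, traffic received on port 0 is transmitted on
 * port 1 (and vice versa) and traffic on port 2 goes out port 3; with an odd
 * number of ports the last one is left unpaired.
 */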

/*
 * Create the hash table that will contain the flows that
 * the node will handle, which will be used to decide whether a packet
 * is transmitted or dropped.
 */

/* Creation of hash table. 8< */
static struct rte_hash *
create_hash_table(const struct shared_info *info)
{
	uint32_t num_flows_node = info->num_flows / info->num_nodes;
	char name[RTE_HASH_NAMESIZE];
	struct rte_hash *h;

	/* create table */
	struct rte_hash_parameters hash_params = {
		.entries = num_flows_node * 2, /* table load = 50% */
		.key_len = sizeof(uint32_t), /* Store IPv4 dest IP address */
		.socket_id = rte_socket_id(),
		.hash_func_init_val = 0,
	};

	snprintf(name, sizeof(name), "hash_table_%d", node_id);
	hash_params.name = name;
	h = rte_hash_create(&hash_params);

	if (h == NULL)
		rte_exit(EXIT_FAILURE,
				"Problem creating the hash table for node %d\n",
				node_id);
	return h;
}

static void
populate_hash_table(const struct rte_hash *h, const struct shared_info *info)
{
	unsigned int i;
	int32_t ret;
	uint32_t ip_dst;
	uint32_t num_flows_node = 0;
	uint64_t target_node;

	/* Add this node's flows to the table */
	for (i = 0; i < info->num_flows; i++) {
		target_node = i % info->num_nodes;
		if (target_node != node_id)
			continue;

		ip_dst = rte_cpu_to_be_32(i);

		ret = rte_hash_add_key(h, (void *) &ip_dst);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u "
					"in hash table\n", i);
		else
			num_flows_node++;
	}

	printf("Hash table: Added %u keys\n", num_flows_node);
}
/* >8 End of creation of hash table. */
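
/*
 * For illustration: the lookup key is the 32-bit destination IPv4 address in
 * network byte order, and flow i is assigned to node (i % num_nodes). With
 * two nodes, for example, node 0 stores the keys for destination addresses
 * 0.0.0.0, 0.0.0.2, 0.0.0.4, ... and node 1 stores the odd-numbered ones.
 */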

/*
 * This function performs routing of packets: it simply sends each input
 * packet out the output port paired with the port it arrived on.
 */
static inline void
transmit_packet(struct rte_mbuf *buf)
{
	int sent;
	const uint16_t in_port = buf->port;
	const uint16_t out_port = output_ports[in_port];
	struct rte_eth_dev_tx_buffer *buffer = tx_buffer[out_port];

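	/*
	 * rte_eth_tx_buffer() only queues the packet: it returns 0 unless
	 * adding this packet filled the buffer, in which case the buffered
	 * packets are transmitted and their count is returned.
	 */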
	sent = rte_eth_tx_buffer(out_port, node_id, buffer, buf);
	if (sent)
		tx_stats->tx[out_port] += sent;
}

/* Packets dequeued from the shared ring. 8< */
static inline void
handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	uint32_t ipv4_dst_ip[PKT_READ_SIZE];
	const void *key_ptrs[PKT_READ_SIZE];
	unsigned int i;
	int32_t positions[PKT_READ_SIZE] = {0};

	for (i = 0; i < num_packets; i++) {
		/* Handle IPv4 header. */
		ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i],
			struct rte_ipv4_hdr *, sizeof(struct rte_ether_hdr));
		ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
		key_ptrs[i] = &ipv4_dst_ip[i];
	}
	/* Check if packets belong to any flows handled by this node */
	rte_hash_lookup_bulk(h, key_ptrs, num_packets, positions);

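	/*
	 * rte_hash_lookup_bulk() fills positions[] with each key's index in
	 * the table, or a negative value (-ENOENT) for keys that are not
	 * present.
	 */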
	for (i = 0; i < num_packets; i++) {
		if (likely(positions[i] >= 0)) {
			filter_stats->passed++;
			transmit_packet(bufs[i]);
		} else {
			filter_stats->drop++;
			/* Drop packet, as flow is not handled by this node */
			rte_pktmbuf_free(bufs[i]);
		}
	}
}
/* >8 End of packets dequeueing. */

/*
 * Application main function - loops through
 * receiving and processing packets. Never returns.
 */
int
main(int argc, char *argv[])
{
	const struct rte_memzone *mz;
	struct rte_ring *rx_ring;
	struct rte_hash *h;
	struct rte_mempool *mp;
	struct shared_info *info;
	int need_flush = 0; /* indicates whether we have unsent packets */
	int retval;
	void *pkts[PKT_READ_SIZE];
	uint16_t sent;

	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	argc -= retval;
	argv += retval;

	if (parse_app_args(argc, argv) < 0)
		rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");

	if (rte_eth_dev_count_avail() == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

	/* Attaching to the server process memory. 8< */
	rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));
	if (rx_ring == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get RX ring - "
				"is server process running?\n");

	mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
	if (mp == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");

	mz = rte_memzone_lookup(MZ_SHARED_INFO);
	if (mz == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
	info = mz->addr;
	tx_stats = &(info->tx_stats[node_id]);
	filter_stats = &(info->filter_stats[node_id]);
	/* >8 End of attaching to the server process memory. */

	configure_output_ports(info);

	h = create_hash_table(info);

	populate_hash_table(h, info);

	RTE_LOG(INFO, APP, "Finished Process Init.\n");

	printf("\nNode process %d handling packets\n", node_id);
	printf("[Press Ctrl-C to quit ...]\n");

	for (;;) {
		uint16_t rx_pkts = PKT_READ_SIZE;
		uint16_t port;

		/*
		 * Try to dequeue the maximum number of packets first; if that
		 * fails, take however many are currently in the ring.
		 * rte_ring_dequeue_bulk() is all-or-nothing, so the loop body
		 * should execute at most once.
		 */
		while (rx_pkts > 0 &&
				unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
					rx_pkts, NULL) == 0))
			rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
					PKT_READ_SIZE);

		if (unlikely(rx_pkts == 0)) {
			if (need_flush)
				for (port = 0; port < info->num_ports; port++) {
					sent = rte_eth_tx_buffer_flush(
							info->id[port],
							node_id,
							tx_buffer[port]);
					if (unlikely(sent))
						tx_stats->tx[port] += sent;
				}
			need_flush = 0;
			continue;
		}

		handle_packets(h, (struct rte_mbuf **)pkts, rx_pkts);

		need_flush = 1;
	}

	/* clean up the EAL */
	rte_eal_cleanup();
}