/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <errno.h>
#include <netinet/ip.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_atomic.h>
#include <rte_ring.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_mempool.h>
#include <rte_memcpy.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_interrupts.h>
#include <rte_ethdev.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_efd.h>
#include <rte_ip.h>

#include "common.h"
#include "args.h"
#include "init.h"

/*
 * When doing reads from the NIC or the node queues,
 * use this batch size
 */
#define PACKET_READ_SIZE 32

/*
 * Local buffers to put packets in, used to send packets in bursts to the
 * nodes
 */
struct node_rx_buf {
	struct rte_mbuf *buffer[PACKET_READ_SIZE];
	uint16_t count;
};

struct efd_stats {
	uint64_t distributed;
	uint64_t drop;
} flow_dist_stats;

/* One buffer per node rx queue - dynamically allocate array */
static struct node_rx_buf *cl_rx_buf;

static const char *
get_printable_mac_addr(uint16_t port)
{
	static const char err_address[] = "00:00:00:00:00:00";
	static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
	struct rte_ether_addr mac;
	int ret;

	if (unlikely(port >= RTE_MAX_ETHPORTS))
		return err_address;
	if (unlikely(addresses[port][0] == '\0')) {
		ret = rte_eth_macaddr_get(port, &mac);
		if (ret != 0) {
			printf("Failed to get MAC address (port %u): %s\n",
					port, rte_strerror(-ret));
			return err_address;
		}

		snprintf(addresses[port], sizeof(addresses[port]),
				"%02x:%02x:%02x:%02x:%02x:%02x",
				mac.addr_bytes[0], mac.addr_bytes[1],
				mac.addr_bytes[2], mac.addr_bytes[3],
				mac.addr_bytes[4], mac.addr_bytes[5]);
	}
	return addresses[port];
}

/*
 * This function displays the recorded statistics for each port
 * and for each node. It uses ANSI terminal codes to clear the
 * screen when called. It is called from a single non-master
 * thread in the server process, when the process is run with more
 * than one lcore enabled.
 */
static void
do_stats_display(void)
{
	unsigned int i, j;
	const char clr[] = {27, '[', '2', 'J', '\0'};
	const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0'};
	uint64_t port_tx[RTE_MAX_ETHPORTS], port_tx_drop[RTE_MAX_ETHPORTS];
	uint64_t node_tx[MAX_NODES], node_tx_drop[MAX_NODES];

	/* to get TX stats, we need to do some summing calculations */
	memset(port_tx, 0, sizeof(port_tx));
	memset(port_tx_drop, 0, sizeof(port_tx_drop));
	memset(node_tx, 0, sizeof(node_tx));
	memset(node_tx_drop, 0, sizeof(node_tx_drop));

	for (i = 0; i < num_nodes; i++) {
		const struct tx_stats *tx = &info->tx_stats[i];

		for (j = 0; j < info->num_ports; j++) {
			const uint64_t tx_val = tx->tx[info->id[j]];
			const uint64_t drop_val = tx->tx_drop[info->id[j]];

			port_tx[j] += tx_val;
			port_tx_drop[j] += drop_val;
			node_tx[i] += tx_val;
			node_tx_drop[i] += drop_val;
		}
	}

	/* Clear screen and move to top left */
	printf("%s%s", clr, topLeft);

	printf("PORTS\n");
	printf("-----\n");
	for (i = 0; i < info->num_ports; i++)
		printf("Port %u: '%s'\t", (unsigned int)info->id[i],
				get_printable_mac_addr(info->id[i]));
	printf("\n\n");
	for (i = 0; i < info->num_ports; i++) {
		printf("Port %u - rx: %9"PRIu64"\t"
				"tx: %9"PRIu64"\n",
				(unsigned int)info->id[i], info->rx_stats.rx[i],
				port_tx[i]);
	}

	printf("\nSERVER\n");
	printf("------\n");
	printf("distributed: %9"PRIu64", drop: %9"PRIu64"\n",
			flow_dist_stats.distributed, flow_dist_stats.drop);

	printf("\nNODES\n");
	printf("-----\n");
	for (i = 0; i < num_nodes; i++) {
		const unsigned long long rx = nodes[i].stats.rx;
		const unsigned long long rx_drop = nodes[i].stats.rx_drop;
		const struct filter_stats *filter = &info->filter_stats[i];

		printf("Node %2u - rx: %9llu, rx_drop: %9llu\n"
				"          tx: %9"PRIu64", tx_drop: %9"PRIu64"\n"
				"          filter_passed: %9"PRIu64", "
				"filter_drop: %9"PRIu64"\n",
				i, rx, rx_drop, node_tx[i], node_tx_drop[i],
				filter->passed, filter->drop);
	}

	printf("\n");
}

/*
 * The function called from each non-master lcore used by the process.
 * The first lcore to atomically set the display_stats flag becomes the
 * lcore that periodically displays the statistics; every other lcore
 * returns immediately and stays idle.
 */
static int
sleep_lcore(__attribute__((unused)) void *dummy)
{
	/* Used to pick a display thread - static, so zero-initialised */
	static rte_atomic32_t display_stats;

	/* Only one core should display stats */
	if (rte_atomic32_test_and_set(&display_stats)) {
		const unsigned int sleeptime = 1;

		printf("Core %u displaying statistics\n", rte_lcore_id());

		/* Longer initial pause so above printf is seen */
		sleep(sleeptime * 3);

		/* Loop forever: sleep always returns 0 or <= param */
		while (sleep(sleeptime) <= sleeptime)
			do_stats_display();
	}
	return 0;
}

/*
 * Function to set all the node statistic values to zero.
 * Called at program startup.
 */
static void
clear_stats(void)
{
	unsigned int i;

	for (i = 0; i < num_nodes; i++)
		nodes[i].stats.rx = nodes[i].stats.rx_drop = 0;
}

/*
 * send a burst of traffic to a node, assuming there are packets
 * available to be sent to this node
 */
static void
flush_rx_queue(uint16_t node)
{
	uint16_t j;
	struct node *cl;

	if (cl_rx_buf[node].count == 0)
		return;

	cl = &nodes[node];
	if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
			cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count) {
		/* bulk enqueue failed (ring full): drop the whole burst */
		for (j = 0; j < cl_rx_buf[node].count; j++)
			rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
		cl->stats.rx_drop += cl_rx_buf[node].count;
	} else
		cl->stats.rx += cl_rx_buf[node].count;

	cl_rx_buf[node].count = 0;
}

/*
 * marks a packet down to be sent to a particular node process
 */
static inline void
enqueue_rx_packet(uint8_t node, struct rte_mbuf *buf)
{
	cl_rx_buf[node].buffer[cl_rx_buf[node].count++] = buf;
}

/*
 * This function takes a group of packets and routes them individually to
 * the node processes. The destination node for each packet is obtained by
 * looking up its IPv4 destination address in the EFD table; packets whose
 * flow is not present in the table are dropped.
 */
static void
process_packets(uint32_t port_num __rte_unused, struct rte_mbuf *pkts[],
		uint16_t rx_count, unsigned int socket_id)
{
	uint16_t i;
	uint8_t node;
	efd_value_t data[RTE_EFD_BURST_MAX];
	const void *key_ptrs[RTE_EFD_BURST_MAX];

	struct rte_ipv4_hdr *ipv4_hdr;
	uint32_t ipv4_dst_ip[RTE_EFD_BURST_MAX];

	for (i = 0; i < rx_count; i++) {
		/* Handle IPv4 header. */
		ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i],
				struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
		key_ptrs[i] = (void *)&ipv4_dst_ip[i];
	}

	rte_efd_lookup_bulk(efd_table, socket_id, rx_count,
				(const void **) key_ptrs, data);
	for (i = 0; i < rx_count; i++) {
		node = (uint8_t) ((uintptr_t)data[i]);

		if (node >= num_nodes) {
			/*
			 * Node is out of range, which means that
			 * flow has not been inserted
			 */
			flow_dist_stats.drop++;
			rte_pktmbuf_free(pkts[i]);
		} else {
			flow_dist_stats.distributed++;
			enqueue_rx_packet(node, pkts[i]);
		}
	}

	for (i = 0; i < num_nodes; i++)
		flush_rx_queue(i);
}
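
/*
 * Note on the EFD lookup above: an EFD table returns a value for any key,
 * including keys that were never inserted, which is why process_packets()
 * treats an out-of-range node id as "flow not inserted" and drops the
 * packet. The table itself is expected to be populated at startup, outside
 * this file, with <IPv4 destination address -> node id> entries. A rough,
 * illustrative sketch of such an insertion (ip_dst and node_id are
 * placeholder names, not part of this example):
 *
 *	uint32_t ip_dst = ...;      // flow key, network byte order,
 *	                            // must match ipv4_hdr->dst_addr above
 *	efd_value_t node_id = ...;  // target node index, < num_nodes
 *	rte_efd_update(efd_table, socket_id, &ip_dst, node_id);
 *	                            // return value should be checked in
 *	                            // real code
 */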
/*
 * Function called by the master lcore of the DPDK process. Reads a burst
 * of packets from each port in turn and hands them to process_packets()
 * for distribution to the nodes. Never returns.
 */
static void
do_packet_forwarding(void)
{
	unsigned int port_num = 0; /* indexes the port[] array */
	unsigned int socket_id = rte_socket_id();

	for (;;) {
		struct rte_mbuf *buf[PACKET_READ_SIZE];
		uint16_t rx_count;

		/* read a port */
		rx_count = rte_eth_rx_burst(info->id[port_num], 0,
				buf, PACKET_READ_SIZE);
		info->rx_stats.rx[port_num] += rx_count;

		/* Now process the NIC packets read */
		if (likely(rx_count > 0))
			process_packets(port_num, buf, rx_count, socket_id);

		/* move to next port */
		if (++port_num == info->num_ports)
			port_num = 0;
	}
}

int
main(int argc, char *argv[])
{
	/* initialise the system */
	if (init(argc, argv) < 0)
		return -1;
	RTE_LOG(INFO, APP, "Finished Process Init.\n");

	cl_rx_buf = calloc(num_nodes, sizeof(cl_rx_buf[0]));
	if (cl_rx_buf == NULL)
		rte_exit(EXIT_FAILURE, "Cannot allocate node rx buffers\n");

	/* clear statistics */
	clear_stats();

	/* put all other cores to sleep except the master */
	rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MASTER);

	do_packet_forwarding();
	return 0;
}
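
/*
 * For reference, a node process on the receiving end of cl->rx_q would
 * typically drain its ring with something along the lines of the sketch
 * below (rx_ring and handle_packet are illustrative names only; the real
 * node code lives in the node part of this example, not in this file):
 *
 *	struct rte_mbuf *pkts[PACKET_READ_SIZE];
 *	unsigned int i, n;
 *
 *	n = rte_ring_dequeue_burst(rx_ring, (void **)pkts,
 *			PACKET_READ_SIZE, NULL);
 *	for (i = 0; i < n; i++)
 *		handle_packet(pkts[i]);	// e.g. filter, then transmit
 */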