/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdarg.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
#include <rte_flow.h>

#include "testpmd.h"

/* Use the RFC 863 Discard Protocol port. */
uint16_t tx_udp_src_port = 9;
uint16_t tx_udp_dst_port = 9;

/* Use the RFC 5735 / RFC 2544 reserved network test addresses (198.18.0.0/15). */
uint32_t tx_ip_src_addr = (198U << 24) | (18 << 16) | (0 << 8) | 1;
uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;

#define IP_DEFTTL  64   /* from RFC 1340. */

static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */

static void
copy_buf_to_pkt_segs(void *buf, unsigned int len, struct rte_mbuf *pkt,
		     unsigned int offset)
{
	struct rte_mbuf *seg;
	void *seg_buf;
	unsigned int copy_len;

	/* Skip to the segment that contains "offset". */
	seg = pkt;
	while (offset >= seg->data_len) {
		offset -= seg->data_len;
		seg = seg->next;
	}
	copy_len = seg->data_len - offset;
	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
	while (len > copy_len) {
		rte_memcpy(seg_buf, buf, (size_t) copy_len);
		len -= copy_len;
		buf = ((char *) buf + copy_len);
		seg = seg->next;
		seg_buf = rte_pktmbuf_mtod(seg, char *);
		copy_len = seg->data_len;
	}
	rte_memcpy(seg_buf, buf, (size_t) len);
}

static inline void
copy_buf_to_pkt(void *buf, unsigned int len, struct rte_mbuf *pkt,
		unsigned int offset)
{
	if (offset + len <= pkt->data_len) {
		rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset),
			   buf, (size_t) len);
		return;
	}
	copy_buf_to_pkt_segs(buf, len, pkt, offset);
}
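
/*
 * Illustrative example (assumed segment sizes, not part of the build):
 * with tx_pkt_seg_lengths = {8, 8, 48}, copying the 14-byte Ethernet
 * header at offset 0 does not fit into the 8-byte first segment, so
 * copy_buf_to_pkt() falls through to copy_buf_to_pkt_segs(), which
 * writes 8 bytes into segment 0 and the remaining 6 bytes into
 * segment 1.  Callers must guarantee that offset + len does not exceed
 * the total packet length, as the segment walk above does not check
 * for a NULL seg->next.
 */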

static void
setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
			 struct rte_udp_hdr *udp_hdr,
			 uint16_t pkt_data_len)
{
	uint16_t *ptr16;
	uint32_t ip_cksum;
	uint16_t pkt_len;

	/*
	 * Initialize UDP header.
	 */
	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
	udp_hdr->src_port = rte_cpu_to_be_16(tx_udp_src_port);
	udp_hdr->dst_port = rte_cpu_to_be_16(tx_udp_dst_port);
	udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
	udp_hdr->dgram_cksum = 0; /* No UDP checksum. */

	/*
	 * Initialize IP header.
	 */
	pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
	ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
	ip_hdr->type_of_service = 0;
	ip_hdr->fragment_offset = 0;
	ip_hdr->time_to_live = IP_DEFTTL;
	ip_hdr->next_proto_id = IPPROTO_UDP;
	ip_hdr->packet_id = 0;
	ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
	ip_hdr->src_addr = rte_cpu_to_be_32(tx_ip_src_addr);
	ip_hdr->dst_addr = rte_cpu_to_be_32(tx_ip_dst_addr);

	/*
	 * Compute IP header checksum.
	 * The 16-bit word at index 5 (the checksum field itself) is
	 * deliberately skipped.
	 */
	ptr16 = (unaligned_uint16_t *) ip_hdr;
	ip_cksum = 0;
	ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
	ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
	ip_cksum += ptr16[4];
	ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
	ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

	/*
	 * Reduce 32 bit checksum to 16 bits and complement it.
	 */
	ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
		(ip_cksum & 0x0000FFFF);
	if (ip_cksum > 65535)
		ip_cksum -= 65535;
	ip_cksum = (~ip_cksum) & 0x0000FFFF;
	if (ip_cksum == 0)
		ip_cksum = 0xFFFF;
	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
}
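
/*
 * Worked example of the folding above (a sketch with an assumed sum):
 * if the nine 16-bit words add up to ip_cksum = 0x2AB3F, the fold
 * yields 0x0002 + 0xAB3F = 0xAB41, and the one's complement stored in
 * hdr_checksum is 0x54BE.  The same result could also be obtained with
 * rte_ipv4_cksum() from rte_ip.h; the open-coded version is kept here
 * and only runs once, when the engine starts.
 */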

static inline bool
pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
		struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
		const uint16_t vlan_tci_outer, const uint64_t ol_flags)
{
	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
	uint8_t ip_var = RTE_PER_LCORE(_ip_var);
	struct rte_mbuf *pkt_seg;
	uint32_t nb_segs, pkt_len;
	uint8_t i;

	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
		nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
	else
		nb_segs = tx_pkt_nb_segs;

	if (nb_segs > 1) {
		if (rte_mempool_get_bulk(mbp, (void **)pkt_segs, nb_segs - 1))
			return false;
	}

	rte_pktmbuf_reset_headroom(pkt);
	pkt->data_len = tx_pkt_seg_lengths[0];
	pkt->ol_flags &= EXT_ATTACHED_MBUF;
	pkt->ol_flags |= ol_flags;
	pkt->vlan_tci = vlan_tci;
	pkt->vlan_tci_outer = vlan_tci_outer;
	pkt->l2_len = sizeof(struct rte_ether_hdr);
	pkt->l3_len = sizeof(struct rte_ipv4_hdr);

	pkt_len = pkt->data_len;
	pkt_seg = pkt;
	for (i = 1; i < nb_segs; i++) {
		pkt_seg->next = pkt_segs[i - 1];
		pkt_seg = pkt_seg->next;
		pkt_seg->data_len = tx_pkt_seg_lengths[i];
		pkt_len += pkt_seg->data_len;
	}
	pkt_seg->next = NULL; /* Last segment of packet. */

	/*
	 * Copy headers into the first packet segment(s).
	 */
	copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, 0);
	copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
			sizeof(struct rte_ether_hdr));
	if (txonly_multi_flow) {
		struct rte_ipv4_hdr *ip_hdr;
		uint32_t addr;

		ip_hdr = rte_pktmbuf_mtod_offset(pkt,
				struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		/*
		 * Generate multiple flows by varying the IP source address.
		 * This lets RSS on the receiving side, if any, distribute
		 * packets across queues, so that txonly mode can serve as
		 * a decent packet generator for a developer's quick
		 * performance regression tests.
		 */
		addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
		ip_hdr->src_addr = rte_cpu_to_be_32(addr);
	}
	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
			sizeof(struct rte_ether_hdr) +
			sizeof(struct rte_ipv4_hdr));

	/*
	 * Complete the first mbuf of the packet and append it to the
	 * burst of packets to be transmitted.
	 */
	pkt->nb_segs = nb_segs;
	pkt->pkt_len = pkt_len;

	return true;
}

/*
 * Transmit a burst of multi-segment packets.
 */
static void
pkt_burst_transmit(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *pkt;
	struct rte_mempool *mbp;
	struct rte_ether_hdr eth_hdr;
	uint16_t nb_tx;
	uint16_t nb_pkt;
	uint16_t vlan_tci, vlan_tci_outer;
	uint32_t retry;
	uint64_t ol_flags = 0;
	uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	mbp = current_fwd_lcore()->mbp;
	txp = &ports[fs->tx_port];
	tx_offloads = txp->dev_conf.txmode.offloads;
	vlan_tci = txp->tx_vlan_id;
	vlan_tci_outer = txp->tx_vlan_id_outer;
	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
		ol_flags = PKT_TX_VLAN_PKT;
	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ol_flags |= PKT_TX_QINQ_PKT;
	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
		ol_flags |= PKT_TX_MACSEC;

	/*
	 * Initialize Ethernet header.
	 */
	rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
	rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
	eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);

	if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
				nb_pkt_per_burst) == 0) {
		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
			if (unlikely(!pkt_burst_prepare(pkts_burst[nb_pkt], mbp,
							&eth_hdr, vlan_tci,
							vlan_tci_outer,
							ol_flags))) {
				rte_mempool_put_bulk(mbp,
						(void **)&pkts_burst[nb_pkt],
						nb_pkt_per_burst - nb_pkt);
				break;
			}
		}
	} else {
		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
			pkt = rte_mbuf_raw_alloc(mbp);
			if (pkt == NULL)
				break;
			if (unlikely(!pkt_burst_prepare(pkt, mbp, &eth_hdr,
							vlan_tci,
							vlan_tci_outer,
							ol_flags))) {
				rte_pktmbuf_free(pkt);
				break;
			}
			pkts_burst[nb_pkt] = pkt;
		}
	}

	if (nb_pkt == 0)
		return;

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
	/*
	 * Retry if necessary.
	 */
	if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_pkt - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;

	if (txonly_multi_flow)
		RTE_PER_LCORE(_ip_var) += nb_tx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_pkt)) {
		if (verbose_level > 0 && fs->fwd_dropped == 0)
			printf("port %d tx_queue %d - drop "
			       "(nb_pkt:%u - nb_tx:%u)=%u packets\n",
			       fs->tx_port, fs->tx_queue,
			       (unsigned int) nb_pkt, (unsigned int) nb_tx,
			       (unsigned int) (nb_pkt - nb_tx));
		fs->fwd_dropped += (nb_pkt - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_pkt);
	}

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
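
/*
 * Note on the retry/drop handling above: rte_eth_tx_burst() takes
 * ownership only of the mbufs it actually queues, so any unsent
 * packets must be freed by the caller, which is what the final
 * do/while loop does.  The retry knobs are runtime-configurable from
 * the testpmd prompt (usage sketch, assuming the standard testpmd
 * CLI):
 *
 *	testpmd> set burst tx delay 10 retry 4
 */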

static void
tx_only_begin(__attribute__((unused)) portid_t pi)
{
	uint16_t pkt_data_len;

	pkt_data_len = (uint16_t) (tx_pkt_length - (
					sizeof(struct rte_ether_hdr) +
					sizeof(struct rte_ipv4_hdr) +
					sizeof(struct rte_udp_hdr)));
	setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
}

struct fwd_engine tx_only_engine = {
	.fwd_mode_name  = "txonly",
	.port_fwd_begin = tx_only_begin,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_transmit,
};
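
/*
 * Usage sketch (assuming the standard testpmd CLI and the default
 * 64-byte tx_pkt_length): the engine is selected by name, and
 * tx_only_begin() precomputes the UDP/IP headers once at start, e.g.:
 *
 *	testpmd> set fwd txonly
 *	testpmd> set txpkts 64
 *	testpmd> start
 *
 * A 64-byte packet then carries 64 - (14 + 20 + 8) = 22 bytes of UDP
 * payload.
 */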