/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <rte_byteorder.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_os_shim.h>

#include "packet_burst_generator.h"

#define UDP_SRC_PORT 1024
#define UDP_DST_PORT 1024


#define IP_DEFTTL  64   /* from RFC 1340. */

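/*
 * Copy an arbitrary buffer into a chained (multi-segment) mbuf, starting
 * at the given byte offset from the start of the packet data. The chain
 * is assumed to be large enough to hold offset + len bytes.
 */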
static void
copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
		unsigned offset)
{
	struct rte_mbuf *seg;
	void *seg_buf;
	unsigned copy_len;

	seg = pkt;
	while (offset >= seg->data_len) {
		offset -= seg->data_len;
		seg = seg->next;
	}
	copy_len = seg->data_len - offset;
	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
	while (len > copy_len) {
		rte_memcpy(seg_buf, buf, (size_t) copy_len);
		len -= copy_len;
		buf = ((char *) buf + copy_len);
		seg = seg->next;
		seg_buf = rte_pktmbuf_mtod(seg, void *);
		copy_len = seg->data_len; /* next segment's full capacity */
	}
	rte_memcpy(seg_buf, buf, (size_t) len);
}

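/*
 * Copy a buffer into an mbuf at the given offset, using a single memcpy
 * when the data fits in the first segment and falling back to the
 * segment-aware copy above otherwise.
 */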
static inline void
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
	if (offset + len <= pkt->data_len) {
		rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset), buf,
			   (size_t) len);
		return;
	}
	copy_buf_to_pkt_segs(buf, len, pkt, offset);
}

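/*
 * Fill in an Ethernet header. When vlan_enabled is non-zero, an 802.1Q
 * tag carrying vlan_id is placed right after the Ethernet header and the
 * outer ether_type is set to RTE_ETHER_TYPE_VLAN; the TCI is stored as
 * given, without byte-order conversion.
 */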
void
initialize_eth_header(struct rte_ether_hdr *eth_hdr,
		struct rte_ether_addr *src_mac,
		struct rte_ether_addr *dst_mac, uint16_t ether_type,
		uint8_t vlan_enabled, uint16_t vlan_id)
{
	rte_ether_addr_copy(dst_mac, &eth_hdr->dst_addr);
	rte_ether_addr_copy(src_mac, &eth_hdr->src_addr);

	if (vlan_enabled) {
		struct rte_vlan_hdr *vhdr = (struct rte_vlan_hdr *)(
			(uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr));

		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);

		vhdr->eth_proto = rte_cpu_to_be_16(ether_type);
		vhdr->vlan_tci = vlan_id;
	} else {
		eth_hdr->ether_type = rte_cpu_to_be_16(ether_type);
	}
}

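/*
 * Fill in an Ethernet/IPv4 ARP header. The opcode is converted to network
 * byte order here; src_ip and dst_ip are written as given, without any
 * byte-order conversion.
 */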
void
initialize_arp_header(struct rte_arp_hdr *arp_hdr,
		struct rte_ether_addr *src_mac,
		struct rte_ether_addr *dst_mac,
		uint32_t src_ip, uint32_t dst_ip,
		uint32_t opcode)
{
	arp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
	arp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
	arp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN;
	arp_hdr->arp_plen = sizeof(uint32_t);
	arp_hdr->arp_opcode = rte_cpu_to_be_16(opcode);
	rte_ether_addr_copy(src_mac, &arp_hdr->arp_data.arp_sha);
	arp_hdr->arp_data.arp_sip = src_ip;
	rte_ether_addr_copy(dst_mac, &arp_hdr->arp_data.arp_tha);
	arp_hdr->arp_data.arp_tip = dst_ip;
}

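/*
 * Fill in a UDP header with the given ports and a zero (disabled)
 * checksum. Returns the UDP datagram length (payload plus UDP header)
 * in host byte order.
 */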
uint16_t
initialize_udp_header(struct rte_udp_hdr *udp_hdr, uint16_t src_port,
		uint16_t dst_port, uint16_t pkt_data_len)
{
	uint16_t pkt_len;

	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));

	udp_hdr->src_port = rte_cpu_to_be_16(src_port);
	udp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
	udp_hdr->dgram_len = rte_cpu_to_be_16(pkt_len);
	udp_hdr->dgram_cksum = 0; /* No UDP checksum. */

	return pkt_len;
}

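/*
 * Fill in a minimal TCP header: ports and data offset only, no options,
 * no flags, zero checksum. Returns the payload length plus the TCP
 * header size.
 */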
uint16_t
initialize_tcp_header(struct rte_tcp_hdr *tcp_hdr, uint16_t src_port,
		uint16_t dst_port, uint16_t pkt_data_len)
{
	uint16_t pkt_len;

	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_tcp_hdr));

	memset(tcp_hdr, 0, sizeof(struct rte_tcp_hdr));
	tcp_hdr->src_port = rte_cpu_to_be_16(src_port);
	tcp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
	tcp_hdr->data_off = (sizeof(struct rte_tcp_hdr) << 2) & 0xF0;

	return pkt_len;
}

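/*
 * Fill in an SCTP common header with a zero verification tag and a zero
 * checksum. Returns the payload length plus the common header size.
 */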
uint16_t
initialize_sctp_header(struct rte_sctp_hdr *sctp_hdr, uint16_t src_port,
		uint16_t dst_port, uint16_t pkt_data_len)
{
	uint16_t pkt_len;

	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_sctp_hdr));

	sctp_hdr->src_port = rte_cpu_to_be_16(src_port);
	sctp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
	sctp_hdr->tag = 0;
	sctp_hdr->cksum = 0; /* No SCTP checksum. */

	return pkt_len;
}

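/*
 * Fill in an IPv6 header carrying UDP with the default hop limit.
 * src_addr and dst_addr must point to 16-byte addresses. Returns the
 * payload length plus the IPv6 header size.
 */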
uint16_t
initialize_ipv6_header(struct rte_ipv6_hdr *ip_hdr, uint8_t *src_addr,
		uint8_t *dst_addr, uint16_t pkt_data_len)
{
	ip_hdr->vtc_flow = rte_cpu_to_be_32(0x60000000); /* Set version to 6. */
	ip_hdr->payload_len = rte_cpu_to_be_16(pkt_data_len);
	ip_hdr->proto = IPPROTO_UDP;
	ip_hdr->hop_limits = IP_DEFTTL;

	rte_memcpy(ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
	rte_memcpy(ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));

	return (uint16_t) (pkt_data_len + sizeof(struct rte_ipv6_hdr));
}

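/*
 * Fill in an IPv4/UDP header with no fragmentation and the default TTL,
 * and compute the header checksum in software. src_addr and dst_addr are
 * given in host byte order. Returns the total IPv4 length (header plus
 * payload) in host byte order.
 */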
uint16_t
initialize_ipv4_header(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
		uint32_t dst_addr, uint16_t pkt_data_len)
{
	uint16_t pkt_len;
	unaligned_uint16_t *ptr16;
	uint32_t ip_cksum;

	/*
	 * Initialize IP header.
	 */
	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_ipv4_hdr));

	ip_hdr->version_ihl   = RTE_IPV4_VHL_DEF;
	ip_hdr->type_of_service   = 0;
	ip_hdr->fragment_offset = 0;
	ip_hdr->time_to_live   = IP_DEFTTL;
	ip_hdr->next_proto_id = IPPROTO_UDP;
	ip_hdr->packet_id = 0;
	ip_hdr->total_length   = rte_cpu_to_be_16(pkt_len);
	ip_hdr->src_addr = rte_cpu_to_be_32(src_addr);
	ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr);

	/*
	 * Compute IP header checksum.
	 */
	ptr16 = (unaligned_uint16_t *)ip_hdr;
	ip_cksum = 0;
	ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
	ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
	ip_cksum += ptr16[4];
	ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
	ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

	/*
	 * Reduce 32 bit checksum to 16 bits and complement it.
	 */
	ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
		(ip_cksum & 0x0000FFFF);
	ip_cksum %= 65536;
	ip_cksum = (~ip_cksum) & 0x0000FFFF;
	if (ip_cksum == 0)
		ip_cksum = 0xFFFF;
	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;

	return pkt_len;
}

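/*
 * Same as initialize_ipv4_header(), except that the L4 protocol number
 * is supplied by the caller instead of being hard-coded to UDP.
 */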
uint16_t
initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
		uint32_t dst_addr, uint16_t pkt_data_len, uint8_t proto)
{
	uint16_t pkt_len;
	unaligned_uint16_t *ptr16;
	uint32_t ip_cksum;

	/*
	 * Initialize IP header.
	 */
	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_ipv4_hdr));

	ip_hdr->version_ihl   = RTE_IPV4_VHL_DEF;
	ip_hdr->type_of_service   = 0;
	ip_hdr->fragment_offset = 0;
	ip_hdr->time_to_live   = IP_DEFTTL;
	ip_hdr->next_proto_id = proto;
	ip_hdr->packet_id = 0;
	ip_hdr->total_length   = rte_cpu_to_be_16(pkt_len);
	ip_hdr->src_addr = rte_cpu_to_be_32(src_addr);
	ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr);

	/*
	 * Compute IP header checksum.
	 */
	ptr16 = (unaligned_uint16_t *)ip_hdr;
	ip_cksum = 0;
	ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
	ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
	ip_cksum += ptr16[4];
	ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
	ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

	/*
	 * Reduce 32 bit checksum to 16 bits and complement it.
	 */
	ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
		(ip_cksum & 0x0000FFFF);
	ip_cksum %= 65536;
	ip_cksum = (~ip_cksum) & 0x0000FFFF;
	if (ip_cksum == 0)
		ip_cksum = 0xFFFF;
	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;

	return pkt_len;
}

/*
 * The maximum number of segments per packet is used when creating
 * scattered transmit packets composed of a list of mbufs.
 */
#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is an 8-bit unsigned char. */


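/*
 * Build a burst of packets: allocate nb_pkt_per_burst mbuf chains of
 * nb_pkt_segs segments (pkt_len bytes each) from mp, copy the pre-built
 * Ethernet, IP and UDP headers into the head of every packet and store
 * the results in pkts_burst. Returns the number of packets actually
 * built, which may be less than requested if the mempool runs dry, or
 * -1 if no mbuf at all could be allocated.
 *
 * Illustrative usage sketch (the mempool "pool", the MAC variables and
 * the addresses below are placeholders, not taken from a real test):
 *
 *	struct rte_mbuf *burst[32];
 *	struct rte_ether_hdr eth;
 *	struct rte_ipv4_hdr ip;
 *	struct rte_udp_hdr udp;
 *	uint16_t dgram_len;
 *
 *	initialize_eth_header(&eth, &src_mac, &dst_mac,
 *			RTE_ETHER_TYPE_IPV4, 0, 0);
 *	dgram_len = initialize_udp_header(&udp, UDP_SRC_PORT, UDP_DST_PORT,
 *			18);
 *	initialize_ipv4_header(&ip, RTE_IPV4(192, 168, 0, 1),
 *			RTE_IPV4(192, 168, 0, 2), dgram_len);
 *	generate_packet_burst(pool, burst, &eth, 0, &ip, 1, &udp,
 *			32, 60, 1);
 */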
int
generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
{
	int i, nb_pkt = 0;
	size_t eth_hdr_size;

	struct rte_mbuf *pkt_seg;
	struct rte_mbuf *pkt;

	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
		pkt = rte_pktmbuf_alloc(mp);
		if (pkt == NULL) {
nomore_mbuf:
			if (nb_pkt == 0)
				return -1;
			break;
		}

		pkt->data_len = pkt_len;
		pkt_seg = pkt;
		for (i = 1; i < nb_pkt_segs; i++) {
			pkt_seg->next = rte_pktmbuf_alloc(mp);
			if (pkt_seg->next == NULL) {
				pkt->nb_segs = i;
				rte_pktmbuf_free(pkt);
				goto nomore_mbuf;
			}
			pkt_seg = pkt_seg->next;
			pkt_seg->data_len = pkt_len;
		}
		pkt_seg->next = NULL; /* Last segment of packet. */

		/*
		 * Copy headers in first packet segment(s).
		 */
		if (vlan_enabled)
			eth_hdr_size = sizeof(struct rte_ether_hdr) +
				sizeof(struct rte_vlan_hdr);
		else
			eth_hdr_size = sizeof(struct rte_ether_hdr);

		copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);

		if (ipv4) {
			copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv4_hdr),
				pkt, eth_hdr_size);
			copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt,
				eth_hdr_size + sizeof(struct rte_ipv4_hdr));
		} else {
			copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv6_hdr),
				pkt, eth_hdr_size);
			copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt,
				eth_hdr_size + sizeof(struct rte_ipv6_hdr));
		}

		/*
		 * Complete first mbuf of packet and append it to the
		 * burst of packets to be transmitted.
		 */
		pkt->nb_segs = nb_pkt_segs;
		pkt->pkt_len = pkt_len;
		pkt->l2_len = eth_hdr_size;

		if (ipv4) {
			pkt->vlan_tci  = RTE_ETHER_TYPE_IPV4;
			pkt->l3_len = sizeof(struct rte_ipv4_hdr);
		} else {
			pkt->vlan_tci  = RTE_ETHER_TYPE_IPV6;
			pkt->l3_len = sizeof(struct rte_ipv6_hdr);
		}

		pkts_burst[nb_pkt] = pkt;
	}

	return nb_pkt;
}

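/*
 * Same as generate_packet_burst(), but the L4 header is chosen by the
 * proto argument (IPPROTO_UDP, IPPROTO_TCP or IPPROTO_SCTP); for any
 * other value the L4 header area is left uninitialized.
 */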
int
generate_packet_burst_proto(struct rte_mempool *mp,
		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
		uint8_t vlan_enabled, void *ip_hdr,
		uint8_t ipv4, uint8_t proto, void *proto_hdr,
		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
{
	int i, nb_pkt = 0;
	size_t eth_hdr_size;

	struct rte_mbuf *pkt_seg;
	struct rte_mbuf *pkt;

	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
		pkt = rte_pktmbuf_alloc(mp);
		if (pkt == NULL) {
nomore_mbuf:
			if (nb_pkt == 0)
				return -1;
			break;
		}

		pkt->data_len = pkt_len;
		pkt_seg = pkt;
		for (i = 1; i < nb_pkt_segs; i++) {
			pkt_seg->next = rte_pktmbuf_alloc(mp);
			if (pkt_seg->next == NULL) {
				pkt->nb_segs = i;
				rte_pktmbuf_free(pkt);
				goto nomore_mbuf;
			}
			pkt_seg = pkt_seg->next;
			pkt_seg->data_len = pkt_len;
		}
		pkt_seg->next = NULL; /* Last segment of packet. */

		/*
		 * Copy headers in first packet segment(s).
		 */
		if (vlan_enabled)
			eth_hdr_size = sizeof(struct rte_ether_hdr) +
				sizeof(struct rte_vlan_hdr);
		else
			eth_hdr_size = sizeof(struct rte_ether_hdr);

		copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);

		if (ipv4) {
			copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv4_hdr),
					pkt, eth_hdr_size);
			switch (proto) {
			case IPPROTO_UDP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct rte_udp_hdr), pkt,
					eth_hdr_size +
						sizeof(struct rte_ipv4_hdr));
				break;
			case IPPROTO_TCP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct rte_tcp_hdr), pkt,
					eth_hdr_size +
						sizeof(struct rte_ipv4_hdr));
				break;
			case IPPROTO_SCTP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct rte_sctp_hdr), pkt,
					eth_hdr_size +
						sizeof(struct rte_ipv4_hdr));
				break;
			default:
				break;
			}
		} else {
			copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv6_hdr),
					pkt, eth_hdr_size);
			switch (proto) {
			case IPPROTO_UDP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct rte_udp_hdr), pkt,
					eth_hdr_size +
						sizeof(struct rte_ipv6_hdr));
				break;
			case IPPROTO_TCP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct rte_tcp_hdr), pkt,
					eth_hdr_size +
						sizeof(struct rte_ipv6_hdr));
				break;
			case IPPROTO_SCTP:
				copy_buf_to_pkt(proto_hdr,
					sizeof(struct rte_sctp_hdr), pkt,
					eth_hdr_size +
						sizeof(struct rte_ipv6_hdr));
				break;
			default:
				break;
			}
		}

		/*
		 * Complete first mbuf of packet and append it to the
		 * burst of packets to be transmitted.
		 */
		pkt->nb_segs = nb_pkt_segs;
		pkt->pkt_len = pkt_len;
		pkt->l2_len = eth_hdr_size;

		if (ipv4) {
			pkt->vlan_tci  = RTE_ETHER_TYPE_IPV4;
			pkt->l3_len = sizeof(struct rte_ipv4_hdr);
		} else {
			pkt->vlan_tci  = RTE_ETHER_TYPE_IPV6;
			pkt->l3_len = sizeof(struct rte_ipv6_hdr);
		}

		pkts_burst[nb_pkt] = pkt;
	}

	return nb_pkt;
}