/f-stack/dpdk/examples/ipsec-secgw/test/
    tun_null_header_reconstruct.py
        135 return pkt
        140 return pkt
        145 pkt = self.sa_ipv4v4.encrypt(pkt)
        148 pkt[IP].tos = tos_outter
        149 return pkt
        154 pkt = self.sa_ipv6v6.encrypt(pkt)
        158 return pkt
        163 pkt = self.sa_ipv4v6.encrypt(pkt)
        167 return pkt
        172 pkt = self.sa_ipv6v4.encrypt(pkt)
        [all …]
|
    trs_ipv6opts.py
        40 def decrypt(pkt, sa): argument
        41 esp = pkt[ESP]
        48 pkt[ESP].underlayer.nh = d[IPv6].nh
        49 pkt[ESP].underlayer.remove_payload()
        52 npkt = pkt/d[IPv6].payload
        65 pkt = IPv6(src=SRC_ADDR, dst=DST_ADDR)
        69 resp = self.px.xfer_unprotected(pkt)
        96 resp = self.px.xfer_unprotected(pkt)
        123 e = self.inb_sa.encrypt(pkt)
        149 e = self.inb_sa.encrypt(pkt)
        [all …]
|
/f-stack/dpdk/examples/vm_power_manager/
    channel_monitor.c
        122 pkt->vfid[idx] = 0; in set_policy_mac()
        180 pkt->workload = LOW; in parse_json_to_pkt()
        477 if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) { in update_policy()
        479 policies[i].pkt = *pkt; in update_policy()
        496 policies[i].pkt = *pkt; in update_policy()
        524 if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) { in remove_policy()
        676 struct channel_packet *pkt = &pol->pkt; in apply_policy() local
        844 switch (pkt->unit) { in process_request()
        883 pkt->vm_name); in process_request()
        889 update_policy(pkt); in process_request()
        [all …]
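A hit list like the one above only hints at how update_policy() behaves; the pattern it shows is a linear scan of a fixed policy table keyed by vm_name, overwriting a matching entry and otherwise claiming a free slot. The sketch below is a simplified, self-contained illustration of that pattern; the struct layout, table size and field names are placeholders, not the example application's real definitions.

/* update-or-insert by vm_name, as suggested by the update_policy() hits;
 * all types and sizes below are simplified placeholders */
#include <string.h>

#define MAX_VMS     4
#define VM_NAME_LEN 32

struct policy_pkt  { char vm_name[VM_NAME_LEN]; int workload; };
struct policy_slot { int in_use; struct policy_pkt pkt; };

static struct policy_slot policies[MAX_VMS];

static void update_policy(const struct policy_pkt *pkt)
{
    int i;

    for (i = 0; i < MAX_VMS; i++)
        if (policies[i].in_use &&
            strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
            policies[i].pkt = *pkt;      /* refresh existing entry */
            return;
        }
    for (i = 0; i < MAX_VMS; i++)
        if (!policies[i].in_use) {
            policies[i].pkt = *pkt;      /* claim a free slot */
            policies[i].in_use = 1;
            return;
        }
}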
|
/f-stack/dpdk/examples/vm_power_manager/guest_cli/
    vm_power_cli_guest.c
        90 pkt->num_vcpu = 2; in set_policy_defaults()
        107 pkt->workload = LOW; in set_policy_defaults()
        108 pkt->policy_to_use = TIME; in set_policy_defaults()
        109 pkt->command = PKT_POLICY; in set_policy_defaults()
        110 strlcpy(pkt->vm_name, "ubuntu2", sizeof(pkt->vm_name)); in set_policy_defaults()
        207 strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); in cmd_query_freq_list_parsed()
        218 strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); in cmd_query_freq_list_parsed()
        317 strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); in cmd_query_caps_list_parsed()
        328 strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name)); in cmd_query_caps_list_parsed()
        386 ret = rte_power_guest_channel_receive_msg(&pkt, sizeof pkt, lcore_id); in check_response_cmd()
        [all …]
|
/f-stack/dpdk/drivers/bus/vmbus/
    vmbus_channel.c
        114 pkt.hdr.type = type; in rte_vmbus_chan_send()
        163 pkt.rsvd = 0; in rte_vmbus_chan_send_sglist()
        233 error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt)); in rte_vmbus_chan_recv()
        243 if (unlikely(pkt.hlen > pkt.tlen)) { in rte_vmbus_chan_recv()
        245 pkt.hlen, pkt.tlen); in rte_vmbus_chan_recv()
        278 error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt)); in rte_vmbus_chan_recv_raw()
        288 if (unlikely(pkt.hlen > pkt.tlen)) { in rte_vmbus_chan_recv_raw()
        290 pkt.hlen, pkt.tlen); in rte_vmbus_chan_recv_raw()
        420 if (vmbus_rxbr_peek(br, &pkt, sizeof(pkt)) == 0) in vmbus_dump_ring()
        422 pkt.type, in vmbus_dump_ring()
        [all …]
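The recv hits show the same defensive check in both receive paths: a descriptor peeked from the ring whose header length exceeds its total length is malformed and is rejected before any payload is copied. A minimal sketch of that check, assuming a simplified header layout (the real VMBus descriptor has more fields and length units):

#include <stdint.h>
#include <errno.h>

/* simplified stand-in for the peeked VMBus packet header */
struct vmbus_pkt_hdr { uint16_t type; uint16_t hlen; uint16_t tlen; };

static int vmbus_pkt_sane(const struct vmbus_pkt_hdr *pkt)
{
    /* the header can never be larger than the whole packet */
    if (pkt->hlen > pkt->tlen)
        return -EIO;
    return 0;
}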
|
/f-stack/dpdk/app/test/
    packet_burst_generator.c
        25 seg = pkt; in copy_buf_to_pkt_segs()
        268 struct rte_mbuf *pkt; in generate_packet_burst() local
        272 if (pkt == NULL) { in generate_packet_burst()
        280 pkt_seg = pkt; in generate_packet_burst()
        284 pkt->nb_segs = i; in generate_packet_burst()
        306 pkt, eth_hdr_size); in generate_packet_burst()
        311 pkt, eth_hdr_size); in generate_packet_burst()
        349 struct rte_mbuf *pkt; in generate_packet_burst_proto() local
        353 if (pkt == NULL) { in generate_packet_burst_proto()
        361 pkt_seg = pkt; in generate_packet_burst_proto()
        [all …]
|
/f-stack/dpdk/lib/librte_gso/
    gso_tunnel_tcp4.c
        9 update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta, in update_tunnel_ipv4_tcp_headers() argument
        20 outer_ipv4_offset = pkt->outer_l2_len; in update_tunnel_ipv4_tcp_headers()
        21 udp_gre_offset = outer_ipv4_offset + pkt->outer_l3_len; in update_tunnel_ipv4_tcp_headers()
        22 inner_ipv4_offset = udp_gre_offset + pkt->l2_len; in update_tunnel_ipv4_tcp_headers()
        23 tcp_offset = inner_ipv4_offset + pkt->l3_len; in update_tunnel_ipv4_tcp_headers()
        26 ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) + in update_tunnel_ipv4_tcp_headers()
        40 update_udp_hdr = (pkt->ol_flags & PKT_TX_TUNNEL_VXLAN) ? 1 : 0; in update_tunnel_ipv4_tcp_headers()
        55 gso_tunnel_tcp4_segment(struct rte_mbuf *pkt, in gso_tunnel_tcp4_segment() argument
        67 hdr_offset = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len; in gso_tunnel_tcp4_segment()
        79 hdr_offset += pkt->l3_len + pkt->l4_len; in gso_tunnel_tcp4_segment()
        [all …]
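The hits at lines 20-23 and 67-79 show how the tunnel segmenter locates every header purely from the mbuf length metadata: each offset is the previous offset plus the previous header length. A self-contained sketch of that arithmetic; the struct below only models the length fields named in the hits, not the real rte_mbuf:

#include <stdint.h>

/* only the length metadata used by the offset arithmetic above */
struct tun_lens {
    uint16_t outer_l2_len;   /* outer Ethernet */
    uint16_t outer_l3_len;   /* outer IPv4 */
    uint16_t l2_len;         /* UDP/GRE + VXLAN + inner Ethernet */
    uint16_t l3_len;         /* inner IPv4 */
};

static void tunnel_offsets(const struct tun_lens *p,
                           uint16_t *outer_ipv4_off, uint16_t *udp_gre_off,
                           uint16_t *inner_ipv4_off, uint16_t *tcp_off)
{
    *outer_ipv4_off = p->outer_l2_len;
    *udp_gre_off    = *outer_ipv4_off + p->outer_l3_len;
    *inner_ipv4_off = *udp_gre_off + p->l2_len;
    *tcp_off        = *inner_ipv4_off + p->l3_len;
}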
|
    rte_gso.c
        27 rte_gso_segment(struct rte_mbuf *pkt, in rte_gso_segment() argument
        44 if (gso_ctx->gso_size >= pkt->pkt_len) { in rte_gso_segment()
        53 ol_flags = pkt->ol_flags; in rte_gso_segment()
        55 if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) && in rte_gso_segment()
        57 ((IS_IPV4_GRE_TCP4(pkt->ol_flags) && in rte_gso_segment()
        59 pkt->ol_flags &= (~PKT_TX_TCP_SEG); in rte_gso_segment()
        63 } else if (IS_IPV4_TCP(pkt->ol_flags) && in rte_gso_segment()
        65 pkt->ol_flags &= (~PKT_TX_TCP_SEG); in rte_gso_segment()
        69 } else if (IS_IPV4_UDP(pkt->ol_flags) && in rte_gso_segment()
        71 pkt->ol_flags &= (~PKT_TX_UDP_SEG); in rte_gso_segment()
        [all …]
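The hits outline the top-level flow of rte_gso_segment(): a packet that already fits in one GSO segment is returned untouched, otherwise the offload flags select the tunnelled-TCP, plain-TCP or plain-UDP segmenter and the corresponding segmentation-request flag is cleared. A rough standalone sketch of that dispatch; the flag bits and return codes are illustrative placeholders, not DPDK's values:

#include <stdint.h>

#define GSO_F_TCP_SEG (1u << 0)   /* placeholder for PKT_TX_TCP_SEG */
#define GSO_F_UDP_SEG (1u << 1)   /* placeholder for PKT_TX_UDP_SEG */
#define GSO_F_TUNNEL  (1u << 2)   /* placeholder for the VXLAN/GRE bits */

/* returns which segmenter would run: 0 = none needed, 1..3 = type, -1 = unsupported */
static int gso_dispatch(uint32_t pkt_len, uint32_t gso_size, uint32_t *ol_flags)
{
    if (gso_size >= pkt_len)
        return 0;                        /* already small enough */
    if ((*ol_flags & GSO_F_TCP_SEG) && (*ol_flags & GSO_F_TUNNEL)) {
        *ol_flags &= ~GSO_F_TCP_SEG;     /* request is being serviced */
        return 1;                        /* tunnelled TCP/IPv4 */
    }
    if (*ol_flags & GSO_F_TCP_SEG) {
        *ol_flags &= ~GSO_F_TCP_SEG;
        return 2;                        /* plain TCP/IPv4 */
    }
    if (*ol_flags & GSO_F_UDP_SEG) {
        *ol_flags &= ~GSO_F_UDP_SEG;
        return 3;                        /* plain UDP/IPv4 */
    }
    return -1;
}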
|
    gso_tcp4.c
        9 update_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta, in update_ipv4_tcp_headers() argument
        16 uint16_t l3_offset = pkt->l2_len; in update_ipv4_tcp_headers()
        17 uint16_t l4_offset = l3_offset + pkt->l3_len; in update_ipv4_tcp_headers()
        19 ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char*) + in update_ipv4_tcp_headers()
        21 tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len); in update_ipv4_tcp_headers()
        35 gso_tcp4_segment(struct rte_mbuf *pkt, in gso_tcp4_segment() argument
        49 ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) + in gso_tcp4_segment()
        50 pkt->l2_len); in gso_tcp4_segment()
        57 hdr_offset = pkt->l2_len + pkt->l3_len + pkt->l4_len; in gso_tcp4_segment()
        58 if (unlikely(hdr_offset >= pkt->pkt_len)) { in gso_tcp4_segment()
        [all …]
|
    gso_udp4.c
        11 update_ipv4_udp_headers(struct rte_mbuf *pkt, struct rte_mbuf **segs, in update_ipv4_udp_headers() argument
        16 uint16_t l2_hdrlen = pkt->l2_len, l3_hdrlen = pkt->l3_len; in update_ipv4_udp_headers()
        38 gso_udp4_segment(struct rte_mbuf *pkt, in gso_udp4_segment() argument
        51 ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *, in gso_udp4_segment()
        52 pkt->l2_len); in gso_udp4_segment()
        63 hdr_offset = pkt->l2_len + pkt->l3_len; in gso_udp4_segment()
        66 if (unlikely(hdr_offset + pkt->l4_len >= pkt->pkt_len)) { in gso_udp4_segment()
        76 ret = gso_do_segment(pkt, hdr_offset, pyld_unit_size, direct_pool, in gso_udp4_segment()
        79 update_ipv4_udp_headers(pkt, pkts_out, ret); in gso_udp4_segment()
|
/f-stack/dpdk/examples/ipsec-secgw/
    ipsec_worker.c
        190 struct rte_mbuf *pkt; in process_ipsec_ev_inbound() local
        197 pkt = ev->mbuf; in process_ipsec_ev_inbound()
        205 if (unlikely(pkt->ol_flags & in process_ipsec_ev_inbound()
        279 rte_pktmbuf_free(pkt); in process_ipsec_ev_inbound()
        290 struct rte_mbuf *pkt; in process_ipsec_ev_outbound() local
        298 pkt = ev->mbuf; in process_ipsec_ev_outbound()
        379 rte_pktmbuf_free(pkt); in process_ipsec_ev_outbound()
        403 struct rte_mbuf *pkt; in ipsec_wrkr_non_burst_int_port_drv_mode() local
        453 pkt = ev.mbuf; in ipsec_wrkr_non_burst_int_port_drv_mode()
        454 port_id = pkt->port; in ipsec_wrkr_non_burst_int_port_drv_mode()
        [all …]
|
/f-stack/dpdk/app/test-pmd/
    txonly.c
        74 seg = pkt; in copy_buf_to_pkt_segs()
        211 pkt->ol_flags |= ol_flags; in pkt_burst_prepare()
        212 pkt->vlan_tci = vlan_tci; in pkt_burst_prepare()
        217 pkt_len = pkt->data_len; in pkt_burst_prepare()
        218 pkt_seg = pkt; in pkt_burst_prepare()
        311 pkt->nb_segs = nb_segs; in pkt_burst_prepare()
        312 pkt->pkt_len = pkt_len; in pkt_burst_prepare()
        325 struct rte_mbuf *pkt; in pkt_burst_transmit() local
        374 if (pkt == NULL) in pkt_burst_transmit()
        381 rte_pktmbuf_free(pkt); in pkt_burst_transmit()
        [all …]
|
    flowgen.c
        88 struct rte_mbuf *pkt; in pkt_burst_flow_gen() local
        126 pkt = rte_mbuf_raw_alloc(mbp); in pkt_burst_flow_gen()
        127 if (!pkt) in pkt_burst_flow_gen()
        130 pkt->data_len = pkt_size; in pkt_burst_flow_gen()
        131 pkt->next = NULL; in pkt_burst_flow_gen()
        164 pkt->nb_segs = 1; in pkt_burst_flow_gen()
        165 pkt->pkt_len = pkt_size; in pkt_burst_flow_gen()
        166 pkt->ol_flags &= EXT_ATTACHED_MBUF; in pkt_burst_flow_gen()
        167 pkt->ol_flags |= ol_flags; in pkt_burst_flow_gen()
        168 pkt->vlan_tci = vlan_tci; in pkt_burst_flow_gen()
        [all …]
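Both txonly.c and flowgen.c show the same preparation of a freshly allocated single-segment transmit buffer: data_len/pkt_len, the segment chain, offload flags and the VLAN tag are filled in by hand before the burst is sent. A simplified sketch of that setup; the struct below mimics only the mbuf fields touched in the hits and is not the real rte_mbuf:

#include <stdint.h>
#include <stddef.h>

/* subset of the mbuf fields written by pkt_burst_flow_gen()/pkt_burst_prepare() */
struct tx_mbuf {
    struct tx_mbuf *next;
    uint16_t data_len, nb_segs, vlan_tci;
    uint32_t pkt_len;
    uint64_t ol_flags;
};

static void prepare_tx_pkt(struct tx_mbuf *pkt, uint16_t pkt_size,
                           uint64_t ol_flags, uint16_t vlan_tci)
{
    pkt->data_len = pkt_size;    /* bytes in this (only) segment */
    pkt->next     = NULL;        /* single-segment packet */
    pkt->nb_segs  = 1;
    pkt->pkt_len  = pkt_size;    /* total frame length */
    pkt->ol_flags |= ol_flags;   /* checksum/VLAN offload requests */
    pkt->vlan_tci  = vlan_tci;
}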
|
/f-stack/dpdk/lib/librte_port/
    rte_swx_port_source_sink.c
        163 pkt->handle = m_dst; in source_pkt_rx()
        164 pkt->pkt = m_dst->buf_addr; in source_pkt_rx()
        165 pkt->offset = m_dst->data_off; in source_pkt_rx()
        166 pkt->length = m_dst->pkt_len; in source_pkt_rx()
        169 pkt->length, in source_pkt_rx()
        170 pkt->offset); in source_pkt_rx()
        172 rte_hexdump(stdout, NULL, &pkt->pkt[pkt->offset], pkt->length); in source_pkt_rx()
        286 pkt->length, in sink_pkt_tx()
        287 pkt->offset); in sink_pkt_tx()
        289 rte_hexdump(stdout, NULL, &pkt->pkt[pkt->offset], pkt->length); in sink_pkt_tx()
        [all …]
|
    rte_swx_port_ethdev.c
        113 pkt->handle = m; in reader_pkt_rx()
        114 pkt->pkt = m->buf_addr; in reader_pkt_rx()
        115 pkt->offset = m->data_off; in reader_pkt_rx()
        116 pkt->length = m->pkt_len; in reader_pkt_rx()
        122 pkt->length, in reader_pkt_rx()
        123 pkt->offset); in reader_pkt_rx()
        148 rte_pktmbuf_free(pkt); in reader_free()
        250 pkt->length, in writer_pkt_tx()
        251 pkt->offset); in writer_pkt_tx()
        253 rte_hexdump(stdout, NULL, &pkt->pkt[pkt->offset], pkt->length); in writer_pkt_tx()
        [all …]
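Both SWX port drivers fill the pipeline's packet descriptor the same way: no data is copied, only the mbuf handle, the underlying buffer address, the offset of the first frame byte and the frame length are recorded, so the frame can later be addressed as &pkt->pkt[pkt->offset]. A simplified sketch with placeholder structs (not the real rte_mbuf or SWX packet definitions):

#include <stdint.h>

/* placeholder views of the two structures involved */
struct mbuf_view { void *buf_addr; uint16_t data_off; uint32_t pkt_len; };
struct swx_pkt   { void *handle; uint8_t *pkt; uint32_t offset; uint32_t length; };

static void swx_pkt_from_mbuf(struct swx_pkt *pkt, struct mbuf_view *m)
{
    pkt->handle = m;             /* kept so the buffer can be freed later */
    pkt->pkt    = m->buf_addr;   /* start of the underlying buffer */
    pkt->offset = m->data_off;   /* first byte of the frame */
    pkt->length = m->pkt_len;    /* frame length in bytes */
    /* frame data is then &pkt->pkt[pkt->offset], pkt->length bytes long */
}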
|
/f-stack/dpdk/lib/librte_gro/
    gro_vxlan_udp4.c
        100 struct rte_mbuf *pkt, in insert_new_item() argument
        223 l2_offset = pkt->outer_l2_len + pkt->outer_l3_len; in udp4_check_vxlan_neighbor()
        245 pkt->outer_l2_len + pkt->outer_l3_len)) { in merge_two_vxlan_udp4_packets()
        262 len = pkt->pkt_len - pkt->outer_l2_len; in update_vxlan_header()
        264 pkt->outer_l2_len); in update_vxlan_header()
        273 len -= pkt->l2_len; in update_vxlan_header()
        308 pkt->outer_l2_len); in gro_vxlan_udp4_reassemble()
        325 hdr_len = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len + in gro_vxlan_udp4_reassemble()
        326 pkt->l3_len; in gro_vxlan_udp4_reassemble()
        464 hdr_len = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len + in gro_vxlan_udp4_merge_items()
        [all …]
|
    gro_vxlan_tcp4.c
        100 struct rte_mbuf *pkt, in insert_new_item() argument
        228 l2_offset = pkt->outer_l2_len + pkt->outer_l3_len; in check_vxlan_seq_option()
        246 struct rte_mbuf *pkt, in merge_two_vxlan_tcp4_packets() argument
        254 pkt->outer_l3_len)) { in merge_two_vxlan_tcp4_packets()
        272 len = pkt->pkt_len - pkt->outer_l2_len; in update_vxlan_header()
        274 pkt->outer_l2_len); in update_vxlan_header()
        283 len -= pkt->l2_len; in update_vxlan_header()
        319 pkt->outer_l2_len); in gro_vxlan_tcp4_reassemble()
        321 pkt->outer_l3_len); in gro_vxlan_tcp4_reassemble()
        336 hdr_len = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len + in gro_vxlan_tcp4_reassemble()
        [all …]
|
    gro_udp4.c
        101 struct rte_mbuf *pkt, in insert_new_item() argument
        184 pkt->l2_len); in update_header()
        186 pkt->l2_len); in update_header()
        216 hdr_len = pkt->l2_len + pkt->l3_len; in gro_udp4_reassemble()
        228 if (pkt->pkt_len <= hdr_len) in gro_udp4_reassemble()
        232 if (ip_dl <= pkt->l3_len) in gro_udp4_reassemble()
        235 ip_dl -= pkt->l3_len; in gro_udp4_reassemble()
        294 pkt, cmp, frag_offset, in gro_udp4_reassemble()
        346 struct rte_mbuf *pkt; in gro_udp4_merge_items() local
        354 hdr_len = pkt->l2_len + pkt->l3_len; in gro_udp4_merge_items()
        [all …]
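The gro_udp4_reassemble() hits show the admission checks applied before a packet is considered for merging: the frame must carry more than its L2+L3 headers, and the IPv4 total length must exceed the IP header, in which case the mergeable payload is the total length minus l3_len. A minimal standalone sketch of those checks; plain integers stand in for the mbuf and header fields named above:

#include <stdint.h>

/* returns the mergeable IP payload length, or 0 if the packet cannot be merged */
static uint16_t gro_udp4_payload(uint32_t pkt_len, uint16_t l2_len,
                                 uint16_t l3_len, uint16_t ip_total_len)
{
    uint16_t hdr_len = l2_len + l3_len;

    if (pkt_len <= hdr_len)        /* nothing beyond the Ethernet+IP headers */
        return 0;
    if (ip_total_len <= l3_len)    /* empty IP payload */
        return 0;
    return ip_total_len - l3_len;  /* bytes eligible for merging */
}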
|
/f-stack/freebsd/contrib/octeon-sdk/
    cvmx-srio.c
        894 uint32_t pkt = 0; in cvmx_srio_config_read32() local
        923 pkt = 0; in cvmx_srio_config_read32()
        930 pkt = 0; in cvmx_srio_config_read32()
        950 pkt = 0; in cvmx_srio_config_read32()
        955 pkt = 0; in cvmx_srio_config_read32()
        1064 uint32_t pkt = 0; in cvmx_srio_config_write32() local
        1093 pkt = 0; in cvmx_srio_config_write32()
        1100 pkt = 0; in cvmx_srio_config_write32()
        1109 pkt = data << 8; in cvmx_srio_config_write32()
        1135 pkt = 0; in cvmx_srio_config_write32()
        [all …]
|
/f-stack/dpdk/lib/librte_power/
    guest_channel.c
        58 struct channel_packet pkt; in guest_channel_host_connect() local
        103 pkt.command = CPU_POWER_CONNECT; in guest_channel_host_connect()
        105 ret = guest_channel_send_msg(&pkt, lcore_id); in guest_channel_host_connect()
        124 int ret, buffer_len = sizeof(*pkt); in guest_channel_send_msg()
        125 void *buffer = pkt; in guest_channel_send_msg()
        155 return guest_channel_send_msg(pkt, lcore_id); in rte_power_guest_channel_send_msg()
        158 int power_guest_channel_read_msg(void *pkt, in power_guest_channel_read_msg() argument
        165 if (pkt_len == 0 || pkt == NULL) in power_guest_channel_read_msg()
        194 pkt, pkt_len); in power_guest_channel_read_msg()
        206 pkt = (char *)pkt + ret; in power_guest_channel_read_msg()
        [all …]
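The hit at line 206, pkt = (char *)pkt + ret, is the tell-tale of a partial-read loop: the channel read may return fewer bytes than requested, so the destination pointer is advanced and the remaining length reduced until the whole message has arrived. A self-contained sketch of that loop over a plain file descriptor (the fd and the message framing are placeholders, not the library's channel setup):

#include <unistd.h>
#include <stddef.h>

/* read exactly pkt_len bytes, tolerating short reads; 0 on success, -1 on error */
static int read_full(int fd, void *pkt, size_t pkt_len)
{
    while (pkt_len > 0) {
        ssize_t ret = read(fd, pkt, pkt_len);

        if (ret <= 0)
            return -1;               /* error or peer closed the channel */
        pkt = (char *)pkt + ret;     /* advance past the bytes received */
        pkt_len -= (size_t)ret;
    }
    return 0;
}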
|
/f-stack/dpdk/drivers/net/netvsc/
    hn_rxtx.c
        141 return pkt->pktinfooffset + pkt->pktinfolen; in hn_rndis_pktlen()
        680 hn_rndis_dump(pkt); in hn_rndis_rx_data()
        688 if (unlikely(pkt->len < pkt->datalen in hn_rndis_rx_data()
        689 + pkt->oobdatalen + pkt->pktinfolen)) in hn_rndis_rx_data()
        1241 olen = pkt->len; in hn_try_txagg()
        1249 chim = (uint8_t *)pkt + pkt->len; in hn_try_txagg()
        1321 pkt->pktinfooffset = sizeof(*pkt); in hn_encap()
        1323 pkt->vchandle = 0; in hn_encap()
        1324 pkt->reserved = 0; in hn_encap()
        1372 pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen; in hn_encap()
        [all …]
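The hn_rndis_rx_data() hits at lines 688-689 show the length validation applied to every received RNDIS data message: the advertised total length must be able to hold the data, out-of-band data and per-packet-info regions, otherwise the message is dropped as corrupt. A minimal sketch of that check with a placeholder header struct (the real RNDIS layout has more fields):

#include <stdint.h>
#include <stdbool.h>

/* placeholder for the RNDIS packet-message header fields used by the check */
struct rndis_pkt {
    uint32_t len;            /* total message length */
    uint32_t datalen;        /* payload bytes */
    uint32_t oobdatalen;     /* out-of-band data bytes */
    uint32_t pktinfolen;     /* per-packet info bytes */
};

static bool rndis_pkt_len_ok(const struct rndis_pkt *pkt)
{
    /* a message shorter than the regions it claims to carry is corrupt */
    return pkt->len >= pkt->datalen + pkt->oobdatalen + pkt->pktinfolen;
}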
|
/f-stack/dpdk/examples/performance-thread/l3fwd-thread/
    test.sh
        14 --enable-jumbo --max-pkt-len 1500 \
        26 --enable-jumbo --max-pkt-len 1500 \
        37 --enable-jumbo --max-pkt-len 1500 \
        48 --enable-jumbo --max-pkt-len 1500 \
        64 --enable-jumbo --max-pkt-len 1500 \
        76 --enable-jumbo --max-pkt-len 1500 \
        87 --enable-jumbo --max-pkt-len 1500 \
        98 --enable-jumbo --max-pkt-len 1500 \
        114 --enable-jumbo --max-pkt-len 1500 \
        124 --enable-jumbo --max-pkt-len 1500 \
        [all …]
|
/f-stack/dpdk/examples/l3fwd/
    l3fwd_lpm_sse.h
        14 processx4_step1(struct rte_mbuf *pkt[FWDSTEP], in processx4_step1()
        22 eth_hdr = rte_pktmbuf_mtod(pkt[0], struct rte_ether_hdr *); in processx4_step1()
        25 ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4; in processx4_step1()
        27 eth_hdr = rte_pktmbuf_mtod(pkt[1], struct rte_ether_hdr *); in processx4_step1()
        30 ipv4_flag[0] &= pkt[1]->packet_type; in processx4_step1()
        32 eth_hdr = rte_pktmbuf_mtod(pkt[2], struct rte_ether_hdr *); in processx4_step1()
        35 ipv4_flag[0] &= pkt[2]->packet_type; in processx4_step1()
        37 eth_hdr = rte_pktmbuf_mtod(pkt[3], struct rte_ether_hdr *); in processx4_step1()
        40 ipv4_flag[0] &= pkt[3]->packet_type; in processx4_step1()
        54 struct rte_mbuf *pkt[FWDSTEP], in processx4_step2()
        [all …]
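The SSE, Altivec and NEON variants here and below all share the same step-1 logic: the IPv4 indicator for a group of FWDSTEP packets is seeded from the first packet's type and then ANDed with the remaining packets' packet_type fields, so the vectorised IPv4 lookup path is taken only when all four packets are IPv4. A scalar sketch of that accumulation (the flag value is a placeholder, not DPDK's RTE_PTYPE_L3_IPV4 encoding):

#include <stdint.h>

#define FWDSTEP       4
#define PTYPE_L3_IPV4 (1u << 4)   /* placeholder bit */

/* non-zero only if every packet in the group carries the IPv4 type bit */
static uint32_t group_is_ipv4(const uint32_t packet_type[FWDSTEP])
{
    uint32_t flag = packet_type[0] & PTYPE_L3_IPV4;
    int i;

    for (i = 1; i < FWDSTEP; i++)
        flag &= packet_type[i];    /* any non-IPv4 packet clears the bit */
    return flag & PTYPE_L3_IPV4;
}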
|
    l3fwd_lpm_altivec.h
        16 processx4_step1(struct rte_mbuf *pkt[FWDSTEP], in processx4_step1()
        24 eth_hdr = rte_pktmbuf_mtod(pkt[0], struct rte_ether_hdr *); in processx4_step1()
        27 ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4; in processx4_step1()
        33 ipv4_flag[0] &= pkt[1]->packet_type; in processx4_step1()
        39 ipv4_flag[0] &= pkt[2]->packet_type; in processx4_step1()
        45 ipv4_flag[0] &= pkt[3]->packet_type; in processx4_step1()
        60 struct rte_mbuf *pkt[FWDSTEP], in processx4_step2()
        83 dprt[0] = lpm_get_dst_port_with_ipv4(qconf, pkt[0], in processx4_step2()
        85 dprt[1] = lpm_get_dst_port_with_ipv4(qconf, pkt[1], in processx4_step2()
        87 dprt[2] = lpm_get_dst_port_with_ipv4(qconf, pkt[2], in processx4_step2()
        [all …]
|
    l3fwd_lpm_neon.h
        17 processx4_step1(struct rte_mbuf *pkt[FWDSTEP], in processx4_step1()
        25 eth_hdr = rte_pktmbuf_mtod(pkt[0], struct rte_ether_hdr *); in processx4_step1()
        28 ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4; in processx4_step1()
        33 ipv4_flag[0] &= pkt[1]->packet_type; in processx4_step1()
        38 ipv4_flag[0] &= pkt[2]->packet_type; in processx4_step1()
        43 ipv4_flag[0] &= pkt[3]->packet_type; in processx4_step1()
        57 struct rte_mbuf *pkt[FWDSTEP], in processx4_step2()
        72 dprt[0] = lpm_get_dst_port_with_ipv4(qconf, pkt[0], in processx4_step2()
        74 dprt[1] = lpm_get_dst_port_with_ipv4(qconf, pkt[1], in processx4_step2()
        76 dprt[2] = lpm_get_dst_port_with_ipv4(qconf, pkt[2], in processx4_step2()
        [all …]
|