| /f-stack/dpdk/lib/librte_eal/windows/ |
| eal_memalloc.c |
      191  int cur_idx, start_idx, j;    in alloc_seg_walk() local
      209  cur_idx = rte_fbarray_find_next_n_free(    in alloc_seg_walk()
      211  if (cur_idx < 0)    in alloc_seg_walk()
      213  start_idx = cur_idx;    in alloc_seg_walk()
      221  cur_idx = rte_fbarray_find_biggest_free(    in alloc_seg_walk()
      223  if (cur_idx < 0)    in alloc_seg_walk()
      225  start_idx = cur_idx;    in alloc_seg_walk()
      230  &cur_msl->memseg_arr, cur_idx);    in alloc_seg_walk()
      234  for (i = 0; i < need; i++, cur_idx++) {    in alloc_seg_walk()
      250  for (j = start_idx; j < cur_idx; j++) {    in alloc_seg_walk()
      [all …]
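The alloc_seg_walk() matches above follow an allocate-or-roll-back pattern: locate a run of `need` contiguous free slots, remember `start_idx`, allocate slot by slot, and on any failure release everything from `start_idx` up to the failing `cur_idx`. Below is a minimal standalone sketch of that pattern; `slot_used[]`, `grab_slot()`, `release_slot()`, and `find_next_n_free()` are hypothetical stand-ins, not the real rte_fbarray/memseg API.

```c
#include <stdbool.h>
#include <stdio.h>

#define NUM_SLOTS 16

/* Hypothetical free/used map standing in for the memseg array. */
static bool slot_used[NUM_SLOTS];

/* Stand-in for the real per-page mapping step; may fail in practice. */
static bool grab_slot(int idx)
{
	slot_used[idx] = true;
	return true;
}

static void release_slot(int idx)
{
	slot_used[idx] = false;
}

/* Return the first index of a run of 'need' free slots, or -1. */
static int find_next_n_free(int from, int need)
{
	for (int i = from; i + need <= NUM_SLOTS; i++) {
		int run = 0;
		while (run < need && !slot_used[i + run])
			run++;
		if (run == need)
			return i;
	}
	return -1;
}

/* Take 'need' contiguous slots; undo partial work on failure. */
static int alloc_n_slots(int need)
{
	int cur_idx, start_idx, j;

	cur_idx = find_next_n_free(0, need);
	if (cur_idx < 0)
		return -1;
	start_idx = cur_idx;

	for (int i = 0; i < need; i++, cur_idx++) {
		if (!grab_slot(cur_idx)) {
			/* Roll back everything taken so far. */
			for (j = start_idx; j < cur_idx; j++)
				release_slot(j);
			return -1;
		}
	}
	return start_idx;
}

int main(void)
{
	printf("allocated run starts at %d\n", alloc_n_slots(4));
	return 0;
}
```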
|
| /f-stack/dpdk/lib/librte_gro/ |
| gro_udp4.c |
      209  uint32_t cur_idx, prev_idx, item_idx;    in gro_udp4_reassemble() local
      287  cur_idx = tbl->flows[i].start_index;    in gro_udp4_reassemble()
      288  prev_idx = cur_idx;    in gro_udp4_reassemble()
      290  cmp = udp4_check_neighbor(&(tbl->items[cur_idx]),    in gro_udp4_reassemble()
      293  if (merge_two_udp4_packets(&(tbl->items[cur_idx]),    in gro_udp4_reassemble()
      311  < tbl->items[cur_idx].frag_offset) {    in gro_udp4_reassemble()
      315  prev_idx = cur_idx;    in gro_udp4_reassemble()
      316  cur_idx = tbl->items[cur_idx].next_pkt_idx;    in gro_udp4_reassemble()
      317  } while (cur_idx != INVALID_ARRAY_INDEX);    in gro_udp4_reassemble()
      320  if (cur_idx == tbl->flows[i].start_index) {    in gro_udp4_reassemble()
      [all …]
|
| gro_tcp4.c |
      206  uint32_t cur_idx, prev_idx, item_idx;    in gro_tcp4_reassemble() local
      294  cur_idx = tbl->flows[i].start_index;    in gro_tcp4_reassemble()
      295  prev_idx = cur_idx;    in gro_tcp4_reassemble()
      297  cmp = check_seq_option(&(tbl->items[cur_idx]), tcp_hdr,    in gro_tcp4_reassemble()
      301  if (merge_two_tcp4_packets(&(tbl->items[cur_idx]),    in gro_tcp4_reassemble()
      315  prev_idx = cur_idx;    in gro_tcp4_reassemble()
      316  cur_idx = tbl->items[cur_idx].next_pkt_idx;    in gro_tcp4_reassemble()
      317  } while (cur_idx != INVALID_ARRAY_INDEX);    in gro_tcp4_reassemble()
|
| gro_vxlan_udp4.c |
      300  uint32_t cur_idx, prev_idx, item_idx;    in gro_vxlan_udp4_reassemble() local
      395  cur_idx = tbl->flows[i].start_index;    in gro_vxlan_udp4_reassemble()
      396  prev_idx = cur_idx;    in gro_vxlan_udp4_reassemble()
      398  cmp = udp4_check_vxlan_neighbor(&(tbl->items[cur_idx]),    in gro_vxlan_udp4_reassemble()
      402  &(tbl->items[cur_idx]),    in gro_vxlan_udp4_reassemble()
      421  < tbl->items[cur_idx].inner_item.frag_offset) {    in gro_vxlan_udp4_reassemble()
      425  prev_idx = cur_idx;    in gro_vxlan_udp4_reassemble()
      426  cur_idx = tbl->items[cur_idx].inner_item.next_pkt_idx;    in gro_vxlan_udp4_reassemble()
      427  } while (cur_idx != INVALID_ARRAY_INDEX);    in gro_vxlan_udp4_reassemble()
      430  if (cur_idx == tbl->flows[i].start_index) {    in gro_vxlan_udp4_reassemble()
      [all …]
|
| gro_vxlan_tcp4.c |
      304  uint32_t cur_idx, prev_idx, item_idx;    in gro_vxlan_tcp4_reassemble() local
      415  cur_idx = tbl->flows[i].start_index;    in gro_vxlan_tcp4_reassemble()
      416  prev_idx = cur_idx;    in gro_vxlan_tcp4_reassemble()
      418  cmp = check_vxlan_seq_option(&(tbl->items[cur_idx]), tcp_hdr,    in gro_vxlan_tcp4_reassemble()
      422  if (merge_two_vxlan_tcp4_packets(&(tbl->items[cur_idx]),    in gro_vxlan_tcp4_reassemble()
      439  prev_idx = cur_idx;    in gro_vxlan_tcp4_reassemble()
      440  cur_idx = tbl->items[cur_idx].inner_item.next_pkt_idx;    in gro_vxlan_tcp4_reassemble()
      441  } while (cur_idx != INVALID_ARRAY_INDEX);    in gro_vxlan_tcp4_reassemble()
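All four gro_*.c entries above walk the same singly linked chain of stored packets for a flow: start at the flow's start_index, compare the incoming packet against each stored item (TCP sequence numbers or UDP fragment offsets, through an inner_item indirection in the VXLAN variants), merge when they are neighbors, and otherwise advance prev_idx/cur_idx via next_pkt_idx until INVALID_ARRAY_INDEX. The sketch below shows that traversal over a hypothetical item table; check_neighbor() and merge_items() are placeholders, not the librte_gro helpers.

```c
#include <stdint.h>
#include <stdbool.h>

#define INVALID_ARRAY_INDEX UINT32_MAX

/* Hypothetical stored-packet item; the real tables keep mbufs, offsets, seqs. */
struct item {
	uint32_t next_pkt_idx;   /* chain link, INVALID_ARRAY_INDEX terminates */
	uint32_t key;            /* stand-in for a sequence number / frag offset */
};

struct flow {
	uint32_t start_index;    /* head of this flow's item chain */
};

/* Placeholder neighbor test: >0 appends after the item, <0 prepends, 0 not adjacent. */
static int check_neighbor(const struct item *it, uint32_t key)
{
	if (key == it->key + 1)
		return 1;
	if (key + 1 == it->key)
		return -1;
	return 0;
}

/* Placeholder merge; returns true when the packets were coalesced. */
static bool merge_items(struct item *it, uint32_t key)
{
	it->key = key > it->key ? key : it->key;
	return true;
}

/* Try to merge 'key' into an existing chain; return true on merge,
 * false when the caller must insert a new item into the chain. */
bool chain_reassemble(struct item *items, const struct flow *f, uint32_t key)
{
	uint32_t cur_idx, prev_idx;

	cur_idx = f->start_index;
	prev_idx = cur_idx;
	do {
		int cmp = check_neighbor(&items[cur_idx], key);
		if (cmp) {
			if (merge_items(&items[cur_idx], key))
				return true;
			/* Neighbor found but merge refused (e.g. size limit):
			 * the caller inserts a fresh item after prev_idx. */
			return false;
		}
		prev_idx = cur_idx;
		cur_idx = items[cur_idx].next_pkt_idx;
	} while (cur_idx != INVALID_ARRAY_INDEX);

	(void)prev_idx;          /* insertion point for the non-merged case */
	return false;
}
```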
|
| /f-stack/dpdk/lib/librte_eal/common/ |
| eal_common_fbarray.c |
      1321  int cur_idx, next_idx, cur_len, biggest_idx, biggest_len;    in fbarray_find_biggest() local
      1359  cur_idx = start;    in fbarray_find_biggest()
      1363  cur_idx = find_func(arr, cur_idx);    in fbarray_find_biggest()
      1366  if (cur_idx >= 0) {    in fbarray_find_biggest()
      1367  cur_len = find_contig_func(arr, cur_idx);    in fbarray_find_biggest()
      1369  next_idx = rev ? cur_idx - cur_len : cur_idx + cur_len;    in fbarray_find_biggest()
      1371  cur_idx = rev ? next_idx + 1 : cur_idx;    in fbarray_find_biggest()
      1374  biggest_idx = cur_idx;    in fbarray_find_biggest()
      1377  cur_idx = next_idx;    in fbarray_find_biggest()
      1382  if (cur_idx < 0)    in fbarray_find_biggest()
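fbarray_find_biggest() shows the canonical biggest-run scan: find the next candidate index, measure the contiguous run starting there, remember the longest one seen, and jump past the run. A simplified forward-only version over a hypothetical boolean used[] array follows; the real code also handles reverse scans and used/free variants through function pointers.

```c
#include <stdbool.h>
#include <stdio.h>

/* Return the start of the longest run of free entries in used[0..len),
 * store its length in *out_len; return -1 if everything is used. */
static int find_biggest_free(const bool *used, int len, int *out_len)
{
	int cur_idx = 0, biggest_idx = -1, biggest_len = 0;

	while (cur_idx < len) {
		/* Find the next free entry. */
		while (cur_idx < len && used[cur_idx])
			cur_idx++;
		if (cur_idx == len)
			break;

		/* Measure the contiguous free run starting here. */
		int cur_len = 0;
		while (cur_idx + cur_len < len && !used[cur_idx + cur_len])
			cur_len++;

		if (cur_len > biggest_len) {
			biggest_len = cur_len;
			biggest_idx = cur_idx;
		}
		cur_idx += cur_len;     /* skip past this run */
	}

	*out_len = biggest_len;
	return biggest_idx;
}

int main(void)
{
	bool used[10] = { true, false, false, true, false,
			  false, false, true, false, false };
	int run_len;
	int start = find_biggest_free(used, 10, &run_len);

	printf("biggest free run: start %d, length %d\n", start, run_len);
	return 0;
}
```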
|
| /f-stack/dpdk/lib/librte_eal/linux/ |
| eal_memalloc.c |
      798  int cur_idx, start_idx, j, dir_fd = -1;    in alloc_seg_walk() local
      820  if (cur_idx < 0)    in alloc_seg_walk()
      822  start_idx = cur_idx;    in alloc_seg_walk()
      832  if (cur_idx < 0)    in alloc_seg_walk()
      834  start_idx = cur_idx;    in alloc_seg_walk()
      839  cur_idx);    in alloc_seg_walk()
      867  for (i = 0; i < need; i++, cur_idx++) {    in alloc_seg_walk()
      873  cur_idx * page_sz);    in alloc_seg_walk()
      876  msl_idx, cur_idx)) {    in alloc_seg_walk()
      885  for (j = start_idx; j < cur_idx; j++) {    in alloc_seg_walk()
      [all …]
|
| eal_vfio.c |
      220  int i, n_merged, cur_idx;    in compact_user_maps() local
      244  cur_idx = 0;    in compact_user_maps()
      250  dst = &user_mem_maps->maps[cur_idx++];    in compact_user_maps()
      258  user_mem_maps->n_maps = cur_idx;    in compact_user_maps()
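compact_user_maps() illustrates in-place array compaction with a write cursor: after adjacent map entries have been merged, surviving entries are copied forward to cur_idx and the element count is reset to the cursor's final value. A generic sketch with a hypothetical is_live() predicate standing in for the real merge bookkeeping:

```c
#include <string.h>

struct map {
	unsigned long addr;
	unsigned long len;       /* len == 0 marks a merged-away entry here */
};

/* Hypothetical liveness test; entries folded into a neighbour are dropped. */
static int is_live(const struct map *m)
{
	return m->len != 0;
}

/* Move live entries to the front and return the new count. */
int compact_maps(struct map *maps, int n_maps)
{
	int cur_idx = 0;

	for (int i = 0; i < n_maps; i++) {
		if (!is_live(&maps[i]))
			continue;
		if (i != cur_idx)
			memcpy(&maps[cur_idx], &maps[i], sizeof(maps[0]));
		cur_idx++;
	}
	return cur_idx;          /* becomes the new n_maps */
}
```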
|
| /f-stack/dpdk/drivers/net/thunderx/base/ |
| nicvf_mbox.c |
      247  size_t cur_idx = 0;    in nicvf_mbox_config_rss() local
      255  while (cur_idx < tot_len) {    in nicvf_mbox_config_rss()
      256  cur_len = nicvf_min(tot_len - cur_idx,    in nicvf_mbox_config_rss()
      258  mbx.msg.msg = (cur_idx > 0) ?    in nicvf_mbox_config_rss()
      260  mbx.rss_cfg.tbl_offset = cur_idx;    in nicvf_mbox_config_rss()
      263  mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[cur_idx++];    in nicvf_mbox_config_rss()
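nicvf_mbox_config_rss() pushes a large RSS indirection table through a fixed-size mailbox, so the table is cut into chunks: each message carries the current offset (tbl_offset) plus at most the per-message capacity, and cur_idx advances as entries are copied (the real code also switches the message type after the first chunk). A standalone sketch of that chunking loop, with a hypothetical rss_cfg_msg/send_msg() in place of the ThunderX mailbox structures:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_MAX 8                      /* entries per message (hypothetical) */

struct rss_cfg_msg {
	size_t  tbl_offset;              /* where this chunk starts in the table */
	size_t  tbl_len;                 /* entries carried by this message */
	uint8_t ind_tbl[CHUNK_MAX];
};

/* Stand-in for the real "post to firmware" call. */
static void send_msg(const struct rss_cfg_msg *msg)
{
	printf("chunk at offset %zu, %zu entries\n", msg->tbl_offset, msg->tbl_len);
}

static void config_rss(const uint8_t *ind_tbl, size_t tot_len)
{
	size_t cur_idx = 0;

	while (cur_idx < tot_len) {
		struct rss_cfg_msg msg;
		size_t cur_len = tot_len - cur_idx;

		if (cur_len > CHUNK_MAX)
			cur_len = CHUNK_MAX;    /* clamp to mailbox capacity */

		msg.tbl_offset = cur_idx;
		msg.tbl_len = cur_len;
		for (size_t i = 0; i < cur_len; i++)
			msg.ind_tbl[i] = ind_tbl[cur_idx++];

		send_msg(&msg);
	}
}

int main(void)
{
	uint8_t tbl[20] = { 0 };

	config_rss(tbl, sizeof(tbl));
	return 0;
}
```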
|
| /f-stack/dpdk/lib/librte_vhost/ |
| virtio_net.c |
      607  uint16_t cur_idx;    in reserve_avail_buf_split() local
      615  cur_idx = vq->last_avail_idx;    in reserve_avail_buf_split()
      623  if (unlikely(cur_idx == avail_head))    in reserve_avail_buf_split()
      633  if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,    in reserve_avail_buf_split()
      642  cur_idx++;    in reserve_avail_buf_split()
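reserve_avail_buf_split() starts at vq->last_avail_idx and walks the split ring toward avail_head, collecting buffers until enough space has been reserved for the packet or the ring runs dry. A simplified sketch over a hypothetical ring of buffer lengths, without the descriptor-chain indirection of the real vhost code:

```c
#include <stdint.h>

#define RING_SIZE 256                    /* power of two, hypothetical */

struct ring {
	uint32_t buf_len[RING_SIZE];     /* usable bytes behind each avail entry */
	uint16_t last_avail_idx;         /* first entry not yet consumed */
};

/* Reserve enough ring entries to hold 'size' bytes.
 * Returns the number of entries reserved, or -1 if the ring is short. */
int reserve_avail(const struct ring *vq, uint16_t avail_head,
		  uint32_t size, uint16_t *nr_vec)
{
	uint16_t cur_idx = vq->last_avail_idx;
	uint32_t reserved = 0;
	uint16_t vec = 0;

	while (reserved < size) {
		if (cur_idx == avail_head)
			return -1;       /* no more available buffers */

		reserved += vq->buf_len[cur_idx & (RING_SIZE - 1)];
		vec++;
		cur_idx++;               /* free-running index, wraps naturally */
	}

	*nr_vec = vec;
	return vec;
}
```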
|
| /f-stack/dpdk/lib/librte_hash/ |
| rte_cuckoo_hash.c |
      934  uint32_t cur_idx, alt_idx;    in rte_hash_cuckoo_make_space_mw() local
      948  cur_idx = tail->cur_bkt_idx;    in rte_hash_cuckoo_make_space_mw()
      960  alt_idx = get_alt_bucket_index(h, cur_idx,    in rte_hash_cuckoo_make_space_mw()
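The rte_cuckoo_hash.c matches come from the make-space path of cuckoo insertion: each entry has a primary and one alternative bucket, and the alternative index is derived from the current bucket index together with the entry's short signature. A toy sketch of that relationship follows; the XOR-with-signature derivation is a common cuckoo construction used here as an assumption, not necessarily the exact DPDK formula.

```c
#include <stdint.h>
#include <stdio.h>

#define NUM_BUCKETS 1024                 /* power of two */
#define BUCKET_MASK (NUM_BUCKETS - 1)

/* Derive the alternative bucket from the current bucket and the entry's
 * short signature.  XOR keeps the mapping symmetric: applying it twice
 * from either bucket lands back on the other one. */
static uint32_t get_alt_bucket(uint32_t cur_idx, uint16_t sig)
{
	return (cur_idx ^ sig) & BUCKET_MASK;
}

int main(void)
{
	uint32_t cur_idx = 37;
	uint16_t sig = 0xbeef;
	uint32_t alt_idx = get_alt_bucket(cur_idx, sig);

	/* Symmetry check: the alternative of the alternative is the original. */
	printf("alt of %u is %u, and back: %u\n",
	       cur_idx, alt_idx, get_alt_bucket(alt_idx, sig));
	return 0;
}
```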
|