Cross-reference matches for the identifier "rd" in the DPDK source tree
(file, line number, matched line, enclosing context):

/dpdk/lib/bpf/

bpf_validate.c
    483  rd->u.min *= rd->u.min;              in eval_mul()
    494  rd->s.min *= rd->s.min;              in eval_mul()
    518  rd->u.max = rd->u.max;               in eval_divmod()
    524  rd->s.min = (int32_t)rd->s.min;      in eval_divmod()
    525  rd->s.max = (int32_t)rd->s.max;      in eval_divmod()
    542  rd->s.min = RTE_MAX(rd->s.max, 0);   in eval_divmod()
    543  rd->s.min = RTE_MIN(rd->s.min, 0);   in eval_divmod()
    561  rd->u.min = (int32_t)rd->u.min;      in eval_neg()
    562  rd->u.max = (int32_t)rd->u.max;      in eval_neg()
    573  rd->s.min = (int32_t)rd->s.min;      in eval_neg()
    [all …]
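
The eval_*() helpers matched above implement the BPF validator's value-range
tracking: each register carries unsigned (u.min/u.max) and signed
(s.min/s.max) bounds that are updated instruction by instruction. A minimal
sketch of the idea, using hypothetical names (reg_bounds, eval_mul_sketch)
rather than DPDK's actual code:

    #include <stdint.h>

    /* simplified register-value interval, modeled on the rd->u / rd->s
     * fields visible in the matches above */
    struct reg_bounds {
        struct { uint64_t min, max; } u;  /* unsigned range */
        struct { int64_t min, max; } s;   /* signed range */
    };

    /* fold the source range rs into the destination range rd for an
     * unsigned multiply; only sound while the products cannot wrap,
     * which a real validator must check separately */
    static void
    eval_mul_sketch(struct reg_bounds *rd, const struct reg_bounds *rs)
    {
        rd->u.min *= rs->u.min;  /* smallest possible product */
        rd->u.max *= rs->u.max;  /* largest possible product */
    }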
|
bpf_jit_arm64.c
    243  insn |= rd;                                        in emit_add_sub_imm()
    324  insn |= rd;                                        in mov_imm()
    442  insn |= rd;                                        in emit_add_sub()
    450  emit_add_sub(ctx, is64, rd, rd, rm, A64_ADD);      in emit_add()
    456  emit_add_sub(ctx, is64, rd, rd, rm, A64_SUB);      in emit_sub()
    462  emit_add_sub(ctx, is64, rd, A64_ZR, rd, A64_SUB);  in emit_neg()
    475  insn |= rd;                                        in emit_mul()
    496  insn |= rd;                                        in emit_data_process_two_src()
    631  emit_msub(ctx, is64, rd, tmp, rm, rd);             in emit_mod()
    651  emit_bitfield(ctx, 1, rd, rd, 0, 15, A64_UBFM);    in emit_zero_extend()
    [all …]
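
The repeated "insn |= rd" lines show how the JIT composes each 32-bit A64
instruction word: in the common data-processing encodings the destination
register occupies bits [4:0] and the first source register bits [9:5], so
the emitters OR register numbers into a fixed opcode pattern. A hedged
sketch (emit_sketch and opcode_bits are illustrative, not the library's
actual encoder):

    #include <stdint.h>

    /* compose one A64 data-processing instruction word; opcode_bits
     * holds the fixed opcode/option fields, and the register numbers
     * are OR-ed into their bit positions */
    static uint32_t
    emit_sketch(uint32_t opcode_bits, uint8_t rd, uint8_t rn)
    {
        uint32_t insn = opcode_bits;
        insn |= (uint32_t)rn << 5;  /* first source register, bits [9:5] */
        insn |= rd;                 /* destination register, bits [4:0] */
        return insn;
    }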
|
/dpdk/lib/eal/include/generic/

rte_pflock.h
     44  } rd, wr;                                                             member
     93  pf->rd.in = 0;                                                        in rte_pflock_init()
     94  pf->rd.out = 0;                                                       in rte_pflock_init()
    118  w = __atomic_fetch_add(&pf->rd.in, RTE_PFLOCK_RINC, __ATOMIC_ACQUIRE)    in rte_pflock_read_lock()
    124  RTE_WAIT_UNTIL_MASKED(&pf->rd.in, RTE_PFLOCK_WBITS, !=, w,               in rte_pflock_read_lock()
    141  __atomic_fetch_add(&pf->rd.out, RTE_PFLOCK_RINC, __ATOMIC_RELEASE);      in rte_pflock_read_unlock()
    174  ticket = __atomic_fetch_add(&pf->rd.in, w, __ATOMIC_RELAXED);            in rte_pflock_write_lock()
    177  rte_wait_until_equal_16(&pf->rd.out, ticket, __ATOMIC_ACQUIRE);          in rte_pflock_write_lock()
    194  __atomic_fetch_and(&pf->rd.in, RTE_PFLOCK_LSB, __ATOMIC_RELEASE);        in rte_pflock_write_unlock()
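
rte_pflock.h is DPDK's phase-fair reader/writer lock; the rd and wr members
matched above hold the reader- and writer-side ticket state, and the
fetch-add/wait pairs implement acquisition. A minimal usage sketch, assuming
the public rte_pflock_* API declared in this header:

    #include <stdint.h>
    #include <rte_pflock.h>

    static rte_pflock_t lock = RTE_PFLOCK_INITIALIZER;
    static uint64_t counter;

    /* reader side: many lcores may hold the read lock at once */
    static uint64_t
    read_counter(void)
    {
        uint64_t v;

        rte_pflock_read_lock(&lock);
        v = counter;
        rte_pflock_read_unlock(&lock);
        return v;
    }

    /* writer side: phase-fair ordering keeps writers from starving
     * behind a continuous stream of readers */
    static void
    bump_counter(void)
    {
        rte_pflock_write_lock(&lock);
        counter++;
        rte_pflock_write_unlock(&lock);
    }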
|
/dpdk/drivers/net/af_packet/

rte_eth_af_packet.c
     44  struct iovec *rd;                                      member
     61  struct iovec *rd;                                      member
    451  rte_free(internals->rx_queue[q].rd);                   in eth_dev_close()
    452  rte_free(internals->tx_queue[q].rd);                   in eth_dev_close()
    841  rdsize = req->tp_frame_nr * sizeof(*(rx_queue->rd));   in rte_pmd_init_internals()
    844  if (rx_queue->rd == NULL)                              in rte_pmd_init_internals()
    848  rx_queue->rd[i].iov_len = req->tp_frame_size;          in rte_pmd_init_internals()
    861  if (tx_queue->rd == NULL)                              in rte_pmd_init_internals()
    865  tx_queue->rd[i].iov_len = req->tp_frame_size;          in rte_pmd_init_internals()
    924  rte_free((*internals)->rx_queue[q].rd);                in rte_pmd_init_internals()
    [all …]
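
In the af_packet PMD each queue's rd member is an array of struct iovec, one
entry per PACKET_MMAP frame, sized and filled in rte_pmd_init_internals()
and released with rte_free() on close. A hedged sketch of that setup pattern
(build_frame_vec and its parameters are illustrative; the PMD itself
allocates from DPDK's heap and points entries into the mmap'd ring):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/uio.h>

    /* one iovec per ring frame, each pointing at its frame's start */
    static struct iovec *
    build_frame_vec(uint8_t *ring_base, unsigned int frame_nr,
            unsigned int frame_size)
    {
        struct iovec *rd = calloc(frame_nr, sizeof(*rd));

        if (rd == NULL)
            return NULL;
        for (unsigned int i = 0; i < frame_nr; i++) {
            rd[i].iov_base = ring_base + (size_t)i * frame_size;
            rd[i].iov_len = frame_size;
        }
        return rd;
    }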
|
/dpdk/app/test/

test_rwlock.c
    408  uint32_t lc, rd, wr;                                     in process_try_lcore_stats() local
    416  rd = 0;                                                  in process_try_lcore_stats()
    424  rd++;                                                    in process_try_lcore_stats()
    435  if (rd != 0) {                                           in process_try_lcore_stats()
    436  printf("aggregated stats for %u RDLOCK cores:\n", rd);   in process_try_lcore_stats()
    437  print_try_lcore_stats(&rlc, rd);                         in process_try_lcore_stats()
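
process_try_lcore_stats() counts how many lcores exercised the reader path
and prints aggregate numbers only when at least one did. A hypothetical
sketch of the same counting pattern (the struct and its fields are
stand-ins, not the test's actual types):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct lcore_stat_sketch {
        int is_reader;        /* did this lcore run the RDLOCK path? */
        uint64_t lock_count;  /* locks taken by this lcore */
    };

    /* count reader cores, sum their counters, print a summary */
    static void
    summarize(const struct lcore_stat_sketch *st, uint32_t n)
    {
        uint32_t lc, rd = 0;
        uint64_t total = 0;

        for (lc = 0; lc < n; lc++) {
            if (st[lc].is_reader) {
                rd++;
                total += st[lc].lock_count;
            }
        }
        if (rd != 0)
            printf("aggregated stats for %u RDLOCK cores: %" PRIu64 " locks\n",
                    rd, total);
    }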
|
/dpdk/drivers/net/ice/

ice_dcf_parent.c
    56  struct ice_flow_redirect rd;                              in ice_dcf_update_vsi_ctx() local
    58  memset(&rd, 0, sizeof(struct ice_flow_redirect));         in ice_dcf_update_vsi_ctx()
    59  rd.type = ICE_FLOW_REDIRECT_VSI;                          in ice_dcf_update_vsi_ctx()
    60  rd.vsi_handle = vsi_handle;                               in ice_dcf_update_vsi_ctx()
    61  rd.new_vsi_num = new_vsi_num;                             in ice_dcf_update_vsi_ctx()
    62  ice_flow_redirect((struct ice_adapter *)hw->back, &rd);   in ice_dcf_update_vsi_ctx()
|
ice_switch_filter.c
    1958  struct ice_flow_redirect *rd)                                       in ice_switch_redirect() argument
    1976  if (rdata->vsi_handle != rd->vsi_handle)                            in ice_switch_redirect()
    1983  if (rd->type != ICE_FLOW_REDIRECT_VSI)                              in ice_switch_redirect()
    1995  rinfo.sw_act.vsi_handle == rd->vsi_handle) ||                       in ice_switch_redirect()
    2015  rd->vsi_handle;                                                     in ice_switch_redirect()
    2028  hw->vsi_ctx[rd->vsi_handle]->vsi_num = filter_conf_ptr->vsi_num;    in ice_switch_redirect()
    2034  rinfo.sw_act.vsi_handle = rd->vsi_handle;                           in ice_switch_redirect()
    2075  hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;             in ice_switch_redirect()
    2088  filter_conf_ptr->vsi_num = rd->new_vsi_num;                         in ice_switch_redirect()
|
ice_generic_flow.h
    532  struct ice_flow_redirect *rd);
|
ice_generic_flow.c
    2586  struct ice_flow_redirect *rd)                    in ice_flow_redirect() argument
    2598  ret = p_flow->engine->redirect(ad, p_flow, rd);  in ice_flow_redirect()
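
Taken together, the three ice files show an engine-callback architecture:
ice_dcf_parent.c builds an ice_flow_redirect request, ice_generic_flow.c
forwards it through p_flow->engine->redirect, and ice_switch_filter.c
provides the switch engine's implementation. A hedged sketch of that
dispatch shape, with hypothetical simplified types:

    struct adapter;
    struct flow;
    struct redirect_req;

    /* each engine exposes a redirect hook */
    struct flow_engine {
        int (*redirect)(struct adapter *ad, struct flow *flow,
                struct redirect_req *rd);
    };

    /* each flow remembers the engine that created it */
    struct flow {
        const struct flow_engine *engine;
    };

    /* generic layer: forward the request to the owning engine */
    static int
    flow_redirect_sketch(struct adapter *ad, struct flow *flow,
            struct redirect_req *rd)
    {
        if (flow->engine == NULL || flow->engine->redirect == NULL)
            return -1;  /* owning engine cannot redirect */
        return flow->engine->redirect(ad, flow, rd);
    }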
|
/dpdk/lib/acl/

rte_acl.c
    451  acl_check_rule(const struct rte_acl_rule_data *rd)                        in acl_check_rule() argument
    453  if ((RTE_LEN2MASK(RTE_ACL_MAX_CATEGORIES, typeof(rd->category_mask)) &    in acl_check_rule()
    454  rd->category_mask) == 0 ||                                                in acl_check_rule()
    455  rd->priority > RTE_ACL_MAX_PRIORITY ||                                    in acl_check_rule()
    456  rd->priority < RTE_ACL_MIN_PRIORITY)                                      in acl_check_rule()
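
acl_check_rule() validates user-supplied rule metadata: RTE_LEN2MASK()
produces a mask of the low RTE_ACL_MAX_CATEGORIES bits, so a rule is
rejected when its category_mask selects no valid category bit or its
priority lies outside the allowed range. A hedged restatement with
illustrative constants (MAX_CATEGORIES and the prio_min/prio_max parameters
stand in for DPDK's RTE_ACL_* limits):

    #include <stdint.h>

    #define MAX_CATEGORIES 16  /* illustrative stand-in */
    #define CATEGORY_MASK ((UINT32_C(1) << MAX_CATEGORIES) - 1)

    struct rule_data_sketch {
        uint32_t category_mask;
        int32_t priority;
    };

    /* reject a rule with no valid category bit or an out-of-range
     * priority; returns 0 when the rule is acceptable */
    static int
    check_rule_sketch(const struct rule_data_sketch *rd,
            int32_t prio_min, int32_t prio_max)
    {
        if ((rd->category_mask & CATEGORY_MASK) == 0 ||
                rd->priority > prio_max ||
                rd->priority < prio_min)
            return -1;
        return 0;
    }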
|
/dpdk/lib/vhost/

vhost_user.c
    599  struct rte_vhost_resubmit_desc *rd;                                          in numa_realloc() local
    601  rd = rte_realloc_socket(ri->resubmit_list, sizeof(*rd) * ri->resubmit_num,   in numa_realloc()
    603  if (!rd) {                                                                   in numa_realloc()
    608  ri->resubmit_list = rd;                                                      in numa_realloc()
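
numa_realloc() migrates the resubmit list onto the NUMA node of the vhost
device with rte_realloc_socket(), which behaves like realloc() but targets a
given socket's memory. A minimal sketch of that pattern (move_to_socket is a
hypothetical helper, not the vhost code):

    #include <stddef.h>
    #include <rte_malloc.h>

    static void *
    move_to_socket(void *old, size_t elem_sz, size_t count, int socket)
    {
        /* default alignment (0); contents are preserved on success */
        void *p = rte_realloc_socket(old, elem_sz * count, 0, socket);

        if (p == NULL)
            return old;  /* reallocation failed: keep the original */
        return p;
    }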
|
/dpdk/doc/guides/sample_app_ug/

multi_process.rst
    305  Similarly, packets are routed between the 3rd and 4th network ports and so on.
|