| /dpdk/drivers/raw/cnxk_gpio/ |
| cnxk_gpio_irq.c |
|   59   if (!stack)   in cnxk_gpio_irq_stack_free()
|   62   if (stack->inuse)   in cnxk_gpio_irq_stack_free()
|   63   stack->inuse--;   in cnxk_gpio_irq_stack_free()
|   68   rte_free(stack);   in cnxk_gpio_irq_stack_free()
|   85   if (stack) {   in cnxk_gpio_irq_stack_alloc()
|   86   stack->inuse++;   in cnxk_gpio_irq_stack_alloc()
|   90   stack = rte_malloc(NULL, sizeof(*stack), 0);   in cnxk_gpio_irq_stack_alloc()
|   91   if (!stack)   in cnxk_gpio_irq_stack_alloc()
|   97   rte_free(stack);   in cnxk_gpio_irq_stack_alloc()
|  101   stack->cpu = cpu;   in cnxk_gpio_irq_stack_alloc()
|  [all …]
|
| /dpdk/lib/stack/ |
| rte_stack_std.h |
|   30   rte_spinlock_lock(&stack->lock);   in __rte_stack_std_push()
|   31   cache_objs = &stack->objs[stack->len];   in __rte_stack_std_push()
|   35   rte_spinlock_unlock(&stack->lock);   in __rte_stack_std_push()
|   43   stack->len += n;   in __rte_stack_std_push()
|   45   rte_spinlock_unlock(&stack->lock);   in __rte_stack_std_push()
|   68   rte_spinlock_lock(&stack->lock);   in __rte_stack_std_pop()
|   70   if (unlikely(n > stack->len)) {   in __rte_stack_std_pop()
|   71   rte_spinlock_unlock(&stack->lock);   in __rte_stack_std_pop()
|   75   cache_objs = stack->objs;   in __rte_stack_std_pop()
|   81   stack->len -= n;   in __rte_stack_std_pop()
|  [all …]
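The matches above come from the spinlock-protected "standard" backend of DPDK's rte_stack library. As a rough sketch of how that library is normally consumed through its public API (the function name, sizes, and minimal error handling below are illustrative only; EAL initialization is assumed to have happened elsewhere):

```c
#include <rte_memory.h>
#include <rte_stack.h>

static int
stack_demo(void)
{
	int data[8];
	void *objs[8], *popped[8];
	unsigned int i, n;
	struct rte_stack *s;

	/* 0 flags selects the standard (spinlock-based) backend shown above;
	 * RTE_STACK_F_LF would select the lock-free one instead. */
	s = rte_stack_create("demo", 1024, SOCKET_ID_ANY, 0);
	if (s == NULL)
		return -1;

	for (i = 0; i < 8; i++)
		objs[i] = &data[i];

	n = rte_stack_push(s, objs, 8);    /* returns the number actually pushed */
	n = rte_stack_pop(s, popped, n);   /* returns the number actually popped */

	rte_stack_free(s);
	return n == 8 ? 0 : -1;
}
```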
|
| /dpdk/drivers/net/bnxt/tf_core/ |
| stack.h |
|   15   struct stack {   struct
|   37   struct stack *st);
|   47   uint32_t *stack_items(struct stack *st);
|   57   int32_t stack_size(struct stack *st);
|   67   bool stack_is_empty(struct stack *st);
|   77   bool stack_is_full(struct stack *st);
|   89   int stack_push(struct stack *st, uint32_t x);
|  103   int stack_pop(struct stack *st, uint32_t *x);
|  115   void stack_dump(struct stack *st);
|
| stack.c |
|   18   stack_init(int num_entries, uint32_t *items, struct stack *st)   in stack_init()
|   33   uint32_t *stack_items(struct stack *st)   in stack_items()
|   41   stack_size(struct stack *st)   in stack_size()
|   49   stack_is_empty(struct stack *st)   in stack_is_empty()
|   57   stack_is_full(struct stack *st)   in stack_is_full()
|   65   stack_push(struct stack *st, uint32_t x)   in stack_push()
|   81   stack_pop(struct stack *st, uint32_t *x)   in stack_pop()
|   94   void stack_dump(struct stack *st)   in stack_dump()
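Together, stack.h and stack.c above describe a small caller-backed LIFO of uint32_t values that the TruFlow code uses to recycle table indices. A minimal usage sketch, assuming the usual 0-on-success return convention; the pool size and function name below are invented for illustration:

```c
#include <stdint.h>
#include "stack.h"   /* the tf_core header listed above */

#define POOL_ENTRIES 32

static int
recycle_demo(void)
{
	uint32_t items[POOL_ENTRIES];  /* caller-provided backing storage */
	struct stack st;
	uint32_t idx;

	if (stack_init(POOL_ENTRIES, items, &st) != 0)
		return -1;

	/* Seed the free pool with every index. */
	for (idx = 0; idx < POOL_ENTRIES; idx++)
		if (stack_push(&st, idx) != 0)
			return -1;

	/* "Allocate": take an index off the stack ... */
	if (stack_pop(&st, &idx) != 0)
		return -1;

	/* ... use idx, then "free" it by pushing it back. */
	return stack_push(&st, idx);
}
```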
|
| meson.build |   16   'stack.c',
|
| tf_em_common.h |   69   struct stack ext_act_pool[TF_DIR_MAX];
|
| tf_em_common.c |
|   49   struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];   in tf_create_tbl_pool_external()
|  184   struct stack *pool;   in tf_tbl_ext_alloc()
|  237   struct stack *pool;   in tf_tbl_ext_free()
|
| /dpdk/doc/guides/prog_guide/ |
| stack_lib.rst |
|    7   DPDK's stack library provides an API for configuration and use of a bounded
|    8   stack of pointers.
|   10   The stack library provides the following basic operations:
|   12   * Create a uniquely named stack of a user-specified size and using a
|   19   * Free a previously created stack.
|   21   * Lookup a pointer to a stack by its name.
|   23   * Query a stack's current depth and number of free entries.
|   52   list's tail to the current stack head, and using a CAS to swing the stack head
|   55   adjusts the stack length and returns.
|   64   allocated before stack pushes and freed after stack pops. Since the stack has a
|  [all …]
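The stack_lib.rst matches describe the lock-free push: link the incoming list's tail to the current head, then use a CAS to swing the head over to the new list. Below is a deliberately simplified, single-element sketch of that idea in C11 atomics; the real rte_stack lock-free implementation pushes whole lists and pairs the head pointer with a modification counter behind a 16-byte compare-exchange to avoid the ABA problem, none of which is reproduced here.

```c
#include <stdatomic.h>
#include <stddef.h>

struct lf_elem {
	struct lf_elem *next;
	void *obj;
};

struct lf_stack {
	_Atomic(struct lf_elem *) head;
};

static void
lf_push(struct lf_stack *s, struct lf_elem *elem, void *obj)
{
	struct lf_elem *old_head = atomic_load(&s->head);

	elem->obj = obj;
	do {
		/* Link the new element to the current head ... */
		elem->next = old_head;
		/* ... then try to swing the head to the new element. On
		 * failure, old_head is reloaded and the loop retries. */
	} while (!atomic_compare_exchange_weak(&s->head, &old_head, elem));
}
```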
|
| kernel_nic_interface.rst |
|   18   * Allows an interface with the kernel network stack.
|   72   network stack. Without any parameters, only one kernel thread is created
|  344   If an mbuf is dequeued, it will be converted to a sk_buff and sent to the net stack via netif_rx().
|  357   The packet is received from the Linux net stack, by calling the kni_net_tx() callback.
|
| generic_receive_offload_lib.rst |
|   56   types are allocated in the stack. This design simplifies applications'
|   57   operations. However, limited by the stack size, the maximum number of
|
| /dpdk/doc/guides/mempool/ |
| stack.rst |
|   10   the mempool type (ring, stack, etc.) will have a negligible impact on
|   11   performance. However a stack-based mempool is often better suited to pipelined
|   18   The following modes of operation are available for the stack mempool driver and
|   21   - ``stack``
|   31   The standard stack outperforms the lock-free stack on average, however the
|   32   standard stack is non-preemptive: if a mempool user is preempted while holding
|   33   the stack lock, that thread will block all other mempool accesses until it
|   35   stack whose threads can be preempted can suffer from brief, infrequent
|   38   The lock-free stack, by design, is not susceptible to this problem; one thread can
|   42   For a more detailed description of the stack implementations, please refer to
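The stack.rst matches list the handler names accepted by the stack mempool driver. A hedged sketch of selecting the lock-based ``stack`` handler at runtime is shown below; the lock-free variant is chosen the same way via its own ops name (``lf_stack`` in recent releases, but check your version). Pool sizes and the function name are arbitrary for the example.

```c
#include <rte_mempool.h>

static struct rte_mempool *
make_stack_mempool(int socket_id)
{
	struct rte_mempool *mp;

	/* 4096 objects of 2048 bytes each; sizes are arbitrary for the demo.
	 * The per-lcore cache is disabled so every get/put hits the handler. */
	mp = rte_mempool_create_empty("demo_pool", 4096, 2048, 0, 0,
				      socket_id, 0);
	if (mp == NULL)
		return NULL;

	/* Swap in the stack-based enqueue/dequeue ops before populating. */
	if (rte_mempool_set_ops_byname(mp, "stack", NULL) != 0 ||
	    rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	return mp;
}
```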
|
| index.rst |   17   stack
|
| /dpdk/drivers/mempool/bucket/ |
| rte_mempool_bucket.c |
|   64   struct bucket_stack *stack;   in bucket_stack_create() local
|   71   if (stack == NULL)   in bucket_stack_create()
|   73   stack->limit = n_elts;   in bucket_stack_create()
|   74   stack->top = 0;   in bucket_stack_create()
|   76   return stack;   in bucket_stack_create()
|   82   RTE_ASSERT(stack->top < stack->limit);   in bucket_stack_push()
|   83   stack->objects[stack->top++] = obj;   in bucket_stack_push()
|   89   RTE_ASSERT(stack->top > 0);   in bucket_stack_pop_unsafe()
|   90   return stack->objects[--stack->top];   in bucket_stack_pop_unsafe()
|   96   if (stack->top == 0)   in bucket_stack_pop()
|  [all …]
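The bucket driver matches sketch a trivial bounded array stack: a top index, a capacity limit, an unchecked pop for callers that already know the stack is non-empty, and a checked pop otherwise. A self-contained illustration of that pattern (the struct and names below are invented, not the driver's definitions):

```c
#include <assert.h>
#include <stddef.h>

#define DEMO_STACK_LIMIT 64

struct demo_bucket_stack {
	unsigned int top;                  /* next free slot */
	unsigned int limit;                /* capacity */
	void *objects[DEMO_STACK_LIMIT];   /* stored pointers */
};

static void
demo_stack_push(struct demo_bucket_stack *stack, void *obj)
{
	assert(stack->top < stack->limit);
	stack->objects[stack->top++] = obj;
}

/* Caller guarantees the stack is not empty. */
static void *
demo_stack_pop_unsafe(struct demo_bucket_stack *stack)
{
	assert(stack->top > 0);
	return stack->objects[--stack->top];
}

/* Checked variant: returns NULL when the stack is empty. */
static void *
demo_stack_pop(struct demo_bucket_stack *stack)
{
	if (stack->top == 0)
		return NULL;
	return demo_stack_pop_unsafe(stack);
}
```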
|
| /dpdk/doc/guides/howto/ |
| flow_bifurcation.rst |
|   17   the Linux kernel stack). It can direct some traffic, for example data plane
|   19   plane traffic, to the traditional Linux networking stack.
|   35   In this way the Linux networking stack can receive specific traffic through
|   56   and let the rest go to the kernel stack.
|
| virtio_user_as_exceptional_path.rst |
|   14   such as KNI which exchanges packets with kernel networking stack. This
|  112   Then, all traffic from physical NIC can be forwarded into kernel stack, and all
|
| /dpdk/drivers/mempool/stack/ |
| meson.build |    6   deps += ['stack']
|
| /dpdk/drivers/mempool/ |
| meson.build |   11   'stack',
|
| /dpdk/doc/guides/nics/ |
| kni.rst |
|   13   Linux networking stack will be transparent to the DPDK application.
|   24   To forward any traffic from physical NIC to the Linux networking stack,
|
| /dpdk/doc/guides/rel_notes/ |
| release_19_05.rst |
|   24   Added a new stack library and APIs for configuration and use of a bounded
|   25   stack of pointers. The API provides multi-thread safe push and pop
|   28   The library supports two stack implementations: standard (lock-based) and
|   34   Added a new lock-free stack handler, which uses the newly added stack
|   58   network stack to achieve high performance packet processing.
|
| /dpdk/lib/bpf/ |
| bpf_exec.c |
|  484   uint64_t stack[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];   in rte_bpf_exec_burst() local
|  489   reg[EBPF_REG_10] = (uintptr_t)(stack + RTE_DIM(stack));   in rte_bpf_exec_burst()
|
| /dpdk/lib/table/ |
| rte_table_hash_key8.c |
|   77   uint32_t *stack;   member
|  412   f->stack = (uint32_t *)   in rte_table_hash_create_key8_ext()
|  421   f->stack[i] = i;   in rte_table_hash_create_key8_ext()
|  505   bucket_index = f->stack[--f->stack_pos];   in rte_table_hash_entry_add_key8_ext()
|  570   f->stack[f->stack_pos++] = bucket_index;   in rte_table_hash_entry_delete_key8_ext()
|
| rte_table_hash_key16.c |
|   81   uint32_t *stack;   member
|  425   f->stack = (uint32_t *)   in rte_table_hash_create_key16_ext()
|  437   f->stack[i] = i;   in rte_table_hash_create_key16_ext()
|  517   bucket_index = f->stack[--f->stack_pos];   in rte_table_hash_entry_add_key16_ext()
|  583   f->stack[f->stack_pos++] = bucket_index;   in rte_table_hash_entry_delete_key16_ext()
|
| rte_table_hash_key32.c |
|   81   uint32_t *stack;   member
|  435   f->stack = (uint32_t *)   in rte_table_hash_create_key32_ext()
|  451   f->stack[i] = i;   in rte_table_hash_create_key32_ext()
|  533   bucket_index = f->stack[--f->stack_pos];   in rte_table_hash_entry_add_key32_ext()
|  600   f->stack[f->stack_pos++] = bucket_index;   in rte_table_hash_entry_delete_key32_ext()
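The key8/key16/key32 extendable hash tables above all share the same pattern: an array used as a LIFO of free extension-bucket indices, filled with 0..n-1 at create time, popped on key add and pushed back on key delete. A hedged sketch of that free-index pattern (the struct and function names below are invented for illustration, not the library's):

```c
#include <stdint.h>
#include <stdlib.h>

struct ext_bucket_pool {
	uint32_t *stack;     /* indices of free extension buckets */
	uint32_t stack_pos;  /* number of free indices currently stacked */
};

static int
pool_init(struct ext_bucket_pool *p, uint32_t n_buckets_ext)
{
	uint32_t i;

	p->stack = malloc(n_buckets_ext * sizeof(uint32_t));
	if (p->stack == NULL)
		return -1;

	/* Initially every extension bucket is free. */
	for (i = 0; i < n_buckets_ext; i++)
		p->stack[i] = i;
	p->stack_pos = n_buckets_ext;
	return 0;
}

/* Key add path: grab a free extension bucket, if any remain. */
static int
pool_get(struct ext_bucket_pool *p, uint32_t *bucket_index)
{
	if (p->stack_pos == 0)
		return -1;
	*bucket_index = p->stack[--p->stack_pos];
	return 0;
}

/* Key delete path: return the extension bucket to the free pool. */
static void
pool_put(struct ext_bucket_pool *p, uint32_t bucket_index)
{
	p->stack[p->stack_pos++] = bucket_index;
}
```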
|
| /dpdk/doc/api/ |
| doxy-api.conf.in |   78   @TOPDIR@/lib/stack \
|
| /dpdk/doc/guides/cryptodevs/ |
| ccp.rst |   73   Bind the CCP devices to DPDK UIO driver module before running the CCP PMD stack.
|