Search results for "queue" in the f-stack source tree (hit line number, matching line, and enclosing function):

/f-stack/freebsd/contrib/vchiq/interface/vchiq_arm/

vchiq_util.c
     46  queue->read = 0;   in vchiu_queue_init()
     69  return queue->read == queue->write;   in vchiu_queue_is_empty()
     74  return queue->write == queue->read + queue->size;   in vchiu_queue_is_full()
     82  while (queue->write == queue->read + queue->size) {   in vchiu_queue_push()
     94  queue->storage[queue->write & (queue->size - 1)] = header;   in vchiu_queue_push()
    102  queue->write++;   in vchiu_queue_push()
    109  while (queue->write == queue->read) {   in vchiu_queue_peek()
    123  return queue->storage[queue->read & (queue->size - 1)];   in vchiu_queue_peek()
    130  while (queue->write == queue->read) {   in vchiu_queue_pop()
    142  header = queue->storage[queue->read & (queue->size - 1)];   in vchiu_queue_pop()
    [all …]

vchiq_util.h
     55  extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
     56  extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
     58  extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
     59  extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
     61  extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
     63  extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
     64  extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
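The vchiq hits above show the classic power-of-two ring buffer: `read` and `write` are free-running counters, the full test is `write == read + size`, and slots are addressed with `index & (size - 1)`. A minimal single-threaded sketch of the same indexing scheme (names are mine, not vchiq's; the real code also blocks on semaphores, which this omits):

```c
#include <stdlib.h>
#include <stdbool.h>

/* Hypothetical minimal ring buffer using vchiq-style indexing:
 * read/write are free-running; size must be a power of two. */
struct ring {
    void   **storage;
    unsigned size;   /* power of two */
    unsigned read;   /* total elements ever popped */
    unsigned write;  /* total elements ever pushed */
};

static bool ring_init(struct ring *q, unsigned size)
{
    if (size == 0 || (size & (size - 1)) != 0)
        return false;                      /* masking needs a power of two */
    q->storage = calloc(size, sizeof(void *));
    q->size = size;
    q->read = q->write = 0;
    return q->storage != NULL;
}

static bool ring_is_empty(const struct ring *q) { return q->read == q->write; }
static bool ring_is_full(const struct ring *q)  { return q->write == q->read + q->size; }

static bool ring_push(struct ring *q, void *item)
{
    if (ring_is_full(q))
        return false;                      /* the real code sleeps here instead */
    q->storage[q->write & (q->size - 1)] = item;
    q->write++;
    return true;
}

static void *ring_pop(struct ring *q)
{
    if (ring_is_empty(q))
        return NULL;
    return q->storage[q->read++ & (q->size - 1)];
}
```

Because the counters are never wrapped back into the mask range explicitly, unsigned overflow is benign: `write - read` is always the element count, even after the counters roll over.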

/f-stack/dpdk/drivers/net/ark/

ark_ethdev_rx.c
     98  ark_mpu_set_producer(queue->mpu, queue->seed_index);   in eth_ark_rx_hw_setup()
    111  ark_mpu_set_producer(queue->mpu, queue->seed_index);   in eth_ark_rx_update_cons_index()
    187  if (queue->reserve_q == 0 || queue->paddress_q == 0) {   in eth_ark_dev_rx_queue_setup()
    264  mbuf = queue->reserve_q[cons_index & queue->queue_mask];   in eth_ark_recv_pkts()
    413  ark_mpu_set_producer(queue->mpu, queue->seed_index);   in eth_ark_rx_start_queue()
    445  uint32_t limit = queue->cons_index + queue->queue_size;   in eth_ark_rx_seed_mbufs()
    449  uint32_t seed_m = queue->seed_index & queue->queue_mask;   in eth_ark_rx_seed_mbufs()
    532  queue->seed_index - queue->cons_index);   in eth_ark_rx_seed_recovery()
    653  ark_mpu_dump(queue->mpu, name, queue->phys_qid);   in ark_ethdev_rx_dump()
    654  ark_mpu_dump_setup(queue->mpu, queue->phys_qid);   in ark_ethdev_rx_dump()
    [all …]

ark_ethdev_tx.c
    107  prod_index_limit = queue->queue_size + queue->free_index;   in eth_ark_xmit_pkts()
    141  idx = queue->prod_index & queue->queue_mask;   in eth_ark_xmit_pkts()
    170  ark_mpu_set_producer(queue->mpu, queue->prod_index);   in eth_ark_xmit_pkts()
    186  (queue->prod_index - queue->free_index);   in eth_ark_tx_jumbo()
    193  idx = queue->prod_index & queue->queue_mask;   in eth_ark_tx_jumbo()
    254  queue->bufs =   in eth_ark_tx_queue_setup()
    260  if (queue->meta_q == 0 || queue->bufs == 0) {   in eth_ark_tx_queue_setup()
    342  queue->cons_index = queue->prod_index;   in eth_ark_tx_queue_release()
    360  while (queue->cons_index != queue->prod_index) {   in eth_ark_tx_queue_stop()
    399  meta = &queue->meta_q[queue->free_index & queue->queue_mask];   in free_completed_tx()
    [all …]
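The ark driver uses the same free-running-index pattern, with `queue_mask == queue_size - 1`. The identities it relies on hold under unsigned wraparound; a small worked example with hypothetical values (names mirror the driver's fields but this is not its API):

```c
#include <stdint.h>
#include <assert.h>

/* Free-running producer/consumer index arithmetic, ark-style.
 * Values are illustrative. */
int main(void)
{
    uint32_t queue_size = 1024;                  /* power of two */
    uint32_t queue_mask = queue_size - 1;

    /* Indices only grow; uint32_t wraparound is harmless. */
    uint32_t prod_index = 0xFFFFFFF0u;           /* near the 32-bit limit */
    uint32_t free_index = 0xFFFFFF00u;

    uint32_t in_flight = prod_index - free_index;         /* 240 */
    uint32_t prod_index_limit = queue_size + free_index;  /* wraps, still correct */

    assert(in_flight == 240);
    assert(prod_index_limit - prod_index == queue_size - in_flight);

    /* Slot lookup always masks the free-running index. */
    uint32_t idx = prod_index & queue_mask;
    assert(idx == 1008);
    return 0;
}
```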

/f-stack/freebsd/kern/

subr_taskqueue.c
    166  return (queue);   in _taskqueue_create()
    212  TQ_LOCK(queue);   in taskqueue_free()
    214  taskqueue_terminate(queue->tq_threads, queue);   in taskqueue_free()
    272  queue->tq_enqueue(queue->tq_context);   in taskqueue_enqueue_locked()
    285  TQ_LOCK(queue);   in taskqueue_enqueue()
    313  TQ_LOCK(queue);   in taskqueue_enqueue_timeout_sbt()
    431  TQ_LOCK(queue);   in taskqueue_block()
    440  TQ_LOCK(queue);   in taskqueue_unblock()
    443  queue->tq_enqueue(queue->tq_context);   in taskqueue_unblock()
    494  TQ_LOCK(queue);   in taskqueue_run()
    [all …]

subr_gtaskqueue.c
    140  if (!queue) {   in _gtaskqueue_create()
    156  return (queue);   in _gtaskqueue_create()
    176  TQ_LOCK(queue);   in gtaskqueue_free()
    178  gtaskqueue_terminate(queue->tq_threads, queue);   in gtaskqueue_free()
    202  TQ_LOCK(queue);   in grouptask_block()
    220  TQ_LOCK(queue);   in grouptask_unblock()
    234  TQ_LOCK(queue);   in grouptaskqueue_enqueue()
    247  queue->tq_enqueue(queue->tq_context);   in grouptaskqueue_enqueue()
    326  TQ_LOCK(queue);   in gtaskqueue_block()
    335  TQ_LOCK(queue);   in gtaskqueue_unblock()
    [all …]

subr_disk.c
    157  TAILQ_INIT(&head->queue);   in bioq_init()
    169  if (bp == TAILQ_FIRST(&head->queue))   in bioq_remove()
    174  TAILQ_REMOVE(&head->queue, bp, bio_queue);   in bioq_remove()
    175  if (TAILQ_EMPTY(&head->queue))   in bioq_remove()
    195  TAILQ_INSERT_HEAD(&head->queue, bp, bio_queue);   in bioq_insert_head()
    204  TAILQ_INSERT_TAIL(&head->queue, bp, bio_queue);   in bioq_insert_tail()
    215  return (TAILQ_FIRST(&head->queue));   in bioq_first()
    223  bp = TAILQ_FIRST(&head->queue);   in bioq_takefirst()
    283  cur = TAILQ_FIRST(&head->queue);   in bioq_disksort()
    296  TAILQ_INSERT_HEAD(&head->queue, bp, bio_queue);   in bioq_disksort()
    [all …]
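subr_disk.c builds its bio queue on the TAILQ macros from <sys/queue.h>: the linkage lives inside each element, so insertion and removal are O(1) with no allocation. A small userland sketch of the same pattern (a toy element type of my own, not the kernel's struct bio):

```c
#include <sys/queue.h>
#include <stdio.h>

/* Toy element; the kernel's struct bio carries much more state. */
struct req {
    int offset;
    TAILQ_ENTRY(req) link;       /* embedded linkage, like bio_queue */
};

TAILQ_HEAD(req_head, req);       /* like the queue inside bio_queue_head */

int main(void)
{
    struct req_head head;
    struct req a = { .offset = 10 }, b = { .offset = 20 }, *bp;

    TAILQ_INIT(&head);                       /* cf. bioq_init() */
    TAILQ_INSERT_TAIL(&head, &a, link);      /* cf. bioq_insert_tail() */
    TAILQ_INSERT_HEAD(&head, &b, link);      /* cf. bioq_insert_head() */

    /* cf. bioq_takefirst(): grab and unlink the first element. */
    bp = TAILQ_FIRST(&head);
    TAILQ_REMOVE(&head, bp, link);
    printf("took offset %d\n", bp->offset);  /* prints 20 */

    return TAILQ_EMPTY(&head) ? 1 : 0;       /* one element (a) remains */
}
```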

/f-stack/dpdk/drivers/net/pfe/

pfe_hif_lib.c
    161  queue->base, queue->size);   in hif_lib_client_init_rx_buffers()
    235  queue->base, queue->size);   in hif_lib_client_init_tx_buffers()
    346  struct rx_queue_desc *desc = queue->base + queue->read_idx;   in hif_lib_event_handler_start()
    407  desc = queue->base + queue->read_idx;   in hif_lib_receive_pkt()
    467  queue->read_idx = (queue->read_idx + 1) &   in hif_lib_receive_pkt()
    497  struct tx_queue_desc *desc = queue->base + queue->write_idx;   in hif_lib_xmit_pkt()
    513  queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);   in hif_lib_xmit_pkt()
    523  struct tx_queue_desc *desc = queue->base + queue->read_idx;   in hif_lib_tx_get_next_complete()
    526  qno, queue->read_idx, queue->tx_pending);   in hif_lib_tx_get_next_complete()
    531  if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {   in hif_lib_tx_get_next_complete()
    [all …]

/f-stack/dpdk/drivers/event/opdl/

opdl_evdev_init.c
    299  struct opdl_queue *queue = &device->queue[q_id];   in opdl_add_deps() local
    415  struct opdl_queue *queue = &device->queue[i];   in build_all_dependencies() local
    472  struct opdl_queue *queue = &device->queue[i];   in check_queues_linked() local
    709  struct opdl_queue *queue = &device->queue[port->queue_id];   in initialise_all_other_ports() local
    733  queue->ports[queue->nb_ports] = port;   in initialise_all_other_ports()
    751  queue->ports[queue->nb_ports] = port;   in initialise_all_other_ports()
    778  queue->ports[queue->nb_ports] = port;   in initialise_all_other_ports()
    799  queue = &device->queue[next_qid];   in initialise_all_other_ports()
    808  queue->ports[queue->nb_ports] = port;   in initialise_all_other_ports()
    901  queue = &device->queue[port->queue_id];   in initialise_queue_zero_ports()
    [all …]

/f-stack/freebsd/contrib/octeon-sdk/

cvmx-pko.c
    309  for (queue = 0; queue < num_queues; queue++)   in __cvmx_pko_iport_config()
    318  queue)   in __cvmx_pko_iport_config()
    350  for (queue = 0; queue < num_queues; queue++)   in __cvmx_pko_iport_config()
    729  int queue;   in cvmx_pko_shutdown() local
    737  for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++)   in cvmx_pko_shutdown()
    747  for (queue=0; queue<CVMX_PKO_MAX_OUTPUT_QUEUES; queue++)   in cvmx_pko_shutdown()
    753  config.s.queue = queue & 0x7f;   in cvmx_pko_shutdown()
    822  for (queue = 0; queue < num_queues; queue++)   in cvmx_pko_config_port()
    830  priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY && queue)   in cvmx_pko_config_port()
    871  for (queue = 0; queue < num_queues; queue++)   in cvmx_pko_config_port()
    [all …]

cvmx-zip.c
     96  int cvmx_zip_queue_initialize(int queue, int zcoremask)   in cvmx_zip_queue_initialize() argument
    123  cvmx_write_csr(CVMX_ZIP_QUEX_BUF(queue), zip_que_buf.u64);   in cvmx_zip_queue_initialize()
    126  que_map.u64 = cvmx_read_csr(CVMX_ZIP_QUEX_MAP(queue));   in cvmx_zip_queue_initialize()
    128  cvmx_write_csr(CVMX_ZIP_QUEX_MAP(queue), que_map.u64);   in cvmx_zip_queue_initialize()
    132  que_ena.s.ena |= (1<<queue);   in cvmx_zip_queue_initialize()
    139  if (queue)   in cvmx_zip_queue_initialize()
    146  cvmx_read_csr(CVMX_ZIP_QUEX_BUF(queue));   in cvmx_zip_queue_initialize()
    183  int cvmx_zip_queue_shutdown(int queue)   in cvmx_zip_queue_shutdown() argument
    187  if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_ZIP_QUE(queue)))   in cvmx_zip_queue_shutdown()
    198  cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_ZIP_QUE(queue));   in cvmx_zip_queue_shutdown()
    [all …]

cvmx-pko.h
    283  static inline void cvmx_pko_doorbell(uint64_t ipd_port, uint64_t queue, uint64_t len)   in cvmx_pko_doorbell() argument
    297  ptr.s.queue = queue;   in cvmx_pko_doorbell()
    381  result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),   in cvmx_pko_send_packet_finish()
    387  cvmx_pko_doorbell(ipd_port, queue, 2);   in cvmx_pko_send_packet_finish()
    429  result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),   in cvmx_pko_send_packet_finish3()
    436  cvmx_pko_doorbell(ipd_port, queue, 3);   in cvmx_pko_send_packet_finish3()
    743  ptr.s.queue = queue;   in cvmx_pko_doorbell_pkoid()
    770  result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),   in cvmx_pko_send_packet_finish_pkoid()
    776  cvmx_pko_doorbell_pkoid(pko_port, queue, 2);   in cvmx_pko_send_packet_finish_pkoid()
    812  result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),   in cvmx_pko_send_packet_finish3_pkoid()
    [all …]

/f-stack/dpdk/drivers/common/qat/

qat_qp.c
    359  queue->hw_queue_number, queue->memz_name);   in qat_queue_delete()
    364  memset(queue->base_addr, 0x7F, queue->queue_size);   in qat_queue_delete()
    401  snprintf(queue->memz_name, sizeof(queue->memz_name),   in qat_queue_create()
    405  queue->hw_bundle_number, queue->hw_queue_number);   in qat_queue_create()
    432  queue->head = 0;   in qat_queue_create()
    433  queue->tail = 0;   in qat_queue_create()
    529  queue = &qp->rx_q;   in adf_configure_queues()
    660  tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);   in qat_enqueue_op_burst()
    924  ((uint8_t *)queue->base_addr + queue->head);   in qat_cq_dequeue_response()
    946  queue->head = adf_modulo(queue->head + queue->msg_size,   in qat_cq_dequeue_response()
    [all …]

/f-stack/freebsd/contrib/ck/src/

ck_barrier_combining.c
     43  if (queue->head != NULL) {   in ck_barrier_combining_queue_dequeue()
     44  front = queue->head;   in ck_barrier_combining_queue_dequeue()
     45  queue->head = queue->head->next;   in ck_barrier_combining_queue_dequeue()
     81  if (queue->head == NULL) {   in ck_barrier_combining_queue_enqueue()
     82  queue->head = queue->tail = node_value;   in ck_barrier_combining_queue_enqueue()
     86  queue->tail->next = node_value;   in ck_barrier_combining_queue_enqueue()
     87  queue->tail = node_value;   in ck_barrier_combining_queue_enqueue()
     99  struct ck_barrier_combining_queue queue;   in ck_barrier_combining_group_init() local
    101  queue.head = queue.tail = NULL;   in ck_barrier_combining_group_init()
    115  while (queue.head != NULL) {   in ck_barrier_combining_group_init()
    [all …]
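These hits are the textbook head/tail singly linked FIFO: enqueue appends at `tail`, dequeue pops from `head`, and emptiness is `head == NULL`. A self-contained sketch of the same shape (node and field names are mine, not ck's):

```c
#include <stddef.h>

/* Minimal head/tail FIFO, same shape as ck_barrier_combining_queue. */
struct node {
    struct node *next;
    int value;
};

struct fifo {
    struct node *head;
    struct node *tail;
};

static void fifo_enqueue(struct fifo *q, struct node *n)
{
    n->next = NULL;
    if (q->head == NULL) {
        q->head = q->tail = n;   /* first element: both ends point at it */
        return;
    }
    q->tail->next = n;           /* append behind the current tail */
    q->tail = n;
}

static struct node *fifo_dequeue(struct fifo *q)
{
    struct node *front = q->head;
    if (front != NULL)
        q->head = front->next;   /* tail is left stale when the queue empties;
                                  * enqueue resets it via the head == NULL case */
    return front;
}
```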

/f-stack/dpdk/app/test-pmd/

util.c
     81  port_id, queue,   in dump_pkt_burst()
    227  (unsigned int) queue);   in dump_pkt_burst()
    277  uint16_t queue;   in add_tx_md_callback() local
    287  for (queue = 0; queue < dev_info.nb_tx_queues; queue++)   in add_tx_md_callback()
    289  ports[portid].tx_set_md_cb[queue] =   in add_tx_md_callback()
    298  uint16_t queue;   in remove_tx_md_callback() local
    308  for (queue = 0; queue < dev_info.nb_tx_queues; queue++)   in remove_tx_md_callback()
    333  uint16_t queue;   in add_tx_dynf_callback() local
    343  for (queue = 0; queue < dev_info.nb_tx_queues; queue++)   in add_tx_dynf_callback()
    354  uint16_t queue;   in remove_tx_dynf_callback() local
    [all …]

/f-stack/app/nginx-1.16.1/src/core/

ngx_queue.c
     18  ngx_queue_middle(ngx_queue_t *queue)   in ngx_queue_middle() argument
     22  middle = ngx_queue_head(queue);   in ngx_queue_middle()
     24  if (middle == ngx_queue_last(queue)) {   in ngx_queue_middle()
     28  next = ngx_queue_head(queue);   in ngx_queue_middle()
     35  if (next == ngx_queue_last(queue)) {   in ngx_queue_middle()
     41  if (next == ngx_queue_last(queue)) {   in ngx_queue_middle()
     51  ngx_queue_sort(ngx_queue_t *queue,   in ngx_queue_sort() argument
     56  q = ngx_queue_head(queue);   in ngx_queue_sort()
     58  if (q == ngx_queue_last(queue)) {   in ngx_queue_sort()
     62  for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next)   in ngx_queue_sort()
    [all …]
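ngx_queue_middle() finds the midpoint of a sentinel-based doubly linked list by advancing one cursor a node at a time and a second cursor two nodes at a time; when the fast cursor reaches the last node, the slow one sits at the middle. ngx_queue_sort() uses the midpoint to split the list for its merge sort. A compact sketch of the fast/slow technique on a plain singly linked list (my own types, not nginx's):

```c
#include <stddef.h>

/* Fast/slow cursor midpoint search, as in ngx_queue_middle().
 * Plain singly linked list here; nginx walks its sentinel-based
 * doubly linked ngx_queue_t the same way. */
struct node {
    struct node *next;
};

static struct node *list_middle(struct node *head)
{
    struct node *slow = head, *fast = head;

    /* fast moves two hops per iteration, slow moves one; when fast
     * runs off the end, slow has covered half the distance. */
    while (fast != NULL && fast->next != NULL) {
        slow = slow->next;
        fast = fast->next->next;
    }
    return slow;  /* middle node (second middle for even lengths) */
}
```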

/f-stack/freebsd/sys/

taskqueue.h
     85  int taskqueue_enqueue_timeout(struct taskqueue *queue,
     93  int taskqueue_cancel_timeout(struct taskqueue *queue,
     96  void taskqueue_drain_timeout(struct taskqueue *queue,
     98  void taskqueue_drain_all(struct taskqueue *queue);
     99  void taskqueue_quiesce(struct taskqueue *queue);
    100  void taskqueue_free(struct taskqueue *queue);
    101  void taskqueue_run(struct taskqueue *queue);
    102  void taskqueue_block(struct taskqueue *queue);
    103  void taskqueue_unblock(struct taskqueue *queue);
    105  void taskqueue_set_callback(struct taskqueue *queue,
    [all …]
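These prototypes belong to the taskqueue(9) KPI, whose implementation appears in subr_taskqueue.c above. A typical lifecycle looks roughly like the kernel-side sketch below (illustrative only; error handling trimmed, and it compiles only in a kernel context):

```c
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>

/* Sketch of the taskqueue(9) lifecycle: create, run, drain, free. */
static struct taskqueue *my_tq;
static struct task my_task;

static void
my_task_fn(void *arg, int pending)
{
    /* pending counts how many enqueues were coalesced into this run */
}

static void
my_start(void)
{
    my_tq = taskqueue_create("my_tq", M_WAITOK,
        taskqueue_thread_enqueue, &my_tq);
    taskqueue_start_threads(&my_tq, 1, PWAIT, "my_tq thread");

    TASK_INIT(&my_task, 0, my_task_fn, NULL);
    taskqueue_enqueue(my_tq, &my_task);
}

static void
my_stop(void)
{
    taskqueue_drain(my_tq, &my_task);   /* wait for any in-flight run */
    taskqueue_free(my_tq);              /* also reaps the worker thread */
}
```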

/f-stack/freebsd/contrib/ck/include/spinlock/

mcs.h
     48  ck_spinlock_mcs_init(struct ck_spinlock_mcs **queue)   in ck_spinlock_mcs_init() argument
     51  *queue = NULL;   in ck_spinlock_mcs_init()
     57  ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue,   in ck_spinlock_mcs_trylock() argument
     66  r = ck_pr_cas_ptr(queue, NULL, node);   in ck_spinlock_mcs_trylock()
     72  ck_spinlock_mcs_locked(struct ck_spinlock_mcs **queue)   in ck_spinlock_mcs_locked() argument
     76  r = ck_pr_load_ptr(queue) != NULL;   in ck_spinlock_mcs_locked()
     82  ck_spinlock_mcs_lock(struct ck_spinlock_mcs **queue,   in ck_spinlock_mcs_lock() argument
    100  previous = ck_pr_fas_ptr(queue, node);   in ck_spinlock_mcs_lock()
    116  ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue,   in ck_spinlock_mcs_unlock() argument
    130  if (ck_pr_load_ptr(queue) == node &&   in ck_spinlock_mcs_unlock()
    [all …]
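The MCS lock keeps an implicit queue of waiters: acquiring swaps the queue tail to your own node (`ck_pr_fas_ptr`), links you behind the previous holder, and spins on a per-node flag; releasing hands off to the successor, using compare-and-swap on the tail to detect that there is none. A condensed C11-atomics rendition of the classic algorithm (a sketch, not ck's code, which also provides a trylock path):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Classic MCS queue lock with C11 atomics. */
struct mcs_node {
    _Atomic(struct mcs_node *) next;
    atomic_bool locked;
};

typedef _Atomic(struct mcs_node *) mcs_lock;   /* tail of the waiter queue */

static void mcs_lock_acquire(mcs_lock *queue, struct mcs_node *node)
{
    struct mcs_node *prev;

    atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
    atomic_store_explicit(&node->locked, true, memory_order_relaxed);

    /* Swap ourselves in as the new tail (cf. ck_pr_fas_ptr). */
    prev = atomic_exchange_explicit(queue, node, memory_order_acq_rel);
    if (prev == NULL)
        return;                               /* queue was empty: lock is ours */

    /* Link behind the previous tail, then spin on our own flag. */
    atomic_store_explicit(&prev->next, node, memory_order_release);
    while (atomic_load_explicit(&node->locked, memory_order_acquire))
        ;                                     /* local spinning only */
}

static void mcs_lock_release(mcs_lock *queue, struct mcs_node *node)
{
    struct mcs_node *succ =
        atomic_load_explicit(&node->next, memory_order_acquire);

    if (succ == NULL) {
        /* No visible successor: try to swing the tail back to NULL. */
        struct mcs_node *expected = node;
        if (atomic_compare_exchange_strong_explicit(queue, &expected, NULL,
                memory_order_acq_rel, memory_order_acquire))
            return;
        /* A waiter is mid-enqueue; wait for its link to appear. */
        do {
            succ = atomic_load_explicit(&node->next, memory_order_acquire);
        } while (succ == NULL);
    }
    atomic_store_explicit(&succ->locked, false, memory_order_release);
}
```

Each thread spins only on its own node's flag, which is what makes MCS cache-friendly compared with a test-and-set lock where all waiters hammer one shared word.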

/f-stack/dpdk/lib/librte_bpf/

bpf_pkt.c
     47  uint16_t queue;   member
    146  if (cbi->port == port && cbi->queue == queue)   in bpf_eth_cbh_find()
    158  cbi = bpf_eth_cbh_find(cbh, port, queue);   in bpf_eth_cbh_add()
    165  cbi->queue = queue;   in bpf_eth_cbh_add()
    490  bpf_eth_unload(cbh, port, queue);   in rte_bpf_eth_rx_unload()
    501  bpf_eth_unload(cbh, port, queue);   in rte_bpf_eth_tx_unload()
    521  queue >= RTE_MAX_QUEUES_PER_PORT)   in bpf_eth_elf_load()
    531  __func__, port, queue);   in bpf_eth_elf_load()
    543  __func__, port, queue);   in bpf_eth_elf_load()
    549  bc = bpf_eth_cbh_add(cbh, port, queue);   in bpf_eth_elf_load()
    [all …]

/f-stack/dpdk/lib/librte_pdump/

rte_pdump.c
     43  uint16_t queue;   member
     50  uint16_t queue;   member
    130  qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;   in pdump_register_rx_callbacks()
    185  qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;   in pdump_register_tx_callbacks()
    253  queue = p->data.en_v1.queue;   in set_pdump_rxtx_cbs()
    265  queue = p->data.dis_v1.queue;   in set_pdump_rxtx_cbs()
    271  if (queue == RTE_PDUMP_ALL_QUEUES) {   in set_pdump_rxtx_cbs()
    304  end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;   in set_pdump_rxtx_cbs()
    313  end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;   in set_pdump_rxtx_cbs()
    451  req->data.en_v1.queue = queue;   in pdump_prepare_client_request()
    [all …]
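The RTE_PDUMP_ALL_QUEUES sentinel collapses "one queue" and "every queue" into a single loop by computing a half-open range [start_q, end_q). A small standalone illustration of the idiom, with a stand-in sentinel value of my own:

```c
#include <stdint.h>
#include <stdio.h>

#define ALL_QUEUES UINT16_MAX   /* stand-in for RTE_PDUMP_ALL_QUEUES */

/* Apply an action to either one queue or all of them, pdump-style. */
static void for_each_selected_queue(uint16_t queue, uint16_t nb_q)
{
    uint16_t start_q = (queue == ALL_QUEUES) ? 0 : queue;
    uint16_t end_q   = (queue == ALL_QUEUES) ? nb_q : (uint16_t)(queue + 1);

    for (uint16_t qid = start_q; qid < end_q; qid++)
        printf("configure callback on queue %u\n", qid);
}

int main(void)
{
    for_each_selected_queue(3, 8);          /* exactly queue 3 */
    for_each_selected_queue(ALL_QUEUES, 8); /* queues 0..7 */
    return 0;
}
```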

/f-stack/dpdk/drivers/event/dlb2/pf/base/

dlb2_resource.c
    650  if (queue == NULL) {   in dlb2_attach_ldb_queues()
    660  queue->owned = true;   in dlb2_attach_ldb_queues()
   1370  return queue;   in dlb2_get_ldb_queue_from_id()
   1375  return queue;   in dlb2_get_ldb_queue_from_id()
   3543  queue->id.phys_id;   in dlb2_configure_ldb_queue()
   3905  resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;   in dlb2_hw_create_ldb_queue()
   4545  if (queue == NULL || queue->domain_id.phys_id !=   in dlb2_verify_create_dir_port_args()
   5104  resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;   in dlb2_hw_create_dir_queue()
   5206  return queue;   in dlb2_get_domain_ldb_queue()
   5279  if (queue == NULL || !queue->configured) {   in dlb2_verify_map_qid_args()
    [all …]

/f-stack/dpdk/drivers/net/nfb/

nfb_tx.c
     16  if (txq->queue == NULL) {   in nfb_eth_tx_queue_start()
     21  ret = ndp_queue_start(txq->queue);   in nfb_eth_tx_queue_start()
     37  if (txq->queue == NULL) {   in nfb_eth_tx_queue_stop()
     42  ret = ndp_queue_stop(txq->queue);   in nfb_eth_tx_queue_stop()
     90  txq->queue = ndp_open_tx_queue(nfb, tx_queue_id);   in nfb_eth_tx_queue_init()
     91  if (txq->queue == NULL)   in nfb_eth_tx_queue_init()
    108  if (txq->queue != NULL) {   in nfb_eth_tx_queue_release()
    109  ndp_close_tx_queue(txq->queue);   in nfb_eth_tx_queue_release()
    111  txq->queue = NULL;   in nfb_eth_tx_queue_release()

/f-stack/freebsd/contrib/device-tree/Bindings/misc/

intel,ixp4xx-ahb-queue-manager.yaml
      5  $id: "http://devicetree.org/schemas/misc/intel,ixp4xx-ahb-queue-manager.yaml#"
     18  queues from the queue manager with foo-queue = <&qmgr N> where the
     19  &qmgr is a phandle to the queue manager and N is the queue resource
     20  number. The queue resources available and their specific purpose
     26  - const: intel,ixp4xx-ahb-queue-manager
     47  qmgr: queue-manager@60000000 {
     48  compatible = "intel,ixp4xx-ahb-queue-manager";

/f-stack/dpdk/drivers/crypto/bcmfs/

bcmfs_qp.c
     43  if (queue == NULL) {   in bcmfs_queue_delete()
     48  queue_pair_id, queue->q_type, queue->memz_name);   in bcmfs_queue_delete()
     50  mz = rte_memzone_lookup(queue->memz_name);   in bcmfs_queue_delete()
     53  memset(queue->base_addr, 0x9B, queue->queue_size);   in bcmfs_queue_delete()
     57  status, queue->memz_name);   in bcmfs_queue_delete()
     60  queue->memz_name);   in bcmfs_queue_delete()
    128  queue->q_type = qtype;   in bcmfs_queue_create()
    133  snprintf(queue->memz_name, sizeof(queue->memz_name),   in bcmfs_queue_create()
    146  queue->base_phys_addr);   in bcmfs_queue_create()
    152  queue->base_phys_addr = qp_mz->iova;   in bcmfs_queue_create()
    [all …]

/f-stack/freebsd/netinet/

tcp_pcap.c
    281  if (queue->mq_maxlen == 0)   in tcp_pcap_add()
    298  while (mbufq_full(queue)) {   in tcp_pcap_add()
    299  mhead = mbufq_dequeue(queue);   in tcp_pcap_add()
    418  if (mbufq_enqueue(queue, n)) {   in tcp_pcap_add()
    426  tcp_pcap_drain(struct mbufq *queue)   in tcp_pcap_drain() argument
    429  while ((m = mbufq_dequeue(queue)))   in tcp_pcap_drain()
    443  queue->mq_maxlen = newval;   in tcp_pcap_set_sock_max()
    444  while (queue->mq_len > queue->mq_maxlen)   in tcp_pcap_set_sock_max()
    445  tcp_pcap_m_freem(mbufq_dequeue(queue));   in tcp_pcap_set_sock_max()
    449  tcp_pcap_get_sock_max(struct mbufq *queue)   in tcp_pcap_get_sock_max() argument
    [all …]
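tcp_pcap bounds its capture queue with a drop-oldest policy: when the mbufq is full, it dequeues and frees the oldest entry before enqueueing the new one, and shrinking mq_maxlen likewise evicts from the head until the length fits. A generic sketch of the same policy (array-backed for brevity; the kernel chains mbufs, and all names here are mine):

```c
#include <stddef.h>

/* Drop-oldest bounded queue, mirroring tcp_pcap's mbufq policy. */
struct boundedq {
    void **slots;
    size_t cap;      /* analogue of mq_maxlen */
    size_t len;      /* analogue of mq_len */
    size_t head;     /* index of the oldest element */
};

static void *bq_dequeue(struct boundedq *q)
{
    if (q->len == 0)
        return NULL;
    void *oldest = q->slots[q->head];
    q->head = (q->head + 1) % q->cap;
    q->len--;
    return oldest;
}

/* Enqueue that never fails: evicts the oldest entry when full and
 * returns it so the caller can free it (tcp_pcap frees the mbuf). */
static void *bq_enqueue_drop_oldest(struct boundedq *q, void *item)
{
    void *evicted = NULL;
    if (q->cap == 0)
        return item;             /* mirrors the mq_maxlen == 0 bail-out */
    if (q->len == q->cap)
        evicted = bq_dequeue(q);
    q->slots[(q->head + q->len) % q->cap] = item;
    q->len++;
    return evicted;
}
```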