| /f-stack/freebsd/contrib/octeon-sdk/ |
| cvmx-resources.config |
    114  description = "PKO queues per port for interface 0 (ports 0-15)";
    117  description = "PKO queues per port for interface 1 (ports 16-31)";
    120  description = "PKO queues per port for interface 2";
    123  description = "PKO queues per port for interface 3";
    126  description = "PKO queues per port for interface 4";
    135  description = "PKO queues per port for PCI (ports 32-35)";
    138  description = "PKO queues per port for Loop devices (ports 36-39)";
    139  /* We use two queues per port for SRIO0. Having two queues per
    140     port with two ports gives us four queues, one for each mailbox */
    144  /* We use two queues per port for SRIO1. Having two queues per
    [all …]
|
| /f-stack/freebsd/crypto/ccp/ |
| ccp_lsb.c |
    49  qp = &sc->queues[queue];  in ccp_queue_decode_lsb_regions()
    77  for (q = 0; q < nitems(sc->queues); q++) {  in ccp_assign_lsb_regions()
    81  sc->queues[q].private_lsb = -1;  in ccp_assign_lsb_regions()
    87  sc->queues[q].private_lsb = i;  in ccp_assign_lsb_regions()
|
| ccp.c |
    142  for (i = 0; i < nitems(sc->queues); i++) {  in ccp_initialize_queues()
    143  qp = &sc->queues[i];  in ccp_initialize_queues()
    162  for (i = 0; i < nitems(sc->queues); i++) {  in ccp_free_queues()
    163  qp = &sc->queues[i];  in ccp_free_queues()
    465  for (q = 0; q < nitems(sc->queues); q++)  in ccp_newsession()
    468  if (q == nitems(sc->queues)) {  in ccp_newsession()
    547  qp = &sc->queues[s->queue];  in ccp_process()
    783  if (qindex >= nitems(sc->queues)) {  in DB_SHOW_COMMAND()
    787  db_show_ccp_qp(&sc->queues[qindex]);  in DB_SHOW_COMMAND()
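The ccp excerpts above share one idiom: the driver walks its fixed-size queue array with FreeBSD's nitems() macro instead of carrying a separate count. A minimal standalone sketch of that pattern follows; the struct names, the array size of 5, and the LSB-assignment logic are simplified placeholders, not the real ccp(4) definitions.

```c
#include <stddef.h>

#ifndef nitems                  /* provided by <sys/param.h> on FreeBSD */
#define nitems(x) (sizeof((x)) / sizeof((x)[0]))
#endif

/* Hypothetical, pared-down softc; only the iteration pattern matters here. */
struct ccp_queue_sketch {
	int private_lsb;        /* -1 means no private LSB region assigned */
};

struct ccp_softc_sketch {
	struct ccp_queue_sketch queues[5];
};

static void
assign_lsb_regions_sketch(struct ccp_softc_sketch *sc, unsigned lsbmask)
{
	unsigned q, next = 0;

	for (q = 0; q < nitems(sc->queues); q++) {
		sc->queues[q].private_lsb = -1;         /* default: shared LSB only */
		if (lsbmask & (1u << q))
			sc->queues[q].private_lsb = next++; /* claim the next region */
	}
}
```

Sizing the loop with nitems() keeps the bound in one place: resizing the queues[] array needs no other change.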
|
| /f-stack/freebsd/contrib/device-tree/Bindings/soc/ti/ |
| keystone-navigator-qmss.txt |
    32  -- managed-queues : the actual queues managed by each queue manager
    33     instance, specified as <"base queue #" "# of queues">.
    51  - qpend : pool of qpend(interruptible) queues
    53  as free descriptor queues or the
    54  transmit DMA queues.
    58  <"base queue #" "# of queues">.
    60  for interruptible queues. The driver additionally sets
    67  queues looking for descriptors that have been pushed
    90  monitor up to 32 queues starting at the base queue #.
    132  managed-queues = <0 0x2000>;
    [all …]
|
| /f-stack/dpdk/doc/guides/eventdevs/ |
| opdl.rst |
    11  packets follow is determined by the order in which queues are set up.\
    27  * Load balanced (for Atomic, Ordered, Parallel queues)
    28  * Single Link (for single-link queues)
    56  queues in the middle of a pipeline cannot delete packets.
    62  As stated the order in which packets travel through queues is static in
    63  nature. They go through the queues in the order the queues are setup at
    65  sets up 3 queues, Q0, Q1, Q2 and has 3 associated ports P0, P1, P2 and
    86  due to the static nature of the underlying queues. It is because of this
    92  - The order in which packets moved between queues is static and fixed \
    98  - All packets follow the same path through device queues.
    [all …]
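Since opdl derives the pipeline order purely from the sequence of queue-setup calls, the guide's Q0 -> Q1 -> Q2 example can be sketched with the public eventdev API as below. This assumes rte_event_dev_configure() has already been called for the device; passing NULL selects the default queue/port configuration, a simplification over the real opdl sample.

```c
#include <rte_eventdev.h>

/* Minimal sketch: on opdl the pipeline order is simply the order of the
 * rte_event_queue_setup() calls, so packets flow Q0 -> Q1 -> Q2 here. */
static int
setup_static_pipeline(uint8_t dev_id)
{
	uint8_t q, p;

	for (q = 0; q < 3; q++)
		if (rte_event_queue_setup(dev_id, q, NULL) < 0) /* NULL: defaults */
			return -1;

	for (p = 0; p < 3; p++) {
		if (rte_event_port_setup(dev_id, p, NULL) < 0)
			return -1;
		/* Port Pn consumes from queue Qn; events then move to Qn+1. */
		if (rte_event_port_link(dev_id, p, &p, NULL, 1) != 1)
			return -1;
	}
	return rte_event_dev_start(dev_id);
}
```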
|
| dlb.rst |
    26  supports atomic, ordered, and parallel scheduling events from queues to ports.
    42  directed queues, ports, credits, and other hardware resources. Some
    73  of its ports or queues are not, the PMD will apply their previous
    83  before its ports or queues can be.
    107  queues with 512 reorder entries, and so on down to 32 queues with 32 entries.
    118  load-balanced queues can use the full 16-bit flow ID range.
    130  queues, and max_single_link_event_port_queue_pairs reports the number of
    131  available directed ports and queues.
    140  directed ports and queues come in pairs.
    184  credits are used for directed queues.
    [all …]
|
| dlb2.rst |
    40  directed queues, ports, credits, and other hardware resources. Some
    72  queues with 512 reorder entries, and so on down to 32 queues with 32 entries.
    83  load-balanced queues can use the full 16-bit flow ID range.
    107  queues with 512 reorder entries, and so on down to 32 queues with 32 entries.
    118  load-balanced queues can use the full 16-bit flow ID range.
    131  available directed ports and queues.
    140  directed ports and queues come in pairs.
    175  credits are used for directed queues.
    305  of its ports or queues are not, the PMD will apply their previous
    315  before its ports or queues can be.
    [all …]
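The "512 reorder entries ... down to 32 queues with 32 entries" trade-off in both dlb docs implies a fixed pool (1024 entries by that arithmetic: 2x512 = 32x32) split across ordered queues, which an application expresses through nb_atomic_order_sequences in the queue config. A hedged sketch; the even split and the 1024 total are read from the excerpt, not a dlb2 API guarantee:

```c
#include <rte_eventdev.h>

/* Sketch: divide ~1024 reorder entries evenly across nb_ordered ordered
 * queues (nb_ordered in 1..32), matching the trade-off described above. */
static int
setup_ordered_queues(uint8_t dev_id, uint8_t nb_ordered)
{
	struct rte_event_queue_conf conf = {
		.schedule_type = RTE_SCHED_TYPE_ORDERED,
		.nb_atomic_order_sequences = 1024 / nb_ordered,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};
	uint8_t q;

	for (q = 0; q < nb_ordered; q++)
		if (rte_event_queue_setup(dev_id, q, &conf) < 0)
			return -1;
	return 0;
}
```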
|
| /f-stack/freebsd/contrib/device-tree/Bindings/misc/ |
| intel,ixp4xx-ahb-queue-manager.yaml |
    14  The IXP4xx AHB Queue Manager maintains queues as circular buffers in
    17  IXP4xx for accelerating queues, especially for networking. Clients pick
    18  queues from the queue manager with foo-queue = <&qmgr N> where the
    33  - description: Interrupt for queues 0-31
    34  - description: Interrupt for queues 32-63
|
| /f-stack/dpdk/app/test/ |
| test_eventdev.c |
    784  uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];  in test_eventdev_link() local
    797  queues[i] = i;  in test_eventdev_link()
    812  uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];  in test_eventdev_unlink() local
    824  queues[i] = i;  in test_eventdev_unlink()
    840  uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];  in test_eventdev_link_get() local
    854  queues[i] = i;  in test_eventdev_link_get()
    865  queues[i] = i;  in test_eventdev_link_get()
    880  queues[0] = 0;  in test_eventdev_link_get()
    897  queues[i] = i;  in test_eventdev_link_get()
    906  queues[i] = i;  in test_eventdev_link_get()
    [all …]
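The test excerpts fill a queues[] array with identity values and hand it to the link/unlink calls. A condensed sketch of the same pattern using only public eventdev APIs, with device, queue, and port setup assumed done:

```c
#include <rte_eventdev.h>

/* Link every queue 0..nb_queues-1 to one port, then verify the links by
 * reading them back; rte_event_port_links_get() returns the link count. */
static int
link_all_queues(uint8_t dev_id, uint8_t port_id, uint8_t nb_queues)
{
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, nb_links;

	for (i = 0; i < nb_queues; i++)
		queues[i] = i;

	/* NULL priorities: use the default service priority for each link. */
	if (rte_event_port_link(dev_id, port_id, queues, NULL, nb_queues)
	    != nb_queues)
		return -1;

	nb_links = rte_event_port_links_get(dev_id, port_id, queues, priorities);
	return (nb_links == nb_queues) ? 0 : -1;
}
```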
|
| /f-stack/dpdk/lib/librte_bbdev/ |
| rte_bbdev.h |
    424  struct rte_bbdev_queue_data *queues; /**< Queue structures */  member
    499  struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];  in rte_bbdev_enqueue_enc_ops()
    530  struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];  in rte_bbdev_enqueue_dec_ops()
    561  struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];  in rte_bbdev_enqueue_ldpc_enc_ops()
    592  struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];  in rte_bbdev_enqueue_ldpc_dec_ops()
    625  struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];  in rte_bbdev_dequeue_enc_ops()
    658  struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];  in rte_bbdev_dequeue_dec_ops()
    690  struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];  in rte_bbdev_dequeue_ldpc_enc_ops()
    721  struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];  in rte_bbdev_dequeue_ldpc_dec_ops()
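Each inline helper above resolves dev->data->queues[queue_id] internally, so callers address a queue purely by (dev_id, queue_id). A sketch of the resulting application-side burst loop; the busy retry-until-done shape is an illustrative simplification, not a prescribed bbdev usage:

```c
#include <rte_bbdev.h>

/* Push nb_ops encode operations through one bbdev queue and drain the
 * results. Enqueue/dequeue return how many ops they accepted/produced,
 * so both loops simply advance by the returned count. */
static void
encode_burst(uint16_t dev_id, uint16_t queue_id,
	     struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	uint16_t enq = 0, deq = 0;

	while (enq < nb_ops)
		enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
						 &ops[enq], nb_ops - enq);
	while (deq < nb_ops)
		deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
						 &ops[deq], nb_ops - deq);
}
```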
|
| rte_bbdev.c |
    345  if (dev->data->queues != NULL) {  in rte_bbdev_setup_queues()
    366  rte_free(dev->data->queues);  in rte_bbdev_setup_queues()
    373  if (dev->data->queues == NULL) {  in rte_bbdev_setup_queues()
    399  rte_free(dev->data->queues);  in rte_bbdev_setup_queues()
    400  dev->data->queues = NULL;  in rte_bbdev_setup_queues()
    579  dev->data->queues[i].started = true;  in rte_bbdev_start()
    634  rte_free(dev->data->queues);  in rte_bbdev_close()
    645  dev->data->queues = NULL;  in rte_bbdev_close()
    662  if (dev->data->queues[queue_id].started) {  in rte_bbdev_queue_start()
    719  &dev->data->queues[q_id].queue_stats;  in get_stats_from_queues()
    [all …]
|
| /f-stack/dpdk/drivers/event/dsw/ |
| dsw_evdev.c |
    104  struct dsw_queue *queue = &dsw->queues[queue_id];  in dsw_queue_setup()
    173  const uint8_t queues[], uint16_t num, bool link)  in dsw_port_link_unlink() argument
    181  uint8_t qid = queues[i];  in dsw_port_link_unlink()
    182  struct dsw_queue *q = &dsw->queues[qid];  in dsw_port_link_unlink()
    197  dsw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],  in dsw_port_link() argument
    200  return dsw_port_link_unlink(dev, port, queues, num, true);  in dsw_port_link()
    204  dsw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],  in dsw_port_unlink() argument
    207  return dsw_port_link_unlink(dev, port, queues, num, false);  in dsw_port_unlink()
    258  struct dsw_queue *queue = &dsw->queues[queue_id];  in initial_flow_to_port_assignment()
    265  dsw->queues[queue_id].flow_to_port_map[flow_hash] =  in initial_flow_to_port_assignment()
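dsw routes both link and unlink through one dsw_port_link_unlink() worker, with a bool selecting the direction, so the two PMD entry points stay one-liners. A generic, self-contained sketch of that shape; sketch_dev, the bitmap layout, and the limits are invented for illustration and the real dsw structures differ:

```c
#include <stdbool.h>
#include <stdint.h>

/* One 64-bit mask per queue; bit p set means port p serves that queue.
 * Sketch limits: queue ids 0..255, port ids 0..63. */
struct sketch_dev {
	uint64_t serving_ports[256];
};

static int
port_link_unlink(struct sketch_dev *dev, unsigned port_id,
		 const uint8_t queues[], uint16_t num, bool link)
{
	uint16_t i;

	for (i = 0; i < num; i++) {
		if (link)
			dev->serving_ports[queues[i]] |= 1ull << port_id;
		else
			dev->serving_ports[queues[i]] &= ~(1ull << port_id);
	}
	return num;	/* eventdev convention: count of links changed */
}

static int
port_link(struct sketch_dev *dev, unsigned port_id,
	  const uint8_t queues[], uint16_t num)
{
	return port_link_unlink(dev, port_id, queues, num, true);
}

static int
port_unlink(struct sketch_dev *dev, unsigned port_id,
	    const uint8_t queues[], uint16_t num)
{
	return port_link_unlink(dev, port_id, queues, num, false);
}
```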
|
| /f-stack/dpdk/doc/guides/sample_app_ug/ |
| vmdq_dcb_forwarding.rst |
    8  …e application performs L2 forwarding using VMDQ and DCB to divide the incoming traffic into queues.
    17  The VMDQ and DCB filters work on MAC and VLAN traffic to divide the traffic into input queues on th…
    20  Then, DCB places each packet into one of queues within that group, based upon the VLAN user priorit…
    23  With Intel® 82599 NIC, for example, the traffic is split into 128 queues on input, where each threa…
    24  multiple queues. When run with 8 threads, that is, with the -c FF option, each thread receives and …
    27  …Ethernet Controller NIC also supports the splitting of traffic into 16 pools of 8 queues. While the
    56  Since VMD queues are being used for VMM, this application works correctly
    145  and dividing up the possible user priority values equally among the individual queues
    149  With Intel® X710/XL710 NICs, if number of tcs is 4, and number of queues in pool is 8,
    150  then the user priority fields are allocated 2 to one tc, and a tc has 2 queues mapping to it, then
    [all …]
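The pools-times-queues arithmetic in this guide reduces to one index computation: with 16 pools of 8 queues on an 82599 (128 queues total), the VLAN/MAC filter selects the pool and the VLAN user priority selects the queue within it. A tiny hedged sketch of that mapping; the NIC performs it in hardware, this only illustrates the arithmetic:

```c
#include <stdint.h>

/* Which Rx queue a packet lands in under the VMDq+DCB split described
 * above: pool from the VLAN/MAC filter, traffic class from user priority. */
static inline uint16_t
vmdq_dcb_queue_index(uint16_t pool, uint8_t tc, uint8_t queues_per_pool)
{
	return (uint16_t)(pool * queues_per_pool + tc);
}
/* Example: pool 3, traffic class 5, 8 queues per pool -> queue 29. */
```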
|
| vmdq_forwarding.rst |
    8  The application performs L2 forwarding using VMDq to divide the incoming traffic into queues.
    17  …it the incoming packets up into different "pools" - each with its own set of RX queues - based upon
    21  With Intel® 82599 NIC, for example, the traffic is split into 128 queues on input, where each threa…
    22  multiple queues. When run with 8 threads, that is, with the -c FF option, each thread receives and …
    24  As supplied, the sample application configures the VMDq feature to have 32 pools with 4 queues each.
    25  …0 Gigabit Ethernet Controller NIC also supports the splitting of traffic into 16 pools of 2 queues.
    26  … or XL710 Ethernet Controller NICs support many configurations of VMDq pools of 4 or 8 queues each.
    27  And queues numbers for each VMDq pool can be changed by setting RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
    111  For the VLAN IDs, each one can be allocated to possibly multiple pools of queues.
|
| /f-stack/dpdk/drivers/raw/ioat/ |
| dpdk_idxd_cfg.py |
    32  def configure_dsa(dsa_id, queues):  argument
    47  nb_queues = min(queues, max_queues)
    48  if queues > nb_queues:
|
| /f-stack/freebsd/contrib/device-tree/Bindings/dma/ |
| fsl-qdma.txt |
    22  - fsl,dma-queues: Should contain number of queues supported.
    28  based on queues
    52  fsl,dma-queues = <2>;
|
| /f-stack/freebsd/contrib/device-tree/Bindings/mfd/ |
| fsl-imx25-tsadc.txt |
    3  This device combines two general purpose conversion queues one used for general
    15  conversion queues.
    20  This device includes two conversion queues which can be added as subnodes.
|
| /f-stack/dpdk/drivers/net/mlx5/ |
| mlx5_rxq.c |
    1903  (!memcmp(ind_tbl->queues, queues,  in mlx5_ind_table_obj_match_queues()
    1929  (memcmp(ind_tbl->queues, queues,  in mlx5_ind_table_obj_get()
    1939  mlx5_rxq_get(dev, ind_tbl->queues[i]);  in mlx5_ind_table_obj_get()
    2020  uint16_t *queues = ind_tbl->queues;  in mlx5_ind_table_obj_setup() local
    2028  if (!mlx5_rxq_get(dev, queues[i])) {  in mlx5_ind_table_obj_setup()
    2041  mlx5_rxq_release(dev, ind_tbl->queues[j]);  in mlx5_ind_table_obj_setup()
    2078  memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));  in mlx5_ind_table_obj_new()
    2133  if (!mlx5_rxq_get(dev, queues[i])) {  in mlx5_ind_table_obj_modify()
    2145  ind_tbl->queues = queues;  in mlx5_ind_table_obj_modify()
    2238  queues, queues_n)) {  in mlx5_hrxq_modify()
    [all …]
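mlx5 reuses an existing indirection table when a new RSS rule names the same queue list, which is what the memcmp over ind_tbl->queues implements: two flows with identical queue sets share one table instead of allocating two. A simplified standalone sketch of that reuse check; the struct and function names are invented, and the real mlx5 objects carry far more state:

```c
#include <stdint.h>
#include <string.h>

struct ind_tbl_sketch {
	uint32_t queues_n;	/* number of entries in queues[] */
	uint16_t *queues;	/* Rx queue indices fanned out to */
	uint32_t refcnt;
};

/* Return a matching table (reference taken) or NULL if the caller must
 * allocate a new one. Match = same length and same queue list. */
static struct ind_tbl_sketch *
ind_tbl_lookup(struct ind_tbl_sketch **tbls, unsigned n,
	       const uint16_t *queues, uint32_t queues_n)
{
	unsigned i;

	for (i = 0; i < n; i++) {
		struct ind_tbl_sketch *t = tbls[i];

		if (t->queues_n == queues_n &&
		    memcmp(t->queues, queues,
			   queues_n * sizeof(*queues)) == 0) {
			t->refcnt++;	/* reuse: bump the reference count */
			return t;
		}
	}
	return NULL;
}
```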
|
| /f-stack/freebsd/contrib/device-tree/Bindings/net/ |
| brcm,systemport.txt |
    10  interrupts, and the second cell should be for the transmit queues. An
    21  - systemport,num-txq: number of HW transmit queues, an integer
    22  - systemport,num-rxq: number of HW receive queues, an integer
|
| fsl-fec.txt |
    14  - fsl,num-tx-queues : The property is valid for enet-avb IP, which supports
    15    hw multi queues. Should specify the tx queue number, otherwise set tx queue
    17  - fsl,num-rx-queues : The property is valid for enet-avb IP, which supports
    18    hw multi queues. Should specify the rx queue number, otherwise set rx queue
    39  tx/rx queues 1 and 2. "int0" will be used for queue 0 and ENET_MII interrupts.
    40  For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse
|
| /f-stack/dpdk/doc/guides/nics/ |
| vhost.rst |
    20  * It has multiple queues support.
    37  #. ``queues``:
    39  It is used to specify the number of queues virtio-net device has.
    93  ./dpdk-testpmd -l 0-3 -n 4 --vdev 'net_vhost0,iface=/tmp/sock0,queues=1' -- -i
    104  -netdev vhost-user,id=net0,chardev=chr0,vhostforce,queues=1 \
|
| dpaa.rst |
    48  - The Queue Manager (QMan) is a hardware accelerator that manages frame queues.
    146  - Multiple queues for TX and RX
    208  This defines the number of Rx queues configured for an application, per
    209  port. Hardware would distribute across these many number of queues on Rx
    211  In case the application is configured to use lesser number of queues than
    217  These queues use one private HW portal per queue configured, so they are
    218  limited in the system. The first configured ethdev queues will be
    219  automatically be assigned from the these high perf PUSH queues. Any queue
    220  configuration beyond that will be standard Rx queues. The application can
    224  Currently these queues are not used for LS1023/LS1043 platform by default.
    [all …]
|
| /f-stack/dpdk/drivers/net/iavf/ |
| iavf_ethdev.c |
    1589  queues++;  in iavf_parse_queue_proto_xtr()
    1591  if (*queues != '[') {  in iavf_parse_queue_proto_xtr()
    1601  queues++;  in iavf_parse_queue_proto_xtr()
    1604  queues++;  in iavf_parse_queue_proto_xtr()
    1605  if (*queues == '\0')  in iavf_parse_queue_proto_xtr()
    1612  queues += strcspn(queues, ")");  in iavf_parse_queue_proto_xtr()
    1618  queues += strcspn(queues, ":");  in iavf_parse_queue_proto_xtr()
    1622  queues++;  in iavf_parse_queue_proto_xtr()
    1641  queues += idx;  in iavf_parse_queue_proto_xtr()
    1643  while (isblank(*queues) || *queues == ',' || *queues == ']')  in iavf_parse_queue_proto_xtr()
    [all …]
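The iavf excerpt is a hand-rolled parser that steps a queues pointer through a devargs string. A self-contained sketch of the general technique on a simpler grammar; the bracket/comma/dash syntax here is an assumption for illustration, not the exact iavf queue_proto_xtr format (which, per the strcspn calls above, also involves ')' and ':'):

```c
#include <stdint.h>
#include <stdlib.h>

/* Parse a queue list such as "[0,2-4,7]" and invoke cb once per queue id.
 * Returns 0 on success, -1 on any syntax error. */
static int
parse_queue_list(const char *s, void (*cb)(uint16_t qid))
{
	char *end;
	unsigned long lo, hi;

	if (*s++ != '[')
		return -1;
	while (*s != ']' && *s != '\0') {
		lo = strtoul(s, &end, 10);
		if (end == s)
			return -1;		/* expected a number */
		s = end;
		hi = lo;
		if (*s == '-') {		/* inclusive range lo-hi */
			hi = strtoul(s + 1, &end, 10);
			if (end == s + 1 || hi < lo)
				return -1;
			s = end;
		}
		for (; lo <= hi; lo++)
			cb((uint16_t)lo);
		if (*s == ',')
			s++;
	}
	return (*s == ']') ? 0 : -1;
}
```

For example, parse_queue_list("[0,2-4]", cb) invokes cb for queue ids 0, 2, 3, and 4.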
|
| /f-stack/dpdk/drivers/raw/skeleton/ |
| skeleton_rawdev.c |
    145  skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;  in reset_queues()
    146  skeldev->queues[i].state = SKELETON_QUEUE_DETACH;  in reset_queues()
    240  skelq = &skeldev->queues[queue_id];  in skeleton_rawdev_queue_def_conf()
    275  q = &skeldev->queues[queue_id];  in skeleton_rawdev_queue_setup()
    303  skeldev->queues[queue_id].state = SKELETON_QUEUE_DETACH;  in skeleton_rawdev_queue_release()
    304  skeldev->queues[queue_id].depth = SKELETON_QUEUE_DEF_DEPTH;  in skeleton_rawdev_queue_release()
    614  skeldev->queues[i].state = SKELETON_QUEUE_DETACH;  in skeleton_rawdev_create()
    615  skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;  in skeleton_rawdev_create()
|
| /f-stack/dpdk/doc/guides/prog_guide/ |
| eventdev.rst |
    14  polling model, lcores poll ethdev ports and associated Rx queues directly
    84  then events of any type may be sent to any queue. Otherwise, the queues only
    125  from those queues (more details in `Linking Queues and Ports`_ below).
    168  In the following code, we configure eventdev instance with 3 queues
    169  and 6 ports as follows. The 3 queues consist of 2 Atomic and 1 Single-Link,
    189  Once the eventdev itself is configured, the next step is to configure queues.
    219  These queues are used for the remainder of this walk-through.
    224  Once queues are set up successfully, create the ports as required.
    266  The final step is to "wire up" the ports to the queues. After this, the
    269  from e.g.: a NIC so it is not linked to any eventdev queues.
    [all …]
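The walk-through above follows a fixed bring-up order: configure the device, set up queues, set up ports, link, then start. A condensed sketch with the public API; the counts match the guide's 3-queue/6-port example, but the config values, the default (NULL) queue/port configs, and the link-to-all-queues shortcut are illustrative simplifications of the guide's typed-queue setup:

```c
#include <rte_eventdev.h>

static int
eventdev_bringup(uint8_t dev_id)
{
	struct rte_event_dev_config cfg = {
		.nb_event_queues = 3,
		.nb_event_ports = 6,
		.nb_events_limit = 4096,	/* illustrative sizing */
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128,
		.dequeue_timeout_ns = 0,
	};
	uint8_t q, p;

	if (rte_event_dev_configure(dev_id, &cfg) < 0)
		return -1;
	for (q = 0; q < cfg.nb_event_queues; q++)
		if (rte_event_queue_setup(dev_id, q, NULL) < 0)	/* defaults */
			return -1;
	for (p = 0; p < cfg.nb_event_ports; p++) {
		if (rte_event_port_setup(dev_id, p, NULL) < 0)
			return -1;
		/* NULL queue list: link this port to all queues. */
		if (rte_event_port_link(dev_id, p, NULL, NULL, 0) < 0)
			return -1;
	}
	return rte_event_dev_start(dev_id);
}
```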
|