/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Cesnet
 * Copyright(c) 2019 Netcope Technologies, a.s. <[email protected]>
 * All rights reserved.
 */

#ifndef _NFB_RX_H_
#define _NFB_RX_H_

#include <nfb/nfb.h>
#include <nfb/ndp.h>

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include <rte_ethdev.h>

extern uint64_t nfb_timestamp_rx_dynflag;
extern int nfb_timestamp_dynfield_offset;

static inline rte_mbuf_timestamp_t *
nfb_timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		nfb_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}

struct ndp_rx_queue {
	struct nfb_device *nfb;      /* nfb dev structure */
	struct ndp_queue *queue;     /* rx queue */
	uint16_t rx_queue_id;        /* index */
	uint8_t in_port;             /* port */
	uint8_t flags;               /* setup flags */

	struct rte_mempool *mb_pool; /* memory pool to allocate packets */
	uint16_t buf_size;           /* mbuf size */

	volatile uint64_t rx_pkts;   /* packets read */
	volatile uint64_t rx_bytes;  /* bytes read */
	volatile uint64_t err_pkts;  /* erroneous packets */
};

/**
 * Initialize ndp_rx_queue structure.
 *
 * @param nfb
 *   Pointer to nfb device structure.
 * @param rx_queue_id
 *   RX queue index.
 * @param port_id
 *   Device [external] port identifier.
 * @param mb_pool
 *   Memory pool for buffer allocations.
 * @param[out] rxq
 *   Pointer to ndp_rx_queue output structure.
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
nfb_eth_rx_queue_init(struct nfb_device *nfb,
	uint16_t rx_queue_id,
	uint16_t port_id,
	struct rte_mempool *mb_pool,
	struct ndp_rx_queue *rxq);

/**
 * DPDK callback to set up an RX queue for use.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   RX queue index.
 * @param nb_rx_desc
 *   Number of descriptors to configure in queue.
 * @param socket_id
 *   NUMA socket on which memory must be allocated.
 * @param[in] rx_conf
 *   Thresholds parameters.
 * @param mb_pool
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
nfb_eth_rx_queue_setup(struct rte_eth_dev *dev,
	uint16_t rx_queue_id,
	uint16_t nb_rx_desc __rte_unused,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf __rte_unused,
	struct rte_mempool *mb_pool);

/**
 * DPDK callback to release an RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Receive queue index.
 */
void
nfb_eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

/**
 * Start traffic on Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq_id
 *   RX queue index.
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
nfb_eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id);

/**
 * Stop traffic on Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq_id
 *   RX queue index.
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
nfb_eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id);

/**
 * DPDK callback for RX.
 *
 * @param queue
 *   Generic pointer to RX queue structure.
 * @param[out] bufs
 *   Array to store received packets.
 * @param nb_pkts
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= nb_pkts).
 */
static __rte_always_inline uint16_t
nfb_eth_ndp_rx(void *queue,
	struct rte_mbuf **bufs,
	uint16_t nb_pkts)
{
	struct ndp_rx_queue *ndp = queue;
	uint16_t packet_size;
	uint64_t num_bytes = 0;
	uint16_t num_rx;
	unsigned int i;

	const uint16_t buf_size = ndp->buf_size;

	struct rte_mbuf *mbuf;
	struct ndp_packet packets[nb_pkts];

	struct rte_mbuf *mbufs[nb_pkts];

	if (unlikely(ndp->queue == NULL || nb_pkts == 0)) {
		RTE_LOG(ERR, PMD, "RX invalid arguments!\n");
		return 0;
	}

	/* returns either all or nothing */
	i = rte_pktmbuf_alloc_bulk(ndp->mb_pool, mbufs, nb_pkts);
	if (unlikely(i != 0))
		return 0;

	num_rx = ndp_rx_burst_get(ndp->queue, packets, nb_pkts);

	if (unlikely(num_rx != nb_pkts)) {
		for (i = num_rx; i < nb_pkts; i++)
			rte_pktmbuf_free(mbufs[i]);
	}

	nb_pkts = num_rx;

	num_rx = 0;
	/*
	 * Read the received packets from the NDP queue and copy each
	 * packet's data into a freshly allocated mbuf.
	 */
	for (i = 0; i < nb_pkts; ++i) {
		mbuf = mbufs[i];

		/* length of the received NDP packet */
		packet_size = packets[i].data_length;

		if (likely(packet_size <= buf_size)) {
			/* NDP packet will fit in one mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
				packets[i].data, packet_size);

			mbuf->data_len = (uint16_t)packet_size;

			mbuf->pkt_len = packet_size;
			mbuf->port = ndp->in_port;
			mbuf->ol_flags = 0;

			if (nfb_timestamp_dynfield_offset >= 0) {
				rte_mbuf_timestamp_t timestamp;

				/* nanoseconds */
				timestamp =
					rte_le_to_cpu_32(*((uint32_t *)
					(packets[i].header + 4)));
				timestamp <<= 32;
				/* seconds */
				timestamp |=
					rte_le_to_cpu_32(*((uint32_t *)
					(packets[i].header + 8)));
				*nfb_timestamp_dynfield(mbuf) = timestamp;
				mbuf->ol_flags |= nfb_timestamp_rx_dynflag;
			}

			bufs[num_rx++] = mbuf;
			num_bytes += packet_size;
		} else {
			/*
			 * NDP packet will not fit in one mbuf,
			 * scattered mode is not enabled, drop packet
			 */
			rte_pktmbuf_free(mbuf);
		}
	}

	ndp_rx_burst_put(ndp->queue);

	ndp->rx_pkts += num_rx;
	ndp->rx_bytes += num_bytes;
	return num_rx;
}

#endif /* _NFB_RX_H_ */
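
/*
 * Usage sketch (illustrative comment only, not a definitive implementation):
 * the driver's ethdev init code is expected to plug the handlers declared
 * above into the generic DPDK hooks. The struct eth_dev_ops fields and the
 * rx_pkt_burst member come from rte_ethdev; the nfb_eth_dev_ops variable and
 * the nfb_eth_dev_init() function below are hypothetical names used only for
 * this sketch. nfb_timestamp_dynfield_offset and nfb_timestamp_rx_dynflag are
 * assumed to be registered elsewhere (e.g. via
 * rte_mbuf_dyn_rx_timestamp_register()) when Rx timestamping is requested.
 *
 *	static const struct eth_dev_ops nfb_eth_dev_ops = {
 *		.rx_queue_setup   = nfb_eth_rx_queue_setup,
 *		.rx_queue_release = nfb_eth_rx_queue_release,
 *		.rx_queue_start   = nfb_eth_rx_queue_start,
 *		.rx_queue_stop    = nfb_eth_rx_queue_stop,
 *	};
 *
 *	static int
 *	nfb_eth_dev_init(struct rte_eth_dev *dev)
 *	{
 *		dev->dev_ops = &nfb_eth_dev_ops;
 *		dev->rx_pkt_burst = nfb_eth_ndp_rx;
 *		return 0;
 *	}
 */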