/* File: /f-stack/dpdk/drivers/net/e1000/igb_rxtx.c (revision 2d9fd380) */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <stdarg.h>
13 #include <inttypes.h>
14 
15 #include <rte_interrupts.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_pci.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
32 #include <rte_mbuf.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev_driver.h>
35 #include <rte_prefetch.h>
36 #include <rte_udp.h>
37 #include <rte_tcp.h>
38 #include <rte_sctp.h>
39 #include <rte_net.h>
40 #include <rte_string_fns.h>
41 
42 #include "e1000_logs.h"
43 #include "base/e1000_api.h"
44 #include "e1000_ethdev.h"
45 
46 #ifdef RTE_LIBRTE_IEEE1588
47 #define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
48 #else
49 #define IGB_TX_IEEE1588_TMST 0
50 #endif
51 /* Bit mask indicating which bits are required for building the TX context */
52 #define IGB_TX_OFFLOAD_MASK (			 \
53 		PKT_TX_OUTER_IPV6 |	 \
54 		PKT_TX_OUTER_IPV4 |	 \
55 		PKT_TX_IPV6 |		 \
56 		PKT_TX_IPV4 |		 \
57 		PKT_TX_VLAN_PKT |		 \
58 		PKT_TX_IP_CKSUM |		 \
59 		PKT_TX_L4_MASK |		 \
60 		PKT_TX_TCP_SEG |		 \
61 		IGB_TX_IEEE1588_TMST)
62 
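/*
 * Any offload flag outside IGB_TX_OFFLOAD_MASK is unsupported;
 * eth_igb_prep_pkts() rejects packets that carry such flags.
 */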
63 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
64 		(PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
65 
66 /**
67  * Structure associated with each descriptor of the RX ring of a RX queue.
68  */
69 struct igb_rx_entry {
70 	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
71 };
72 
73 /**
74  * Structure associated with each descriptor of the TX ring of a TX queue.
75  */
76 struct igb_tx_entry {
77 	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
78 	uint16_t next_id; /**< Index of next descriptor in ring. */
79 	uint16_t last_id; /**< Index of last scattered descriptor. */
80 };
81 
82 /**
83  * rx queue flags
84  */
85 enum igb_rxq_flags {
86 	IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
87 };
88 
89 /**
90  * Structure associated with each RX queue.
91  */
92 struct igb_rx_queue {
93 	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
94 	volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
95 	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
96 	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
97 	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
98 	struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
99 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
100 	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
101 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
102 	uint16_t            rx_tail;    /**< current value of RDT register. */
103 	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
104 	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
105 	uint16_t            queue_id;   /**< RX queue index. */
106 	uint16_t            reg_idx;    /**< RX queue register index. */
107 	uint16_t            port_id;    /**< Device port identifier. */
108 	uint8_t             pthresh;    /**< Prefetch threshold register. */
109 	uint8_t             hthresh;    /**< Host threshold register. */
110 	uint8_t             wthresh;    /**< Write-back threshold register. */
111 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
112 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
113 	uint32_t            flags;      /**< RX flags. */
114 	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
115 };
116 
117 /**
118  * Hardware context number
119  */
120 enum igb_advctx_num {
121 	IGB_CTX_0    = 0, /**< CTX0    */
122 	IGB_CTX_1    = 1, /**< CTX1    */
123 	IGB_CTX_NUM  = 2, /**< CTX_NUM */
124 };
125 
126 /** Offload features */
127 union igb_tx_offload {
128 	uint64_t data;
129 	struct {
130 		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
131 		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
132 		uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier(CPU order). */
133 		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
134 		uint64_t tso_segsz:16; /**< TCP TSO segment size. */
135 
136 		/* uint64_t unused:8; */
137 	};
138 };
139 
140 /*
141  * Compare masks for igb_tx_offload.data;
142  * they must be kept in sync with the igb_tx_offload layout.
143  */
144 #define TX_MACIP_LEN_CMP_MASK	0x000000000000FFFFULL /**< L2L3 header mask. */
145 #define TX_VLAN_CMP_MASK		0x00000000FFFF0000ULL /**< Vlan mask. */
146 #define TX_TCP_LEN_CMP_MASK		0x000000FF00000000ULL /**< TCP header mask. */
147 #define TX_TSO_MSS_CMP_MASK		0x00FFFF0000000000ULL /**< TSO segsz mask. */
148 /** Mac + IP + TCP + Mss mask. */
149 #define TX_TSO_CMP_MASK	\
150 	(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
151 
152 /**
153  * Structure used to check whether a new context descriptor needs to be built.
154  */
155 struct igb_advctx_info {
156 	uint64_t flags;           /**< ol_flags related to context build. */
157 	/** tx offload: vlan, tso, l2-l3-l4 lengths. */
158 	union igb_tx_offload tx_offload;
159 	/** compare mask for tx offload. */
160 	union igb_tx_offload tx_offload_mask;
161 };
162 
163 /**
164  * Structure associated with each TX queue.
165  */
166 struct igb_tx_queue {
167 	volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
168 	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
169 	struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
170 	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
171 	uint32_t               txd_type;      /**< Device-specific TXD type */
172 	uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
173 	uint16_t               tx_tail; /**< Current value of TDT register. */
174 	uint16_t               tx_head;
175 	/**< Index of first used TX descriptor. */
176 	uint16_t               queue_id; /**< TX queue index. */
177 	uint16_t               reg_idx;  /**< TX queue register index. */
178 	uint16_t               port_id;  /**< Device port identifier. */
179 	uint8_t                pthresh;  /**< Prefetch threshold register. */
180 	uint8_t                hthresh;  /**< Host threshold register. */
181 	uint8_t                wthresh;  /**< Write-back threshold register. */
182 	uint32_t               ctx_curr;
183 	/**< Currently used hardware context. */
184 	uint32_t               ctx_start;
185 	/**< Start context position for transmit queue. */
186 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
187 	/**< Hardware context history.*/
188 	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
189 };
190 
191 #if 1
192 #define RTE_PMD_USE_PREFETCH
193 #endif
194 
195 #ifdef RTE_PMD_USE_PREFETCH
196 #define rte_igb_prefetch(p)	rte_prefetch0(p)
197 #else
198 #define rte_igb_prefetch(p)	do {} while(0)
199 #endif
200 
201 #ifdef RTE_PMD_PACKET_PREFETCH
202 #define rte_packet_prefetch(p) rte_prefetch1(p)
203 #else
204 #define rte_packet_prefetch(p)	do {} while(0)
205 #endif
206 
207 /*
208  * Macros for the VMDq feature and TSO limits of 1 GbE NICs.
209  */
210 #define E1000_VMOLR_SIZE			(8)
211 #define IGB_TSO_MAX_HDRLEN			(512)
212 #define IGB_TSO_MAX_MSS				(9216)
213 
214 /*********************************************************************
215  *
216  *  TX function
217  *
218  **********************************************************************/
219 
220 /*
221  * There are some hardware limitations for TCP segmentation offload, so
222  * we should check whether the parameters are valid.
223  */
224 static inline uint64_t
225 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
226 {
227 	if (!(ol_req & PKT_TX_TCP_SEG))
228 		return ol_req;
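	/*
	 * If the TSO parameters exceed the hardware limits, downgrade the
	 * request to a plain TCP checksum offload.
	 */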
229 	if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
230 			ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
231 		ol_req &= ~PKT_TX_TCP_SEG;
232 		ol_req |= PKT_TX_TCP_CKSUM;
233 	}
234 	return ol_req;
235 }
236 
237 /*
238  * Advanced context descriptors are almost the same between igb and ixgbe.
239  * This is kept as a separate function; there may be an optimization opportunity here.
240  * Rework is required to go with the pre-defined values.
241  */
242 
243 static inline void
244 igbe_set_xmit_ctx(struct igb_tx_queue *txq,
245 		volatile struct e1000_adv_tx_context_desc *ctx_txd,
246 		uint64_t ol_flags, union igb_tx_offload tx_offload)
247 {
248 	uint32_t type_tucmd_mlhl;
249 	uint32_t mss_l4len_idx;
250 	uint32_t ctx_idx, ctx_curr;
251 	uint32_t vlan_macip_lens;
252 	union igb_tx_offload tx_offload_mask;
253 
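	/*
	 * ctx_start is the per-queue base of the hardware context indexes;
	 * it is non-zero only on 82575, where each TX queue owns two
	 * contexts (see igb_reset_tx_queue()).
	 */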
254 	ctx_curr = txq->ctx_curr;
255 	ctx_idx = ctx_curr + txq->ctx_start;
256 
257 	tx_offload_mask.data = 0;
258 	type_tucmd_mlhl = 0;
259 
260 	/* Specify which HW CTX to upload. */
261 	mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
262 
263 	if (ol_flags & PKT_TX_VLAN_PKT)
264 		tx_offload_mask.data |= TX_VLAN_CMP_MASK;
265 
266 	/* check if TCP segmentation is required for this packet */
267 	if (ol_flags & PKT_TX_TCP_SEG) {
268 		/* implies IP cksum in IPv4 */
269 		if (ol_flags & PKT_TX_IP_CKSUM)
270 			type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
271 				E1000_ADVTXD_TUCMD_L4T_TCP |
272 				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
273 		else
274 			type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
275 				E1000_ADVTXD_TUCMD_L4T_TCP |
276 				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
277 
278 		tx_offload_mask.data |= TX_TSO_CMP_MASK;
279 		mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
280 		mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
281 	} else { /* no TSO, check if hardware checksum is needed */
282 		if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
283 			tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
284 
285 		if (ol_flags & PKT_TX_IP_CKSUM)
286 			type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
287 
288 		switch (ol_flags & PKT_TX_L4_MASK) {
289 		case PKT_TX_UDP_CKSUM:
290 			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
291 				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
292 			mss_l4len_idx |= sizeof(struct rte_udp_hdr)
293 				<< E1000_ADVTXD_L4LEN_SHIFT;
294 			break;
295 		case PKT_TX_TCP_CKSUM:
296 			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
297 				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
298 			mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
299 				<< E1000_ADVTXD_L4LEN_SHIFT;
300 			break;
301 		case PKT_TX_SCTP_CKSUM:
302 			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
303 				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
304 			mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
305 				<< E1000_ADVTXD_L4LEN_SHIFT;
306 			break;
307 		default:
308 			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
309 				E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
310 			break;
311 		}
312 	}
313 
314 	txq->ctx_cache[ctx_curr].flags = ol_flags;
315 	txq->ctx_cache[ctx_curr].tx_offload.data =
316 		tx_offload_mask.data & tx_offload.data;
317 	txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
318 
319 	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
320 	vlan_macip_lens = (uint32_t)tx_offload.data;
321 	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
322 	ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
323 	ctx_txd->u.seqnum_seed = 0;
324 }
325 
326 /*
327  * Check which hardware context can be used. Use the existing match
328  * or create a new context descriptor.
329  */
330 static inline uint32_t
331 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
332 		union igb_tx_offload tx_offload)
333 {
334 	/* If match with the current context */
335 	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
336 		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
337 		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
338 			return txq->ctx_curr;
339 	}
340 
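	/*
	 * No match: toggle to the other cached slot. If that slot does not
	 * match either, the caller builds a new context descriptor there.
	 */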
341 	/* If match with the second context */
342 	txq->ctx_curr ^= 1;
343 	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
344 		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
345 		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
346 			return txq->ctx_curr;
347 	}
348 
349 	/* Mismatch with both cached contexts: a new one must be built */
350 	return IGB_CTX_NUM;
351 }
352 
353 static inline uint32_t
354 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
355 {
356 	static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
357 	static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
358 	uint32_t tmp;
359 
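	/* Branch-free lookup: index 0/1 selects "no offload" vs. the POPTS bit. */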
360 	tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
361 	tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
362 	tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
363 	return tmp;
364 }
365 
366 static inline uint32_t
367 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
368 {
369 	uint32_t cmdtype;
370 	static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
371 	static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
372 	cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
373 	cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
374 	return cmdtype;
375 }
376 
377 uint16_t
378 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
379 	       uint16_t nb_pkts)
380 {
381 	struct igb_tx_queue *txq;
382 	struct igb_tx_entry *sw_ring;
383 	struct igb_tx_entry *txe, *txn;
384 	volatile union e1000_adv_tx_desc *txr;
385 	volatile union e1000_adv_tx_desc *txd;
386 	struct rte_mbuf     *tx_pkt;
387 	struct rte_mbuf     *m_seg;
388 	uint64_t buf_dma_addr;
389 	uint32_t olinfo_status;
390 	uint32_t cmd_type_len;
391 	uint32_t pkt_len;
392 	uint16_t slen;
393 	uint64_t ol_flags;
394 	uint16_t tx_end;
395 	uint16_t tx_id;
396 	uint16_t tx_last;
397 	uint16_t nb_tx;
398 	uint64_t tx_ol_req;
399 	uint32_t new_ctx = 0;
400 	uint32_t ctx = 0;
401 	union igb_tx_offload tx_offload = {0};
402 
403 	txq = tx_queue;
404 	sw_ring = txq->sw_ring;
405 	txr     = txq->tx_ring;
406 	tx_id   = txq->tx_tail;
407 	txe = &sw_ring[tx_id];
408 
409 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
410 		tx_pkt = *tx_pkts++;
411 		pkt_len = tx_pkt->pkt_len;
412 
413 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
414 
415 		/*
416 		 * The number of descriptors that must be allocated for a
417 		 * packet is the number of segments of that packet, plus 1
418 		 * Context Descriptor for the VLAN Tag Identifier, if any.
419 		 * Determine the last TX descriptor to allocate in the TX ring
420 		 * for the packet, starting from the current position (tx_id)
421 		 * in the ring.
422 		 */
423 		tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
424 
425 		ol_flags = tx_pkt->ol_flags;
426 		tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
427 
428 		/* Check whether a context descriptor needs to be built. */
429 		if (tx_ol_req) {
430 			tx_offload.l2_len = tx_pkt->l2_len;
431 			tx_offload.l3_len = tx_pkt->l3_len;
432 			tx_offload.l4_len = tx_pkt->l4_len;
433 			tx_offload.vlan_tci = tx_pkt->vlan_tci;
434 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
435 			tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
436 
437 			ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
438 			/* Only allocate a context descriptor if required. */
439 			new_ctx = (ctx == IGB_CTX_NUM);
440 			ctx = txq->ctx_curr + txq->ctx_start;
441 			tx_last = (uint16_t) (tx_last + new_ctx);
442 		}
443 		if (tx_last >= txq->nb_tx_desc)
444 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
445 
446 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
447 			   " tx_first=%u tx_last=%u",
448 			   (unsigned) txq->port_id,
449 			   (unsigned) txq->queue_id,
450 			   (unsigned) pkt_len,
451 			   (unsigned) tx_id,
452 			   (unsigned) tx_last);
453 
454 		/*
455 		 * Check if there are enough free descriptors in the TX ring
456 		 * to transmit the next packet.
457 		 * This operation is based on the two following rules:
458 		 *
459 		 *   1- Only check that the last needed TX descriptor can be
460 		 *      allocated (by construction, if that descriptor is free,
461 		 *      all intermediate ones are also free).
462 		 *
463 		 *      For this purpose, the index of the last TX descriptor
464 		 *      used for a packet (the "last descriptor" of a packet)
465 		 *      is recorded in the TX entries (the last one included)
466 		 *      that are associated with all TX descriptors allocated
467 		 *      for that packet.
468 		 *
469 		 *   2- Avoid to allocate the last free TX descriptor of the
470 		 *      ring, in order to never set the TDT register with the
471 		 *      same value stored in parallel by the NIC in the TDH
472 		 *      register, which makes the TX engine of the NIC enter
473 		 *      in a deadlock situation.
474 		 *
475 		 *      By extension, avoid to allocate a free descriptor that
476 		 *      belongs to the last set of free descriptors allocated
477 		 *      to the same packet previously transmitted.
478 		 */
479 
480 		/*
481 		 * The "last descriptor" of the packet that previously used the
482 		 * descriptor we now want as our last one, if any.
483 		 */
484 		tx_end = sw_ring[tx_last].last_id;
485 
486 		/*
487 		 * The next descriptor following that "last descriptor" in the
488 		 * ring.
489 		 */
490 		tx_end = sw_ring[tx_end].next_id;
491 
492 		/*
493 		 * The "last descriptor" associated with that next descriptor.
494 		 */
495 		tx_end = sw_ring[tx_end].last_id;
496 
497 		/*
498 		 * Check that this descriptor is free.
499 		 */
500 		if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
501 			if (nb_tx == 0)
502 				return 0;
503 			goto end_of_tx;
504 		}
505 
506 		/*
507 		 * Set common flags of all TX Data Descriptors.
508 		 *
509 		 * The following bits must be set in all Data Descriptors:
510 		 *   - E1000_ADVTXD_DTYP_DATA
511 		 *   - E1000_ADVTXD_DCMD_DEXT
512 		 *
513 		 * The following bits must be set in the first Data Descriptor
514 		 * and are ignored in the other ones:
515 		 *   - E1000_ADVTXD_DCMD_IFCS
516 		 *   - E1000_ADVTXD_MAC_1588
517 		 *   - E1000_ADVTXD_DCMD_VLE
518 		 *
519 		 * The following bits must only be set in the last Data
520 		 * Descriptor:
521 		 *   - E1000_TXD_CMD_EOP
522 		 *
523 		 * The following bits can be set in any Data Descriptor, but
524 		 * are only set in the last Data Descriptor:
525 		 *   - E1000_TXD_CMD_RS
526 		 */
527 		cmd_type_len = txq->txd_type |
528 			E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
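		/*
		 * For TSO, the descriptor PAYLEN field holds only the L4
		 * payload size, so the header lengths are subtracted below.
		 */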
529 		if (tx_ol_req & PKT_TX_TCP_SEG)
530 			pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
531 		olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
532 #if defined(RTE_LIBRTE_IEEE1588)
533 		if (ol_flags & PKT_TX_IEEE1588_TMST)
534 			cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
535 #endif
536 		if (tx_ol_req) {
537 			/* Setup TX Advanced context descriptor if required */
538 			if (new_ctx) {
539 				volatile struct e1000_adv_tx_context_desc *
540 				    ctx_txd;
541 
542 				ctx_txd = (volatile struct
543 				    e1000_adv_tx_context_desc *)
544 				    &txr[tx_id];
545 
546 				txn = &sw_ring[txe->next_id];
547 				RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
548 
549 				if (txe->mbuf != NULL) {
550 					rte_pktmbuf_free_seg(txe->mbuf);
551 					txe->mbuf = NULL;
552 				}
553 
554 				igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
555 
556 				txe->last_id = tx_last;
557 				tx_id = txe->next_id;
558 				txe = txn;
559 			}
560 
561 			/* Setup the TX Advanced Data Descriptor */
562 			cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
563 			olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
564 			olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
565 		}
566 
567 		m_seg = tx_pkt;
568 		do {
569 			txn = &sw_ring[txe->next_id];
570 			txd = &txr[tx_id];
571 
572 			if (txe->mbuf != NULL)
573 				rte_pktmbuf_free_seg(txe->mbuf);
574 			txe->mbuf = m_seg;
575 
576 			/*
577 			 * Set up transmit descriptor.
578 			 */
579 			slen = (uint16_t) m_seg->data_len;
580 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
581 			txd->read.buffer_addr =
582 				rte_cpu_to_le_64(buf_dma_addr);
583 			txd->read.cmd_type_len =
584 				rte_cpu_to_le_32(cmd_type_len | slen);
585 			txd->read.olinfo_status =
586 				rte_cpu_to_le_32(olinfo_status);
587 			txe->last_id = tx_last;
588 			tx_id = txe->next_id;
589 			txe = txn;
590 			m_seg = m_seg->next;
591 		} while (m_seg != NULL);
592 
593 		/*
594 		 * The last packet data descriptor needs End Of Packet (EOP)
595 		 * and Report Status (RS).
596 		 */
597 		txd->read.cmd_type_len |=
598 			rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
599 	}
600  end_of_tx:
601 	rte_wmb();
602 
603 	/*
604 	 * Set the Transmit Descriptor Tail (TDT).
605 	 */
606 	E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
607 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
608 		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
609 		   (unsigned) tx_id, (unsigned) nb_tx);
610 	txq->tx_tail = tx_id;
611 
612 	return nb_tx;
613 }
614 
615 /*********************************************************************
616  *
617  *  TX prep functions
618  *
619  **********************************************************************/
620 uint16_t
621 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
622 		uint16_t nb_pkts)
623 {
624 	int i, ret;
625 	struct rte_mbuf *m;
626 
627 	for (i = 0; i < nb_pkts; i++) {
628 		m = tx_pkts[i];
629 
630 		/* Check some limitations for TSO in hardware */
631 		if (m->ol_flags & PKT_TX_TCP_SEG)
632 			if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
633 					(m->l2_len + m->l3_len + m->l4_len >
634 					IGB_TSO_MAX_HDRLEN)) {
635 				rte_errno = EINVAL;
636 				return i;
637 			}
638 
639 		if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
640 			rte_errno = ENOTSUP;
641 			return i;
642 		}
643 
644 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
645 		ret = rte_validate_tx_offload(m);
646 		if (ret != 0) {
647 			rte_errno = -ret;
648 			return i;
649 		}
650 #endif
651 		ret = rte_net_intel_cksum_prepare(m);
652 		if (ret != 0) {
653 			rte_errno = -ret;
654 			return i;
655 		}
656 	}
657 
658 	return i;
659 }
660 
661 /*********************************************************************
662  *
663  *  RX functions
664  *
665  **********************************************************************/
666 #define IGB_PACKET_TYPE_IPV4              0X01
667 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
668 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
669 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
670 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
671 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
672 #define IGB_PACKET_TYPE_IPV6              0X04
673 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
674 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
675 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
676 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
677 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
678 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
679 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
680 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
681 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
682 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
683 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
684 #define IGB_PACKET_TYPE_MAX               0X80
685 #define IGB_PACKET_TYPE_MASK              0X7F
686 #define IGB_PACKET_TYPE_SHIFT             0X04
687 static inline uint32_t
688 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
689 {
690 	static const uint32_t
691 		ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
692 		[IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
693 			RTE_PTYPE_L3_IPV4,
694 		[IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
695 			RTE_PTYPE_L3_IPV4_EXT,
696 		[IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
697 			RTE_PTYPE_L3_IPV6,
698 		[IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
699 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
700 			RTE_PTYPE_INNER_L3_IPV6,
701 		[IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
702 			RTE_PTYPE_L3_IPV6_EXT,
703 		[IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
704 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
705 			RTE_PTYPE_INNER_L3_IPV6_EXT,
706 		[IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
707 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
708 		[IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
709 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
710 		[IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
711 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
712 			RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
713 		[IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
714 			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
715 		[IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
716 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
717 			RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
718 		[IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
719 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
720 		[IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
721 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
722 		[IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
723 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
724 			RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
725 		[IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
726 			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
727 		[IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
728 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
729 			RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
730 		[IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
731 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
732 		[IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
733 			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
734 	};
735 	if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
736 		return RTE_PTYPE_UNKNOWN;
737 
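	/*
	 * The hardware packet type starts at bit 4 of pkt_info; shift and
	 * mask it down to index the translation table above.
	 */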
738 	pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
739 
740 	return ptype_table[pkt_info];
741 }
742 
743 static inline uint64_t
744 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
745 {
746 	uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
747 
748 #if defined(RTE_LIBRTE_IEEE1588)
749 	static uint32_t ip_pkt_etqf_map[8] = {
750 		0, 0, 0, PKT_RX_IEEE1588_PTP,
751 		0, 0, 0, 0,
752 	};
753 
754 	struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
755 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
756 
757 	/* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
758 	if (hw->mac.type == e1000_i210)
759 		pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
760 	else
761 		pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
762 #else
763 	RTE_SET_USED(rxq);
764 #endif
765 
766 	return pkt_flags;
767 }
768 
769 static inline uint64_t
770 rx_desc_status_to_pkt_flags(uint32_t rx_status)
771 {
772 	uint64_t pkt_flags;
773 
774 	/* Check if VLAN present */
775 	pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
776 		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
777 
778 #if defined(RTE_LIBRTE_IEEE1588)
779 	if (rx_status & E1000_RXD_STAT_TMST)
780 		pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
781 #endif
782 	return pkt_flags;
783 }
784 
785 static inline uint64_t
786 rx_desc_error_to_pkt_flags(uint32_t rx_status)
787 {
788 	/*
789 	 * Bit 30: IPE, IPv4 checksum error
790 	 * Bit 29: L4I, L4I integrity error
791 	 */
792 
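	/*
	 * The table is indexed by the two checksum error bits: bit 1 gives
	 * the IP checksum result, bit 0 the L4 checksum result.
	 */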
793 	static uint64_t error_to_pkt_flags_map[4] = {
794 		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
795 		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
796 		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
797 		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
798 	};
799 	return error_to_pkt_flags_map[(rx_status >>
800 		E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
801 }
802 
803 uint16_t
804 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
805 	       uint16_t nb_pkts)
806 {
807 	struct igb_rx_queue *rxq;
808 	volatile union e1000_adv_rx_desc *rx_ring;
809 	volatile union e1000_adv_rx_desc *rxdp;
810 	struct igb_rx_entry *sw_ring;
811 	struct igb_rx_entry *rxe;
812 	struct rte_mbuf *rxm;
813 	struct rte_mbuf *nmb;
814 	union e1000_adv_rx_desc rxd;
815 	uint64_t dma_addr;
816 	uint32_t staterr;
817 	uint32_t hlen_type_rss;
818 	uint16_t pkt_len;
819 	uint16_t rx_id;
820 	uint16_t nb_rx;
821 	uint16_t nb_hold;
822 	uint64_t pkt_flags;
823 
824 	nb_rx = 0;
825 	nb_hold = 0;
826 	rxq = rx_queue;
827 	rx_id = rxq->rx_tail;
828 	rx_ring = rxq->rx_ring;
829 	sw_ring = rxq->sw_ring;
830 	while (nb_rx < nb_pkts) {
831 		/*
832 		 * The order of operations here is important as the DD status
833 		 * bit must not be read after any other descriptor fields.
834 		 * rx_ring and rxdp are pointing to volatile data so the order
835 		 * of accesses cannot be reordered by the compiler. If they were
836 		 * not volatile, they could be reordered which could lead to
837 		 * using invalid descriptor fields when read from rxd.
838 		 */
839 		rxdp = &rx_ring[rx_id];
840 		staterr = rxdp->wb.upper.status_error;
841 		if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
842 			break;
843 		rxd = *rxdp;
844 
845 		/*
846 		 * End of packet.
847 		 *
848 		 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
849 		 * likely to be invalid and to be dropped by the various
850 		 * validation checks performed by the network stack.
851 		 *
852 		 * Allocate a new mbuf to replenish the RX ring descriptor.
853 		 * If the allocation fails:
854 		 *    - arrange for that RX descriptor to be the first one
855 		 *      being parsed the next time the receive function is
856 		 *      invoked [on the same queue].
857 		 *
858 		 *    - Stop parsing the RX ring and return immediately.
859 		 *
860 		 * This policy does not drop the packet received in the RX
861 		 * descriptor for which the allocation of a new mbuf failed.
862 		 * Thus, it allows that packet to be retrieved later if
863 		 * mbufs have been freed in the meantime.
864 		 * As a side effect, holding RX descriptors instead of
865 		 * systematically giving them back to the NIC may lead to
866 		 * RX ring exhaustion situations.
867 		 * However, the NIC can gracefully prevent such situations
868 		 * from happening by sending specific "back-pressure" flow
869 		 * control frames to its peer(s).
870 		 */
871 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
872 			   "staterr=0x%x pkt_len=%u",
873 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
874 			   (unsigned) rx_id, (unsigned) staterr,
875 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
876 
877 		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
878 		if (nmb == NULL) {
879 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
880 				   "queue_id=%u", (unsigned) rxq->port_id,
881 				   (unsigned) rxq->queue_id);
882 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
883 			break;
884 		}
885 
886 		nb_hold++;
887 		rxe = &sw_ring[rx_id];
888 		rx_id++;
889 		if (rx_id == rxq->nb_rx_desc)
890 			rx_id = 0;
891 
892 		/* Prefetch next mbuf while processing current one. */
893 		rte_igb_prefetch(sw_ring[rx_id].mbuf);
894 
895 		/*
896 		 * When next RX descriptor is on a cache-line boundary,
897 		 * prefetch the next 4 RX descriptors and the next 8 pointers
898 		 * to mbufs.
899 		 */
900 		if ((rx_id & 0x3) == 0) {
901 			rte_igb_prefetch(&rx_ring[rx_id]);
902 			rte_igb_prefetch(&sw_ring[rx_id]);
903 		}
904 
905 		rxm = rxe->mbuf;
906 		rxe->mbuf = nmb;
907 		dma_addr =
908 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
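		/*
		 * Re-arm the descriptor with the new buffer. Writing 0 to
		 * hdr_addr also clears the stale write-back status bits that
		 * share the same descriptor words.
		 */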
909 		rxdp->read.hdr_addr = 0;
910 		rxdp->read.pkt_addr = dma_addr;
911 
912 		/*
913 		 * Initialize the returned mbuf.
914 		 * 1) setup generic mbuf fields:
915 		 *    - number of segments,
916 		 *    - next segment,
917 		 *    - packet length,
918 		 *    - RX port identifier.
919 		 * 2) integrate hardware offload data, if any:
920 		 *    - RSS flag & hash,
921 		 *    - IP checksum flag,
922 		 *    - VLAN TCI, if any,
923 		 *    - error flags.
924 		 */
925 		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
926 				      rxq->crc_len);
927 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
928 		rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
929 		rxm->nb_segs = 1;
930 		rxm->next = NULL;
931 		rxm->pkt_len = pkt_len;
932 		rxm->data_len = pkt_len;
933 		rxm->port = rxq->port_id;
934 
935 		rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
936 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
937 
938 		/*
939 		 * The vlan_tci field is only valid when PKT_RX_VLAN is
940 		 * set in the pkt_flags field and must be in CPU byte order.
941 		 */
942 		if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
943 				(rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
944 			rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
945 		} else {
946 			rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
947 		}
948 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
949 		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
950 		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
951 		rxm->ol_flags = pkt_flags;
952 		rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
953 						lo_dword.hs_rss.pkt_info);
954 
955 		/*
956 		 * Store the mbuf address into the next entry of the array
957 		 * of returned packets.
958 		 */
959 		rx_pkts[nb_rx++] = rxm;
960 	}
961 	rxq->rx_tail = rx_id;
962 
963 	/*
964 	 * If the number of free RX descriptors is greater than the RX free
965 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
966 	 * register.
967 	 * Update the RDT with the value of the last processed RX descriptor
968 	 * minus 1, to guarantee that the RDT register is never equal to the
969 	 * RDH register, which creates a "full" ring situation from the
970 	 * hardware point of view...
971 	 */
972 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
973 	if (nb_hold > rxq->rx_free_thresh) {
974 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
975 			   "nb_hold=%u nb_rx=%u",
976 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
977 			   (unsigned) rx_id, (unsigned) nb_hold,
978 			   (unsigned) nb_rx);
979 		rx_id = (uint16_t) ((rx_id == 0) ?
980 				     (rxq->nb_rx_desc - 1) : (rx_id - 1));
981 		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
982 		nb_hold = 0;
983 	}
984 	rxq->nb_rx_hold = nb_hold;
985 	return nb_rx;
986 }
987 
988 uint16_t
989 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
990 			 uint16_t nb_pkts)
991 {
992 	struct igb_rx_queue *rxq;
993 	volatile union e1000_adv_rx_desc *rx_ring;
994 	volatile union e1000_adv_rx_desc *rxdp;
995 	struct igb_rx_entry *sw_ring;
996 	struct igb_rx_entry *rxe;
997 	struct rte_mbuf *first_seg;
998 	struct rte_mbuf *last_seg;
999 	struct rte_mbuf *rxm;
1000 	struct rte_mbuf *nmb;
1001 	union e1000_adv_rx_desc rxd;
1002 	uint64_t dma; /* Physical address of mbuf data buffer */
1003 	uint32_t staterr;
1004 	uint32_t hlen_type_rss;
1005 	uint16_t rx_id;
1006 	uint16_t nb_rx;
1007 	uint16_t nb_hold;
1008 	uint16_t data_len;
1009 	uint64_t pkt_flags;
1010 
1011 	nb_rx = 0;
1012 	nb_hold = 0;
1013 	rxq = rx_queue;
1014 	rx_id = rxq->rx_tail;
1015 	rx_ring = rxq->rx_ring;
1016 	sw_ring = rxq->sw_ring;
1017 
1018 	/*
1019 	 * Retrieve RX context of current packet, if any.
1020 	 */
1021 	first_seg = rxq->pkt_first_seg;
1022 	last_seg = rxq->pkt_last_seg;
1023 
1024 	while (nb_rx < nb_pkts) {
1025 	next_desc:
1026 		/*
1027 		 * The order of operations here is important as the DD status
1028 		 * bit must not be read after any other descriptor fields.
1029 		 * rx_ring and rxdp are pointing to volatile data so the order
1030 		 * of accesses cannot be reordered by the compiler. If they were
1031 		 * not volatile, they could be reordered which could lead to
1032 		 * using invalid descriptor fields when read from rxd.
1033 		 */
1034 		rxdp = &rx_ring[rx_id];
1035 		staterr = rxdp->wb.upper.status_error;
1036 		if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1037 			break;
1038 		rxd = *rxdp;
1039 
1040 		/*
1041 		 * Descriptor done.
1042 		 *
1043 		 * Allocate a new mbuf to replenish the RX ring descriptor.
1044 		 * If the allocation fails:
1045 		 *    - arrange for that RX descriptor to be the first one
1046 		 *      being parsed the next time the receive function is
1047 		 *      invoked [on the same queue].
1048 		 *
1049 		 *    - Stop parsing the RX ring and return immediately.
1050 		 *
1051 		 * This policy does not drop the packet received in the RX
1052 		 * descriptor for which the allocation of a new mbuf failed.
1053 		 * Thus, it allows that packet to be retrieved later if
1054 		 * mbufs have been freed in the meantime.
1055 		 * As a side effect, holding RX descriptors instead of
1056 		 * systematically giving them back to the NIC may lead to
1057 		 * RX ring exhaustion situations.
1058 		 * However, the NIC can gracefully prevent such situations
1059 		 * from happening by sending specific "back-pressure" flow
1060 		 * control frames to its peer(s).
1061 		 */
1062 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1063 			   "staterr=0x%x data_len=%u",
1064 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1065 			   (unsigned) rx_id, (unsigned) staterr,
1066 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1067 
1068 		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1069 		if (nmb == NULL) {
1070 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1071 				   "queue_id=%u", (unsigned) rxq->port_id,
1072 				   (unsigned) rxq->queue_id);
1073 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1074 			break;
1075 		}
1076 
1077 		nb_hold++;
1078 		rxe = &sw_ring[rx_id];
1079 		rx_id++;
1080 		if (rx_id == rxq->nb_rx_desc)
1081 			rx_id = 0;
1082 
1083 		/* Prefetch next mbuf while processing current one. */
1084 		rte_igb_prefetch(sw_ring[rx_id].mbuf);
1085 
1086 		/*
1087 		 * When next RX descriptor is on a cache-line boundary,
1088 		 * prefetch the next 4 RX descriptors and the next 8 pointers
1089 		 * to mbufs.
1090 		 */
1091 		if ((rx_id & 0x3) == 0) {
1092 			rte_igb_prefetch(&rx_ring[rx_id]);
1093 			rte_igb_prefetch(&sw_ring[rx_id]);
1094 		}
1095 
1096 		/*
1097 		 * Update RX descriptor with the physical address of the new
1098 		 * data buffer of the new allocated mbuf.
1099 		 */
1100 		rxm = rxe->mbuf;
1101 		rxe->mbuf = nmb;
1102 		dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1103 		rxdp->read.pkt_addr = dma;
1104 		rxdp->read.hdr_addr = 0;
1105 
1106 		/*
1107 		 * Set data length & data buffer address of mbuf.
1108 		 */
1109 		data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1110 		rxm->data_len = data_len;
1111 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1112 
1113 		/*
1114 		 * If this is the first buffer of the received packet,
1115 		 * set the pointer to the first mbuf of the packet and
1116 		 * initialize its context.
1117 		 * Otherwise, update the total length and the number of segments
1118 		 * of the current scattered packet, and update the pointer to
1119 		 * the last mbuf of the current packet.
1120 		 */
1121 		if (first_seg == NULL) {
1122 			first_seg = rxm;
1123 			first_seg->pkt_len = data_len;
1124 			first_seg->nb_segs = 1;
1125 		} else {
1126 			first_seg->pkt_len += data_len;
1127 			first_seg->nb_segs++;
1128 			last_seg->next = rxm;
1129 		}
1130 
1131 		/*
1132 		 * If this is not the last buffer of the received packet,
1133 		 * update the pointer to the last mbuf of the current scattered
1134 		 * packet and continue to parse the RX ring.
1135 		 */
1136 		if (! (staterr & E1000_RXD_STAT_EOP)) {
1137 			last_seg = rxm;
1138 			goto next_desc;
1139 		}
1140 
1141 		/*
1142 		 * This is the last buffer of the received packet.
1143 		 * If the CRC is not stripped by the hardware:
1144 		 *   - Subtract the CRC length from the total packet length.
1145 		 *   - If the last buffer only contains the whole CRC or a part
1146 		 *     of it, free the mbuf associated to the last buffer.
1147 		 *     If part of the CRC is also contained in the previous
1148 		 *     mbuf, subtract the length of that CRC part from the
1149 		 *     data length of the previous mbuf.
1150 		 */
1151 		rxm->next = NULL;
1152 		if (unlikely(rxq->crc_len > 0)) {
1153 			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1154 			if (data_len <= RTE_ETHER_CRC_LEN) {
1155 				rte_pktmbuf_free_seg(rxm);
1156 				first_seg->nb_segs--;
1157 				last_seg->data_len = (uint16_t)
1158 					(last_seg->data_len -
1159 					 (RTE_ETHER_CRC_LEN - data_len));
1160 				last_seg->next = NULL;
1161 			} else
1162 				rxm->data_len = (uint16_t)
1163 					(data_len - RTE_ETHER_CRC_LEN);
1164 		}
1165 
1166 		/*
1167 		 * Initialize the first mbuf of the returned packet:
1168 		 *    - RX port identifier,
1169 		 *    - hardware offload data, if any:
1170 		 *      - RSS flag & hash,
1171 		 *      - IP checksum flag,
1172 		 *      - VLAN TCI, if any,
1173 		 *      - error flags.
1174 		 */
1175 		first_seg->port = rxq->port_id;
1176 		first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1177 
1178 		/*
1179 		 * The vlan_tci field is only valid when PKT_RX_VLAN is
1180 		 * set in the pkt_flags field and must be in CPU byte order.
1181 		 */
1182 		if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
1183 				(rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
1184 			first_seg->vlan_tci =
1185 				rte_be_to_cpu_16(rxd.wb.upper.vlan);
1186 		} else {
1187 			first_seg->vlan_tci =
1188 				rte_le_to_cpu_16(rxd.wb.upper.vlan);
1189 		}
1190 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1191 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1192 		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1193 		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1194 		first_seg->ol_flags = pkt_flags;
1195 		first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1196 					lower.lo_dword.hs_rss.pkt_info);
1197 
1198 		/* Prefetch data of first segment, if configured to do so. */
1199 		rte_packet_prefetch((char *)first_seg->buf_addr +
1200 			first_seg->data_off);
1201 
1202 		/*
1203 		 * Store the mbuf address into the next entry of the array
1204 		 * of returned packets.
1205 		 */
1206 		rx_pkts[nb_rx++] = first_seg;
1207 
1208 		/*
1209 		 * Setup receipt context for a new packet.
1210 		 */
1211 		first_seg = NULL;
1212 	}
1213 
1214 	/*
1215 	 * Record index of the next RX descriptor to probe.
1216 	 */
1217 	rxq->rx_tail = rx_id;
1218 
1219 	/*
1220 	 * Save receive context.
1221 	 */
1222 	rxq->pkt_first_seg = first_seg;
1223 	rxq->pkt_last_seg = last_seg;
1224 
1225 	/*
1226 	 * If the number of free RX descriptors is greater than the RX free
1227 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1228 	 * register.
1229 	 * Update the RDT with the value of the last processed RX descriptor
1230 	 * minus 1, to guarantee that the RDT register is never equal to the
1231 	 * RDH register, which creates a "full" ring situation from the
1232 	 * hardware point of view...
1233 	 */
1234 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1235 	if (nb_hold > rxq->rx_free_thresh) {
1236 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1237 			   "nb_hold=%u nb_rx=%u",
1238 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1239 			   (unsigned) rx_id, (unsigned) nb_hold,
1240 			   (unsigned) nb_rx);
1241 		rx_id = (uint16_t) ((rx_id == 0) ?
1242 				     (rxq->nb_rx_desc - 1) : (rx_id - 1));
1243 		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1244 		nb_hold = 0;
1245 	}
1246 	rxq->nb_rx_hold = nb_hold;
1247 	return nb_rx;
1248 }
1249 
1250 /*
1251  * Maximum number of Ring Descriptors.
1252  *
1253  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1254  * descriptors should meet the following condition:
1255  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1256  */
1257 
1258 static void
1259 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1260 {
1261 	unsigned i;
1262 
1263 	if (txq->sw_ring != NULL) {
1264 		for (i = 0; i < txq->nb_tx_desc; i++) {
1265 			if (txq->sw_ring[i].mbuf != NULL) {
1266 				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1267 				txq->sw_ring[i].mbuf = NULL;
1268 			}
1269 		}
1270 	}
1271 }
1272 
1273 static void
1274 igb_tx_queue_release(struct igb_tx_queue *txq)
1275 {
1276 	if (txq != NULL) {
1277 		igb_tx_queue_release_mbufs(txq);
1278 		rte_free(txq->sw_ring);
1279 		rte_free(txq);
1280 	}
1281 }
1282 
1283 void
1284 eth_igb_tx_queue_release(void *txq)
1285 {
1286 	igb_tx_queue_release(txq);
1287 }
1288 
1289 static int
1290 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
1291 {
1292 	struct igb_tx_entry *sw_ring;
1293 	volatile union e1000_adv_tx_desc *txr;
1294 	uint16_t tx_first; /* First segment analyzed. */
1295 	uint16_t tx_id;    /* Current segment being processed. */
1296 	uint16_t tx_last;  /* Last segment in the current packet. */
1297 	uint16_t tx_next;  /* First segment of the next packet. */
1298 	int count = 0;
1299 
1300 	if (!txq)
1301 		return -ENODEV;
1302 
1303 	sw_ring = txq->sw_ring;
1304 	txr = txq->tx_ring;
1305 
1306 	/* tx_tail is the last sent packet on the sw_ring. Go to the end
1307 	 * of that packet (the last segment in the packet chain) and
1308 	 * then the next segment will be the start of the oldest segment
1309 	 * in the sw_ring. This is the first packet that will be
1310 	 * attempted to be freed.
1311 	 */
1312 
1313 	/* Get last segment in most recently added packet. */
1314 	tx_first = sw_ring[txq->tx_tail].last_id;
1315 
1316 	/* Get the next segment, which is the oldest segment in ring. */
1317 	tx_first = sw_ring[tx_first].next_id;
1318 
1319 	/* Set the current index to the first. */
1320 	tx_id = tx_first;
1321 
1322 	/* Loop through each packet. For each packet, verify that an
1323 	 * mbuf exists and that the last segment is free. If so, free
1324 	 * it and move on.
1325 	 */
1326 	while (1) {
1327 		tx_last = sw_ring[tx_id].last_id;
1328 
1329 		if (sw_ring[tx_last].mbuf) {
1330 			if (txr[tx_last].wb.status &
1331 			    E1000_TXD_STAT_DD) {
1332 				/* Increment the number of packets
1333 				 * freed.
1334 				 */
1335 				count++;
1336 
1337 				/* Get the start of the next packet. */
1338 				tx_next = sw_ring[tx_last].next_id;
1339 
1340 				/* Loop through all segments in a
1341 				 * packet.
1342 				 */
1343 				do {
1344 					if (sw_ring[tx_id].mbuf) {
1345 						rte_pktmbuf_free_seg(
1346 							sw_ring[tx_id].mbuf);
1347 						sw_ring[tx_id].mbuf = NULL;
1348 						sw_ring[tx_id].last_id = tx_id;
1349 					}
1350 
1351 					/* Move to next segment. */
1352 					tx_id = sw_ring[tx_id].next_id;
1353 
1354 				} while (tx_id != tx_next);
1355 
1356 				if (unlikely(count == (int)free_cnt))
1357 					break;
1358 			} else {
1359 				/* mbuf still in use, nothing left to
1360 				 * free.
1361 				 */
1362 				break;
1363 			}
1364 		} else {
1365 			/* There are multiple reasons to be here:
1366 			 * 1) All the packets on the ring have been
1367 			 *    freed - tx_id is equal to tx_first
1368 			 *    and some packets have been freed.
1369 			 *    - Done, exit
1370 			 * 2) The interface has not sent a ring's worth of
1371 			 *    packets yet, so the segment after tail is
1372 			 *    still empty. Or a previous call to this
1373 			 *    function freed some of the segments but
1374 			 *    not all so there is a hole in the list.
1375 			 *    Hopefully this is a rare case.
1376 			 *    - Walk the list and find the next mbuf. If
1377 			 *      there isn't one, then done.
1378 			 */
1379 			if (likely(tx_id == tx_first && count != 0))
1380 				break;
1381 
1382 			/* Walk the list and find the next mbuf, if any. */
1383 			do {
1384 				/* Move to next segment. */
1385 				tx_id = sw_ring[tx_id].next_id;
1386 
1387 				if (sw_ring[tx_id].mbuf)
1388 					break;
1389 
1390 			} while (tx_id != tx_first);
1391 
1392 			/* Determine why previous loop bailed. If there
1393 			 * is not an mbuf, done.
1394 			 */
1395 			if (!sw_ring[tx_id].mbuf)
1396 				break;
1397 		}
1398 	}
1399 
1400 	return count;
1401 }
1402 
1403 int
1404 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
1405 {
1406 	return igb_tx_done_cleanup(txq, free_cnt);
1407 }
1408 
1409 static void
1410 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1411 {
1412 	txq->tx_head = 0;
1413 	txq->tx_tail = 0;
1414 	txq->ctx_curr = 0;
1415 	memset((void*)&txq->ctx_cache, 0,
1416 		IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1417 }
1418 
1419 static void
1420 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1421 {
1422 	static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1423 	struct igb_tx_entry *txe = txq->sw_ring;
1424 	uint16_t i, prev;
1425 	struct e1000_hw *hw;
1426 
1427 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1428 	/* Zero out HW ring memory */
1429 	for (i = 0; i < txq->nb_tx_desc; i++) {
1430 		txq->tx_ring[i] = zeroed_desc;
1431 	}
1432 
1433 	/* Initialize ring entries */
1434 	prev = (uint16_t)(txq->nb_tx_desc - 1);
1435 	for (i = 0; i < txq->nb_tx_desc; i++) {
1436 		volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1437 
1438 		txd->wb.status = E1000_TXD_STAT_DD;
1439 		txe[i].mbuf = NULL;
1440 		txe[i].last_id = i;
1441 		txe[prev].next_id = i;
1442 		prev = i;
1443 	}
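	/*
	 * Every descriptor now reports DD (done), so the transmit path sees
	 * the whole ring as free, and next_id links the software entries
	 * into a circular list.
	 */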
1444 
1445 	txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1446 	/* 82575 specific, each tx queue will use 2 hw contexts */
1447 	if (hw->mac.type == e1000_82575)
1448 		txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1449 
1450 	igb_reset_tx_queue_stat(txq);
1451 }
1452 
1453 uint64_t
1454 igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1455 {
1456 	uint64_t tx_offload_capa;
1457 
1458 	RTE_SET_USED(dev);
1459 	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1460 			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
1461 			  DEV_TX_OFFLOAD_UDP_CKSUM   |
1462 			  DEV_TX_OFFLOAD_TCP_CKSUM   |
1463 			  DEV_TX_OFFLOAD_SCTP_CKSUM  |
1464 			  DEV_TX_OFFLOAD_TCP_TSO     |
1465 			  DEV_TX_OFFLOAD_MULTI_SEGS;
1466 
1467 	return tx_offload_capa;
1468 }
1469 
1470 uint64_t
1471 igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1472 {
1473 	uint64_t tx_queue_offload_capa;
1474 
1475 	tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
1476 
1477 	return tx_queue_offload_capa;
1478 }
1479 
1480 int
1481 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1482 			 uint16_t queue_idx,
1483 			 uint16_t nb_desc,
1484 			 unsigned int socket_id,
1485 			 const struct rte_eth_txconf *tx_conf)
1486 {
1487 	const struct rte_memzone *tz;
1488 	struct igb_tx_queue *txq;
1489 	struct e1000_hw     *hw;
1490 	uint32_t size;
1491 	uint64_t offloads;
1492 
1493 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1494 
1495 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1496 
1497 	/*
1498 	 * Validate number of transmit descriptors.
1499 	 * It must not exceed the hardware maximum, and must be a multiple
1500 	 * of E1000_ALIGN.
1501 	 */
1502 	if (nb_desc % IGB_TXD_ALIGN != 0 ||
1503 			(nb_desc > E1000_MAX_RING_DESC) ||
1504 			(nb_desc < E1000_MIN_RING_DESC)) {
1505 		return -EINVAL;
1506 	}
1507 
1508 	/*
1509 	 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1510 	 * driver.
1511 	 */
1512 	if (tx_conf->tx_free_thresh != 0)
1513 		PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1514 			     "used for the 1G driver.");
1515 	if (tx_conf->tx_rs_thresh != 0)
1516 		PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1517 			     "used for the 1G driver.");
1518 	if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1519 		PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1520 			     "consider setting the TX WTHRESH value to 4, 8, "
1521 			     "or 16.");
1522 
1523 	/* Free memory prior to re-allocation if needed */
1524 	if (dev->data->tx_queues[queue_idx] != NULL) {
1525 		igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1526 		dev->data->tx_queues[queue_idx] = NULL;
1527 	}
1528 
1529 	/* First allocate the tx queue data structure */
1530 	txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1531 							RTE_CACHE_LINE_SIZE);
1532 	if (txq == NULL)
1533 		return -ENOMEM;
1534 
1535 	/*
1536 	 * Allocate TX ring hardware descriptors. A memzone large enough to
1537 	 * handle the maximum ring size is allocated in order to allow for
1538 	 * resizing in later calls to the queue setup function.
1539 	 */
1540 	size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1541 	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1542 				      E1000_ALIGN, socket_id);
1543 	if (tz == NULL) {
1544 		igb_tx_queue_release(txq);
1545 		return -ENOMEM;
1546 	}
1547 
1548 	txq->nb_tx_desc = nb_desc;
1549 	txq->pthresh = tx_conf->tx_thresh.pthresh;
1550 	txq->hthresh = tx_conf->tx_thresh.hthresh;
1551 	txq->wthresh = tx_conf->tx_thresh.wthresh;
1552 	if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1553 		txq->wthresh = 1;
1554 	txq->queue_id = queue_idx;
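	/*
	 * With SR-IOV active, the hardware queue index is offset by the
	 * default pool's first queue (def_pool_q_idx).
	 */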
1555 	txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1556 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1557 	txq->port_id = dev->data->port_id;
1558 
1559 	txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1560 	txq->tx_ring_phys_addr = tz->iova;
1561 
1562 	txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1563 	/* Allocate software ring */
1564 	txq->sw_ring = rte_zmalloc("txq->sw_ring",
1565 				   sizeof(struct igb_tx_entry) * nb_desc,
1566 				   RTE_CACHE_LINE_SIZE);
1567 	if (txq->sw_ring == NULL) {
1568 		igb_tx_queue_release(txq);
1569 		return -ENOMEM;
1570 	}
1571 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1572 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1573 
1574 	igb_reset_tx_queue(txq, dev);
1575 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
1576 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1577 	dev->data->tx_queues[queue_idx] = txq;
1578 	txq->offloads = offloads;
1579 
1580 	return 0;
1581 }
1582 
1583 static void
1584 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1585 {
1586 	unsigned i;
1587 
1588 	if (rxq->sw_ring != NULL) {
1589 		for (i = 0; i < rxq->nb_rx_desc; i++) {
1590 			if (rxq->sw_ring[i].mbuf != NULL) {
1591 				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1592 				rxq->sw_ring[i].mbuf = NULL;
1593 			}
1594 		}
1595 	}
1596 }
1597 
1598 static void
1599 igb_rx_queue_release(struct igb_rx_queue *rxq)
1600 {
1601 	if (rxq != NULL) {
1602 		igb_rx_queue_release_mbufs(rxq);
1603 		rte_free(rxq->sw_ring);
1604 		rte_free(rxq);
1605 	}
1606 }
1607 
1608 void
1609 eth_igb_rx_queue_release(void *rxq)
1610 {
1611 	igb_rx_queue_release(rxq);
1612 }
1613 
1614 static void
1615 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1616 {
1617 	static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1618 	unsigned i;
1619 
1620 	/* Zero out HW ring memory */
1621 	for (i = 0; i < rxq->nb_rx_desc; i++) {
1622 		rxq->rx_ring[i] = zeroed_desc;
1623 	}
1624 
1625 	rxq->rx_tail = 0;
1626 	rxq->pkt_first_seg = NULL;
1627 	rxq->pkt_last_seg = NULL;
1628 }
1629 
1630 uint64_t
1631 igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1632 {
1633 	uint64_t rx_offload_capa;
1634 	struct e1000_hw *hw;
1635 
1636 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1637 
1638 	rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
1639 			  DEV_RX_OFFLOAD_VLAN_FILTER |
1640 			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
1641 			  DEV_RX_OFFLOAD_UDP_CKSUM   |
1642 			  DEV_RX_OFFLOAD_TCP_CKSUM   |
1643 			  DEV_RX_OFFLOAD_JUMBO_FRAME |
1644 			  DEV_RX_OFFLOAD_KEEP_CRC    |
1645 			  DEV_RX_OFFLOAD_SCATTER     |
1646 			  DEV_RX_OFFLOAD_RSS_HASH;
1647 
1648 	if (hw->mac.type == e1000_i350 ||
1649 	    hw->mac.type == e1000_i210 ||
1650 	    hw->mac.type == e1000_i211)
1651 		rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
1652 
1653 	return rx_offload_capa;
1654 }
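/*
 * Illustrative sketch (not part of the upstream file): an application would
 * normally discover these per-port RX offload capabilities through the
 * generic ethdev API rather than calling the helper above directly. The
 * function name and port_id handling below are assumptions for the example.
 */
#if 0	/* example only */
static int
example_check_rx_checksum_capa(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return -1;

	/* rx_offload_capa is filled from igb_get_rx_port_offloads_capa() */
	return (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) != 0;
}
#endif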
1655 
1656 uint64_t
1657 igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1658 {
1659 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1660 	uint64_t rx_queue_offload_capa;
1661 
1662 	switch (hw->mac.type) {
1663 	case e1000_vfadapt_i350:
1664 		/*
1665 		 * As only one RX queue can be used, make the per-queue
1666 		 * offload capability the same as the per-port offload
1667 		 * capability for convenience.
1668 		 */
1669 		rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
1670 		break;
1671 	default:
1672 		rx_queue_offload_capa = 0;
1673 	}
1674 	return rx_queue_offload_capa;
1675 }
1676 
1677 int
1678 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1679 			 uint16_t queue_idx,
1680 			 uint16_t nb_desc,
1681 			 unsigned int socket_id,
1682 			 const struct rte_eth_rxconf *rx_conf,
1683 			 struct rte_mempool *mp)
1684 {
1685 	const struct rte_memzone *rz;
1686 	struct igb_rx_queue *rxq;
1687 	struct e1000_hw     *hw;
1688 	unsigned int size;
1689 	uint64_t offloads;
1690 
1691 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1692 
1693 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1694 
1695 	/*
1696 	 * Validate number of receive descriptors.
1697 	 * It must not exceed the hardware maximum and must be a
1698 	 * multiple of IGB_RXD_ALIGN.
1699 	 */
1700 	if (nb_desc % IGB_RXD_ALIGN != 0 ||
1701 			(nb_desc > E1000_MAX_RING_DESC) ||
1702 			(nb_desc < E1000_MIN_RING_DESC)) {
1703 		return -EINVAL;
1704 	}
1705 
1706 	/* Free memory prior to re-allocation if needed */
1707 	if (dev->data->rx_queues[queue_idx] != NULL) {
1708 		igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1709 		dev->data->rx_queues[queue_idx] = NULL;
1710 	}
1711 
1712 	/* First allocate the RX queue data structure. */
1713 	rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1714 			  RTE_CACHE_LINE_SIZE);
1715 	if (rxq == NULL)
1716 		return -ENOMEM;
1717 	rxq->offloads = offloads;
1718 	rxq->mb_pool = mp;
1719 	rxq->nb_rx_desc = nb_desc;
1720 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
1721 	rxq->hthresh = rx_conf->rx_thresh.hthresh;
1722 	rxq->wthresh = rx_conf->rx_thresh.wthresh;
1723 	if (rxq->wthresh > 0 &&
1724 	    (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1725 		rxq->wthresh = 1;
1726 	rxq->drop_en = rx_conf->rx_drop_en;
1727 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1728 	rxq->queue_id = queue_idx;
1729 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1730 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1731 	rxq->port_id = dev->data->port_id;
1732 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1733 		rxq->crc_len = RTE_ETHER_CRC_LEN;
1734 	else
1735 		rxq->crc_len = 0;
1736 
1737 	/*
1738 	 *  Allocate RX ring hardware descriptors. A memzone large enough to
1739 	 *  handle the maximum ring size is allocated in order to allow for
1740 	 *  resizing in later calls to the queue setup function.
1741 	 */
1742 	size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1743 	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1744 				      E1000_ALIGN, socket_id);
1745 	if (rz == NULL) {
1746 		igb_rx_queue_release(rxq);
1747 		return -ENOMEM;
1748 	}
1749 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1750 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1751 	rxq->rx_ring_phys_addr = rz->iova;
1752 	rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1753 
1754 	/* Allocate software ring. */
1755 	rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1756 				   sizeof(struct igb_rx_entry) * nb_desc,
1757 				   RTE_CACHE_LINE_SIZE);
1758 	if (rxq->sw_ring == NULL) {
1759 		igb_rx_queue_release(rxq);
1760 		return -ENOMEM;
1761 	}
1762 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1763 		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1764 
1765 	dev->data->rx_queues[queue_idx] = rxq;
1766 	igb_reset_rx_queue(rxq);
1767 
1768 	return 0;
1769 }
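/*
 * Illustrative sketch (not part of the upstream file): typical
 * application-side setup that reaches eth_igb_rx_queue_setup() through the
 * ethdev layer. The pool name, ring/pool sizes and port/queue ids are
 * assumptions chosen for the example.
 */
#if 0	/* example only */
static int
example_setup_rx_queue(uint16_t port_id, uint16_t queue_id, int socket_id)
{
	struct rte_mempool *mp;

	mp = rte_pktmbuf_pool_create("rx_pool", 4096, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
	if (mp == NULL)
		return -ENOMEM;

	/*
	 * nb_desc must be a multiple of IGB_RXD_ALIGN and stay within
	 * [E1000_MIN_RING_DESC, E1000_MAX_RING_DESC]; 512 satisfies both.
	 */
	return rte_eth_rx_queue_setup(port_id, queue_id, 512, socket_id,
				      NULL /* default rte_eth_rxconf */, mp);
}
#endif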
1770 
1771 uint32_t
1772 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1773 {
1774 #define IGB_RXQ_SCAN_INTERVAL 4
1775 	volatile union e1000_adv_rx_desc *rxdp;
1776 	struct igb_rx_queue *rxq;
1777 	uint32_t desc = 0;
1778 
1779 	rxq = dev->data->rx_queues[rx_queue_id];
1780 	rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1781 
1782 	while ((desc < rxq->nb_rx_desc) &&
1783 		(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1784 		desc += IGB_RXQ_SCAN_INTERVAL;
1785 		rxdp += IGB_RXQ_SCAN_INTERVAL;
1786 		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1787 			rxdp = &(rxq->rx_ring[rxq->rx_tail +
1788 				desc - rxq->nb_rx_desc]);
1789 	}
1790 
1791 	return desc;
1792 }
1793 
1794 int
1795 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1796 {
1797 	volatile union e1000_adv_rx_desc *rxdp;
1798 	struct igb_rx_queue *rxq = rx_queue;
1799 	uint32_t desc;
1800 
1801 	if (unlikely(offset >= rxq->nb_rx_desc))
1802 		return 0;
1803 	desc = rxq->rx_tail + offset;
1804 	if (desc >= rxq->nb_rx_desc)
1805 		desc -= rxq->nb_rx_desc;
1806 
1807 	rxdp = &rxq->rx_ring[desc];
1808 	return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1809 }
1810 
1811 int
1812 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
1813 {
1814 	struct igb_rx_queue *rxq = rx_queue;
1815 	volatile uint32_t *status;
1816 	uint32_t desc;
1817 
1818 	if (unlikely(offset >= rxq->nb_rx_desc))
1819 		return -EINVAL;
1820 
1821 	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1822 		return RTE_ETH_RX_DESC_UNAVAIL;
1823 
1824 	desc = rxq->rx_tail + offset;
1825 	if (desc >= rxq->nb_rx_desc)
1826 		desc -= rxq->nb_rx_desc;
1827 
1828 	status = &rxq->rx_ring[desc].wb.upper.status_error;
1829 	if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
1830 		return RTE_ETH_RX_DESC_DONE;
1831 
1832 	return RTE_ETH_RX_DESC_AVAIL;
1833 }
1834 
1835 int
1836 eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
1837 {
1838 	struct igb_tx_queue *txq = tx_queue;
1839 	volatile uint32_t *status;
1840 	uint32_t desc;
1841 
1842 	if (unlikely(offset >= txq->nb_tx_desc))
1843 		return -EINVAL;
1844 
1845 	desc = txq->tx_tail + offset;
1846 	if (desc >= txq->nb_tx_desc)
1847 		desc -= txq->nb_tx_desc;
1848 
1849 	status = &txq->tx_ring[desc].wb.status;
1850 	if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
1851 		return RTE_ETH_TX_DESC_DONE;
1852 
1853 	return RTE_ETH_TX_DESC_FULL;
1854 }
1855 
1856 void
1857 igb_dev_clear_queues(struct rte_eth_dev *dev)
1858 {
1859 	uint16_t i;
1860 	struct igb_tx_queue *txq;
1861 	struct igb_rx_queue *rxq;
1862 
1863 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1864 		txq = dev->data->tx_queues[i];
1865 		if (txq != NULL) {
1866 			igb_tx_queue_release_mbufs(txq);
1867 			igb_reset_tx_queue(txq, dev);
1868 		}
1869 	}
1870 
1871 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1872 		rxq = dev->data->rx_queues[i];
1873 		if (rxq != NULL) {
1874 			igb_rx_queue_release_mbufs(rxq);
1875 			igb_reset_rx_queue(rxq);
1876 		}
1877 	}
1878 }
1879 
1880 void
1881 igb_dev_free_queues(struct rte_eth_dev *dev)
1882 {
1883 	uint16_t i;
1884 
1885 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1886 		eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1887 		dev->data->rx_queues[i] = NULL;
1888 		rte_eth_dma_zone_free(dev, "rx_ring", i);
1889 	}
1890 	dev->data->nb_rx_queues = 0;
1891 
1892 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1893 		eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1894 		dev->data->tx_queues[i] = NULL;
1895 		rte_eth_dma_zone_free(dev, "tx_ring", i);
1896 	}
1897 	dev->data->nb_tx_queues = 0;
1898 }
1899 
1900 /**
1901  * Receive Side Scaling (RSS).
1902  * See section 7.1.1.7 in the following document:
1903  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1904  *
1905  * Principles:
1906  * The source and destination IP addresses of the IP header and the source and
1907  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1908  * against a configurable random key to compute a 32-bit RSS hash result.
1909  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1910  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1911  * RSS output index which is used as the RX queue index where to store the
1912  * received packets.
1913  * The following output is supplied in the RX write-back descriptor:
1914  *     - 32-bit result of the Microsoft RSS hash function,
1915  *     - 4-bit RSS type field.
1916  */
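
/*
 * Worked example for the scheme above (illustrative figures, not taken
 * from hardware): for a computed 32-bit hash of 0x1234ABCD, the RETA index
 * is given by the seven LSBs, 0xCD & 0x7F = 0x4D = 77, and the 3-bit entry
 * stored in RETA[77] selects one of up to eight RX queues for that packet.
 */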
1917 
1918 /*
1919  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1920  * Used as the default key.
1921  */
1922 static uint8_t rss_intel_key[40] = {
1923 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1924 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1925 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1926 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1927 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1928 };
1929 
1930 static void
1931 igb_rss_disable(struct rte_eth_dev *dev)
1932 {
1933 	struct e1000_hw *hw;
1934 	uint32_t mrqc;
1935 
1936 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1937 	mrqc = E1000_READ_REG(hw, E1000_MRQC);
1938 	mrqc &= ~E1000_MRQC_ENABLE_MASK;
1939 	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1940 }
1941 
1942 static void
1943 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1944 {
1945 	uint8_t  *hash_key;
1946 	uint32_t rss_key;
1947 	uint32_t mrqc;
1948 	uint64_t rss_hf;
1949 	uint16_t i;
1950 
1951 	hash_key = rss_conf->rss_key;
1952 	if (hash_key != NULL) {
1953 		/* Fill in RSS hash key */
1954 		for (i = 0; i < 10; i++) {
1955 			rss_key  = hash_key[(i * 4)];
1956 			rss_key |= hash_key[(i * 4) + 1] << 8;
1957 			rss_key |= hash_key[(i * 4) + 2] << 16;
1958 			rss_key |= hash_key[(i * 4) + 3] << 24;
1959 			E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1960 		}
1961 	}
1962 
1963 	/* Set configured hashing protocols in MRQC register */
1964 	rss_hf = rss_conf->rss_hf;
1965 	mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1966 	if (rss_hf & ETH_RSS_IPV4)
1967 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1968 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1969 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1970 	if (rss_hf & ETH_RSS_IPV6)
1971 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1972 	if (rss_hf & ETH_RSS_IPV6_EX)
1973 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1974 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1975 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1976 	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1977 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1978 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1979 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1980 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1981 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1982 	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1983 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1984 	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1985 }
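
/*
 * Worked example for the RSSRK packing above, using the first four bytes of
 * rss_intel_key (0x6D, 0x5A, 0x56, 0xDA) as an illustration: byte 0 lands in
 * bits 7:0 and byte 3 in bits 31:24, so the first register written is
 * RSSRK(0) = 0x6D | 0x5A << 8 | 0x56 << 16 | 0xDA << 24 = 0xDA565A6D.
 */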
1986 
1987 int
1988 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1989 			struct rte_eth_rss_conf *rss_conf)
1990 {
1991 	struct e1000_hw *hw;
1992 	uint32_t mrqc;
1993 	uint64_t rss_hf;
1994 
1995 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1996 
1997 	/*
1998 	 * Before changing anything, first check that the update RSS operation
1999 	 * does not attempt to disable RSS, if RSS was enabled at
2000 	 * initialization time, or does not attempt to enable RSS, if RSS was
2001 	 * disabled at initialization time.
2002 	 */
2003 	rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
2004 	mrqc = E1000_READ_REG(hw, E1000_MRQC);
2005 	if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
2006 		if (rss_hf != 0) /* Enable RSS */
2007 			return -(EINVAL);
2008 		return 0; /* Nothing to do */
2009 	}
2010 	/* RSS enabled */
2011 	if (rss_hf == 0) /* Disable RSS */
2012 		return -(EINVAL);
2013 	igb_hw_rss_hash_set(hw, rss_conf);
2014 	return 0;
2015 }
2016 
2017 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
2018 			      struct rte_eth_rss_conf *rss_conf)
2019 {
2020 	struct e1000_hw *hw;
2021 	uint8_t *hash_key;
2022 	uint32_t rss_key;
2023 	uint32_t mrqc;
2024 	uint64_t rss_hf;
2025 	uint16_t i;
2026 
2027 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2028 	hash_key = rss_conf->rss_key;
2029 	if (hash_key != NULL) {
2030 		/* Return RSS hash key */
2031 		for (i = 0; i < 10; i++) {
2032 			rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
2033 			hash_key[(i * 4)] = rss_key & 0x000000FF;
2034 			hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2035 			hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2036 			hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2037 		}
2038 	}
2039 
2040 	/* Get RSS functions configured in MRQC register */
2041 	mrqc = E1000_READ_REG(hw, E1000_MRQC);
2042 	if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
2043 		rss_conf->rss_hf = 0;
2044 		return 0;
2045 	}
2046 	rss_hf = 0;
2047 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
2048 		rss_hf |= ETH_RSS_IPV4;
2049 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
2050 		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2051 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
2052 		rss_hf |= ETH_RSS_IPV6;
2053 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
2054 		rss_hf |= ETH_RSS_IPV6_EX;
2055 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
2056 		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2057 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
2058 		rss_hf |= ETH_RSS_IPV6_TCP_EX;
2059 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
2060 		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2061 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
2062 		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2063 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
2064 		rss_hf |= ETH_RSS_IPV6_UDP_EX;
2065 	rss_conf->rss_hf = rss_hf;
2066 	return 0;
2067 }
2068 
2069 static void
2070 igb_rss_configure(struct rte_eth_dev *dev)
2071 {
2072 	struct rte_eth_rss_conf rss_conf;
2073 	struct e1000_hw *hw;
2074 	uint32_t shift;
2075 	uint16_t i;
2076 
2077 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2078 
2079 	/* Fill in redirection table. */
2080 	shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2081 	for (i = 0; i < 128; i++) {
2082 		union e1000_reta {
2083 			uint32_t dword;
2084 			uint8_t  bytes[4];
2085 		} reta;
2086 		uint8_t q_idx;
2087 
2088 		q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
2089 				   i % dev->data->nb_rx_queues : 0);
2090 		reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
2091 		if ((i & 3) == 3)
2092 			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2093 	}
2094 
2095 	/*
2096 	 * Configure the RSS key and the RSS protocols used to compute
2097 	 * the RSS hash of input packets.
2098 	 */
2099 	rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2100 	if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2101 		igb_rss_disable(dev);
2102 		return;
2103 	}
2104 	if (rss_conf.rss_key == NULL)
2105 		rss_conf.rss_key = rss_intel_key; /* Default hash key */
2106 	igb_hw_rss_hash_set(hw, &rss_conf);
2107 }
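
/*
 * Worked example for the RETA fill above (illustrative; assumes a
 * non-82575 MAC so shift == 0, on a little-endian host): with
 * nb_rx_queues == 4, entries 0..3 map to queues 0, 1, 2, 3, so the first
 * register written is RETA(0) = 0x03020100, and the same value repeats
 * for the remaining registers across all 128 entries.
 */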
2108 
2109 /*
2110  * Check if the mac type support VMDq or not.
2111  * Return 1 if it supports, otherwise, return 0.
2112  */
2113 static int
2114 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
2115 {
2116 	const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2117 
2118 	switch (hw->mac.type) {
2119 	case e1000_82576:
2120 	case e1000_82580:
2121 	case e1000_i350:
2122 		return 1;
2123 	case e1000_82540:
2124 	case e1000_82541:
2125 	case e1000_82542:
2126 	case e1000_82543:
2127 	case e1000_82544:
2128 	case e1000_82545:
2129 	case e1000_82546:
2130 	case e1000_82547:
2131 	case e1000_82571:
2132 	case e1000_82572:
2133 	case e1000_82573:
2134 	case e1000_82574:
2135 	case e1000_82583:
2136 	case e1000_i210:
2137 	case e1000_i211:
2138 	default:
2139 		PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
2140 		return 0;
2141 	}
2142 }
2143 
2144 static int
2145 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
2146 {
2147 	struct rte_eth_vmdq_rx_conf *cfg;
2148 	struct e1000_hw *hw;
2149 	uint32_t mrqc, vt_ctl, vmolr, rctl;
2150 	int i;
2151 
2152 	PMD_INIT_FUNC_TRACE();
2153 
2154 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2155 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
2156 
2157 	/* Check whether the MAC type supports VMDq; 0 means not supported */
2158 	if (igb_is_vmdq_supported(dev) == 0)
2159 		return -1;
2160 
2161 	igb_rss_disable(dev);
2162 
2163 	/* RCTL: enable VLAN filter */
2164 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2165 	rctl |= E1000_RCTL_VFE;
2166 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2167 
2168 	/* MRQC: enable vmdq */
2169 	mrqc = E1000_READ_REG(hw, E1000_MRQC);
2170 	mrqc |= E1000_MRQC_ENABLE_VMDQ;
2171 	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2172 
2173 	/* VTCTL:  pool selection according to VLAN tag */
2174 	vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
2175 	if (cfg->enable_default_pool)
2176 		vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
2177 	vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
2178 	E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
2179 
2180 	for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2181 		vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2182 		vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
2183 			E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
2184 			E1000_VMOLR_MPME);
2185 
2186 		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
2187 			vmolr |= E1000_VMOLR_AUPE;
2188 		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
2189 			vmolr |= E1000_VMOLR_ROMPE;
2190 		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
2191 			vmolr |= E1000_VMOLR_ROPE;
2192 		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
2193 			vmolr |= E1000_VMOLR_BAM;
2194 		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
2195 			vmolr |= E1000_VMOLR_MPME;
2196 
2197 		E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2198 	}
2199 
2200 	/*
2201 	 * VMOLR: set STRVLAN when IGMAC in VT_CTL is set.
2202 	 * Both 82576 and 82580 support it.
2203 	 */
2204 	if (hw->mac.type != e1000_i350) {
2205 		for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2206 			vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2207 			vmolr |= E1000_VMOLR_STRVLAN;
2208 			E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2209 		}
2210 	}
2211 
2212 	/* VFTA - enable all vlan filters */
2213 	for (i = 0; i < IGB_VFTA_SIZE; i++)
2214 		E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
2215 
2216 	/* VFRE: enable RX for all 8 pools; both 82576 and i350 support it */
2217 	if (hw->mac.type != e1000_82580)
2218 		E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
2219 
2220 	/*
2221 	 * RAH/RAL - allow pools to read specific mac addresses
2222 	 * In this case, all pools should be able to read from mac addr 0
2223 	 */
2224 	E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
2225 	E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
2226 
2227 	/* VLVF: set up filters for vlan tags as configured */
2228 	for (i = 0; i < cfg->nb_pool_maps; i++) {
2229 		/* set vlan id in VF register and set the valid bit */
2230 		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
2231 			(cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) |
2232 			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
2233 			E1000_VLVF_POOLSEL_MASK)));
2234 	}
2235 
2236 	E1000_WRITE_FLUSH(hw);
2237 
2238 	return 0;
2239 }
2240 
2241 
2242 /*********************************************************************
2243  *
2244  *  Enable receive unit.
2245  *
2246  **********************************************************************/
2247 
2248 static int
2249 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
2250 {
2251 	struct igb_rx_entry *rxe = rxq->sw_ring;
2252 	uint64_t dma_addr;
2253 	unsigned i;
2254 
2255 	/* Initialize software ring entries. */
2256 	for (i = 0; i < rxq->nb_rx_desc; i++) {
2257 		volatile union e1000_adv_rx_desc *rxd;
2258 		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2259 
2260 		if (mbuf == NULL) {
2261 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2262 				     "queue_id=%hu", rxq->queue_id);
2263 			return -ENOMEM;
2264 		}
2265 		dma_addr =
2266 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2267 		rxd = &rxq->rx_ring[i];
2268 		rxd->read.hdr_addr = 0;
2269 		rxd->read.pkt_addr = dma_addr;
2270 		rxe[i].mbuf = mbuf;
2271 	}
2272 
2273 	return 0;
2274 }
2275 
2276 #define E1000_MRQC_DEF_Q_SHIFT               (3)
2277 static int
2278 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2279 {
2280 	struct e1000_hw *hw =
2281 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2282 	uint32_t mrqc;
2283 
2284 	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2285 		/*
2286 		 * SRIOV active scheme
2287 		 * FIXME if support RSS together with VMDq & SRIOV
2288 		 */
2289 		mrqc = E1000_MRQC_ENABLE_VMDQ;
2290 		/* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
2291 		mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2292 		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2293 	} else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
2294 		/*
2295 		 * SRIOV inactive scheme
2296 		 */
2297 		switch (dev->data->dev_conf.rxmode.mq_mode) {
2298 			case ETH_MQ_RX_RSS:
2299 				igb_rss_configure(dev);
2300 				break;
2301 			case ETH_MQ_RX_VMDQ_ONLY:
2302 				/*Configure general VMDQ only RX parameters*/
2303 				igb_vmdq_rx_hw_configure(dev);
2304 				break;
2305 			case ETH_MQ_RX_NONE:
2306 				/* if mq_mode is none, disable rss mode.*/
2307 			default:
2308 				igb_rss_disable(dev);
2309 				break;
2310 		}
2311 	}
2312 
2313 	return 0;
2314 }
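
/*
 * Illustrative sketch (not part of the upstream file): the mq_mode consumed
 * above is chosen by the application at configure time. The function name,
 * queue counts and hash-type selection below are assumptions for the example.
 */
#if 0	/* example only */
static int
example_enable_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
		.rx_adv_conf.rss_conf = {
			.rss_key = NULL,	/* fall back to rss_intel_key */
			.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
		},
	};

	/* igb_dev_mq_rx_configure() picks up this mq_mode on RX init */
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
#endif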
2315 
2316 int
2317 eth_igb_rx_init(struct rte_eth_dev *dev)
2318 {
2319 	struct rte_eth_rxmode *rxmode;
2320 	struct e1000_hw     *hw;
2321 	struct igb_rx_queue *rxq;
2322 	uint32_t rctl;
2323 	uint32_t rxcsum;
2324 	uint32_t srrctl;
2325 	uint16_t buf_size;
2326 	uint16_t rctl_bsize;
2327 	uint16_t i;
2328 	int ret;
2329 
2330 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2331 	srrctl = 0;
2332 
2333 	/*
2334 	 * Make sure receives are disabled while setting
2335 	 * up the descriptor ring.
2336 	 */
2337 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2338 	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2339 
2340 	rxmode = &dev->data->dev_conf.rxmode;
2341 
2342 	/*
2343 	 * Configure support of jumbo frames, if any.
2344 	 */
2345 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
2346 		rctl |= E1000_RCTL_LPE;
2347 
2348 		/*
2349 		 * Set the maximum packet length by default; it may be updated
2350 		 * later when dual VLAN is enabled or disabled.
2351 		 */
2352 		E1000_WRITE_REG(hw, E1000_RLPML,
2353 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
2354 						VLAN_TAG_SIZE);
2355 	} else
2356 		rctl &= ~E1000_RCTL_LPE;
2357 
2358 	/* Configure and enable each RX queue. */
2359 	rctl_bsize = 0;
2360 	dev->rx_pkt_burst = eth_igb_recv_pkts;
2361 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2362 		uint64_t bus_addr;
2363 		uint32_t rxdctl;
2364 
2365 		rxq = dev->data->rx_queues[i];
2366 
2367 		rxq->flags = 0;
2368 		/*
2369 		 * i350 and i354 vlan packets have vlan tags byte swapped.
2370 		 */
2371 		if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
2372 			rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2373 			PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2374 		} else {
2375 			PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2376 		}
2377 
2378 		/* Allocate buffers for descriptor rings and set up queue */
2379 		ret = igb_alloc_rx_queue_mbufs(rxq);
2380 		if (ret)
2381 			return ret;
2382 
2383 		/*
2384 		 * Reset crc_len in case it was changed after queue setup by a
2385 		 * call to configure.
2386 		 */
2387 		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2388 			rxq->crc_len = RTE_ETHER_CRC_LEN;
2389 		else
2390 			rxq->crc_len = 0;
2391 
2392 		bus_addr = rxq->rx_ring_phys_addr;
2393 		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2394 				rxq->nb_rx_desc *
2395 				sizeof(union e1000_adv_rx_desc));
2396 		E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2397 				(uint32_t)(bus_addr >> 32));
2398 		E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2399 
2400 		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2401 
2402 		/*
2403 		 * Configure RX buffer size.
2404 		 */
2405 		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2406 			RTE_PKTMBUF_HEADROOM);
2407 		if (buf_size >= 1024) {
2408 			/*
2409 			 * Configure the BSIZEPACKET field of the SRRCTL
2410 			 * register of the queue.
2411 			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2412 			 * If this field is equal to 0b, then RCTL.BSIZE
2413 			 * determines the RX packet buffer size.
2414 			 */
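			/*
			 * Worked example (illustrative figures): with a
			 * 2048-byte mbuf data room and 128 bytes of headroom,
			 * buf_size is 1920; 1920 >> 10 gives a BSIZEPACKET
			 * value of 1, so the effective HW buffer size rounds
			 * down to 1 << 10 = 1024 bytes.
			 */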
2415 			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2416 				   E1000_SRRCTL_BSIZEPKT_MASK);
2417 			buf_size = (uint16_t) ((srrctl &
2418 						E1000_SRRCTL_BSIZEPKT_MASK) <<
2419 					       E1000_SRRCTL_BSIZEPKT_SHIFT);
2420 
2421 			/* Add dual VLAN tag length to support dual VLAN */
2422 			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2423 						2 * VLAN_TAG_SIZE) > buf_size){
2424 				if (!dev->data->scattered_rx)
2425 					PMD_INIT_LOG(DEBUG,
2426 						     "forcing scatter mode");
2427 				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2428 				dev->data->scattered_rx = 1;
2429 			}
2430 		} else {
2431 			/*
2432 			 * Use BSIZE field of the device RCTL register.
2433 			 */
2434 			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2435 				rctl_bsize = buf_size;
2436 			if (!dev->data->scattered_rx)
2437 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2438 			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2439 			dev->data->scattered_rx = 1;
2440 		}
2441 
2442 		/* Set if packets are dropped when no descriptors available */
2443 		if (rxq->drop_en)
2444 			srrctl |= E1000_SRRCTL_DROP_EN;
2445 
2446 		E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2447 
2448 		/* Enable this RX queue. */
2449 		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2450 		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2451 		rxdctl &= 0xFFF00000;
2452 		rxdctl |= (rxq->pthresh & 0x1F);
2453 		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2454 		rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2455 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2456 	}
2457 
2458 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2459 		if (!dev->data->scattered_rx)
2460 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2461 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2462 		dev->data->scattered_rx = 1;
2463 	}
2464 
2465 	/*
2466 	 * Setup BSIZE field of RCTL register, if needed.
2467 	 * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
2468 	 * register, since the code above configures the SRRCTL register of
2469 	 * the RX queue in such a case.
2470 	 * All configurable sizes are:
2471 	 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2472 	 *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2473 	 *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2474 	 *  2048: rctl |= E1000_RCTL_SZ_2048;
2475 	 *  1024: rctl |= E1000_RCTL_SZ_1024;
2476 	 *   512: rctl |= E1000_RCTL_SZ_512;
2477 	 *   256: rctl |= E1000_RCTL_SZ_256;
2478 	 */
2479 	if (rctl_bsize > 0) {
2480 		if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2481 			rctl |= E1000_RCTL_SZ_512;
2482 		else /* 256 <= buf_size < 512 - use 256 */
2483 			rctl |= E1000_RCTL_SZ_256;
2484 	}
2485 
2486 	/*
2487 	 * Configure RSS if device configured with multiple RX queues.
2488 	 */
2489 	igb_dev_mq_rx_configure(dev);
2490 
2491 	/* Update the rctl since igb_dev_mq_rx_configure may change its value */
2492 	rctl |= E1000_READ_REG(hw, E1000_RCTL);
2493 
2494 	/*
2495 	 * Setup the Checksum Register.
2496 	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2497 	 */
2498 	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2499 	rxcsum |= E1000_RXCSUM_PCSD;
2500 
2501 	/* Enable both L3/L4 rx checksum offload */
2502 	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
2503 		rxcsum |= E1000_RXCSUM_IPOFL;
2504 	else
2505 		rxcsum &= ~E1000_RXCSUM_IPOFL;
2506 	if (rxmode->offloads &
2507 		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
2508 		rxcsum |= E1000_RXCSUM_TUOFL;
2509 	else
2510 		rxcsum &= ~E1000_RXCSUM_TUOFL;
2511 	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
2512 		rxcsum |= E1000_RXCSUM_CRCOFL;
2513 	else
2514 		rxcsum &= ~E1000_RXCSUM_CRCOFL;
2515 
2516 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2517 
2518 	/* Setup the Receive Control Register. */
2519 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
2520 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2521 
2522 		/* clear STRCRC bit in all queues */
2523 		if (hw->mac.type == e1000_i350 ||
2524 		    hw->mac.type == e1000_i210 ||
2525 		    hw->mac.type == e1000_i211 ||
2526 		    hw->mac.type == e1000_i354) {
2527 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
2528 				rxq = dev->data->rx_queues[i];
2529 				uint32_t dvmolr = E1000_READ_REG(hw,
2530 					E1000_DVMOLR(rxq->reg_idx));
2531 				dvmolr &= ~E1000_DVMOLR_STRCRC;
2532 				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2533 			}
2534 		}
2535 	} else {
2536 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2537 
2538 		/* set STRCRC bit in all queues */
2539 		if (hw->mac.type == e1000_i350 ||
2540 		    hw->mac.type == e1000_i210 ||
2541 		    hw->mac.type == e1000_i211 ||
2542 		    hw->mac.type == e1000_i354) {
2543 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
2544 				rxq = dev->data->rx_queues[i];
2545 				uint32_t dvmolr = E1000_READ_REG(hw,
2546 					E1000_DVMOLR(rxq->reg_idx));
2547 				dvmolr |= E1000_DVMOLR_STRCRC;
2548 				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2549 			}
2550 		}
2551 	}
2552 
2553 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2554 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2555 		E1000_RCTL_RDMTS_HALF |
2556 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2557 
2558 	/* Make sure VLAN Filters are off. */
2559 	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2560 		rctl &= ~E1000_RCTL_VFE;
2561 	/* Don't store bad packets. */
2562 	rctl &= ~E1000_RCTL_SBP;
2563 
2564 	/* Enable Receives. */
2565 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2566 
2567 	/*
2568 	 * Setup the HW Rx Head and Tail Descriptor Pointers.
2569 	 * This needs to be done after enable.
2570 	 */
2571 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2572 		rxq = dev->data->rx_queues[i];
2573 		E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2574 		E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2575 	}
2576 
2577 	return 0;
2578 }
2579 
2580 /*********************************************************************
2581  *
2582  *  Enable transmit unit.
2583  *
2584  **********************************************************************/
2585 void
2586 eth_igb_tx_init(struct rte_eth_dev *dev)
2587 {
2588 	struct e1000_hw     *hw;
2589 	struct igb_tx_queue *txq;
2590 	uint32_t tctl;
2591 	uint32_t txdctl;
2592 	uint16_t i;
2593 
2594 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2595 
2596 	/* Setup the Base and Length of the Tx Descriptor Rings. */
2597 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2598 		uint64_t bus_addr;
2599 		txq = dev->data->tx_queues[i];
2600 		bus_addr = txq->tx_ring_phys_addr;
2601 
2602 		E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2603 				txq->nb_tx_desc *
2604 				sizeof(union e1000_adv_tx_desc));
2605 		E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2606 				(uint32_t)(bus_addr >> 32));
2607 		E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2608 
2609 		/* Setup the HW Tx Head and Tail descriptor pointers. */
2610 		E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2611 		E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2612 
2613 		/* Setup Transmit threshold registers. */
2614 		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2615 		txdctl |= txq->pthresh & 0x1F;
2616 		txdctl |= ((txq->hthresh & 0x1F) << 8);
2617 		txdctl |= ((txq->wthresh & 0x1F) << 16);
2618 		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2619 		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2620 	}
2621 
2622 	/* Program the Transmit Control Register. */
2623 	tctl = E1000_READ_REG(hw, E1000_TCTL);
2624 	tctl &= ~E1000_TCTL_CT;
2625 	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2626 		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2627 
2628 	e1000_config_collision_dist(hw);
2629 
2630 	/* This write will effectively turn on the transmit unit. */
2631 	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2632 }
2633 
2634 /*********************************************************************
2635  *
2636  *  Enable VF receive unit.
2637  *
2638  **********************************************************************/
2639 int
2640 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2641 {
2642 	struct e1000_hw     *hw;
2643 	struct igb_rx_queue *rxq;
2644 	uint32_t srrctl;
2645 	uint16_t buf_size;
2646 	uint16_t rctl_bsize;
2647 	uint16_t i;
2648 	int ret;
2649 
2650 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2651 
2652 	/* setup MTU */
2653 	e1000_rlpml_set_vf(hw,
2654 		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2655 		VLAN_TAG_SIZE));
2656 
2657 	/* Configure and enable each RX queue. */
2658 	rctl_bsize = 0;
2659 	dev->rx_pkt_burst = eth_igb_recv_pkts;
2660 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2661 		uint64_t bus_addr;
2662 		uint32_t rxdctl;
2663 
2664 		rxq = dev->data->rx_queues[i];
2665 
2666 		rxq->flags = 0;
2667 		/*
2668 		 * i350VF LB vlan packets have vlan tags byte swapped.
2669 		 */
2670 		if (hw->mac.type == e1000_vfadapt_i350) {
2671 			rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2672 			PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2673 		} else {
2674 			PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2675 		}
2676 
2677 		/* Allocate buffers for descriptor rings and set up queue */
2678 		ret = igb_alloc_rx_queue_mbufs(rxq);
2679 		if (ret)
2680 			return ret;
2681 
2682 		bus_addr = rxq->rx_ring_phys_addr;
2683 		E1000_WRITE_REG(hw, E1000_RDLEN(i),
2684 				rxq->nb_rx_desc *
2685 				sizeof(union e1000_adv_rx_desc));
2686 		E1000_WRITE_REG(hw, E1000_RDBAH(i),
2687 				(uint32_t)(bus_addr >> 32));
2688 		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2689 
2690 		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2691 
2692 		/*
2693 		 * Configure RX buffer size.
2694 		 */
2695 		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2696 			RTE_PKTMBUF_HEADROOM);
2697 		if (buf_size >= 1024) {
2698 			/*
2699 			 * Configure the BSIZEPACKET field of the SRRCTL
2700 			 * register of the queue.
2701 			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2702 			 * If this field is equal to 0b, then RCTL.BSIZE
2703 			 * determines the RX packet buffer size.
2704 			 */
2705 			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2706 				   E1000_SRRCTL_BSIZEPKT_MASK);
2707 			buf_size = (uint16_t) ((srrctl &
2708 						E1000_SRRCTL_BSIZEPKT_MASK) <<
2709 					       E1000_SRRCTL_BSIZEPKT_SHIFT);
2710 
2711 			/* Add dual VLAN tag length to support dual VLAN */
2712 			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2713 						2 * VLAN_TAG_SIZE) > buf_size){
2714 				if (!dev->data->scattered_rx)
2715 					PMD_INIT_LOG(DEBUG,
2716 						     "forcing scatter mode");
2717 				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2718 				dev->data->scattered_rx = 1;
2719 			}
2720 		} else {
2721 			/*
2722 			 * Use BSIZE field of the device RCTL register.
2723 			 */
2724 			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2725 				rctl_bsize = buf_size;
2726 			if (!dev->data->scattered_rx)
2727 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2728 			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2729 			dev->data->scattered_rx = 1;
2730 		}
2731 
2732 		/* Set if packets are dropped when no descriptors available */
2733 		if (rxq->drop_en)
2734 			srrctl |= E1000_SRRCTL_DROP_EN;
2735 
2736 		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2737 
2738 		/* Enable this RX queue. */
2739 		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2740 		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2741 		rxdctl &= 0xFFF00000;
2742 		rxdctl |= (rxq->pthresh & 0x1F);
2743 		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2744 		if (hw->mac.type == e1000_vfadapt) {
2745 			/*
2746 			 * Workaround for the 82576 VF erratum:
2747 			 * force WTHRESH to 1 to avoid descriptor
2748 			 * write-back sometimes not being triggered.
2749 			 */
2750 			rxdctl |= 0x10000;
2751 			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2752 		}
2753 		else
2754 			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2755 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2756 	}
2757 
2758 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2759 		if (!dev->data->scattered_rx)
2760 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2761 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2762 		dev->data->scattered_rx = 1;
2763 	}
2764 
2765 	/*
2766 	 * Setup the HW Rx Head and Tail Descriptor Pointers.
2767 	 * This needs to be done after enable.
2768 	 */
2769 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2770 		rxq = dev->data->rx_queues[i];
2771 		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2772 		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2773 	}
2774 
2775 	return 0;
2776 }
2777 
2778 /*********************************************************************
2779  *
2780  *  Enable VF transmit unit.
2781  *
2782  **********************************************************************/
2783 void
2784 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2785 {
2786 	struct e1000_hw     *hw;
2787 	struct igb_tx_queue *txq;
2788 	uint32_t txdctl;
2789 	uint16_t i;
2790 
2791 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2792 
2793 	/* Setup the Base and Length of the Tx Descriptor Rings. */
2794 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2795 		uint64_t bus_addr;
2796 
2797 		txq = dev->data->tx_queues[i];
2798 		bus_addr = txq->tx_ring_phys_addr;
2799 		E1000_WRITE_REG(hw, E1000_TDLEN(i),
2800 				txq->nb_tx_desc *
2801 				sizeof(union e1000_adv_tx_desc));
2802 		E1000_WRITE_REG(hw, E1000_TDBAH(i),
2803 				(uint32_t)(bus_addr >> 32));
2804 		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2805 
2806 		/* Setup the HW Tx Head and Tail descriptor pointers. */
2807 		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2808 		E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2809 
2810 		/* Setup Transmit threshold registers. */
2811 		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2812 		txdctl |= txq->pthresh & 0x1F;
2813 		txdctl |= ((txq->hthresh & 0x1F) << 8);
2814 		if (hw->mac.type == e1000_82576) {
2815 			/*
2816 			 * Workaround for the 82576 VF erratum:
2817 			 * force WTHRESH to 1 to avoid descriptor
2818 			 * write-back sometimes not being triggered.
2819 			 */
2820 			txdctl |= 0x10000;
2821 			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2822 		}
2823 		else
2824 			txdctl |= ((txq->wthresh & 0x1F) << 16);
2825 		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2826 		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2827 	}
2828 
2829 }
2830 
2831 void
2832 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2833 	struct rte_eth_rxq_info *qinfo)
2834 {
2835 	struct igb_rx_queue *rxq;
2836 
2837 	rxq = dev->data->rx_queues[queue_id];
2838 
2839 	qinfo->mp = rxq->mb_pool;
2840 	qinfo->scattered_rx = dev->data->scattered_rx;
2841 	qinfo->nb_desc = rxq->nb_rx_desc;
2842 
2843 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2844 	qinfo->conf.rx_drop_en = rxq->drop_en;
2845 	qinfo->conf.offloads = rxq->offloads;
2846 }
2847 
2848 void
2849 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2850 	struct rte_eth_txq_info *qinfo)
2851 {
2852 	struct igb_tx_queue *txq;
2853 
2854 	txq = dev->data->tx_queues[queue_id];
2855 
2856 	qinfo->nb_desc = txq->nb_tx_desc;
2857 
2858 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2859 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2860 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2861 	qinfo->conf.offloads = txq->offloads;
2862 }
2863 
2864 int
2865 igb_rss_conf_init(struct rte_eth_dev *dev,
2866 		  struct igb_rte_flow_rss_conf *out,
2867 		  const struct rte_flow_action_rss *in)
2868 {
2869 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2870 
2871 	if (in->key_len > RTE_DIM(out->key) ||
2872 	    ((hw->mac.type == e1000_82576) &&
2873 	     (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
2874 	    ((hw->mac.type != e1000_82576) &&
2875 	     (in->queue_num > IGB_MAX_RX_QUEUE_NUM)))
2876 		return -EINVAL;
2877 	out->conf = (struct rte_flow_action_rss){
2878 		.func = in->func,
2879 		.level = in->level,
2880 		.types = in->types,
2881 		.key_len = in->key_len,
2882 		.queue_num = in->queue_num,
2883 		.key = memcpy(out->key, in->key, in->key_len),
2884 		.queue = memcpy(out->queue, in->queue,
2885 				sizeof(*in->queue) * in->queue_num),
2886 	};
2887 	return 0;
2888 }
2889 
2890 int
2891 igb_action_rss_same(const struct rte_flow_action_rss *comp,
2892 		    const struct rte_flow_action_rss *with)
2893 {
2894 	return (comp->func == with->func &&
2895 		comp->level == with->level &&
2896 		comp->types == with->types &&
2897 		comp->key_len == with->key_len &&
2898 		comp->queue_num == with->queue_num &&
2899 		!memcmp(comp->key, with->key, with->key_len) &&
2900 		!memcmp(comp->queue, with->queue,
2901 			sizeof(*with->queue) * with->queue_num));
2902 }
2903 
2904 int
2905 igb_config_rss_filter(struct rte_eth_dev *dev,
2906 		struct igb_rte_flow_rss_conf *conf, bool add)
2907 {
2908 	uint32_t shift;
2909 	uint16_t i, j;
2910 	struct rte_eth_rss_conf rss_conf = {
2911 		.rss_key = conf->conf.key_len ?
2912 			(void *)(uintptr_t)conf->conf.key : NULL,
2913 		.rss_key_len = conf->conf.key_len,
2914 		.rss_hf = conf->conf.types,
2915 	};
2916 	struct e1000_filter_info *filter_info =
2917 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2918 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2919 
2920 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2921 
2922 	if (!add) {
2923 		if (igb_action_rss_same(&filter_info->rss_info.conf,
2924 					&conf->conf)) {
2925 			igb_rss_disable(dev);
2926 			memset(&filter_info->rss_info, 0,
2927 				sizeof(struct igb_rte_flow_rss_conf));
2928 			return 0;
2929 		}
2930 		return -EINVAL;
2931 	}
2932 
2933 	if (filter_info->rss_info.conf.queue_num)
2934 		return -EINVAL;
2935 
2936 	/* Fill in redirection table. */
2937 	shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2938 	for (i = 0, j = 0; i < 128; i++, j++) {
2939 		union e1000_reta {
2940 			uint32_t dword;
2941 			uint8_t  bytes[4];
2942 		} reta;
2943 		uint8_t q_idx;
2944 
2945 		if (j == conf->conf.queue_num)
2946 			j = 0;
2947 		q_idx = conf->conf.queue[j];
2948 		reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
2949 		if ((i & 3) == 3)
2950 			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2951 	}
2952 
2953 	/* Configure the RSS key and the RSS protocols used to compute
2954 	 * the RSS hash of input packets.
2955 	 */
2956 	if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2957 		igb_rss_disable(dev);
2958 		return 0;
2959 	}
2960 	if (rss_conf.rss_key == NULL)
2961 		rss_conf.rss_key = rss_intel_key; /* Default hash key */
2962 	igb_hw_rss_hash_set(hw, &rss_conf);
2963 
2964 	if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf))
2965 		return -EINVAL;
2966 
2967 	return 0;
2968 }
2969