/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_rxtx.h"

static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
{
	return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
		RTE_ETHER_CRC_LEN + VLAN_HLEN;
}

/* query busy bit */
static int mdio_complete(struct axgbe_port *pdata)
{
	if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
		return 1;

	return 0;
}

static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
				    int reg, u16 val)
{
	unsigned int mdio_sca, mdio_sccd;
	uint64_t timeout;

	mdio_sca = 0;
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
	while (time_before(rte_get_timer_cycles(), timeout)) {
		rte_delay_us(100);
		if (mdio_complete(pdata))
			return 0;
	}

	PMD_DRV_LOG(ERR, "Mdio write operation timed out\n");
	return -ETIMEDOUT;
}

static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
				   int reg)
{
	unsigned int mdio_sca, mdio_sccd;
	uint64_t timeout;

	mdio_sca = 0;
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	timeout = rte_get_timer_cycles() + rte_get_timer_hz();

	while (time_before(rte_get_timer_cycles(), timeout)) {
		rte_delay_us(100);
		if (mdio_complete(pdata))
			goto success;
	}

	PMD_DRV_LOG(ERR, "Mdio read operation timed out\n");
	return -ETIMEDOUT;

success:
	return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}

static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
				  enum axgbe_mdio_mode mode)
{
	unsigned int reg_val = 0;

	switch (mode) {
	case AXGBE_MDIO_MODE_CL22:
		if (port > AXGMAC_MAX_C22_PORT)
			return -EINVAL;
		reg_val |= (1 << port);
		break;
	case AXGBE_MDIO_MODE_CL45:
		break;
	default:
		return -EINVAL;
	}
	AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

	return 0;
}

static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
				  int prtad __rte_unused, int mmd_reg)
{
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing the PCS registers in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
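	/* Illustrative sketch of the window split (the concrete numbers are
	 * assumptions for the example, not hardware facts): with an
	 * xpcs_window_mask of 0xffff and xpcs_window of 0, an mmd_address of
	 * 0x010005 (MMD 1, register 5) shifts to 0x02000a; the upper bits
	 * (0x020000) select the window written to xpcs_window_sel_reg and
	 * the low bits (0x000a) become the 16-bit offset that is read.
	 */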
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);

	pthread_mutex_unlock(&pdata->xpcs_mutex);

	return mmd_data;
}

static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
				    int prtad __rte_unused,
				    int mmd_reg, int mmd_data)
{
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing the PCS registers in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);

	pthread_mutex_unlock(&pdata->xpcs_mutex);
}

static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
			       int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case AXGBE_XPCS_ACCESS_V1:
		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
		return -1;
	case AXGBE_XPCS_ACCESS_V2:
	default:
		return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
	}
}

static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
				 int mmd_reg, int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case AXGBE_XPCS_ACCESS_V1:
		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
		return;
	case AXGBE_XPCS_ACCESS_V2:
	default:
		return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
	}
}

static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return -EINVAL;
	}

	if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return 0;
}

static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = RTE_MIN(pdata->tx_q_count,
			  max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = AXGMAC_IOREAD(pdata, reg);
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		AXGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		/* Flow control thresholds are established */
		if (pdata->rx_rfd[i])
			ehfc = 1;

		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

		PMD_DRV_LOG(DEBUG, "flow control %s for RXq%u\n",
			    ehfc ? "enabled" : "disabled", i);
	}

	/* Set MAC flow control */
	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = RTE_MIN(pdata->tx_q_count,
			  max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = AXGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
		/* Set pause time */
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		AXGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
{
	if (pdata->tx_pause)
		axgbe_enable_tx_flow_control(pdata);
	else
		axgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
{
	if (pdata->rx_pause)
		axgbe_enable_rx_flow_control(pdata);
	else
		axgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void axgbe_config_flow_control(struct axgbe_port *pdata)
{
	axgbe_config_tx_flow_control(pdata);
	axgbe_config_rx_flow_control(pdata);

	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}

static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
					       unsigned int queue,
					       unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));

	/* This path deals with just maximum frame sizes which are
	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
	 * so we can never exceed the maximum allowable RFA/RFD
	 * values.
	 */
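	/* Note on the fixed values used below (an inference from the byte
	 * counts in the adjacent comments, not a datasheet statement):
	 * RFA/RFD appear to be programmed in 512-byte steps measured down
	 * from a full fifo, so a value of N roughly means "fifo full minus
	 * (N + 2) * 512 bytes" (0 -> full - 1024, 1 -> full - 1536,
	 * 2 -> full - 2048, 5 -> full - 3584). The computed branches rely on
	 * AXGMAC_FLOW_CONTROL_VALUE() to do the same byte-to-register
	 * conversion.
	 */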
	if (q_fifo_size <= 2048) {
		/* Set rx_rfa/rx_rfd to zero to signal no flow control */
		pdata->rx_rfa[queue] = 0;
		pdata->rx_rfd[queue] = 0;
		return;
	}

	if (q_fifo_size <= 4096) {
		/* Between 2048 and 4096 */
		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
		return;
	}

	if (q_fifo_size <= frame_fifo_size) {
		/* Between 4096 and max-frame */
		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
		return;
	}

	if (q_fifo_size <= (frame_fifo_size * 3)) {
		/* Between max-frame and 3 max-frames,
		 * trigger if we get just over a frame of data and
		 * resume when we have just under half a frame left.
		 */
		rfa = q_fifo_size - frame_fifo_size;
		rfd = rfa + (frame_fifo_size / 2);
	} else {
		/* Above 3 max-frames - trigger when just over
		 * 2 frames of space available
		 */
		rfa = frame_fifo_size * 2;
		rfa += AXGMAC_FLOW_CONTROL_UNIT;
		rfd = rfa + frame_fifo_size;
	}

	pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
	pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
}

static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
{
	unsigned int q_fifo_size;
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;

		axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
	}
}

static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
					pdata->rx_rfa[i]);
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
					pdata->rx_rfd[i]);
	}
}

static int __axgbe_exit(struct axgbe_port *pdata)
{
	unsigned int count = 2000;

	/* Issue a software reset */
	AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	rte_delay_us(10);

	/* Poll until the software reset completes */
	while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		rte_delay_us(500);

	if (!count)
		return -EBUSY;

	return 0;
}

static int axgbe_exit(struct axgbe_port *pdata)
{
	int ret;

	/* To guard against possibly incorrectly generated interrupts,
	 * issue the software reset twice.
	 */
	ret = __axgbe_exit(pdata);
	if (ret)
		return ret;

	return __axgbe_exit(pdata);
}

static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
{
	unsigned int i, count;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll until the flush completes */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
							 MTL_Q_TQOMR, FTQ))
			rte_delay_us(500);

		if (!count)
			return -EBUSY;
	}

	return 0;
}

static void axgbe_config_dma_bus(struct axgbe_port *pdata)
{
	/* Set enhanced addressing mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Outstanding read/write requests */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);

	/* Set the System Bus mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
}

static void axgbe_config_dma_cache(struct axgbe_port *pdata)
{
	unsigned int arcache, awcache, arwcache;

	arcache = 0;
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	awcache = 0;
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);

	arwcache = 0;
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
}

static void axgbe_config_edma_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
	AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
}

static int axgbe_config_osp_mode(struct axgbe_port *pdata)
{
	/* Force DMA to operate on second packet before closing descriptors
	 * of first packet
	 */
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
					pdata->tx_osp_mode);
	}

	return 0;
}

static int axgbe_config_pblx8(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
					pdata->pblx8);
	}
	return 0;
}

static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
					pdata->tx_pbl);
	}

	return 0;
}

static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
					pdata->rx_pbl);
	}

	return 0;
}

static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];

		rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM;
		rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
			~(AXGBE_RX_BUF_ALIGN - 1);

		if (rxq->buf_size > pdata->rx_buf_size)
			pdata->rx_buf_size = rxq->buf_size;

		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
					rxq->buf_size);
	}
}

static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
			       unsigned int index, unsigned int val)
{
	unsigned int wait;

	if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
		return -EBUSY;

	AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			return 0;

		rte_delay_us(1500);
	}

	return -EBUSY;
}

int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
{
	struct rte_eth_rss_conf *rss_conf;
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key;
	int ret;

	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;

	if (!rss_conf->rss_key)
		key = (unsigned int *)&pdata->rss_key;
	else
		key = (unsigned int *)rss_conf->rss_key;

	while (key_regs--) {
		ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
					  key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = axgbe_write_rss_reg(pdata,
					  AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
					  pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int axgbe_enable_rss(struct axgbe_port *pdata)
{
	int ret;

	/* Program the hash key */
	ret = axgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = axgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

static void axgbe_rss_options(struct axgbe_port *pdata)
{
	struct rte_eth_rss_conf *rss_conf;
	uint64_t rss_hf;

	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	pdata->rss_hf = rss_conf->rss_hf;
	rss_hf = rss_conf->rss_hf;

	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
}

static int axgbe_config_rss(struct axgbe_port *pdata)
{
	uint32_t i;

	if (pdata->rss_enable) {
		/* Initialize RSS hash key and lookup table */
		uint32_t *key = (uint32_t *)pdata->rss_key;

		for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
			*key++ = (uint32_t)rte_rand();
		for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
			AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
					i % pdata->eth_dev->data->nb_rx_queues);
		axgbe_rss_options(pdata);
		if (axgbe_enable_rss(pdata)) {
			PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
			return -1;
		}
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
	}

	return 0;
}

static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];

		/* Clear all the interrupts which are set */
		dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		/* Enable following Rx interrupts
		 *   RBUE - Receive Buffer Unavailable Enable
		 *   RIE  - Receive Interrupt Enable (unless using
		 *          per channel interrupts in edge triggered
		 *          mode)
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);

		AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
	}
}

static void wrapper_tx_desc_init(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		txq->cur = 0;
		txq->dirty = 0;
		/* Update the total number of Tx descriptors */
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
		/* Update the starting address of descriptor ring */
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
				   high32_value(txq->ring_phys_addr));
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
				   low32_value(txq->ring_phys_addr));
	}
}

static int wrapper_rx_desc_init(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	struct rte_mbuf *mbuf;
	volatile union axgbe_rx_desc *desc;
	unsigned int i, j;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];

		/* Initialize software ring entries */
		rxq->mbuf_alloc = 0;
		rxq->cur = 0;
		rxq->dirty = 0;
		desc = AXGBE_GET_DESC_PT(rxq, 0);

		for (j = 0; j < rxq->nb_desc; j++) {
			mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (mbuf == NULL) {
				PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
					    (unsigned int)rxq->queue_id, j);
				axgbe_dev_rx_queue_release(rxq);
				return -ENOMEM;
			}
			rxq->sw_ring[j] = mbuf;
			/* Mbuf populate */
			mbuf->next = NULL;
			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf->nb_segs = 1;
			mbuf->port = rxq->port_id;
			desc->read.baddr =
				rte_cpu_to_le_64(
					rte_mbuf_data_iova_default(mbuf));
			rte_wmb();
			AXGMAC_SET_BITS_LE(desc->read.desc3,
					   RX_NORMAL_DESC3, OWN, 1);
			rte_wmb();
			rxq->mbuf_alloc++;
			desc++;
		}
		/* Update the total number of Rx descriptors */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
				   rxq->nb_desc - 1);
		/* Update the starting address of descriptor ring */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
				   high32_value(rxq->ring_phys_addr));
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
				   low32_value(rxq->ring_phys_addr));
		/* Update the Rx Descriptor Tail Pointer */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
					       (rxq->nb_desc - 1) *
					       sizeof(union axgbe_rx_desc)));
	}
	return 0;
}

static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					MTL_TSA_ETS);
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
				     unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
				     unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

/* Distributing fifo size */
static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
{
	unsigned int fifo_size;
	unsigned int q_fifo_size;
	unsigned int p_fifo, i;

	fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
			    pdata->hw_feat.rx_fifo_size);
	q_fifo_size = fifo_size / pdata->rx_q_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result
	 * by 1).
	 */
	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
	pdata->fifo = p_fifo;

	/* Calculate and configure the flow control thresholds */
	axgbe_calculate_flow_control_threshold(pdata);
	axgbe_config_flow_control_threshold(pdata);

	PMD_DRV_LOG(DEBUG, "%d Rx hardware queues, %d byte fifo per queue\n",
		    pdata->rx_q_count, q_fifo_size);
}

static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
{
	unsigned int fifo_size;
	unsigned int q_fifo_size;
	unsigned int p_fifo, i;

	fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
			    pdata->hw_feat.tx_fifo_size);
	q_fifo_size = fifo_size / pdata->tx_q_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result
	 * by 1).
	 */
	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);

	PMD_DRV_LOG(DEBUG, "%d Tx hardware queues, %d byte fifo per queue\n",
		    pdata->tx_q_count, q_fifo_size);
}

static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 * Note: Tx Queues >= Traffic Classes
	 */
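	/* Worked example (hypothetical queue counts, not a driver default):
	 * with tx_q_count = 4 and tc_cnt = 3, qptc = 1 and qptc_extra = 1,
	 * so TC0 gets TXq0 and TXq1 (it absorbs the remainder queue) while
	 * TC1 gets TXq2 and TC2 gets TXq3.
	 */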
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++, queue++) {
			PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
						Q2TCMAP, i);
		}
		if (i < qptc_extra) {
			PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
						Q2TCMAP, i);
			queue++;
		}
	}

	if (pdata->rss_enable) {
		/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
		reg = MTL_RQDCM0R;
		reg_val = 0;
		for (i = 0; i < pdata->rx_q_count;) {
			reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

			if ((i % MTL_RQDCM_Q_PER_REG) &&
			    (i != pdata->rx_q_count))
				continue;

			AXGMAC_IOWRITE(pdata, reg, reg_val);

			reg += MTL_RQDCM_INC;
			reg_val = 0;
		}
	}
}

static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

static uint32_t bitrev32(uint32_t x)
{
	x = (x >> 16) | (x << 16);
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	return x;
}

static uint32_t crc32_le(uint32_t crc, uint8_t *p, uint32_t len)
{
	int i;
	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}
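
/* bitrev32() and crc32_le() together reproduce the hash the MAC computes
 * over a destination address: crc32_le() is the reflected (little-endian)
 * CRC-32 used by Ethernet, with the polynomial expressed as 0xedb88320 in
 * this bit order, and the final bit reversal puts the most significant CRC
 * bits on top so that axgbe_set_mac_hash_table() below can take the high
 * bits as the hash table register index and bit position.
 */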

void axgbe_set_mac_hash_table(struct axgbe_port *pdata, u8 *addr, bool add)
{
	uint32_t crc, htable_index, htable_bitmask;

	crc = bitrev32(~crc32_le(~0, addr, RTE_ETHER_ADDR_LEN));
	crc >>= pdata->hash_table_shift;
	htable_index = crc >> 5;
	htable_bitmask = 1 << (crc & 0x1f);

	if (add) {
		pdata->uc_hash_table[htable_index] |= htable_bitmask;
		pdata->uc_hash_mac_addr++;
	} else {
		pdata->uc_hash_table[htable_index] &= ~htable_bitmask;
		pdata->uc_hash_mac_addr--;
	}
	PMD_DRV_LOG(DEBUG, "%s MAC hash table Bit %d at Index %#x\n",
		    add ? "set" : "clear", (crc & 0x1f), htable_index);

	AXGMAC_IOWRITE(pdata, MAC_HTR(htable_index),
		       pdata->uc_hash_table[htable_index]);
}

void axgbe_set_mac_addn_addr(struct axgbe_port *pdata, u8 *addr, uint32_t index)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (addr) {
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = addr[0];
		mac_addr[1] = addr[1];
		mac_addr[2] = addr[2];
		mac_addr[3] = addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = addr[4];
		mac_addr[1] = addr[5];

		/* Address Enable: Use this Addr for Perfect Filtering */
		AXGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	PMD_DRV_LOG(DEBUG, "%s mac address at %#x\n",
		    addr ? "set" : "clear", index);

	AXGMAC_IOWRITE(pdata, MAC_MACAHR(index), mac_addr_hi);
	AXGMAC_IOWRITE(pdata, MAC_MACALR(index), mac_addr_lo);
}

static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] << 8) | (addr[0] << 0);

	AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static void axgbe_config_mac_hash_table(struct axgbe_port *pdata)
{
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	pdata->hash_table_shift = 0;
	pdata->hash_table_count = 0;
	pdata->uc_hash_mac_addr = 0;
	memset(pdata->uc_hash_table, 0, sizeof(pdata->uc_hash_table));

	if (hw_feat->hash_table_size) {
		pdata->hash_table_shift = 26 - (hw_feat->hash_table_size >> 7);
		pdata->hash_table_count = hw_feat->hash_table_size / 32;
	}
}

static void axgbe_config_mac_address(struct axgbe_port *pdata)
{
	axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
}

static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
{
	unsigned int val;

	val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;

	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void axgbe_config_mac_speed(struct axgbe_port *pdata)
{
	axgbe_set_speed(pdata, pdata->phy_speed);
}

static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
{
	if (pdata->rx_csum_enable)
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
	else
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
}

static void axgbe_config_mmc(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	/* Set counters to reset on read */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}

static int axgbe_init(struct axgbe_port *pdata)
{
	int ret;

	/* Flush Tx queues */
	ret = axgbe_flush_tx_queues(pdata);
	if (ret)
		return ret;
	/* Initialize DMA related features */
	axgbe_config_dma_bus(pdata);
	axgbe_config_dma_cache(pdata);
	axgbe_config_edma_control(pdata);
	axgbe_config_osp_mode(pdata);
	axgbe_config_pblx8(pdata);
	axgbe_config_tx_pbl_val(pdata);
	axgbe_config_rx_pbl_val(pdata);
	axgbe_config_rx_buffer_size(pdata);
	axgbe_config_rss(pdata);
	wrapper_tx_desc_init(pdata);
	ret = wrapper_rx_desc_init(pdata);
	if (ret)
		return ret;
	axgbe_enable_dma_interrupts(pdata);

	/* Initialize MTL related features */
	axgbe_config_mtl_mode(pdata);
	axgbe_config_queue_mapping(pdata);
	axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	axgbe_config_tx_fifo_size(pdata);
	axgbe_config_rx_fifo_size(pdata);

	axgbe_enable_mtl_interrupts(pdata);

	/* Initialize MAC related features */
	axgbe_config_mac_hash_table(pdata);
	axgbe_config_mac_address(pdata);
	axgbe_config_jumbo_enable(pdata);
	axgbe_config_flow_control(pdata);
	axgbe_config_mac_speed(pdata);
	axgbe_config_checksum_offload(pdata);
	axgbe_config_mmc(pdata);

	return 0;
}

void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
{
	hw_if->exit = axgbe_exit;
	hw_if->config_flow_control = axgbe_config_flow_control;

	hw_if->init = axgbe_init;

	hw_if->read_mmd_regs = axgbe_read_mmd_regs;
	hw_if->write_mmd_regs = axgbe_write_mmd_regs;

	hw_if->set_speed = axgbe_set_speed;

	hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
	hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
	hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;
	/* For FLOW ctrl */
	hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
}