/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_ethdev.h"

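/* Allocate or release the backpressure ID (BPID) for the Rx channel
 * through the AF mailbox; skipped entirely on SDP interfaces.
 */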
int
otx2_nix_rxchan_bpid_cfg(struct rte_eth_dev *eth_dev, bool enb)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_fc_info *fc = &dev->fc_info;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_bp_cfg_req *req;
	struct nix_bp_cfg_rsp *rsp;
	int rc;

	if (otx2_dev_is_sdp(dev))
		return 0;

	if (enb) {
		req = otx2_mbox_alloc_msg_nix_bp_enable(mbox);
		req->chan_base = 0;
		req->chan_cnt = 1;
		req->bpid_per_chan = 0;

		rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
		if (rc) {
			otx2_err("Failed to enable backpressure, rc=%d", rc);
			return rc;
		}
		if (req->chan_cnt != rsp->chan_cnt) {
			otx2_err("Insufficient BPIDs, alloc=%u < req=%u",
				 rsp->chan_cnt, req->chan_cnt);
			return -ENOSPC;
		}

		fc->bpid[0] = rsp->chan_bpid[0];
	} else {
		req = otx2_mbox_alloc_msg_nix_bp_disable(mbox);
		req->chan_base = 0;
		req->chan_cnt = 1;

		rc = otx2_mbox_process(mbox);

		memset(fc->bpid, 0, sizeof(uint16_t) * NIX_MAX_CHAN);
	}

	return rc;
}

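/* Query the current CGX pause-frame settings (req->set = 0 denotes a
 * read) and map the Rx/Tx pause bits onto an rte_eth_fc_conf mode.
 */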
int
otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
		       struct rte_eth_fc_conf *fc_conf)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct cgx_pause_frm_cfg *req, *rsp;
	struct otx2_mbox *mbox = dev->mbox;
	int rc;

	if (otx2_dev_is_lbk(dev)) {
		fc_conf->mode = RTE_FC_NONE;
		return 0;
	}

	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
	req->set = 0;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		goto done;

	if (rsp->rx_pause && rsp->tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rsp->rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (rsp->tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

done:
	return rc;
}

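/* Program the backpressure ID and drop level into the CQ context of
 * every Rx queue using batched NIX AQ WRITE instructions.
 */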
static int
otx2_nix_cq_bp_cfg(struct rte_eth_dev *eth_dev, bool enb)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_fc_info *fc = &dev->fc_info;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;
	struct otx2_eth_rxq *rxq;
	int i, rc;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];

		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			/* The shared memory buffer may be full;
			 * flush it and retry the allocation.
			 */
			otx2_mbox_msg_send(mbox, 0);
			rc = otx2_mbox_wait_for_rsp(mbox, 0);
			if (rc < 0)
				return rc;

			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
			if (!aq)
				return -ENOMEM;
		}
		aq->qidx = rxq->rq;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

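		/* The AQ request is zeroed at alloc, so flipping a mask
		 * field yields all ones and selects that CQ context field
		 * for update by this WRITE instruction.
		 */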
		if (enb) {
			aq->cq.bpid = fc->bpid[0];
			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
			aq->cq.bp = rxq->cq_drop;
			aq->cq_mask.bp = ~(aq->cq_mask.bp);
		}

		aq->cq.bp_ena = !!enb;
		aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
	}

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_wait_for_rsp(mbox, 0);
	if (rc < 0)
		return rc;

	return 0;
}

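/* Rx flow control is realized by asserting CQ backpressure. */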
static int
otx2_nix_rx_fc_cfg(struct rte_eth_dev *eth_dev, bool enb)
{
	return otx2_nix_cq_bp_cfg(eth_dev, enb);
}

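/* Apply a new flow control mode: toggle CQ backpressure when the Tx
 * pause setting changes (pause frames are emitted when local Rx
 * congests), then update the CGX pause-frame configuration.
 */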
int
otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
		       struct rte_eth_fc_conf *fc_conf)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_fc_info *fc = &dev->fc_info;
	struct otx2_mbox *mbox = dev->mbox;
	struct cgx_pause_frm_cfg *req;
	uint8_t tx_pause, rx_pause;
	int rc = 0;

	if (otx2_dev_is_lbk(dev)) {
		otx2_info("No flow control support for LBK bound ethports");
		return -ENOTSUP;
	}

	if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
	    fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
		otx2_info("Flowctrl parameters are not supported");
		return -EINVAL;
	}

	if (fc_conf->mode == fc->mode)
		return 0;

	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
		   (fc_conf->mode == RTE_FC_RX_PAUSE);
	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
		   (fc_conf->mode == RTE_FC_TX_PAUSE);

	/* Check if TX pause frame is already enabled or not */
	if (fc->tx_pause ^ tx_pause) {
		if (otx2_dev_is_Ax(dev) && eth_dev->data->dev_started) {
			/* On Ax, the CQs must be in disabled state
			 * while the flow control configuration changes.
			 */
			otx2_info("Stop port=%d to set flow control",
				  eth_dev->data->port_id);
			return 0;
		}
		/* TX pause frames, enable/disable flowctrl on RX side. */
		rc = otx2_nix_rx_fc_cfg(eth_dev, tx_pause);
		if (rc)
			return rc;
	}

	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
	req->set = 1;
	req->rx_pause = rx_pause;
	req->tx_pause = tx_pause;

	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	fc->tx_pause = tx_pause;
	fc->rx_pause = rx_pause;
	fc->mode = fc_conf->mode;

	return rc;
}

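/* Re-apply the cached flow control mode. On Ax silicon (outside HIGIG
 * mode) the Rx-pause component is dropped first to avoid a link credit
 * deadlock.
 */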
int
otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_fc_info *fc = &dev->fc_info;
	struct rte_eth_fc_conf fc_conf;

	if (otx2_dev_is_lbk(dev) || otx2_dev_is_sdp(dev))
		return 0;

	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	fc_conf.mode = fc->mode;

	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
	if (otx2_dev_is_Ax(dev) &&
	    (dev->npc_flow.switch_header_type != OTX2_PRIV_FLAGS_HIGIG) &&
	    (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) {
		fc_conf.mode =
				(fc_conf.mode == RTE_FC_FULL ||
				fc_conf.mode == RTE_FC_TX_PAUSE) ?
				RTE_FC_TX_PAUSE : RTE_FC_NONE;
	}

	return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf);
}

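/* Sync the PMD's cached flow control state with what the AF driver
 * programmed in hardware at initialization.
 */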
int
otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_fc_info *fc = &dev->fc_info;
	struct rte_eth_fc_conf fc_conf;
	int rc;

	if (otx2_dev_is_lbk(dev) || otx2_dev_is_sdp(dev))
		return 0;

	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	/* Both Rx and Tx flow control are enabled (RTE_FC_FULL) in HW
	 * by the AF driver; update that info in the PMD structure.
	 */
	rc = otx2_nix_flow_ctrl_get(eth_dev, &fc_conf);
	if (rc)
		goto exit;

	fc->mode = fc_conf.mode;
	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
			(fc_conf.mode == RTE_FC_RX_PAUSE);
	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
			(fc_conf.mode == RTE_FC_TX_PAUSE);

exit:
	return rc;
}