1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
3 * All rights reserved.
4 */
5
6 #include <rte_malloc.h>
7 #include <rte_alarm.h>
8 #include <rte_cycles.h>
9
10 #include "bnxt.h"
11 #include "bnxt_hwrm.h"
12 #include "bnxt_ring.h"
13 #include "hsi_struct_def_dpdk.h"
14
bnxt_wait_for_device_shutdown(struct bnxt * bp)15 void bnxt_wait_for_device_shutdown(struct bnxt *bp)
16 {
17 uint32_t val, timeout;
18
19 /* if HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD is set
20 * in HWRM_FUNC_QCAPS command, wait for FW_STATUS to set
21 * the SHUTDOWN bit in health register
22 */
23 if (!(bp->recovery_info &&
24 (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)))
25 return;
26
27 /* Driver has to wait for fw_reset_max_msecs or shutdown bit which comes
28 * first for FW to collect crash dump.
29 */
30 timeout = bp->fw_reset_max_msecs;
31
32 /* Driver has to poll for shutdown bit in fw_status register
33 *
34 * 1. in case of hot fw upgrade, this bit will be set after all
35 * function drivers unregistered with fw.
36 * 2. in case of fw initiated error recovery, this bit will be
37 * set after fw has collected the core dump
38 */
39 do {
40 val = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
41 if (val & BNXT_FW_STATUS_SHUTDOWN)
42 return;
43
44 rte_delay_ms(100);
45 timeout -= 100;
46 } while (timeout);
47 }
48
49 static void
bnxt_process_default_vnic_change(struct bnxt * bp,struct hwrm_async_event_cmpl * async_cmp)50 bnxt_process_default_vnic_change(struct bnxt *bp,
51 struct hwrm_async_event_cmpl *async_cmp)
52 {
53 uint16_t vnic_state, vf_fid, vf_id;
54 struct bnxt_representor *vf_rep_bp;
55 struct rte_eth_dev *eth_dev;
56 bool vfr_found = false;
57 uint32_t event_data;
58
59 if (!BNXT_TRUFLOW_EN(bp))
60 return;
61
62 PMD_DRV_LOG(INFO, "Default vnic change async event received\n");
63 event_data = rte_le_to_cpu_32(async_cmp->event_data1);
64
65 vnic_state = (event_data & BNXT_DEFAULT_VNIC_STATE_MASK) >>
66 BNXT_DEFAULT_VNIC_STATE_SFT;
67 if (vnic_state != BNXT_DEFAULT_VNIC_ALLOC)
68 return;
69
70 if (!bp->rep_info)
71 return;
72
73 vf_fid = (event_data & BNXT_DEFAULT_VNIC_CHANGE_VF_ID_MASK) >>
74 BNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT;
75 PMD_DRV_LOG(INFO, "async event received vf_id 0x%x\n", vf_fid);
76
77 for (vf_id = 0; vf_id < BNXT_MAX_VF_REPS; vf_id++) {
78 eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
79 if (!eth_dev)
80 continue;
81 vf_rep_bp = eth_dev->data->dev_private;
82 if (vf_rep_bp &&
83 vf_rep_bp->fw_fid == vf_fid) {
84 vfr_found = true;
85 break;
86 }
87 }
88 if (!vfr_found)
89 return;
90
91 bnxt_rep_dev_start_op(eth_dev);
92 }
93
94 /*
95 * Async event handling
96 */
/*
 * Dispatch a firmware async-event completion.  Handles link changes, VF
 * config changes, firmware reset notifications, error-recovery capability
 * updates, debug notifications and default-VNIC changes; unknown event IDs
 * are logged at DEBUG level and ignored.
 */
void bnxt_handle_async_event(struct bnxt *bp,
			     struct cmpl_base *cmp)
{
	struct hwrm_async_event_cmpl *async_cmp =
		(struct hwrm_async_event_cmpl *)cmp;
	uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id);
	struct bnxt_error_recovery_info *info;
	uint32_t event_data;

	switch (event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/* FALLTHROUGH: all three link events refresh link status */
		bnxt_link_update_op(bp->eth_dev, 0);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
		/* Re-query our own function config to pick up the change */
		bnxt_hwrm_func_qcfg(bp, NULL);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
		PMD_DRV_LOG(INFO, "Port conn async event\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
		/*
		 * Avoid any rx/tx packet processing during firmware reset
		 * operation.
		 */
		bnxt_stop_rxtx(bp);

		/* Ignore reset notify async events when stopping the port */
		if (!bp->eth_dev->data->dev_started) {
			bp->flags |= BNXT_FLAG_FATAL_ERROR;
			return;
		}

		event_data = rte_le_to_cpu_32(async_cmp->event_data1);
		/* timestamp_lo/hi values are in units of 100ms; fall back
		 * to the driver's min/max defaults when FW reports zero.
		 * (timestamp_lo needs no byte-swap; only timestamp_hi does.)
		 */
		bp->fw_reset_max_msecs = async_cmp->timestamp_hi ?
			rte_le_to_cpu_16(async_cmp->timestamp_hi) * 100 :
			BNXT_MAX_FW_RESET_TIMEOUT;
		bp->fw_reset_min_msecs = async_cmp->timestamp_lo ?
			async_cmp->timestamp_lo * 100 :
			BNXT_MIN_FW_READY_TIMEOUT;
		if ((event_data & EVENT_DATA1_REASON_CODE_MASK) ==
		    EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) {
			PMD_DRV_LOG(INFO,
				    "Firmware fatal reset event received\n");
			bp->flags |= BNXT_FLAG_FATAL_ERROR;
		} else {
			PMD_DRV_LOG(INFO,
				    "Firmware non-fatal reset event received\n");
		}

		/* Defer the actual reset/resume to an EAL alarm callback so
		 * it runs outside the completion-processing context.
		 */
		bp->flags |= BNXT_FLAG_FW_RESET;
		rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
				  (void *)bp);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY:
		info = bp->recovery_info;

		if (!info)
			return;

		PMD_DRV_LOG(INFO, "Error recovery async event received\n");

		event_data = rte_le_to_cpu_32(async_cmp->event_data1) &
				EVENT_DATA1_FLAGS_MASK;

		/* FW tells us whether this function is the recovery master
		 * and whether recovery is enabled at all; mirror both flags.
		 */
		if (event_data & EVENT_DATA1_FLAGS_MASTER_FUNC)
			info->flags |= BNXT_FLAG_MASTER_FUNC;
		else
			info->flags &= ~BNXT_FLAG_MASTER_FUNC;

		if (event_data & EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
			info->flags |= BNXT_FLAG_RECOVERY_ENABLED;
		else
			info->flags &= ~BNXT_FLAG_RECOVERY_ENABLED;

		PMD_DRV_LOG(INFO, "recovery enabled(%d), master function(%d)\n",
			    bnxt_is_recovery_enabled(bp),
			    bnxt_is_master_func(bp));

		/* Health check already running; nothing more to schedule */
		if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
			return;

		/* Snapshot heartbeat/reset counters as the baseline the
		 * periodic health check compares against.
		 */
		info->last_heart_beat =
			bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
		info->last_reset_counter =
			bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);

		bnxt_schedule_fw_health_check(bp);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
		PMD_DRV_LOG(INFO, "DNC event: evt_data1 %#x evt_data2 %#x\n",
			    rte_le_to_cpu_32(async_cmp->event_data1),
			    rte_le_to_cpu_32(async_cmp->event_data2));
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE:
		bnxt_process_default_vnic_change(bp, async_cmp);
		break;
	default:
		PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
		break;
	}
}
206
bnxt_handle_fwd_req(struct bnxt * bp,struct cmpl_base * cmpl)207 void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
208 {
209 struct hwrm_exec_fwd_resp_input *fwreq;
210 struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
211 struct input *fwd_cmd;
212 uint16_t fw_vf_id;
213 uint16_t vf_id;
214 uint16_t req_len;
215 int rc;
216
217 if (bp->pf->active_vfs <= 0) {
218 PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
219 return;
220 }
221
222 /* Qualify the fwd request */
223 fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
224 vf_id = fw_vf_id - bp->pf->first_vf_id;
225
226 req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
227 HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
228 HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;
229 if (req_len > sizeof(fwreq->encap_request))
230 req_len = sizeof(fwreq->encap_request);
231
232 /* Locate VF's forwarded command */
233 fwd_cmd = (struct input *)bp->pf->vf_info[vf_id].req_buf;
234
235 if (fw_vf_id < bp->pf->first_vf_id ||
236 fw_vf_id >= bp->pf->first_vf_id + bp->pf->active_vfs) {
237 PMD_DRV_LOG(ERR,
238 "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
239 fw_vf_id, bp->pf->first_vf_id,
240 (bp->pf->first_vf_id) + bp->pf->active_vfs - 1,
241 bp->pf->first_vf_id, bp->pf->active_vfs);
242 goto reject;
243 }
244
245 if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd)) {
246 /*
247 * In older firmware versions, the MAC had to be all zeros for
248 * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all
249 * zeros if it's being configured and has been ok'd by caller.
250 */
251 if (fwd_cmd->req_type == HWRM_FUNC_VF_CFG) {
252 struct hwrm_func_vf_cfg_input *vfc = (void *)fwd_cmd;
253
254 if (vfc->enables &
255 HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR) {
256 bnxt_hwrm_func_vf_mac(bp, vf_id,
257 (const uint8_t *)"\x00\x00\x00\x00\x00");
258 }
259 }
260
261 if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
262 struct hwrm_cfa_l2_set_rx_mask_input *srm =
263 (void *)fwd_cmd;
264
265 srm->vlan_tag_tbl_addr = rte_cpu_to_le_64(0);
266 srm->num_vlan_tags = rte_cpu_to_le_32(0);
267 srm->mask &= ~rte_cpu_to_le_32(
268 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY |
269 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
270 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
271 }
272
273 /* Forward */
274 rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
275 if (rc) {
276 PMD_DRV_LOG(ERR,
277 "Failed to send FWD req VF 0x%x, type 0x%x.\n",
278 fw_vf_id - bp->pf->first_vf_id,
279 rte_le_to_cpu_16(fwd_cmd->req_type));
280 }
281 return;
282 }
283
284 reject:
285 rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
286 if (rc) {
287 PMD_DRV_LOG(ERR,
288 "Failed to send REJECT req VF 0x%x, type 0x%x.\n",
289 fw_vf_id - bp->pf->first_vf_id,
290 rte_le_to_cpu_16(fwd_cmd->req_type));
291 }
292
293 return;
294 }
295
bnxt_event_hwrm_resp_handler(struct bnxt * bp,struct cmpl_base * cmp)296 int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
297 {
298 bool evt = 0;
299
300 if (bp == NULL || cmp == NULL) {
301 PMD_DRV_LOG(ERR, "invalid NULL argument\n");
302 return evt;
303 }
304
305 if (unlikely(is_bnxt_in_error(bp)))
306 return 0;
307
308 switch (CMP_TYPE(cmp)) {
309 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
310 /* Handle any async event */
311 bnxt_handle_async_event(bp, cmp);
312 evt = 1;
313 break;
314 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
315 /* Handle HWRM forwarded responses */
316 bnxt_handle_fwd_req(bp, cmp);
317 evt = 1;
318 break;
319 default:
320 /* Ignore any other events */
321 PMD_DRV_LOG(DEBUG, "Ignoring %02x completion\n", CMP_TYPE(cmp));
322 break;
323 }
324
325 return evt;
326 }
327
bnxt_is_master_func(struct bnxt * bp)328 bool bnxt_is_master_func(struct bnxt *bp)
329 {
330 if (bp->recovery_info->flags & BNXT_FLAG_MASTER_FUNC)
331 return true;
332
333 return false;
334 }
335
bnxt_is_recovery_enabled(struct bnxt * bp)336 bool bnxt_is_recovery_enabled(struct bnxt *bp)
337 {
338 struct bnxt_error_recovery_info *info;
339
340 info = bp->recovery_info;
341 if (info && (info->flags & BNXT_FLAG_RECOVERY_ENABLED))
342 return true;
343
344 return false;
345 }
346
bnxt_stop_rxtx(struct bnxt * bp)347 void bnxt_stop_rxtx(struct bnxt *bp)
348 {
349 bp->eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
350 bp->eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
351 }
352