/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_malloc.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "hsi_struct_def_dpdk.h"

void bnxt_wait_for_device_shutdown(struct bnxt *bp)
{
	uint32_t val, timeout;

	/* If HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD is set
	 * in the HWRM_FUNC_QCAPS response, wait for FW to set the
	 * SHUTDOWN bit in the health status register.
	 */
	if (!(bp->recovery_info &&
	      (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)))
		return;

	/* The driver waits up to fw_reset_max_msecs, or until the shutdown
	 * bit is set, whichever comes first, to give FW time to collect the
	 * crash dump.
	 */
	timeout = bp->fw_reset_max_msecs;

	/* The driver polls for the shutdown bit in the fw_status register:
	 *
	 * 1. In case of a hot FW upgrade, the bit is set after all function
	 *    drivers have unregistered with FW.
	 * 2. In case of FW-initiated error recovery, the bit is set after
	 *    FW has collected the core dump.
	 */
	do {
		val = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
		if (val & BNXT_FW_STATUS_SHUTDOWN)
			return;

		rte_delay_ms(100);
		timeout -= 100;
	} while (timeout);
}

static void
bnxt_process_default_vnic_change(struct bnxt *bp,
				 struct hwrm_async_event_cmpl *async_cmp)
{
	uint16_t vnic_state, vf_fid, vf_id;
	struct bnxt_representor *vf_rep_bp;
	struct rte_eth_dev *eth_dev;
	bool vfr_found = false;
	uint32_t event_data;

	if (!BNXT_TRUFLOW_EN(bp))
		return;

	PMD_DRV_LOG(INFO, "Default vnic change async event received\n");
	event_data = rte_le_to_cpu_32(async_cmp->event_data1);

	vnic_state = (event_data & BNXT_DEFAULT_VNIC_STATE_MASK) >>
			BNXT_DEFAULT_VNIC_STATE_SFT;
	if (vnic_state != BNXT_DEFAULT_VNIC_ALLOC)
		return;

	if (!bp->rep_info)
		return;

	vf_fid = (event_data & BNXT_DEFAULT_VNIC_CHANGE_VF_ID_MASK) >>
			BNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT;
	PMD_DRV_LOG(INFO, "async event received vf_fid 0x%x\n", vf_fid);

	for (vf_id = 0; vf_id < BNXT_MAX_VF_REPS(bp); vf_id++) {
		eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
		if (!eth_dev)
			continue;
		vf_rep_bp = eth_dev->data->dev_private;
		if (vf_rep_bp &&
		    vf_rep_bp->fw_fid == vf_fid) {
			vfr_found = true;
			break;
		}
	}
	if (!vfr_found)
		return;

	bnxt_rep_dev_start_op(eth_dev);
}

static void bnxt_handle_event_error_report(struct bnxt *bp,
					   uint32_t data1,
					   uint32_t data2)
{
	switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
		PMD_DRV_LOG(WARNING, "Port:%d Pause Storm detected!\n",
			    bp->eth_dev->data->port_id);
		break;
	default:
		PMD_DRV_LOG(INFO, "FW reported unknown error type: data1: %#x"
			    " data2: %#x\n", data1, data2);
		break;
	}
}

void bnxt_handle_vf_cfg_change(void *arg)
{
	struct bnxt *bp = arg;
	struct rte_eth_dev *eth_dev = bp->eth_dev;
	int rc;

	/* Free and recreate filters with default VLAN */
	if (eth_dev->data->dev_started) {
		rc = bnxt_dev_stop_op(eth_dev);
		if (rc != 0) {
			PMD_DRV_LOG(ERR, "Failed to stop Port:%u\n",
				    eth_dev->data->port_id);
			return;
		}

		rc = bnxt_dev_start_op(eth_dev);
		if (rc != 0)
			PMD_DRV_LOG(ERR, "Failed to start Port:%u\n",
				    eth_dev->data->port_id);
	}
}
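
/*
 * Illustrative sketch only, not part of the driver control flow: the
 * bounded-polling pattern used by bnxt_wait_for_device_shutdown() above,
 * generalized to an arbitrary status bit.  The helper name and the idea of
 * caller-supplied register-index/mask arguments are assumptions made for
 * illustration; `reg` would be one of the BNXT_FW_*_REG indices.
 */
static bool __rte_unused
bnxt_poll_fw_status_bit(struct bnxt *bp, uint32_t reg, uint32_t mask,
			uint32_t timeout_ms)
{
	/* Poll in 100ms steps, the same granularity used above, but clamp
	 * the countdown so a timeout that is not a multiple of 100 cannot
	 * wrap the unsigned counter.
	 */
	do {
		if (bnxt_read_fw_status_reg(bp, reg) & mask)
			return true;	/* bit observed before timeout */
		rte_delay_ms(100);
		timeout_ms = timeout_ms > 100 ? timeout_ms - 100 : 0;
	} while (timeout_ms);

	return false;	/* timed out */
}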

/*
 * Async event handling
 */
void
bnxt_handle_async_event(struct bnxt *bp,
			struct cmpl_base *cmp)
{
	struct hwrm_async_event_cmpl *async_cmp =
				(struct hwrm_async_event_cmpl *)cmp;
	uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id);
	uint16_t port_id = bp->eth_dev->data->port_id;
	struct bnxt_error_recovery_info *info;
	uint32_t event_data;
	uint32_t data1, data2;
	uint32_t status;

	data1 = rte_le_to_cpu_32(async_cmp->event_data1);
	data2 = rte_le_to_cpu_32(async_cmp->event_data2);

	switch (event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/* FALLTHROUGH */
		bnxt_link_update_op(bp->eth_dev, 0);
		rte_eth_dev_callback_process(bp->eth_dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		PMD_DRV_LOG(INFO, "Port %u: VF config change async event\n",
			    port_id);
		PMD_DRV_LOG(INFO, "event: data1 %#x data2 %#x\n", data1, data2);
		bnxt_hwrm_func_qcfg(bp, NULL);
		if (BNXT_VF(bp))
			rte_eal_alarm_set(1, bnxt_handle_vf_cfg_change, (void *)bp);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
		PMD_DRV_LOG(INFO, "Port conn async event\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
		/*
		 * Avoid any rx/tx packet processing during firmware reset
		 * operation.
		 */
		bnxt_stop_rxtx(bp->eth_dev);

		/* Ignore reset notify async events when stopping the port */
		if (!bp->eth_dev->data->dev_started) {
			bp->flags |= BNXT_FLAG_FATAL_ERROR;
			return;
		}

		pthread_mutex_lock(&bp->err_recovery_lock);
		event_data = data1;
		/* timestamp_lo/hi values are in units of 100ms */
		bp->fw_reset_max_msecs = async_cmp->timestamp_hi ?
			rte_le_to_cpu_16(async_cmp->timestamp_hi) * 100 :
			BNXT_MAX_FW_RESET_TIMEOUT;
		bp->fw_reset_min_msecs = async_cmp->timestamp_lo ?
			async_cmp->timestamp_lo * 100 :
			BNXT_MIN_FW_READY_TIMEOUT;
		if ((event_data & EVENT_DATA1_REASON_CODE_MASK) ==
		    EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) {
			PMD_DRV_LOG(INFO,
				    "Port %u: Firmware fatal reset event received\n",
				    port_id);
			bp->flags |= BNXT_FLAG_FATAL_ERROR;
		} else {
			PMD_DRV_LOG(INFO,
				    "Port %u: Firmware non-fatal reset event received\n",
				    port_id);
		}

		bp->flags |= BNXT_FLAG_FW_RESET;
		pthread_mutex_unlock(&bp->err_recovery_lock);
		rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
				  (void *)bp);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY:
		info = bp->recovery_info;

		if (!info)
			return;

		event_data = data1 & EVENT_DATA1_FLAGS_MASK;

		if (event_data & EVENT_DATA1_FLAGS_RECOVERY_ENABLED) {
			info->flags |= BNXT_FLAG_RECOVERY_ENABLED;
		} else {
			info->flags &= ~BNXT_FLAG_RECOVERY_ENABLED;
			PMD_DRV_LOG(INFO, "Driver recovery watchdog is disabled\n");
			return;
		}

		if (event_data & EVENT_DATA1_FLAGS_MASTER_FUNC)
			info->flags |= BNXT_FLAG_PRIMARY_FUNC;
		else
			info->flags &= ~BNXT_FLAG_PRIMARY_FUNC;

		status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
		PMD_DRV_LOG(INFO,
			    "Port: %u Driver recovery watchdog, role: %s, FW status: 0x%x (%s)\n",
			    port_id, bnxt_is_primary_func(bp) ?
			    "primary" : "backup", status,
			    (status == BNXT_FW_STATUS_HEALTHY) ? "healthy" : "unhealthy");

		if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
			return;

		info->last_heart_beat =
			bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
		info->last_reset_counter =
			bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);

		bnxt_schedule_fw_health_check(bp);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
		PMD_DRV_LOG(INFO, "Port: %u DNC event: data1 %#x data2 %#x\n",
			    port_id, data1, data2);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE:
		bnxt_process_default_vnic_change(bp, async_cmp);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST:
		PMD_DRV_LOG(INFO,
			    "Port %u: Received fw echo request: data1 %#x data2 %#x\n",
			    port_id, data1, data2);
		if (bp->recovery_info)
			bnxt_hwrm_fw_echo_reply(bp, data1, data2);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT:
		bnxt_handle_event_error_report(bp, data1, data2);
		break;
	default:
		PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
		break;
	}
}
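
/*
 * Illustrative, application-side sketch: how a DPDK application would
 * observe the RTE_ETH_EVENT_INTR_LSC notifications raised by the link
 * change cases above.  The guard macro and callback name are hypothetical;
 * nothing in the driver defines or registers them.
 */
#ifdef BNXT_CPR_EXAMPLES
#include <stdio.h>

static int
example_lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
	       void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	/* Query the link state the PMD just refreshed via
	 * bnxt_link_update_op() before issuing the callback.
	 */
	if (rte_eth_link_get_nowait(port_id, &link) == 0)
		printf("port %u link is %s\n", port_id,
		       link.link_status == RTE_ETH_LINK_UP ? "up" : "down");

	return 0;
}

/* Registered once, typically right after rte_eth_dev_configure():
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      example_lsc_cb, NULL);
 */
#endif /* BNXT_CPR_EXAMPLES */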
"primary" : "backup", status, 234 (status == BNXT_FW_STATUS_HEALTHY) ? "healthy" : "unhealthy"); 235 236 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 237 return; 238 239 info->last_heart_beat = 240 bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 241 info->last_reset_counter = 242 bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 243 244 bnxt_schedule_fw_health_check(bp); 245 break; 246 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: 247 PMD_DRV_LOG(INFO, "Port: %u DNC event: data1 %#x data2 %#x\n", 248 port_id, data1, data2); 249 break; 250 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE: 251 bnxt_process_default_vnic_change(bp, async_cmp); 252 break; 253 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: 254 PMD_DRV_LOG(INFO, 255 "Port %u: Received fw echo request: data1 %#x data2 %#x\n", 256 port_id, data1, data2); 257 if (bp->recovery_info) 258 bnxt_hwrm_fw_echo_reply(bp, data1, data2); 259 break; 260 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: 261 bnxt_handle_event_error_report(bp, data1, data2); 262 break; 263 default: 264 PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id); 265 break; 266 } 267 } 268 269 void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl) 270 { 271 struct hwrm_exec_fwd_resp_input *fwreq; 272 struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl; 273 struct input *fwd_cmd; 274 uint16_t fw_vf_id; 275 uint16_t vf_id; 276 uint16_t req_len; 277 int rc; 278 279 if (bp->pf->active_vfs <= 0) { 280 PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n"); 281 return; 282 } 283 284 /* Qualify the fwd request */ 285 fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id); 286 vf_id = fw_vf_id - bp->pf->first_vf_id; 287 288 req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) & 289 HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >> 290 HWRM_FWD_REQ_CMPL_REQ_LEN_SFT; 291 if (req_len > sizeof(fwreq->encap_request)) 292 req_len = sizeof(fwreq->encap_request); 293 294 /* Locate VF's forwarded command */ 295 fwd_cmd = (struct input *)bp->pf->vf_info[vf_id].req_buf; 296 297 if (fw_vf_id < bp->pf->first_vf_id || 298 fw_vf_id >= bp->pf->first_vf_id + bp->pf->active_vfs) { 299 PMD_DRV_LOG(ERR, 300 "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n", 301 fw_vf_id, bp->pf->first_vf_id, 302 (bp->pf->first_vf_id) + bp->pf->active_vfs - 1, 303 bp->pf->first_vf_id, bp->pf->active_vfs); 304 goto reject; 305 } 306 307 if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd)) { 308 /* 309 * In older firmware versions, the MAC had to be all zeros for 310 * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all 311 * zeros if it's being configured and has been ok'd by caller. 
312 */ 313 if (fwd_cmd->req_type == HWRM_FUNC_VF_CFG) { 314 struct hwrm_func_vf_cfg_input *vfc = (void *)fwd_cmd; 315 316 if (vfc->enables & 317 HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR) { 318 bnxt_hwrm_func_vf_mac(bp, vf_id, 319 (const uint8_t *)"\x00\x00\x00\x00\x00"); 320 } 321 } 322 323 if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) { 324 struct hwrm_cfa_l2_set_rx_mask_input *srm = 325 (void *)fwd_cmd; 326 327 srm->vlan_tag_tbl_addr = rte_cpu_to_le_64(0); 328 srm->num_vlan_tags = rte_cpu_to_le_32(0); 329 srm->mask &= ~rte_cpu_to_le_32( 330 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY | 331 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN | 332 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN); 333 } 334 335 /* Forward */ 336 rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len); 337 if (rc) { 338 PMD_DRV_LOG(ERR, 339 "Failed to send FWD req VF 0x%x, type 0x%x.\n", 340 fw_vf_id - bp->pf->first_vf_id, 341 rte_le_to_cpu_16(fwd_cmd->req_type)); 342 } 343 return; 344 } 345 346 reject: 347 rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len); 348 if (rc) { 349 PMD_DRV_LOG(ERR, 350 "Failed to send REJECT req VF 0x%x, type 0x%x.\n", 351 fw_vf_id - bp->pf->first_vf_id, 352 rte_le_to_cpu_16(fwd_cmd->req_type)); 353 } 354 355 return; 356 } 357 358 int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp) 359 { 360 bool evt = 0; 361 362 if (bp == NULL || cmp == NULL) { 363 PMD_DRV_LOG(ERR, "invalid NULL argument\n"); 364 return evt; 365 } 366 367 if (unlikely(is_bnxt_in_error(bp))) 368 return 0; 369 370 switch (CMP_TYPE(cmp)) { 371 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 372 /* Handle any async event */ 373 bnxt_handle_async_event(bp, cmp); 374 evt = 1; 375 break; 376 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 377 /* Handle HWRM forwarded responses */ 378 bnxt_handle_fwd_req(bp, cmp); 379 evt = 1; 380 break; 381 default: 382 /* Ignore any other events */ 383 PMD_DRV_LOG(DEBUG, "Ignoring %02x completion\n", CMP_TYPE(cmp)); 384 break; 385 } 386 387 return evt; 388 } 389 390 bool bnxt_is_primary_func(struct bnxt *bp) 391 { 392 if (bp->recovery_info->flags & BNXT_FLAG_PRIMARY_FUNC) 393 return true; 394 395 return false; 396 } 397 398 bool bnxt_is_recovery_enabled(struct bnxt *bp) 399 { 400 struct bnxt_error_recovery_info *info; 401 402 info = bp->recovery_info; 403 if (info && (info->flags & BNXT_FLAG_RECOVERY_ENABLED)) 404 return true; 405 406 return false; 407 } 408 409 void bnxt_stop_rxtx(struct rte_eth_dev *eth_dev) 410 { 411 eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; 412 eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; 413 414 rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst = 415 eth_dev->rx_pkt_burst; 416 rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst = 417 eth_dev->tx_pkt_burst; 418 rte_mb(); 419 420 /* Allow time for threads to exit the real burst functions. */ 421 rte_delay_ms(100); 422 } 423