/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#ifndef _BNXT_CPR_H_
#define _BNXT_CPR_H_
#include <stdbool.h>

#include <rte_io.h>
#include "hsi_struct_def_dpdk.h"

struct bnxt_db_info;

#define CMP_TYPE(cmp)						\
	(((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)

/* Get completion length from completion type, in 16-byte units. */
#define CMP_LEN(cmp_type) (((cmp_type) & 1) + 1)
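
/*
 * Worked example (illustrative, not part of the upstream sources): bit 0
 * of the completion type selects between one and two 16-byte records, so
 * a cmp_type with bit 0 clear gives CMP_LEN() == 1 (a 16-byte completion),
 * while a cmp_type with bit 0 set gives CMP_LEN() == ((1) + 1) == 2,
 * i.e. a 32-byte completion.
 */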

#define ADV_RAW_CMP(idx, n)	((idx) + (n))
#define NEXT_RAW_CMP(idx)	ADV_RAW_CMP(idx, 1)
#define RING_CMP(ring, idx)	((idx) & (ring)->ring_mask)
#define RING_CMPL(ring_mask, idx)	((idx) & (ring_mask))
#define NEXT_CMP(idx)		RING_CMP(ADV_RAW_CMP(idx, 1))

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)

#define B_CP_DB_REARM(cpr, raw_cons)					\
	rte_write32((DB_CP_REARM_FLAGS |				\
		     DB_RING_IDX(&((cpr)->cp_db), raw_cons)),		\
		    ((cpr)->cp_db.doorbell))

#define B_CP_DB_ARM(cpr)	rte_write32((DB_KEY_CP),		\
					    ((cpr)->cp_db.doorbell))

#define B_CP_DB_DISARM(cpr)	(*(uint32_t *)((cpr)->cp_db.doorbell) = \
				 DB_KEY_CP | DB_IRQ_DIS)

#define B_CP_DB_IDX_ARM(cpr, cons)					\
	(*(uint32_t *)((cpr)->cp_db.doorbell) = (DB_CP_REARM_FLAGS |	\
						 (cons)))

#define B_CP_DB_IDX_DISARM(cpr, cons)	do {				\
	rte_smp_wmb();							\
	(*(uint32_t *)((cpr)->cp_db.doorbell) = (DB_CP_FLAGS |		\
						 (cons)));		\
} while (0)

#define B_CP_DIS_DB(cpr, raw_cons)					\
	rte_write32_relaxed((DB_CP_FLAGS |				\
			     DB_RING_IDX(&((cpr)->cp_db), raw_cons)),	\
			    ((cpr)->cp_db.doorbell))

#define B_CP_DB(cpr, raw_cons, ring_mask)				\
	rte_write32((DB_CP_FLAGS |					\
		     RING_CMPL((ring_mask), raw_cons)),			\
		    ((cpr)->cp_db.doorbell))
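
/*
 * Note on the doorbell variants above (an editorial summary derived from
 * the flag definitions, not taken from the sources): the *_REARM macros
 * write DB_CP_REARM_FLAGS, which omits DB_IRQ_DIS, so the completion ring
 * interrupt stays armed after the consumer index is updated.  B_CP_DIS_DB
 * and B_CP_DB write DB_CP_FLAGS, which includes DB_IRQ_DIS and therefore
 * keeps the interrupt disabled, as a poll-mode fast path would want.
 */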

struct bnxt_db_info {
	void		*doorbell;
	union {
		uint64_t	db_key64;
		uint32_t	db_key32;
	};
	bool		db_64;
	uint32_t	db_ring_mask;
	uint32_t	db_epoch_mask;
	uint32_t	db_epoch_shift;
};

#define DB_EPOCH(db, idx)	(((idx) & (db)->db_epoch_mask) <<	\
				 ((db)->db_epoch_shift))
#define DB_RING_IDX(db, idx)	(((idx) & (db)->db_ring_mask) |		\
				 DB_EPOCH(db, idx))
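
/*
 * Worked example (a sketch with assumed values): for a 256-entry
 * completion ring, db_ring_mask would be 0xff and db_epoch_mask would
 * select bit 8 of the raw index, shifted by db_epoch_shift into the
 * doorbell's epoch position.  DB_RING_IDX(db, 0x101) then yields ring
 * index 0x01 with the epoch bit set, and the epoch bit flips on every
 * wrap of the raw index, so successive passes over the ring produce
 * distinct doorbell values.
 */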

struct bnxt_ring;
struct bnxt_cp_ring_info {
	uint32_t		cp_raw_cons;

	struct cmpl_base	*cp_desc_ring;
	struct bnxt_db_info	cp_db;
	rte_iova_t		cp_desc_mapping;

	struct ctx_hw_stats	*hw_stats;
	rte_iova_t		hw_stats_map;
	uint32_t		hw_stats_ctx_id;

	struct bnxt_ring	*cp_ring_struct;
};

#define RX_CMP_L2_ERRORS						\
	(RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR)

struct bnxt;
void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);
int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp);
void bnxt_dev_reset_and_resume(void *arg);
void bnxt_wait_for_device_shutdown(struct bnxt *bp);

#define EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL	\
	HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL
#define EVENT_DATA1_REASON_CODE_MASK			\
	HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK

#define EVENT_DATA1_FLAGS_MASK				\
	HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK

#define EVENT_DATA1_FLAGS_MASTER_FUNC			\
	HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC

#define EVENT_DATA1_FLAGS_RECOVERY_ENABLED		\
	HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED

bool bnxt_is_recovery_enabled(struct bnxt *bp);
bool bnxt_is_primary_func(struct bnxt *bp);

void bnxt_stop_rxtx(struct rte_eth_dev *eth_dev);

/**
 * Check validity of a completion ring entry. If the entry is valid, include a
 * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
 * completion are not hoisted by the compiler or by the CPU to come before the
 * loading of the "valid" field.
 *
 * Note: the caller must not access any fields in the specified completion
 * entry prior to calling this function.
 *
 * @param cmpl
 *   Pointer to an entry in the completion ring.
 * @param raw_cons
 *   Raw consumer index of entry in completion ring.
 * @param ring_size
 *   Size of completion ring.
 */
static __rte_always_inline bool
bnxt_cpr_cmp_valid(const void *cmpl, uint32_t raw_cons, uint32_t ring_size)
{
	const struct cmpl_base *c = cmpl;
	bool expected, valid;

	expected = !(raw_cons & ring_size);
	valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
	if (valid == expected) {
		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
		return true;
	}
	return false;
}
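
/*
 * Usage sketch (illustrative only, not part of the driver API; the local
 * variable names and the ring_size lookup are assumptions): a poll loop
 * checks validity before reading any other completion field, advances the
 * raw consumer index, and finally rings the doorbell.
 *
 *	uint32_t raw_cons = cpr->cp_raw_cons;
 *	uint32_t ring_size = cpr->cp_ring_struct->ring_size;
 *	uint32_t cons;
 *	struct cmpl_base *cmp;
 *
 *	do {
 *		cons = RING_CMPL(ring_size - 1, raw_cons);
 *		cmp = &cpr->cp_desc_ring[cons];
 *		if (!bnxt_cpr_cmp_valid(cmp, raw_cons, ring_size))
 *			break;
 *		... handle the completion according to CMP_TYPE(cmp) ...
 *		raw_cons = NEXT_RAW_CMP(raw_cons);
 *	} while (1);
 *
 *	cpr->cp_raw_cons = raw_cons;
 *	B_CP_DB(cpr, raw_cons, ring_size - 1);
 */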
#endif