/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File : ecore_int.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_hw_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_mcp.h"
#include "ecore_dbg_fw_funcs.h"

#ifdef DIAG
/* This is nasty, but diag is using the drv_dbg_fw_funcs.c [non-ecore flavor],
 * and so the functions are lacking the ecore prefix.
 * If other clients ever need this [or if the non-optional content there
 * grows], we'll need to re-think this.
 */
enum dbg_status dbg_read_attn(struct ecore_hwfn *dev,
			      struct ecore_ptt *ptt,
			      enum block_id block,
			      enum dbg_attn_type attn_type,
			      bool clear_status,
			      struct dbg_attn_block_result *results);

enum dbg_status dbg_parse_attn(struct ecore_hwfn *dev,
			       struct dbg_attn_block_result *results);

const char *dbg_get_status_str(enum dbg_status status);

#define ecore_dbg_read_attn(hwfn, ptt, id, type, clear, results) \
	dbg_read_attn(hwfn, ptt, id, type, clear, results)
#define ecore_dbg_parse_attn(hwfn, results) \
	dbg_parse_attn(hwfn, results)
#define ecore_dbg_get_status_str(status) \
	dbg_get_status_str(status)
#endif

struct ecore_pi_info {
	ecore_int_comp_cb_t comp_cb;
	void *cookie; /* Will be sent to the completion callback function */
};

struct ecore_sb_sp_info {
	struct ecore_sb_info sb_info;
	/* per protocol index data */
	struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
};

enum ecore_attention_type {
	ECORE_ATTN_TYPE_ATTN,
	ECORE_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY	(1 << 0)

#define ATTENTION_LENGTH_MASK	(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT	(4)
#define ATTENTION_LENGTH(flags)	(((flags) & ATTENTION_LENGTH_MASK) >> \
				 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE	(1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR		(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT	((2 << ATTENTION_LENGTH_SHIFT) | \
				 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK	(0x000ff000)
#define ATTENTION_OFFSET_SHIFT	(12)

#define ATTENTION_BB_MASK	(0x00700000)
#define ATTENTION_BB_SHIFT	(20)
#define ATTENTION_BB(value)	((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT	(1 << 23)

#define ATTENTION_CLEAR_ENABLE	(1 << 28)
	unsigned int flags;

	/* Callback to call if the attention is triggered */
	enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);

	enum block_id block_index;
};
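
/* Example (added for clarity, not in the original sources): an
 * ATTENTION_PAR_INT entry encodes a parity bit followed by an interrupt
 * bit in a 2-bit field:
 *   ATTENTION_PAR_INT = (2 << ATTENTION_LENGTH_SHIFT) | ATTENTION_PARITY
 *                     = 0x21
 *   ATTENTION_LENGTH(0x21) == 2 and (0x21 & ATTENTION_PARITY) != 0,
 * so the descriptor walkers below advance their bit index by 2 for such
 * entries.
 */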

struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS	(8)
#define NUM_ATTN_REGS	(9)

static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
		tmp);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
		 0xffffffff);

	return ECORE_SUCCESS;
}

#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK		(0x3c000)
#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK		(0x03fc0)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK	(0x00020)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK	(0x0001e)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK	(0x1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_VF_DISABLED		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK	(0x1e)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x20)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK	(0x3fc0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK	(0x3c000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK	(0x3fc0000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			   PSWHST_REG_VF_DISABLED_ERROR_VALID);

	/* Disabled VF access */
	if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
		u32 addr, data;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_DATA);
		DP_INFO(p_hwfn->p_dev, "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x] Write [0x%02x] Addr [0x%08x]\n",
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
			addr);
	}

	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       PSWHST_REG_INCORRECT_ACCESS_VALID);
	if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				  PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->p_dev, "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
			data);
	}

	/* TODO - We know 'some' of these are legal due to virtualization,
	 * but is it true for all of them?
	 */
	return ECORE_SUCCESS;
}

#define ECORE_GRC_ATTENTION_VALID_BIT		(1 << 0)
#define ECORE_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff << 0)
#define ECORE_GRC_ATTENTION_RDWR_BIT		(1 << 23)
#define ECORE_GRC_ATTENTION_MASTER_MASK		(0xf << 24)
#define ECORE_GRC_ATTENTION_MASTER_SHIFT	(24)
#define ECORE_GRC_ATTENTION_PF_MASK		(0xf)
#define ECORE_GRC_ATTENTION_VF_MASK		(0xff << 4)
#define ECORE_GRC_ATTENTION_VF_SHIFT		(4)
#define ECORE_GRC_ATTENTION_PRIV_MASK		(0x3 << 14)
#define ECORE_GRC_ATTENTION_PRIV_SHIFT		(14)
#define ECORE_GRC_ATTENTION_PRIV_VF		(0)
static const char *grc_timeout_attn_master_to_str(u8 master)
{
	switch (master) {
	case 1: return "PXP";
	case 2: return "MCP";
	case 3: return "MSDM";
	case 4: return "PSDM";
	case 5: return "YSDM";
	case 6: return "USDM";
	case 7: return "TSDM";
	case 8: return "XSDM";
	case 9: return "DBU";
	case 10: return "DMAE";
	default:
		return "Unknown";
	}
}

static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register
	 */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_NOTICE(p_hwfn->p_dev, false,
		  "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		  tmp2, tmp,
		  (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
						       : "Read from",
		  (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
		  grc_timeout_attn_master_to_str((tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
						 ECORE_GRC_ATTENTION_MASTER_SHIFT),
		  (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
		  (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
		    ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
		   ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
		  (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
		  ECORE_GRC_ATTENTION_VF_SHIFT);

out:
	/* Regardless of anything else, clear the validity bit */
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
	return ECORE_SUCCESS;
}
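
/* Note (added): the ADDRESS field extracted above is in dword units; the
 * print shifts it left by 2 to form a GRC byte address, e.g. a raw field
 * value of 0x3000 corresponds to byte address 0xC000.
 */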

#define ECORE_PGLUE_ATTENTION_VALID		(1 << 29)
#define ECORE_PGLUE_ATTENTION_RD_VALID		(1 << 26)
#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf << 20)
#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
#define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID	(1 << 19)
#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff << 24)
#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
#define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR	(1 << 21)
#define ECORE_PGLUE_ATTENTION_DETAILS2_BME	(1 << 22)
#define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN	(1 << 23)
#define ECORE_PGLUE_ATTENTION_ICPL_VALID	(1 << 23)
#define ECORE_PGLUE_ATTENTION_ZLR_VALID		(1 << 25)
#define ECORE_PGLUE_ATTENTION_ILT_VALID		(1 << 23)

enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	u32 tmp;

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
	if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
		details = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_WR_DETAILS);

		DP_NOTICE(p_hwfn, false,
			  "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			  addr_hi, addr_lo, details,
			  (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
			  (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
			  (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
			  tmp,
			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
	}

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
	if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
		details = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_RD_DETAILS);

		DP_NOTICE(p_hwfn, false,
			  "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			  addr_hi, addr_lo, details,
			  (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
			  (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
			  (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
			  tmp,
			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
	}

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
		DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp);

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_NOTICE(p_hwfn, false,
			  "ZLR error - %08x [Address %08x:%08x]\n",
			  tmp, addr_hi, addr_lo);
	}

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_NOTICE(p_hwfn, false,
			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			  details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
}

static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
	DP_NOTICE(p_hwfn, false, "FW assertion!\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);

	return ECORE_INVAL;
}

static enum _ecore_status_t
ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
{
	DP_INFO(p_hwfn, "General attention 35!\n");

	return ECORE_SUCCESS;
}

#define ECORE_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
#define ECORE_DORQ_ATTENTION_SIZE_MASK		(0x7f)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT		(16)

#define ECORE_DB_REC_COUNT	10
#define ECORE_DB_REC_INTERVAL	100
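
/* Illustrative (added): the _MASK/_SHIFT pairs above are consumed through
 * GET_FIELD() in the handler below. Assuming the common definition
 *   GET_FIELD(val, name) == (((val) >> name##_SHIFT) & name##_MASK)
 * then, e.g., GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) yields
 * (details >> 16) & 0x7f, which the print multiplies by 4 to convert the
 * size from 32-bit words to bytes.
 */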

/* assumes the sticky overflow indication was set for this PF */
static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	u8 count = ECORE_DB_REC_COUNT;
	u32 usage = 1;

	/* Wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over PCI. Possibly, a doorbell drop can happen with
	 * half an EDPM in the queue and the other half dropped. Another EDPM
	 * doorbell to the same address (from the doorbell recovery mechanism
	 * or from the doorbelling entity) could have its first half dropped
	 * and its second half interpreted as a continuation of the first. To
	 * prevent such malformed doorbells from reaching the device, flush
	 * the queue before releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
		return ECORE_TIMEOUT;
	}

	/* flush any pending (e)dpm as they may never arrive */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* release overflow sticky indication (stop silently dropping everything) */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* repeat all last doorbells (doorbell drop recovery) */
	ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, overflow,
	    all_drops_reason;
	struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	enum _ecore_status_t rc;

	int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
		  int_sts);

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. We can abort here; if
	 * this PF also requires overflow recovery we will be interrupted
	 * again. The masked almost-full indication may also be set. Ignoring.
	 */
	if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
		return ECORE_SUCCESS;

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* obtain data about db drop/overflow */
		first_drop_reason = ecore_rd(p_hwfn, p_ptt,
					     DORQ_REG_DB_DROP_REASON) &
				    ECORE_DORQ_ATTENTION_REASON_MASK;
		details = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS);
		address = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		overflow = ecore_rd(p_hwfn, p_ptt,
				    DORQ_REG_PF_OVFL_STICKY);
		all_drops_reason = ecore_rd(p_hwfn, p_ptt,
					    DORQ_REG_DB_DROP_DETAILS_REASON);

		/* log info */
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
			  "Overflow\t0x%x\t\t(a per PF indication)\n",
			  address, GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason, overflow);

		/* if this PF caused overflow, initiate recovery */
		if (overflow) {
			rc = ecore_db_rec_attn(p_hwfn, p_ptt);
			if (rc != ECORE_SUCCESS)
				return rc;
		}

		/* clear the doorbell drop details and prepare for next drop */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* mark the interrupt as handled (note: even if the drop was
		 * due to a different reason than overflow we mark it as
		 * handled)
		 */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
			 DORQ_REG_INT_STS_DB_DROP |
			 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* if there are no indications other than drop indications,
		 * success
		 */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return ECORE_SUCCESS;
	}

	/* some other indication was present - non-recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return ECORE_INVAL;
}

static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
		u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   TM_REG_INT_STS_1);

		if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
			    TM_REG_INT_STS_1_PEND_CONN_SCAN))
			return ECORE_INVAL;

		if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
			   TM_REG_INT_STS_1_PEND_CONN_SCAN))
			DP_INFO(p_hwfn, "TM attention on emulation - most likely a result of clock ratios\n");
		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
		val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
		       TM_REG_INT_MASK_1_PEND_TASK_SCAN;
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);

		return ECORE_SUCCESS;
	}
#endif

	return ECORE_INVAL;
}

/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};

static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
};

/* Notice: aeu_invert_reg must be defined in the same bit order as HW */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] =
{
	{
		{	/* After Invert 1 */
			{"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, BLOCK_PGLCS},
		}
	},

	{
		{	/* After Invert 3 */
			{"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 4 */
			{"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_fw_assertion, MAX_BLOCK_ID},
			{"General Attention %d", (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
			{"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_general_attention_35, MAX_BLOCK_ID},
			{"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
				       ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), OSAL_NULL, BLOCK_NWS},
			{"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
					  ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), OSAL_NULL, BLOCK_NWS},
			{"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
				       ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), OSAL_NULL, BLOCK_NWM},
			{"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
					  ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), OSAL_NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
			{"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
			{"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
			{"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
			{"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
			{"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
			{"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
		}
	},

	{
		{	/* After Invert 5 */
			{"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
			{"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
			{"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
			{"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
			{"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
			{"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
			{"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
			{"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
			{"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
			{"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
			{"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
			{"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
			{"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
			{"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
			{"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
			{"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
		}
	},

	{
		{	/* After Invert 6 */
			{"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
			{"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
			{"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
			{"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
			{"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
			{"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
			{"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
			{"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
			{"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
			{"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
			{"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
692 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
693 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
694 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
695 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
696 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
697 }
698 },
699
700 {
701 { /* After Invert 7 */
702 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
703 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
704 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
705 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
706 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
707 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
708 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
709 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
710 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
711 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
712 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
713 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
714 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
715 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
716 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
717 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
718 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
719 }
720 },
721
722 {
723 { /* After Invert 8 */
724 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
725 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
726 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
727 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
728 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
729 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
730 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
731 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
732 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
733 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
734 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
735 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
736 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
737 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
738 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
739 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
740 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
741 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
742 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
743 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
744 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
745 }
746 },
747
748 {
749 { /* After Invert 9 */
750 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
751 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
752 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
753 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
754 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID },
755 }
756 },
757
758 };
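
/* Note (added): each aeu_invert_reg above must account for exactly 32 bits,
 * e.g. for "After Invert 9": 1 + 1 + 1 + 1 + 28 == 32. The parity-mask
 * calculation in ecore_int_sb_attn_init() and the deassertion walkers rely
 * on this when advancing by ATTENTION_LENGTH() per descriptor.
 */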

static struct aeu_invert_reg_bit *
ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
			struct aeu_invert_reg_bit *p_bit)
{
	if (!ECORE_IS_BB(p_hwfn->p_dev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}
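
/* Illustrative (added): on a BB device, the "NWS Parity" entry of
 * "After Invert 4" carries ATTENTION_BB_DIFFERENT with
 * ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), so the function above
 * resolves it to the "CNIG port 0" descriptor in aeu_descs_special[].
 */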

static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
				     struct aeu_invert_reg_bit *p_bit)
{
	return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}

#define ATTN_STATE_BITS		(0xfff)
#define ATTN_BITS_MASKABLE	(0x3ff)
struct ecore_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block *sb_attn;
	dma_addr_t sb_phys;

	/* Last seen running index */
	u16 index;

	/* A mask of the AEU bits resulting in a parity error */
	u32 parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg *p_aeu_desc;

	/* Previously asserted attentions that have not yet been deasserted */
	u16 known_attn;

	/* Cleanup address for the link's general hw attention */
	u32 mfw_attn_addr;
};

static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
				 struct ecore_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	OSAL_MMIOWB(p_hwfn->p_dev);

	index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = ECORE_SB_ATT_IDX;
	}

	OSAL_MMIOWB(p_hwfn->p_dev);

	return rc;
}

/**
 * @brief ecore_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
						u16 asserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
			 sb_attn_sw->mfw_attn_addr, 0);
	}

	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return ECORE_SUCCESS;
}

static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
				 enum block_id id, enum dbg_attn_type type,
				 bool b_clear)
{
	struct dbg_attn_block_result attn_results;
	enum dbg_status status;

	OSAL_MEMSET(&attn_results, 0, sizeof(attn_results));

	status = ecore_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
				     b_clear, &attn_results);
#ifdef ATTN_DESC
	if (status != DBG_STATUS_OK)
		DP_NOTICE(p_hwfn, true,
			  "Failed to parse attention information [status: %s]\n",
			  ecore_dbg_get_status_str(status));
	else
		ecore_dbg_parse_attn(p_hwfn, &attn_results);
#else
	if (status != DBG_STATUS_OK)
		DP_NOTICE(p_hwfn, true,
			  "Failed to parse attention information [status: %d]\n",
			  status);
	else
		ecore_dbg_print_attn(p_hwfn, &attn_results);
#endif
}

/**
 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *  this bit to this group.
 * @param bit_index - index of this bit in the aeu_en_reg
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
			      struct aeu_invert_reg_bit *p_aeu,
			      u32 aeu_en_reg,
			      const char *p_bit_name,
			      u32 bitmask)
{
	enum _ecore_status_t rc = ECORE_INVAL;
	bool b_fatal = false;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc != ECORE_SUCCESS)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID)
		ecore_int_attn_print(p_hwfn, p_aeu->block_index,
				     ATTN_TYPE_INTERRUPT, !b_fatal);

	/* Notify the upper layer if the attention is fatal */
	if (b_fatal) {
		DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
			  p_bit_name);

		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
	}

	/* Prevent this Attention from being asserted in the future */
	if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
	    p_hwfn->p_dev->attn_clr_en) {
		u32 val;
		u32 mask = ~bitmask;

		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
		DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
			p_bit_name);
	}

	return rc;
}

/**
 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param aeu_en_reg - address of the AEU enable register
 * @param bit_index
 */
static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
					 struct aeu_invert_reg_bit *p_aeu,
					 u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->p_dev, false,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In A0, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
					     ATTN_TYPE_PARITY, false);
			ecore_int_attn_print(p_hwfn, BLOCK_MCP,
					     ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~(0x1 << bit_index);
	val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}

/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return enum _ecore_status_t
 *
 */
static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
						  u16 deasserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					  MISC_REG_AEU_AFTER_INVERT_1_IGU +
					  i * 0x4);
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n",
			   i, aeu_inv_arr[i]);
	}

	/* Handle parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;

		/* Skip register in which no parity bit is currently set */
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & (1 << bit_idx)))
				ecore_int_deassertion_parity(p_hwfn, p_bit,
							     aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;
			en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				unsigned long bitmask;
				u8 bit, bit_len;

				/* Need to account for bits with changed meaning */
				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
				p_aeu = ecore_int_aeu_translate(p_hwfn, p_aeu);

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				/* Find the bits relating to HW-block, then
				 * shift so they'll become LSB.
				 */
				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
								      bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						OSAL_SNPRINTF(bit_name, 30,
							      p_aeu->bit_name,
							      num);
					else
						OSAL_STRNCPY(bit_name,
							     p_aeu->bit_name,
							     30);

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					ecore_int_deassertion_aeu_bit(p_hwfn,
								      p_aeu,
								      aeu_en,
								      bit_name,
								      bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Clear IGU indication for the deasserted bits */
	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}

static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u16 index = 0, asserted_bits, deasserted_bits;
	u32 attn_bits = 0, attn_acks = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
		attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
		attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
	} while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and are consistent with the known state -
	 * deassertion when previous attention & current ack, and assertion
	 * when current attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
			~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
			  p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	else if (asserted_bits == 0x100)
		DP_INFO(p_hwfn,
			"MFW indication via attention\n");
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "MFW indication [deassertion]\n");

	if (asserted_bits) {
		rc = ecore_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = ecore_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}

static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
			      void OSAL_IOMEM *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
		((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_ATTN <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to the same address;
	 * need to guarantee all commands will be received (in-order) by HW.
	 */
	OSAL_MMIOWB(p_hwfn->p_dev);
	OSAL_BARRIER(p_hwfn->p_dev);
}
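
/* Note (added): IGU_INT_NOP is used above because this write only acks the
 * attention segment; interrupt enablement is restored separately by the
 * caller via ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1) - see
 * ecore_int_sp_dpc() below.
 */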

void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
	struct ecore_pi_info *pi_info = OSAL_NULL;
	struct ecore_sb_attn_info *sb_attn;
	struct ecore_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn)
		return;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->p_dev, "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for the default status block. Required both for MSI-X
	 * and for INTA in non-mask mode; in INTA it does no harm.
	 */
	ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->p_dev, "Interrupt Status block is NULL - cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = ecore_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->p_dev, "Attentions Status block is NULL - cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. if not just ack them */
	if (!(rc & ECORE_SB_EVENT_MASK)) {
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not ack interrupts and fail */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & ECORE_SB_ATT_IDX)
		ecore_int_attentions(p_hwfn);

	if (rc & ECORE_SB_IDX) {
		int pi;

		/* Since we only looked at the SB index, it's possible more
		 * than a single protocol-index on the SB incremented.
		 * Iterate over all configured protocol indices and check
		 * whether something happened for each.
		 */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb != OSAL_NULL)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
	}

	ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
				       p_sb->sb_phys,
				       SB_ATTN_ALIGNED_SIZE(p_hwfn));
	}

	OSAL_FREE(p_hwfn->p_dev, p_sb);
	p_hwfn->p_sb_attn = OSAL_NULL;
}

static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
		 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
		 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
}

static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   void *sb_virt_addr,
				   dma_addr_t sb_phy_addr)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
}

static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
	if (!p_sb) {
		DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n");
		return ECORE_NOMEM;
	}

	/* SB ring */
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
					 SB_ATTN_ALIGNED_SIZE(p_hwfn));
	if (!p_virt) {
		DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n");
		OSAL_FREE(p_dev, p_sb);
		return ECORE_NOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return ECORE_SUCCESS;
}

/* coalescing timeout = timeset << (timer_res + 1) */
#define ECORE_CAU_DEF_RX_USECS 24
#define ECORE_CAU_DEF_TX_USECS 48
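
/* Worked example (added), using the formula above with the default RX
 * value: rx_coalesce_usecs = 24 <= 0x7F selects timer_res = 0 (see the
 * selection logic in the functions below), timeset = 24 >> 0 = 24, giving
 * a coalescing timeout of 24 << (0 + 1) = 48 timer units.
 */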
1441
ecore_init_cau_sb_entry(struct ecore_hwfn * p_hwfn,struct cau_sb_entry * p_sb_entry,u8 pf_id,u16 vf_number,u8 vf_valid)1442 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
1443 struct cau_sb_entry *p_sb_entry,
1444 u8 pf_id, u16 vf_number, u8 vf_valid)
1445 {
1446 struct ecore_dev *p_dev = p_hwfn->p_dev;
1447 u32 cau_state;
1448 u8 timer_res;
1449
1450 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
1451
1452 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1453 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1454 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1455 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1456 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1457
1458 cau_state = CAU_HC_DISABLE_STATE;
1459
1460 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1461 cau_state = CAU_HC_ENABLE_STATE;
1462 if (!p_dev->rx_coalesce_usecs)
1463 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
1464 if (!p_dev->tx_coalesce_usecs)
1465 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
1466 }
1467
1468 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
1469 if (p_dev->rx_coalesce_usecs <= 0x7F)
1470 timer_res = 0;
1471 else if (p_dev->rx_coalesce_usecs <= 0xFF)
1472 timer_res = 1;
1473 else
1474 timer_res = 2;
1475 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1476
1477 if (p_dev->tx_coalesce_usecs <= 0x7F)
1478 timer_res = 0;
1479 else if (p_dev->tx_coalesce_usecs <= 0xFF)
1480 timer_res = 1;
1481 else
1482 timer_res = 2;
1483 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1484
1485 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1486 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
1487 }
1488
_ecore_int_cau_conf_pi(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,u16 igu_sb_id,u32 pi_index,enum ecore_coalescing_fsm coalescing_fsm,u8 timeset)1489 static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1490 struct ecore_ptt *p_ptt,
1491 u16 igu_sb_id, u32 pi_index,
1492 enum ecore_coalescing_fsm coalescing_fsm,
1493 u8 timeset)
1494 {
1495 struct cau_pi_entry pi_entry;
1496 u32 sb_offset, pi_offset;
1497
1498 if (IS_VF(p_hwfn->p_dev))
1499 return;/* @@@TBD MichalK- VF CAU... */
1500
1501 sb_offset = igu_sb_id * PIS_PER_SB_E4;
1502 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
1503
1504 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1505 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
1506 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1507 else
1508 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1509
1510 pi_offset = sb_offset + pi_index;
1511 if (p_hwfn->hw_init_done) {
1512 ecore_wr(p_hwfn, p_ptt,
1513 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1514 *((u32 *)&(pi_entry)));
1515 } else {
1516 STORE_RT_REG(p_hwfn,
1517 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1518 *((u32 *)&(pi_entry)));
1519 }
1520 }
1521
ecore_int_cau_conf_pi(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt,struct ecore_sb_info * p_sb,u32 pi_index,enum ecore_coalescing_fsm coalescing_fsm,u8 timeset)1522 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1523 struct ecore_ptt *p_ptt,
1524 struct ecore_sb_info *p_sb, u32 pi_index,
1525 enum ecore_coalescing_fsm coalescing_fsm,
1526 u8 timeset)
1527 {
1528 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
1529 pi_index, coalescing_fsm, timeset);
1530 }
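
/* Usage sketch (hypothetical values): a client tuning the Rx coalescing FSM
 * of an initialized SB might call, e.g.,
 *
 *	ecore_int_cau_conf_pi(p_hwfn, p_ptt, &sb_info, RX_PI,
 *			      ECORE_COAL_RX_STATE_MACHINE, 0x18);
 *
 * where 0x18 is an example 7-bit timeset, not a recommended default.
 */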
1531
1532 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
1533 struct ecore_ptt *p_ptt,
1534 dma_addr_t sb_phys, u16 igu_sb_id,
1535 u16 vf_number, u8 vf_valid)
1536 {
1537 struct cau_sb_entry sb_entry;
1538
1539 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1540 vf_number, vf_valid);
1541
1542 if (p_hwfn->hw_init_done) {
1543 /* Wide-bus, initialize via DMAE */
1544 u64 phys_addr = (u64)sb_phys;
1545
1546 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&phys_addr,
1547 CAU_REG_SB_ADDR_MEMORY +
1548 igu_sb_id * sizeof(u64), 2,
1549 OSAL_NULL /* default parameters */);
1550 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&sb_entry,
1551 CAU_REG_SB_VAR_MEMORY +
1552 igu_sb_id * sizeof(u64), 2,
1553 OSAL_NULL /* default parameters */);
1554 } else {
1555 /* Initialize Status Block Address */
1556 STORE_RT_REG_AGG(p_hwfn,
1557 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET+igu_sb_id*2,
1558 sb_phys);
1559
1560 STORE_RT_REG_AGG(p_hwfn,
1561 CAU_REG_SB_VAR_MEMORY_RT_OFFSET+igu_sb_id*2,
1562 sb_entry);
1563 }
1564
1565 /* Configure pi coalescing if set */
1566 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1567 /* eth will open queues for all TCs, so configure all of them
1568 * properly, rather than just the active ones
1569 */
1570 u8 num_tc = p_hwfn->hw_info.num_hw_tc;
1571
1572 u8 timeset, timer_res;
1573 u8 i;
1574
1575 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
1576 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
1577 timer_res = 0;
1578 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
1579 timer_res = 1;
1580 else
1581 timer_res = 2;
1582 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
1583 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1584 ECORE_COAL_RX_STATE_MACHINE,
1585 timeset);
1586
1587 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
1588 timer_res = 0;
1589 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
1590 timer_res = 1;
1591 else
1592 timer_res = 2;
1593 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
1594 for (i = 0; i < num_tc; i++) {
1595 _ecore_int_cau_conf_pi(p_hwfn, p_ptt,
1596 igu_sb_id, TX_PI(i),
1597 ECORE_COAL_TX_STATE_MACHINE,
1598 timeset);
1599 }
1600 }
1601 }
1602
1603 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
1604 struct ecore_ptt *p_ptt,
1605 struct ecore_sb_info *sb_info)
1606 {
1607 /* zero status block and ack counter */
1608 sb_info->sb_ack = 0;
1609 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1610
1611 if (IS_PF(p_hwfn->p_dev))
1612 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1613 sb_info->igu_sb_id, 0, 0);
1614 }
1615
1616 struct ecore_igu_block *
1617 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
1618 {
1619 struct ecore_igu_block *p_block;
1620 u16 igu_id;
1621
1622 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1623 igu_id++) {
1624 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1625
1626 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1627 !(p_block->status & ECORE_IGU_STATUS_FREE))
1628 continue;
1629
1630 if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
1631 b_is_pf)
1632 return p_block;
1633 }
1634
1635 return OSAL_NULL;
1636 }
1637
1638 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
1639 u16 vector_id)
1640 {
1641 struct ecore_igu_block *p_block;
1642 u16 igu_id;
1643
1644 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1645 igu_id++) {
1646 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1647
1648 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1649 !p_block->is_pf ||
1650 p_block->vector_number != vector_id)
1651 continue;
1652
1653 return igu_id;
1654 }
1655
1656 return ECORE_SB_INVALID_IDX;
1657 }
1658
1659 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1660 {
1661 u16 igu_sb_id;
1662
1663 /* Assuming a contiguous set of IGU SBs dedicated to the given PF */
1664 if (sb_id == ECORE_SP_SB_ID)
1665 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1666 else if (IS_PF(p_hwfn->p_dev))
1667 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1668 else
1669 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1670
1671 if (igu_sb_id == ECORE_SB_INVALID_IDX)
1672 DP_NOTICE(p_hwfn, true,
1673 "Slowpath SB vector %04x doesn't exist\n",
1674 sb_id);
1675 else if (sb_id == ECORE_SP_SB_ID)
1676 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1677 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1678 else
1679 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1680 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1681
1682 return igu_sb_id;
1683 }
1684
1685 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
1686 struct ecore_ptt *p_ptt,
1687 struct ecore_sb_info *sb_info,
1688 void *sb_virt_addr,
1689 dma_addr_t sb_phy_addr,
1690 u16 sb_id)
1691 {
1692 sb_info->sb_virt = sb_virt_addr;
1693 sb_info->sb_phys = sb_phy_addr;
1694
1695 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
1696
1697 if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
1698 return ECORE_INVAL;
1699
1700 /* Let the igu info reference the client's SB info */
1701 if (sb_id != ECORE_SP_SB_ID) {
1702 if (IS_PF(p_hwfn->p_dev)) {
1703 struct ecore_igu_info *p_info;
1704 struct ecore_igu_block *p_block;
1705
1706 p_info = p_hwfn->hw_info.p_igu_info;
1707 p_block = &p_info->entry[sb_info->igu_sb_id];
1708
1709 p_block->sb_info = sb_info;
1710 p_block->status &= ~ECORE_IGU_STATUS_FREE;
1711 p_info->usage.free_cnt--;
1712 } else {
1713 ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
1714 }
1715 }
1716
1717 #ifdef ECORE_CONFIG_DIRECT_HWFN
1718 sb_info->p_hwfn = p_hwfn;
1719 #endif
1720 sb_info->p_dev = p_hwfn->p_dev;
1721
1722 /* The igu address will hold the absolute address that needs to be
1723 * written to for a specific status block
1724 */
1725 if (IS_PF(p_hwfn->p_dev)) {
1726 sb_info->igu_addr = (u8 OSAL_IOMEM*)p_hwfn->regview +
1727 GTT_BAR0_MAP_REG_IGU_CMD +
1728 (sb_info->igu_sb_id << 3);
1729
1730 } else {
1731 sb_info->igu_addr =
1732 (u8 OSAL_IOMEM*)p_hwfn->regview +
1733 PXP_VF_BAR0_START_IGU +
1734 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
1735 }
1736
1737 sb_info->flags |= ECORE_SB_INFO_INIT;
1738
1739 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
1740
1741 return ECORE_SUCCESS;
1742 }
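
/* Allocation sketch (illustrative, error handling elided): a typical caller
 * first allocates coherent memory for the SB and then hands it to ecore,
 * mirroring ecore_int_sp_sb_alloc() below:
 *
 *	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
 *					 SB_ALIGNED_SIZE(p_hwfn));
 *	if (p_virt != OSAL_NULL)
 *		rc = ecore_int_sb_init(p_hwfn, p_ptt, &sb_info,
 *				       p_virt, p_phys, sb_id);
 */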
1743
1744 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1745 struct ecore_sb_info *sb_info,
1746 u16 sb_id)
1747 {
1748 struct ecore_igu_info *p_info;
1749 struct ecore_igu_block *p_block;
1750
1751 if (sb_info == OSAL_NULL)
1752 return ECORE_SUCCESS;
1753
1754 /* zero status block and ack counter */
1755 sb_info->sb_ack = 0;
1756 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1757
1758 if (IS_VF(p_hwfn->p_dev)) {
1759 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
1760 return ECORE_SUCCESS;
1761 }
1762
1763 p_info = p_hwfn->hw_info.p_igu_info;
1764 p_block = &p_info->entry[sb_info->igu_sb_id];
1765
1766 /* Vector 0 is reserved to Default SB */
1767 if (p_block->vector_number == 0) {
1768 DP_ERR(p_hwfn, "Do not free the SP SB using this function");
1769 return ECORE_INVAL;
1770 }
1771
1772 /* Lose reference to client's SB info, and fix counters */
1773 p_block->sb_info = OSAL_NULL;
1774 p_block->status |= ECORE_IGU_STATUS_FREE;
1775 p_info->usage.free_cnt++;
1776
1777 return ECORE_SUCCESS;
1778 }
1779
1780 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1781 {
1782 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1783
1784 if (!p_sb)
1785 return;
1786
1787 if (p_sb->sb_info.sb_virt) {
1788 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1789 p_sb->sb_info.sb_virt,
1790 p_sb->sb_info.sb_phys,
1791 SB_ALIGNED_SIZE(p_hwfn));
1792 }
1793
1794 OSAL_FREE(p_hwfn->p_dev, p_sb);
1795 p_hwfn->p_sp_sb = OSAL_NULL;
1796 }
1797
1798 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1799 struct ecore_ptt *p_ptt)
1800 {
1801 struct ecore_sb_sp_info *p_sb;
1802 dma_addr_t p_phys = 0;
1803 void *p_virt;
1804
1805 /* SB struct */
1806 p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
1807 if (!p_sb) {
1808 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n");
1809 return ECORE_NOMEM;
1810 }
1811
1812 /* SB ring */
1813 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1814 &p_phys,
1815 SB_ALIGNED_SIZE(p_hwfn));
1816 if (!p_virt) {
1817 DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
1818 OSAL_FREE(p_hwfn->p_dev, p_sb);
1819 return ECORE_NOMEM;
1820 }
1821
1823 /* Status Block setup */
1824 p_hwfn->p_sp_sb = p_sb;
1825 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1826 p_virt, p_phys, ECORE_SP_SB_ID);
1827
1828 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1829
1830 return ECORE_SUCCESS;
1831 }
1832
1833 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1834 ecore_int_comp_cb_t comp_cb,
1835 void *cookie,
1836 u8 *sb_idx,
1837 __le16 **p_fw_cons)
1838 {
1839 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1840 enum _ecore_status_t rc = ECORE_NOMEM;
1841 u8 pi;
1842
1843 /* Look for a free index */
1844 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1845 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1846 continue;
1847
1848 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1849 p_sp_sb->pi_info_arr[pi].cookie = cookie;
1850 *sb_idx = pi;
1851 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1852 rc = ECORE_SUCCESS;
1853 break;
1854 }
1855
1856 return rc;
1857 }
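
/* Registration sketch (my_comp_cb and my_cookie are hypothetical names):
 * protocol code hooks a slowpath protocol index roughly as follows:
 *
 *	u8 sb_idx;
 *	__le16 *p_fw_cons;
 *
 *	rc = ecore_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
 *				   &sb_idx, &p_fw_cons);
 *
 * On ECORE_SUCCESS, *p_fw_cons tracks the firmware consumer and sb_idx is
 * later passed to ecore_int_unregister_cb().
 */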
1858
1859 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn,
1860 u8 pi)
1861 {
1862 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1863
1864 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1865 return ECORE_NOMEM;
1866
1867 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1868 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1869
1870 return ECORE_SUCCESS;
1871 }
1872
1873 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1874 {
1875 return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1876 }
1877
1878 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
1879 struct ecore_ptt *p_ptt,
1880 enum ecore_int_mode int_mode)
1881 {
1882 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1883
1884 #ifndef ASIC_ONLY
1885 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1886 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
1887 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
1888 }
1889 #endif
1890
1891 p_hwfn->p_dev->int_mode = int_mode;
1892 switch (p_hwfn->p_dev->int_mode) {
1893 case ECORE_INT_MODE_INTA:
1894 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1895 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1896 break;
1897
1898 case ECORE_INT_MODE_MSI:
1899 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1900 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1901 break;
1902
1903 case ECORE_INT_MODE_MSIX:
1904 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1905 break;
1906 case ECORE_INT_MODE_POLL:
1907 break;
1908 }
1909
1910 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1911 }
1912
1913 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
1914 struct ecore_ptt *p_ptt)
1915 {
1916 #ifndef ASIC_ONLY
1917 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1918 DP_INFO(p_hwfn, "FPGA - Don't enable Attentions in IGU and MISC\n");
1919 return;
1920 }
1921 #endif
1922
1923 /* Configure AEU signal change to produce attentions */
1924 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1925 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1926 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1927 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1928
1929 /* Flush the writes to IGU */
1930 OSAL_MMIOWB(p_hwfn->p_dev);
1931
1932 /* Unmask AEU signals toward IGU */
1933 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1934 }
1935
1936 enum _ecore_status_t
1937 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1938 enum ecore_int_mode int_mode)
1939 {
1940 enum _ecore_status_t rc = ECORE_SUCCESS;
1941
1942 ecore_int_igu_enable_attn(p_hwfn, p_ptt);
1943
1944 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1945 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
1946 if (rc != ECORE_SUCCESS) {
1947 DP_NOTICE(p_hwfn, true, "Slowpath IRQ request failed\n");
1948 return ECORE_NORESOURCES;
1949 }
1950 p_hwfn->b_int_requested = true;
1951 }
1952
1953 /* Enable interrupt Generation */
1954 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1955
1956 p_hwfn->b_int_enabled = 1;
1957
1958 return rc;
1959 }
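
/* Usage sketch (illustrative): a driver bringing up MSI-X interrupts would
 * typically call
 *
 *	rc = ecore_int_igu_enable(p_hwfn, p_ptt, ECORE_INT_MODE_MSIX);
 *
 * once its OSAL layer is ready to service the slowpath IRQ request above.
 */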
1960
1961 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
1962 struct ecore_ptt *p_ptt)
1963 {
1964 p_hwfn->b_int_enabled = 0;
1965
1966 if (IS_VF(p_hwfn->p_dev))
1967 return;
1968
1969 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1970 }
1971
1972 #define IGU_CLEANUP_SLEEP_LENGTH (1000)
1973 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
1974 struct ecore_ptt *p_ptt,
1975 u16 igu_sb_id,
1976 bool cleanup_set,
1977 u16 opaque_fid)
1978 {
1979 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1980 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
1981 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1982 u8 type = 0; /* FIXME MichalS type??? */
1983
1984 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
1985 IGU_REG_CLEANUP_STATUS_0) != 0x200);
1986
1987 /* Use the Control Command Register to perform the cleanup. There is an
1988 * option to do this using the IGU BAR, but then it can't be used for VFs.
1989 */
1990
1991 /* Set the data field */
1992 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1993 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
1994 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1995
1996 /* Set the control register */
1997 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1998 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1999 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
2000
2001 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
2002
2003 OSAL_BARRIER(p_hwfn->p_dev);
2004
2005 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
2006
2007 /* Flush the write to IGU */
2008 OSAL_MMIOWB(p_hwfn->p_dev);
2009
2010 /* calculate where to read the status bit from */
2011 sb_bit = 1 << (igu_sb_id % 32);
2012 sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
2013
2014 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
2015
2016 /* Now wait for the command to complete */
2017 while (--sleep_cnt) {
2018 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
2019 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
2020 break;
2021 OSAL_MSLEEP(5);
2022 }
2023
2024 if (!sleep_cnt)
2025 DP_NOTICE(p_hwfn, true,
2026 "Timeout waiting for clear status 0x%08x [for sb %d]\n",
2027 val, igu_sb_id);
2028 }
2029
2030 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
2031 struct ecore_ptt *p_ptt,
2032 u16 igu_sb_id, u16 opaque, bool b_set)
2033 {
2034 struct ecore_igu_block *p_block;
2035 int pi, i;
2036
2037 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2038 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2039 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
2040 igu_sb_id, p_block->function_id, p_block->is_pf,
2041 p_block->vector_number);
2042
2043 /* Set */
2044 if (b_set)
2045 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
2046
2047 /* Clear */
2048 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
2049
2050 /* Wait for the IGU SB to cleanup */
2051 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
2052 u32 val;
2053
2054 val = ecore_rd(p_hwfn, p_ptt,
2055 IGU_REG_WRITE_DONE_PENDING +
2056 ((igu_sb_id / 32) * 4));
2057 if (val & (1 << (igu_sb_id % 32)))
2058 OSAL_UDELAY(10);
2059 else
2060 break;
2061 }
2062 if (i == IGU_CLEANUP_SLEEP_LENGTH)
2063 DP_NOTICE(p_hwfn, true,
2064 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
2065 igu_sb_id);
2066
2067 /* Clear the CAU for the SB */
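	/* The 12 below presumably corresponds to PIS_PER_SB_E4 */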
2068 for (pi = 0; pi < 12; pi++)
2069 ecore_wr(p_hwfn, p_ptt,
2070 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
2071 }
2072
2073 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
2074 struct ecore_ptt *p_ptt,
2075 bool b_set,
2076 bool b_slowpath)
2077 {
2078 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2079 struct ecore_igu_block *p_block;
2080 u16 igu_sb_id = 0;
2081 u32 val = 0;
2082
2083 /* @@@TBD MichalK temporary... should be moved to init-tool... */
2084 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
2085 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
2086 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
2087 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
2088 /* end temporary */
2089
2090 for (igu_sb_id = 0;
2091 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2092 igu_sb_id++) {
2093 p_block = &p_info->entry[igu_sb_id];
2094
2095 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
2096 !p_block->is_pf ||
2097 (p_block->status & ECORE_IGU_STATUS_DSB))
2098 continue;
2099
2100 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
2101 p_hwfn->hw_info.opaque_fid,
2102 b_set);
2103 }
2104
2105 if (b_slowpath)
2106 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2107 p_info->igu_dsb_id,
2108 p_hwfn->hw_info.opaque_fid,
2109 b_set);
2110 }
2111
2112 int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
2113 struct ecore_ptt *p_ptt)
2114 {
2115 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2116 struct ecore_igu_block *p_block;
2117 int pf_sbs, vf_sbs;
2118 u16 igu_sb_id;
2119 u32 val, rval;
2120
2121 if (!RESC_NUM(p_hwfn, ECORE_SB)) {
2122 /* We're using an old MFW - we have to prevent any switching
2123 * of SBs between the PF and its VFs, as the driver would later
2124 * be unable to tell which SB belongs to which.
2125 */
2126 p_info->b_allow_pf_vf_change = false;
2127 } else {
2128 /* Use the numbers the MFW has provided -
2129 * don't forget the MFW accounts for the default SB as well.
2130 */
2131 p_info->b_allow_pf_vf_change = true;
2132
2133 if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
2134 DP_INFO(p_hwfn,
2135 "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
2136 RESC_NUM(p_hwfn, ECORE_SB) - 1,
2137 p_info->usage.cnt);
2138 p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
2139 }
2140
2141 /* TODO - how do we learn about VF SBs from MFW? */
2142 if (IS_PF_SRIOV(p_hwfn)) {
2143 u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
2144
2145 if (vfs != p_info->usage.iov_cnt)
2146 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2147 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
2148 p_info->usage.iov_cnt, vfs);
2149
2150 /* At this point we know the total number of SBs in the
2151 * IGU and the number of PF SBs, so we can validate that
2152 * enough would remain for the VFs.
2153 */
2154 if (vfs > p_info->usage.free_cnt +
2155 p_info->usage.free_cnt_iov -
2156 p_info->usage.cnt) {
2157 DP_NOTICE(p_hwfn, true,
2158 "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
2159 p_info->usage.free_cnt +
2160 p_info->usage.free_cnt_iov,
2161 p_info->usage.cnt, vfs);
2162 return ECORE_INVAL;
2163 }
2164 }
2165 }
2166
2167 /* Cap the number of VF SBs by the number of VFs */
2168 if (IS_PF_SRIOV(p_hwfn))
2169 p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;
2170
2171 /* Mark all SBs as free, now in the right PF/VFs division */
2172 p_info->usage.free_cnt = p_info->usage.cnt;
2173 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
2174 p_info->usage.orig = p_info->usage.cnt;
2175 p_info->usage.iov_orig = p_info->usage.iov_cnt;
2176
2177 /* We now proceed to re-configure the IGU CAM to reflect the initial
2178 * configuration. We can start with the Default SB.
2179 */
2180 pf_sbs = p_info->usage.cnt;
2181 vf_sbs = p_info->usage.iov_cnt;
2182
2183 for (igu_sb_id = p_info->igu_dsb_id;
2184 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2185 igu_sb_id++) {
2186 p_block = &p_info->entry[igu_sb_id];
2187 val = 0;
2188
2189 if (!(p_block->status & ECORE_IGU_STATUS_VALID))
2190 continue;
2191
2192 if (p_block->status & ECORE_IGU_STATUS_DSB) {
2193 p_block->function_id = p_hwfn->rel_pf_id;
2194 p_block->is_pf = 1;
2195 p_block->vector_number = 0;
2196 p_block->status = ECORE_IGU_STATUS_VALID |
2197 ECORE_IGU_STATUS_PF |
2198 ECORE_IGU_STATUS_DSB;
2199 } else if (pf_sbs) {
2200 pf_sbs--;
2201 p_block->function_id = p_hwfn->rel_pf_id;
2202 p_block->is_pf = 1;
2203 p_block->vector_number = p_info->usage.cnt - pf_sbs;
2204 p_block->status = ECORE_IGU_STATUS_VALID |
2205 ECORE_IGU_STATUS_PF |
2206 ECORE_IGU_STATUS_FREE;
2207 } else if (vf_sbs) {
2208 p_block->function_id =
2209 p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
2210 p_info->usage.iov_cnt - vf_sbs;
2211 p_block->is_pf = 0;
2212 p_block->vector_number = 0;
2213 p_block->status = ECORE_IGU_STATUS_VALID |
2214 ECORE_IGU_STATUS_FREE;
2215 vf_sbs--;
2216 } else {
2217 p_block->function_id = 0;
2218 p_block->is_pf = 0;
2219 p_block->vector_number = 0;
2220 }
2221
2222 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2223 p_block->function_id);
2224 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2225 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2226 p_block->vector_number);
2227
2228 /* VF entries will be enabled when the VF is initialized */
2229 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2230
2231 rval = ecore_rd(p_hwfn, p_ptt,
2232 IGU_REG_MAPPING_MEMORY +
2233 sizeof(u32) * igu_sb_id);
2234
2235 if (rval != val) {
2236 ecore_wr(p_hwfn, p_ptt,
2237 IGU_REG_MAPPING_MEMORY +
2238 sizeof(u32) * igu_sb_id,
2239 val);
2240
2241 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2242 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
2243 igu_sb_id, p_block->function_id,
2244 p_block->is_pf, p_block->vector_number,
2245 rval, val);
2246 }
2247 }
2248
2249 return 0;
2250 }
2251
2252 int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
2253 struct ecore_ptt *p_ptt)
2254 {
2255 struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;
2256
2257 /* Return all the usage indications to default prior to the reset;
2258 * the reset expects the non-orig counters to reflect the initial
2259 * status of the SBs, and will re-calculate the originals based on those.
2260 */
2261 p_cnt->cnt = p_cnt->orig;
2262 p_cnt->free_cnt = p_cnt->orig;
2263 p_cnt->iov_cnt = p_cnt->iov_orig;
2264 p_cnt->free_cnt_iov = p_cnt->iov_orig;
2265 p_cnt->orig = 0;
2266 p_cnt->iov_orig = 0;
2267
2268 /* TODO - we probably need to re-configure the CAU as well... */
2269 return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
2270 }
2271
2272 static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
2273 struct ecore_ptt *p_ptt,
2274 u16 igu_sb_id)
2275 {
2276 u32 val = ecore_rd(p_hwfn, p_ptt,
2277 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
2278 struct ecore_igu_block *p_block;
2279
2280 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2281
2282 /* Fill the block information */
2283 p_block->function_id = GET_FIELD(val,
2284 IGU_MAPPING_LINE_FUNCTION_NUMBER);
2285 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
2286 p_block->vector_number = GET_FIELD(val,
2287 IGU_MAPPING_LINE_VECTOR_NUMBER);
2288 p_block->igu_sb_id = igu_sb_id;
2289 }
2290
2291 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
2292 struct ecore_ptt *p_ptt)
2293 {
2294 struct ecore_igu_info *p_igu_info;
2295 struct ecore_igu_block *p_block;
2296 u32 min_vf = 0, max_vf = 0;
2297 u16 igu_sb_id;
2298
2299 p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
2300 GFP_KERNEL,
2301 sizeof(*p_igu_info));
2302 if (!p_hwfn->hw_info.p_igu_info)
2303 return ECORE_NOMEM;
2304 p_igu_info = p_hwfn->hw_info.p_igu_info;
2305
2306 /* Distinguish between an existent and a non-existent default SB */
2307 p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;
2308
2309 /* Find the range of VF ids whose SB belong to this PF */
2310 if (p_hwfn->p_dev->p_iov_info) {
2311 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
2312
2313 min_vf = p_iov->first_vf_in_pf;
2314 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
2315 }
2316
2317 for (igu_sb_id = 0;
2318 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2319 igu_sb_id++) {
2320 /* Read current entry; Notice it might not belong to this PF */
2321 ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
2322 p_block = &p_igu_info->entry[igu_sb_id];
2323
2324 if ((p_block->is_pf) &&
2325 (p_block->function_id == p_hwfn->rel_pf_id)) {
2326 p_block->status = ECORE_IGU_STATUS_PF |
2327 ECORE_IGU_STATUS_VALID |
2328 ECORE_IGU_STATUS_FREE;
2329
2330 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
2331 p_igu_info->usage.cnt++;
2332 } else if (!(p_block->is_pf) &&
2333 (p_block->function_id >= min_vf) &&
2334 (p_block->function_id < max_vf)) {
2335 /* Available for VFs of this PF */
2336 p_block->status = ECORE_IGU_STATUS_VALID |
2337 ECORE_IGU_STATUS_FREE;
2338
2339 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
2340 p_igu_info->usage.iov_cnt++;
2341 }
2342
2343 /* Mark the first entry belonging to the PF or its VFs
2344 * as the default SB [we'll reset the IGU prior to first usage].
2345 */
2346 if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
2347 (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
2348 p_igu_info->igu_dsb_id = igu_sb_id;
2349 p_block->status |= ECORE_IGU_STATUS_DSB;
2350 }
2351
2352 /* While this isn't suitable for all clients, limit the number
2353 * of prints by having each PF print only its own entries, with
2354 * the exception of PF0, which prints everything.
2355 */
2356 if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
2357 (p_hwfn->abs_pf_id == 0))
2358 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2359 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2360 igu_sb_id, p_block->function_id,
2361 p_block->is_pf, p_block->vector_number);
2362 }
2363
2364 if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
2365 DP_NOTICE(p_hwfn, true,
2366 "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
2367 p_igu_info->igu_dsb_id);
2368 return ECORE_INVAL;
2369 }
2370
2371 /* All non default SB are considered free at this point */
2372 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
2373 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
2374
2375 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2376 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
2377 p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
2378 p_igu_info->usage.iov_cnt);
2379
2380 return ECORE_SUCCESS;
2381 }
2382
2383 enum _ecore_status_t
2384 ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2385 u16 sb_id, bool b_to_vf)
2386 {
2387 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2388 struct ecore_igu_block *p_block = OSAL_NULL;
2389 u16 igu_sb_id = 0, vf_num = 0;
2390 u32 val = 0;
2391
2392 if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
2393 return ECORE_INVAL;
2394
2395 if (sb_id == ECORE_SP_SB_ID)
2396 return ECORE_INVAL;
2397
2398 if (!p_info->b_allow_pf_vf_change) {
2399 DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
2400 return ECORE_INVAL;
2401 }
2402
2403 /* If we're moving an SB from PF to VF, the client has to specify
2404 * which vector it wants to move.
2405 */
2406 if (b_to_vf) {
2407 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
2408 if (igu_sb_id == ECORE_SB_INVALID_IDX)
2409 return ECORE_INVAL;
2410 }
2411
2412 /* If we're moving an SB from VF to PF, we need to validate there
2413 * isn't already a line configured for that vector.
2414 */
2415 if (!b_to_vf) {
2416 if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
2417 ECORE_SB_INVALID_IDX)
2418 return ECORE_INVAL;
2419 }
2420
2421 /* We need to validate that the SB can actually be relocated.
2422 * This would also handle the previous case where we've explicitly
2423 * stated which IGU SB needs to move.
2424 */
2425 for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2426 igu_sb_id++) {
2427 p_block = &p_info->entry[igu_sb_id];
2428
2429 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
2430 !(p_block->status & ECORE_IGU_STATUS_FREE) ||
2431 (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
2432 if (b_to_vf)
2433 return ECORE_INVAL;
2434 else
2435 continue;
2436 }
2437
2438 break;
2439 }
2440
2441 if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
2442 DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
2443 "Failed to find a free SB to move\n");
2444 return ECORE_INVAL;
2445 }
2446
2447 if (p_block == OSAL_NULL) {
2448 DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
2449 "SB address (p_block) is NULL\n");
2450 return ECORE_INVAL;
2451 }
2452
2453 /* At this point, p_block points to the SB we want to relocate */
2454 if (b_to_vf) {
2455 p_block->status &= ~ECORE_IGU_STATUS_PF;
2456
2457 /* It doesn't matter which VF number we choose, since we're
2458 * going to disable the line; but let's keep it in range.
2459 */
2460 vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
2461
2462 p_block->function_id = (u8)vf_num;
2463 p_block->is_pf = 0;
2464 p_block->vector_number = 0;
2465
2466 p_info->usage.cnt--;
2467 p_info->usage.free_cnt--;
2468 p_info->usage.iov_cnt++;
2469 p_info->usage.free_cnt_iov++;
2470
2471 /* TODO - if SBs aren't really the limiting factor,
2472 * then this might not be accurate [in the sense that
2473 * we might not need to decrement the feature].
2474 */
2475 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
2476 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
2477 } else {
2478 p_block->status |= ECORE_IGU_STATUS_PF;
2479 p_block->function_id = p_hwfn->rel_pf_id;
2480 p_block->is_pf = 1;
2481 p_block->vector_number = sb_id + 1;
2482
2483 p_info->usage.cnt++;
2484 p_info->usage.free_cnt++;
2485 p_info->usage.iov_cnt--;
2486 p_info->usage.free_cnt_iov--;
2487
2488 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
2489 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
2490 }
2491
2492 /* Update the IGU and CAU with the new configuration */
2493 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2494 p_block->function_id);
2495 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2496 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2497 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2498 p_block->vector_number);
2499
2500 ecore_wr(p_hwfn, p_ptt,
2501 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
2502 val);
2503
2504 ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
2505 igu_sb_id, vf_num,
2506 p_block->is_pf ? 0 : 1);
2507
2508 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2509 "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2510 igu_sb_id, p_block->function_id,
2511 p_block->is_pf, p_block->vector_number);
2512
2513 return ECORE_SUCCESS;
2514 }
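
/* Relocation sketch (illustrative): donating PF vector 3's SB to the VF pool
 * and later reclaiming a free VF SB for that same vector might look like
 *
 *	rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, 3, true);
 *	...
 *	rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, 3, false);
 */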
2515
2516 /**
2517 * @brief Initialize igu runtime registers
2518 *
2519 * @param p_hwfn
2520 */
2521 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
2522 {
2523 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2524
2525 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2526 }
2527
2528 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
2529 IGU_CMD_INT_ACK_BASE)
2530 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
2531 IGU_CMD_INT_ACK_BASE)
2532 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
2533 {
2534 u32 intr_status_hi = 0, intr_status_lo = 0;
2535 u64 intr_status = 0;
2536
2537 intr_status_lo = REG_RD(p_hwfn,
2538 GTT_BAR0_MAP_REG_IGU_CMD +
2539 LSB_IGU_CMD_ADDR * 8);
2540 intr_status_hi = REG_RD(p_hwfn,
2541 GTT_BAR0_MAP_REG_IGU_CMD +
2542 MSB_IGU_CMD_ADDR * 8);
2543 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2544
2545 return intr_status;
2546 }
2547
2548 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
2549 {
2550 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
2551 p_hwfn->b_sp_dpc_enabled = true;
2552 }
2553
2554 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
2555 {
2556 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
2557 if (!p_hwfn->sp_dpc)
2558 return ECORE_NOMEM;
2559
2560 return ECORE_SUCCESS;
2561 }
2562
2563 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
2564 {
2565 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
2566 p_hwfn->sp_dpc = OSAL_NULL;
2567 }
2568
2569 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
2570 struct ecore_ptt *p_ptt)
2571 {
2572 enum _ecore_status_t rc = ECORE_SUCCESS;
2573
2574 rc = ecore_int_sp_dpc_alloc(p_hwfn);
2575 if (rc != ECORE_SUCCESS) {
2576 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
2577 return rc;
2578 }
2579
2580 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
2581 if (rc != ECORE_SUCCESS) {
2582 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
2583 return rc;
2584 }
2585
2586 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
2587 if (rc != ECORE_SUCCESS)
2588 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
2589
2590 return rc;
2591 }
2592
2593 void ecore_int_free(struct ecore_hwfn *p_hwfn)
2594 {
2595 ecore_int_sp_sb_free(p_hwfn);
2596 ecore_int_sb_attn_free(p_hwfn);
2597 ecore_int_sp_dpc_free(p_hwfn);
2598 }
2599
2600 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2601 {
2602 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
2603 return;
2604
2605 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
2606 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
2607 ecore_int_sp_dpc_setup(p_hwfn);
2608 }
2609
2610 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
2611 struct ecore_sb_cnt_info *p_sb_cnt_info)
2612 {
2613 struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;
2614
2615 if (!p_igu_info || !p_sb_cnt_info)
2616 return;
2617
2618 OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
2619 sizeof(*p_sb_cnt_info));
2620 }
2621
2622 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
2623 {
2624 int i;
2625
2626 for_each_hwfn(p_dev, i)
2627 p_dev->hwfns[i].b_int_requested = false;
2628 }
2629
2630 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
2631 {
2632 p_dev->attn_clr_en = clr_enable;
2633 }
2634
2635 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
2636 struct ecore_ptt *p_ptt,
2637 u8 timer_res, u16 sb_id, bool tx)
2638 {
2639 struct cau_sb_entry sb_entry;
2640 enum _ecore_status_t rc;
2641
2642 if (!p_hwfn->hw_init_done) {
2643 DP_ERR(p_hwfn, "hardware not initialized yet\n");
2644 return ECORE_INVAL;
2645 }
2646
2647 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2648 sb_id * sizeof(u64),
2649 (u64)(osal_uintptr_t)&sb_entry, 2,
2650 OSAL_NULL /* default parameters */);
2651 if (rc != ECORE_SUCCESS) {
2652 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2653 return rc;
2654 }
2655
2656 if (tx)
2657 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
2658 else
2659 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
2660
2661 rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
2662 (u64)(osal_uintptr_t)&sb_entry,
2663 CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2,
2664 OSAL_NULL /* default parameters */);
2665 if (rc != ECORE_SUCCESS) {
2666 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
2667 return rc;
2668 }
2669
2670 return rc;
2671 }
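
/* Usage sketch (illustrative values): coarsening the Rx timer resolution of
 * the SB at CAU index 5 after init might look like
 *
 *	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, 2, 5, false);
 *
 * where timer_res = 2, sb_id = 5, and `false' selects the Rx (TIMER_RES0)
 * field rather than Tx.
 */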
2672
2673 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
2674 struct ecore_ptt *p_ptt,
2675 struct ecore_sb_info *p_sb,
2676 struct ecore_sb_info_dbg *p_info)
2677 {
2678 u16 sbid = p_sb->igu_sb_id;
2679 int i;
2680
2681 if (IS_VF(p_hwfn->p_dev))
2682 return ECORE_INVAL;
2683
2684 if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
2685 return ECORE_INVAL;
2686
2687 p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
2688 IGU_REG_PRODUCER_MEMORY + sbid * 4);
2689 p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
2690 IGU_REG_CONSUMER_MEM + sbid * 4);
2691
2692 for (i = 0; i < PIS_PER_SB_E4; i++)
2693 p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
2694 CAU_REG_PI_MEMORY +
2695 sbid * 4 * PIS_PER_SB_E4 + i * 4);
2696
2697 return ECORE_SUCCESS;
2698 }
2699