1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Chelsio Communications.
3 * All rights reserved.
4 */
5
6 #include <netinet/in.h>
7
8 #include <rte_interrupts.h>
9 #include <rte_log.h>
10 #include <rte_debug.h>
11 #include <rte_pci.h>
12 #include <rte_branch_prediction.h>
13 #include <rte_memory.h>
14 #include <rte_tailq.h>
15 #include <rte_eal.h>
16 #include <rte_alarm.h>
17 #include <rte_ether.h>
18 #include <ethdev_driver.h>
19 #include <rte_malloc.h>
20 #include <rte_random.h>
21 #include <rte_dev.h>
22 #include <rte_byteorder.h>
23
24 #include "common.h"
25 #include "t4_regs.h"
26 #include "t4_regs_values.h"
27 #include "t4fw_interface.h"
28
29 /**
30 * t4_read_mtu_tbl - returns the values in the HW path MTU table
31 * @adap: the adapter
32 * @mtus: where to store the MTU values
33 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
34 *
35 * Reads the HW path MTU table.
36 */
37 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
38 {
39 u32 v;
40 int i;
41
42 for (i = 0; i < NMTUS; ++i) {
43 t4_write_reg(adap, A_TP_MTU_TABLE,
44 V_MTUINDEX(0xff) | V_MTUVALUE(i));
45 v = t4_read_reg(adap, A_TP_MTU_TABLE);
46 mtus[i] = G_MTUVALUE(v);
47 if (mtu_log)
48 mtu_log[i] = G_MTUWIDTH(v);
49 }
50 }
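
/*
 * Usage sketch (illustrative, not part of the driver): dump the HW path MTU
 * table for debugging.  dev_debug() from the cxgbe compat layer is assumed
 * to be available to the caller.
 *
 *	u16 mtus[NMTUS];
 *	u8 mtu_log[NMTUS];
 *	int i;
 *
 *	t4_read_mtu_tbl(adap, mtus, mtu_log);
 *	for (i = 0; i < NMTUS; i++)
 *		dev_debug(adap, "MTU[%d] = %u (log2 width %u)\n",
 *			  i, mtus[i], mtu_log[i]);
 */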
51
52 /**
53 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
54 * @adap: the adapter
55 * @addr: the indirect TP register address
56 * @mask: specifies the field within the register to modify
57 * @val: new value for the field
58 *
59 * Sets a field of an indirect TP register to the given value.
60 */
61 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
62 unsigned int mask, unsigned int val)
63 {
64 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
65 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
66 t4_write_reg(adap, A_TP_PIO_DATA, val);
67 }
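
/*
 * Example sketch: a call like the one below would set only the bits selected
 * by the mask and preserve the rest of the indirect TP register.  The
 * register and field names used here are hypothetical placeholders.
 *
 *	t4_tp_wr_bits_indirect(adap, A_TP_SOME_CONFIG, F_SOME_ENABLE,
 *			       F_SOME_ENABLE);
 */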
68
69 /* The minimum additive increment value for the congestion control table */
70 #define CC_MIN_INCR 2U
71
72 /**
73 * t4_load_mtus - write the MTU and congestion control HW tables
74 * @adap: the adapter
75 * @mtus: the values for the MTU table
76 * @alpha: the values for the congestion control alpha parameter
77 * @beta: the values for the congestion control beta parameter
78 *
79 * Write the HW MTU table with the supplied MTUs and the high-speed
80 * congestion control table with the supplied alpha, beta, and MTUs.
81 * We write the two tables together because the additive increments
82 * depend on the MTUs.
83 */
84 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
85 const unsigned short *alpha, const unsigned short *beta)
86 {
87 static const unsigned int avg_pkts[NCCTRL_WIN] = {
88 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
89 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
90 28672, 40960, 57344, 81920, 114688, 163840, 229376
91 };
92
93 unsigned int i, w;
94
95 for (i = 0; i < NMTUS; ++i) {
96 unsigned int mtu = mtus[i];
97 unsigned int log2 = cxgbe_fls(mtu);
98
99 if (!(mtu & ((1 << log2) >> 2))) /* round */
100 log2--;
101 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
102 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
103
104 for (w = 0; w < NCCTRL_WIN; ++w) {
105 unsigned int inc;
106
107 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
108 CC_MIN_INCR);
109
110 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
111 (w << 16) | (beta[w] << 13) | inc);
112 }
113 }
114 }
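
/*
 * Worked example of the additive-increment computation above (illustrative
 * numbers only): with mtu = 1500, alpha[w] = 2 and avg_pkts[w] = 10, the
 * payload size is 1500 - 40 = 1460 bytes, so inc = (1460 * 2) / 10 = 292.
 * Combinations that would yield less than CC_MIN_INCR are clamped to
 * CC_MIN_INCR.
 */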
115
116 /**
117 * t4_wait_op_done_val - wait until an operation is completed
118 * @adapter: the adapter performing the operation
119 * @reg: the register to check for completion
120 * @mask: a single-bit field within @reg that indicates completion
121 * @polarity: the value of the field when the operation is completed
122 * @attempts: number of check iterations
123 * @delay: delay in usecs between iterations
124 * @valp: where to store the value of the register at completion time
125 *
126 * Wait until an operation is completed by checking a bit in a register
127 * up to @attempts times. If @valp is not NULL the value of the register
128 * at the time it indicated completion is stored there. Returns 0 if the
129 * operation completes and -EAGAIN otherwise.
130 */
131 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
132 int polarity, int attempts, int delay, u32 *valp)
133 {
134 while (1) {
135 u32 val = t4_read_reg(adapter, reg);
136
137 if (!!(val & mask) == polarity) {
138 if (valp)
139 *valp = val;
140 return 0;
141 }
142 if (--attempts == 0)
143 return -EAGAIN;
144 if (delay)
145 udelay(delay);
146 }
147 }
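
/*
 * Usage sketch (illustrative): poll a hypothetical DONE bit ten times with a
 * 10us delay between reads and capture the final register value.  The
 * register and bit names are placeholders.
 *
 *	u32 val;
 *
 *	if (t4_wait_op_done_val(adap, A_SOME_STATUS_REG, F_SOME_DONE, 1,
 *				10, 10, &val))
 *		dev_err(adap, "operation did not complete\n");
 */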
148
149 /**
150 * t4_set_reg_field - set a register field to a value
151 * @adapter: the adapter to program
152 * @addr: the register address
153 * @mask: specifies the portion of the register to modify
154 * @val: the new value for the register field
155 *
156 * Sets a register field specified by the supplied mask to the
157 * given value.
158 */
159 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
160 u32 val)
161 {
162 u32 v = t4_read_reg(adapter, addr) & ~mask;
163
164 t4_write_reg(adapter, addr, v | val);
165 (void)t4_read_reg(adapter, addr); /* flush */
166 }
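
/*
 * Example sketch: update only the field selected by a hypothetical
 * V_SOMEFIELD()/M_SOMEFIELD macro pair, leaving the other bits of the
 * register unchanged.
 *
 *	t4_set_reg_field(adap, A_SOME_REG, V_SOMEFIELD(M_SOMEFIELD),
 *			 V_SOMEFIELD(new_val));
 */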
167
168 /**
169 * t4_read_indirect - read indirectly addressed registers
170 * @adap: the adapter
171 * @addr_reg: register holding the indirect address
172 * @data_reg: register holding the value of the indirect register
173 * @vals: where the read register values are stored
174 * @nregs: how many indirect registers to read
175 * @start_idx: index of first indirect register to read
176 *
177 * Reads registers that are accessed indirectly through an address/data
178 * register pair.
179 */
180 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
181 unsigned int data_reg, u32 *vals, unsigned int nregs,
182 unsigned int start_idx)
183 {
184 while (nregs--) {
185 t4_write_reg(adap, addr_reg, start_idx);
186 *vals++ = t4_read_reg(adap, data_reg);
187 start_idx++;
188 }
189 }
190
191 /**
192 * t4_write_indirect - write indirectly addressed registers
193 * @adap: the adapter
194 * @addr_reg: register holding the indirect addresses
195 * @data_reg: register holding the value for the indirect registers
196 * @vals: values to write
197 * @nregs: how many indirect registers to write
198 * @start_idx: address of first indirect register to write
199 *
200 * Writes a sequential block of registers that are accessed indirectly
201 * through an address/data register pair.
202 */
203 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
204 unsigned int data_reg, const u32 *vals,
205 unsigned int nregs, unsigned int start_idx)
206 {
207 while (nregs--) {
208 t4_write_reg(adap, addr_reg, start_idx++);
209 t4_write_reg(adap, data_reg, *vals++);
210 }
211 }
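
/*
 * Usage sketch: the TP PIO address/data pair used elsewhere in this file is
 * a typical target for these helpers, e.g. reading four consecutive indirect
 * TP registers starting at index start_idx.
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals, 4,
 *			 start_idx);
 */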
212
213 /**
214 * t4_report_fw_error - report firmware error
215 * @adap: the adapter
216 *
217 * The adapter firmware can indicate error conditions to the host.
218 * If the firmware has indicated an error, print out the reason for
219 * the firmware error.
220 */
221 static void t4_report_fw_error(struct adapter *adap)
222 {
223 static const char * const reason[] = {
224 "Crash", /* PCIE_FW_EVAL_CRASH */
225 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
226 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
227 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
228 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
229 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
230 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
231 "Reserved", /* reserved */
232 };
233 u32 pcie_fw;
234
235 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
236 if (pcie_fw & F_PCIE_FW_ERR)
237 pr_err("%s: Firmware reports adapter error: %s\n",
238 __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
239 }
240
241 /*
242 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
243 */
244 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
245 u32 mbox_addr)
246 {
247 for ( ; nflit; nflit--, mbox_addr += 8)
248 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
249 }
250
251 /*
252 * Handle a FW assertion reported in a mailbox.
253 */
254 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
255 {
256 struct fw_debug_cmd asrt;
257
258 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
259 pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
260 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
261 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
262 }
263
264 #define X_CIM_PF_NOACCESS 0xeeeeeeee
265
266 /**
267 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
268 * @adap: the adapter
269 * @mbox: index of the mailbox to use
270 * @cmd: the command to write
271 * @size: command length in bytes
272 * @rpl: where to optionally store the reply
273 * @sleep_ok: if true we may sleep while awaiting command completion
274 * @timeout: time to wait for command to finish before timing out
275 * (negative implies @sleep_ok=false)
276 *
277 * Sends the given command to FW through the selected mailbox and waits
278 * for the FW to execute the command. If @rpl is not %NULL it is used to
279 * store the FW's reply to the command. The command and its optional
280 * reply are of the same length. Some FW commands like RESET and
281 * INITIALIZE can take a considerable amount of time to execute.
282 * @sleep_ok determines whether we may sleep while awaiting the response.
283 * If sleeping is allowed we use progressive backoff otherwise we spin.
284 * Note that passing in a negative @timeout is an alternate mechanism
285 * for specifying @sleep_ok=false. This is useful when a higher level
286 * interface allows for specification of @timeout but not @sleep_ok ...
287 *
288 * Returns 0 on success or a negative errno on failure. A
289 * failure can happen either because we are not able to execute the
290 * command or FW executes it but signals an error. In the latter case
291 * the return value is the error code indicated by FW (negated).
292 */
293 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
294 const void __attribute__((__may_alias__)) *cmd,
295 int size, void *rpl, bool sleep_ok, int timeout)
296 {
297 /*
298 * We delay in small increments at first in an effort to maintain
299 * responsiveness for simple, fast executing commands but then back
300 * off to larger delays to a maximum retry delay.
301 */
302 static const int delay[] = {
303 1, 1, 3, 5, 10, 10, 20, 50, 100
304 };
305
306 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
307 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
308 struct mbox_entry *entry;
309 u32 v, ctl, pcie_fw = 0;
310 unsigned int delay_idx;
311 const __be64 *p;
312 int i, ms, ret;
313 u64 res;
314
315 if ((size & 15) != 0 || size > MBOX_LEN)
316 return -EINVAL;
317
318 /*
319 * If we have a negative timeout, that implies that we can't sleep.
320 */
321 if (timeout < 0) {
322 sleep_ok = false;
323 timeout = -timeout;
324 }
325
326 entry = t4_os_alloc(sizeof(*entry));
327 if (entry == NULL)
328 return -ENOMEM;
329
330 /*
331 * Queue ourselves onto the mailbox access list. When our entry is at
332 * the front of the list, we have rights to access the mailbox. So we
333 * wait [for a while] till we're at the front [or bail out with an
334 * EBUSY] ...
335 */
336 t4_os_atomic_add_tail(entry, &adap->mbox_list, &adap->mbox_lock);
337
338 delay_idx = 0;
339 ms = delay[0];
340
341 for (i = 0; ; i += ms) {
342 /*
343 * If we've waited too long, return a busy indication. This
344 * really ought to be based on our initial position in the
345 * mailbox access list but this is a start. We very rarely
346 * contend on access to the mailbox ... Also check for a
347 * firmware error which we'll report as a device error.
348 */
349 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
350 if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
351 t4_os_atomic_list_del(entry, &adap->mbox_list,
352 &adap->mbox_lock);
353 t4_report_fw_error(adap);
354 ret = ((pcie_fw & F_PCIE_FW_ERR) != 0) ? -ENXIO : -EBUSY;
355 goto out_free;
356 }
357
358 /*
359 * If we're at the head, break out and start the mailbox
360 * protocol.
361 */
362 if (t4_os_list_first_entry(&adap->mbox_list) == entry)
363 break;
364
365 /*
366 * Delay for a bit before checking again ...
367 */
368 if (sleep_ok) {
369 ms = delay[delay_idx]; /* last element may repeat */
370 if (delay_idx < ARRAY_SIZE(delay) - 1)
371 delay_idx++;
372 msleep(ms);
373 } else {
374 rte_delay_ms(ms);
375 }
376 }
377
378 /*
379 * Attempt to gain access to the mailbox.
380 */
381 for (i = 0; i < 4; i++) {
382 ctl = t4_read_reg(adap, ctl_reg);
383 v = G_MBOWNER(ctl);
384 if (v != X_MBOWNER_NONE)
385 break;
386 }
387
388 /*
389 * If we were unable to gain access, dequeue ourselves from the
390 * mailbox atomic access list and report the error to our caller.
391 */
392 if (v != X_MBOWNER_PL) {
393 t4_os_atomic_list_del(entry, &adap->mbox_list,
394 &adap->mbox_lock);
395 t4_report_fw_error(adap);
396 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
397 goto out_free;
398 }
399
400 /*
401 * If we gain ownership of the mailbox and there's a "valid" message
402 * in it, this is likely an asynchronous error message from the
403 * firmware. So we'll report that and then proceed on with attempting
404 * to issue our own command ... which may well fail if the error
405 * presaged the firmware crashing ...
406 */
407 if (ctl & F_MBMSGVALID) {
408 dev_err(adap, "found VALID command in mbox %u: "
409 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
410 (unsigned long long)t4_read_reg64(adap, data_reg),
411 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
412 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
413 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
414 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
415 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
416 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
417 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
418 }
419
420 /*
421 * Copy in the new mailbox command and send it on its way ...
422 */
423 for (i = 0, p = cmd; i < size; i += 8, p++)
424 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
425
426 CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
427 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
428 (unsigned long long)t4_read_reg64(adap, data_reg),
429 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
430 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
431 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
432 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
433 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
434 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
435 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
436
437 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
438 t4_read_reg(adap, ctl_reg); /* flush write */
439
440 delay_idx = 0;
441 ms = delay[0];
442
443 /*
444 * Loop waiting for the reply; bail out if we time out or the firmware
445 * reports an error.
446 */
447 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
448 for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
449 if (sleep_ok) {
450 ms = delay[delay_idx]; /* last element may repeat */
451 if (delay_idx < ARRAY_SIZE(delay) - 1)
452 delay_idx++;
453 msleep(ms);
454 } else {
455 msleep(ms);
456 }
457
458 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
459 v = t4_read_reg(adap, ctl_reg);
460 if (v == X_CIM_PF_NOACCESS)
461 continue;
462 if (G_MBOWNER(v) == X_MBOWNER_PL) {
463 if (!(v & F_MBMSGVALID)) {
464 t4_write_reg(adap, ctl_reg,
465 V_MBOWNER(X_MBOWNER_NONE));
466 continue;
467 }
468
469 CXGBE_DEBUG_MBOX(adap,
470 "%s: mbox %u: %016llx %016llx %016llx %016llx "
471 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
472 (unsigned long long)t4_read_reg64(adap, data_reg),
473 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
474 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
475 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
476 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
477 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
478 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
479 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
480
481 CXGBE_DEBUG_MBOX(adap,
482 "command %#x completed in %d ms (%ssleeping)\n",
483 *(const u8 *)cmd,
484 i + ms, sleep_ok ? "" : "non-");
485
486 res = t4_read_reg64(adap, data_reg);
487 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
488 fw_asrt(adap, data_reg);
489 res = V_FW_CMD_RETVAL(EIO);
490 } else if (rpl) {
491 get_mbox_rpl(adap, rpl, size / 8, data_reg);
492 }
493 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
494 t4_os_atomic_list_del(entry, &adap->mbox_list,
495 &adap->mbox_lock);
496 ret = -G_FW_CMD_RETVAL((int)res);
497 goto out_free;
498 }
499 }
500
501 /*
502 * We timed out waiting for a reply to our mailbox command. Report
503 * the error and also check to see if the firmware reported any
504 * errors ...
505 */
506 dev_err(adap, "command %#x in mailbox %d timed out\n",
507 *(const u8 *)cmd, mbox);
508 t4_os_atomic_list_del(entry, &adap->mbox_list, &adap->mbox_lock);
509 t4_report_fw_error(adap);
510 ret = ((pcie_fw & F_PCIE_FW_ERR) != 0) ? -ENXIO : -ETIMEDOUT;
511
512 out_free:
513 t4_os_free(entry);
514 return ret;
515 }
516
517 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
518 void *rpl, bool sleep_ok)
519 {
520 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
521 FW_CMD_MAX_TIMEOUT);
522 }
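
/*
 * Usage sketch (illustrative): callers build a firmware command in
 * big-endian form and hand it to t4_wr_mbox()/t4_wr_mbox_meat(), optionally
 * reusing the command buffer for the reply.  t4_fw_tp_pio_rw() later in this
 * file is a concrete in-tree example of this pattern.
 *
 *	struct fw_ldst_cmd c;
 *	int ret;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
 *					F_FW_CMD_REQUEST | F_FW_CMD_READ |
 *					V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_TP_PIO));
 *	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
 *	c.u.addrval.addr = cpu_to_be32(index);
 *	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
 */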
523
524 /**
525 * t4_get_regs_len - return the size of the chip's register set
526 * @adapter: the adapter
527 *
528 * Returns the size of the chip's BAR0 register space.
529 */
530 unsigned int t4_get_regs_len(struct adapter *adapter)
531 {
532 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
533
534 switch (chip_version) {
535 case CHELSIO_T5:
536 case CHELSIO_T6:
537 return T5_REGMAP_SIZE;
538 }
539
540 dev_err(adapter,
541 "Unsupported chip version %d\n", chip_version);
542 return 0;
543 }
544
545 /**
546 * t4_get_regs - read chip registers into provided buffer
547 * @adap: the adapter
548 * @buf: register buffer
549 * @buf_size: size (in bytes) of register buffer
550 *
551 * If the provided register buffer isn't large enough for the chip's
552 * full register range, the register dump will be truncated to the
553 * register buffer's size.
554 */
555 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
556 {
557 static const unsigned int t5_reg_ranges[] = {
558 0x1008, 0x10c0,
559 0x10cc, 0x10f8,
560 0x1100, 0x1100,
561 0x110c, 0x1148,
562 0x1180, 0x1184,
563 0x1190, 0x1194,
564 0x11a0, 0x11a4,
565 0x11b0, 0x11b4,
566 0x11fc, 0x123c,
567 0x1280, 0x173c,
568 0x1800, 0x18fc,
569 0x3000, 0x3028,
570 0x3060, 0x30b0,
571 0x30b8, 0x30d8,
572 0x30e0, 0x30fc,
573 0x3140, 0x357c,
574 0x35a8, 0x35cc,
575 0x35ec, 0x35ec,
576 0x3600, 0x5624,
577 0x56cc, 0x56ec,
578 0x56f4, 0x5720,
579 0x5728, 0x575c,
580 0x580c, 0x5814,
581 0x5890, 0x589c,
582 0x58a4, 0x58ac,
583 0x58b8, 0x58bc,
584 0x5940, 0x59c8,
585 0x59d0, 0x59dc,
586 0x59fc, 0x5a18,
587 0x5a60, 0x5a70,
588 0x5a80, 0x5a9c,
589 0x5b94, 0x5bfc,
590 0x6000, 0x6020,
591 0x6028, 0x6040,
592 0x6058, 0x609c,
593 0x60a8, 0x614c,
594 0x7700, 0x7798,
595 0x77c0, 0x78fc,
596 0x7b00, 0x7b58,
597 0x7b60, 0x7b84,
598 0x7b8c, 0x7c54,
599 0x7d00, 0x7d38,
600 0x7d40, 0x7d80,
601 0x7d8c, 0x7ddc,
602 0x7de4, 0x7e04,
603 0x7e10, 0x7e1c,
604 0x7e24, 0x7e38,
605 0x7e40, 0x7e44,
606 0x7e4c, 0x7e78,
607 0x7e80, 0x7edc,
608 0x7ee8, 0x7efc,
609 0x8dc0, 0x8de0,
610 0x8df8, 0x8e04,
611 0x8e10, 0x8e84,
612 0x8ea0, 0x8f84,
613 0x8fc0, 0x9058,
614 0x9060, 0x9060,
615 0x9068, 0x90f8,
616 0x9400, 0x9408,
617 0x9410, 0x9470,
618 0x9600, 0x9600,
619 0x9608, 0x9638,
620 0x9640, 0x96f4,
621 0x9800, 0x9808,
622 0x9820, 0x983c,
623 0x9850, 0x9864,
624 0x9c00, 0x9c6c,
625 0x9c80, 0x9cec,
626 0x9d00, 0x9d6c,
627 0x9d80, 0x9dec,
628 0x9e00, 0x9e6c,
629 0x9e80, 0x9eec,
630 0x9f00, 0x9f6c,
631 0x9f80, 0xa020,
632 0xd004, 0xd004,
633 0xd010, 0xd03c,
634 0xdfc0, 0xdfe0,
635 0xe000, 0x1106c,
636 0x11074, 0x11088,
637 0x1109c, 0x1117c,
638 0x11190, 0x11204,
639 0x19040, 0x1906c,
640 0x19078, 0x19080,
641 0x1908c, 0x190e8,
642 0x190f0, 0x190f8,
643 0x19100, 0x19110,
644 0x19120, 0x19124,
645 0x19150, 0x19194,
646 0x1919c, 0x191b0,
647 0x191d0, 0x191e8,
648 0x19238, 0x19290,
649 0x193f8, 0x19428,
650 0x19430, 0x19444,
651 0x1944c, 0x1946c,
652 0x19474, 0x19474,
653 0x19490, 0x194cc,
654 0x194f0, 0x194f8,
655 0x19c00, 0x19c08,
656 0x19c10, 0x19c60,
657 0x19c94, 0x19ce4,
658 0x19cf0, 0x19d40,
659 0x19d50, 0x19d94,
660 0x19da0, 0x19de8,
661 0x19df0, 0x19e10,
662 0x19e50, 0x19e90,
663 0x19ea0, 0x19f24,
664 0x19f34, 0x19f34,
665 0x19f40, 0x19f50,
666 0x19f90, 0x19fb4,
667 0x19fc4, 0x19fe4,
668 0x1a000, 0x1a004,
669 0x1a010, 0x1a06c,
670 0x1a0b0, 0x1a0e4,
671 0x1a0ec, 0x1a0f8,
672 0x1a100, 0x1a108,
673 0x1a114, 0x1a120,
674 0x1a128, 0x1a130,
675 0x1a138, 0x1a138,
676 0x1a190, 0x1a1c4,
677 0x1a1fc, 0x1a1fc,
678 0x1e008, 0x1e00c,
679 0x1e040, 0x1e044,
680 0x1e04c, 0x1e04c,
681 0x1e284, 0x1e290,
682 0x1e2c0, 0x1e2c0,
683 0x1e2e0, 0x1e2e0,
684 0x1e300, 0x1e384,
685 0x1e3c0, 0x1e3c8,
686 0x1e408, 0x1e40c,
687 0x1e440, 0x1e444,
688 0x1e44c, 0x1e44c,
689 0x1e684, 0x1e690,
690 0x1e6c0, 0x1e6c0,
691 0x1e6e0, 0x1e6e0,
692 0x1e700, 0x1e784,
693 0x1e7c0, 0x1e7c8,
694 0x1e808, 0x1e80c,
695 0x1e840, 0x1e844,
696 0x1e84c, 0x1e84c,
697 0x1ea84, 0x1ea90,
698 0x1eac0, 0x1eac0,
699 0x1eae0, 0x1eae0,
700 0x1eb00, 0x1eb84,
701 0x1ebc0, 0x1ebc8,
702 0x1ec08, 0x1ec0c,
703 0x1ec40, 0x1ec44,
704 0x1ec4c, 0x1ec4c,
705 0x1ee84, 0x1ee90,
706 0x1eec0, 0x1eec0,
707 0x1eee0, 0x1eee0,
708 0x1ef00, 0x1ef84,
709 0x1efc0, 0x1efc8,
710 0x1f008, 0x1f00c,
711 0x1f040, 0x1f044,
712 0x1f04c, 0x1f04c,
713 0x1f284, 0x1f290,
714 0x1f2c0, 0x1f2c0,
715 0x1f2e0, 0x1f2e0,
716 0x1f300, 0x1f384,
717 0x1f3c0, 0x1f3c8,
718 0x1f408, 0x1f40c,
719 0x1f440, 0x1f444,
720 0x1f44c, 0x1f44c,
721 0x1f684, 0x1f690,
722 0x1f6c0, 0x1f6c0,
723 0x1f6e0, 0x1f6e0,
724 0x1f700, 0x1f784,
725 0x1f7c0, 0x1f7c8,
726 0x1f808, 0x1f80c,
727 0x1f840, 0x1f844,
728 0x1f84c, 0x1f84c,
729 0x1fa84, 0x1fa90,
730 0x1fac0, 0x1fac0,
731 0x1fae0, 0x1fae0,
732 0x1fb00, 0x1fb84,
733 0x1fbc0, 0x1fbc8,
734 0x1fc08, 0x1fc0c,
735 0x1fc40, 0x1fc44,
736 0x1fc4c, 0x1fc4c,
737 0x1fe84, 0x1fe90,
738 0x1fec0, 0x1fec0,
739 0x1fee0, 0x1fee0,
740 0x1ff00, 0x1ff84,
741 0x1ffc0, 0x1ffc8,
742 0x30000, 0x30030,
743 0x30038, 0x30038,
744 0x30040, 0x30040,
745 0x30100, 0x30144,
746 0x30190, 0x301a0,
747 0x301a8, 0x301b8,
748 0x301c4, 0x301c8,
749 0x301d0, 0x301d0,
750 0x30200, 0x30318,
751 0x30400, 0x304b4,
752 0x304c0, 0x3052c,
753 0x30540, 0x3061c,
754 0x30800, 0x30828,
755 0x30834, 0x30834,
756 0x308c0, 0x30908,
757 0x30910, 0x309ac,
758 0x30a00, 0x30a14,
759 0x30a1c, 0x30a2c,
760 0x30a44, 0x30a50,
761 0x30a74, 0x30a74,
762 0x30a7c, 0x30afc,
763 0x30b08, 0x30c24,
764 0x30d00, 0x30d00,
765 0x30d08, 0x30d14,
766 0x30d1c, 0x30d20,
767 0x30d3c, 0x30d3c,
768 0x30d48, 0x30d50,
769 0x31200, 0x3120c,
770 0x31220, 0x31220,
771 0x31240, 0x31240,
772 0x31600, 0x3160c,
773 0x31a00, 0x31a1c,
774 0x31e00, 0x31e20,
775 0x31e38, 0x31e3c,
776 0x31e80, 0x31e80,
777 0x31e88, 0x31ea8,
778 0x31eb0, 0x31eb4,
779 0x31ec8, 0x31ed4,
780 0x31fb8, 0x32004,
781 0x32200, 0x32200,
782 0x32208, 0x32240,
783 0x32248, 0x32280,
784 0x32288, 0x322c0,
785 0x322c8, 0x322fc,
786 0x32600, 0x32630,
787 0x32a00, 0x32abc,
788 0x32b00, 0x32b10,
789 0x32b20, 0x32b30,
790 0x32b40, 0x32b50,
791 0x32b60, 0x32b70,
792 0x33000, 0x33028,
793 0x33030, 0x33048,
794 0x33060, 0x33068,
795 0x33070, 0x3309c,
796 0x330f0, 0x33128,
797 0x33130, 0x33148,
798 0x33160, 0x33168,
799 0x33170, 0x3319c,
800 0x331f0, 0x33238,
801 0x33240, 0x33240,
802 0x33248, 0x33250,
803 0x3325c, 0x33264,
804 0x33270, 0x332b8,
805 0x332c0, 0x332e4,
806 0x332f8, 0x33338,
807 0x33340, 0x33340,
808 0x33348, 0x33350,
809 0x3335c, 0x33364,
810 0x33370, 0x333b8,
811 0x333c0, 0x333e4,
812 0x333f8, 0x33428,
813 0x33430, 0x33448,
814 0x33460, 0x33468,
815 0x33470, 0x3349c,
816 0x334f0, 0x33528,
817 0x33530, 0x33548,
818 0x33560, 0x33568,
819 0x33570, 0x3359c,
820 0x335f0, 0x33638,
821 0x33640, 0x33640,
822 0x33648, 0x33650,
823 0x3365c, 0x33664,
824 0x33670, 0x336b8,
825 0x336c0, 0x336e4,
826 0x336f8, 0x33738,
827 0x33740, 0x33740,
828 0x33748, 0x33750,
829 0x3375c, 0x33764,
830 0x33770, 0x337b8,
831 0x337c0, 0x337e4,
832 0x337f8, 0x337fc,
833 0x33814, 0x33814,
834 0x3382c, 0x3382c,
835 0x33880, 0x3388c,
836 0x338e8, 0x338ec,
837 0x33900, 0x33928,
838 0x33930, 0x33948,
839 0x33960, 0x33968,
840 0x33970, 0x3399c,
841 0x339f0, 0x33a38,
842 0x33a40, 0x33a40,
843 0x33a48, 0x33a50,
844 0x33a5c, 0x33a64,
845 0x33a70, 0x33ab8,
846 0x33ac0, 0x33ae4,
847 0x33af8, 0x33b10,
848 0x33b28, 0x33b28,
849 0x33b3c, 0x33b50,
850 0x33bf0, 0x33c10,
851 0x33c28, 0x33c28,
852 0x33c3c, 0x33c50,
853 0x33cf0, 0x33cfc,
854 0x34000, 0x34030,
855 0x34038, 0x34038,
856 0x34040, 0x34040,
857 0x34100, 0x34144,
858 0x34190, 0x341a0,
859 0x341a8, 0x341b8,
860 0x341c4, 0x341c8,
861 0x341d0, 0x341d0,
862 0x34200, 0x34318,
863 0x34400, 0x344b4,
864 0x344c0, 0x3452c,
865 0x34540, 0x3461c,
866 0x34800, 0x34828,
867 0x34834, 0x34834,
868 0x348c0, 0x34908,
869 0x34910, 0x349ac,
870 0x34a00, 0x34a14,
871 0x34a1c, 0x34a2c,
872 0x34a44, 0x34a50,
873 0x34a74, 0x34a74,
874 0x34a7c, 0x34afc,
875 0x34b08, 0x34c24,
876 0x34d00, 0x34d00,
877 0x34d08, 0x34d14,
878 0x34d1c, 0x34d20,
879 0x34d3c, 0x34d3c,
880 0x34d48, 0x34d50,
881 0x35200, 0x3520c,
882 0x35220, 0x35220,
883 0x35240, 0x35240,
884 0x35600, 0x3560c,
885 0x35a00, 0x35a1c,
886 0x35e00, 0x35e20,
887 0x35e38, 0x35e3c,
888 0x35e80, 0x35e80,
889 0x35e88, 0x35ea8,
890 0x35eb0, 0x35eb4,
891 0x35ec8, 0x35ed4,
892 0x35fb8, 0x36004,
893 0x36200, 0x36200,
894 0x36208, 0x36240,
895 0x36248, 0x36280,
896 0x36288, 0x362c0,
897 0x362c8, 0x362fc,
898 0x36600, 0x36630,
899 0x36a00, 0x36abc,
900 0x36b00, 0x36b10,
901 0x36b20, 0x36b30,
902 0x36b40, 0x36b50,
903 0x36b60, 0x36b70,
904 0x37000, 0x37028,
905 0x37030, 0x37048,
906 0x37060, 0x37068,
907 0x37070, 0x3709c,
908 0x370f0, 0x37128,
909 0x37130, 0x37148,
910 0x37160, 0x37168,
911 0x37170, 0x3719c,
912 0x371f0, 0x37238,
913 0x37240, 0x37240,
914 0x37248, 0x37250,
915 0x3725c, 0x37264,
916 0x37270, 0x372b8,
917 0x372c0, 0x372e4,
918 0x372f8, 0x37338,
919 0x37340, 0x37340,
920 0x37348, 0x37350,
921 0x3735c, 0x37364,
922 0x37370, 0x373b8,
923 0x373c0, 0x373e4,
924 0x373f8, 0x37428,
925 0x37430, 0x37448,
926 0x37460, 0x37468,
927 0x37470, 0x3749c,
928 0x374f0, 0x37528,
929 0x37530, 0x37548,
930 0x37560, 0x37568,
931 0x37570, 0x3759c,
932 0x375f0, 0x37638,
933 0x37640, 0x37640,
934 0x37648, 0x37650,
935 0x3765c, 0x37664,
936 0x37670, 0x376b8,
937 0x376c0, 0x376e4,
938 0x376f8, 0x37738,
939 0x37740, 0x37740,
940 0x37748, 0x37750,
941 0x3775c, 0x37764,
942 0x37770, 0x377b8,
943 0x377c0, 0x377e4,
944 0x377f8, 0x377fc,
945 0x37814, 0x37814,
946 0x3782c, 0x3782c,
947 0x37880, 0x3788c,
948 0x378e8, 0x378ec,
949 0x37900, 0x37928,
950 0x37930, 0x37948,
951 0x37960, 0x37968,
952 0x37970, 0x3799c,
953 0x379f0, 0x37a38,
954 0x37a40, 0x37a40,
955 0x37a48, 0x37a50,
956 0x37a5c, 0x37a64,
957 0x37a70, 0x37ab8,
958 0x37ac0, 0x37ae4,
959 0x37af8, 0x37b10,
960 0x37b28, 0x37b28,
961 0x37b3c, 0x37b50,
962 0x37bf0, 0x37c10,
963 0x37c28, 0x37c28,
964 0x37c3c, 0x37c50,
965 0x37cf0, 0x37cfc,
966 0x38000, 0x38030,
967 0x38038, 0x38038,
968 0x38040, 0x38040,
969 0x38100, 0x38144,
970 0x38190, 0x381a0,
971 0x381a8, 0x381b8,
972 0x381c4, 0x381c8,
973 0x381d0, 0x381d0,
974 0x38200, 0x38318,
975 0x38400, 0x384b4,
976 0x384c0, 0x3852c,
977 0x38540, 0x3861c,
978 0x38800, 0x38828,
979 0x38834, 0x38834,
980 0x388c0, 0x38908,
981 0x38910, 0x389ac,
982 0x38a00, 0x38a14,
983 0x38a1c, 0x38a2c,
984 0x38a44, 0x38a50,
985 0x38a74, 0x38a74,
986 0x38a7c, 0x38afc,
987 0x38b08, 0x38c24,
988 0x38d00, 0x38d00,
989 0x38d08, 0x38d14,
990 0x38d1c, 0x38d20,
991 0x38d3c, 0x38d3c,
992 0x38d48, 0x38d50,
993 0x39200, 0x3920c,
994 0x39220, 0x39220,
995 0x39240, 0x39240,
996 0x39600, 0x3960c,
997 0x39a00, 0x39a1c,
998 0x39e00, 0x39e20,
999 0x39e38, 0x39e3c,
1000 0x39e80, 0x39e80,
1001 0x39e88, 0x39ea8,
1002 0x39eb0, 0x39eb4,
1003 0x39ec8, 0x39ed4,
1004 0x39fb8, 0x3a004,
1005 0x3a200, 0x3a200,
1006 0x3a208, 0x3a240,
1007 0x3a248, 0x3a280,
1008 0x3a288, 0x3a2c0,
1009 0x3a2c8, 0x3a2fc,
1010 0x3a600, 0x3a630,
1011 0x3aa00, 0x3aabc,
1012 0x3ab00, 0x3ab10,
1013 0x3ab20, 0x3ab30,
1014 0x3ab40, 0x3ab50,
1015 0x3ab60, 0x3ab70,
1016 0x3b000, 0x3b028,
1017 0x3b030, 0x3b048,
1018 0x3b060, 0x3b068,
1019 0x3b070, 0x3b09c,
1020 0x3b0f0, 0x3b128,
1021 0x3b130, 0x3b148,
1022 0x3b160, 0x3b168,
1023 0x3b170, 0x3b19c,
1024 0x3b1f0, 0x3b238,
1025 0x3b240, 0x3b240,
1026 0x3b248, 0x3b250,
1027 0x3b25c, 0x3b264,
1028 0x3b270, 0x3b2b8,
1029 0x3b2c0, 0x3b2e4,
1030 0x3b2f8, 0x3b338,
1031 0x3b340, 0x3b340,
1032 0x3b348, 0x3b350,
1033 0x3b35c, 0x3b364,
1034 0x3b370, 0x3b3b8,
1035 0x3b3c0, 0x3b3e4,
1036 0x3b3f8, 0x3b428,
1037 0x3b430, 0x3b448,
1038 0x3b460, 0x3b468,
1039 0x3b470, 0x3b49c,
1040 0x3b4f0, 0x3b528,
1041 0x3b530, 0x3b548,
1042 0x3b560, 0x3b568,
1043 0x3b570, 0x3b59c,
1044 0x3b5f0, 0x3b638,
1045 0x3b640, 0x3b640,
1046 0x3b648, 0x3b650,
1047 0x3b65c, 0x3b664,
1048 0x3b670, 0x3b6b8,
1049 0x3b6c0, 0x3b6e4,
1050 0x3b6f8, 0x3b738,
1051 0x3b740, 0x3b740,
1052 0x3b748, 0x3b750,
1053 0x3b75c, 0x3b764,
1054 0x3b770, 0x3b7b8,
1055 0x3b7c0, 0x3b7e4,
1056 0x3b7f8, 0x3b7fc,
1057 0x3b814, 0x3b814,
1058 0x3b82c, 0x3b82c,
1059 0x3b880, 0x3b88c,
1060 0x3b8e8, 0x3b8ec,
1061 0x3b900, 0x3b928,
1062 0x3b930, 0x3b948,
1063 0x3b960, 0x3b968,
1064 0x3b970, 0x3b99c,
1065 0x3b9f0, 0x3ba38,
1066 0x3ba40, 0x3ba40,
1067 0x3ba48, 0x3ba50,
1068 0x3ba5c, 0x3ba64,
1069 0x3ba70, 0x3bab8,
1070 0x3bac0, 0x3bae4,
1071 0x3baf8, 0x3bb10,
1072 0x3bb28, 0x3bb28,
1073 0x3bb3c, 0x3bb50,
1074 0x3bbf0, 0x3bc10,
1075 0x3bc28, 0x3bc28,
1076 0x3bc3c, 0x3bc50,
1077 0x3bcf0, 0x3bcfc,
1078 0x3c000, 0x3c030,
1079 0x3c038, 0x3c038,
1080 0x3c040, 0x3c040,
1081 0x3c100, 0x3c144,
1082 0x3c190, 0x3c1a0,
1083 0x3c1a8, 0x3c1b8,
1084 0x3c1c4, 0x3c1c8,
1085 0x3c1d0, 0x3c1d0,
1086 0x3c200, 0x3c318,
1087 0x3c400, 0x3c4b4,
1088 0x3c4c0, 0x3c52c,
1089 0x3c540, 0x3c61c,
1090 0x3c800, 0x3c828,
1091 0x3c834, 0x3c834,
1092 0x3c8c0, 0x3c908,
1093 0x3c910, 0x3c9ac,
1094 0x3ca00, 0x3ca14,
1095 0x3ca1c, 0x3ca2c,
1096 0x3ca44, 0x3ca50,
1097 0x3ca74, 0x3ca74,
1098 0x3ca7c, 0x3cafc,
1099 0x3cb08, 0x3cc24,
1100 0x3cd00, 0x3cd00,
1101 0x3cd08, 0x3cd14,
1102 0x3cd1c, 0x3cd20,
1103 0x3cd3c, 0x3cd3c,
1104 0x3cd48, 0x3cd50,
1105 0x3d200, 0x3d20c,
1106 0x3d220, 0x3d220,
1107 0x3d240, 0x3d240,
1108 0x3d600, 0x3d60c,
1109 0x3da00, 0x3da1c,
1110 0x3de00, 0x3de20,
1111 0x3de38, 0x3de3c,
1112 0x3de80, 0x3de80,
1113 0x3de88, 0x3dea8,
1114 0x3deb0, 0x3deb4,
1115 0x3dec8, 0x3ded4,
1116 0x3dfb8, 0x3e004,
1117 0x3e200, 0x3e200,
1118 0x3e208, 0x3e240,
1119 0x3e248, 0x3e280,
1120 0x3e288, 0x3e2c0,
1121 0x3e2c8, 0x3e2fc,
1122 0x3e600, 0x3e630,
1123 0x3ea00, 0x3eabc,
1124 0x3eb00, 0x3eb10,
1125 0x3eb20, 0x3eb30,
1126 0x3eb40, 0x3eb50,
1127 0x3eb60, 0x3eb70,
1128 0x3f000, 0x3f028,
1129 0x3f030, 0x3f048,
1130 0x3f060, 0x3f068,
1131 0x3f070, 0x3f09c,
1132 0x3f0f0, 0x3f128,
1133 0x3f130, 0x3f148,
1134 0x3f160, 0x3f168,
1135 0x3f170, 0x3f19c,
1136 0x3f1f0, 0x3f238,
1137 0x3f240, 0x3f240,
1138 0x3f248, 0x3f250,
1139 0x3f25c, 0x3f264,
1140 0x3f270, 0x3f2b8,
1141 0x3f2c0, 0x3f2e4,
1142 0x3f2f8, 0x3f338,
1143 0x3f340, 0x3f340,
1144 0x3f348, 0x3f350,
1145 0x3f35c, 0x3f364,
1146 0x3f370, 0x3f3b8,
1147 0x3f3c0, 0x3f3e4,
1148 0x3f3f8, 0x3f428,
1149 0x3f430, 0x3f448,
1150 0x3f460, 0x3f468,
1151 0x3f470, 0x3f49c,
1152 0x3f4f0, 0x3f528,
1153 0x3f530, 0x3f548,
1154 0x3f560, 0x3f568,
1155 0x3f570, 0x3f59c,
1156 0x3f5f0, 0x3f638,
1157 0x3f640, 0x3f640,
1158 0x3f648, 0x3f650,
1159 0x3f65c, 0x3f664,
1160 0x3f670, 0x3f6b8,
1161 0x3f6c0, 0x3f6e4,
1162 0x3f6f8, 0x3f738,
1163 0x3f740, 0x3f740,
1164 0x3f748, 0x3f750,
1165 0x3f75c, 0x3f764,
1166 0x3f770, 0x3f7b8,
1167 0x3f7c0, 0x3f7e4,
1168 0x3f7f8, 0x3f7fc,
1169 0x3f814, 0x3f814,
1170 0x3f82c, 0x3f82c,
1171 0x3f880, 0x3f88c,
1172 0x3f8e8, 0x3f8ec,
1173 0x3f900, 0x3f928,
1174 0x3f930, 0x3f948,
1175 0x3f960, 0x3f968,
1176 0x3f970, 0x3f99c,
1177 0x3f9f0, 0x3fa38,
1178 0x3fa40, 0x3fa40,
1179 0x3fa48, 0x3fa50,
1180 0x3fa5c, 0x3fa64,
1181 0x3fa70, 0x3fab8,
1182 0x3fac0, 0x3fae4,
1183 0x3faf8, 0x3fb10,
1184 0x3fb28, 0x3fb28,
1185 0x3fb3c, 0x3fb50,
1186 0x3fbf0, 0x3fc10,
1187 0x3fc28, 0x3fc28,
1188 0x3fc3c, 0x3fc50,
1189 0x3fcf0, 0x3fcfc,
1190 0x40000, 0x4000c,
1191 0x40040, 0x40050,
1192 0x40060, 0x40068,
1193 0x4007c, 0x4008c,
1194 0x40094, 0x400b0,
1195 0x400c0, 0x40144,
1196 0x40180, 0x4018c,
1197 0x40200, 0x40254,
1198 0x40260, 0x40264,
1199 0x40270, 0x40288,
1200 0x40290, 0x40298,
1201 0x402ac, 0x402c8,
1202 0x402d0, 0x402e0,
1203 0x402f0, 0x402f0,
1204 0x40300, 0x4033c,
1205 0x403f8, 0x403fc,
1206 0x41304, 0x413c4,
1207 0x41400, 0x4140c,
1208 0x41414, 0x4141c,
1209 0x41480, 0x414d0,
1210 0x44000, 0x44054,
1211 0x4405c, 0x44078,
1212 0x440c0, 0x44174,
1213 0x44180, 0x441ac,
1214 0x441b4, 0x441b8,
1215 0x441c0, 0x44254,
1216 0x4425c, 0x44278,
1217 0x442c0, 0x44374,
1218 0x44380, 0x443ac,
1219 0x443b4, 0x443b8,
1220 0x443c0, 0x44454,
1221 0x4445c, 0x44478,
1222 0x444c0, 0x44574,
1223 0x44580, 0x445ac,
1224 0x445b4, 0x445b8,
1225 0x445c0, 0x44654,
1226 0x4465c, 0x44678,
1227 0x446c0, 0x44774,
1228 0x44780, 0x447ac,
1229 0x447b4, 0x447b8,
1230 0x447c0, 0x44854,
1231 0x4485c, 0x44878,
1232 0x448c0, 0x44974,
1233 0x44980, 0x449ac,
1234 0x449b4, 0x449b8,
1235 0x449c0, 0x449fc,
1236 0x45000, 0x45004,
1237 0x45010, 0x45030,
1238 0x45040, 0x45060,
1239 0x45068, 0x45068,
1240 0x45080, 0x45084,
1241 0x450a0, 0x450b0,
1242 0x45200, 0x45204,
1243 0x45210, 0x45230,
1244 0x45240, 0x45260,
1245 0x45268, 0x45268,
1246 0x45280, 0x45284,
1247 0x452a0, 0x452b0,
1248 0x460c0, 0x460e4,
1249 0x47000, 0x4703c,
1250 0x47044, 0x4708c,
1251 0x47200, 0x47250,
1252 0x47400, 0x47408,
1253 0x47414, 0x47420,
1254 0x47600, 0x47618,
1255 0x47800, 0x47814,
1256 0x48000, 0x4800c,
1257 0x48040, 0x48050,
1258 0x48060, 0x48068,
1259 0x4807c, 0x4808c,
1260 0x48094, 0x480b0,
1261 0x480c0, 0x48144,
1262 0x48180, 0x4818c,
1263 0x48200, 0x48254,
1264 0x48260, 0x48264,
1265 0x48270, 0x48288,
1266 0x48290, 0x48298,
1267 0x482ac, 0x482c8,
1268 0x482d0, 0x482e0,
1269 0x482f0, 0x482f0,
1270 0x48300, 0x4833c,
1271 0x483f8, 0x483fc,
1272 0x49304, 0x493c4,
1273 0x49400, 0x4940c,
1274 0x49414, 0x4941c,
1275 0x49480, 0x494d0,
1276 0x4c000, 0x4c054,
1277 0x4c05c, 0x4c078,
1278 0x4c0c0, 0x4c174,
1279 0x4c180, 0x4c1ac,
1280 0x4c1b4, 0x4c1b8,
1281 0x4c1c0, 0x4c254,
1282 0x4c25c, 0x4c278,
1283 0x4c2c0, 0x4c374,
1284 0x4c380, 0x4c3ac,
1285 0x4c3b4, 0x4c3b8,
1286 0x4c3c0, 0x4c454,
1287 0x4c45c, 0x4c478,
1288 0x4c4c0, 0x4c574,
1289 0x4c580, 0x4c5ac,
1290 0x4c5b4, 0x4c5b8,
1291 0x4c5c0, 0x4c654,
1292 0x4c65c, 0x4c678,
1293 0x4c6c0, 0x4c774,
1294 0x4c780, 0x4c7ac,
1295 0x4c7b4, 0x4c7b8,
1296 0x4c7c0, 0x4c854,
1297 0x4c85c, 0x4c878,
1298 0x4c8c0, 0x4c974,
1299 0x4c980, 0x4c9ac,
1300 0x4c9b4, 0x4c9b8,
1301 0x4c9c0, 0x4c9fc,
1302 0x4d000, 0x4d004,
1303 0x4d010, 0x4d030,
1304 0x4d040, 0x4d060,
1305 0x4d068, 0x4d068,
1306 0x4d080, 0x4d084,
1307 0x4d0a0, 0x4d0b0,
1308 0x4d200, 0x4d204,
1309 0x4d210, 0x4d230,
1310 0x4d240, 0x4d260,
1311 0x4d268, 0x4d268,
1312 0x4d280, 0x4d284,
1313 0x4d2a0, 0x4d2b0,
1314 0x4e0c0, 0x4e0e4,
1315 0x4f000, 0x4f03c,
1316 0x4f044, 0x4f08c,
1317 0x4f200, 0x4f250,
1318 0x4f400, 0x4f408,
1319 0x4f414, 0x4f420,
1320 0x4f600, 0x4f618,
1321 0x4f800, 0x4f814,
1322 0x50000, 0x50084,
1323 0x50090, 0x500cc,
1324 0x50400, 0x50400,
1325 0x50800, 0x50884,
1326 0x50890, 0x508cc,
1327 0x50c00, 0x50c00,
1328 0x51000, 0x5101c,
1329 0x51300, 0x51308,
1330 };
1331
1332 static const unsigned int t6_reg_ranges[] = {
1333 0x1008, 0x101c,
1334 0x1024, 0x10a8,
1335 0x10b4, 0x10f8,
1336 0x1100, 0x1114,
1337 0x111c, 0x112c,
1338 0x1138, 0x113c,
1339 0x1144, 0x114c,
1340 0x1180, 0x1184,
1341 0x1190, 0x1194,
1342 0x11a0, 0x11a4,
1343 0x11b0, 0x11b4,
1344 0x11fc, 0x1274,
1345 0x1280, 0x133c,
1346 0x1800, 0x18fc,
1347 0x3000, 0x302c,
1348 0x3060, 0x30b0,
1349 0x30b8, 0x30d8,
1350 0x30e0, 0x30fc,
1351 0x3140, 0x357c,
1352 0x35a8, 0x35cc,
1353 0x35ec, 0x35ec,
1354 0x3600, 0x5624,
1355 0x56cc, 0x56ec,
1356 0x56f4, 0x5720,
1357 0x5728, 0x575c,
1358 0x580c, 0x5814,
1359 0x5890, 0x589c,
1360 0x58a4, 0x58ac,
1361 0x58b8, 0x58bc,
1362 0x5940, 0x595c,
1363 0x5980, 0x598c,
1364 0x59b0, 0x59c8,
1365 0x59d0, 0x59dc,
1366 0x59fc, 0x5a18,
1367 0x5a60, 0x5a6c,
1368 0x5a80, 0x5a8c,
1369 0x5a94, 0x5a9c,
1370 0x5b94, 0x5bfc,
1371 0x5c10, 0x5e48,
1372 0x5e50, 0x5e94,
1373 0x5ea0, 0x5eb0,
1374 0x5ec0, 0x5ec0,
1375 0x5ec8, 0x5ed0,
1376 0x5ee0, 0x5ee0,
1377 0x5ef0, 0x5ef0,
1378 0x5f00, 0x5f00,
1379 0x6000, 0x6020,
1380 0x6028, 0x6040,
1381 0x6058, 0x609c,
1382 0x60a8, 0x619c,
1383 0x7700, 0x7798,
1384 0x77c0, 0x7880,
1385 0x78cc, 0x78fc,
1386 0x7b00, 0x7b58,
1387 0x7b60, 0x7b84,
1388 0x7b8c, 0x7c54,
1389 0x7d00, 0x7d38,
1390 0x7d40, 0x7d84,
1391 0x7d8c, 0x7ddc,
1392 0x7de4, 0x7e04,
1393 0x7e10, 0x7e1c,
1394 0x7e24, 0x7e38,
1395 0x7e40, 0x7e44,
1396 0x7e4c, 0x7e78,
1397 0x7e80, 0x7edc,
1398 0x7ee8, 0x7efc,
1399 0x8dc0, 0x8de4,
1400 0x8df8, 0x8e04,
1401 0x8e10, 0x8e84,
1402 0x8ea0, 0x8f88,
1403 0x8fb8, 0x9058,
1404 0x9060, 0x9060,
1405 0x9068, 0x90f8,
1406 0x9100, 0x9124,
1407 0x9400, 0x9470,
1408 0x9600, 0x9600,
1409 0x9608, 0x9638,
1410 0x9640, 0x9704,
1411 0x9710, 0x971c,
1412 0x9800, 0x9808,
1413 0x9820, 0x983c,
1414 0x9850, 0x9864,
1415 0x9c00, 0x9c6c,
1416 0x9c80, 0x9cec,
1417 0x9d00, 0x9d6c,
1418 0x9d80, 0x9dec,
1419 0x9e00, 0x9e6c,
1420 0x9e80, 0x9eec,
1421 0x9f00, 0x9f6c,
1422 0x9f80, 0xa020,
1423 0xd004, 0xd03c,
1424 0xd100, 0xd118,
1425 0xd200, 0xd214,
1426 0xd220, 0xd234,
1427 0xd240, 0xd254,
1428 0xd260, 0xd274,
1429 0xd280, 0xd294,
1430 0xd2a0, 0xd2b4,
1431 0xd2c0, 0xd2d4,
1432 0xd2e0, 0xd2f4,
1433 0xd300, 0xd31c,
1434 0xdfc0, 0xdfe0,
1435 0xe000, 0xf008,
1436 0xf010, 0xf018,
1437 0xf020, 0xf028,
1438 0x11000, 0x11014,
1439 0x11048, 0x1106c,
1440 0x11074, 0x11088,
1441 0x11098, 0x11120,
1442 0x1112c, 0x1117c,
1443 0x11190, 0x112e0,
1444 0x11300, 0x1130c,
1445 0x12000, 0x1206c,
1446 0x19040, 0x1906c,
1447 0x19078, 0x19080,
1448 0x1908c, 0x190e8,
1449 0x190f0, 0x190f8,
1450 0x19100, 0x19110,
1451 0x19120, 0x19124,
1452 0x19150, 0x19194,
1453 0x1919c, 0x191b0,
1454 0x191d0, 0x191e8,
1455 0x19238, 0x19290,
1456 0x192a4, 0x192b0,
1457 0x192bc, 0x192bc,
1458 0x19348, 0x1934c,
1459 0x193f8, 0x19418,
1460 0x19420, 0x19428,
1461 0x19430, 0x19444,
1462 0x1944c, 0x1946c,
1463 0x19474, 0x19474,
1464 0x19490, 0x194cc,
1465 0x194f0, 0x194f8,
1466 0x19c00, 0x19c48,
1467 0x19c50, 0x19c80,
1468 0x19c94, 0x19c98,
1469 0x19ca0, 0x19cbc,
1470 0x19ce4, 0x19ce4,
1471 0x19cf0, 0x19cf8,
1472 0x19d00, 0x19d28,
1473 0x19d50, 0x19d78,
1474 0x19d94, 0x19d98,
1475 0x19da0, 0x19dc8,
1476 0x19df0, 0x19e10,
1477 0x19e50, 0x19e6c,
1478 0x19ea0, 0x19ebc,
1479 0x19ec4, 0x19ef4,
1480 0x19f04, 0x19f2c,
1481 0x19f34, 0x19f34,
1482 0x19f40, 0x19f50,
1483 0x19f90, 0x19fac,
1484 0x19fc4, 0x19fc8,
1485 0x19fd0, 0x19fe4,
1486 0x1a000, 0x1a004,
1487 0x1a010, 0x1a06c,
1488 0x1a0b0, 0x1a0e4,
1489 0x1a0ec, 0x1a0f8,
1490 0x1a100, 0x1a108,
1491 0x1a114, 0x1a120,
1492 0x1a128, 0x1a130,
1493 0x1a138, 0x1a138,
1494 0x1a190, 0x1a1c4,
1495 0x1a1fc, 0x1a1fc,
1496 0x1e008, 0x1e00c,
1497 0x1e040, 0x1e044,
1498 0x1e04c, 0x1e04c,
1499 0x1e284, 0x1e290,
1500 0x1e2c0, 0x1e2c0,
1501 0x1e2e0, 0x1e2e0,
1502 0x1e300, 0x1e384,
1503 0x1e3c0, 0x1e3c8,
1504 0x1e408, 0x1e40c,
1505 0x1e440, 0x1e444,
1506 0x1e44c, 0x1e44c,
1507 0x1e684, 0x1e690,
1508 0x1e6c0, 0x1e6c0,
1509 0x1e6e0, 0x1e6e0,
1510 0x1e700, 0x1e784,
1511 0x1e7c0, 0x1e7c8,
1512 0x1e808, 0x1e80c,
1513 0x1e840, 0x1e844,
1514 0x1e84c, 0x1e84c,
1515 0x1ea84, 0x1ea90,
1516 0x1eac0, 0x1eac0,
1517 0x1eae0, 0x1eae0,
1518 0x1eb00, 0x1eb84,
1519 0x1ebc0, 0x1ebc8,
1520 0x1ec08, 0x1ec0c,
1521 0x1ec40, 0x1ec44,
1522 0x1ec4c, 0x1ec4c,
1523 0x1ee84, 0x1ee90,
1524 0x1eec0, 0x1eec0,
1525 0x1eee0, 0x1eee0,
1526 0x1ef00, 0x1ef84,
1527 0x1efc0, 0x1efc8,
1528 0x1f008, 0x1f00c,
1529 0x1f040, 0x1f044,
1530 0x1f04c, 0x1f04c,
1531 0x1f284, 0x1f290,
1532 0x1f2c0, 0x1f2c0,
1533 0x1f2e0, 0x1f2e0,
1534 0x1f300, 0x1f384,
1535 0x1f3c0, 0x1f3c8,
1536 0x1f408, 0x1f40c,
1537 0x1f440, 0x1f444,
1538 0x1f44c, 0x1f44c,
1539 0x1f684, 0x1f690,
1540 0x1f6c0, 0x1f6c0,
1541 0x1f6e0, 0x1f6e0,
1542 0x1f700, 0x1f784,
1543 0x1f7c0, 0x1f7c8,
1544 0x1f808, 0x1f80c,
1545 0x1f840, 0x1f844,
1546 0x1f84c, 0x1f84c,
1547 0x1fa84, 0x1fa90,
1548 0x1fac0, 0x1fac0,
1549 0x1fae0, 0x1fae0,
1550 0x1fb00, 0x1fb84,
1551 0x1fbc0, 0x1fbc8,
1552 0x1fc08, 0x1fc0c,
1553 0x1fc40, 0x1fc44,
1554 0x1fc4c, 0x1fc4c,
1555 0x1fe84, 0x1fe90,
1556 0x1fec0, 0x1fec0,
1557 0x1fee0, 0x1fee0,
1558 0x1ff00, 0x1ff84,
1559 0x1ffc0, 0x1ffc8,
1560 0x30000, 0x30030,
1561 0x30100, 0x30168,
1562 0x30190, 0x301a0,
1563 0x301a8, 0x301b8,
1564 0x301c4, 0x301c8,
1565 0x301d0, 0x301d0,
1566 0x30200, 0x30320,
1567 0x30400, 0x304b4,
1568 0x304c0, 0x3052c,
1569 0x30540, 0x3061c,
1570 0x30800, 0x308a0,
1571 0x308c0, 0x30908,
1572 0x30910, 0x309b8,
1573 0x30a00, 0x30a04,
1574 0x30a0c, 0x30a14,
1575 0x30a1c, 0x30a2c,
1576 0x30a44, 0x30a50,
1577 0x30a74, 0x30a74,
1578 0x30a7c, 0x30afc,
1579 0x30b08, 0x30c24,
1580 0x30d00, 0x30d14,
1581 0x30d1c, 0x30d3c,
1582 0x30d44, 0x30d4c,
1583 0x30d54, 0x30d74,
1584 0x30d7c, 0x30d7c,
1585 0x30de0, 0x30de0,
1586 0x30e00, 0x30ed4,
1587 0x30f00, 0x30fa4,
1588 0x30fc0, 0x30fc4,
1589 0x31000, 0x31004,
1590 0x31080, 0x310fc,
1591 0x31208, 0x31220,
1592 0x3123c, 0x31254,
1593 0x31300, 0x31300,
1594 0x31308, 0x3131c,
1595 0x31338, 0x3133c,
1596 0x31380, 0x31380,
1597 0x31388, 0x313a8,
1598 0x313b4, 0x313b4,
1599 0x31400, 0x31420,
1600 0x31438, 0x3143c,
1601 0x31480, 0x31480,
1602 0x314a8, 0x314a8,
1603 0x314b0, 0x314b4,
1604 0x314c8, 0x314d4,
1605 0x31a40, 0x31a4c,
1606 0x31af0, 0x31b20,
1607 0x31b38, 0x31b3c,
1608 0x31b80, 0x31b80,
1609 0x31ba8, 0x31ba8,
1610 0x31bb0, 0x31bb4,
1611 0x31bc8, 0x31bd4,
1612 0x32140, 0x3218c,
1613 0x321f0, 0x321f4,
1614 0x32200, 0x32200,
1615 0x32218, 0x32218,
1616 0x32400, 0x32400,
1617 0x32408, 0x3241c,
1618 0x32618, 0x32620,
1619 0x32664, 0x32664,
1620 0x326a8, 0x326a8,
1621 0x326ec, 0x326ec,
1622 0x32a00, 0x32abc,
1623 0x32b00, 0x32b38,
1624 0x32b20, 0x32b38,
1625 0x32b40, 0x32b58,
1626 0x32b60, 0x32b78,
1627 0x32c00, 0x32c00,
1628 0x32c08, 0x32c3c,
1629 0x33000, 0x3302c,
1630 0x33034, 0x33050,
1631 0x33058, 0x33058,
1632 0x33060, 0x3308c,
1633 0x3309c, 0x330ac,
1634 0x330c0, 0x330c0,
1635 0x330c8, 0x330d0,
1636 0x330d8, 0x330e0,
1637 0x330ec, 0x3312c,
1638 0x33134, 0x33150,
1639 0x33158, 0x33158,
1640 0x33160, 0x3318c,
1641 0x3319c, 0x331ac,
1642 0x331c0, 0x331c0,
1643 0x331c8, 0x331d0,
1644 0x331d8, 0x331e0,
1645 0x331ec, 0x33290,
1646 0x33298, 0x332c4,
1647 0x332e4, 0x33390,
1648 0x33398, 0x333c4,
1649 0x333e4, 0x3342c,
1650 0x33434, 0x33450,
1651 0x33458, 0x33458,
1652 0x33460, 0x3348c,
1653 0x3349c, 0x334ac,
1654 0x334c0, 0x334c0,
1655 0x334c8, 0x334d0,
1656 0x334d8, 0x334e0,
1657 0x334ec, 0x3352c,
1658 0x33534, 0x33550,
1659 0x33558, 0x33558,
1660 0x33560, 0x3358c,
1661 0x3359c, 0x335ac,
1662 0x335c0, 0x335c0,
1663 0x335c8, 0x335d0,
1664 0x335d8, 0x335e0,
1665 0x335ec, 0x33690,
1666 0x33698, 0x336c4,
1667 0x336e4, 0x33790,
1668 0x33798, 0x337c4,
1669 0x337e4, 0x337fc,
1670 0x33814, 0x33814,
1671 0x33854, 0x33868,
1672 0x33880, 0x3388c,
1673 0x338c0, 0x338d0,
1674 0x338e8, 0x338ec,
1675 0x33900, 0x3392c,
1676 0x33934, 0x33950,
1677 0x33958, 0x33958,
1678 0x33960, 0x3398c,
1679 0x3399c, 0x339ac,
1680 0x339c0, 0x339c0,
1681 0x339c8, 0x339d0,
1682 0x339d8, 0x339e0,
1683 0x339ec, 0x33a90,
1684 0x33a98, 0x33ac4,
1685 0x33ae4, 0x33b10,
1686 0x33b24, 0x33b28,
1687 0x33b38, 0x33b50,
1688 0x33bf0, 0x33c10,
1689 0x33c24, 0x33c28,
1690 0x33c38, 0x33c50,
1691 0x33cf0, 0x33cfc,
1692 0x34000, 0x34030,
1693 0x34100, 0x34168,
1694 0x34190, 0x341a0,
1695 0x341a8, 0x341b8,
1696 0x341c4, 0x341c8,
1697 0x341d0, 0x341d0,
1698 0x34200, 0x34320,
1699 0x34400, 0x344b4,
1700 0x344c0, 0x3452c,
1701 0x34540, 0x3461c,
1702 0x34800, 0x348a0,
1703 0x348c0, 0x34908,
1704 0x34910, 0x349b8,
1705 0x34a00, 0x34a04,
1706 0x34a0c, 0x34a14,
1707 0x34a1c, 0x34a2c,
1708 0x34a44, 0x34a50,
1709 0x34a74, 0x34a74,
1710 0x34a7c, 0x34afc,
1711 0x34b08, 0x34c24,
1712 0x34d00, 0x34d14,
1713 0x34d1c, 0x34d3c,
1714 0x34d44, 0x34d4c,
1715 0x34d54, 0x34d74,
1716 0x34d7c, 0x34d7c,
1717 0x34de0, 0x34de0,
1718 0x34e00, 0x34ed4,
1719 0x34f00, 0x34fa4,
1720 0x34fc0, 0x34fc4,
1721 0x35000, 0x35004,
1722 0x35080, 0x350fc,
1723 0x35208, 0x35220,
1724 0x3523c, 0x35254,
1725 0x35300, 0x35300,
1726 0x35308, 0x3531c,
1727 0x35338, 0x3533c,
1728 0x35380, 0x35380,
1729 0x35388, 0x353a8,
1730 0x353b4, 0x353b4,
1731 0x35400, 0x35420,
1732 0x35438, 0x3543c,
1733 0x35480, 0x35480,
1734 0x354a8, 0x354a8,
1735 0x354b0, 0x354b4,
1736 0x354c8, 0x354d4,
1737 0x35a40, 0x35a4c,
1738 0x35af0, 0x35b20,
1739 0x35b38, 0x35b3c,
1740 0x35b80, 0x35b80,
1741 0x35ba8, 0x35ba8,
1742 0x35bb0, 0x35bb4,
1743 0x35bc8, 0x35bd4,
1744 0x36140, 0x3618c,
1745 0x361f0, 0x361f4,
1746 0x36200, 0x36200,
1747 0x36218, 0x36218,
1748 0x36400, 0x36400,
1749 0x36408, 0x3641c,
1750 0x36618, 0x36620,
1751 0x36664, 0x36664,
1752 0x366a8, 0x366a8,
1753 0x366ec, 0x366ec,
1754 0x36a00, 0x36abc,
1755 0x36b00, 0x36b38,
1756 0x36b20, 0x36b38,
1757 0x36b40, 0x36b58,
1758 0x36b60, 0x36b78,
1759 0x36c00, 0x36c00,
1760 0x36c08, 0x36c3c,
1761 0x37000, 0x3702c,
1762 0x37034, 0x37050,
1763 0x37058, 0x37058,
1764 0x37060, 0x3708c,
1765 0x3709c, 0x370ac,
1766 0x370c0, 0x370c0,
1767 0x370c8, 0x370d0,
1768 0x370d8, 0x370e0,
1769 0x370ec, 0x3712c,
1770 0x37134, 0x37150,
1771 0x37158, 0x37158,
1772 0x37160, 0x3718c,
1773 0x3719c, 0x371ac,
1774 0x371c0, 0x371c0,
1775 0x371c8, 0x371d0,
1776 0x371d8, 0x371e0,
1777 0x371ec, 0x37290,
1778 0x37298, 0x372c4,
1779 0x372e4, 0x37390,
1780 0x37398, 0x373c4,
1781 0x373e4, 0x3742c,
1782 0x37434, 0x37450,
1783 0x37458, 0x37458,
1784 0x37460, 0x3748c,
1785 0x3749c, 0x374ac,
1786 0x374c0, 0x374c0,
1787 0x374c8, 0x374d0,
1788 0x374d8, 0x374e0,
1789 0x374ec, 0x3752c,
1790 0x37534, 0x37550,
1791 0x37558, 0x37558,
1792 0x37560, 0x3758c,
1793 0x3759c, 0x375ac,
1794 0x375c0, 0x375c0,
1795 0x375c8, 0x375d0,
1796 0x375d8, 0x375e0,
1797 0x375ec, 0x37690,
1798 0x37698, 0x376c4,
1799 0x376e4, 0x37790,
1800 0x37798, 0x377c4,
1801 0x377e4, 0x377fc,
1802 0x37814, 0x37814,
1803 0x37854, 0x37868,
1804 0x37880, 0x3788c,
1805 0x378c0, 0x378d0,
1806 0x378e8, 0x378ec,
1807 0x37900, 0x3792c,
1808 0x37934, 0x37950,
1809 0x37958, 0x37958,
1810 0x37960, 0x3798c,
1811 0x3799c, 0x379ac,
1812 0x379c0, 0x379c0,
1813 0x379c8, 0x379d0,
1814 0x379d8, 0x379e0,
1815 0x379ec, 0x37a90,
1816 0x37a98, 0x37ac4,
1817 0x37ae4, 0x37b10,
1818 0x37b24, 0x37b28,
1819 0x37b38, 0x37b50,
1820 0x37bf0, 0x37c10,
1821 0x37c24, 0x37c28,
1822 0x37c38, 0x37c50,
1823 0x37cf0, 0x37cfc,
1824 0x40040, 0x40040,
1825 0x40080, 0x40084,
1826 0x40100, 0x40100,
1827 0x40140, 0x401bc,
1828 0x40200, 0x40214,
1829 0x40228, 0x40228,
1830 0x40240, 0x40258,
1831 0x40280, 0x40280,
1832 0x40304, 0x40304,
1833 0x40330, 0x4033c,
1834 0x41304, 0x413c8,
1835 0x413d0, 0x413dc,
1836 0x413f0, 0x413f0,
1837 0x41400, 0x4140c,
1838 0x41414, 0x4141c,
1839 0x41480, 0x414d0,
1840 0x44000, 0x4407c,
1841 0x440c0, 0x441ac,
1842 0x441b4, 0x4427c,
1843 0x442c0, 0x443ac,
1844 0x443b4, 0x4447c,
1845 0x444c0, 0x445ac,
1846 0x445b4, 0x4467c,
1847 0x446c0, 0x447ac,
1848 0x447b4, 0x4487c,
1849 0x448c0, 0x449ac,
1850 0x449b4, 0x44a7c,
1851 0x44ac0, 0x44bac,
1852 0x44bb4, 0x44c7c,
1853 0x44cc0, 0x44dac,
1854 0x44db4, 0x44e7c,
1855 0x44ec0, 0x44fac,
1856 0x44fb4, 0x4507c,
1857 0x450c0, 0x451ac,
1858 0x451b4, 0x451fc,
1859 0x45800, 0x45804,
1860 0x45810, 0x45830,
1861 0x45840, 0x45860,
1862 0x45868, 0x45868,
1863 0x45880, 0x45884,
1864 0x458a0, 0x458b0,
1865 0x45a00, 0x45a04,
1866 0x45a10, 0x45a30,
1867 0x45a40, 0x45a60,
1868 0x45a68, 0x45a68,
1869 0x45a80, 0x45a84,
1870 0x45aa0, 0x45ab0,
1871 0x460c0, 0x460e4,
1872 0x47000, 0x4703c,
1873 0x47044, 0x4708c,
1874 0x47200, 0x47250,
1875 0x47400, 0x47408,
1876 0x47414, 0x47420,
1877 0x47600, 0x47618,
1878 0x47800, 0x47814,
1879 0x47820, 0x4782c,
1880 0x50000, 0x50084,
1881 0x50090, 0x500cc,
1882 0x50300, 0x50384,
1883 0x50400, 0x50400,
1884 0x50800, 0x50884,
1885 0x50890, 0x508cc,
1886 0x50b00, 0x50b84,
1887 0x50c00, 0x50c00,
1888 0x51000, 0x51020,
1889 0x51028, 0x510b0,
1890 0x51300, 0x51324,
1891 };
1892
1893 u32 *buf_end = (u32 *)((char *)buf + buf_size);
1894 const unsigned int *reg_ranges;
1895 int reg_ranges_size, range;
1896 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
1897
1898 /* Select the right set of register ranges to dump depending on the
1899 * adapter chip type.
1900 */
1901 switch (chip_version) {
1902 case CHELSIO_T5:
1903 reg_ranges = t5_reg_ranges;
1904 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
1905 break;
1906
1907 case CHELSIO_T6:
1908 reg_ranges = t6_reg_ranges;
1909 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
1910 break;
1911
1912 default:
1913 dev_err(adap,
1914 "Unsupported chip version %d\n", chip_version);
1915 return;
1916 }
1917
1918 /* Clear the register buffer and insert the appropriate register
1919 * values selected by the above register ranges.
1920 */
1921 memset(buf, 0, buf_size);
1922 for (range = 0; range < reg_ranges_size; range += 2) {
1923 unsigned int reg = reg_ranges[range];
1924 unsigned int last_reg = reg_ranges[range + 1];
1925 u32 *bufp = (u32 *)((char *)buf + reg);
1926
1927 /* Iterate across the register range filling in the register
1928 * buffer but don't write past the end of the register buffer.
1929 */
1930 while (reg <= last_reg && bufp < buf_end) {
1931 *bufp++ = t4_read_reg(adap, reg);
1932 reg += sizeof(u32);
1933 }
1934 }
1935 }
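
/*
 * Usage sketch: size the register dump buffer with t4_get_regs_len(); a
 * shorter buffer simply truncates the dump.  rte_zmalloc() is used here for
 * illustration only.
 *
 *	size_t len = t4_get_regs_len(adap);
 *	void *buf = rte_zmalloc(NULL, len, 0);
 *
 *	if (buf != NULL)
 *		t4_get_regs(adap, buf, len);
 */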
1936
1937 /* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
1938 #define EEPROM_DELAY 10 /* 10us per poll spin */
1939 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
1940
1941 #define EEPROM_STAT_ADDR 0x7bfc
1942
1943 /**
1944 * t4_seeprom_wait - wait until any outstanding VPD Access is complete.
1945 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
1946 * VPD Access in flight. This allows us to handle the problem of having a
1947 * previous VPD Access time out and prevent an attempt to inject a new VPD
1948 * Request before any in-flight VPD request has completed.
1949 */
1950 static int t4_seeprom_wait(struct adapter *adapter)
1951 {
1952 unsigned int base = adapter->params.pci.vpd_cap_addr;
1953 int max_poll;
1954
1955 /* If no VPD Access is in flight, we can just return success right
1956 * away.
1957 */
1958 if (!adapter->vpd_busy)
1959 return 0;
1960
1961 /* Poll the VPD Capability Address/Flag register waiting for it
1962 * to indicate that the operation is complete.
1963 */
1964 max_poll = EEPROM_MAX_POLL;
1965 do {
1966 u16 val;
1967
1968 udelay(EEPROM_DELAY);
1969 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
1970
1971 /* If the operation is complete, mark the VPD as no longer
1972 * busy and return success.
1973 */
1974 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
1975 adapter->vpd_busy = 0;
1976 return 0;
1977 }
1978 } while (--max_poll);
1979
1980 /* Failure! Note that we leave the VPD Busy status set in order to
1981 * avoid pushing a new VPD Access request into the VPD Capability till
1982 * the current operation eventually succeeds. It's a bug to issue a
1983 * new request when an existing request is in flight and will result
1984 * in corrupt hardware state.
1985 */
1986 return -ETIMEDOUT;
1987 }
1988
1989 /**
1990 * t4_seeprom_read - read a serial EEPROM location
1991 * @adapter: adapter to read
1992 * @addr: EEPROM virtual address
1993 * @data: where to store the read data
1994 *
1995 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
1996 * VPD capability. Note that this function must be called with a virtual
1997 * address.
1998 */
1999 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2000 {
2001 unsigned int base = adapter->params.pci.vpd_cap_addr;
2002 int ret;
2003
2004 /* VPD Accesses must always be 4-byte aligned!
2005 */
2006 if (addr >= EEPROMVSIZE || (addr & 3))
2007 return -EINVAL;
2008
2009 /* Wait for any previous operation which may still be in flight to
2010 * complete.
2011 */
2012 ret = t4_seeprom_wait(adapter);
2013 if (ret) {
2014 dev_err(adapter, "VPD still busy from previous operation\n");
2015 return ret;
2016 }
2017
2018 /* Issue our new VPD Read request, mark the VPD as being busy and wait
2019 * for our request to complete. If it doesn't complete, note the
2020 * error and return it to our caller. Note that we do not reset the
2021 * VPD Busy status!
2022 */
2023 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2024 adapter->vpd_busy = 1;
2025 adapter->vpd_flag = PCI_VPD_ADDR_F;
2026 ret = t4_seeprom_wait(adapter);
2027 if (ret) {
2028 dev_err(adapter, "VPD read of address %#x failed\n", addr);
2029 return ret;
2030 }
2031
2032 /* Grab the returned data, swizzle it into our endianness and
2033 * return success.
2034 */
2035 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2036 *data = le32_to_cpu(*data);
2037 return 0;
2038 }
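
/*
 * Usage sketch: read a 32-bit word from the serial EEPROM, e.g. the
 * write-protect/status word at EEPROM_STAT_ADDR used by t4_seeprom_write()
 * below.
 *
 *	u32 stat;
 *
 *	if (t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stat) != 0)
 *		dev_err(adapter, "EEPROM status read failed\n");
 */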
2039
2040 /**
2041 * t4_seeprom_write - write a serial EEPROM location
2042 * @adapter: adapter to write
2043 * @addr: virtual EEPROM address
2044 * @data: value to write
2045 *
2046 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2047 * VPD capability. Note that this function must be called with a virtual
2048 * address.
2049 */
2050 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2051 {
2052 unsigned int base = adapter->params.pci.vpd_cap_addr;
2053 int ret;
2054 u32 stats_reg = 0;
2055 int max_poll;
2056
2057 /* VPD Accesses must always be 4-byte aligned!
2058 */
2059 if (addr >= EEPROMVSIZE || (addr & 3))
2060 return -EINVAL;
2061
2062 /* Wait for any previous operation which may still be in flight to
2063 * complete.
2064 */
2065 ret = t4_seeprom_wait(adapter);
2066 if (ret) {
2067 dev_err(adapter, "VPD still busy from previous operation\n");
2068 return ret;
2069 }
2070
2071 /* Issue our new VPD Write request, mark the VPD as being busy and wait
2072 * for our request to complete. If it doesn't complete, note the
2073 * error and return it to our caller. Note that we do not reset the
2074 * VPD Busy status!
2075 */
2076 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2077 cpu_to_le32(data));
2078 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2079 (u16)addr | PCI_VPD_ADDR_F);
2080 adapter->vpd_busy = 1;
2081 adapter->vpd_flag = 0;
2082 ret = t4_seeprom_wait(adapter);
2083 if (ret) {
2084 dev_err(adapter, "VPD write of address %#x failed\n", addr);
2085 return ret;
2086 }
2087
2088 /* Reset PCI_VPD_DATA register after a transaction and wait for our
2089 * request to complete. If it doesn't complete, return error.
2090 */
2091 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2092 max_poll = EEPROM_MAX_POLL;
2093 do {
2094 udelay(EEPROM_DELAY);
2095 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2096 } while ((stats_reg & 0x1) && --max_poll);
2097 if (!max_poll)
2098 return -ETIMEDOUT;
2099
2100 /* Return success! */
2101 return 0;
2102 }
2103
2104 /**
2105 * t4_seeprom_wp - enable/disable EEPROM write protection
2106 * @adapter: the adapter
2107 * @enable: whether to enable or disable write protection
2108 *
2109 * Enables or disables write protection on the serial EEPROM.
2110 */
2111 int t4_seeprom_wp(struct adapter *adapter, int enable)
2112 {
2113 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2114 }
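
/* Illustrative usage sketch (not part of the driver, error handling
 * omitted): update one EEPROM word by clearing write protection, writing
 * the word, re-enabling protection and reading it back. The initialized
 * adapter pointer and the 4-byte aligned address 0x400 are assumptions
 * made only for this example.
 *
 *	u32 val = 0x12345678, readback;
 *
 *	t4_seeprom_wp(adapter, 0);
 *	t4_seeprom_write(adapter, 0x400, val);
 *	t4_seeprom_wp(adapter, 1);
 *	t4_seeprom_read(adapter, 0x400, &readback);
 */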
2115
2116 /**
2117 * t4_fw_tp_pio_rw - Access TP PIO through LDST
2118 * @adap: the adapter
2119 * @vals: where the indirect register values are stored/written
2120 * @nregs: how many indirect registers to read/write
2121 * @start_idx: index of first indirect register to read/write
2122 * @rw: Read (1) or Write (0)
2123 *
2124 * Access TP PIO registers through LDST
2125 */
2126 void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
2127 unsigned int start_index, unsigned int rw)
2128 {
2129 int cmd = FW_LDST_ADDRSPC_TP_PIO;
2130 struct fw_ldst_cmd c;
2131 unsigned int i;
2132 int ret;
2133
2134 for (i = 0 ; i < nregs; i++) {
2135 memset(&c, 0, sizeof(c));
2136 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
2137 F_FW_CMD_REQUEST |
2138 (rw ? F_FW_CMD_READ :
2139 F_FW_CMD_WRITE) |
2140 V_FW_LDST_CMD_ADDRSPACE(cmd));
2141 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
2142
2143 c.u.addrval.addr = cpu_to_be32(start_index + i);
2144 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
2145 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2146 if (ret == 0) {
2147 if (rw)
2148 vals[i] = be32_to_cpu(c.u.addrval.val);
2149 }
2150 }
2151 }
2152
2153 /**
2154 * t4_read_rss_key - read the global RSS key
2155 * @adap: the adapter
2156 * @key: 10-entry array holding the 320-bit RSS key
2157 *
2158 * Reads the global 320-bit RSS key.
2159 */
2160 void t4_read_rss_key(struct adapter *adap, u32 *key)
2161 {
2162 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
2163 }
2164
2165 /**
2166 * t4_write_rss_key - program one of the RSS keys
2167 * @adap: the adapter
2168 * @key: 10-entry array holding the 320-bit RSS key
2169 * @idx: which RSS key to write
2170 *
2171 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2172 * 0..15 the corresponding entry in the RSS key table is written,
2173 * otherwise the global RSS key is written.
2174 */
2175 void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
2176 {
2177 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
2178 u8 rss_key_addr_cnt = 16;
2179
2180 /* T6 and later: for KeyMode 3 (per-VF and per-VF scramble),
2181 * access to key addresses 16-63 is allowed by using KeyWrAddrX
2182 * as index[5:4] (the upper 2 bits) into the key table.
2183 */
2184 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
2185 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
2186 rss_key_addr_cnt = 32;
2187
2188 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
2189
2190 if (idx >= 0 && idx < rss_key_addr_cnt) {
2191 if (rss_key_addr_cnt > 16)
2192 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2193 V_KEYWRADDRX(idx >> 4) |
2194 V_T6_VFWRADDR(idx) | F_KEYWREN);
2195 else
2196 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2197 V_KEYWRADDR(idx) | F_KEYWREN);
2198 }
2199 }
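
/* Illustrative usage sketch (not part of the driver): read the global
 * 320-bit RSS key and write it back unchanged. Passing a negative @idx
 * updates only the global key; an @idx in the valid key-table range also
 * selects a key table entry. The initialized adapter pointer is an
 * assumption made only for this example.
 *
 *	u32 rss_key[10];
 *
 *	t4_read_rss_key(adapter, rss_key);
 *	t4_write_rss_key(adapter, rss_key, -1);
 */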
2200
2201 /**
2202 * t4_config_rss_range - configure a portion of the RSS mapping table
2203 * @adapter: the adapter
2204 * @mbox: mbox to use for the FW command
2205 * @viid: virtual interface whose RSS subtable is to be written
2206 * @start: start entry in the table to write
2207 * @n: how many table entries to write
2208 * @rspq: values for the "response queue" (Ingress Queue) lookup table
2209 * @nrspq: number of values in @rspq
2210 *
2211 * Programs the selected part of the VI's RSS mapping table with the
2212 * provided values. If @nrspq < @n the supplied values are used repeatedly
2213 * until the full table range is populated.
2214 *
2215 * The caller must ensure the values in @rspq are in the range allowed for
2216 * @viid.
2217 */
2218 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2219 int start, int n, const u16 *rspq, unsigned int nrspq)
2220 {
2221 int ret;
2222 const u16 *rsp = rspq;
2223 const u16 *rsp_end = rspq + nrspq;
2224 struct fw_rss_ind_tbl_cmd cmd;
2225
2226 memset(&cmd, 0, sizeof(cmd));
2227 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2228 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2229 V_FW_RSS_IND_TBL_CMD_VIID(viid));
2230 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2231
2232 /*
2233 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2234 * Queue Identifiers. These Ingress Queue IDs are packed three to
2235 * a 32-bit word as 10-bit values, with the remaining upper 2 bits
2236 * reserved.
2237 */
2238 while (n > 0) {
2239 int nq = min(n, 32);
2240 int nq_packed = 0;
2241 __be32 *qp = &cmd.iq0_to_iq2;
2242
2243 /*
2244 * Set up the firmware RSS command header to send the next
2245 * "nq" Ingress Queue IDs to the firmware.
2246 */
2247 cmd.niqid = cpu_to_be16(nq);
2248 cmd.startidx = cpu_to_be16(start);
2249
2250 /*
2251 * "nq" more done for the start of the next loop.
2252 */
2253 start += nq;
2254 n -= nq;
2255
2256 /*
2257 * While there are still Ingress Queue IDs to stuff into the
2258 * current firmware RSS command, retrieve them from the
2259 * Ingress Queue ID array and insert them into the command.
2260 */
2261 while (nq > 0) {
2262 /*
2263 * Grab up to the next 3 Ingress Queue IDs (wrapping
2264 * around the Ingress Queue ID array if necessary) and
2265 * insert them into the firmware RSS command at the
2266 * current 3-tuple position within the command.
2267 */
2268 u16 qbuf[3];
2269 u16 *qbp = qbuf;
2270 int nqbuf = min(3, nq);
2271
2272 nq -= nqbuf;
2273 qbuf[0] = 0;
2274 qbuf[1] = 0;
2275 qbuf[2] = 0;
2276 while (nqbuf && nq_packed < 32) {
2277 nqbuf--;
2278 nq_packed++;
2279 *qbp++ = *rsp++;
2280 if (rsp >= rsp_end)
2281 rsp = rspq;
2282 }
2283 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2284 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2285 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2286 }
2287
2288 /*
2289 * Send this portion of the RSS table update to the firmware;
2290 * bail out on any errors.
2291 */
2292 if (is_pf4(adapter))
2293 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd),
2294 NULL);
2295 else
2296 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
2297 if (ret)
2298 return ret;
2299 }
2300
2301 return 0;
2302 }
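
/* Illustrative usage sketch (not part of the driver): spread 64 RSS
 * indirection table entries of a VI across four response queues. Because
 * @nrspq < @n here, the four queue IDs are repeated until all 64 slots
 * are written. The VI id "viid" and the queue IDs are assumptions made
 * only for this example.
 *
 *	static const u16 rspq[4] = { 64, 65, 66, 67 };
 *	int ret;
 *
 *	ret = t4_config_rss_range(adapter, adapter->mbox, viid, 0, 64,
 *				  rspq, 4);
 */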
2303
2304 /**
2305 * t4_config_vi_rss - configure per VI RSS settings
2306 * @adapter: the adapter
2307 * @mbox: mbox to use for the FW command
2308 * @viid: the VI id
2309 * @flags: RSS flags
2310 * @defq: id of the default RSS queue for the VI.
2311 *
2312 * Configures VI-specific RSS properties.
2313 */
2314 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2315 unsigned int flags, unsigned int defq)
2316 {
2317 struct fw_rss_vi_config_cmd c;
2318
2319 memset(&c, 0, sizeof(c));
2320 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2321 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2322 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2323 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2324 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
2325 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2326 if (is_pf4(adapter))
2327 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2328 else
2329 return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
2330 }
2331
2332 /**
2333 * t4_read_config_vi_rss - read the configured per VI RSS settings
2334 * @adapter: the adapter
2335 * @mbox: mbox to use for the FW command
2336 * @viid: the VI id
2337 * @flags: where to place the configured flags
2338 * @defq: where to place the id of the default RSS queue for the VI.
2339 *
2340 * Read configured VI-specific RSS properties.
2341 */
2342 int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2343 u64 *flags, unsigned int *defq)
2344 {
2345 struct fw_rss_vi_config_cmd c;
2346 unsigned int result;
2347 int ret;
2348
2349 memset(&c, 0, sizeof(c));
2350 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2351 F_FW_CMD_REQUEST | F_FW_CMD_READ |
2352 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2353 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2354 ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c);
2355 if (!ret) {
2356 result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen);
2357 if (defq)
2358 *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result);
2359 if (flags)
2360 *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ;
2361 }
2362
2363 return ret;
2364 }
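
/* Illustrative usage sketch (not part of the driver): query which queue a
 * VI currently uses as its default RSS destination. The VI id "viid" and
 * the initialized adapter pointer are assumptions made only for this
 * example.
 *
 *	u64 flags = 0;
 *	unsigned int defq = 0;
 *
 *	if (!t4_read_config_vi_rss(adapter, adapter->mbox, viid,
 *				   &flags, &defq))
 *		dev_debug(adapter, "VI %u default RSS queue: %u\n",
 *			  viid, defq);
 */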
2365
2366 /**
2367 * init_cong_ctrl - initialize congestion control parameters
2368 * @a: the alpha values for congestion control
2369 * @b: the beta values for congestion control
2370 *
2371 * Initialize the congestion control parameters.
2372 */
2373 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2374 {
2375 int i;
2376
2377 for (i = 0; i < 9; i++) {
2378 a[i] = 1;
2379 b[i] = 0;
2380 }
2381
2382 a[9] = 2;
2383 a[10] = 3;
2384 a[11] = 4;
2385 a[12] = 5;
2386 a[13] = 6;
2387 a[14] = 7;
2388 a[15] = 8;
2389 a[16] = 9;
2390 a[17] = 10;
2391 a[18] = 14;
2392 a[19] = 17;
2393 a[20] = 21;
2394 a[21] = 25;
2395 a[22] = 30;
2396 a[23] = 35;
2397 a[24] = 45;
2398 a[25] = 60;
2399 a[26] = 80;
2400 a[27] = 100;
2401 a[28] = 200;
2402 a[29] = 300;
2403 a[30] = 400;
2404 a[31] = 500;
2405
2406 b[9] = 1;
2407 b[10] = 1;
2408 b[11] = 2;
2409 b[12] = 2;
2410 b[13] = 3;
2411 b[14] = 3;
2412 b[15] = 3;
2413 b[16] = 3;
2414 b[17] = 4;
2415 b[18] = 4;
2416 b[19] = 4;
2417 b[20] = 4;
2418 b[21] = 4;
2419 b[22] = 5;
2420 b[23] = 5;
2421 b[24] = 5;
2422 b[25] = 5;
2423 b[26] = 5;
2424 b[27] = 5;
2425 b[28] = 6;
2426 b[29] = 6;
2427 b[30] = 7;
2428 b[31] = 7;
2429 }
2430
2431 #define INIT_CMD(var, cmd, rd_wr) do { \
2432 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
2433 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
2434 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
2435 } while (0)
2436
2437 int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
2438 {
2439 u32 cclk_param, cclk_val;
2440 int ret;
2441
2442 /*
2443 * Ask firmware for the Core Clock since it knows how to translate the
2444 * Reference Clock ('V2') VPD field into a Core Clock value ...
2445 */
2446 cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2447 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
2448 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2449 1, &cclk_param, &cclk_val);
2450 if (ret) {
2451 dev_err(adapter, "%s: error fetching Core Clock: %d\n",
2452 __func__, ret);
2453 return ret;
2454 }
2455
2456 p->cclk = cclk_val;
2457 dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
2458 return 0;
2459 }
2460
2461 /**
2462 * t4_get_pfres - retrieve PF resource limits
2463 * @adapter: the adapter
2464 *
2465 * Retrieves configured resource limits and capabilities for a physical
2466 * function. The results are stored in @adapter->params.pfres.
2467 */
2468 int t4_get_pfres(struct adapter *adapter)
2469 {
2470 struct pf_resources *pfres = &adapter->params.pfres;
2471 struct fw_pfvf_cmd cmd, rpl;
2472 u32 word;
2473 int v;
2474
2475 /*
2476 * Execute PFVF Read command to get PF resource limits; bail out early
2477 * with error on command failure.
2478 */
2479 memset(&cmd, 0, sizeof(cmd));
2480 cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
2481 F_FW_CMD_REQUEST |
2482 F_FW_CMD_READ |
2483 V_FW_PFVF_CMD_PFN(adapter->pf) |
2484 V_FW_PFVF_CMD_VFN(0));
2485 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2486 v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
2487 if (v != FW_SUCCESS)
2488 return v;
2489
2490 /*
2491 * Extract PF resource limits and return success.
2492 */
2493 word = be32_to_cpu(rpl.niqflint_niq);
2494 pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
2495
2496 word = be32_to_cpu(rpl.type_to_neq);
2497 pfres->neq = G_FW_PFVF_CMD_NEQ(word);
2498
2499 word = be32_to_cpu(rpl.r_caps_to_nethctrl);
2500 pfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
2501
2502 return 0;
2503 }
2504
2505 /* serial flash and firmware constants and flash config file constants */
2506 enum {
2507 SF_ATTEMPTS = 10, /* max retries for SF operations */
2508
2509 /* flash command opcodes */
2510 SF_PROG_PAGE = 2, /* program page */
2511 SF_WR_DISABLE = 4, /* disable writes */
2512 SF_RD_STATUS = 5, /* read status register */
2513 SF_WR_ENABLE = 6, /* enable writes */
2514 SF_RD_DATA_FAST = 0xb, /* read flash */
2515 SF_RD_ID = 0x9f, /* read ID */
2516 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2517 };
2518
2519 /**
2520 * sf1_read - read data from the serial flash
2521 * @adapter: the adapter
2522 * @byte_cnt: number of bytes to read
2523 * @cont: whether another operation will be chained
2524 * @lock: whether to lock SF for PL access only
2525 * @valp: where to store the read data
2526 *
2527 * Reads up to 4 bytes of data from the serial flash. The location of
2528 * the read needs to be specified prior to calling this by issuing the
2529 * appropriate commands to the serial flash.
2530 */
2531 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2532 int lock, u32 *valp)
2533 {
2534 int ret;
2535
2536 if (!byte_cnt || byte_cnt > 4)
2537 return -EINVAL;
2538 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
2539 return -EBUSY;
2540 t4_write_reg(adapter, A_SF_OP,
2541 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
2542 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2543 if (!ret)
2544 *valp = t4_read_reg(adapter, A_SF_DATA);
2545 return ret;
2546 }
2547
2548 /**
2549 * sf1_write - write data to the serial flash
2550 * @adapter: the adapter
2551 * @byte_cnt: number of bytes to write
2552 * @cont: whether another operation will be chained
2553 * @lock: whether to lock SF for PL access only
2554 * @val: value to write
2555 *
2556 * Writes up to 4 bytes of data to the serial flash. The location of
2557 * the write needs to be specified prior to calling this by issuing the
2558 * appropriate commands to the serial flash.
2559 */
2560 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2561 int lock, u32 val)
2562 {
2563 if (!byte_cnt || byte_cnt > 4)
2564 return -EINVAL;
2565 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
2566 return -EBUSY;
2567 t4_write_reg(adapter, A_SF_DATA, val);
2568 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
2569 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
2570 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2571 }
2572
2573 /**
2574 * t4_read_flash - read words from serial flash
2575 * @adapter: the adapter
2576 * @addr: the start address for the read
2577 * @nwords: how many 32-bit words to read
2578 * @data: where to store the read data
2579 * @byte_oriented: whether to store data as bytes or as words
2580 *
2581 * Read the specified number of 32-bit words from the serial flash.
2582 * If @byte_oriented is set the read data is stored as a byte array
2583 * (i.e., big-endian), otherwise as 32-bit words in the platform's
2584 * natural endianness.
2585 */
2586 int t4_read_flash(struct adapter *adapter, unsigned int addr,
2587 unsigned int nwords, u32 *data, int byte_oriented)
2588 {
2589 int ret;
2590
2591 if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
2592 (addr & 3))
2593 return -EINVAL;
2594
2595 addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;
2596
2597 ret = sf1_write(adapter, 4, 1, 0, addr);
2598 if (ret != 0)
2599 return ret;
2600
2601 ret = sf1_read(adapter, 1, 1, 0, data);
2602 if (ret != 0)
2603 return ret;
2604
2605 for ( ; nwords; nwords--, data++) {
2606 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2607 if (nwords == 1)
2608 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
2609 if (ret)
2610 return ret;
2611 if (byte_oriented)
2612 *data = cpu_to_be32(*data);
2613 }
2614 return 0;
2615 }
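
/* Illustrative usage sketch (not part of the driver): read the first
 * 64 bytes of the firmware image region of FLASH. With @byte_oriented
 * set, the words land in the buffer as a big-endian byte stream, i.e.
 * exactly as stored in FLASH, which is convenient when the buffer is
 * treated as raw bytes. The initialized adapter pointer is an assumption
 * made only for this example.
 *
 *	u32 buf[16];
 *	int ret;
 *
 *	ret = t4_read_flash(adapter, FLASH_FW_START, 16, buf, 1);
 */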
2616
2617 /**
2618 * t4_get_exprom_version - return the Expansion ROM version (if any)
2619 * @adapter: the adapter
2620 * @vers: where to place the version
2621 *
2622 * Reads the Expansion ROM header from FLASH and returns the version
2623 * number (if present) through the @vers return value pointer. We return
2624 * this in the Firmware Version Format since it's convenient. Return
2625 * 0 on success, -ENOENT if no Expansion ROM is present.
2626 */
2627 static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
2628 {
2629 struct exprom_header {
2630 unsigned char hdr_arr[16]; /* must start with 0x55aa */
2631 unsigned char hdr_ver[4]; /* Expansion ROM version */
2632 } *hdr;
2633 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
2634 sizeof(u32))];
2635 int ret;
2636
2637 ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
2638 ARRAY_SIZE(exprom_header_buf),
2639 exprom_header_buf, 0);
2640 if (ret)
2641 return ret;
2642
2643 hdr = (struct exprom_header *)exprom_header_buf;
2644 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
2645 return -ENOENT;
2646
2647 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
2648 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
2649 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
2650 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
2651 return 0;
2652 }
2653
2654 /**
2655 * t4_get_fw_version - read the firmware version
2656 * @adapter: the adapter
2657 * @vers: where to place the version
2658 *
2659 * Reads the FW version from flash.
2660 */
2661 static int t4_get_fw_version(struct adapter *adapter, u32 *vers)
2662 {
2663 return t4_read_flash(adapter, FLASH_FW_START +
2664 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
2665 }
2666
2667 /**
2668 * t4_get_bs_version - read the firmware bootstrap version
2669 * @adapter: the adapter
2670 * @vers: where to place the version
2671 *
2672 * Reads the FW Bootstrap version from flash.
2673 */
2674 static int t4_get_bs_version(struct adapter *adapter, u32 *vers)
2675 {
2676 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
2677 offsetof(struct fw_hdr, fw_ver), 1,
2678 vers, 0);
2679 }
2680
2681 /**
2682 * t4_get_tp_version - read the TP microcode version
2683 * @adapter: the adapter
2684 * @vers: where to place the version
2685 *
2686 * Reads the TP microcode version from flash.
2687 */
2688 static int t4_get_tp_version(struct adapter *adapter, u32 *vers)
2689 {
2690 return t4_read_flash(adapter, FLASH_FW_START +
2691 offsetof(struct fw_hdr, tp_microcode_ver),
2692 1, vers, 0);
2693 }
2694
2695 /**
2696 * t4_get_version_info - extract various chip/firmware version information
2697 * @adapter: the adapter
2698 *
2699 * Reads various chip/firmware version numbers and stores them into the
2700 * adapter's Adapter Parameters structure. If any of the reads fails,
2701 * the first failure will be returned, but all of the version numbers
2702 * will be read.
2703 */
2704 int t4_get_version_info(struct adapter *adapter)
2705 {
2706 int ret = 0;
2707
2708 #define FIRST_RET(__getvinfo) \
2709 do { \
2710 int __ret = __getvinfo; \
2711 if (__ret && !ret) \
2712 ret = __ret; \
2713 } while (0)
2714
2715 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
2716 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
2717 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
2718 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
2719
2720 #undef FIRST_RET
2721
2722 return ret;
2723 }
2724
2725 /**
2726 * t4_dump_version_info - dump all of the adapter configuration IDs
2727 * @adapter: the adapter
2728 *
2729 * Dumps all of the various bits of adapter configuration version/revision
2730 * IDs information. This is typically called at some point after
2731 * t4_get_version_info() has been called.
2732 */
2733 void t4_dump_version_info(struct adapter *adapter)
2734 {
2735 /**
2736 * Device information.
2737 */
2738 dev_info(adapter, "Chelsio rev %d\n",
2739 CHELSIO_CHIP_RELEASE(adapter->params.chip));
2740
2741 /**
2742 * Firmware Version.
2743 */
2744 if (!adapter->params.fw_vers)
2745 dev_warn(adapter, "No firmware loaded\n");
2746 else
2747 dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
2748 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
2749 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
2750 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
2751 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
2752
2753 /**
2754 * Bootstrap Firmware Version.
2755 */
2756 if (!adapter->params.bs_vers)
2757 dev_warn(adapter, "No bootstrap loaded\n");
2758 else
2759 dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n",
2760 G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
2761 G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
2762 G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
2763 G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));
2764
2765 /**
2766 * TP Microcode Version.
2767 */
2768 if (!adapter->params.tp_vers)
2769 dev_warn(adapter, "No TP Microcode loaded\n");
2770 else
2771 dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
2772 G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
2773 G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
2774 G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
2775 G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
2776
2777 /**
2778 * Expansion ROM version.
2779 */
2780 if (!adapter->params.er_vers)
2781 dev_info(adapter, "No Expansion ROM loaded\n");
2782 else
2783 dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
2784 G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
2785 G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
2786 G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
2787 G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
2788 }
2789
2790 /**
2791 * t4_link_l1cfg_core - apply link configuration to MAC/PHY
2792 * @pi: the port info
2793 * @caps: link capabilities to configure
2794 * @sleep_ok: if true we may sleep while awaiting command completion
2795 *
2796 * Set up a port's MAC and PHY according to a desired link configuration.
2797 * - If the PHY can auto-negotiate first decide what to advertise, then
2798 * enable/disable auto-negotiation as desired, and reset.
2799 * - If the PHY does not auto-negotiate just reset it.
2800 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2801 * otherwise do it later based on the outcome of auto-negotiation.
2802 */
2803 int t4_link_l1cfg_core(struct port_info *pi, u32 caps, u8 sleep_ok)
2804 {
2805 struct link_config *lc = &pi->link_cfg;
2806 struct adapter *adap = pi->adapter;
2807 struct fw_port_cmd cmd;
2808 int ret;
2809
2810 memset(&cmd, 0, sizeof(cmd));
2811 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
2812 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
2813 V_FW_PORT_CMD_PORTID(pi->port_id));
2814 cmd.action_to_len16 =
2815 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
2816 FW_LEN16(cmd));
2817
2818 cmd.u.l1cfg32.rcap32 = cpu_to_be32(caps);
2819
2820 if (sleep_ok)
2821 ret = t4_wr_mbox(adap, adap->mbox, &cmd, sizeof(cmd), NULL);
2822 else
2823 ret = t4_wr_mbox_ns(adap, adap->mbox, &cmd, sizeof(cmd), NULL);
2824
2825 if (ret == FW_SUCCESS)
2826 lc->link_caps = caps;
2827 else
2828 dev_err(adap,
2829 "Requested Port Capabilities %#x rejected, error %d\n",
2830 caps, ret);
2831
2832 return ret;
2833 }
2834
2835 /**
2836 * t4_flash_cfg_addr - return the address of the flash configuration file
2837 * @adapter: the adapter
2838 *
2839 * Return the address within the flash where the Firmware Configuration
2840 * File is stored, or an error if the device FLASH is too small to contain
2841 * a Firmware Configuration File.
2842 */
2843 int t4_flash_cfg_addr(struct adapter *adapter)
2844 {
2845 /*
2846 * If the device FLASH isn't large enough to hold a Firmware
2847 * Configuration File, return an error.
2848 */
2849 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
2850 return -ENOSPC;
2851
2852 return FLASH_CFG_START;
2853 }
2854
2855 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2856
2857 /**
2858 * t4_intr_enable - enable interrupts
2859 * @adapter: the adapter whose interrupts should be enabled
2860 *
2861 * Enable PF-specific interrupts for the calling function and the top-level
2862 * interrupt concentrator for global interrupts. Interrupts are already
2863 * enabled at each module, here we just enable the roots of the interrupt
2864 * hierarchies.
2865 *
2866 * Note: this function should be called only when the driver manages
2867 * non PF-specific interrupts from the various HW modules. Only one PCI
2868 * function at a time should be doing this.
2869 */
2870 void t4_intr_enable(struct adapter *adapter)
2871 {
2872 u32 val = 0;
2873 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
2874 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
2875 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
2876
2877 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
2878 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
2879 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2880 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2881 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
2882 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2883 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2884 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2885 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
2886 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2887 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2888 }
2889
2890 /**
2891 * t4_intr_disable - disable interrupts
2892 * @adapter: the adapter whose interrupts should be disabled
2893 *
2894 * Disable interrupts. We only disable the top-level interrupt
2895 * concentrators. The caller must be a PCI function managing global
2896 * interrupts.
2897 */
2898 void t4_intr_disable(struct adapter *adapter)
2899 {
2900 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
2901 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
2902 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
2903
2904 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2905 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2906 }
2907
2908 /**
2909 * t4_get_port_type_description - return Port Type string description
2910 * @port_type: firmware Port Type enumeration
2911 */
2912 const char *t4_get_port_type_description(enum fw_port_type port_type)
2913 {
2914 static const char * const port_type_description[] = {
2915 "Fiber_XFI",
2916 "Fiber_XAUI",
2917 "BT_SGMII",
2918 "BT_XFI",
2919 "BT_XAUI",
2920 "KX4",
2921 "CX4",
2922 "KX",
2923 "KR",
2924 "SFP",
2925 "BP_AP",
2926 "BP4_AP",
2927 "QSFP_10G",
2928 "QSA",
2929 "QSFP",
2930 "BP40_BA",
2931 "KR4_100G",
2932 "CR4_QSFP",
2933 "CR_QSFP",
2934 "CR2_QSFP",
2935 "SFP28",
2936 "KR_SFP28",
2937 };
2938
2939 if (port_type < ARRAY_SIZE(port_type_description))
2940 return port_type_description[port_type];
2941 return "UNKNOWN";
2942 }
2943
2944 /**
2945 * t4_get_mps_bg_map - return the buffer groups associated with a port
2946 * @adap: the adapter
2947 * @pidx: the port index
2948 *
2949 * Returns a bitmap indicating which MPS buffer groups are associated
2950 * with the given port. Bit i is set if buffer group i is used by the
2951 * port.
2952 */
2953 unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx)
2954 {
2955 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2956 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap,
2957 A_MPS_CMN_CTL));
2958
2959 if (pidx >= nports) {
2960 dev_warn(adap, "MPS Port Index %d >= Nports %d\n",
2961 pidx, nports);
2962 return 0;
2963 }
2964
2965 switch (chip_version) {
2966 case CHELSIO_T4:
2967 case CHELSIO_T5:
2968 switch (nports) {
2969 case 1: return 0xf;
2970 case 2: return 3 << (2 * pidx);
2971 case 4: return 1 << pidx;
2972 }
2973 break;
2974
2975 case CHELSIO_T6:
2976 switch (nports) {
2977 case 2: return 1 << (2 * pidx);
2978 }
2979 break;
2980 }
2981
2982 dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
2983 chip_version, nports);
2984 return 0;
2985 }
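
/* Worked example of the mapping above: on a 2-port T4/T5 adapter, port 1
 * maps to buffer groups 2 and 3 (3 << (2 * 1) == 0xc), while on a 2-port
 * T6 adapter, port 1 uses only buffer group 2 (1 << (2 * 1) == 0x4).
 */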
2986
2987 /**
2988 * t4_get_tp_ch_map - return TP ingress channels associated with a port
2989 * @adapter: the adapter
2990 * @pidx: the port index
2991 *
2992 * Returns a bitmap indicating which TP Ingress Channels are associated with
2993 * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
2994 */
2995 unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx)
2996 {
2997 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
2998 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter,
2999 A_MPS_CMN_CTL));
3000
3001 if (pidx >= nports) {
3002 dev_warn(adapter, "TP Port Index %d >= Nports %d\n",
3003 pidx, nports);
3004 return 0;
3005 }
3006
3007 switch (chip_version) {
3008 case CHELSIO_T4:
3009 case CHELSIO_T5:
3010 /* Note that these happen to be the same values as the MPS
3011 * Buffer Group Map for these Chips. But we replicate the code
3012 * here because they're really separate concepts.
3013 */
3014 switch (nports) {
3015 case 1: return 0xf;
3016 case 2: return 3 << (2 * pidx);
3017 case 4: return 1 << pidx;
3018 }
3019 break;
3020
3021 case CHELSIO_T6:
3022 switch (nports) {
3023 case 2: return 1 << pidx;
3024 }
3025 break;
3026 }
3027
3028 dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
3029 chip_version, nports);
3030 return 0;
3031 }
3032
3033 /**
3034 * t4_get_port_stats - collect port statistics
3035 * @adap: the adapter
3036 * @idx: the port index
3037 * @p: the stats structure to fill
3038 *
3039 * Collect statistics related to the given port from HW.
3040 */
3041 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3042 {
3043 u32 bgmap = t4_get_mps_bg_map(adap, idx);
3044 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
3045
3046 #define GET_STAT(name) \
3047 t4_read_reg64(adap, \
3048 (is_t4(adap->params.chip) ? \
3049 PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
3050 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3051 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3052
3053 p->tx_octets = GET_STAT(TX_PORT_BYTES);
3054 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
3055 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
3056 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
3057 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
3058 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
3059 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
3060 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
3061 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
3062 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
3063 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
3064 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3065 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
3066 p->tx_drop = GET_STAT(TX_PORT_DROP);
3067 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
3068 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
3069 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
3070 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
3071 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
3072 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
3073 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
3074 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
3075 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
3076
3077 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3078 if (stat_ctl & F_COUNTPAUSESTATTX) {
3079 p->tx_frames -= p->tx_pause;
3080 p->tx_octets -= p->tx_pause * 64;
3081 }
3082 if (stat_ctl & F_COUNTPAUSEMCTX)
3083 p->tx_mcast_frames -= p->tx_pause;
3084 }
3085
3086 p->rx_octets = GET_STAT(RX_PORT_BYTES);
3087 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
3088 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
3089 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
3090 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
3091 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
3092 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3093 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
3094 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
3095 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
3096 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
3097 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
3098 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
3099 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
3100 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
3101 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
3102 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3103 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
3104 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
3105 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
3106 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
3107 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
3108 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
3109 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
3110 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
3111 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
3112 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
3113
3114 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3115 if (stat_ctl & F_COUNTPAUSESTATRX) {
3116 p->rx_frames -= p->rx_pause;
3117 p->rx_octets -= p->rx_pause * 64;
3118 }
3119 if (stat_ctl & F_COUNTPAUSEMCRX)
3120 p->rx_mcast_frames -= p->rx_pause;
3121 }
3122
3123 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3124 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3125 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3126 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3127 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3128 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3129 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3130 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3131
3132 #undef GET_STAT
3133 #undef GET_STAT_COM
3134 }
3135
3136 /**
3137 * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
3138 * @adap: The adapter
3139 * @idx: The port
3140 * @stats: Current stats to fill
3141 * @offset: Previous stats snapshot
3142 */
3143 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3144 struct port_stats *stats,
3145 struct port_stats *offset)
3146 {
3147 u64 *s, *o;
3148 unsigned int i;
3149
3150 t4_get_port_stats(adap, idx, stats);
3151 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
3152 i < (sizeof(struct port_stats) / sizeof(u64));
3153 i++, s++, o++)
3154 *s -= *o;
3155 }
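
/* Illustrative usage sketch (not part of the driver): take a baseline
 * snapshot once and later report statistics relative to it, e.g. to give
 * a "cleared" view without touching the hardware counters. The port
 * index 0 and the initialized adapter pointer are assumptions made only
 * for this example.
 *
 *	struct port_stats base, delta;
 *
 *	t4_get_port_stats(adapter, 0, &base);
 *	...
 *	t4_get_port_stats_offset(adapter, 0, &delta, &base);
 */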
3156
3157 /**
3158 * t4_clr_port_stats - clear port statistics
3159 * @adap: the adapter
3160 * @idx: the port index
3161 *
3162 * Clear HW statistics for the given port.
3163 */
3164 void t4_clr_port_stats(struct adapter *adap, int idx)
3165 {
3166 unsigned int i;
3167 u32 bgmap = t4_get_mps_bg_map(adap, idx);
3168 u32 port_base_addr;
3169
3170 if (is_t4(adap->params.chip))
3171 port_base_addr = PORT_BASE(idx);
3172 else
3173 port_base_addr = T5_PORT_BASE(idx);
3174
3175 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3176 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3177 t4_write_reg(adap, port_base_addr + i, 0);
3178 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3179 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3180 t4_write_reg(adap, port_base_addr + i, 0);
3181 for (i = 0; i < 4; i++)
3182 if (bgmap & (1 << i)) {
3183 t4_write_reg(adap,
3184 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
3185 i * 8, 0);
3186 t4_write_reg(adap,
3187 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
3188 i * 8, 0);
3189 }
3190 }
3191
3192 /**
3193 * t4_fw_hello - establish communication with FW
3194 * @adap: the adapter
3195 * @mbox: mailbox to use for the FW command
3196 * @evt_mbox: mailbox to receive async FW events
3197 * @master: specifies the caller's willingness to be the device master
3198 * @state: returns the current device state (if non-NULL)
3199 *
3200 * Issues a command to establish communication with FW. Returns either
3201 * an error (negative integer) or the mailbox of the Master PF.
3202 */
3203 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
3204 enum dev_master master, enum dev_state *state)
3205 {
3206 int ret;
3207 struct fw_hello_cmd c;
3208 u32 v;
3209 unsigned int master_mbox;
3210 int retries = FW_CMD_HELLO_RETRIES;
3211
3212 retry:
3213 memset(&c, 0, sizeof(c));
3214 INIT_CMD(c, HELLO, WRITE);
3215 c.err_to_clearinit = cpu_to_be32(
3216 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
3217 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
3218 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
3219 M_FW_HELLO_CMD_MBMASTER) |
3220 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
3221 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
3222 F_FW_HELLO_CMD_CLEARINIT);
3223
3224 /*
3225 * Issue the HELLO command to the firmware. If it's not successful
3226 * but indicates that we got a "busy" or "timeout" condition, retry
3227 * the HELLO until we exhaust our retry limit. If we do exceed our
3228 * retry limit, check to see if the firmware left us any error
3229 * information and report that if so ...
3230 */
3231 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3232 if (ret != FW_SUCCESS) {
3233 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
3234 goto retry;
3235 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
3236 t4_report_fw_error(adap);
3237 return ret;
3238 }
3239
3240 v = be32_to_cpu(c.err_to_clearinit);
3241 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
3242 if (state) {
3243 if (v & F_FW_HELLO_CMD_ERR)
3244 *state = DEV_STATE_ERR;
3245 else if (v & F_FW_HELLO_CMD_INIT)
3246 *state = DEV_STATE_INIT;
3247 else
3248 *state = DEV_STATE_UNINIT;
3249 }
3250
3251 /*
3252 * If we're not the Master PF then we need to wait around for the
3253 * Master PF Driver to finish setting up the adapter.
3254 *
3255 * Note that we also do this wait if we're a non-Master-capable PF and
3256 * there is no current Master PF; a Master PF may show up momentarily
3257 * and we wouldn't want to fail pointlessly. (This can happen when an
3258 * OS loads lots of different drivers rapidly at the same time). In
3259 * this case, the Master PF returned by the firmware will be
3260 * M_PCIE_FW_MASTER so the test below will work ...
3261 */
3262 if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
3263 master_mbox != mbox) {
3264 int waiting = FW_CMD_HELLO_TIMEOUT;
3265
3266 /*
3267 * Wait for the firmware to either indicate an error or
3268 * initialized state. If we see either of these we bail out
3269 * and report the issue to the caller. If we exhaust the
3270 * "hello timeout" and we haven't exhausted our retries, try
3271 * again. Otherwise bail with a timeout error.
3272 */
3273 for (;;) {
3274 u32 pcie_fw;
3275
3276 msleep(50);
3277 waiting -= 50;
3278
3279 /*
3280 * If neither Error nor Initialized are indicated
3281 * by the firmware, keep waiting till we exhaust our
3282 * timeout ... and then retry if we haven't exhausted
3283 * our retries ...
3284 */
3285 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
3286 if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
3287 if (waiting <= 0) {
3288 if (retries-- > 0)
3289 goto retry;
3290
3291 return -ETIMEDOUT;
3292 }
3293 continue;
3294 }
3295
3296 /*
3297 * We either have an Error or Initialized condition;
3298 * report errors preferentially.
3299 */
3300 if (state) {
3301 if (pcie_fw & F_PCIE_FW_ERR)
3302 *state = DEV_STATE_ERR;
3303 else if (pcie_fw & F_PCIE_FW_INIT)
3304 *state = DEV_STATE_INIT;
3305 }
3306
3307 /*
3308 * If we arrived before a Master PF was selected and
3309 * one has since become valid, grab its identity
3310 * for our caller.
3311 */
3312 if (master_mbox == M_PCIE_FW_MASTER &&
3313 (pcie_fw & F_PCIE_FW_MASTER_VLD))
3314 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
3315 break;
3316 }
3317 }
3318
3319 return master_mbox;
3320 }
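
/* Illustrative usage sketch (not part of the driver): attach to the
 * firmware without insisting on being the Master PF and react to the
 * reported device state. MASTER_MAY is assumed to be the "don't care"
 * value of enum dev_master used by callers of this routine.
 *
 *	enum dev_state state;
 *	int master;
 *
 *	master = t4_fw_hello(adap, adap->mbox, adap->mbox,
 *			     MASTER_MAY, &state);
 *	if (master < 0)
 *		return master;
 *	if (state == DEV_STATE_ERR)
 *		return -EIO;
 */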
3321
3322 /**
3323 * t4_fw_bye - end communication with FW
3324 * @adap: the adapter
3325 * @mbox: mailbox to use for the FW command
3326 *
3327 * Issues a command to terminate communication with FW.
3328 */
3329 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
3330 {
3331 struct fw_bye_cmd c;
3332
3333 memset(&c, 0, sizeof(c));
3334 INIT_CMD(c, BYE, WRITE);
3335 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3336 }
3337
3338 /**
3339 * t4_fw_reset - issue a reset to FW
3340 * @adap: the adapter
3341 * @mbox: mailbox to use for the FW command
3342 * @reset: specifies the type of reset to perform
3343 *
3344 * Issues a reset command of the specified type to FW.
3345 */
3346 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
3347 {
3348 struct fw_reset_cmd c;
3349
3350 memset(&c, 0, sizeof(c));
3351 INIT_CMD(c, RESET, WRITE);
3352 c.val = cpu_to_be32(reset);
3353 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3354 }
3355
3356 /**
3357 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
3358 * @adap: the adapter
3359 * @mbox: mailbox to use for the FW RESET command (if desired)
3360 * @force: force uP into RESET even if FW RESET command fails
3361 *
3362 * Issues a RESET command to firmware (if desired) with a HALT indication
3363 * and then puts the microprocessor into RESET state. The RESET command
3364 * will only be issued if a legitimate mailbox is provided (mbox <=
3365 * M_PCIE_FW_MASTER).
3366 *
3367 * This is generally used in order for the host to safely manipulate the
3368 * adapter without fear of conflicting with whatever the firmware might
3369 * be doing. The only way out of this state is to RESTART the firmware
3370 * ...
3371 */
3372 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
3373 {
3374 int ret = 0;
3375
3376 /*
3377 * If a legitimate mailbox is provided, issue a RESET command
3378 * with a HALT indication.
3379 */
3380 if (mbox <= M_PCIE_FW_MASTER) {
3381 struct fw_reset_cmd c;
3382
3383 memset(&c, 0, sizeof(c));
3384 INIT_CMD(c, RESET, WRITE);
3385 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
3386 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
3387 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3388 }
3389
3390 /*
3391 * Normally we won't complete the operation if the firmware RESET
3392 * command fails but if our caller insists we'll go ahead and put the
3393 * uP into RESET. This can be useful if the firmware is hung or even
3394 * missing ... We'll have to take the risk of putting the uP into
3395 * RESET without the cooperation of firmware in that case.
3396 *
3397 * We also force the firmware's HALT flag to be on in case we bypassed
3398 * the firmware RESET command above or we're dealing with old firmware
3399 * which doesn't have the HALT capability. This will serve as a flag
3400 * for the incoming firmware to know that it's coming out of a HALT
3401 * rather than a RESET ... if it's new enough to understand that ...
3402 */
3403 if (ret == 0 || force) {
3404 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
3405 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
3406 F_PCIE_FW_HALT);
3407 }
3408
3409 /*
3410 * And we always return the result of the firmware RESET command
3411 * even when we force the uP into RESET ...
3412 */
3413 return ret;
3414 }
3415
3416 /**
3417 * t4_fw_restart - restart the firmware by taking the uP out of RESET
3418 * @adap: the adapter
3419 * @mbox: mailbox to use for the FW RESET command (if desired)
3420 * @reset: if we want to do a RESET to restart things
3421 *
3422 * Restart firmware previously halted by t4_fw_halt(). On successful
3423 * return the previous PF Master remains as the new PF Master and there
3424 * is no need to issue a new HELLO command, etc.
3425 *
3426 * We do this in two ways:
3427 *
3428 * 1. If we're dealing with newer firmware we'll simply want to take
3429 * the chip's microprocessor out of RESET. This will cause the
3430 * firmware to start up from its start vector. And then we'll loop
3431 * until the firmware indicates it's started again (PCIE_FW.HALT
3432 * reset to 0) or we timeout.
3433 *
3434 * 2. If we're dealing with older firmware then we'll need to RESET
3435 * the chip since older firmware won't recognize the PCIE_FW.HALT
3436 * flag and automatically RESET itself on startup.
3437 */
3438 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3439 {
3440 if (reset) {
3441 /*
3442 * Since we're directing the RESET instead of the firmware
3443 * doing it automatically, we need to clear the PCIE_FW.HALT
3444 * bit.
3445 */
3446 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
3447
3448 /*
3449 * If we've been given a valid mailbox, first try to get the
3450 * firmware to do the RESET. If that works, great and we can
3451 * return success. Otherwise, if we haven't been given a
3452 * valid mailbox or the RESET command failed, fall back to
3453 * hitting the chip with a hammer.
3454 */
3455 if (mbox <= M_PCIE_FW_MASTER) {
3456 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3457 msleep(100);
3458 if (t4_fw_reset(adap, mbox,
3459 F_PIORST | F_PIORSTMODE) == 0)
3460 return 0;
3461 }
3462
3463 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
3464 msleep(2000);
3465 } else {
3466 int ms;
3467
3468 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3469 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3470 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
3471 return FW_SUCCESS;
3472 msleep(100);
3473 ms += 100;
3474 }
3475 return -ETIMEDOUT;
3476 }
3477 return 0;
3478 }
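
/* Illustrative usage sketch (not part of the driver): halt the firmware
 * around a sensitive operation and then bring it back. On successful
 * restart the calling PF remains Master, so no new HELLO is required
 * afterwards.
 *
 *	int ret;
 *
 *	ret = t4_fw_halt(adap, adap->mbox, 0);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	ret = t4_fw_restart(adap, adap->mbox, 0);
 */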
3479
3480 /**
3481 * t4_fixup_host_params_compat - fix up host-dependent parameters
3482 * @adap: the adapter
3483 * @page_size: the host's Base Page Size
3484 * @cache_line_size: the host's Cache Line Size
3485 * @chip_compat: maintain compatibility with designated chip
3486 *
3487 * Various registers in the chip contain values which are dependent on the
3488 * host's Base Page and Cache Line Sizes. This function will fix all of
3489 * those registers with the appropriate values as passed in ...
3490 *
3491 * @chip_compat is used to limit the set of changes that are made
3492 * to be compatible with the indicated chip release. This is used by
3493 * drivers to maintain compatibility with chip register settings when
3494 * the drivers haven't [yet] been updated with new chip support.
3495 */
3496 int t4_fixup_host_params_compat(struct adapter *adap,
3497 unsigned int page_size,
3498 unsigned int cache_line_size,
3499 enum chip_type chip_compat)
3500 {
3501 unsigned int page_shift = cxgbe_fls(page_size) - 1;
3502 unsigned int sge_hps = page_shift - 10;
3503 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3504 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3505 unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;
3506
3507 t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
3508 V_HOSTPAGESIZEPF0(sge_hps) |
3509 V_HOSTPAGESIZEPF1(sge_hps) |
3510 V_HOSTPAGESIZEPF2(sge_hps) |
3511 V_HOSTPAGESIZEPF3(sge_hps) |
3512 V_HOSTPAGESIZEPF4(sge_hps) |
3513 V_HOSTPAGESIZEPF5(sge_hps) |
3514 V_HOSTPAGESIZEPF6(sge_hps) |
3515 V_HOSTPAGESIZEPF7(sge_hps));
3516
3517 if (is_t4(adap->params.chip) || is_t4(chip_compat))
3518 t4_set_reg_field(adap, A_SGE_CONTROL,
3519 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
3520 F_EGRSTATUSPAGESIZE,
3521 V_INGPADBOUNDARY(fl_align_log -
3522 X_INGPADBOUNDARY_SHIFT) |
3523 V_EGRSTATUSPAGESIZE(stat_len != 64));
3524 else {
3525 unsigned int pack_align;
3526 unsigned int ingpad, ingpack;
3527 unsigned int pcie_cap;
3528
3529 /*
3530 * T5 introduced the separation of the Free List Padding and
3531 * Packing Boundaries. Thus, we can select a smaller Padding
3532 * Boundary to avoid uselessly chewing up PCIe Link and Memory
3533 * Bandwidth, and use a Packing Boundary which is large enough
3534 * to avoid false sharing between CPUs, etc.
3535 *
3536 * For the PCI Link, the smaller the Padding Boundary the
3537 * better. For the Memory Controller, a smaller Padding
3538 * Boundary is better until we cross under the Memory Line
3539 * Size (the minimum unit of transfer to/from Memory). If we
3540 * have a Padding Boundary which is smaller than the Memory
3541 * Line Size, that'll involve a Read-Modify-Write cycle on the
3542 * Memory Controller which is never good.
3543 */
3544
3545 /* We want the Packing Boundary to be based on the Cache Line
3546 * Size in order to help avoid False Sharing performance
3547 * issues between CPUs, etc. We also want the Packing
3548 * Boundary to incorporate the PCI-E Maximum Payload Size. We
3549 * get best performance when the Packing Boundary is a
3550 * multiple of the Maximum Payload Size.
3551 */
3552 pack_align = fl_align;
3553 pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
3554 if (pcie_cap) {
3555 unsigned int mps, mps_log;
3556 u16 devctl;
3557
3558 /* The PCIe Device Control Maximum Payload Size field
3559 * [bits 7:5] encodes sizes as powers of 2 starting at
3560 * 128 bytes.
3561 */
3562 t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
3563 &devctl);
3564 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
3565 mps = 1 << mps_log;
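/* For example, a Maximum Payload Size encoding of 2 in
 * bits [7:5] yields mps_log = 9, i.e. a 512-byte payload,
 * which then raises pack_align to 512 below if the cache
 * line size was smaller.
 */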
3566 if (mps > pack_align)
3567 pack_align = mps;
3568 }
3569
3570 /*
3571 * N.B. T5 has a different interpretation of the "0" value for
3572 * the Packing Boundary. This corresponds to 16 bytes instead
3573 * of the expected 32 bytes. We never have a Packing Boundary
3574 * less than 32 bytes so we can't use that special value but
3575 * on the other hand, if we wanted 32 bytes, the best we can
3576 * really do is 64 bytes ...
3577 */
3578 if (pack_align <= 16) {
3579 ingpack = X_INGPACKBOUNDARY_16B;
3580 fl_align = 16;
3581 } else if (pack_align == 32) {
3582 ingpack = X_INGPACKBOUNDARY_64B;
3583 fl_align = 64;
3584 } else {
3585 unsigned int pack_align_log = cxgbe_fls(pack_align) - 1;
3586
3587 ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
3588 fl_align = pack_align;
3589 }
3590
3591 /* Use the smallest Ingress Padding which isn't smaller than
3592 * the Memory Controller Read/Write Size. We'll take that as
3593 * being 8 bytes since we don't know of any system with a
3594 * wider Memory Controller Bus Width.
3595 */
3596 if (is_t5(adap->params.chip))
3597 ingpad = X_INGPADBOUNDARY_32B;
3598 else
3599 ingpad = X_T6_INGPADBOUNDARY_8B;
3600 t4_set_reg_field(adap, A_SGE_CONTROL,
3601 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
3602 F_EGRSTATUSPAGESIZE,
3603 V_INGPADBOUNDARY(ingpad) |
3604 V_EGRSTATUSPAGESIZE(stat_len != 64));
3605 t4_set_reg_field(adap, A_SGE_CONTROL2,
3606 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
3607 V_INGPACKBOUNDARY(ingpack));
3608 }
3609
3610 /*
3611 * Adjust various SGE Free List Host Buffer Sizes.
3612 *
3613 * The first four entries are:
3614 *
3615 * 0: Host Page Size
3616 * 1: 64KB
3617 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3618 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3619 *
3620 * For the single-MTU buffers in unpacked mode we need to include
3621 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3622 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3623 * Padding boundary. All of these are accommodated in the Factory
3624 * Default Firmware Configuration File but we need to adjust it for
3625 * this host's cache line size.
3626 */
3627 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
3628 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
3629 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
3630 & ~(fl_align - 1));
3631 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
3632 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
3633 & ~(fl_align - 1));
3634
3635 t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
3636
3637 return 0;
3638 }
3639
3640 /**
3641 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
3642 * @adap: the adapter
3643 * @page_size: the host's Base Page Size
3644 * @cache_line_size: the host's Cache Line Size
3645 *
3646 * Various registers in T4 contain values which are dependent on the
3647 * host's Base Page and Cache Line Sizes. This function will fix all of
3648 * those registers with the appropriate values as passed in ...
3649 *
3650 * This routine makes changes which are compatible with T4 chips.
3651 */
3652 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3653 unsigned int cache_line_size)
3654 {
3655 return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
3656 T4_LAST_REV);
3657 }
3658
3659 /**
3660 * t4_fw_initialize - ask FW to initialize the device
3661 * @adap: the adapter
3662 * @mbox: mailbox to use for the FW command
3663 *
3664 * Issues a command to FW to partially initialize the device. This
3665 * performs initialization that generally doesn't depend on user input.
3666 */
3667 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3668 {
3669 struct fw_initialize_cmd c;
3670
3671 memset(&c, 0, sizeof(c));
3672 INIT_CMD(c, INITIALIZE, WRITE);
3673 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3674 }
3675
3676 /**
3677 * t4_query_params_rw - query FW or device parameters
3678 * @adap: the adapter
3679 * @mbox: mailbox to use for the FW command
3680 * @pf: the PF
3681 * @vf: the VF
3682 * @nparams: the number of parameters
3683 * @params: the parameter names
3684 * @val: the parameter values
3685 * @rw: Write and read flag
3686 *
3687 * Reads the value of FW or device parameters. Up to 7 parameters can be
3688 * queried at once.
3689 */
3690 static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
3691 unsigned int pf, unsigned int vf,
3692 unsigned int nparams, const u32 *params,
3693 u32 *val, int rw)
3694 {
3695 unsigned int i;
3696 int ret;
3697 struct fw_params_cmd c;
3698 __be32 *p = &c.param[0].mnem;
3699
3700 if (nparams > 7)
3701 return -EINVAL;
3702
3703 memset(&c, 0, sizeof(c));
3704 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3705 F_FW_CMD_REQUEST | F_FW_CMD_READ |
3706 V_FW_PARAMS_CMD_PFN(pf) |
3707 V_FW_PARAMS_CMD_VFN(vf));
3708 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3709
3710 for (i = 0; i < nparams; i++) {
3711 *p++ = cpu_to_be32(*params++);
3712 if (rw)
3713 *p = cpu_to_be32(*(val + i));
3714 p++;
3715 }
3716
3717 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3718 if (ret == 0)
3719 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3720 *val++ = be32_to_cpu(*p);
3721 return ret;
3722 }
3723
3724 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3725 unsigned int vf, unsigned int nparams, const u32 *params,
3726 u32 *val)
3727 {
3728 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
3729 }
3730
3731 /**
3732 * t4_set_params_timeout - sets FW or device parameters
3733 * @adap: the adapter
3734 * @mbox: mailbox to use for the FW command
3735 * @pf: the PF
3736 * @vf: the VF
3737 * @nparams: the number of parameters
3738 * @params: the parameter names
3739 * @val: the parameter values
3740 * @timeout: the timeout time
3741 *
3742 * Sets the value of FW or device parameters. Up to 7 parameters can be
3743 * specified at once.
3744 */
3745 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
3746 unsigned int pf, unsigned int vf,
3747 unsigned int nparams, const u32 *params,
3748 const u32 *val, int timeout)
3749 {
3750 struct fw_params_cmd c;
3751 __be32 *p = &c.param[0].mnem;
3752
3753 if (nparams > 7)
3754 return -EINVAL;
3755
3756 memset(&c, 0, sizeof(c));
3757 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3758 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3759 V_FW_PARAMS_CMD_PFN(pf) |
3760 V_FW_PARAMS_CMD_VFN(vf));
3761 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3762
3763 while (nparams--) {
3764 *p++ = cpu_to_be32(*params++);
3765 *p++ = cpu_to_be32(*val++);
3766 }
3767
3768 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
3769 }
3770
3771 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3772 unsigned int vf, unsigned int nparams, const u32 *params,
3773 const u32 *val)
3774 {
3775 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
3776 FW_CMD_MAX_TIMEOUT);
3777 }
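
/* Illustrative sketch: asking the firmware for 32-bit port capability
 * reporting through the PF/VF parameter space, mirroring the request made
 * in t4_port_init() further below. The wrapper name is hypothetical.
 */
static inline int example_request_caps32(struct adapter *adap, unsigned int mbox,
					 unsigned int pf, unsigned int vf)
{
	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32);
	u32 val = 1;

	return t4_set_params(adap, mbox, pf, vf, 1, &param, &val);
}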
3778
3779 /**
3780 * t4_alloc_vi_func - allocate a virtual interface
3781 * @adap: the adapter
3782 * @mbox: mailbox to use for the FW command
3783 * @port: physical port associated with the VI
3784 * @pf: the PF owning the VI
3785 * @vf: the VF owning the VI
3786 * @nmac: number of MAC addresses needed (1 to 5)
3787 * @mac: the MAC addresses of the VI
3788 * @rss_size: size of RSS table slice associated with this VI
3789 * @portfunc: which Port Application Function MAC Address is desired
3790 * @idstype: Intrusion Detection Type
3791 *
3792 * Allocates a virtual interface for the given physical port. If @mac is
3793 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3794 * @mac should be large enough to hold @nmac Ethernet addresses; they are
3795 * stored consecutively, so the space needed is @nmac * 6 bytes.
3796 * Returns a negative error number or the non-negative VI id.
3797 */
3798 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
3799 unsigned int port, unsigned int pf, unsigned int vf,
3800 unsigned int nmac, u8 *mac, unsigned int *rss_size,
3801 unsigned int portfunc, unsigned int idstype,
3802 u8 *vivld, u8 *vin)
3803 {
3804 int ret;
3805 struct fw_vi_cmd c;
3806
3807 memset(&c, 0, sizeof(c));
3808 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
3809 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
3810 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
3811 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
3812 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
3813 V_FW_VI_CMD_FUNC(portfunc));
3814 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
3815 c.nmac = nmac - 1;
3816
3817 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3818 if (ret)
3819 return ret;
3820
3821 if (mac) {
3822 memcpy(mac, c.mac, sizeof(c.mac));
3823 switch (nmac) {
3824 case 5:
3825 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3826 /* FALLTHROUGH */
3827 case 4:
3828 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3829 /* FALLTHROUGH */
3830 case 3:
3831 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3832 /* FALLTHROUGH */
3833 case 2:
3834 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3835 /* FALLTHROUGH */
3836 }
3837 }
3838 if (rss_size)
3839 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
3840 if (vivld)
3841 *vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16));
3842 if (vin)
3843 *vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16));
3844 	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
3845 }
3846
3847 /**
3848 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
3849 * @adap: the adapter
3850 * @mbox: mailbox to use for the FW command
3851 * @port: physical port associated with the VI
3852 * @pf: the PF owning the VI
3853 * @vf: the VF owning the VI
3854 * @nmac: number of MAC addresses needed (1 to 5)
3855 * @mac: the MAC addresses of the VI
3856 * @rss_size: size of RSS table slice associated with this VI
3857 *
3858 *	Backwards-compatible convenience routine to allocate a Virtual
3859 *	Interface with an Ethernet Port Application Function and Intrusion
3860 *	Detection System disabled.
3861 */
3862 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3863 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3864 unsigned int *rss_size, u8 *vivld, u8 *vin)
3865 {
3866 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
3867 FW_VI_FUNC_ETH, 0, vivld, vin);
3868 }
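
/* Illustrative sketch: allocating an Ethernet VI on physical port 0 and
 * capturing the FW-assigned MAC address and RSS slice size, much as
 * t4_port_init() does per port. The helper name and the bare-bones error
 * handling are assumptions for illustration only.
 */
static inline int example_alloc_eth_vi(struct adapter *adap,
				       struct port_info *pi)
{
	u8 mac[ETHER_ADDR_LEN], vivld = 0, vin = 0;
	unsigned int rss_size = 0;
	int viid;

	viid = t4_alloc_vi(adap, adap->mbox, 0, adap->pf, 0, 1, mac,
			   &rss_size, &vivld, &vin);
	if (viid < 0)
		return viid;

	pi->viid = viid;
	pi->rss_size = rss_size;
	return 0;
}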
3869
3870 /**
3871 * t4_free_vi - free a virtual interface
3872 * @adap: the adapter
3873 * @mbox: mailbox to use for the FW command
3874 * @pf: the PF owning the VI
3875 * @vf: the VF owning the VI
3876 * @viid: virtual interface identifier
3877 *
3878 * Free a previously allocated virtual interface.
3879 */
3880 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
3881 unsigned int vf, unsigned int viid)
3882 {
3883 struct fw_vi_cmd c;
3884
3885 memset(&c, 0, sizeof(c));
3886 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
3887 F_FW_CMD_EXEC);
3888 if (is_pf4(adap))
3889 c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) |
3890 V_FW_VI_CMD_VFN(vf));
3891 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
3892 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
3893
3894 if (is_pf4(adap))
3895 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3896 else
3897 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
3898 }
3899
3900 /**
3901 * t4_set_rxmode - set Rx properties of a virtual interface
3902 * @adap: the adapter
3903 * @mbox: mailbox to use for the FW command
3904 * @viid: the VI id
3905 * @mtu: the new MTU or -1
3906 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3907 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3908 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
3909 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
3910 * -1 no change
3911 * @sleep_ok: if true we may sleep while awaiting command completion
3912 *
3913 * Sets Rx properties of a virtual interface.
3914 */
3915 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
3916 int mtu, int promisc, int all_multi, int bcast, int vlanex,
3917 bool sleep_ok)
3918 {
3919 struct fw_vi_rxmode_cmd c;
3920
3921 /* convert to FW values */
3922 if (mtu < 0)
3923 mtu = M_FW_VI_RXMODE_CMD_MTU;
3924 if (promisc < 0)
3925 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
3926 if (all_multi < 0)
3927 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
3928 if (bcast < 0)
3929 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
3930 if (vlanex < 0)
3931 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
3932
3933 memset(&c, 0, sizeof(c));
3934 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
3935 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3936 V_FW_VI_RXMODE_CMD_VIID(viid));
3937 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3938 c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
3939 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3940 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3941 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3942 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
3943 if (is_pf4(adap))
3944 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL,
3945 sleep_ok);
3946 else
3947 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
3948 }
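
/* Illustrative sketch: enabling promiscuous mode on a VI while leaving
 * MTU, all-multi, broadcast and VLAN extraction unchanged (-1), then
 * turning on Rx/Tx. t4_enable_vi() is defined later in this file; the
 * sketch assumes the usual forward declaration from the driver headers.
 */
static inline int example_enable_promisc(struct adapter *adap,
					 unsigned int viid)
{
	int ret;

	ret = t4_set_rxmode(adap, adap->mbox, viid, -1, 1, -1, -1, -1, 1);
	if (ret)
		return ret;

	return t4_enable_vi(adap, adap->mbox, viid, 1, 1);
}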
3949
3950 /**
3951 * t4_alloc_raw_mac_filt - Adds a raw mac entry in mps tcam
3952 * @adap: the adapter
3953 * @viid: the VI id
3954 * @mac: the MAC address
3955 * @mask: the mask
3956 * @idx: index at which to add this entry
3957 * @port_id: the port index
3958 * @lookup_type: MAC address for inner (1) or outer (0) header
3959 * @sleep_ok: call is allowed to sleep
3960 *
3961 * Adds the mac entry at the specified index using raw mac interface.
3962 *
3963 * Returns a negative error number or the allocated index for this mac.
3964 */
3965 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
3966 const u8 *addr, const u8 *mask, unsigned int idx,
3967 u8 lookup_type, u8 port_id, bool sleep_ok)
3968 {
3969 int ret = 0;
3970 struct fw_vi_mac_cmd c;
3971 struct fw_vi_mac_raw *p = &c.u.raw;
3972 u32 val;
3973
3974 memset(&c, 0, sizeof(c));
3975 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
3976 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3977 V_FW_VI_MAC_CMD_VIID(viid));
3978 val = V_FW_CMD_LEN16(1) |
3979 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
3980 c.freemacs_to_len16 = cpu_to_be32(val);
3981
3982 	/* Raw MPS TCAM index at which to install this entry */
3983 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
3984
3985 /* Lookup Type. Outer header: 0, Inner header: 1 */
3986 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
3987 V_DATAPORTNUM(port_id));
3988 /* Lookup mask and port mask */
3989 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
3990 V_DATAPORTNUM(M_DATAPORTNUM));
3991
3992 /* Copy the address and the mask */
3993 memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
3994 memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
3995
3996 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
3997 if (ret == 0) {
3998 ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
3999 if (ret != (int)idx)
4000 ret = -ENOMEM;
4001 }
4002
4003 return ret;
4004 }
4005
4006 /**
4007 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
4008 * @adap: the adapter
4009 * @viid: the VI id
4010 * @addr: the MAC address
4011 * @mask: the mask
4012 * @idx: index of the entry in mps tcam
4013 * @lookup_type: MAC address for inner (1) or outer (0) header
4014 * @port_id: the port index
4015 * @sleep_ok: call is allowed to sleep
4016 *
4017 * Removes the mac entry at the specified index using raw mac interface.
4018 *
4019 * Returns a negative error number on failure.
4020 */
4021 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
4022 const u8 *addr, const u8 *mask, unsigned int idx,
4023 u8 lookup_type, u8 port_id, bool sleep_ok)
4024 {
4025 struct fw_vi_mac_cmd c;
4026 struct fw_vi_mac_raw *p = &c.u.raw;
4027 u32 raw;
4028
4029 memset(&c, 0, sizeof(c));
4030 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4031 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4032 V_FW_CMD_EXEC(0) |
4033 V_FW_VI_MAC_CMD_VIID(viid));
4034 raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
4035 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0U) |
4036 raw |
4037 V_FW_CMD_LEN16(1));
4038
4039 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
4040 FW_VI_MAC_ID_BASED_FREE);
4041
4042 /* Lookup Type. Outer header: 0, Inner header: 1 */
4043 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
4044 V_DATAPORTNUM(port_id));
4045 /* Lookup mask and port mask */
4046 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
4047 V_DATAPORTNUM(M_DATAPORTNUM));
4048
4049 /* Copy the address and the mask */
4050 memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
4051 memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
4052
4053 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
4054 }
4055
4056 /**
4057 * t4_change_mac - modifies the exact-match filter for a MAC address
4058 * @adap: the adapter
4059 * @mbox: mailbox to use for the FW command
4060 * @viid: the VI id
4061 * @idx: index of existing filter for old value of MAC address, or -1
4062 * @addr: the new MAC address value
4063 * @persist: whether a new MAC allocation should be persistent
4064 * @add_smt: if true also add the address to the HW SMT
4065 *
4066 * Modifies an exact-match filter and sets it to the new MAC address if
4067 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
4068 * latter case the address is added persistently if @persist is %true.
4069 *
4070 * Note that in general it is not possible to modify the value of a given
4071 * filter so the generic way to modify an address filter is to free the one
4072 * being used by the old address value and allocate a new filter for the
4073 * new address value.
4074 *
4075 * Returns a negative error number or the index of the filter with the new
4076 * MAC value. Note that this index may differ from @idx.
4077 */
4078 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4079 int idx, const u8 *addr, bool persist, bool add_smt)
4080 {
4081 int ret, mode;
4082 struct fw_vi_mac_cmd c;
4083 struct fw_vi_mac_exact *p = c.u.exact;
4084 int max_mac_addr = adap->params.arch.mps_tcam_size;
4085
4086 if (idx < 0) /* new allocation */
4087 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4088 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4089
4090 memset(&c, 0, sizeof(c));
4091 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4092 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4093 V_FW_VI_MAC_CMD_VIID(viid));
4094 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
4095 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
4096 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4097 V_FW_VI_MAC_CMD_IDX(idx));
4098 memcpy(p->macaddr, addr, sizeof(p->macaddr));
4099
4100 if (is_pf4(adap))
4101 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4102 else
4103 ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
4104 if (ret == 0) {
4105 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
4106 if (ret >= max_mac_addr)
4107 ret = -ENOMEM;
4108 }
4109 return ret;
4110 }
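
/* Illustrative sketch: adding a new persistent exact-match MAC filter for
 * a VI (idx = -1 requests a fresh entry) without touching the SMT. The
 * return value is the index the hardware actually used, which a caller
 * would typically cache for a later replacement. Helper name is assumed.
 */
static inline int example_add_persistent_mac(struct adapter *adap,
					     unsigned int viid, const u8 *addr)
{
	return t4_change_mac(adap, adap->mbox, viid, -1, addr, 1, 0);
}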
4111
4112 /**
4113 * t4_enable_vi_params - enable/disable a virtual interface
4114 * @adap: the adapter
4115 * @mbox: mailbox to use for the FW command
4116 * @viid: the VI id
4117 * @rx_en: 1=enable Rx, 0=disable Rx
4118 * @tx_en: 1=enable Tx, 0=disable Tx
4119 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
4120 *
4121 * Enables/disables a virtual interface. Note that setting DCB Enable
4122 * only makes sense when enabling a Virtual Interface ...
4123 */
4124 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
4125 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
4126 {
4127 struct fw_vi_enable_cmd c;
4128
4129 memset(&c, 0, sizeof(c));
4130 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
4131 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4132 V_FW_VI_ENABLE_CMD_VIID(viid));
4133 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4134 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
4135 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
4136 FW_LEN16(c));
4137 if (is_pf4(adap))
4138 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
4139 else
4140 return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL);
4141 }
4142
4143 /**
4144 * t4_enable_vi - enable/disable a virtual interface
4145 * @adap: the adapter
4146 * @mbox: mailbox to use for the FW command
4147 * @viid: the VI id
4148 * @rx_en: 1=enable Rx, 0=disable Rx
4149 * @tx_en: 1=enable Tx, 0=disable Tx
4150 *
4151 * Enables/disables a virtual interface. Note that setting DCB Enable
4152 * only makes sense when enabling a Virtual Interface ...
4153 */
4154 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4155 bool rx_en, bool tx_en)
4156 {
4157 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
4158 }
4159
4160 /**
4161 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
4162 * @adap: the adapter
4163 * @mbox: mailbox to use for the FW command
4164 * @start: %true to enable the queues, %false to disable them
4165 * @pf: the PF owning the queues
4166 * @vf: the VF owning the queues
4167 * @iqid: ingress queue id
4168 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4169 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4170 *
4171 * Starts or stops an ingress queue and its associated FLs, if any.
4172 */
4173 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4174 unsigned int pf, unsigned int vf, unsigned int iqid,
4175 unsigned int fl0id, unsigned int fl1id)
4176 {
4177 struct fw_iq_cmd c;
4178
4179 memset(&c, 0, sizeof(c));
4180 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4181 F_FW_CMD_EXEC);
4182 c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
4183 V_FW_IQ_CMD_IQSTOP(!start) |
4184 FW_LEN16(c));
4185 c.iqid = cpu_to_be16(iqid);
4186 c.fl0id = cpu_to_be16(fl0id);
4187 c.fl1id = cpu_to_be16(fl1id);
4188 if (is_pf4(adap)) {
4189 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4190 V_FW_IQ_CMD_VFN(vf));
4191 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4192 } else {
4193 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4194 }
4195 }
4196
4197 /**
4198 * t4_iq_free - free an ingress queue and its FLs
4199 * @adap: the adapter
4200 * @mbox: mailbox to use for the FW command
4201 * @pf: the PF owning the queues
4202 * @vf: the VF owning the queues
4203 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4204 * @iqid: ingress queue id
4205 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4206 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4207 *
4208 * Frees an ingress queue and its associated FLs, if any.
4209 */
4210 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4211 unsigned int vf, unsigned int iqtype, unsigned int iqid,
4212 unsigned int fl0id, unsigned int fl1id)
4213 {
4214 struct fw_iq_cmd c;
4215
4216 memset(&c, 0, sizeof(c));
4217 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4218 F_FW_CMD_EXEC);
4219 if (is_pf4(adap))
4220 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4221 V_FW_IQ_CMD_VFN(vf));
4222 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4223 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
4224 c.iqid = cpu_to_be16(iqid);
4225 c.fl0id = cpu_to_be16(fl0id);
4226 c.fl1id = cpu_to_be16(fl1id);
4227 if (is_pf4(adap))
4228 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4229 else
4230 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4231 }
4232
4233 /**
4234 * t4_eth_eq_free - free an Ethernet egress queue
4235 * @adap: the adapter
4236 * @mbox: mailbox to use for the FW command
4237 * @pf: the PF owning the queue
4238 * @vf: the VF owning the queue
4239 * @eqid: egress queue id
4240 *
4241 * Frees an Ethernet egress queue.
4242 */
4243 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4244 unsigned int vf, unsigned int eqid)
4245 {
4246 struct fw_eq_eth_cmd c;
4247
4248 memset(&c, 0, sizeof(c));
4249 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
4250 F_FW_CMD_REQUEST | F_FW_CMD_EXEC);
4251 if (is_pf4(adap))
4252 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4253 V_FW_IQ_CMD_VFN(vf));
4254 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4255 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
4256 if (is_pf4(adap))
4257 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4258 else
4259 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4260 }
4261
4262 /**
4263 * t4_link_down_rc_str - return a string for a Link Down Reason Code
4264 * @link_down_rc: Link Down Reason Code
4265 *
4266 * Returns a string representation of the Link Down Reason Code.
4267 */
4268 static const char *t4_link_down_rc_str(unsigned char link_down_rc)
4269 {
4270 static const char * const reason[] = {
4271 "Link Down",
4272 "Remote Fault",
4273 "Auto-negotiation Failure",
4274 "Reserved",
4275 "Insufficient Airflow",
4276 "Unable To Determine Reason",
4277 "No RX Signal Detected",
4278 "Reserved",
4279 };
4280
4281 if (link_down_rc >= ARRAY_SIZE(reason))
4282 return "Bad Reason Code";
4283
4284 return reason[link_down_rc];
4285 }
4286
4287 static u32 t4_speed_to_fwcap(u32 speed)
4288 {
4289 switch (speed) {
4290 case 100000:
4291 return FW_PORT_CAP32_SPEED_100G;
4292 case 50000:
4293 return FW_PORT_CAP32_SPEED_50G;
4294 case 40000:
4295 return FW_PORT_CAP32_SPEED_40G;
4296 case 25000:
4297 return FW_PORT_CAP32_SPEED_25G;
4298 case 10000:
4299 return FW_PORT_CAP32_SPEED_10G;
4300 case 1000:
4301 return FW_PORT_CAP32_SPEED_1G;
4302 case 100:
4303 return FW_PORT_CAP32_SPEED_100M;
4304 default:
4305 break;
4306 }
4307
4308 return 0;
4309 }
4310
4311 /* Return the highest speed set in the port capabilities, in Mb/s. */
4312 unsigned int t4_fwcap_to_speed(u32 caps)
4313 {
4314 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
4315 do { \
4316 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
4317 return __speed; \
4318 } while (0)
4319
4320 TEST_SPEED_RETURN(100G, 100000);
4321 TEST_SPEED_RETURN(50G, 50000);
4322 TEST_SPEED_RETURN(40G, 40000);
4323 TEST_SPEED_RETURN(25G, 25000);
4324 TEST_SPEED_RETURN(10G, 10000);
4325 TEST_SPEED_RETURN(1G, 1000);
4326 TEST_SPEED_RETURN(100M, 100);
4327
4328 #undef TEST_SPEED_RETURN
4329
4330 return 0;
4331 }
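
/* Illustrative sketch: the two conversions above are near-inverses. For a
 * capability word advertising 25G and 10G, t4_fwcap_to_speed() reports the
 * highest speed (25000 Mb/s), and feeding that back through
 * t4_speed_to_fwcap() recovers only the 25G bit.
 */
static inline void example_speed_roundtrip(void)
{
	u32 caps = FW_PORT_CAP32_SPEED_25G | FW_PORT_CAP32_SPEED_10G;
	unsigned int mbps = t4_fwcap_to_speed(caps);	/* 25000 */
	u32 best = t4_speed_to_fwcap(mbps);		/* FW_PORT_CAP32_SPEED_25G */

	(void)best;
}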
4332
4333 static void t4_set_link_autoneg_speed(struct port_info *pi, u32 *new_caps)
4334 {
4335 struct link_config *lc = &pi->link_cfg;
4336 u32 caps = *new_caps;
4337
4338 caps &= ~V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
4339 caps |= G_FW_PORT_CAP32_SPEED(lc->acaps);
4340
4341 *new_caps = caps;
4342 }
4343
4344 int t4_set_link_speed(struct port_info *pi, u32 speed, u32 *new_caps)
4345 {
4346 u32 fw_speed_cap = t4_speed_to_fwcap(speed);
4347 struct link_config *lc = &pi->link_cfg;
4348 u32 caps = *new_caps;
4349
4350 if (!(lc->pcaps & fw_speed_cap))
4351 return -EOPNOTSUPP;
4352
4353 caps &= ~V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
4354 caps |= fw_speed_cap;
4355
4356 *new_caps = caps;
4357
4358 return 0;
4359 }
4360
4361 int t4_set_link_pause(struct port_info *pi, u8 autoneg, u8 pause_tx,
4362 u8 pause_rx, u32 *new_caps)
4363 {
4364 struct link_config *lc = &pi->link_cfg;
4365 u32 caps = *new_caps;
4366 u32 max_speed;
4367
4368 max_speed = t4_fwcap_to_speed(lc->link_caps);
4369
4370 if (autoneg) {
4371 if (!(lc->pcaps & FW_PORT_CAP32_ANEG))
4372 return -EINVAL;
4373
4374 caps |= FW_PORT_CAP32_ANEG;
4375 t4_set_link_autoneg_speed(pi, &caps);
4376 } else {
4377 if (!max_speed)
4378 max_speed = t4_fwcap_to_speed(lc->acaps);
4379
4380 caps &= ~FW_PORT_CAP32_ANEG;
4381 t4_set_link_speed(pi, max_speed, &caps);
4382 }
4383
4384 if (lc->pcaps & FW_PORT_CAP32_MDIAUTO)
4385 caps |= V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
4386
4387 caps &= ~V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC);
4388 caps &= ~V_FW_PORT_CAP32_802_3(M_FW_PORT_CAP32_802_3);
4389 if (pause_tx && pause_rx) {
4390 caps |= FW_PORT_CAP32_FC_TX | FW_PORT_CAP32_FC_RX;
4391 if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE)
4392 caps |= FW_PORT_CAP32_802_3_PAUSE;
4393 } else if (pause_tx) {
4394 caps |= FW_PORT_CAP32_FC_TX;
4395 if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
4396 caps |= FW_PORT_CAP32_802_3_ASM_DIR;
4397 } else if (pause_rx) {
4398 caps |= FW_PORT_CAP32_FC_RX;
4399 if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE)
4400 caps |= FW_PORT_CAP32_802_3_PAUSE;
4401
4402 if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
4403 caps |= FW_PORT_CAP32_802_3_ASM_DIR;
4404 }
4405
4406 *new_caps = caps;
4407
4408 return 0;
4409 }
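
/* Illustrative sketch: building a capability word that fixes the link at
 * its current best speed with symmetric pause and autoneg disabled. Only
 * the capability computation is shown; applying it (e.g. through
 * t4_link_l1cfg_ns() as done elsewhere in this file) is left out.
 */
static inline int example_force_symmetric_pause(struct port_info *pi,
						u32 *new_caps)
{
	u32 caps = pi->link_cfg.admin_caps;
	int ret;

	ret = t4_set_link_pause(pi, 0, 1, 1, &caps);
	if (ret)
		return ret;

	*new_caps = caps;
	return 0;
}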
4410
4411 int t4_set_link_fec(struct port_info *pi, u8 fec_rs, u8 fec_baser,
4412 u8 fec_none, u32 *new_caps)
4413 {
4414 struct link_config *lc = &pi->link_cfg;
4415 u32 max_speed, caps = *new_caps;
4416
4417 if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
4418 return -EOPNOTSUPP;
4419
4420 /* Link might be down. In that case consider the max
4421 * speed advertised
4422 */
4423 max_speed = t4_fwcap_to_speed(lc->link_caps);
4424 if (!max_speed)
4425 max_speed = t4_fwcap_to_speed(lc->acaps);
4426
4427 caps &= ~V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC);
4428 if (fec_rs) {
4429 switch (max_speed) {
4430 case 100000:
4431 case 25000:
4432 caps |= FW_PORT_CAP32_FEC_RS;
4433 break;
4434 default:
4435 return -EOPNOTSUPP;
4436 }
4437 }
4438
4439 if (fec_baser) {
4440 switch (max_speed) {
4441 case 50000:
4442 case 25000:
4443 caps |= FW_PORT_CAP32_FEC_BASER_RS;
4444 break;
4445 default:
4446 return -EOPNOTSUPP;
4447 }
4448 }
4449
4450 if (fec_none)
4451 caps |= FW_PORT_CAP32_FEC_NO_FEC;
4452
4453 if (!(caps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC))) {
4454 /* No explicit encoding is requested.
4455 * So, default back to AUTO.
4456 */
4457 switch (max_speed) {
4458 case 100000:
4459 caps |= FW_PORT_CAP32_FEC_RS |
4460 FW_PORT_CAP32_FEC_NO_FEC;
4461 break;
4462 case 50000:
4463 caps |= FW_PORT_CAP32_FEC_BASER_RS |
4464 FW_PORT_CAP32_FEC_NO_FEC;
4465 break;
4466 case 25000:
4467 caps |= FW_PORT_CAP32_FEC_RS |
4468 FW_PORT_CAP32_FEC_BASER_RS |
4469 FW_PORT_CAP32_FEC_NO_FEC;
4470 break;
4471 default:
4472 return -EOPNOTSUPP;
4473 }
4474 }
4475
4476 *new_caps = caps;
4477
4478 return 0;
4479 }
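
/* Illustrative sketch: requesting RS FEC in a new capability word. As
 * documented above, -EOPNOTSUPP comes back if RS FEC isn't valid at the
 * link's current (or, if down, advertised) speed. Helper name is assumed.
 */
static inline int example_request_rs_fec(struct port_info *pi, u32 *new_caps)
{
	u32 caps = pi->link_cfg.admin_caps;
	int ret;

	ret = t4_set_link_fec(pi, 1, 0, 0, &caps);
	if (ret)
		return ret;

	*new_caps = caps;
	return 0;
}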
4480
4481 /**
4482 * t4_handle_get_port_info - process a FW reply message
4483 * @pi: the port info
4484 * @rpl: start of the FW message
4485 *
4486 * Processes a GET_PORT_INFO FW reply message.
4487 */
4488 static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
4489 {
4490 const struct fw_port_cmd *cmd = (const void *)rpl;
4491 u8 link_ok, link_down_rc, mod_type, port_type;
4492 u32 action, pcaps, acaps, link_caps, lstatus;
4493 struct link_config *lc = &pi->link_cfg;
4494 struct adapter *adapter = pi->adapter;
4495 u8 mod_changed = 0;
4496
4497 /* Extract the various fields from the Port Information message.
4498 */
4499 action = be32_to_cpu(cmd->action_to_len16);
4500 if (G_FW_PORT_CMD_ACTION(action) != FW_PORT_ACTION_GET_PORT_INFO32) {
4501 dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n",
4502 action);
4503 return;
4504 }
4505
4506 lstatus = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
4507 link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS32) ? 1 : 0;
4508 link_down_rc = G_FW_PORT_CMD_LINKDNRC32(lstatus);
4509 port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus);
4510 mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus);
4511
4512 pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
4513 acaps = be32_to_cpu(cmd->u.info32.acaps32);
4514 link_caps = be32_to_cpu(cmd->u.info32.linkattr32);
4515
4516 if (mod_type != lc->mod_type) {
4517 t4_init_link_config(pi, pcaps, acaps, lc->mdio_addr,
4518 port_type, mod_type);
4519 t4_os_portmod_changed(adapter, pi->pidx);
4520 mod_changed = 1;
4521 }
4522 if (link_ok != lc->link_ok || acaps != lc->acaps ||
4523 link_caps != lc->link_caps) { /* something changed */
4524 if (!link_ok && lc->link_ok) {
4525 lc->link_down_rc = link_down_rc;
4526 			dev_warn(adapter, "Port %d link down, reason: %s\n",
4527 pi->port_id,
4528 t4_link_down_rc_str(link_down_rc));
4529 }
4530 lc->link_ok = link_ok;
4531 lc->acaps = acaps;
4532 lc->link_caps = link_caps;
4533 t4_os_link_changed(adapter, pi->pidx);
4534 }
4535
4536 if (mod_changed != 0 && is_pf4(adapter) != 0) {
4537 u32 mod_caps = lc->admin_caps;
4538 int ret;
4539
4540 ret = t4_link_l1cfg_ns(pi, mod_caps);
4541 if (ret != FW_SUCCESS)
4542 dev_warn(adapter,
4543 "Attempt to update new Transceiver Module settings %#x failed with error: %d\n",
4544 mod_caps, ret);
4545 }
4546 }
4547
4548 /**
4549 * t4_ctrl_eq_free - free a control egress queue
4550 * @adap: the adapter
4551 * @mbox: mailbox to use for the FW command
4552 * @pf: the PF owning the queue
4553 * @vf: the VF owning the queue
4554 * @eqid: egress queue id
4555 *
4556 * Frees a control egress queue.
4557 */
4558 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4559 unsigned int vf, unsigned int eqid)
4560 {
4561 struct fw_eq_ctrl_cmd c;
4562
4563 memset(&c, 0, sizeof(c));
4564 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
4565 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4566 V_FW_EQ_CTRL_CMD_PFN(pf) |
4567 V_FW_EQ_CTRL_CMD_VFN(vf));
4568 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
4569 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
4570 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4571 }
4572
4573 /**
4574 * t4_handle_fw_rpl - process a FW reply message
4575 * @adap: the adapter
4576 * @rpl: start of the FW message
4577 *
4578 * Processes a FW message, such as link state change messages.
4579 */
4580 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
4581 {
4582 u8 opcode = *(const u8 *)rpl;
4583
4584 /*
4585 * This might be a port command ... this simplifies the following
4586 * conditionals ... We can get away with pre-dereferencing
4587 * action_to_len16 because it's in the first 16 bytes and all messages
4588 * will be at least that long.
4589 */
4590 const struct fw_port_cmd *p = (const void *)rpl;
4591 unsigned int action =
4592 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
4593
4594 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO32) {
4595 /* link/module state change message */
4596 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
4597 struct port_info *pi = NULL;
4598 int i;
4599
4600 for_each_port(adap, i) {
4601 pi = adap2pinfo(adap, i);
4602 if (pi->tx_chan == chan)
4603 break;
4604 }
4605
4606 t4_handle_get_port_info(pi, rpl);
4607 } else {
4608 dev_warn(adap, "Unknown firmware reply %d\n", opcode);
4609 return -EINVAL;
4610 }
4611 return 0;
4612 }
4613
4614 void t4_reset_link_config(struct adapter *adap, int idx)
4615 {
4616 struct port_info *pi = adap2pinfo(adap, idx);
4617 struct link_config *lc = &pi->link_cfg;
4618
4619 lc->link_ok = 0;
4620 lc->link_down_rc = 0;
4621 lc->link_caps = 0;
4622 }
4623
4624 /**
4625 * t4_init_link_config - initialize a link's SW state
4626 * @pi: the port info
4627 * @pcaps: link Port Capabilities
4628 * @acaps: link current Advertised Port Capabilities
4629 * @mdio_addr : address of the PHY
4630 * @port_type : firmware port type
4631 * @mod_type : firmware module type
4632 *
4633 * Initializes the SW state maintained for each link, including the link's
4634 * capabilities and default speed/flow-control/autonegotiation settings.
4635 */
4636 void t4_init_link_config(struct port_info *pi, u32 pcaps, u32 acaps,
4637 u8 mdio_addr, u8 port_type, u8 mod_type)
4638 {
4639 u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
4640 struct link_config *lc = &pi->link_cfg;
4641
4642 lc->pcaps = pcaps;
4643 lc->acaps = acaps;
4644 lc->admin_caps = acaps;
4645 lc->link_caps = 0;
4646
4647 lc->mdio_addr = mdio_addr;
4648 lc->port_type = port_type;
4649 lc->mod_type = mod_type;
4650
4651 lc->link_ok = 0;
4652 lc->link_down_rc = 0;
4653
4654 /* Turn Tx and Rx pause off by default */
4655 lc->admin_caps &= ~V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC);
4656 lc->admin_caps &= ~V_FW_PORT_CAP32_802_3(M_FW_PORT_CAP32_802_3);
4657 if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
4658 lc->admin_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
4659
4660 /* Reset FEC caps to default values */
4661 if (lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)) {
4662 if (lc->acaps & FW_PORT_CAP32_FEC_RS)
4663 fec_rs = 1;
4664 else if (lc->acaps & FW_PORT_CAP32_FEC_BASER_RS)
4665 fec_baser = 1;
4666 else
4667 fec_none = 1;
4668
4669 lc->admin_caps &= ~V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC);
4670 t4_set_link_fec(pi, fec_rs, fec_baser, fec_none,
4671 &lc->admin_caps);
4672 }
4673
4674 if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
4675 lc->admin_caps &= ~FW_PORT_CAP32_FORCE_FEC;
4676
4677 /* Reset MDI to AUTO */
4678 if (lc->pcaps & FW_PORT_CAP32_MDIAUTO) {
4679 lc->admin_caps &= ~V_FW_PORT_CAP32_MDI(M_FW_PORT_CAP32_MDI);
4680 lc->admin_caps |= V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
4681 }
4682 }
4683
4684 /**
4685 * t4_wait_dev_ready - wait until reads of registers work
4686 *
4687 * Right after the device is RESET it can take a small amount of time
4688 * for it to respond to register reads. Until then, all reads will
4689 * return either 0xff...ff or 0xee...ee. Return an error if reads
4690 * don't work within a reasonable time frame.
4691 */
4692 static int t4_wait_dev_ready(struct adapter *adapter)
4693 {
4694 u32 whoami;
4695
4696 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4697
4698 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4699 return 0;
4700
4701 msleep(500);
4702 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4703 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4704 return 0;
4705
4706 dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
4707 whoami);
4708 return -EIO;
4709 }
4710
4711 struct flash_desc {
4712 u32 vendor_and_model_id;
4713 u32 size_mb;
4714 };
4715
4716 int t4_get_flash_params(struct adapter *adapter)
4717 {
4718 /*
4719 * Table for non-standard supported Flash parts. Note, all Flash
4720 * parts must have 64KB sectors.
4721 */
4722 static struct flash_desc supported_flash[] = {
4723 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
4724 };
4725
4726 int ret;
4727 u32 flashid = 0;
4728 unsigned int part, manufacturer;
4729 unsigned int density, size = 0;
4730
4731 /**
4732 * Issue a Read ID Command to the Flash part. We decode supported
4733 * Flash parts and their sizes from this. There's a newer Query
4734 * Command which can retrieve detailed geometry information but
4735 * many Flash parts don't support it.
4736 */
4737 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
4738 if (!ret)
4739 ret = sf1_read(adapter, 3, 0, 1, &flashid);
4740 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
4741 if (ret < 0)
4742 return ret;
4743
4744 /**
4745 * Check to see if it's one of our non-standard supported Flash parts.
4746 */
4747 for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
4748 if (supported_flash[part].vendor_and_model_id == flashid) {
4749 adapter->params.sf_size =
4750 supported_flash[part].size_mb;
4751 adapter->params.sf_nsec =
4752 adapter->params.sf_size / SF_SEC_SIZE;
4753 goto found;
4754 }
4755 }
4756
4757 /**
4758 	 * Decode Flash part size. The code below looks repetitive with
4759 	 * common encodings, but that's not guaranteed in the JEDEC
4760 	 * specification for the Read JEDEC ID command. The only thing that
4761 	 * we're guaranteed by the JEDEC specification is where the
4762 * Manufacturer ID is in the returned result. After that each
4763 * Manufacturer ~could~ encode things completely differently.
4764 * Note, all Flash parts must have 64KB sectors.
4765 */
4766 manufacturer = flashid & 0xff;
4767 switch (manufacturer) {
4768 case 0x20: { /* Micron/Numonix */
4769 /**
4770 * This Density -> Size decoding table is taken from Micron
4771 * Data Sheets.
4772 */
4773 density = (flashid >> 16) & 0xff;
4774 switch (density) {
4775 case 0x14:
4776 size = 1 << 20; /* 1MB */
4777 break;
4778 case 0x15:
4779 size = 1 << 21; /* 2MB */
4780 break;
4781 case 0x16:
4782 size = 1 << 22; /* 4MB */
4783 break;
4784 case 0x17:
4785 size = 1 << 23; /* 8MB */
4786 break;
4787 case 0x18:
4788 size = 1 << 24; /* 16MB */
4789 break;
4790 case 0x19:
4791 size = 1 << 25; /* 32MB */
4792 break;
4793 case 0x20:
4794 size = 1 << 26; /* 64MB */
4795 break;
4796 case 0x21:
4797 size = 1 << 27; /* 128MB */
4798 break;
4799 case 0x22:
4800 size = 1 << 28; /* 256MB */
4801 break;
4802 }
4803 break;
4804 }
4805
4806 case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
4807 /**
4808 * This Density -> Size decoding table is taken from ISSI
4809 * Data Sheets.
4810 */
4811 density = (flashid >> 16) & 0xff;
4812 switch (density) {
4813 case 0x16:
4814 size = 1 << 25; /* 32MB */
4815 break;
4816 case 0x17:
4817 size = 1 << 26; /* 64MB */
4818 break;
4819 }
4820 break;
4821 }
4822
4823 case 0xc2: { /* Macronix */
4824 /**
4825 * This Density -> Size decoding table is taken from Macronix
4826 * Data Sheets.
4827 */
4828 density = (flashid >> 16) & 0xff;
4829 switch (density) {
4830 case 0x17:
4831 size = 1 << 23; /* 8MB */
4832 break;
4833 case 0x18:
4834 size = 1 << 24; /* 16MB */
4835 break;
4836 }
4837 break;
4838 }
4839
4840 case 0xef: { /* Winbond */
4841 /**
4842 * This Density -> Size decoding table is taken from Winbond
4843 * Data Sheets.
4844 */
4845 density = (flashid >> 16) & 0xff;
4846 switch (density) {
4847 case 0x17:
4848 size = 1 << 23; /* 8MB */
4849 break;
4850 case 0x18:
4851 size = 1 << 24; /* 16MB */
4852 break;
4853 }
4854 break;
4855 }
4856 }
4857
4858 /* If we didn't recognize the FLASH part, that's no real issue: the
4859 * Hardware/Software contract says that Hardware will _*ALWAYS*_
4860 * use a FLASH part which is at least 4MB in size and has 64KB
4861 * sectors. The unrecognized FLASH part is likely to be much larger
4862 * than 4MB, but that's all we really need.
4863 */
4864 if (size == 0) {
4865 dev_warn(adapter,
4866 "Unknown Flash Part, ID = %#x, assuming 4MB\n",
4867 flashid);
4868 size = 1 << 22;
4869 }
4870
4871 /**
4872 * Store decoded Flash size and fall through into vetting code.
4873 */
4874 adapter->params.sf_size = size;
4875 adapter->params.sf_nsec = size / SF_SEC_SIZE;
4876
4877 found:
4878 /*
4879 	 * Ideally we'd reject adapters with FLASHes that are too small; for
4880 	 * now, just emit a warning.
4881 */
4882 if (adapter->params.sf_size < FLASH_MIN_SIZE)
4883 dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
4884 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
4885
4886 return 0;
4887 }
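
/* Worked example of the Read ID decode above: a flashid of 0x001840ef
 * (illustrative value, not taken from a specific board) has Manufacturer
 * ID 0xef (Winbond) in the low byte and density code 0x18 in bits 23:16,
 * which the table maps to a 16MB part: sf_size = 1 << 24 bytes and
 * sf_nsec = 256 sectors of 64KB.
 */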
4888
4889 static void set_pcie_completion_timeout(struct adapter *adapter,
4890 u8 range)
4891 {
4892 u32 pcie_cap;
4893 u16 val;
4894
4895 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4896 if (pcie_cap) {
4897 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
4898 val &= 0xfff0;
4899 val |= range;
4900 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
4901 }
4902 }
4903
4904 /**
4905 * t4_get_chip_type - Determine chip type from device ID
4906 * @adap: the adapter
4907 * @ver: adapter version
4908 */
4909 int t4_get_chip_type(struct adapter *adap, int ver)
4910 {
4911 enum chip_type chip = 0;
4912 u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
4913
4914 /* Retrieve adapter's device ID */
4915 switch (ver) {
4916 case CHELSIO_T5:
4917 chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4918 break;
4919 case CHELSIO_T6:
4920 chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
4921 break;
4922 default:
4923 dev_err(adap, "Device %d is not supported\n",
4924 adap->params.pci.device_id);
4925 return -EINVAL;
4926 }
4927
4928 return chip;
4929 }
4930
4931 /**
4932 * t4_prep_adapter - prepare SW and HW for operation
4933 * @adapter: the adapter
4934 *
4935 * Initialize adapter SW state for the various HW modules, set initial
4936 * values for some adapter tunables, take PHYs out of reset, and
4937 * initialize the MDIO interface.
4938 */
4939 int t4_prep_adapter(struct adapter *adapter)
4940 {
4941 int ret, ver;
4942 u32 pl_rev;
4943
4944 ret = t4_wait_dev_ready(adapter);
4945 if (ret < 0)
4946 return ret;
4947
4948 pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
4949 adapter->params.pci.device_id = adapter->pdev->id.device_id;
4950 adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
4951
4952 /*
4953 * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
4954 * ADAPTER (VERSION << 4 | REVISION)
4955 */
4956 ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
4957 adapter->params.chip = 0;
4958 switch (ver) {
4959 case CHELSIO_T5:
4960 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4961 adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
4962 adapter->params.arch.mps_tcam_size =
4963 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4964 adapter->params.arch.mps_rplc_size = 128;
4965 adapter->params.arch.nchan = NCHAN;
4966 adapter->params.arch.vfcount = 128;
4967 /* Congestion map is for 4 channels so that
4968 		 * MPS can have 4 priorities per port.
4969 */
4970 adapter->params.arch.cng_ch_bits_log = 2;
4971 break;
4972 case CHELSIO_T6:
4973 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
4974 adapter->params.arch.sge_fl_db = 0;
4975 adapter->params.arch.mps_tcam_size =
4976 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4977 adapter->params.arch.mps_rplc_size = 256;
4978 adapter->params.arch.nchan = 2;
4979 adapter->params.arch.vfcount = 256;
4980 /* Congestion map is for 2 channels so that
4981 		 * MPS can have 8 priorities per port.
4982 */
4983 adapter->params.arch.cng_ch_bits_log = 3;
4984 break;
4985 default:
4986 dev_err(adapter, "%s: Device %d is not supported\n",
4987 __func__, adapter->params.pci.device_id);
4988 return -EINVAL;
4989 }
4990
4991 adapter->params.pci.vpd_cap_addr =
4992 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4993
4994 ret = t4_get_flash_params(adapter);
4995 if (ret < 0) {
4996 dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
4997 -ret);
4998 return ret;
4999 }
5000
5001 adapter->params.cim_la_size = CIMLA_SIZE;
5002
5003 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5004
5005 /*
5006 * Default port and clock for debugging in case we can't reach FW.
5007 */
5008 adapter->params.nports = 1;
5009 adapter->params.portvec = 1;
5010 adapter->params.vpd.cclk = 50000;
5011
5012 /* Set pci completion timeout value to 4 seconds. */
5013 set_pcie_completion_timeout(adapter, 0xd);
5014 return 0;
5015 }
5016
5017 /**
5018 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
5019 * @adapter: the adapter
5020 * @qid: the Queue ID
5021 * @qtype: the Ingress or Egress type for @qid
5022 * @pbar2_qoffset: BAR2 Queue Offset
5023 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
5024 *
5025 * Returns the BAR2 SGE Queue Registers information associated with the
5026 * indicated Absolute Queue ID. These are passed back in return value
5027 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
5028 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
5029 *
5030 * This may return an error which indicates that BAR2 SGE Queue
5031 * registers aren't available. If an error is not returned, then the
5032 * following values are returned:
5033 *
5034 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
5035 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
5036 *
5037 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
5038 * require the "Inferred Queue ID" ability may be used. E.g. the
5039 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
5040 *	then these "Inferred Queue ID" registers may not be used.
5041 */
5042 int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
5043 enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
5044 unsigned int *pbar2_qid)
5045 {
5046 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
5047 u64 bar2_page_offset, bar2_qoffset;
5048 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
5049
5050 /*
5051 * T4 doesn't support BAR2 SGE Queue registers.
5052 */
5053 if (is_t4(adapter->params.chip))
5054 return -EINVAL;
5055
5056 /*
5057 * Get our SGE Page Size parameters.
5058 */
5059 page_shift = adapter->params.sge.hps + 10;
5060 page_size = 1 << page_shift;
5061
5062 /*
5063 * Get the right Queues per Page parameters for our Queue.
5064 */
5065 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
5066 adapter->params.sge.eq_qpp :
5067 adapter->params.sge.iq_qpp);
5068 qpp_mask = (1 << qpp_shift) - 1;
5069
5070 /*
5071 * Calculate the basics of the BAR2 SGE Queue register area:
5072 * o The BAR2 page the Queue registers will be in.
5073 * o The BAR2 Queue ID.
5074 * o The BAR2 Queue ID Offset into the BAR2 page.
5075 */
5076 bar2_page_offset = ((qid >> qpp_shift) << page_shift);
5077 bar2_qid = qid & qpp_mask;
5078 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
5079
5080 /*
5081 * If the BAR2 Queue ID Offset is less than the Page Size, then the
5082 * hardware will infer the Absolute Queue ID simply from the writes to
5083 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
5084 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
5085 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
5086 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
5087 * from the BAR2 Page and BAR2 Queue ID.
5088 *
5089 	 * One important consequence of this is that some BAR2 SGE registers
5090 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
5091 * there. But other registers synthesize the SGE Queue ID purely
5092 * from the writes to the registers -- the Write Combined Doorbell
5093 * Buffer is a good example. These BAR2 SGE Registers are only
5094 * available for those BAR2 SGE Register areas where the SGE Absolute
5095 * Queue ID can be inferred from simple writes.
5096 */
5097 bar2_qoffset = bar2_page_offset;
5098 bar2_qinferred = (bar2_qid_offset < page_size);
5099 if (bar2_qinferred) {
5100 bar2_qoffset += bar2_qid_offset;
5101 bar2_qid = 0;
5102 }
5103
5104 *pbar2_qoffset = bar2_qoffset;
5105 *pbar2_qid = bar2_qid;
5106 return 0;
5107 }
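
/* Illustrative sketch: looking up where to ring the doorbell for an egress
 * queue through BAR2. For example, with a 4KB SGE page and one queue per
 * page, queue 17 lands at BAR2 page 17 with an inferred Queue ID of 0.
 * The helper name and how the returned offset gets used are assumptions.
 */
static inline int example_eq_doorbell_offset(struct adapter *adap,
					     unsigned int eqid, u64 *offset)
{
	unsigned int bar2_qid;
	int ret;

	ret = t4_bar2_sge_qregs(adap, eqid, T4_BAR2_QTYPE_EGRESS,
				offset, &bar2_qid);
	if (ret)
		return ret;

	/* If bar2_qid is 0 the hardware infers the queue from the offset
	 * written to; otherwise the register write must carry bar2_qid.
	 */
	return 0;
}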
5108
5109 /**
5110 * t4_init_sge_params - initialize adap->params.sge
5111 * @adapter: the adapter
5112 *
5113 * Initialize various fields of the adapter's SGE Parameters structure.
5114 */
5115 int t4_init_sge_params(struct adapter *adapter)
5116 {
5117 struct sge_params *sge_params = &adapter->params.sge;
5118 u32 hps, qpp;
5119 unsigned int s_hps, s_qpp;
5120
5121 /*
5122 * Extract the SGE Page Size for our PF.
5123 */
5124 hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
5125 s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
5126 adapter->pf);
5127 sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
5128
5129 /*
5130 	 * Extract the SGE Egress and Ingress Queues Per Page for our PF.
5131 */
5132 s_qpp = (S_QUEUESPERPAGEPF0 +
5133 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
5134 qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
5135 sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
5136 qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
5137 sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
5138
5139 return 0;
5140 }
5141
5142 /**
5143 * t4_init_tp_params - initialize adap->params.tp
5144 * @adap: the adapter
5145 *
5146 * Initialize various fields of the adapter's TP Parameters structure.
5147 */
5148 int t4_init_tp_params(struct adapter *adap)
5149 {
5150 int chan, ret;
5151 u32 param, v;
5152
5153 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5154 adap->params.tp.tre = G_TIMERRESOLUTION(v);
5155 adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5156
5157 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5158 for (chan = 0; chan < NCHAN; chan++)
5159 adap->params.tp.tx_modq[chan] = chan;
5160
5161 /*
5162 * Cache the adapter's Compressed Filter Mode/Mask and global Ingress
5163 * Configuration.
5164 */
5165 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5166 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
5167 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK));
5168
5169 /* Read current value */
5170 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5171 			      1, &param, &v);
5172 if (!ret) {
5173 dev_info(adap, "Current filter mode/mask 0x%x:0x%x\n",
5174 G_FW_PARAMS_PARAM_FILTER_MODE(v),
5175 G_FW_PARAMS_PARAM_FILTER_MASK(v));
5176 adap->params.tp.vlan_pri_map =
5177 G_FW_PARAMS_PARAM_FILTER_MODE(v);
5178 adap->params.tp.filter_mask =
5179 G_FW_PARAMS_PARAM_FILTER_MASK(v);
5180 } else {
5181 dev_info(adap,
5182 "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
5183
5184 		/* If an older firmware (which doesn't expose the
5185 		 * FW_PARAM_DEV_FILTER_MODE_MASK api) is paired with a newer
5186 		 * driver (which uses the fw api), fall back to the older
5187 		 * method of reading the filter mode from the indirect register.
5188 		 */
5189 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5190 &adap->params.tp.vlan_pri_map, 1,
5191 A_TP_VLAN_PRI_MAP);
5192
5193 		/* With the older-fw and newer-driver combination we might also
5194 		 * run into the case where a user wants to use the hash filter
5195 		 * region but the filter_mask is zero, which makes filter_mask
5196 		 * validation difficult. To avoid that, set filter_mask to the
5197 		 * same value as the filter mode, which behaves exactly like the
5198 		 * older way of skipping the filter mask validation.
5199 */
5200 adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
5201 }
5202
5203 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5204 &adap->params.tp.ingress_config, 1,
5205 A_TP_INGRESS_CONFIG);
5206
5207 /* For T6, cache the adapter's compressed error vector
5208 * and passing outer header info for encapsulated packets.
5209 */
5210 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
5211 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
5212 adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
5213 }
5214
5215 /*
5216 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
5217 * shift positions of several elements of the Compressed Filter Tuple
5218 * for this adapter which we need frequently ...
5219 */
5220 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
5221 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
5222 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
5223 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
5224 F_PROTOCOL);
5225 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
5226 F_ETHERTYPE);
5227 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
5228 F_MACMATCH);
5229 adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
5230
5231 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
5232 adap->params.tp.hash_filter_mask = v;
5233 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
5234 adap->params.tp.hash_filter_mask |= ((u64)v << 32);
5235
5236 return 0;
5237 }
5238
5239 /**
5240 * t4_filter_field_shift - calculate filter field shift
5241 * @adap: the adapter
5242 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5243 *
5244 * Return the shift position of a filter field within the Compressed
5245 * Filter Tuple. The filter field is specified via its selection bit
5246 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
5247 */
5248 int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
5249 {
5250 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
5251 unsigned int sel;
5252 int field_shift;
5253
5254 if ((filter_mode & filter_sel) == 0)
5255 return -1;
5256
5257 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5258 switch (filter_mode & sel) {
5259 case F_FCOE:
5260 field_shift += W_FT_FCOE;
5261 break;
5262 case F_PORT:
5263 field_shift += W_FT_PORT;
5264 break;
5265 case F_VNIC_ID:
5266 field_shift += W_FT_VNIC_ID;
5267 break;
5268 case F_VLAN:
5269 field_shift += W_FT_VLAN;
5270 break;
5271 case F_TOS:
5272 field_shift += W_FT_TOS;
5273 break;
5274 case F_PROTOCOL:
5275 field_shift += W_FT_PROTOCOL;
5276 break;
5277 case F_ETHERTYPE:
5278 field_shift += W_FT_ETHERTYPE;
5279 break;
5280 case F_MACMATCH:
5281 field_shift += W_FT_MACMATCH;
5282 break;
5283 case F_MPSHITTYPE:
5284 field_shift += W_FT_MPSHITTYPE;
5285 break;
5286 case F_FRAGMENTATION:
5287 field_shift += W_FT_FRAGMENTATION;
5288 break;
5289 }
5290 }
5291 return field_shift;
5292 }
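
/* Worked example for the shift calculation above, assuming the
 * conventional TP_VLAN_PRI_MAP bit ordering (FCoE below Port below VLAN):
 * with a filter mode of (F_PORT | F_VLAN), the Port field sits at shift 0
 * and the VLAN field starts W_FT_PORT bits above it; a field that isn't
 * selected in the mode returns -1. The helper name is hypothetical.
 */
static inline void example_filter_shifts(const struct adapter *adap)
{
	int port_shift = t4_filter_field_shift(adap, F_PORT);
	int vlan_shift = t4_filter_field_shift(adap, F_VLAN);

	(void)port_shift;	/* 0 when mode == (F_PORT | F_VLAN) */
	(void)vlan_shift;	/* W_FT_PORT when mode == (F_PORT | F_VLAN) */
}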
5293
5294 int t4_init_rss_mode(struct adapter *adap, int mbox)
5295 {
5296 int i, ret;
5297 struct fw_rss_vi_config_cmd rvc;
5298
5299 memset(&rvc, 0, sizeof(rvc));
5300
5301 for_each_port(adap, i) {
5302 struct port_info *p = adap2pinfo(adap, i);
5303
5304 rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5305 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5306 V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
5307 rvc.retval_len16 = htonl(FW_LEN16(rvc));
5308 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
5309 if (ret)
5310 return ret;
5311 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
5312 }
5313 return 0;
5314 }
5315
5316 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
5317 {
5318 u32 param, val, pcaps, acaps;
5319 enum fw_port_type port_type;
5320 struct fw_port_cmd cmd;
5321 u8 vivld = 0, vin = 0;
5322 int ret, i, j = 0;
5323 int mdio_addr;
5324 u8 addr[6];
5325
5326 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
5327 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
5328 val = 1;
5329 	ret = t4_set_params(adap, mbox, pf, vf, 1, &param, &val);
5330 if (ret < 0)
5331 return ret;
5332
5333 memset(&cmd, 0, sizeof(cmd));
5334
5335 for_each_port(adap, i) {
5336 struct port_info *pi = adap2pinfo(adap, i);
5337 unsigned int rss_size = 0;
5338 u32 lstatus32;
5339
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
					       F_FW_CMD_REQUEST |
					       F_FW_CMD_READ |
					       V_FW_PORT_CMD_PORTID(j));
		val = FW_PORT_ACTION_GET_PORT_INFO32;
		cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(val) |
						  FW_LEN16(cmd));
		ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
		if (ret)
			return ret;

		/* Extract the various fields from the Port Information
		 * message.
		 */
		lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);

		port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
		mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
			    (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) : -1;
		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
		acaps = be32_to_cpu(cmd.u.info32.acaps32);

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size,
				  &vivld, &vin);
		if (ret < 0)
			return ret;

		pi->viid = ret;
		pi->tx_chan = j;
		pi->rss_size = rss_size;
		t4_os_set_hw_addr(adap, i, addr);

		/* If fw supports returning the VIN as part of FW_VI_CMD,
		 * save the returned values.
		 */
		if (adap->params.viid_smt_extn_support) {
			pi->vivld = vivld;
			pi->vin = vin;
		} else {
			/* Retrieve the values from VIID */
			pi->vivld = G_FW_VIID_VIVLD(pi->viid);
			pi->vin = G_FW_VIID_VIN(pi->viid);
		}

		t4_init_link_config(pi, pcaps, acaps, mdio_addr, port_type,
				    FW_PORT_MOD_TYPE_NA);
		j++;
	}
	return 0;
}

/**
 * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @addr: address within adapter memory
 * @len: amount of memory to transfer
 * @hbuf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address and host buffer must be aligned on 32-bit
 * boundaries; the length may be arbitrary.
 *
 * NOTES:
 *  1. The memory is transferred as a raw byte sequence from/to the
 *     firmware's memory. If this memory contains data structures which
 *     contain multi-byte integers, it's the caller's responsibility to
 *     perform appropriate byte order conversions.
 *
 *  2. It is the Caller's responsibility to ensure that no other code
 *     uses the specified PCI-E Memory Window while this routine is
 *     using it. This is typically done via the use of OS-specific
 *     locks, etc.
 */
int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
		      u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid;
	u32 win_pf, mem_reg, mem_aperture, mem_base;
	u32 *buf;

	/* Argument sanity checks ... */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware. So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;
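
	/*
	 * For example (purely illustrative), a transfer of len = 10 bytes is
	 * split into two full 32-bit words handled by the main loop below
	 * and a residual of resid = 2 bytes finished off at the end.
	 */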

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory. We need to grab that aperture in order to know
	 * how to use the specified window. The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space. For T4 this is an absolute PCI-E Bus Address. For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
						  win));
	mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
	mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;

	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture - 1);
	offset = addr - pos;
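
	/*
	 * For example (numbers are illustrative), with a 64KB aperture
	 * (mem_aperture = 0x10000) and addr = 0x12345, the window is
	 * positioned at pos = 0x10000 and the transfer starts at
	 * offset = 0x2345 within it.
	 */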

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer. (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzle." As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *         31                  0
	 *         [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzles.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__le32)t4_read_reg(adap,
								 mem_base +
								 offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next. Note that
		 * doing this here, even when "len" has reached 0, allows us
		 * to set up the PCI-E Memory Window for a possible final
		 * residual transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
							 win), pos | win_pf);
			t4_read_reg(adap,
				    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
							win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount. The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			u32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = le32_to_cpu((__le32)t4_read_reg(adap,
								    mem_base +
								    offset));
			for (bp = (unsigned char *)buf, i = 0; i < resid; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (u32)cpu_to_le32(last.word));
		}
	}

	return 0;
}
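
/*
 * Usage sketch (illustrative only; window number, address and length are
 * made-up values): read 128 bytes of adapter memory at address 0x1000
 * through memory window 0 into a properly aligned host buffer.
 *
 *	u32 data[32];
 *	int ret = t4_memory_rw_addr(adap, 0, 0x1000, sizeof(data),
 *				    data, T4_MEMORY_READ);
 *
 * The caller must serialize access to the chosen PCI-E Memory Window as
 * described in the NOTES above.
 */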

/**
 * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @maddr: address within indicated memory type
 * @len: amount of memory to transfer
 * @hbuf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
 * provides a (memory type, address within memory type) interface.
 */
int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
		       u32 len, void *hbuf, int dir)
{
	u32 mtype_offset;
	u32 edc_size, mc_size;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
	 */
	edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
	if (mtype != MEM_MC1) {
		mtype_offset = (mtype * (edc_size * 1024 * 1024));
	} else {
		mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
						      A_MA_EXT_MEMORY0_BAR));
		mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}
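
	/*
	 * Illustrative example (sizes are assumed, not read from hardware):
	 * with 256MB of EDC0/EDC1 each and 1024MB of MC0, MEM_EDC1 maps to
	 * offset 256MB, MEM_MC0 to offset 512MB, and MEM_MC1 to offset
	 * (2 * 256 + 1024)MB = 1536MB.
	 */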

	return t4_memory_rw_addr(adap, win,
				 mtype_offset + maddr, len,
				 hbuf, dir);
}
