1 /***********************license start***************
2  * Copyright (c) 2003-2012  Cavium Inc. ([email protected]). All rights
3  * reserved.
4  *
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  *   * Redistributions of source code must retain the above copyright
11  *     notice, this list of conditions and the following disclaimer.
12  *
13  *   * Redistributions in binary form must reproduce the above
14  *     copyright notice, this list of conditions and the following
15  *     disclaimer in the documentation and/or other materials provided
16  *     with the distribution.
17 
18  *   * Neither the name of Cavium Inc. nor the names of
19  *     its contributors may be used to endorse or promote products
20  *     derived from this software without specific prior written
21  *     permission.
22 
23  * This Software, including technical data, may be subject to U.S. export  control
24  * laws, including the U.S. Export Administration Act and its  associated
25  * regulations, and may be subject to export or import  regulations in other
26  * countries.
27 
28  * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29  * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30  * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31  * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32  * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33  * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34  * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35  * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36  * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37  * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38  ***********************license end**************************************/
39 
40 
41 /**
42  * cvmx-gmxx-defs.h
43  *
44  * Configuration and status register (CSR) type definitions for
45  * Octeon gmxx.
46  *
47  * This file is auto generated. Do not edit.
48  *
49  * <hr>$Revision$<hr>
50  *
51  */
52 #ifndef __CVMX_GMXX_DEFS_H__
53 #define __CVMX_GMXX_DEFS_H__
54 
CVMX_GMXX_BAD_REG(unsigned long block_id)55 static inline uint64_t CVMX_GMXX_BAD_REG(unsigned long block_id)
56 {
57 	switch(cvmx_get_octeon_family()) {
58 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
59 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
60 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
61 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
62 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
63 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
64 			if ((block_id == 0))
65 				return CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 0) * 0x8000000ull;
66 			break;
67 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
68 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
69 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
70 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
71 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
72 			if ((block_id <= 1))
73 				return CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 1) * 0x8000000ull;
74 			break;
75 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
76 			if ((block_id <= 4))
77 				return CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 7) * 0x1000000ull;
78 			break;
79 	}
80 	cvmx_warn("CVMX_GMXX_BAD_REG (block_id = %lu) not supported on this chip\n", block_id);
81 	return CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 0) * 0x8000000ull;
82 }
CVMX_GMXX_BIST(unsigned long block_id)83 static inline uint64_t CVMX_GMXX_BIST(unsigned long block_id)
84 {
85 	switch(cvmx_get_octeon_family()) {
86 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
87 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
88 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
89 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
90 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
91 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
92 			if ((block_id == 0))
93 				return CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 0) * 0x8000000ull;
94 			break;
95 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
96 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
97 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
98 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
99 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
100 			if ((block_id <= 1))
101 				return CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 1) * 0x8000000ull;
102 			break;
103 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
104 			if ((block_id <= 4))
105 				return CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 7) * 0x1000000ull;
106 			break;
107 	}
108 	cvmx_warn("CVMX_GMXX_BIST (block_id = %lu) not supported on this chip\n", block_id);
109 	return CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 0) * 0x8000000ull;
110 }
111 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_BPID_MAPX(unsigned long offset,unsigned long block_id)112 static inline uint64_t CVMX_GMXX_BPID_MAPX(unsigned long offset, unsigned long block_id)
113 {
114 	if (!(
115 	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 15)) && ((block_id <= 4))))))
116 		cvmx_warn("CVMX_GMXX_BPID_MAPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
117 	return CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 15) + ((block_id) & 7) * 0x200000ull) * 8;
118 }
119 #else
120 #define CVMX_GMXX_BPID_MAPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 15) + ((block_id) & 7) * 0x200000ull) * 8)
121 #endif
122 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_BPID_MSK(unsigned long block_id)123 static inline uint64_t CVMX_GMXX_BPID_MSK(unsigned long block_id)
124 {
125 	if (!(
126 	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 4)))))
127 		cvmx_warn("CVMX_GMXX_BPID_MSK(%lu) is invalid on this chip\n", block_id);
128 	return CVMX_ADD_IO_SEG(0x0001180008000700ull) + ((block_id) & 7) * 0x1000000ull;
129 }
130 #else
131 #define CVMX_GMXX_BPID_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180008000700ull) + ((block_id) & 7) * 0x1000000ull)
132 #endif
CVMX_GMXX_CLK_EN(unsigned long block_id)133 static inline uint64_t CVMX_GMXX_CLK_EN(unsigned long block_id)
134 {
135 	switch(cvmx_get_octeon_family()) {
136 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
137 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
138 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
139 			if ((block_id == 0))
140 				return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 0) * 0x8000000ull;
141 			break;
142 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
143 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
144 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
145 			if ((block_id <= 1))
146 				return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 1) * 0x8000000ull;
147 			break;
148 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
149 			if ((block_id <= 4))
150 				return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 7) * 0x1000000ull;
151 			break;
152 	}
153 	cvmx_warn("CVMX_GMXX_CLK_EN (block_id = %lu) not supported on this chip\n", block_id);
154 	return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 0) * 0x8000000ull;
155 }
156 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_EBP_DIS(unsigned long block_id)157 static inline uint64_t CVMX_GMXX_EBP_DIS(unsigned long block_id)
158 {
159 	if (!(
160 	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 4)))))
161 		cvmx_warn("CVMX_GMXX_EBP_DIS(%lu) is invalid on this chip\n", block_id);
162 	return CVMX_ADD_IO_SEG(0x0001180008000608ull) + ((block_id) & 7) * 0x1000000ull;
163 }
164 #else
165 #define CVMX_GMXX_EBP_DIS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000608ull) + ((block_id) & 7) * 0x1000000ull)
166 #endif
167 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_EBP_MSK(unsigned long block_id)168 static inline uint64_t CVMX_GMXX_EBP_MSK(unsigned long block_id)
169 {
170 	if (!(
171 	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 4)))))
172 		cvmx_warn("CVMX_GMXX_EBP_MSK(%lu) is invalid on this chip\n", block_id);
173 	return CVMX_ADD_IO_SEG(0x0001180008000600ull) + ((block_id) & 7) * 0x1000000ull;
174 }
175 #else
176 #define CVMX_GMXX_EBP_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180008000600ull) + ((block_id) & 7) * 0x1000000ull)
177 #endif
CVMX_GMXX_HG2_CONTROL(unsigned long block_id)178 static inline uint64_t CVMX_GMXX_HG2_CONTROL(unsigned long block_id)
179 {
180 	switch(cvmx_get_octeon_family()) {
181 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
182 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
183 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
184 			if ((block_id == 0))
185 				return CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 0) * 0x8000000ull;
186 			break;
187 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
188 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
189 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
190 			if ((block_id <= 1))
191 				return CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 1) * 0x8000000ull;
192 			break;
193 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
194 			if ((block_id <= 4))
195 				return CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 7) * 0x1000000ull;
196 			break;
197 	}
198 	cvmx_warn("CVMX_GMXX_HG2_CONTROL (block_id = %lu) not supported on this chip\n", block_id);
199 	return CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 0) * 0x8000000ull;
200 }
CVMX_GMXX_INF_MODE(unsigned long block_id)201 static inline uint64_t CVMX_GMXX_INF_MODE(unsigned long block_id)
202 {
203 	switch(cvmx_get_octeon_family()) {
204 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
205 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
206 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
207 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
208 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
209 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
210 			if ((block_id == 0))
211 				return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 0) * 0x8000000ull;
212 			break;
213 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
214 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
215 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
216 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
217 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
218 			if ((block_id <= 1))
219 				return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 1) * 0x8000000ull;
220 			break;
221 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
222 			if ((block_id <= 4))
223 				return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 7) * 0x1000000ull;
224 			break;
225 	}
226 	cvmx_warn("CVMX_GMXX_INF_MODE (block_id = %lu) not supported on this chip\n", block_id);
227 	return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 0) * 0x8000000ull;
228 }
CVMX_GMXX_NXA_ADR(unsigned long block_id)229 static inline uint64_t CVMX_GMXX_NXA_ADR(unsigned long block_id)
230 {
231 	switch(cvmx_get_octeon_family()) {
232 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
233 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
234 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
235 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
236 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
237 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
238 			if ((block_id == 0))
239 				return CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 0) * 0x8000000ull;
240 			break;
241 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
242 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
243 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
244 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
245 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
246 			if ((block_id <= 1))
247 				return CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 1) * 0x8000000ull;
248 			break;
249 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
250 			if ((block_id <= 4))
251 				return CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 7) * 0x1000000ull;
252 			break;
253 	}
254 	cvmx_warn("CVMX_GMXX_NXA_ADR (block_id = %lu) not supported on this chip\n", block_id);
255 	return CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 0) * 0x8000000ull;
256 }
257 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_PIPE_STATUS(unsigned long block_id)258 static inline uint64_t CVMX_GMXX_PIPE_STATUS(unsigned long block_id)
259 {
260 	if (!(
261 	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 4)))))
262 		cvmx_warn("CVMX_GMXX_PIPE_STATUS(%lu) is invalid on this chip\n", block_id);
263 	return CVMX_ADD_IO_SEG(0x0001180008000760ull) + ((block_id) & 7) * 0x1000000ull;
264 }
265 #else
266 #define CVMX_GMXX_PIPE_STATUS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000760ull) + ((block_id) & 7) * 0x1000000ull)
267 #endif
CVMX_GMXX_PRTX_CBFC_CTL(unsigned long offset,unsigned long block_id)268 static inline uint64_t CVMX_GMXX_PRTX_CBFC_CTL(unsigned long offset, unsigned long block_id)
269 {
270 	switch(cvmx_get_octeon_family()) {
271 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
272 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
273 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
274 			if (((offset == 0)) && ((block_id == 0)))
275 				return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 0) * 0x8000000ull;
276 			break;
277 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
278 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
279 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
280 			if (((offset == 0)) && ((block_id <= 1)))
281 				return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 1) * 0x8000000ull;
282 			break;
283 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
284 			if (((offset == 0)) && ((block_id <= 4)))
285 				return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 7) * 0x1000000ull;
286 			break;
287 	}
288 	cvmx_warn("CVMX_GMXX_PRTX_CBFC_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
289 	return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 0) * 0x8000000ull;
290 }
CVMX_GMXX_PRTX_CFG(unsigned long offset,unsigned long block_id)291 static inline uint64_t CVMX_GMXX_PRTX_CFG(unsigned long offset, unsigned long block_id)
292 {
293 	switch(cvmx_get_octeon_family()) {
294 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
295 			if (((offset <= 1)) && ((block_id == 0)))
296 				return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
297 			break;
298 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
299 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
300 			if (((offset <= 2)) && ((block_id == 0)))
301 				return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
302 			break;
303 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
304 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
305 			if (((offset <= 3)) && ((block_id == 0)))
306 				return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
307 			break;
308 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
309 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
310 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
311 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
312 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
313 			if (((offset <= 3)) && ((block_id <= 1)))
314 				return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
315 			break;
316 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
317 			if (((offset <= 2)) && ((block_id == 0)))
318 				return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
319 			break;
320 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
321 			if (((offset <= 3)) && ((block_id <= 4)))
322 				return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
323 			break;
324 	}
325 	cvmx_warn("CVMX_GMXX_PRTX_CFG (%lu, %lu) not supported on this chip\n", offset, block_id);
326 	return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
327 }
328 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_RXAUI_CTL(unsigned long block_id)329 static inline uint64_t CVMX_GMXX_RXAUI_CTL(unsigned long block_id)
330 {
331 	if (!(
332 	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 4)))))
333 		cvmx_warn("CVMX_GMXX_RXAUI_CTL(%lu) is invalid on this chip\n", block_id);
334 	return CVMX_ADD_IO_SEG(0x0001180008000740ull) + ((block_id) & 7) * 0x1000000ull;
335 }
336 #else
337 #define CVMX_GMXX_RXAUI_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000740ull) + ((block_id) & 7) * 0x1000000ull)
338 #endif
CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset,unsigned long block_id)339 static inline uint64_t CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset, unsigned long block_id)
340 {
341 	switch(cvmx_get_octeon_family()) {
342 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
343 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
344 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
345 			if (((offset <= 3)) && ((block_id == 0)))
346 				return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
347 			break;
348 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
349 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
350 			if (((offset <= 2)) && ((block_id == 0)))
351 				return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
352 			break;
353 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
354 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
355 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
356 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
357 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
358 			if (((offset <= 3)) && ((block_id <= 1)))
359 				return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
360 			break;
361 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
362 			if (((offset <= 2)) && ((block_id == 0)))
363 				return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
364 			break;
365 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
366 			if (((offset <= 3)) && ((block_id <= 4)))
367 				return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
368 			break;
369 	}
370 	cvmx_warn("CVMX_GMXX_RXX_ADR_CAM0 (%lu, %lu) not supported on this chip\n", offset, block_id);
371 	return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
372 }
CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset,unsigned long block_id)373 static inline uint64_t CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset, unsigned long block_id)
374 {
375 	switch(cvmx_get_octeon_family()) {
376 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
377 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
378 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
379 			if (((offset <= 3)) && ((block_id == 0)))
380 				return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
381 			break;
382 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
383 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
384 			if (((offset <= 2)) && ((block_id == 0)))
385 				return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
386 			break;
387 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
388 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
389 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
390 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
391 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
392 			if (((offset <= 3)) && ((block_id <= 1)))
393 				return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
394 			break;
395 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
396 			if (((offset <= 2)) && ((block_id == 0)))
397 				return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
398 			break;
399 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
400 			if (((offset <= 3)) && ((block_id <= 4)))
401 				return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
402 			break;
403 	}
404 	cvmx_warn("CVMX_GMXX_RXX_ADR_CAM1 (%lu, %lu) not supported on this chip\n", offset, block_id);
405 	return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
406 }
CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset,unsigned long block_id)407 static inline uint64_t CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset, unsigned long block_id)
408 {
409 	switch(cvmx_get_octeon_family()) {
410 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
411 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
412 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
413 			if (((offset <= 3)) && ((block_id == 0)))
414 				return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
415 			break;
416 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
417 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
418 			if (((offset <= 2)) && ((block_id == 0)))
419 				return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
420 			break;
421 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
422 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
423 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
424 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
425 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
426 			if (((offset <= 3)) && ((block_id <= 1)))
427 				return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
428 			break;
429 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
430 			if (((offset <= 2)) && ((block_id == 0)))
431 				return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
432 			break;
433 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
434 			if (((offset <= 3)) && ((block_id <= 4)))
435 				return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
436 			break;
437 	}
438 	cvmx_warn("CVMX_GMXX_RXX_ADR_CAM2 (%lu, %lu) not supported on this chip\n", offset, block_id);
439 	return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
440 }
CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset,unsigned long block_id)441 static inline uint64_t CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset, unsigned long block_id)
442 {
443 	switch(cvmx_get_octeon_family()) {
444 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
445 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
446 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
447 			if (((offset <= 3)) && ((block_id == 0)))
448 				return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
449 			break;
450 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
451 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
452 			if (((offset <= 2)) && ((block_id == 0)))
453 				return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
454 			break;
455 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
456 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
457 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
458 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
459 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
460 			if (((offset <= 3)) && ((block_id <= 1)))
461 				return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
462 			break;
463 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
464 			if (((offset <= 2)) && ((block_id == 0)))
465 				return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
466 			break;
467 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
468 			if (((offset <= 3)) && ((block_id <= 4)))
469 				return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
470 			break;
471 	}
472 	cvmx_warn("CVMX_GMXX_RXX_ADR_CAM3 (%lu, %lu) not supported on this chip\n", offset, block_id);
473 	return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
474 }
CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset,unsigned long block_id)475 static inline uint64_t CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset, unsigned long block_id)
476 {
477 	switch(cvmx_get_octeon_family()) {
478 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
479 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
480 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
481 			if (((offset <= 3)) && ((block_id == 0)))
482 				return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
483 			break;
484 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
485 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
486 			if (((offset <= 2)) && ((block_id == 0)))
487 				return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
488 			break;
489 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
490 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
491 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
492 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
493 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
494 			if (((offset <= 3)) && ((block_id <= 1)))
495 				return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
496 			break;
497 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
498 			if (((offset <= 2)) && ((block_id == 0)))
499 				return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
500 			break;
501 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
502 			if (((offset <= 3)) && ((block_id <= 4)))
503 				return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
504 			break;
505 	}
506 	cvmx_warn("CVMX_GMXX_RXX_ADR_CAM4 (%lu, %lu) not supported on this chip\n", offset, block_id);
507 	return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
508 }
CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset,unsigned long block_id)509 static inline uint64_t CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset, unsigned long block_id)
510 {
511 	switch(cvmx_get_octeon_family()) {
512 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
513 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
514 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
515 			if (((offset <= 3)) && ((block_id == 0)))
516 				return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
517 			break;
518 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
519 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
520 			if (((offset <= 2)) && ((block_id == 0)))
521 				return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
522 			break;
523 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
524 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
525 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
526 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
527 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
528 			if (((offset <= 3)) && ((block_id <= 1)))
529 				return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
530 			break;
531 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
532 			if (((offset <= 2)) && ((block_id == 0)))
533 				return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
534 			break;
535 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
536 			if (((offset <= 3)) && ((block_id <= 4)))
537 				return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
538 			break;
539 	}
540 	cvmx_warn("CVMX_GMXX_RXX_ADR_CAM5 (%lu, %lu) not supported on this chip\n", offset, block_id);
541 	return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
542 }
CVMX_GMXX_RXX_ADR_CAM_ALL_EN(unsigned long offset,unsigned long block_id)543 static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_ALL_EN(unsigned long offset, unsigned long block_id)
544 {
545 	switch(cvmx_get_octeon_family()) {
546 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
547 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
548 			if (((offset <= 3)) && ((block_id <= 1)))
549 				return CVMX_ADD_IO_SEG(0x0001180008000110ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
550 			break;
551 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
552 			if (((offset <= 1)) && ((block_id == 0)))
553 				return CVMX_ADD_IO_SEG(0x0001180008000110ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
554 			break;
555 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
556 			if (((offset <= 3)) && ((block_id <= 4)))
557 				return CVMX_ADD_IO_SEG(0x0001180008000110ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
558 			break;
559 	}
560 	cvmx_warn("CVMX_GMXX_RXX_ADR_CAM_ALL_EN (%lu, %lu) not supported on this chip\n", offset, block_id);
561 	return CVMX_ADD_IO_SEG(0x0001180008000110ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
562 }
CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset,unsigned long block_id)563 static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset, unsigned long block_id)
564 {
565 	switch(cvmx_get_octeon_family()) {
566 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
567 			if (((offset <= 1)) && ((block_id == 0)))
568 				return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
569 			break;
570 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
571 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
572 			if (((offset <= 2)) && ((block_id == 0)))
573 				return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
574 			break;
575 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
576 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
577 			if (((offset <= 3)) && ((block_id == 0)))
578 				return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
579 			break;
580 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
581 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
582 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
583 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
584 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
585 			if (((offset <= 3)) && ((block_id <= 1)))
586 				return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
587 			break;
588 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
589 			if (((offset <= 2)) && ((block_id == 0)))
590 				return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
591 			break;
592 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
593 			if (((offset <= 3)) && ((block_id <= 4)))
594 				return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
595 			break;
596 	}
597 	cvmx_warn("CVMX_GMXX_RXX_ADR_CAM_EN (%lu, %lu) not supported on this chip\n", offset, block_id);
598 	return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
599 }
CVMX_GMXX_RXX_ADR_CTL(unsigned long offset,unsigned long block_id)600 static inline uint64_t CVMX_GMXX_RXX_ADR_CTL(unsigned long offset, unsigned long block_id)
601 {
602 	switch(cvmx_get_octeon_family()) {
603 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
604 			if (((offset <= 1)) && ((block_id == 0)))
605 				return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
606 			break;
607 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
608 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
609 			if (((offset <= 2)) && ((block_id == 0)))
610 				return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
611 			break;
612 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
613 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
614 			if (((offset <= 3)) && ((block_id == 0)))
615 				return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
616 			break;
617 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
618 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
619 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
620 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
621 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
622 			if (((offset <= 3)) && ((block_id <= 1)))
623 				return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
624 			break;
625 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
626 			if (((offset <= 2)) && ((block_id == 0)))
627 				return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
628 			break;
629 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
630 			if (((offset <= 3)) && ((block_id <= 4)))
631 				return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
632 			break;
633 	}
634 	cvmx_warn("CVMX_GMXX_RXX_ADR_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
635 	return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
636 }
CVMX_GMXX_RXX_DECISION(unsigned long offset,unsigned long block_id)637 static inline uint64_t CVMX_GMXX_RXX_DECISION(unsigned long offset, unsigned long block_id)
638 {
639 	switch(cvmx_get_octeon_family()) {
640 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
641 			if (((offset <= 1)) && ((block_id == 0)))
642 				return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
643 			break;
644 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
645 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
646 			if (((offset <= 2)) && ((block_id == 0)))
647 				return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
648 			break;
649 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
650 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
651 			if (((offset <= 3)) && ((block_id == 0)))
652 				return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
653 			break;
654 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
655 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
656 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
657 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
658 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
659 			if (((offset <= 3)) && ((block_id <= 1)))
660 				return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
661 			break;
662 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
663 			if (((offset <= 2)) && ((block_id == 0)))
664 				return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
665 			break;
666 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
667 			if (((offset <= 3)) && ((block_id <= 4)))
668 				return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
669 			break;
670 	}
671 	cvmx_warn("CVMX_GMXX_RXX_DECISION (%lu, %lu) not supported on this chip\n", offset, block_id);
672 	return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
673 }
CVMX_GMXX_RXX_FRM_CHK(unsigned long offset,unsigned long block_id)674 static inline uint64_t CVMX_GMXX_RXX_FRM_CHK(unsigned long offset, unsigned long block_id)
675 {
676 	switch(cvmx_get_octeon_family()) {
677 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
678 			if (((offset <= 1)) && ((block_id == 0)))
679 				return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
680 			break;
681 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
682 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
683 			if (((offset <= 2)) && ((block_id == 0)))
684 				return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
685 			break;
686 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
687 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
688 			if (((offset <= 3)) && ((block_id == 0)))
689 				return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
690 			break;
691 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
692 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
693 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
694 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
695 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
696 			if (((offset <= 3)) && ((block_id <= 1)))
697 				return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
698 			break;
699 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
700 			if (((offset <= 2)) && ((block_id == 0)))
701 				return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
702 			break;
703 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
704 			if (((offset <= 3)) && ((block_id <= 4)))
705 				return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
706 			break;
707 	}
708 	cvmx_warn("CVMX_GMXX_RXX_FRM_CHK (%lu, %lu) not supported on this chip\n", offset, block_id);
709 	return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
710 }
CVMX_GMXX_RXX_FRM_CTL(unsigned long offset,unsigned long block_id)711 static inline uint64_t CVMX_GMXX_RXX_FRM_CTL(unsigned long offset, unsigned long block_id)
712 {
713 	switch(cvmx_get_octeon_family()) {
714 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
715 			if (((offset <= 1)) && ((block_id == 0)))
716 				return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
717 			break;
718 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
719 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
720 			if (((offset <= 2)) && ((block_id == 0)))
721 				return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
722 			break;
723 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
724 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
725 			if (((offset <= 3)) && ((block_id == 0)))
726 				return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
727 			break;
728 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
729 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
730 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
731 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
732 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
733 			if (((offset <= 3)) && ((block_id <= 1)))
734 				return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
735 			break;
736 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
737 			if (((offset <= 2)) && ((block_id == 0)))
738 				return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
739 			break;
740 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
741 			if (((offset <= 3)) && ((block_id <= 4)))
742 				return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
743 			break;
744 	}
745 	cvmx_warn("CVMX_GMXX_RXX_FRM_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
746 	return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
747 }
748 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_RXX_FRM_MAX(unsigned long offset,unsigned long block_id)749 static inline uint64_t CVMX_GMXX_RXX_FRM_MAX(unsigned long offset, unsigned long block_id)
750 {
751 	if (!(
752 	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
753 	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
754 	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
755 	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
756 		cvmx_warn("CVMX_GMXX_RXX_FRM_MAX(%lu,%lu) is invalid on this chip\n", offset, block_id);
757 	return CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
758 }
759 #else
760 #define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
761 #endif
762 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_RXX_FRM_MIN(unsigned long offset,unsigned long block_id)763 static inline uint64_t CVMX_GMXX_RXX_FRM_MIN(unsigned long offset, unsigned long block_id)
764 {
765 	if (!(
766 	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
767 	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
768 	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
769 	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
770 		cvmx_warn("CVMX_GMXX_RXX_FRM_MIN(%lu,%lu) is invalid on this chip\n", offset, block_id);
771 	return CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
772 }
773 #else
774 #define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
775 #endif
CVMX_GMXX_RXX_IFG(unsigned long offset,unsigned long block_id)776 static inline uint64_t CVMX_GMXX_RXX_IFG(unsigned long offset, unsigned long block_id)
777 {
778 	switch(cvmx_get_octeon_family()) {
779 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
780 			if (((offset <= 1)) && ((block_id == 0)))
781 				return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
782 			break;
783 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
784 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
785 			if (((offset <= 2)) && ((block_id == 0)))
786 				return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
787 			break;
788 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
789 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
790 			if (((offset <= 3)) && ((block_id == 0)))
791 				return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
792 			break;
793 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
794 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
795 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
796 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
797 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
798 			if (((offset <= 3)) && ((block_id <= 1)))
799 				return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
800 			break;
801 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
802 			if (((offset <= 2)) && ((block_id == 0)))
803 				return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
804 			break;
805 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
806 			if (((offset <= 3)) && ((block_id <= 4)))
807 				return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
808 			break;
809 	}
810 	cvmx_warn("CVMX_GMXX_RXX_IFG (%lu, %lu) not supported on this chip\n", offset, block_id);
811 	return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
812 }
CVMX_GMXX_RXX_INT_EN(unsigned long offset,unsigned long block_id)813 static inline uint64_t CVMX_GMXX_RXX_INT_EN(unsigned long offset, unsigned long block_id)
814 {
815 	switch(cvmx_get_octeon_family()) {
816 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
817 			if (((offset <= 1)) && ((block_id == 0)))
818 				return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
819 			break;
820 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
821 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
822 			if (((offset <= 2)) && ((block_id == 0)))
823 				return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
824 			break;
825 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
826 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
827 			if (((offset <= 3)) && ((block_id == 0)))
828 				return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
829 			break;
830 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
831 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
832 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
833 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
834 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
835 			if (((offset <= 3)) && ((block_id <= 1)))
836 				return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
837 			break;
838 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
839 			if (((offset <= 2)) && ((block_id == 0)))
840 				return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
841 			break;
842 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
843 			if (((offset <= 3)) && ((block_id <= 4)))
844 				return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
845 			break;
846 	}
847 	cvmx_warn("CVMX_GMXX_RXX_INT_EN (%lu, %lu) not supported on this chip\n", offset, block_id);
848 	return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
849 }
CVMX_GMXX_RXX_INT_REG(unsigned long offset,unsigned long block_id)850 static inline uint64_t CVMX_GMXX_RXX_INT_REG(unsigned long offset, unsigned long block_id)
851 {
852 	switch(cvmx_get_octeon_family()) {
853 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
854 			if (((offset <= 1)) && ((block_id == 0)))
855 				return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
856 			break;
857 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
858 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
859 			if (((offset <= 2)) && ((block_id == 0)))
860 				return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
861 			break;
862 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
863 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
864 			if (((offset <= 3)) && ((block_id == 0)))
865 				return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
866 			break;
867 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
868 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
869 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
870 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
871 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
872 			if (((offset <= 3)) && ((block_id <= 1)))
873 				return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
874 			break;
875 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
876 			if (((offset <= 2)) && ((block_id == 0)))
877 				return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
878 			break;
879 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
880 			if (((offset <= 3)) && ((block_id <= 4)))
881 				return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
882 			break;
883 	}
884 	cvmx_warn("CVMX_GMXX_RXX_INT_REG (%lu, %lu) not supported on this chip\n", offset, block_id);
885 	return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
886 }
CVMX_GMXX_RXX_JABBER(unsigned long offset,unsigned long block_id)887 static inline uint64_t CVMX_GMXX_RXX_JABBER(unsigned long offset, unsigned long block_id)
888 {
889 	switch(cvmx_get_octeon_family()) {
890 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
891 			if (((offset <= 1)) && ((block_id == 0)))
892 				return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
893 			break;
894 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
895 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
896 			if (((offset <= 2)) && ((block_id == 0)))
897 				return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
898 			break;
899 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
900 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
901 			if (((offset <= 3)) && ((block_id == 0)))
902 				return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
903 			break;
904 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
905 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
906 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
907 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
908 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
909 			if (((offset <= 3)) && ((block_id <= 1)))
910 				return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
911 			break;
912 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
913 			if (((offset <= 2)) && ((block_id == 0)))
914 				return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
915 			break;
916 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
917 			if (((offset <= 3)) && ((block_id <= 4)))
918 				return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
919 			break;
920 	}
921 	cvmx_warn("CVMX_GMXX_RXX_JABBER (%lu, %lu) not supported on this chip\n", offset, block_id);
922 	return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
923 }
CVMX_GMXX_RXX_PAUSE_DROP_TIME(unsigned long offset,unsigned long block_id)924 static inline uint64_t CVMX_GMXX_RXX_PAUSE_DROP_TIME(unsigned long offset, unsigned long block_id)
925 {
926 	switch(cvmx_get_octeon_family()) {
927 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
928 			if (((offset <= 1)) && ((block_id == 0)))
929 				return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
930 			break;
931 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
932 			if (((offset <= 3)) && ((block_id == 0)))
933 				return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
934 			break;
935 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
936 			if (((offset <= 2)) && ((block_id == 0)))
937 				return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
938 			break;
939 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
940 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
941 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
942 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
943 			if (((offset <= 3)) && ((block_id <= 1)))
944 				return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
945 			break;
946 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
947 			if (((offset <= 3)) && ((block_id == 0)))
948 				return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
949 			break;
950 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
951 			if (((offset <= 3)) && ((block_id <= 4)))
952 				return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
953 			break;
954 	}
955 	cvmx_warn("CVMX_GMXX_RXX_PAUSE_DROP_TIME (%lu, %lu) not supported on this chip\n", offset, block_id);
956 	return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
957 }
958 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_RXX_RX_INBND(unsigned long offset,unsigned long block_id)959 static inline uint64_t CVMX_GMXX_RXX_RX_INBND(unsigned long offset, unsigned long block_id)
960 {
961 	if (!(
962 	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
963 	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
964 	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
965 	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
966 	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
967 		cvmx_warn("CVMX_GMXX_RXX_RX_INBND(%lu,%lu) is invalid on this chip\n", offset, block_id);
968 	return CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
969 }
970 #else
971 #define CVMX_GMXX_RXX_RX_INBND(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
972 #endif
CVMX_GMXX_RXX_STATS_CTL(unsigned long offset,unsigned long block_id)973 static inline uint64_t CVMX_GMXX_RXX_STATS_CTL(unsigned long offset, unsigned long block_id)
974 {
975 	switch(cvmx_get_octeon_family()) {
976 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
977 			if (((offset <= 1)) && ((block_id == 0)))
978 				return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
979 			break;
980 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
981 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
982 			if (((offset <= 2)) && ((block_id == 0)))
983 				return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
984 			break;
985 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
986 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
987 			if (((offset <= 3)) && ((block_id == 0)))
988 				return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
989 			break;
990 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
991 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
992 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
993 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
994 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
995 			if (((offset <= 3)) && ((block_id <= 1)))
996 				return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
997 			break;
998 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
999 			if (((offset <= 2)) && ((block_id == 0)))
1000 				return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1001 			break;
1002 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1003 			if (((offset <= 3)) && ((block_id <= 4)))
1004 				return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1005 			break;
1006 	}
1007 	cvmx_warn("CVMX_GMXX_RXX_STATS_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
1008 	return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1009 }
CVMX_GMXX_RXX_STATS_OCTS(unsigned long offset,unsigned long block_id)1010 static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS(unsigned long offset, unsigned long block_id)
1011 {
1012 	switch(cvmx_get_octeon_family()) {
1013 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1014 			if (((offset <= 1)) && ((block_id == 0)))
1015 				return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1016 			break;
1017 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1018 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1019 			if (((offset <= 2)) && ((block_id == 0)))
1020 				return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1021 			break;
1022 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1023 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1024 			if (((offset <= 3)) && ((block_id == 0)))
1025 				return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1026 			break;
1027 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1028 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1029 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1030 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1031 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1032 			if (((offset <= 3)) && ((block_id <= 1)))
1033 				return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1034 			break;
1035 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1036 			if (((offset <= 2)) && ((block_id == 0)))
1037 				return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1038 			break;
1039 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1040 			if (((offset <= 3)) && ((block_id <= 4)))
1041 				return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1042 			break;
1043 	}
1044 	cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS (%lu, %lu) not supported on this chip\n", offset, block_id);
1045 	return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1046 }
CVMX_GMXX_RXX_STATS_OCTS_CTL(unsigned long offset,unsigned long block_id)1047 static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_CTL(unsigned long offset, unsigned long block_id)
1048 {
1049 	switch(cvmx_get_octeon_family()) {
1050 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1051 			if (((offset <= 1)) && ((block_id == 0)))
1052 				return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1053 			break;
1054 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1055 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1056 			if (((offset <= 2)) && ((block_id == 0)))
1057 				return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1058 			break;
1059 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1060 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1061 			if (((offset <= 3)) && ((block_id == 0)))
1062 				return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1063 			break;
1064 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1065 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1066 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1067 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1068 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1069 			if (((offset <= 3)) && ((block_id <= 1)))
1070 				return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1071 			break;
1072 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1073 			if (((offset <= 2)) && ((block_id == 0)))
1074 				return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1075 			break;
1076 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1077 			if (((offset <= 3)) && ((block_id <= 4)))
1078 				return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1079 			break;
1080 	}
1081 	cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
1082 	return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1083 }
CVMX_GMXX_RXX_STATS_OCTS_DMAC(unsigned long offset,unsigned long block_id)1084 static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DMAC(unsigned long offset, unsigned long block_id)
1085 {
1086 	switch(cvmx_get_octeon_family()) {
1087 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1088 			if (((offset <= 1)) && ((block_id == 0)))
1089 				return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1090 			break;
1091 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1092 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1093 			if (((offset <= 2)) && ((block_id == 0)))
1094 				return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1095 			break;
1096 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1097 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1098 			if (((offset <= 3)) && ((block_id == 0)))
1099 				return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1100 			break;
1101 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1102 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1103 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1104 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1105 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1106 			if (((offset <= 3)) && ((block_id <= 1)))
1107 				return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1108 			break;
1109 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1110 			if (((offset <= 2)) && ((block_id == 0)))
1111 				return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1112 			break;
1113 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1114 			if (((offset <= 3)) && ((block_id <= 4)))
1115 				return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1116 			break;
1117 	}
1118 	cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DMAC (%lu, %lu) not supported on this chip\n", offset, block_id);
1119 	return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1120 }
CVMX_GMXX_RXX_STATS_OCTS_DRP(unsigned long offset,unsigned long block_id)1121 static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DRP(unsigned long offset, unsigned long block_id)
1122 {
1123 	switch(cvmx_get_octeon_family()) {
1124 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1125 			if (((offset <= 1)) && ((block_id == 0)))
1126 				return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1127 			break;
1128 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1129 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1130 			if (((offset <= 2)) && ((block_id == 0)))
1131 				return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1132 			break;
1133 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1134 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1135 			if (((offset <= 3)) && ((block_id == 0)))
1136 				return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1137 			break;
1138 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1139 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1140 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1141 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1142 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1143 			if (((offset <= 3)) && ((block_id <= 1)))
1144 				return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1145 			break;
1146 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1147 			if (((offset <= 2)) && ((block_id == 0)))
1148 				return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1149 			break;
1150 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1151 			if (((offset <= 3)) && ((block_id <= 4)))
1152 				return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1153 			break;
1154 	}
1155 	cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DRP (%lu, %lu) not supported on this chip\n", offset, block_id);
1156 	return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1157 }
CVMX_GMXX_RXX_STATS_PKTS(unsigned long offset,unsigned long block_id)1158 static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS(unsigned long offset, unsigned long block_id)
1159 {
1160 	switch(cvmx_get_octeon_family()) {
1161 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1162 			if (((offset <= 1)) && ((block_id == 0)))
1163 				return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1164 			break;
1165 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1166 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1167 			if (((offset <= 2)) && ((block_id == 0)))
1168 				return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1169 			break;
1170 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1171 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1172 			if (((offset <= 3)) && ((block_id == 0)))
1173 				return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1174 			break;
1175 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1176 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1177 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1178 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1179 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1180 			if (((offset <= 3)) && ((block_id <= 1)))
1181 				return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1182 			break;
1183 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1184 			if (((offset <= 2)) && ((block_id == 0)))
1185 				return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1186 			break;
1187 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1188 			if (((offset <= 3)) && ((block_id <= 4)))
1189 				return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1190 			break;
1191 	}
1192 	cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS (%lu, %lu) not supported on this chip\n", offset, block_id);
1193 	return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1194 }
CVMX_GMXX_RXX_STATS_PKTS_BAD(unsigned long offset,unsigned long block_id)1195 static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_BAD(unsigned long offset, unsigned long block_id)
1196 {
1197 	switch(cvmx_get_octeon_family()) {
1198 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1199 			if (((offset <= 1)) && ((block_id == 0)))
1200 				return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1201 			break;
1202 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1203 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1204 			if (((offset <= 2)) && ((block_id == 0)))
1205 				return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1206 			break;
1207 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1208 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1209 			if (((offset <= 3)) && ((block_id == 0)))
1210 				return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1211 			break;
1212 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1213 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1214 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1215 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1216 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1217 			if (((offset <= 3)) && ((block_id <= 1)))
1218 				return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1219 			break;
1220 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1221 			if (((offset <= 2)) && ((block_id == 0)))
1222 				return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1223 			break;
1224 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1225 			if (((offset <= 3)) && ((block_id <= 4)))
1226 				return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1227 			break;
1228 	}
1229 	cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_BAD (%lu, %lu) not supported on this chip\n", offset, block_id);
1230 	return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1231 }
CVMX_GMXX_RXX_STATS_PKTS_CTL(unsigned long offset,unsigned long block_id)1232 static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_CTL(unsigned long offset, unsigned long block_id)
1233 {
1234 	switch(cvmx_get_octeon_family()) {
1235 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1236 			if (((offset <= 1)) && ((block_id == 0)))
1237 				return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1238 			break;
1239 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1240 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1241 			if (((offset <= 2)) && ((block_id == 0)))
1242 				return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1243 			break;
1244 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1245 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1246 			if (((offset <= 3)) && ((block_id == 0)))
1247 				return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1248 			break;
1249 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1250 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1251 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1252 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1253 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1254 			if (((offset <= 3)) && ((block_id <= 1)))
1255 				return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1256 			break;
1257 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1258 			if (((offset <= 2)) && ((block_id == 0)))
1259 				return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1260 			break;
1261 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1262 			if (((offset <= 3)) && ((block_id <= 4)))
1263 				return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1264 			break;
1265 	}
1266 	cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
1267 	return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1268 }
CVMX_GMXX_RXX_STATS_PKTS_DMAC(unsigned long offset,unsigned long block_id)1269 static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DMAC(unsigned long offset, unsigned long block_id)
1270 {
1271 	switch(cvmx_get_octeon_family()) {
1272 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1273 			if (((offset <= 1)) && ((block_id == 0)))
1274 				return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1275 			break;
1276 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1277 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1278 			if (((offset <= 2)) && ((block_id == 0)))
1279 				return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1280 			break;
1281 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1282 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1283 			if (((offset <= 3)) && ((block_id == 0)))
1284 				return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1285 			break;
1286 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1287 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1288 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1289 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1290 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1291 			if (((offset <= 3)) && ((block_id <= 1)))
1292 				return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1293 			break;
1294 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1295 			if (((offset <= 2)) && ((block_id == 0)))
1296 				return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1297 			break;
1298 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1299 			if (((offset <= 3)) && ((block_id <= 4)))
1300 				return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1301 			break;
1302 	}
1303 	cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DMAC (%lu, %lu) not supported on this chip\n", offset, block_id);
1304 	return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1305 }
CVMX_GMXX_RXX_STATS_PKTS_DRP(unsigned long offset,unsigned long block_id)1306 static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DRP(unsigned long offset, unsigned long block_id)
1307 {
1308 	switch(cvmx_get_octeon_family()) {
1309 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1310 			if (((offset <= 1)) && ((block_id == 0)))
1311 				return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1312 			break;
1313 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1314 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1315 			if (((offset <= 2)) && ((block_id == 0)))
1316 				return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1317 			break;
1318 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1319 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1320 			if (((offset <= 3)) && ((block_id == 0)))
1321 				return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1322 			break;
1323 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1324 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1325 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1326 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1327 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1328 			if (((offset <= 3)) && ((block_id <= 1)))
1329 				return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1330 			break;
1331 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1332 			if (((offset <= 2)) && ((block_id == 0)))
1333 				return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1334 			break;
1335 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1336 			if (((offset <= 3)) && ((block_id <= 4)))
1337 				return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1338 			break;
1339 	}
1340 	cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DRP (%lu, %lu) not supported on this chip\n", offset, block_id);
1341 	return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1342 }
CVMX_GMXX_RXX_UDD_SKP(unsigned long offset,unsigned long block_id)1343 static inline uint64_t CVMX_GMXX_RXX_UDD_SKP(unsigned long offset, unsigned long block_id)
1344 {
1345 	switch(cvmx_get_octeon_family()) {
1346 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1347 			if (((offset <= 1)) && ((block_id == 0)))
1348 				return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1349 			break;
1350 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1351 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1352 			if (((offset <= 2)) && ((block_id == 0)))
1353 				return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1354 			break;
1355 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1356 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1357 			if (((offset <= 3)) && ((block_id == 0)))
1358 				return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1359 			break;
1360 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1361 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1362 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1363 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1364 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1365 			if (((offset <= 3)) && ((block_id <= 1)))
1366 				return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1367 			break;
1368 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1369 			if (((offset <= 2)) && ((block_id == 0)))
1370 				return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1371 			break;
1372 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1373 			if (((offset <= 3)) && ((block_id <= 4)))
1374 				return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1375 			break;
1376 	}
1377 	cvmx_warn("CVMX_GMXX_RXX_UDD_SKP (%lu, %lu) not supported on this chip\n", offset, block_id);
1378 	return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1379 }
CVMX_GMXX_RX_BP_DROPX(unsigned long offset,unsigned long block_id)1380 static inline uint64_t CVMX_GMXX_RX_BP_DROPX(unsigned long offset, unsigned long block_id)
1381 {
1382 	switch(cvmx_get_octeon_family()) {
1383 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1384 			if (((offset <= 1)) && ((block_id == 0)))
1385 				return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
1386 			break;
1387 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1388 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1389 			if (((offset <= 2)) && ((block_id == 0)))
1390 				return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
1391 			break;
1392 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1393 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1394 			if (((offset <= 3)) && ((block_id == 0)))
1395 				return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
1396 			break;
1397 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1398 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1399 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1400 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1401 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1402 			if (((offset <= 3)) && ((block_id <= 1)))
1403 				return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
1404 			break;
1405 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1406 			if (((offset <= 2)) && ((block_id == 0)))
1407 				return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 8;
1408 			break;
1409 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1410 			if (((offset <= 3)) && ((block_id <= 4)))
1411 				return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 7) * 0x200000ull) * 8;
1412 			break;
1413 	}
1414 	cvmx_warn("CVMX_GMXX_RX_BP_DROPX (%lu, %lu) not supported on this chip\n", offset, block_id);
1415 	return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
1416 }
CVMX_GMXX_RX_BP_OFFX(unsigned long offset,unsigned long block_id)1417 static inline uint64_t CVMX_GMXX_RX_BP_OFFX(unsigned long offset, unsigned long block_id)
1418 {
1419 	switch(cvmx_get_octeon_family()) {
1420 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1421 			if (((offset <= 1)) && ((block_id == 0)))
1422 				return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
1423 			break;
1424 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1425 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1426 			if (((offset <= 2)) && ((block_id == 0)))
1427 				return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
1428 			break;
1429 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1430 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1431 			if (((offset <= 3)) && ((block_id == 0)))
1432 				return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
1433 			break;
1434 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1435 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1436 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1437 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1438 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1439 			if (((offset <= 3)) && ((block_id <= 1)))
1440 				return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
1441 			break;
1442 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1443 			if (((offset <= 2)) && ((block_id == 0)))
1444 				return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 8;
1445 			break;
1446 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1447 			if (((offset <= 3)) && ((block_id <= 4)))
1448 				return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 7) * 0x200000ull) * 8;
1449 			break;
1450 	}
1451 	cvmx_warn("CVMX_GMXX_RX_BP_OFFX (%lu, %lu) not supported on this chip\n", offset, block_id);
1452 	return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
1453 }
CVMX_GMXX_RX_BP_ONX(unsigned long offset,unsigned long block_id)1454 static inline uint64_t CVMX_GMXX_RX_BP_ONX(unsigned long offset, unsigned long block_id)
1455 {
1456 	switch(cvmx_get_octeon_family()) {
1457 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1458 			if (((offset <= 1)) && ((block_id == 0)))
1459 				return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
1460 			break;
1461 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1462 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1463 			if (((offset <= 2)) && ((block_id == 0)))
1464 				return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
1465 			break;
1466 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1467 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1468 			if (((offset <= 3)) && ((block_id == 0)))
1469 				return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 0) * 0x1000000ull) * 8;
1470 			break;
1471 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1472 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1473 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1474 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1475 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1476 			if (((offset <= 3)) && ((block_id <= 1)))
1477 				return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
1478 			break;
1479 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1480 			if (((offset <= 2)) && ((block_id == 0)))
1481 				return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 8;
1482 			break;
1483 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1484 			if (((offset <= 3)) && ((block_id <= 4)))
1485 				return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 7) * 0x200000ull) * 8;
1486 			break;
1487 	}
1488 	cvmx_warn("CVMX_GMXX_RX_BP_ONX (%lu, %lu) not supported on this chip\n", offset, block_id);
1489 	return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 1) + ((block_id) & 0) * 0x1000000ull) * 8;
1490 }
CVMX_GMXX_RX_HG2_STATUS(unsigned long block_id)1491 static inline uint64_t CVMX_GMXX_RX_HG2_STATUS(unsigned long block_id)
1492 {
1493 	switch(cvmx_get_octeon_family()) {
1494 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1495 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1496 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1497 			if ((block_id == 0))
1498 				return CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 0) * 0x8000000ull;
1499 			break;
1500 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1501 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1502 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1503 			if ((block_id <= 1))
1504 				return CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 1) * 0x8000000ull;
1505 			break;
1506 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1507 			if ((block_id <= 4))
1508 				return CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 7) * 0x1000000ull;
1509 			break;
1510 	}
1511 	cvmx_warn("CVMX_GMXX_RX_HG2_STATUS (block_id = %lu) not supported on this chip\n", block_id);
1512 	return CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 0) * 0x8000000ull;
1513 }
1514 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_RX_PASS_EN(unsigned long block_id)1515 static inline uint64_t CVMX_GMXX_RX_PASS_EN(unsigned long block_id)
1516 {
1517 	if (!(
1518 	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
1519 	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
1520 		cvmx_warn("CVMX_GMXX_RX_PASS_EN(%lu) is invalid on this chip\n", block_id);
1521 	return CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull;
1522 }
1523 #else
1524 #define CVMX_GMXX_RX_PASS_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull)
1525 #endif
1526 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_RX_PASS_MAPX(unsigned long offset,unsigned long block_id)1527 static inline uint64_t CVMX_GMXX_RX_PASS_MAPX(unsigned long offset, unsigned long block_id)
1528 {
1529 	if (!(
1530 	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
1531 	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 15)) && ((block_id <= 1))))))
1532 		cvmx_warn("CVMX_GMXX_RX_PASS_MAPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
1533 	return CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8;
1534 }
1535 #else
1536 #define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8)
1537 #endif
CVMX_GMXX_RX_PRTS(unsigned long block_id)1538 static inline uint64_t CVMX_GMXX_RX_PRTS(unsigned long block_id)
1539 {
1540 	switch(cvmx_get_octeon_family()) {
1541 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1542 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1543 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1544 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1545 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1546 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1547 			if ((block_id == 0))
1548 				return CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 0) * 0x8000000ull;
1549 			break;
1550 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1551 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1552 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1553 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1554 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1555 			if ((block_id <= 1))
1556 				return CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 1) * 0x8000000ull;
1557 			break;
1558 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1559 			if ((block_id <= 4))
1560 				return CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 7) * 0x1000000ull;
1561 			break;
1562 	}
1563 	cvmx_warn("CVMX_GMXX_RX_PRTS (block_id = %lu) not supported on this chip\n", block_id);
1564 	return CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 0) * 0x8000000ull;
1565 }
CVMX_GMXX_RX_PRT_INFO(unsigned long block_id)1566 static inline uint64_t CVMX_GMXX_RX_PRT_INFO(unsigned long block_id)
1567 {
1568 	switch(cvmx_get_octeon_family()) {
1569 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1570 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1571 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1572 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1573 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1574 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1575 			if ((block_id == 0))
1576 				return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 0) * 0x8000000ull;
1577 			break;
1578 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1579 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1580 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1581 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1582 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1583 			if ((block_id <= 1))
1584 				return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 1) * 0x8000000ull;
1585 			break;
1586 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1587 			if ((block_id <= 4))
1588 				return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 7) * 0x1000000ull;
1589 			break;
1590 	}
1591 	cvmx_warn("CVMX_GMXX_RX_PRT_INFO (block_id = %lu) not supported on this chip\n", block_id);
1592 	return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 0) * 0x8000000ull;
1593 }
1594 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_RX_TX_STATUS(unsigned long block_id)1595 static inline uint64_t CVMX_GMXX_RX_TX_STATUS(unsigned long block_id)
1596 {
1597 	if (!(
1598 	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
1599 	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
1600 	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
1601 		cvmx_warn("CVMX_GMXX_RX_TX_STATUS(%lu) is invalid on this chip\n", block_id);
1602 	return CVMX_ADD_IO_SEG(0x00011800080007E8ull);
1603 }
1604 #else
1605 #define CVMX_GMXX_RX_TX_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800080007E8ull))
1606 #endif
CVMX_GMXX_RX_XAUI_BAD_COL(unsigned long block_id)1607 static inline uint64_t CVMX_GMXX_RX_XAUI_BAD_COL(unsigned long block_id)
1608 {
1609 	switch(cvmx_get_octeon_family()) {
1610 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1611 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1612 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1613 			if ((block_id == 0))
1614 				return CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 0) * 0x8000000ull;
1615 			break;
1616 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1617 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1618 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1619 			if ((block_id <= 1))
1620 				return CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 1) * 0x8000000ull;
1621 			break;
1622 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1623 			if ((block_id <= 4))
1624 				return CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 7) * 0x1000000ull;
1625 			break;
1626 	}
1627 	cvmx_warn("CVMX_GMXX_RX_XAUI_BAD_COL (block_id = %lu) not supported on this chip\n", block_id);
1628 	return CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 0) * 0x8000000ull;
1629 }
CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)1630 static inline uint64_t CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)
1631 {
1632 	switch(cvmx_get_octeon_family()) {
1633 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1634 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1635 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1636 			if ((block_id == 0))
1637 				return CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 0) * 0x8000000ull;
1638 			break;
1639 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1640 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1641 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1642 			if ((block_id <= 1))
1643 				return CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 1) * 0x8000000ull;
1644 			break;
1645 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1646 			if ((block_id <= 4))
1647 				return CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 7) * 0x1000000ull;
1648 			break;
1649 	}
1650 	cvmx_warn("CVMX_GMXX_RX_XAUI_CTL (block_id = %lu) not supported on this chip\n", block_id);
1651 	return CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 0) * 0x8000000ull;
1652 }
CVMX_GMXX_SMACX(unsigned long offset,unsigned long block_id)1653 static inline uint64_t CVMX_GMXX_SMACX(unsigned long offset, unsigned long block_id)
1654 {
1655 	switch(cvmx_get_octeon_family()) {
1656 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1657 			if (((offset <= 1)) && ((block_id == 0)))
1658 				return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1659 			break;
1660 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1661 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1662 			if (((offset <= 2)) && ((block_id == 0)))
1663 				return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1664 			break;
1665 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1666 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1667 			if (((offset <= 3)) && ((block_id == 0)))
1668 				return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1669 			break;
1670 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1671 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1672 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1673 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1674 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1675 			if (((offset <= 3)) && ((block_id <= 1)))
1676 				return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1677 			break;
1678 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1679 			if (((offset <= 2)) && ((block_id == 0)))
1680 				return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1681 			break;
1682 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1683 			if (((offset <= 3)) && ((block_id <= 4)))
1684 				return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1685 			break;
1686 	}
1687 	cvmx_warn("CVMX_GMXX_SMACX (%lu, %lu) not supported on this chip\n", offset, block_id);
1688 	return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1689 }
CVMX_GMXX_SOFT_BIST(unsigned long block_id)1690 static inline uint64_t CVMX_GMXX_SOFT_BIST(unsigned long block_id)
1691 {
1692 	switch(cvmx_get_octeon_family()) {
1693 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1694 			if ((block_id <= 1))
1695 				return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + ((block_id) & 1) * 0x8000000ull;
1696 			break;
1697 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1698 			if ((block_id == 0))
1699 				return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + ((block_id) & 0) * 0x8000000ull;
1700 			break;
1701 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1702 			if ((block_id <= 4))
1703 				return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + ((block_id) & 7) * 0x1000000ull;
1704 			break;
1705 	}
1706 	cvmx_warn("CVMX_GMXX_SOFT_BIST (block_id = %lu) not supported on this chip\n", block_id);
1707 	return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + ((block_id) & 7) * 0x1000000ull;
1708 }
CVMX_GMXX_STAT_BP(unsigned long block_id)1709 static inline uint64_t CVMX_GMXX_STAT_BP(unsigned long block_id)
1710 {
1711 	switch(cvmx_get_octeon_family()) {
1712 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1713 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1714 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1715 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1716 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1717 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1718 			if ((block_id == 0))
1719 				return CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 0) * 0x8000000ull;
1720 			break;
1721 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1722 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1723 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1724 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1725 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1726 			if ((block_id <= 1))
1727 				return CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 1) * 0x8000000ull;
1728 			break;
1729 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1730 			if ((block_id <= 4))
1731 				return CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 7) * 0x1000000ull;
1732 			break;
1733 	}
1734 	cvmx_warn("CVMX_GMXX_STAT_BP (block_id = %lu) not supported on this chip\n", block_id);
1735 	return CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 0) * 0x8000000ull;
1736 }
CVMX_GMXX_TB_REG(unsigned long block_id)1737 static inline uint64_t CVMX_GMXX_TB_REG(unsigned long block_id)
1738 {
1739 	switch(cvmx_get_octeon_family()) {
1740 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1741 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1742 			if ((block_id <= 1))
1743 				return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + ((block_id) & 1) * 0x8000000ull;
1744 			break;
1745 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1746 			if ((block_id == 0))
1747 				return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + ((block_id) & 0) * 0x8000000ull;
1748 			break;
1749 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1750 			if ((block_id <= 4))
1751 				return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + ((block_id) & 7) * 0x1000000ull;
1752 			break;
1753 	}
1754 	cvmx_warn("CVMX_GMXX_TB_REG (block_id = %lu) not supported on this chip\n", block_id);
1755 	return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + ((block_id) & 0) * 0x8000000ull;
1756 }
CVMX_GMXX_TXX_APPEND(unsigned long offset,unsigned long block_id)1757 static inline uint64_t CVMX_GMXX_TXX_APPEND(unsigned long offset, unsigned long block_id)
1758 {
1759 	switch(cvmx_get_octeon_family()) {
1760 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1761 			if (((offset <= 1)) && ((block_id == 0)))
1762 				return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1763 			break;
1764 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1765 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1766 			if (((offset <= 2)) && ((block_id == 0)))
1767 				return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1768 			break;
1769 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1770 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1771 			if (((offset <= 3)) && ((block_id == 0)))
1772 				return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1773 			break;
1774 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1775 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1776 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1777 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1778 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1779 			if (((offset <= 3)) && ((block_id <= 1)))
1780 				return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1781 			break;
1782 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1783 			if (((offset <= 2)) && ((block_id == 0)))
1784 				return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1785 			break;
1786 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1787 			if (((offset <= 3)) && ((block_id <= 4)))
1788 				return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1789 			break;
1790 	}
1791 	cvmx_warn("CVMX_GMXX_TXX_APPEND (%lu, %lu) not supported on this chip\n", offset, block_id);
1792 	return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1793 }
CVMX_GMXX_TXX_BURST(unsigned long offset,unsigned long block_id)1794 static inline uint64_t CVMX_GMXX_TXX_BURST(unsigned long offset, unsigned long block_id)
1795 {
1796 	switch(cvmx_get_octeon_family()) {
1797 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1798 			if (((offset <= 1)) && ((block_id == 0)))
1799 				return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1800 			break;
1801 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1802 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1803 			if (((offset <= 2)) && ((block_id == 0)))
1804 				return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1805 			break;
1806 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1807 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1808 			if (((offset <= 3)) && ((block_id == 0)))
1809 				return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1810 			break;
1811 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1812 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1813 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1814 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1815 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1816 			if (((offset <= 3)) && ((block_id <= 1)))
1817 				return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1818 			break;
1819 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1820 			if (((offset <= 2)) && ((block_id == 0)))
1821 				return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1822 			break;
1823 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1824 			if (((offset <= 3)) && ((block_id <= 4)))
1825 				return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1826 			break;
1827 	}
1828 	cvmx_warn("CVMX_GMXX_TXX_BURST (%lu, %lu) not supported on this chip\n", offset, block_id);
1829 	return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1830 }
CVMX_GMXX_TXX_CBFC_XOFF(unsigned long offset,unsigned long block_id)1831 static inline uint64_t CVMX_GMXX_TXX_CBFC_XOFF(unsigned long offset, unsigned long block_id)
1832 {
1833 	switch(cvmx_get_octeon_family()) {
1834 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1835 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1836 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1837 			if (((offset == 0)) && ((block_id == 0)))
1838 				return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 0) * 0x8000000ull;
1839 			break;
1840 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1841 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1842 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1843 			if (((offset == 0)) && ((block_id <= 1)))
1844 				return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 1) * 0x8000000ull;
1845 			break;
1846 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1847 			if (((offset == 0)) && ((block_id <= 4)))
1848 				return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 7) * 0x1000000ull;
1849 			break;
1850 	}
1851 	cvmx_warn("CVMX_GMXX_TXX_CBFC_XOFF (%lu, %lu) not supported on this chip\n", offset, block_id);
1852 	return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 0) * 0x8000000ull;
1853 }
CVMX_GMXX_TXX_CBFC_XON(unsigned long offset,unsigned long block_id)1854 static inline uint64_t CVMX_GMXX_TXX_CBFC_XON(unsigned long offset, unsigned long block_id)
1855 {
1856 	switch(cvmx_get_octeon_family()) {
1857 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1858 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1859 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1860 			if (((offset == 0)) && ((block_id == 0)))
1861 				return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 0) * 0x8000000ull;
1862 			break;
1863 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1864 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1865 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1866 			if (((offset == 0)) && ((block_id <= 1)))
1867 				return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 1) * 0x8000000ull;
1868 			break;
1869 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1870 			if (((offset == 0)) && ((block_id <= 4)))
1871 				return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 7) * 0x1000000ull;
1872 			break;
1873 	}
1874 	cvmx_warn("CVMX_GMXX_TXX_CBFC_XON (%lu, %lu) not supported on this chip\n", offset, block_id);
1875 	return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 0) * 0x8000000ull;
1876 }
1877 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_TXX_CLK(unsigned long offset,unsigned long block_id)1878 static inline uint64_t CVMX_GMXX_TXX_CLK(unsigned long offset, unsigned long block_id)
1879 {
1880 	if (!(
1881 	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
1882 	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
1883 	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
1884 	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
1885 	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
1886 		cvmx_warn("CVMX_GMXX_TXX_CLK(%lu,%lu) is invalid on this chip\n", offset, block_id);
1887 	return CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1888 }
1889 #else
1890 #define CVMX_GMXX_TXX_CLK(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
1891 #endif
CVMX_GMXX_TXX_CTL(unsigned long offset,unsigned long block_id)1892 static inline uint64_t CVMX_GMXX_TXX_CTL(unsigned long offset, unsigned long block_id)
1893 {
1894 	switch(cvmx_get_octeon_family()) {
1895 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1896 			if (((offset <= 1)) && ((block_id == 0)))
1897 				return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1898 			break;
1899 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1900 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1901 			if (((offset <= 2)) && ((block_id == 0)))
1902 				return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1903 			break;
1904 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1905 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1906 			if (((offset <= 3)) && ((block_id == 0)))
1907 				return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1908 			break;
1909 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1910 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1911 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1912 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1913 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1914 			if (((offset <= 3)) && ((block_id <= 1)))
1915 				return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1916 			break;
1917 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1918 			if (((offset <= 2)) && ((block_id == 0)))
1919 				return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1920 			break;
1921 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1922 			if (((offset <= 3)) && ((block_id <= 4)))
1923 				return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1924 			break;
1925 	}
1926 	cvmx_warn("CVMX_GMXX_TXX_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
1927 	return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1928 }
CVMX_GMXX_TXX_MIN_PKT(unsigned long offset,unsigned long block_id)1929 static inline uint64_t CVMX_GMXX_TXX_MIN_PKT(unsigned long offset, unsigned long block_id)
1930 {
1931 	switch(cvmx_get_octeon_family()) {
1932 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1933 			if (((offset <= 1)) && ((block_id == 0)))
1934 				return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1935 			break;
1936 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1937 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1938 			if (((offset <= 2)) && ((block_id == 0)))
1939 				return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1940 			break;
1941 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1942 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1943 			if (((offset <= 3)) && ((block_id == 0)))
1944 				return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1945 			break;
1946 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1947 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1948 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1949 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1950 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1951 			if (((offset <= 3)) && ((block_id <= 1)))
1952 				return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1953 			break;
1954 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1955 			if (((offset <= 2)) && ((block_id == 0)))
1956 				return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1957 			break;
1958 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1959 			if (((offset <= 3)) && ((block_id <= 4)))
1960 				return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1961 			break;
1962 	}
1963 	cvmx_warn("CVMX_GMXX_TXX_MIN_PKT (%lu, %lu) not supported on this chip\n", offset, block_id);
1964 	return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1965 }
CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset,unsigned long block_id)1966 static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset, unsigned long block_id)
1967 {
1968 	switch(cvmx_get_octeon_family()) {
1969 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
1970 			if (((offset <= 1)) && ((block_id == 0)))
1971 				return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
1972 			break;
1973 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
1974 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
1975 			if (((offset <= 2)) && ((block_id == 0)))
1976 				return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1977 			break;
1978 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
1979 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
1980 			if (((offset <= 3)) && ((block_id == 0)))
1981 				return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
1982 			break;
1983 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
1984 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
1985 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
1986 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
1987 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
1988 			if (((offset <= 3)) && ((block_id <= 1)))
1989 				return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
1990 			break;
1991 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
1992 			if (((offset <= 2)) && ((block_id == 0)))
1993 				return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
1994 			break;
1995 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
1996 			if (((offset <= 3)) && ((block_id <= 4)))
1997 				return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
1998 			break;
1999 	}
2000 	cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL (%lu, %lu) not supported on this chip\n", offset, block_id);
2001 	return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2002 }
CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset,unsigned long block_id)2003 static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset, unsigned long block_id)
2004 {
2005 	switch(cvmx_get_octeon_family()) {
2006 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2007 			if (((offset <= 1)) && ((block_id == 0)))
2008 				return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2009 			break;
2010 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2011 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2012 			if (((offset <= 2)) && ((block_id == 0)))
2013 				return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2014 			break;
2015 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2016 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2017 			if (((offset <= 3)) && ((block_id == 0)))
2018 				return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2019 			break;
2020 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2021 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2022 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2023 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2024 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2025 			if (((offset <= 3)) && ((block_id <= 1)))
2026 				return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2027 			break;
2028 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2029 			if (((offset <= 2)) && ((block_id == 0)))
2030 				return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2031 			break;
2032 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2033 			if (((offset <= 3)) && ((block_id <= 4)))
2034 				return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2035 			break;
2036 	}
2037 	cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_TIME (%lu, %lu) not supported on this chip\n", offset, block_id);
2038 	return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2039 }
CVMX_GMXX_TXX_PAUSE_TOGO(unsigned long offset,unsigned long block_id)2040 static inline uint64_t CVMX_GMXX_TXX_PAUSE_TOGO(unsigned long offset, unsigned long block_id)
2041 {
2042 	switch(cvmx_get_octeon_family()) {
2043 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2044 			if (((offset <= 1)) && ((block_id == 0)))
2045 				return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2046 			break;
2047 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2048 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2049 			if (((offset <= 2)) && ((block_id == 0)))
2050 				return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2051 			break;
2052 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2053 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2054 			if (((offset <= 3)) && ((block_id == 0)))
2055 				return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2056 			break;
2057 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2058 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2059 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2060 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2061 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2062 			if (((offset <= 3)) && ((block_id <= 1)))
2063 				return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2064 			break;
2065 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2066 			if (((offset <= 2)) && ((block_id == 0)))
2067 				return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2068 			break;
2069 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2070 			if (((offset <= 3)) && ((block_id <= 4)))
2071 				return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2072 			break;
2073 	}
2074 	cvmx_warn("CVMX_GMXX_TXX_PAUSE_TOGO (%lu, %lu) not supported on this chip\n", offset, block_id);
2075 	return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2076 }
CVMX_GMXX_TXX_PAUSE_ZERO(unsigned long offset,unsigned long block_id)2077 static inline uint64_t CVMX_GMXX_TXX_PAUSE_ZERO(unsigned long offset, unsigned long block_id)
2078 {
2079 	switch(cvmx_get_octeon_family()) {
2080 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2081 			if (((offset <= 1)) && ((block_id == 0)))
2082 				return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2083 			break;
2084 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2085 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2086 			if (((offset <= 2)) && ((block_id == 0)))
2087 				return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2088 			break;
2089 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2090 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2091 			if (((offset <= 3)) && ((block_id == 0)))
2092 				return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2093 			break;
2094 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2095 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2096 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2097 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2098 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2099 			if (((offset <= 3)) && ((block_id <= 1)))
2100 				return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2101 			break;
2102 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2103 			if (((offset <= 2)) && ((block_id == 0)))
2104 				return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2105 			break;
2106 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2107 			if (((offset <= 3)) && ((block_id <= 4)))
2108 				return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2109 			break;
2110 	}
2111 	cvmx_warn("CVMX_GMXX_TXX_PAUSE_ZERO (%lu, %lu) not supported on this chip\n", offset, block_id);
2112 	return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2113 }
2114 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_TXX_PIPE(unsigned long offset,unsigned long block_id)2115 static inline uint64_t CVMX_GMXX_TXX_PIPE(unsigned long offset, unsigned long block_id)
2116 {
2117 	if (!(
2118 	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 3)) && ((block_id <= 4))))))
2119 		cvmx_warn("CVMX_GMXX_TXX_PIPE(%lu,%lu) is invalid on this chip\n", offset, block_id);
2120 	return CVMX_ADD_IO_SEG(0x0001180008000310ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2121 }
2122 #else
2123 #define CVMX_GMXX_TXX_PIPE(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000310ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048)
2124 #endif
CVMX_GMXX_TXX_SGMII_CTL(unsigned long offset,unsigned long block_id)2125 static inline uint64_t CVMX_GMXX_TXX_SGMII_CTL(unsigned long offset, unsigned long block_id)
2126 {
2127 	switch(cvmx_get_octeon_family()) {
2128 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2129 			if (((offset <= 1)) && ((block_id == 0)))
2130 				return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2131 			break;
2132 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2133 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2134 			if (((offset <= 3)) && ((block_id == 0)))
2135 				return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2136 			break;
2137 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2138 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2139 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2140 			if (((offset <= 3)) && ((block_id <= 1)))
2141 				return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2142 			break;
2143 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2144 			if (((offset <= 3)) && ((block_id <= 4)))
2145 				return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2146 			break;
2147 	}
2148 	cvmx_warn("CVMX_GMXX_TXX_SGMII_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
2149 	return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2150 }
CVMX_GMXX_TXX_SLOT(unsigned long offset,unsigned long block_id)2151 static inline uint64_t CVMX_GMXX_TXX_SLOT(unsigned long offset, unsigned long block_id)
2152 {
2153 	switch(cvmx_get_octeon_family()) {
2154 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2155 			if (((offset <= 1)) && ((block_id == 0)))
2156 				return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2157 			break;
2158 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2159 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2160 			if (((offset <= 2)) && ((block_id == 0)))
2161 				return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2162 			break;
2163 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2164 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2165 			if (((offset <= 3)) && ((block_id == 0)))
2166 				return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2167 			break;
2168 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2169 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2170 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2171 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2172 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2173 			if (((offset <= 3)) && ((block_id <= 1)))
2174 				return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2175 			break;
2176 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2177 			if (((offset <= 2)) && ((block_id == 0)))
2178 				return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2179 			break;
2180 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2181 			if (((offset <= 3)) && ((block_id <= 4)))
2182 				return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2183 			break;
2184 	}
2185 	cvmx_warn("CVMX_GMXX_TXX_SLOT (%lu, %lu) not supported on this chip\n", offset, block_id);
2186 	return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2187 }
CVMX_GMXX_TXX_SOFT_PAUSE(unsigned long offset,unsigned long block_id)2188 static inline uint64_t CVMX_GMXX_TXX_SOFT_PAUSE(unsigned long offset, unsigned long block_id)
2189 {
2190 	switch(cvmx_get_octeon_family()) {
2191 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2192 			if (((offset <= 1)) && ((block_id == 0)))
2193 				return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2194 			break;
2195 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2196 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2197 			if (((offset <= 2)) && ((block_id == 0)))
2198 				return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2199 			break;
2200 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2201 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2202 			if (((offset <= 3)) && ((block_id == 0)))
2203 				return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2204 			break;
2205 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2206 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2207 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2208 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2209 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2210 			if (((offset <= 3)) && ((block_id <= 1)))
2211 				return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2212 			break;
2213 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2214 			if (((offset <= 2)) && ((block_id == 0)))
2215 				return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2216 			break;
2217 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2218 			if (((offset <= 3)) && ((block_id <= 4)))
2219 				return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2220 			break;
2221 	}
2222 	cvmx_warn("CVMX_GMXX_TXX_SOFT_PAUSE (%lu, %lu) not supported on this chip\n", offset, block_id);
2223 	return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2224 }
CVMX_GMXX_TXX_STAT0(unsigned long offset,unsigned long block_id)2225 static inline uint64_t CVMX_GMXX_TXX_STAT0(unsigned long offset, unsigned long block_id)
2226 {
2227 	switch(cvmx_get_octeon_family()) {
2228 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2229 			if (((offset <= 1)) && ((block_id == 0)))
2230 				return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2231 			break;
2232 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2233 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2234 			if (((offset <= 2)) && ((block_id == 0)))
2235 				return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2236 			break;
2237 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2238 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2239 			if (((offset <= 3)) && ((block_id == 0)))
2240 				return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2241 			break;
2242 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2243 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2244 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2245 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2246 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2247 			if (((offset <= 3)) && ((block_id <= 1)))
2248 				return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2249 			break;
2250 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2251 			if (((offset <= 2)) && ((block_id == 0)))
2252 				return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2253 			break;
2254 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2255 			if (((offset <= 3)) && ((block_id <= 4)))
2256 				return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2257 			break;
2258 	}
2259 	cvmx_warn("CVMX_GMXX_TXX_STAT0 (%lu, %lu) not supported on this chip\n", offset, block_id);
2260 	return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2261 }
CVMX_GMXX_TXX_STAT1(unsigned long offset,unsigned long block_id)2262 static inline uint64_t CVMX_GMXX_TXX_STAT1(unsigned long offset, unsigned long block_id)
2263 {
2264 	switch(cvmx_get_octeon_family()) {
2265 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2266 			if (((offset <= 1)) && ((block_id == 0)))
2267 				return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2268 			break;
2269 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2270 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2271 			if (((offset <= 2)) && ((block_id == 0)))
2272 				return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2273 			break;
2274 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2275 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2276 			if (((offset <= 3)) && ((block_id == 0)))
2277 				return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2278 			break;
2279 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2280 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2281 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2282 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2283 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2284 			if (((offset <= 3)) && ((block_id <= 1)))
2285 				return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2286 			break;
2287 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2288 			if (((offset <= 2)) && ((block_id == 0)))
2289 				return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2290 			break;
2291 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2292 			if (((offset <= 3)) && ((block_id <= 4)))
2293 				return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2294 			break;
2295 	}
2296 	cvmx_warn("CVMX_GMXX_TXX_STAT1 (%lu, %lu) not supported on this chip\n", offset, block_id);
2297 	return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2298 }
CVMX_GMXX_TXX_STAT2(unsigned long offset,unsigned long block_id)2299 static inline uint64_t CVMX_GMXX_TXX_STAT2(unsigned long offset, unsigned long block_id)
2300 {
2301 	switch(cvmx_get_octeon_family()) {
2302 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2303 			if (((offset <= 1)) && ((block_id == 0)))
2304 				return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2305 			break;
2306 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2307 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2308 			if (((offset <= 2)) && ((block_id == 0)))
2309 				return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2310 			break;
2311 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2312 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2313 			if (((offset <= 3)) && ((block_id == 0)))
2314 				return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2315 			break;
2316 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2317 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2318 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2319 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2320 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2321 			if (((offset <= 3)) && ((block_id <= 1)))
2322 				return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2323 			break;
2324 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2325 			if (((offset <= 2)) && ((block_id == 0)))
2326 				return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2327 			break;
2328 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2329 			if (((offset <= 3)) && ((block_id <= 4)))
2330 				return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2331 			break;
2332 	}
2333 	cvmx_warn("CVMX_GMXX_TXX_STAT2 (%lu, %lu) not supported on this chip\n", offset, block_id);
2334 	return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2335 }
CVMX_GMXX_TXX_STAT3(unsigned long offset,unsigned long block_id)2336 static inline uint64_t CVMX_GMXX_TXX_STAT3(unsigned long offset, unsigned long block_id)
2337 {
2338 	switch(cvmx_get_octeon_family()) {
2339 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2340 			if (((offset <= 1)) && ((block_id == 0)))
2341 				return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2342 			break;
2343 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2344 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2345 			if (((offset <= 2)) && ((block_id == 0)))
2346 				return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2347 			break;
2348 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2349 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2350 			if (((offset <= 3)) && ((block_id == 0)))
2351 				return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2352 			break;
2353 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2354 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2355 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2356 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2357 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2358 			if (((offset <= 3)) && ((block_id <= 1)))
2359 				return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2360 			break;
2361 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2362 			if (((offset <= 2)) && ((block_id == 0)))
2363 				return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2364 			break;
2365 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2366 			if (((offset <= 3)) && ((block_id <= 4)))
2367 				return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2368 			break;
2369 	}
2370 	cvmx_warn("CVMX_GMXX_TXX_STAT3 (%lu, %lu) not supported on this chip\n", offset, block_id);
2371 	return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2372 }
CVMX_GMXX_TXX_STAT4(unsigned long offset,unsigned long block_id)2373 static inline uint64_t CVMX_GMXX_TXX_STAT4(unsigned long offset, unsigned long block_id)
2374 {
2375 	switch(cvmx_get_octeon_family()) {
2376 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2377 			if (((offset <= 1)) && ((block_id == 0)))
2378 				return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2379 			break;
2380 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2381 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2382 			if (((offset <= 2)) && ((block_id == 0)))
2383 				return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2384 			break;
2385 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2386 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2387 			if (((offset <= 3)) && ((block_id == 0)))
2388 				return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2389 			break;
2390 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2391 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2392 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2393 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2394 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2395 			if (((offset <= 3)) && ((block_id <= 1)))
2396 				return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2397 			break;
2398 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2399 			if (((offset <= 2)) && ((block_id == 0)))
2400 				return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2401 			break;
2402 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2403 			if (((offset <= 3)) && ((block_id <= 4)))
2404 				return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2405 			break;
2406 	}
2407 	cvmx_warn("CVMX_GMXX_TXX_STAT4 (%lu, %lu) not supported on this chip\n", offset, block_id);
2408 	return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2409 }
CVMX_GMXX_TXX_STAT5(unsigned long offset,unsigned long block_id)2410 static inline uint64_t CVMX_GMXX_TXX_STAT5(unsigned long offset, unsigned long block_id)
2411 {
2412 	switch(cvmx_get_octeon_family()) {
2413 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2414 			if (((offset <= 1)) && ((block_id == 0)))
2415 				return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2416 			break;
2417 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2418 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2419 			if (((offset <= 2)) && ((block_id == 0)))
2420 				return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2421 			break;
2422 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2423 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2424 			if (((offset <= 3)) && ((block_id == 0)))
2425 				return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2426 			break;
2427 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2428 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2429 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2430 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2431 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2432 			if (((offset <= 3)) && ((block_id <= 1)))
2433 				return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2434 			break;
2435 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2436 			if (((offset <= 2)) && ((block_id == 0)))
2437 				return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2438 			break;
2439 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2440 			if (((offset <= 3)) && ((block_id <= 4)))
2441 				return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2442 			break;
2443 	}
2444 	cvmx_warn("CVMX_GMXX_TXX_STAT5 (%lu, %lu) not supported on this chip\n", offset, block_id);
2445 	return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2446 }
CVMX_GMXX_TXX_STAT6(unsigned long offset,unsigned long block_id)2447 static inline uint64_t CVMX_GMXX_TXX_STAT6(unsigned long offset, unsigned long block_id)
2448 {
2449 	switch(cvmx_get_octeon_family()) {
2450 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2451 			if (((offset <= 1)) && ((block_id == 0)))
2452 				return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2453 			break;
2454 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2455 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2456 			if (((offset <= 2)) && ((block_id == 0)))
2457 				return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2458 			break;
2459 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2460 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2461 			if (((offset <= 3)) && ((block_id == 0)))
2462 				return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2463 			break;
2464 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2465 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2466 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2467 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2468 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2469 			if (((offset <= 3)) && ((block_id <= 1)))
2470 				return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2471 			break;
2472 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2473 			if (((offset <= 2)) && ((block_id == 0)))
2474 				return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2475 			break;
2476 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2477 			if (((offset <= 3)) && ((block_id <= 4)))
2478 				return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2479 			break;
2480 	}
2481 	cvmx_warn("CVMX_GMXX_TXX_STAT6 (%lu, %lu) not supported on this chip\n", offset, block_id);
2482 	return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2483 }
CVMX_GMXX_TXX_STAT7(unsigned long offset,unsigned long block_id)2484 static inline uint64_t CVMX_GMXX_TXX_STAT7(unsigned long offset, unsigned long block_id)
2485 {
2486 	switch(cvmx_get_octeon_family()) {
2487 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2488 			if (((offset <= 1)) && ((block_id == 0)))
2489 				return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2490 			break;
2491 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2492 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2493 			if (((offset <= 2)) && ((block_id == 0)))
2494 				return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2495 			break;
2496 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2497 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2498 			if (((offset <= 3)) && ((block_id == 0)))
2499 				return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2500 			break;
2501 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2502 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2503 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2504 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2505 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2506 			if (((offset <= 3)) && ((block_id <= 1)))
2507 				return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2508 			break;
2509 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2510 			if (((offset <= 2)) && ((block_id == 0)))
2511 				return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2512 			break;
2513 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2514 			if (((offset <= 3)) && ((block_id <= 4)))
2515 				return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2516 			break;
2517 	}
2518 	cvmx_warn("CVMX_GMXX_TXX_STAT7 (%lu, %lu) not supported on this chip\n", offset, block_id);
2519 	return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2520 }
CVMX_GMXX_TXX_STAT8(unsigned long offset,unsigned long block_id)2521 static inline uint64_t CVMX_GMXX_TXX_STAT8(unsigned long offset, unsigned long block_id)
2522 {
2523 	switch(cvmx_get_octeon_family()) {
2524 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2525 			if (((offset <= 1)) && ((block_id == 0)))
2526 				return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2527 			break;
2528 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2529 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2530 			if (((offset <= 2)) && ((block_id == 0)))
2531 				return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2532 			break;
2533 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2534 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2535 			if (((offset <= 3)) && ((block_id == 0)))
2536 				return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2537 			break;
2538 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2539 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2540 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2541 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2542 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2543 			if (((offset <= 3)) && ((block_id <= 1)))
2544 				return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2545 			break;
2546 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2547 			if (((offset <= 2)) && ((block_id == 0)))
2548 				return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2549 			break;
2550 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2551 			if (((offset <= 3)) && ((block_id <= 4)))
2552 				return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2553 			break;
2554 	}
2555 	cvmx_warn("CVMX_GMXX_TXX_STAT8 (%lu, %lu) not supported on this chip\n", offset, block_id);
2556 	return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2557 }
CVMX_GMXX_TXX_STAT9(unsigned long offset,unsigned long block_id)2558 static inline uint64_t CVMX_GMXX_TXX_STAT9(unsigned long offset, unsigned long block_id)
2559 {
2560 	switch(cvmx_get_octeon_family()) {
2561 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2562 			if (((offset <= 1)) && ((block_id == 0)))
2563 				return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2564 			break;
2565 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2566 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2567 			if (((offset <= 2)) && ((block_id == 0)))
2568 				return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2569 			break;
2570 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2571 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2572 			if (((offset <= 3)) && ((block_id == 0)))
2573 				return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2574 			break;
2575 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2576 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2577 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2578 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2579 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2580 			if (((offset <= 3)) && ((block_id <= 1)))
2581 				return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2582 			break;
2583 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2584 			if (((offset <= 2)) && ((block_id == 0)))
2585 				return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2586 			break;
2587 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2588 			if (((offset <= 3)) && ((block_id <= 4)))
2589 				return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2590 			break;
2591 	}
2592 	cvmx_warn("CVMX_GMXX_TXX_STAT9 (%lu, %lu) not supported on this chip\n", offset, block_id);
2593 	return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2594 }
CVMX_GMXX_TXX_STATS_CTL(unsigned long offset,unsigned long block_id)2595 static inline uint64_t CVMX_GMXX_TXX_STATS_CTL(unsigned long offset, unsigned long block_id)
2596 {
2597 	switch(cvmx_get_octeon_family()) {
2598 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2599 			if (((offset <= 1)) && ((block_id == 0)))
2600 				return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2601 			break;
2602 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2603 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2604 			if (((offset <= 2)) && ((block_id == 0)))
2605 				return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2606 			break;
2607 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2608 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2609 			if (((offset <= 3)) && ((block_id == 0)))
2610 				return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2611 			break;
2612 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2613 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2614 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2615 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2616 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2617 			if (((offset <= 3)) && ((block_id <= 1)))
2618 				return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2619 			break;
2620 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2621 			if (((offset <= 2)) && ((block_id == 0)))
2622 				return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2623 			break;
2624 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2625 			if (((offset <= 3)) && ((block_id <= 4)))
2626 				return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2627 			break;
2628 	}
2629 	cvmx_warn("CVMX_GMXX_TXX_STATS_CTL (%lu, %lu) not supported on this chip\n", offset, block_id);
2630 	return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2631 }
CVMX_GMXX_TXX_THRESH(unsigned long offset,unsigned long block_id)2632 static inline uint64_t CVMX_GMXX_TXX_THRESH(unsigned long offset, unsigned long block_id)
2633 {
2634 	switch(cvmx_get_octeon_family()) {
2635 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2636 			if (((offset <= 1)) && ((block_id == 0)))
2637 				return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2638 			break;
2639 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2640 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2641 			if (((offset <= 2)) && ((block_id == 0)))
2642 				return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2643 			break;
2644 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2645 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2646 			if (((offset <= 3)) && ((block_id == 0)))
2647 				return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 0) * 0x10000ull) * 2048;
2648 			break;
2649 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2650 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2651 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2652 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2653 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2654 			if (((offset <= 3)) && ((block_id <= 1)))
2655 				return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
2656 			break;
2657 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2658 			if (((offset <= 2)) && ((block_id == 0)))
2659 				return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 2048;
2660 			break;
2661 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2662 			if (((offset <= 3)) && ((block_id <= 4)))
2663 				return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048;
2664 			break;
2665 	}
2666 	cvmx_warn("CVMX_GMXX_TXX_THRESH (%lu, %lu) not supported on this chip\n", offset, block_id);
2667 	return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 1) + ((block_id) & 0) * 0x10000ull) * 2048;
2668 }
CVMX_GMXX_TX_BP(unsigned long block_id)2669 static inline uint64_t CVMX_GMXX_TX_BP(unsigned long block_id)
2670 {
2671 	switch(cvmx_get_octeon_family()) {
2672 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2673 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2674 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2675 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2676 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2677 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2678 			if ((block_id == 0))
2679 				return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 0) * 0x8000000ull;
2680 			break;
2681 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2682 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2683 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2684 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2685 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2686 			if ((block_id <= 1))
2687 				return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 1) * 0x8000000ull;
2688 			break;
2689 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2690 			if ((block_id <= 4))
2691 				return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 7) * 0x1000000ull;
2692 			break;
2693 	}
2694 	cvmx_warn("CVMX_GMXX_TX_BP (block_id = %lu) not supported on this chip\n", block_id);
2695 	return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 0) * 0x8000000ull;
2696 }
2697 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_TX_CLK_MSKX(unsigned long offset,unsigned long block_id)2698 static inline uint64_t CVMX_GMXX_TX_CLK_MSKX(unsigned long offset, unsigned long block_id)
2699 {
2700 	if (!(
2701 	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 1)) && ((block_id == 0)))) ||
2702 	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 1)) && ((block_id == 0))))))
2703 		cvmx_warn("CVMX_GMXX_TX_CLK_MSKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
2704 	return CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8;
2705 }
2706 #else
2707 #define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8)
2708 #endif
CVMX_GMXX_TX_COL_ATTEMPT(unsigned long block_id)2709 static inline uint64_t CVMX_GMXX_TX_COL_ATTEMPT(unsigned long block_id)
2710 {
2711 	switch(cvmx_get_octeon_family()) {
2712 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2713 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2714 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2715 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2716 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2717 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2718 			if ((block_id == 0))
2719 				return CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 0) * 0x8000000ull;
2720 			break;
2721 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2722 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2723 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2724 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2725 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2726 			if ((block_id <= 1))
2727 				return CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 1) * 0x8000000ull;
2728 			break;
2729 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2730 			if ((block_id <= 4))
2731 				return CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 7) * 0x1000000ull;
2732 			break;
2733 	}
2734 	cvmx_warn("CVMX_GMXX_TX_COL_ATTEMPT (block_id = %lu) not supported on this chip\n", block_id);
2735 	return CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 0) * 0x8000000ull;
2736 }
CVMX_GMXX_TX_CORRUPT(unsigned long block_id)2737 static inline uint64_t CVMX_GMXX_TX_CORRUPT(unsigned long block_id)
2738 {
2739 	switch(cvmx_get_octeon_family()) {
2740 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2741 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2742 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2743 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2744 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2745 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2746 			if ((block_id == 0))
2747 				return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 0) * 0x8000000ull;
2748 			break;
2749 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2750 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2751 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2752 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2753 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2754 			if ((block_id <= 1))
2755 				return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 1) * 0x8000000ull;
2756 			break;
2757 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2758 			if ((block_id <= 4))
2759 				return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 7) * 0x1000000ull;
2760 			break;
2761 	}
2762 	cvmx_warn("CVMX_GMXX_TX_CORRUPT (block_id = %lu) not supported on this chip\n", block_id);
2763 	return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 0) * 0x8000000ull;
2764 }
CVMX_GMXX_TX_HG2_REG1(unsigned long block_id)2765 static inline uint64_t CVMX_GMXX_TX_HG2_REG1(unsigned long block_id)
2766 {
2767 	switch(cvmx_get_octeon_family()) {
2768 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2769 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2770 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2771 			if ((block_id == 0))
2772 				return CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 0) * 0x8000000ull;
2773 			break;
2774 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2775 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2776 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2777 			if ((block_id <= 1))
2778 				return CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 1) * 0x8000000ull;
2779 			break;
2780 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2781 			if ((block_id <= 4))
2782 				return CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 7) * 0x1000000ull;
2783 			break;
2784 	}
2785 	cvmx_warn("CVMX_GMXX_TX_HG2_REG1 (block_id = %lu) not supported on this chip\n", block_id);
2786 	return CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 0) * 0x8000000ull;
2787 }
CVMX_GMXX_TX_HG2_REG2(unsigned long block_id)2788 static inline uint64_t CVMX_GMXX_TX_HG2_REG2(unsigned long block_id)
2789 {
2790 	switch(cvmx_get_octeon_family()) {
2791 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2792 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2793 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2794 			if ((block_id == 0))
2795 				return CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 0) * 0x8000000ull;
2796 			break;
2797 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2798 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2799 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2800 			if ((block_id <= 1))
2801 				return CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 1) * 0x8000000ull;
2802 			break;
2803 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2804 			if ((block_id <= 4))
2805 				return CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 7) * 0x1000000ull;
2806 			break;
2807 	}
2808 	cvmx_warn("CVMX_GMXX_TX_HG2_REG2 (block_id = %lu) not supported on this chip\n", block_id);
2809 	return CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 0) * 0x8000000ull;
2810 }
CVMX_GMXX_TX_IFG(unsigned long block_id)2811 static inline uint64_t CVMX_GMXX_TX_IFG(unsigned long block_id)
2812 {
2813 	switch(cvmx_get_octeon_family()) {
2814 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2815 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2816 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2817 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2818 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2819 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2820 			if ((block_id == 0))
2821 				return CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 0) * 0x8000000ull;
2822 			break;
2823 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2824 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2825 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2826 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2827 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2828 			if ((block_id <= 1))
2829 				return CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 1) * 0x8000000ull;
2830 			break;
2831 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2832 			if ((block_id <= 4))
2833 				return CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 7) * 0x1000000ull;
2834 			break;
2835 	}
2836 	cvmx_warn("CVMX_GMXX_TX_IFG (block_id = %lu) not supported on this chip\n", block_id);
2837 	return CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 0) * 0x8000000ull;
2838 }
CVMX_GMXX_TX_INT_EN(unsigned long block_id)2839 static inline uint64_t CVMX_GMXX_TX_INT_EN(unsigned long block_id)
2840 {
2841 	switch(cvmx_get_octeon_family()) {
2842 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2843 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2844 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2845 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2846 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2847 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2848 			if ((block_id == 0))
2849 				return CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 0) * 0x8000000ull;
2850 			break;
2851 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2852 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2853 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2854 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2855 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2856 			if ((block_id <= 1))
2857 				return CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 1) * 0x8000000ull;
2858 			break;
2859 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2860 			if ((block_id <= 4))
2861 				return CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 7) * 0x1000000ull;
2862 			break;
2863 	}
2864 	cvmx_warn("CVMX_GMXX_TX_INT_EN (block_id = %lu) not supported on this chip\n", block_id);
2865 	return CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 0) * 0x8000000ull;
2866 }
CVMX_GMXX_TX_INT_REG(unsigned long block_id)2867 static inline uint64_t CVMX_GMXX_TX_INT_REG(unsigned long block_id)
2868 {
2869 	switch(cvmx_get_octeon_family()) {
2870 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2871 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2872 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2873 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2874 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2875 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2876 			if ((block_id == 0))
2877 				return CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 0) * 0x8000000ull;
2878 			break;
2879 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2880 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2881 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2882 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2883 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2884 			if ((block_id <= 1))
2885 				return CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 1) * 0x8000000ull;
2886 			break;
2887 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2888 			if ((block_id <= 4))
2889 				return CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 7) * 0x1000000ull;
2890 			break;
2891 	}
2892 	cvmx_warn("CVMX_GMXX_TX_INT_REG (block_id = %lu) not supported on this chip\n", block_id);
2893 	return CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 0) * 0x8000000ull;
2894 }
CVMX_GMXX_TX_JAM(unsigned long block_id)2895 static inline uint64_t CVMX_GMXX_TX_JAM(unsigned long block_id)
2896 {
2897 	switch(cvmx_get_octeon_family()) {
2898 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2899 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2900 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2901 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2902 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2903 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2904 			if ((block_id == 0))
2905 				return CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 0) * 0x8000000ull;
2906 			break;
2907 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2908 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2909 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2910 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2911 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2912 			if ((block_id <= 1))
2913 				return CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 1) * 0x8000000ull;
2914 			break;
2915 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2916 			if ((block_id <= 4))
2917 				return CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 7) * 0x1000000ull;
2918 			break;
2919 	}
2920 	cvmx_warn("CVMX_GMXX_TX_JAM (block_id = %lu) not supported on this chip\n", block_id);
2921 	return CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 0) * 0x8000000ull;
2922 }
CVMX_GMXX_TX_LFSR(unsigned long block_id)2923 static inline uint64_t CVMX_GMXX_TX_LFSR(unsigned long block_id)
2924 {
2925 	switch(cvmx_get_octeon_family()) {
2926 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2927 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2928 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2929 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2930 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2931 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2932 			if ((block_id == 0))
2933 				return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 0) * 0x8000000ull;
2934 			break;
2935 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2936 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2937 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2938 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2939 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2940 			if ((block_id <= 1))
2941 				return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 1) * 0x8000000ull;
2942 			break;
2943 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2944 			if ((block_id <= 4))
2945 				return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 7) * 0x1000000ull;
2946 			break;
2947 	}
2948 	cvmx_warn("CVMX_GMXX_TX_LFSR (block_id = %lu) not supported on this chip\n", block_id);
2949 	return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 0) * 0x8000000ull;
2950 }
CVMX_GMXX_TX_OVR_BP(unsigned long block_id)2951 static inline uint64_t CVMX_GMXX_TX_OVR_BP(unsigned long block_id)
2952 {
2953 	switch(cvmx_get_octeon_family()) {
2954 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2955 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2956 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2957 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2958 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2959 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2960 			if ((block_id == 0))
2961 				return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 0) * 0x8000000ull;
2962 			break;
2963 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2964 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2965 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2966 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2967 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2968 			if ((block_id <= 1))
2969 				return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 1) * 0x8000000ull;
2970 			break;
2971 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
2972 			if ((block_id <= 4))
2973 				return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 7) * 0x1000000ull;
2974 			break;
2975 	}
2976 	cvmx_warn("CVMX_GMXX_TX_OVR_BP (block_id = %lu) not supported on this chip\n", block_id);
2977 	return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 0) * 0x8000000ull;
2978 }
CVMX_GMXX_TX_PAUSE_PKT_DMAC(unsigned long block_id)2979 static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_DMAC(unsigned long block_id)
2980 {
2981 	switch(cvmx_get_octeon_family()) {
2982 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
2983 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
2984 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
2985 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
2986 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
2987 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
2988 			if ((block_id == 0))
2989 				return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 0) * 0x8000000ull;
2990 			break;
2991 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
2992 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
2993 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
2994 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
2995 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
2996 			if ((block_id <= 1))
2997 				return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 1) * 0x8000000ull;
2998 			break;
2999 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
3000 			if ((block_id <= 4))
3001 				return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 7) * 0x1000000ull;
3002 			break;
3003 	}
3004 	cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_DMAC (block_id = %lu) not supported on this chip\n", block_id);
3005 	return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 0) * 0x8000000ull;
3006 }
CVMX_GMXX_TX_PAUSE_PKT_TYPE(unsigned long block_id)3007 static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_TYPE(unsigned long block_id)
3008 {
3009 	switch(cvmx_get_octeon_family()) {
3010 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
3011 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
3012 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
3013 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
3014 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
3015 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
3016 			if ((block_id == 0))
3017 				return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 0) * 0x8000000ull;
3018 			break;
3019 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
3020 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
3021 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
3022 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
3023 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
3024 			if ((block_id <= 1))
3025 				return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 1) * 0x8000000ull;
3026 			break;
3027 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
3028 			if ((block_id <= 4))
3029 				return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 7) * 0x1000000ull;
3030 			break;
3031 	}
3032 	cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_TYPE (block_id = %lu) not supported on this chip\n", block_id);
3033 	return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 0) * 0x8000000ull;
3034 }
CVMX_GMXX_TX_PRTS(unsigned long block_id)3035 static inline uint64_t CVMX_GMXX_TX_PRTS(unsigned long block_id)
3036 {
3037 	switch(cvmx_get_octeon_family()) {
3038 		case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
3039 		case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
3040 		case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
3041 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
3042 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
3043 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
3044 			if ((block_id == 0))
3045 				return CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 0) * 0x8000000ull;
3046 			break;
3047 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
3048 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
3049 		case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
3050 		case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
3051 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
3052 			if ((block_id <= 1))
3053 				return CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 1) * 0x8000000ull;
3054 			break;
3055 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
3056 			if ((block_id <= 4))
3057 				return CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 7) * 0x1000000ull;
3058 			break;
3059 	}
3060 	cvmx_warn("CVMX_GMXX_TX_PRTS (block_id = %lu) not supported on this chip\n", block_id);
3061 	return CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 0) * 0x8000000ull;
3062 }
3063 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_TX_SPI_CTL(unsigned long block_id)3064 static inline uint64_t CVMX_GMXX_TX_SPI_CTL(unsigned long block_id)
3065 {
3066 	if (!(
3067 	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
3068 	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
3069 		cvmx_warn("CVMX_GMXX_TX_SPI_CTL(%lu) is invalid on this chip\n", block_id);
3070 	return CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull;
3071 }
3072 #else
3073 #define CVMX_GMXX_TX_SPI_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull)
3074 #endif
3075 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_TX_SPI_DRAIN(unsigned long block_id)3076 static inline uint64_t CVMX_GMXX_TX_SPI_DRAIN(unsigned long block_id)
3077 {
3078 	if (!(
3079 	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
3080 	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
3081 		cvmx_warn("CVMX_GMXX_TX_SPI_DRAIN(%lu) is invalid on this chip\n", block_id);
3082 	return CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull;
3083 }
3084 #else
3085 #define CVMX_GMXX_TX_SPI_DRAIN(block_id) (CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull)
3086 #endif
3087 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_TX_SPI_MAX(unsigned long block_id)3088 static inline uint64_t CVMX_GMXX_TX_SPI_MAX(unsigned long block_id)
3089 {
3090 	if (!(
3091 	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
3092 	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
3093 		cvmx_warn("CVMX_GMXX_TX_SPI_MAX(%lu) is invalid on this chip\n", block_id);
3094 	return CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull;
3095 }
3096 #else
3097 #define CVMX_GMXX_TX_SPI_MAX(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull)
3098 #endif
3099 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_TX_SPI_ROUNDX(unsigned long offset,unsigned long block_id)3100 static inline uint64_t CVMX_GMXX_TX_SPI_ROUNDX(unsigned long offset, unsigned long block_id)
3101 {
3102 	if (!(
3103 	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 31)) && ((block_id <= 1))))))
3104 		cvmx_warn("CVMX_GMXX_TX_SPI_ROUNDX(%lu,%lu) is invalid on this chip\n", offset, block_id);
3105 	return CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8;
3106 }
3107 #else
3108 #define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8)
3109 #endif
3110 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
CVMX_GMXX_TX_SPI_THRESH(unsigned long block_id)3111 static inline uint64_t CVMX_GMXX_TX_SPI_THRESH(unsigned long block_id)
3112 {
3113 	if (!(
3114 	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
3115 	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
3116 		cvmx_warn("CVMX_GMXX_TX_SPI_THRESH(%lu) is invalid on this chip\n", block_id);
3117 	return CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull;
3118 }
3119 #else
3120 #define CVMX_GMXX_TX_SPI_THRESH(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull)
3121 #endif
CVMX_GMXX_TX_XAUI_CTL(unsigned long block_id)3122 static inline uint64_t CVMX_GMXX_TX_XAUI_CTL(unsigned long block_id)
3123 {
3124 	switch(cvmx_get_octeon_family()) {
3125 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
3126 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
3127 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
3128 			if ((block_id == 0))
3129 				return CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 0) * 0x8000000ull;
3130 			break;
3131 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
3132 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
3133 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
3134 			if ((block_id <= 1))
3135 				return CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 1) * 0x8000000ull;
3136 			break;
3137 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
3138 			if ((block_id <= 4))
3139 				return CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 7) * 0x1000000ull;
3140 			break;
3141 	}
3142 	cvmx_warn("CVMX_GMXX_TX_XAUI_CTL (block_id = %lu) not supported on this chip\n", block_id);
3143 	return CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 0) * 0x8000000ull;
3144 }
CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id)3145 static inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id)
3146 {
3147 	switch(cvmx_get_octeon_family()) {
3148 		case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
3149 		case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
3150 		case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
3151 			if ((block_id == 0))
3152 				return CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 0) * 0x8000000ull;
3153 			break;
3154 		case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
3155 		case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
3156 		case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
3157 			if ((block_id <= 1))
3158 				return CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 1) * 0x8000000ull;
3159 			break;
3160 		case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
3161 			if ((block_id <= 4))
3162 				return CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 7) * 0x1000000ull;
3163 			break;
3164 	}
3165 	cvmx_warn("CVMX_GMXX_XAUI_EXT_LOOPBACK (block_id = %lu) not supported on this chip\n", block_id);
3166 	return CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 0) * 0x8000000ull;
3167 }
3168 
3169 /**
3170  * cvmx_gmx#_bad_reg
3171  *
3172  * GMX_BAD_REG = A collection of things that have gone very, very wrong
3173  *
3174  *
3175  * Notes:
3176  * In XAUI mode, only the lsb (corresponding to port0) of INB_NXA, LOSTSTAT, OUT_OVR, are used.
3177  *
3178  */
3179 union cvmx_gmxx_bad_reg {
3180 	uint64_t u64;
3181 	struct cvmx_gmxx_bad_reg_s {
3182 #ifdef __BIG_ENDIAN_BITFIELD
3183 	uint64_t reserved_31_63               : 33;
3184 	uint64_t inb_nxa                      : 4;  /**< Inbound port > GMX_RX_PRTS */
3185 	uint64_t statovr                      : 1;  /**< TX Statistics overflow
3186                                                          The common FIFO to SGMII and XAUI had an overflow
3187                                                          TX Stats are corrupted */
3188 	uint64_t loststat                     : 4;  /**< TX Statistics data was over-written
3189                                                          In SGMII, one bit per port
3190                                                          In XAUI, only port0 is used
3191                                                          TX Stats are corrupted */
3192 	uint64_t reserved_18_21               : 4;
3193 	uint64_t out_ovr                      : 16; /**< Outbound data FIFO overflow (per port) */
3194 	uint64_t ncb_ovr                      : 1;  /**< Outbound NCB FIFO Overflow */
3195 	uint64_t out_col                      : 1;  /**< Outbound collision occured between PKO and NCB */
3196 #else
3197 	uint64_t out_col                      : 1;
3198 	uint64_t ncb_ovr                      : 1;
3199 	uint64_t out_ovr                      : 16;
3200 	uint64_t reserved_18_21               : 4;
3201 	uint64_t loststat                     : 4;
3202 	uint64_t statovr                      : 1;
3203 	uint64_t inb_nxa                      : 4;
3204 	uint64_t reserved_31_63               : 33;
3205 #endif
3206 	} s;
3207 	struct cvmx_gmxx_bad_reg_cn30xx {
3208 #ifdef __BIG_ENDIAN_BITFIELD
3209 	uint64_t reserved_31_63               : 33;
3210 	uint64_t inb_nxa                      : 4;  /**< Inbound port > GMX_RX_PRTS */
3211 	uint64_t statovr                      : 1;  /**< TX Statistics overflow */
3212 	uint64_t reserved_25_25               : 1;
3213 	uint64_t loststat                     : 3;  /**< TX Statistics data was over-written (per RGM port)
3214                                                          TX Stats are corrupted */
3215 	uint64_t reserved_5_21                : 17;
3216 	uint64_t out_ovr                      : 3;  /**< Outbound data FIFO overflow (per port) */
3217 	uint64_t reserved_0_1                 : 2;
3218 #else
3219 	uint64_t reserved_0_1                 : 2;
3220 	uint64_t out_ovr                      : 3;
3221 	uint64_t reserved_5_21                : 17;
3222 	uint64_t loststat                     : 3;
3223 	uint64_t reserved_25_25               : 1;
3224 	uint64_t statovr                      : 1;
3225 	uint64_t inb_nxa                      : 4;
3226 	uint64_t reserved_31_63               : 33;
3227 #endif
3228 	} cn30xx;
3229 	struct cvmx_gmxx_bad_reg_cn30xx       cn31xx;
3230 	struct cvmx_gmxx_bad_reg_s            cn38xx;
3231 	struct cvmx_gmxx_bad_reg_s            cn38xxp2;
3232 	struct cvmx_gmxx_bad_reg_cn30xx       cn50xx;
3233 	struct cvmx_gmxx_bad_reg_cn52xx {
3234 #ifdef __BIG_ENDIAN_BITFIELD
3235 	uint64_t reserved_31_63               : 33;
3236 	uint64_t inb_nxa                      : 4;  /**< Inbound port > GMX_RX_PRTS */
3237 	uint64_t statovr                      : 1;  /**< TX Statistics overflow
3238                                                          The common FIFO to SGMII and XAUI had an overflow
3239                                                          TX Stats are corrupted */
3240 	uint64_t loststat                     : 4;  /**< TX Statistics data was over-written
3241                                                          In SGMII, one bit per port
3242                                                          In XAUI, only port0 is used
3243                                                          TX Stats are corrupted */
3244 	uint64_t reserved_6_21                : 16;
3245 	uint64_t out_ovr                      : 4;  /**< Outbound data FIFO overflow (per port) */
3246 	uint64_t reserved_0_1                 : 2;
3247 #else
3248 	uint64_t reserved_0_1                 : 2;
3249 	uint64_t out_ovr                      : 4;
3250 	uint64_t reserved_6_21                : 16;
3251 	uint64_t loststat                     : 4;
3252 	uint64_t statovr                      : 1;
3253 	uint64_t inb_nxa                      : 4;
3254 	uint64_t reserved_31_63               : 33;
3255 #endif
3256 	} cn52xx;
3257 	struct cvmx_gmxx_bad_reg_cn52xx       cn52xxp1;
3258 	struct cvmx_gmxx_bad_reg_cn52xx       cn56xx;
3259 	struct cvmx_gmxx_bad_reg_cn52xx       cn56xxp1;
3260 	struct cvmx_gmxx_bad_reg_s            cn58xx;
3261 	struct cvmx_gmxx_bad_reg_s            cn58xxp1;
3262 	struct cvmx_gmxx_bad_reg_cn52xx       cn61xx;
3263 	struct cvmx_gmxx_bad_reg_cn52xx       cn63xx;
3264 	struct cvmx_gmxx_bad_reg_cn52xx       cn63xxp1;
3265 	struct cvmx_gmxx_bad_reg_cn52xx       cn66xx;
3266 	struct cvmx_gmxx_bad_reg_cn52xx       cn68xx;
3267 	struct cvmx_gmxx_bad_reg_cn52xx       cn68xxp1;
3268 	struct cvmx_gmxx_bad_reg_cn52xx       cnf71xx;
3269 };
3270 typedef union cvmx_gmxx_bad_reg cvmx_gmxx_bad_reg_t;
3271 
3272 /**
3273  * cvmx_gmx#_bist
3274  *
3275  * GMX_BIST = GMX BIST Results
3276  *
3277  */
3278 union cvmx_gmxx_bist {
3279 	uint64_t u64;
3280 	struct cvmx_gmxx_bist_s {
3281 #ifdef __BIG_ENDIAN_BITFIELD
3282 	uint64_t reserved_25_63               : 39;
3283 	uint64_t status                       : 25; /**< BIST Results.
3284                                                          HW sets a bit in BIST for for memory that fails
3285                                                          - 0: gmx#.inb.fif_bnk0
3286                                                          - 1: gmx#.inb.fif_bnk1
3287                                                          - 2: gmx#.inb.fif_bnk2
3288                                                          - 3: gmx#.inb.fif_bnk3
3289                                                          - 4: gmx#.inb.fif_bnk_ext0
3290                                                          - 5: gmx#.inb.fif_bnk_ext1
3291                                                          - 6: gmx#.inb.fif_bnk_ext2
3292                                                          - 7: gmx#.inb.fif_bnk_ext3
3293                                                          - 8: gmx#.outb.fif.fif_bnk0
3294                                                          - 9: gmx#.outb.fif.fif_bnk1
3295                                                          - 10: gmx#.outb.fif.fif_bnk2
3296                                                          - 11: gmx#.outb.fif.fif_bnk3
3297                                                          - 12: gmx#.outb.fif.fif_bnk_ext0
3298                                                          - 13: gmx#.outb.fif.fif_bnk_ext1
3299                                                          - 14: gmx#.outb.fif.fif_bnk_ext2
3300                                                          - 15: gmx#.outb.fif.fif_bnk_ext3
3301                                                          - 16: gmx#.csr.gmi0.srf8x64m1_bist
3302                                                          - 17: gmx#.csr.gmi1.srf8x64m1_bist
3303                                                          - 18: gmx#.csr.gmi2.srf8x64m1_bist
3304                                                          - 19: gmx#.csr.gmi3.srf8x64m1_bist
3305                                                          - 20: gmx#.csr.drf20x32m2_bist
3306                                                          - 21: gmx#.csr.drf20x48m2_bist
3307                                                          - 22: gmx#.outb.stat.drf16x27m1_bist
3308                                                          - 23: gmx#.outb.stat.drf40x64m1_bist
3309                                                          - 24: xgmii.tx.drf16x38m1_async_bist */
3310 #else
3311 	uint64_t status                       : 25;
3312 	uint64_t reserved_25_63               : 39;
3313 #endif
3314 	} s;
3315 	struct cvmx_gmxx_bist_cn30xx {
3316 #ifdef __BIG_ENDIAN_BITFIELD
3317 	uint64_t reserved_10_63               : 54;
3318 	uint64_t status                       : 10; /**< BIST Results.
3319                                                           HW sets a bit in BIST for for memory that fails
3320                                                          - 0: gmx#.inb.dpr512x78m4_bist
3321                                                          - 1: gmx#.outb.fif.dpr512x71m4_bist
3322                                                          - 2: gmx#.csr.gmi0.srf8x64m1_bist
3323                                                          - 3: gmx#.csr.gmi1.srf8x64m1_bist
3324                                                          - 4: gmx#.csr.gmi2.srf8x64m1_bist
3325                                                          - 5: 0
3326                                                          - 6: gmx#.csr.drf20x80m1_bist
3327                                                          - 7: gmx#.outb.stat.drf16x27m1_bist
3328                                                          - 8: gmx#.outb.stat.drf40x64m1_bist
3329                                                          - 9: 0 */
3330 #else
3331 	uint64_t status                       : 10;
3332 	uint64_t reserved_10_63               : 54;
3333 #endif
3334 	} cn30xx;
3335 	struct cvmx_gmxx_bist_cn30xx          cn31xx;
3336 	struct cvmx_gmxx_bist_cn30xx          cn38xx;
3337 	struct cvmx_gmxx_bist_cn30xx          cn38xxp2;
3338 	struct cvmx_gmxx_bist_cn50xx {
3339 #ifdef __BIG_ENDIAN_BITFIELD
3340 	uint64_t reserved_12_63               : 52;
3341 	uint64_t status                       : 12; /**< BIST Results.
3342                                                          HW sets a bit in BIST for for memory that fails */
3343 #else
3344 	uint64_t status                       : 12;
3345 	uint64_t reserved_12_63               : 52;
3346 #endif
3347 	} cn50xx;
3348 	struct cvmx_gmxx_bist_cn52xx {
3349 #ifdef __BIG_ENDIAN_BITFIELD
3350 	uint64_t reserved_16_63               : 48;
3351 	uint64_t status                       : 16; /**< BIST Results.
3352                                                          HW sets a bit in BIST for for memory that fails
3353                                                          - 0: gmx#.inb.fif_bnk0
3354                                                          - 1: gmx#.inb.fif_bnk1
3355                                                          - 2: gmx#.inb.fif_bnk2
3356                                                          - 3: gmx#.inb.fif_bnk3
3357                                                          - 4: gmx#.outb.fif.fif_bnk0
3358                                                          - 5: gmx#.outb.fif.fif_bnk1
3359                                                          - 6: gmx#.outb.fif.fif_bnk2
3360                                                          - 7: gmx#.outb.fif.fif_bnk3
3361                                                          - 8: gmx#.csr.gmi0.srf8x64m1_bist
3362                                                          - 9: gmx#.csr.gmi1.srf8x64m1_bist
3363                                                          - 10: gmx#.csr.gmi2.srf8x64m1_bist
3364                                                          - 11: gmx#.csr.gmi3.srf8x64m1_bist
3365                                                          - 12: gmx#.csr.drf20x80m1_bist
3366                                                          - 13: gmx#.outb.stat.drf16x27m1_bist
3367                                                          - 14: gmx#.outb.stat.drf40x64m1_bist
3368                                                          - 15: xgmii.tx.drf16x38m1_async_bist */
3369 #else
3370 	uint64_t status                       : 16;
3371 	uint64_t reserved_16_63               : 48;
3372 #endif
3373 	} cn52xx;
3374 	struct cvmx_gmxx_bist_cn52xx          cn52xxp1;
3375 	struct cvmx_gmxx_bist_cn52xx          cn56xx;
3376 	struct cvmx_gmxx_bist_cn52xx          cn56xxp1;
3377 	struct cvmx_gmxx_bist_cn58xx {
3378 #ifdef __BIG_ENDIAN_BITFIELD
3379 	uint64_t reserved_17_63               : 47;
3380 	uint64_t status                       : 17; /**< BIST Results.
3381                                                          HW sets a bit in BIST for for memory that fails
3382                                                          - 0: gmx#.inb.fif_bnk0
3383                                                          - 1: gmx#.inb.fif_bnk1
3384                                                          - 2: gmx#.inb.fif_bnk2
3385                                                          - 3: gmx#.inb.fif_bnk3
3386                                                          - 4: gmx#.outb.fif.fif_bnk0
3387                                                          - 5: gmx#.outb.fif.fif_bnk1
3388                                                          - 6: gmx#.outb.fif.fif_bnk2
3389                                                          - 7: gmx#.outb.fif.fif_bnk3
3390                                                          - 8: gmx#.csr.gmi0.srf8x64m1_bist
3391                                                          - 9: gmx#.csr.gmi1.srf8x64m1_bist
3392                                                          - 10: gmx#.csr.gmi2.srf8x64m1_bist
3393                                                          - 11: gmx#.csr.gmi3.srf8x64m1_bist
3394                                                          - 12: gmx#.csr.drf20x80m1_bist
3395                                                          - 13: gmx#.outb.stat.drf16x27m1_bist
3396                                                          - 14: gmx#.outb.stat.drf40x64m1_bist
3397                                                          - 15: gmx#.outb.ncb.drf16x76m1_bist
3398                                                          - 16: gmx#.outb.fif.srf32x16m2_bist */
3399 #else
3400 	uint64_t status                       : 17;
3401 	uint64_t reserved_17_63               : 47;
3402 #endif
3403 	} cn58xx;
3404 	struct cvmx_gmxx_bist_cn58xx          cn58xxp1;
3405 	struct cvmx_gmxx_bist_s               cn61xx;
3406 	struct cvmx_gmxx_bist_s               cn63xx;
3407 	struct cvmx_gmxx_bist_s               cn63xxp1;
3408 	struct cvmx_gmxx_bist_s               cn66xx;
3409 	struct cvmx_gmxx_bist_s               cn68xx;
3410 	struct cvmx_gmxx_bist_s               cn68xxp1;
3411 	struct cvmx_gmxx_bist_s               cnf71xx;
3412 };
3413 typedef union cvmx_gmxx_bist cvmx_gmxx_bist_t;
3414 
3415 /**
3416  * cvmx_gmx#_bpid_map#
3417  *
3418  * Notes:
3419  * GMX will build BPID_VECTOR<15:0> using the 16 GMX_BPID_MAP entries and the BPID
3420  * state from IPD.  In XAUI/RXAUI mode when PFC/CBFC/HiGig2 is used, the
3421  * BPID_VECTOR becomes the logical backpressure.  In XAUI/RXAUI mode when
3422  * PFC/CBFC/HiGig2 is not used or when in 4xSGMII mode, the BPID_VECTOR can be used
3423  * with the GMX_BPID_MSK register to determine the physical backpressure.
3424  *
3425  * In XAUI/RXAUI mode, the entire BPID_VECTOR<15:0> is available determining physical
3426  * backpressure for the single XAUI/RXAUI interface.
3427  *
3428  * In SGMII mode, BPID_VECTOR is broken up as follows:
3429  *    SGMII interface0 uses BPID_VECTOR<3:0>
3430  *    SGMII interface1 uses BPID_VECTOR<7:4>
3431  *    SGMII interface2 uses BPID_VECTOR<11:8>
3432  *    SGMII interface3 uses BPID_VECTOR<15:12>
3433  *
3434  * In all SGMII configurations, and in some XAUI/RXAUI configurations, the
3435  * interface protocols only support physical backpressure. In these cases, a single
3436  * BPID will commonly drive the physical backpressure for the physical
3437  * interface. We provide example programmings for these simple cases.
3438  *
3439  * In XAUI/RXAUI mode where PFC/CBFC/HiGig2 is not used, an example programming
3440  * would be as follows:
3441  *
3442  *    @verbatim
3443  *    GMX_BPID_MAP0[VAL]    = 1;
3444  *    GMX_BPID_MAP0[BPID]   = xaui_bpid;
3445  *    GMX_BPID_MSK[MSK_OR]  = 1;
3446  *    GMX_BPID_MSK[MSK_AND] = 0;
3447  *    @endverbatim
3448  *
3449  * In SGMII mode, an example programming would be as follows:
3450  *
3451  *    @verbatim
3452  *    for (i=0; i<4; i++) [
3453  *       if (GMX_PRTi_CFG[EN]) [
3454  *          GMX_BPID_MAP(i*4)[VAL]    = 1;
3455  *          GMX_BPID_MAP(i*4)[BPID]   = sgmii_bpid(i);
3456  *          GMX_BPID_MSK[MSK_OR]      = (1 << (i*4)) | GMX_BPID_MSK[MSK_OR];
3457  *       ]
3458  *    ]
3459  *    GMX_BPID_MSK[MSK_AND] = 0;
3460  *    @endverbatim
3461  */
3462 union cvmx_gmxx_bpid_mapx {
3463 	uint64_t u64;
3464 	struct cvmx_gmxx_bpid_mapx_s {
3465 #ifdef __BIG_ENDIAN_BITFIELD
3466 	uint64_t reserved_17_63               : 47;
3467 	uint64_t status                       : 1;  /**< Current received BP from IPD */
3468 	uint64_t reserved_9_15                : 7;
3469 	uint64_t val                          : 1;  /**< Table entry is valid */
3470 	uint64_t reserved_6_7                 : 2;
3471 	uint64_t bpid                         : 6;  /**< Backpressure ID the entry maps to */
3472 #else
3473 	uint64_t bpid                         : 6;
3474 	uint64_t reserved_6_7                 : 2;
3475 	uint64_t val                          : 1;
3476 	uint64_t reserved_9_15                : 7;
3477 	uint64_t status                       : 1;
3478 	uint64_t reserved_17_63               : 47;
3479 #endif
3480 	} s;
3481 	struct cvmx_gmxx_bpid_mapx_s          cn68xx;
3482 	struct cvmx_gmxx_bpid_mapx_s          cn68xxp1;
3483 };
3484 typedef union cvmx_gmxx_bpid_mapx cvmx_gmxx_bpid_mapx_t;
3485 
3486 /**
3487  * cvmx_gmx#_bpid_msk
3488  */
3489 union cvmx_gmxx_bpid_msk {
3490 	uint64_t u64;
3491 	struct cvmx_gmxx_bpid_msk_s {
3492 #ifdef __BIG_ENDIAN_BITFIELD
3493 	uint64_t reserved_48_63               : 16;
3494 	uint64_t msk_or                       : 16; /**< Assert physical BP when the backpressure ID vector
3495                                                          combined with MSK_OR indicates BP as follows.
3496                                                          phys_bp_msk_or =
3497                                                           (BPID_VECTOR<x:y> & MSK_OR<x:y>) != 0
3498                                                          phys_bp = phys_bp_msk_or || phys_bp_msk_and
3499                                                          In XAUI/RXAUI mode, x=15, y=0
3500                                                          In SGMII mode, x/y are set depending on the SGMII
3501                                                          interface.
3502                                                          SGMII interface0, x=3,  y=0
3503                                                          SGMII interface1, x=7,  y=4
3504                                                          SGMII interface2, x=11, y=8
3505                                                          SGMII interface3, x=15, y=12 */
3506 	uint64_t reserved_16_31               : 16;
3507 	uint64_t msk_and                      : 16; /**< Assert physical BP when the backpressure ID vector
3508                                                          combined with MSK_AND indicates BP as follows.
3509                                                          phys_bp_msk_and =
3510                                                           (BPID_VECTOR<x:y> & MSK_AND<x:y>) == MSK_AND<x:y>
3511                                                          phys_bp = phys_bp_msk_or || phys_bp_msk_and
3512                                                          In XAUI/RXAUI mode, x=15, y=0
3513                                                          In SGMII mode, x/y are set depending on the SGMII
3514                                                          interface.
3515                                                          SGMII interface0, x=3,  y=0
3516                                                          SGMII interface1, x=7,  y=4
3517                                                          SGMII interface2, x=11, y=8
3518                                                          SGMII interface3, x=15, y=12 */
3519 #else
3520 	uint64_t msk_and                      : 16;
3521 	uint64_t reserved_16_31               : 16;
3522 	uint64_t msk_or                       : 16;
3523 	uint64_t reserved_48_63               : 16;
3524 #endif
3525 	} s;
3526 	struct cvmx_gmxx_bpid_msk_s           cn68xx;
3527 	struct cvmx_gmxx_bpid_msk_s           cn68xxp1;
3528 };
3529 typedef union cvmx_gmxx_bpid_msk cvmx_gmxx_bpid_msk_t;
3530 
3531 /**
3532  * cvmx_gmx#_clk_en
3533  *
3534  * DON'T PUT IN HRM*
3535  *
3536  */
3537 union cvmx_gmxx_clk_en {
3538 	uint64_t u64;
3539 	struct cvmx_gmxx_clk_en_s {
3540 #ifdef __BIG_ENDIAN_BITFIELD
3541 	uint64_t reserved_1_63                : 63;
3542 	uint64_t clk_en                       : 1;  /**< Force the clock enables on */
3543 #else
3544 	uint64_t clk_en                       : 1;
3545 	uint64_t reserved_1_63                : 63;
3546 #endif
3547 	} s;
3548 	struct cvmx_gmxx_clk_en_s             cn52xx;
3549 	struct cvmx_gmxx_clk_en_s             cn52xxp1;
3550 	struct cvmx_gmxx_clk_en_s             cn56xx;
3551 	struct cvmx_gmxx_clk_en_s             cn56xxp1;
3552 	struct cvmx_gmxx_clk_en_s             cn61xx;
3553 	struct cvmx_gmxx_clk_en_s             cn63xx;
3554 	struct cvmx_gmxx_clk_en_s             cn63xxp1;
3555 	struct cvmx_gmxx_clk_en_s             cn66xx;
3556 	struct cvmx_gmxx_clk_en_s             cn68xx;
3557 	struct cvmx_gmxx_clk_en_s             cn68xxp1;
3558 	struct cvmx_gmxx_clk_en_s             cnf71xx;
3559 };
3560 typedef union cvmx_gmxx_clk_en cvmx_gmxx_clk_en_t;
3561 
3562 /**
3563  * cvmx_gmx#_ebp_dis
3564  */
3565 union cvmx_gmxx_ebp_dis {
3566 	uint64_t u64;
3567 	struct cvmx_gmxx_ebp_dis_s {
3568 #ifdef __BIG_ENDIAN_BITFIELD
3569 	uint64_t reserved_16_63               : 48;
3570 	uint64_t dis                          : 16; /**< BP channel disable
3571                                                          GMX has the ability to remap unused channels
3572                                                          in order to get down to GMX_TX_PIPE[NUMP]
3573                                                          channels. */
3574 #else
3575 	uint64_t dis                          : 16;
3576 	uint64_t reserved_16_63               : 48;
3577 #endif
3578 	} s;
3579 	struct cvmx_gmxx_ebp_dis_s            cn68xx;
3580 	struct cvmx_gmxx_ebp_dis_s            cn68xxp1;
3581 };
3582 typedef union cvmx_gmxx_ebp_dis cvmx_gmxx_ebp_dis_t;
3583 
3584 /**
3585  * cvmx_gmx#_ebp_msk
3586  */
3587 union cvmx_gmxx_ebp_msk {
3588 	uint64_t u64;
3589 	struct cvmx_gmxx_ebp_msk_s {
3590 #ifdef __BIG_ENDIAN_BITFIELD
3591 	uint64_t reserved_16_63               : 48;
3592 	uint64_t msk                          : 16; /**< BP channel mask
3593                                                          GMX can completely ignore the channel BP for
3594                                                          channels specified by the MSK field.  Any channel
3595                                                          in which MSK == 1, will never send BP information
3596                                                          to PKO. */
3597 #else
3598 	uint64_t msk                          : 16;
3599 	uint64_t reserved_16_63               : 48;
3600 #endif
3601 	} s;
3602 	struct cvmx_gmxx_ebp_msk_s            cn68xx;
3603 	struct cvmx_gmxx_ebp_msk_s            cn68xxp1;
3604 };
3605 typedef union cvmx_gmxx_ebp_msk cvmx_gmxx_ebp_msk_t;
3606 
3607 /**
3608  * cvmx_gmx#_hg2_control
3609  *
3610  * Notes:
3611  * The HiGig2 TX and RX enable would normally be both set together for HiGig2 messaging. However
3612  * setting just the TX or RX bit will result in only the HG2 message transmit or the receive
3613  * capability.
3614  * PHYS_EN and LOGL_EN bits when 1, allow link pause or back pressure to PKO as per received
3615  * HiGig2 message. When 0, link pause and back pressure to PKO in response to received messages
3616  * are disabled.
3617  *
3618  * GMX*_TX_XAUI_CTL[HG_EN] must be set to one(to enable HiGig) whenever either HG2TX_EN or HG2RX_EN
3619  * are set.
3620  *
3621  * GMX*_RX0_UDD_SKP[LEN] must be set to 16 (to select HiGig2) whenever either HG2TX_EN or HG2RX_EN
3622  * are set.
3623  *
3624  * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero
3625  * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol when
3626  * GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by GMX*_TX_XAUI_CTL[HG_EN]=1
3627  * and GMX*_RX0_UDD_SKP[LEN]=16.) The HW can only auto-generate backpressure via HiGig2 messages
3628  * (optionally, when HG2TX_EN=1) with the HiGig2 protocol.
3629  */
3630 union cvmx_gmxx_hg2_control {
3631 	uint64_t u64;
3632 	struct cvmx_gmxx_hg2_control_s {
3633 #ifdef __BIG_ENDIAN_BITFIELD
3634 	uint64_t reserved_19_63               : 45;
3635 	uint64_t hg2tx_en                     : 1;  /**< Enable Transmission of HG2 phys and logl messages
3636                                                          When set, also disables HW auto-generated (802.3
3637                                                          and CBFC) pause frames. (OCTEON cannot generate
3638                                                          proper 802.3 or CBFC pause frames in HiGig2 mode.) */
3639 	uint64_t hg2rx_en                     : 1;  /**< Enable extraction and processing of HG2 message
3640                                                          packet from RX flow. Physical logical pause info
3641                                                          is used to pause physical link, back pressure PKO
3642                                                          HG2RX_EN must be set when HiGig2 messages are
3643                                                          present in the receive stream. */
3644 	uint64_t phys_en                      : 1;  /**< 1 bit physical link pause enable for recevied
3645                                                          HiGig2 physical pause message */
3646 	uint64_t logl_en                      : 16; /**< 16 bit xof enables for recevied HiGig2 messages
3647                                                          or CBFC packets */
3648 #else
3649 	uint64_t logl_en                      : 16;
3650 	uint64_t phys_en                      : 1;
3651 	uint64_t hg2rx_en                     : 1;
3652 	uint64_t hg2tx_en                     : 1;
3653 	uint64_t reserved_19_63               : 45;
3654 #endif
3655 	} s;
3656 	struct cvmx_gmxx_hg2_control_s        cn52xx;
3657 	struct cvmx_gmxx_hg2_control_s        cn52xxp1;
3658 	struct cvmx_gmxx_hg2_control_s        cn56xx;
3659 	struct cvmx_gmxx_hg2_control_s        cn61xx;
3660 	struct cvmx_gmxx_hg2_control_s        cn63xx;
3661 	struct cvmx_gmxx_hg2_control_s        cn63xxp1;
3662 	struct cvmx_gmxx_hg2_control_s        cn66xx;
3663 	struct cvmx_gmxx_hg2_control_s        cn68xx;
3664 	struct cvmx_gmxx_hg2_control_s        cn68xxp1;
3665 	struct cvmx_gmxx_hg2_control_s        cnf71xx;
3666 };
3667 typedef union cvmx_gmxx_hg2_control cvmx_gmxx_hg2_control_t;
3668 
3669 /**
3670  * cvmx_gmx#_inf_mode
3671  *
3672  * GMX_INF_MODE = Interface Mode
3673  *
3674  */
3675 union cvmx_gmxx_inf_mode {
3676 	uint64_t u64;
3677 	struct cvmx_gmxx_inf_mode_s {
3678 #ifdef __BIG_ENDIAN_BITFIELD
3679 	uint64_t reserved_20_63               : 44;
3680 	uint64_t rate                         : 4;  /**< SERDES speed rate
3681                                                          reset value is based on the QLM speed select
3682                                                          0 = 1.25  Gbaud
3683                                                          1 = 3.125 Gbaud
3684                                                          (only valid for GMX0 instance)
3685                                                          Software must not change RATE from its reset value */
3686 	uint64_t reserved_12_15               : 4;
3687 	uint64_t speed                        : 4;  /**< Interface Speed
3688                                                          QLM speed pins  which select reference clock
3689                                                          period and interface data rate.  If the QLM PLL
3690                                                          inputs are correct, the speed setting correspond
3691                                                          to the following data rates (in Gbaud).
3692                                                          0  = 5
3693                                                          1  = 2.5
3694                                                          2  = 2.5
3695                                                          3  = 1.25
3696                                                          4  = 1.25
3697                                                          5  = 6.25
3698                                                          6  = 5
3699                                                          7  = 2.5
3700                                                          8  = 3.125
3701                                                          9  = 2.5
3702                                                          10 = 1.25
3703                                                          11 = 5
3704                                                          12 = 6.25
3705                                                          13 = 3.75
3706                                                          14 = 3.125
3707                                                          15 = QLM disabled */
3708 	uint64_t reserved_7_7                 : 1;
3709 	uint64_t mode                         : 3;  /**< Interface Electrical Operating Mode
3710                                                          - 0: SGMII (v1.8)
3711                                                          - 1: XAUI (IEEE 802.3-2005) */
3712 	uint64_t reserved_3_3                 : 1;
3713 	uint64_t p0mii                        : 1;  /**< Port 0 Interface Mode
3714                                                          - 0: Port 0 is RGMII
3715                                                          - 1: Port 0 is MII */
3716 	uint64_t en                           : 1;  /**< Interface Enable
3717                                                          Must be set to enable the packet interface.
3718                                                          Should be enabled before any other requests to
3719                                                          GMX including enabling port back pressure with
3720                                                          IPD_CTL_STATUS[PBP_EN] */
3721 	uint64_t type                         : 1;  /**< Interface Protocol Type
3722                                                          - 0: SGMII/1000Base-X
3723                                                          - 1: XAUI */
3724 #else
3725 	uint64_t type                         : 1;
3726 	uint64_t en                           : 1;
3727 	uint64_t p0mii                        : 1;
3728 	uint64_t reserved_3_3                 : 1;
3729 	uint64_t mode                         : 3;
3730 	uint64_t reserved_7_7                 : 1;
3731 	uint64_t speed                        : 4;
3732 	uint64_t reserved_12_15               : 4;
3733 	uint64_t rate                         : 4;
3734 	uint64_t reserved_20_63               : 44;
3735 #endif
3736 	} s;
3737 	struct cvmx_gmxx_inf_mode_cn30xx {
3738 #ifdef __BIG_ENDIAN_BITFIELD
3739 	uint64_t reserved_3_63                : 61;
3740 	uint64_t p0mii                        : 1;  /**< Port 0 Interface Mode
3741                                                          - 0: Port 0 is RGMII
3742                                                          - 1: Port 0 is MII */
3743 	uint64_t en                           : 1;  /**< Interface Enable
3744                                                          Must be set to enable the packet interface.
3745                                                          Should be enabled before any other requests to
3746                                                          GMX including enabling port back pressure with
3747                                                          IPD_CTL_STATUS[PBP_EN] */
3748 	uint64_t type                         : 1;  /**< Port 1/2 Interface Mode
3749                                                          - 0: Ports 1 and 2 are RGMII
3750                                                          - 1: Port  1 is GMII/MII, Port 2 is unused
3751                                                              GMII/MII is selected by GMX_PRT1_CFG[SPEED] */
3752 #else
3753 	uint64_t type                         : 1;
3754 	uint64_t en                           : 1;
3755 	uint64_t p0mii                        : 1;
3756 	uint64_t reserved_3_63                : 61;
3757 #endif
3758 	} cn30xx;
3759 	struct cvmx_gmxx_inf_mode_cn31xx {
3760 #ifdef __BIG_ENDIAN_BITFIELD
3761 	uint64_t reserved_2_63                : 62;
3762 	uint64_t en                           : 1;  /**< Interface Enable
3763                                                          Must be set to enable the packet interface.
3764                                                          Should be enabled before any other requests to
3765                                                          GMX including enabling port back pressure with
3766                                                          IPD_CTL_STATUS[PBP_EN] */
3767 	uint64_t type                         : 1;  /**< Interface Mode
3768                                                          - 0: All three ports are RGMII ports
3769                                                          - 1: prt0 is RGMII, prt1 is GMII, and prt2 is unused */
3770 #else
3771 	uint64_t type                         : 1;
3772 	uint64_t en                           : 1;
3773 	uint64_t reserved_2_63                : 62;
3774 #endif
3775 	} cn31xx;
3776 	struct cvmx_gmxx_inf_mode_cn31xx      cn38xx;
3777 	struct cvmx_gmxx_inf_mode_cn31xx      cn38xxp2;
3778 	struct cvmx_gmxx_inf_mode_cn30xx      cn50xx;
3779 	struct cvmx_gmxx_inf_mode_cn52xx {
3780 #ifdef __BIG_ENDIAN_BITFIELD
3781 	uint64_t reserved_10_63               : 54;
3782 	uint64_t speed                        : 2;  /**< Interface Speed
3783                                                          - 0: 1.250GHz
3784                                                          - 1: 2.500GHz
3785                                                          - 2: 3.125GHz
3786                                                          - 3: 3.750GHz */
3787 	uint64_t reserved_6_7                 : 2;
3788 	uint64_t mode                         : 2;  /**< Interface Electrical Operating Mode
3789                                                          - 0: Disabled (PCIe)
3790                                                          - 1: XAUI (IEEE 802.3-2005)
3791                                                          - 2: SGMII (v1.8)
3792                                                          - 3: PICMG3.1 */
3793 	uint64_t reserved_2_3                 : 2;
3794 	uint64_t en                           : 1;  /**< Interface Enable
3795                                                          Must be set to enable the packet interface.
3796                                                          Should be enabled before any other requests to
3797                                                          GMX including enabling port back pressure with
3798                                                          IPD_CTL_STATUS[PBP_EN] */
3799 	uint64_t type                         : 1;  /**< Interface Protocol Type
3800                                                          - 0: SGMII/1000Base-X
3801                                                          - 1: XAUI */
3802 #else
3803 	uint64_t type                         : 1;
3804 	uint64_t en                           : 1;
3805 	uint64_t reserved_2_3                 : 2;
3806 	uint64_t mode                         : 2;
3807 	uint64_t reserved_6_7                 : 2;
3808 	uint64_t speed                        : 2;
3809 	uint64_t reserved_10_63               : 54;
3810 #endif
3811 	} cn52xx;
3812 	struct cvmx_gmxx_inf_mode_cn52xx      cn52xxp1;
3813 	struct cvmx_gmxx_inf_mode_cn52xx      cn56xx;
3814 	struct cvmx_gmxx_inf_mode_cn52xx      cn56xxp1;
3815 	struct cvmx_gmxx_inf_mode_cn31xx      cn58xx;
3816 	struct cvmx_gmxx_inf_mode_cn31xx      cn58xxp1;
3817 	struct cvmx_gmxx_inf_mode_cn61xx {
3818 #ifdef __BIG_ENDIAN_BITFIELD
3819 	uint64_t reserved_12_63               : 52;
3820 	uint64_t speed                        : 4;  /**< Interface Speed
3821                                                          QLM speed pins  which select reference clock
3822                                                          period and interface data rate.  If the QLM PLL
3823                                                          inputs are correct, the speed setting correspond
3824                                                          to the following data rates (in Gbaud).
3825                                                          0  = 5
3826                                                          1  = 2.5
3827                                                          2  = 2.5
3828                                                          3  = 1.25
3829                                                          4  = 1.25
3830                                                          5  = 6.25
3831                                                          6  = 5
3832                                                          7  = 2.5
3833                                                          8  = 3.125
3834                                                          9  = 2.5
3835                                                          10 = 1.25
3836                                                          11 = 5
3837                                                          12 = 6.25
3838                                                          13 = 3.75
3839                                                          14 = 3.125
3840                                                          15 = QLM disabled */
3841 	uint64_t reserved_5_7                 : 3;
3842 	uint64_t mode                         : 1;  /**< Interface Electrical Operating Mode
3843                                                          - 0: SGMII (v1.8)
3844                                                          - 1: XAUI (IEEE 802.3-2005) */
3845 	uint64_t reserved_2_3                 : 2;
3846 	uint64_t en                           : 1;  /**< Interface Enable
3847                                                          Must be set to enable the packet interface.
3848                                                          Should be enabled before any other requests to
3849                                                          GMX including enabling port back pressure with
3850                                                          IPD_CTL_STATUS[PBP_EN] */
3851 	uint64_t type                         : 1;  /**< Interface Protocol Type
3852                                                          - 0: SGMII/1000Base-X
3853                                                          - 1: XAUI */
3854 #else
3855 	uint64_t type                         : 1;
3856 	uint64_t en                           : 1;
3857 	uint64_t reserved_2_3                 : 2;
3858 	uint64_t mode                         : 1;
3859 	uint64_t reserved_5_7                 : 3;
3860 	uint64_t speed                        : 4;
3861 	uint64_t reserved_12_63               : 52;
3862 #endif
3863 	} cn61xx;
3864 	struct cvmx_gmxx_inf_mode_cn61xx      cn63xx;
3865 	struct cvmx_gmxx_inf_mode_cn61xx      cn63xxp1;
3866 	struct cvmx_gmxx_inf_mode_cn66xx {
3867 #ifdef __BIG_ENDIAN_BITFIELD
3868 	uint64_t reserved_20_63               : 44;
3869 	uint64_t rate                         : 4;  /**< SERDES speed rate
3870                                                          reset value is based on the QLM speed select
3871                                                          0 = 1.25  Gbaud
3872                                                          1 = 3.125 Gbaud
3873                                                          (only valid for GMX0 instance)
3874                                                          Software must not change RATE from its reset value */
3875 	uint64_t reserved_12_15               : 4;
3876 	uint64_t speed                        : 4;  /**< Interface Speed
3877                                                          QLM speed pins  which select reference clock
3878                                                          period and interface data rate.  If the QLM PLL
3879                                                          inputs are correct, the speed setting correspond
3880                                                          to the following data rates (in Gbaud).
3881                                                          0  = 5
3882                                                          1  = 2.5
3883                                                          2  = 2.5
3884                                                          3  = 1.25
3885                                                          4  = 1.25
3886                                                          5  = 6.25
3887                                                          6  = 5
3888                                                          7  = 2.5
3889                                                          8  = 3.125
3890                                                          9  = 2.5
3891                                                          10 = 1.25
3892                                                          11 = 5
3893                                                          12 = 6.25
3894                                                          13 = 3.75
3895                                                          14 = 3.125
3896                                                          15 = QLM disabled */
3897 	uint64_t reserved_5_7                 : 3;
3898 	uint64_t mode                         : 1;  /**< Interface Electrical Operating Mode
3899                                                          - 0: SGMII (v1.8)
3900                                                          - 1: XAUI (IEEE 802.3-2005) */
3901 	uint64_t reserved_2_3                 : 2;
3902 	uint64_t en                           : 1;  /**< Interface Enable
3903                                                          Must be set to enable the packet interface.
3904                                                          Should be enabled before any other requests to
3905                                                          GMX including enabling port back pressure with
3906                                                          IPD_CTL_STATUS[PBP_EN] */
3907 	uint64_t type                         : 1;  /**< Interface Protocol Type
3908                                                          - 0: SGMII/1000Base-X
3909                                                          - 1: XAUI */
3910 #else
3911 	uint64_t type                         : 1;
3912 	uint64_t en                           : 1;
3913 	uint64_t reserved_2_3                 : 2;
3914 	uint64_t mode                         : 1;
3915 	uint64_t reserved_5_7                 : 3;
3916 	uint64_t speed                        : 4;
3917 	uint64_t reserved_12_15               : 4;
3918 	uint64_t rate                         : 4;
3919 	uint64_t reserved_20_63               : 44;
3920 #endif
3921 	} cn66xx;
3922 	struct cvmx_gmxx_inf_mode_cn68xx {
3923 #ifdef __BIG_ENDIAN_BITFIELD
3924 	uint64_t reserved_12_63               : 52;
3925 	uint64_t speed                        : 4;  /**< Interface Speed
3926                                                          QLM speed pins  which select reference clock
3927                                                          period and interface data rate.  If the QLM PLL
3928                                                          inputs are correct, the speed setting correspond
3929                                                          to the following data rates (in Gbaud).
3930                                                          0  = 5
3931                                                          1  = 2.5
3932                                                          2  = 2.5
3933                                                          3  = 1.25
3934                                                          4  = 1.25
3935                                                          5  = 6.25
3936                                                          6  = 5
3937                                                          7  = 2.5
3938                                                          8  = 3.125
3939                                                          9  = 2.5
3940                                                          10 = 1.25
3941                                                          11 = 5
3942                                                          12 = 6.25
3943                                                          13 = 3.75
3944                                                          14 = 3.125
3945                                                          15 = QLM disabled */
3946 	uint64_t reserved_7_7                 : 1;
3947 	uint64_t mode                         : 3;  /**< Interface Electrical Operating Mode
3948                                                          - 0: Reserved
3949                                                          - 1: Reserved
3950                                                          - 2: SGMII (v1.8)
3951                                                          - 3: XAUI (IEEE 802.3-2005)
3952                                                          - 4: Reserved
3953                                                          - 5: Reserved
3954                                                          - 6: Reserved
3955                                                          - 7: RXAUI */
3956 	uint64_t reserved_2_3                 : 2;
3957 	uint64_t en                           : 1;  /**< Interface Enable
3958                                                                    Must be set to enable the packet interface.
3959                                                                    Should be enabled before any other requests to
3960                                                                    GMX including enabling port back pressure with
3961                                                          b         IPD_CTL_STATUS[PBP_EN] */
3962 	uint64_t type                         : 1;  /**< Interface Protocol Type
3963                                                          - 0: SGMII/1000Base-X
3964                                                          - 1: XAUI/RXAUI */
3965 #else
3966 	uint64_t type                         : 1;
3967 	uint64_t en                           : 1;
3968 	uint64_t reserved_2_3                 : 2;
3969 	uint64_t mode                         : 3;
3970 	uint64_t reserved_7_7                 : 1;
3971 	uint64_t speed                        : 4;
3972 	uint64_t reserved_12_63               : 52;
3973 #endif
3974 	} cn68xx;
3975 	struct cvmx_gmxx_inf_mode_cn68xx      cn68xxp1;
3976 	struct cvmx_gmxx_inf_mode_cn61xx      cnf71xx;
3977 };
3978 typedef union cvmx_gmxx_inf_mode cvmx_gmxx_inf_mode_t;
3979 
3980 /**
3981  * cvmx_gmx#_nxa_adr
3982  *
3983  * GMX_NXA_ADR = NXA Port Address
3984  *
3985  */
3986 union cvmx_gmxx_nxa_adr {
3987 	uint64_t u64;
3988 	struct cvmx_gmxx_nxa_adr_s {
3989 #ifdef __BIG_ENDIAN_BITFIELD
3990 	uint64_t reserved_23_63               : 41;
3991 	uint64_t pipe                         : 7;  /**< Logged pipe for NXP exceptions */
3992 	uint64_t reserved_6_15                : 10;
3993 	uint64_t prt                          : 6;  /**< Logged address for NXA exceptions
3994                                                          The logged address will be from the first
3995                                                          exception that caused the problem.  NCB has
3996                                                          higher priority than PKO and will win.
3997                                                          (only PRT[3:0]) */
3998 #else
3999 	uint64_t prt                          : 6;
4000 	uint64_t reserved_6_15                : 10;
4001 	uint64_t pipe                         : 7;
4002 	uint64_t reserved_23_63               : 41;
4003 #endif
4004 	} s;
4005 	struct cvmx_gmxx_nxa_adr_cn30xx {
4006 #ifdef __BIG_ENDIAN_BITFIELD
4007 	uint64_t reserved_6_63                : 58;
4008 	uint64_t prt                          : 6;  /**< Logged address for NXA exceptions
4009                                                          The logged address will be from the first
4010                                                          exception that caused the problem.  NCB has
4011                                                          higher priority than PKO and will win. */
4012 #else
4013 	uint64_t prt                          : 6;
4014 	uint64_t reserved_6_63                : 58;
4015 #endif
4016 	} cn30xx;
4017 	struct cvmx_gmxx_nxa_adr_cn30xx       cn31xx;
4018 	struct cvmx_gmxx_nxa_adr_cn30xx       cn38xx;
4019 	struct cvmx_gmxx_nxa_adr_cn30xx       cn38xxp2;
4020 	struct cvmx_gmxx_nxa_adr_cn30xx       cn50xx;
4021 	struct cvmx_gmxx_nxa_adr_cn30xx       cn52xx;
4022 	struct cvmx_gmxx_nxa_adr_cn30xx       cn52xxp1;
4023 	struct cvmx_gmxx_nxa_adr_cn30xx       cn56xx;
4024 	struct cvmx_gmxx_nxa_adr_cn30xx       cn56xxp1;
4025 	struct cvmx_gmxx_nxa_adr_cn30xx       cn58xx;
4026 	struct cvmx_gmxx_nxa_adr_cn30xx       cn58xxp1;
4027 	struct cvmx_gmxx_nxa_adr_cn30xx       cn61xx;
4028 	struct cvmx_gmxx_nxa_adr_cn30xx       cn63xx;
4029 	struct cvmx_gmxx_nxa_adr_cn30xx       cn63xxp1;
4030 	struct cvmx_gmxx_nxa_adr_cn30xx       cn66xx;
4031 	struct cvmx_gmxx_nxa_adr_s            cn68xx;
4032 	struct cvmx_gmxx_nxa_adr_s            cn68xxp1;
4033 	struct cvmx_gmxx_nxa_adr_cn30xx       cnf71xx;
4034 };
4035 typedef union cvmx_gmxx_nxa_adr cvmx_gmxx_nxa_adr_t;
4036 
4037 /**
4038  * cvmx_gmx#_pipe_status
4039  *
4040  * DON'T PUT IN HRM*
4041  *
4042  */
4043 union cvmx_gmxx_pipe_status {
4044 	uint64_t u64;
4045 	struct cvmx_gmxx_pipe_status_s {
4046 #ifdef __BIG_ENDIAN_BITFIELD
4047 	uint64_t reserved_20_63               : 44;
4048 	uint64_t ovr                          : 4;  /**< Pipe credit return FIFO has overflowed. */
4049 	uint64_t reserved_12_15               : 4;
4050 	uint64_t bp                           : 4;  /**< Pipe credit return FIFO has filled up and asserted
4051                                                          backpressure to the datapath. */
4052 	uint64_t reserved_4_7                 : 4;
4053 	uint64_t stop                         : 4;  /**< PKO has asserted backpressure on the pipe credit
4054                                                          return interface. */
4055 #else
4056 	uint64_t stop                         : 4;
4057 	uint64_t reserved_4_7                 : 4;
4058 	uint64_t bp                           : 4;
4059 	uint64_t reserved_12_15               : 4;
4060 	uint64_t ovr                          : 4;
4061 	uint64_t reserved_20_63               : 44;
4062 #endif
4063 	} s;
4064 	struct cvmx_gmxx_pipe_status_s        cn68xx;
4065 	struct cvmx_gmxx_pipe_status_s        cn68xxp1;
4066 };
4067 typedef union cvmx_gmxx_pipe_status cvmx_gmxx_pipe_status_t;
4068 
4069 /**
4070  * cvmx_gmx#_prt#_cbfc_ctl
4071  *
4072  * ** HG2 message CSRs end
4073  *
4074  *
4075  * Notes:
4076  * XOFF for a specific port is XOFF<prt> = (PHYS_EN<prt> & PHYS_BP) | (LOGL_EN<prt> & LOGL_BP<prt>)
4077  *
4078  */
4079 union cvmx_gmxx_prtx_cbfc_ctl {
4080 	uint64_t u64;
4081 	struct cvmx_gmxx_prtx_cbfc_ctl_s {
4082 #ifdef __BIG_ENDIAN_BITFIELD
4083 	uint64_t phys_en                      : 16; /**< Determines which ports will have physical
4084                                                          backpressure pause packets.
4085                                                          The value pplaced in the Class Enable Vector
4086                                                          field of the CBFC pause packet will be
4087                                                          PHYS_EN | LOGL_EN */
4088 	uint64_t logl_en                      : 16; /**< Determines which ports will have logical
4089                                                          backpressure pause packets.
4090                                                          The value pplaced in the Class Enable Vector
4091                                                          field of the CBFC pause packet will be
4092                                                          PHYS_EN | LOGL_EN */
4093 	uint64_t phys_bp                      : 16; /**< When RX_EN is set and the HW is backpressuring any
4094                                                          ports (from either CBFC pause packets or the
4095                                                          GMX_TX_OVR_BP[TX_PRT_BP] register) and all ports
4096                                                          indiciated by PHYS_BP are backpressured, simulate
4097                                                          physical backpressure by defering all packets on
4098                                                          the transmitter. */
4099 	uint64_t reserved_4_15                : 12;
4100 	uint64_t bck_en                       : 1;  /**< Forward CBFC Pause information to BP block */
4101 	uint64_t drp_en                       : 1;  /**< Drop Control CBFC Pause Frames */
4102 	uint64_t tx_en                        : 1;  /**< When set, allow for CBFC Pause Packets
4103                                                          Must be clear in HiGig2 mode i.e. when
4104                                                          GMX_TX_XAUI_CTL[HG_EN]=1 and
4105                                                          GMX_RX_UDD_SKP[SKIP]=16. */
4106 	uint64_t rx_en                        : 1;  /**< When set, allow for CBFC Pause Packets
4107                                                          Must be clear in HiGig2 mode i.e. when
4108                                                          GMX_TX_XAUI_CTL[HG_EN]=1 and
4109                                                          GMX_RX_UDD_SKP[SKIP]=16. */
4110 #else
4111 	uint64_t rx_en                        : 1;
4112 	uint64_t tx_en                        : 1;
4113 	uint64_t drp_en                       : 1;
4114 	uint64_t bck_en                       : 1;
4115 	uint64_t reserved_4_15                : 12;
4116 	uint64_t phys_bp                      : 16;
4117 	uint64_t logl_en                      : 16;
4118 	uint64_t phys_en                      : 16;
4119 #endif
4120 	} s;
4121 	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn52xx;
4122 	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn56xx;
4123 	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn61xx;
4124 	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn63xx;
4125 	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn63xxp1;
4126 	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn66xx;
4127 	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn68xx;
4128 	struct cvmx_gmxx_prtx_cbfc_ctl_s      cn68xxp1;
4129 	struct cvmx_gmxx_prtx_cbfc_ctl_s      cnf71xx;
4130 };
4131 typedef union cvmx_gmxx_prtx_cbfc_ctl cvmx_gmxx_prtx_cbfc_ctl_t;
4132 
4133 /**
4134  * cvmx_gmx#_prt#_cfg
4135  *
4136  * GMX_PRT_CFG = Port description
4137  *
4138  */
4139 union cvmx_gmxx_prtx_cfg {
4140 	uint64_t u64;
4141 	struct cvmx_gmxx_prtx_cfg_s {
4142 #ifdef __BIG_ENDIAN_BITFIELD
4143 	uint64_t reserved_22_63               : 42;
4144 	uint64_t pknd                         : 6;  /**< Port Kind used for processing the packet by PKI */
4145 	uint64_t reserved_14_15               : 2;
4146 	uint64_t tx_idle                      : 1;  /**< TX Machine is idle */
4147 	uint64_t rx_idle                      : 1;  /**< RX Machine is idle */
4148 	uint64_t reserved_9_11                : 3;
4149 	uint64_t speed_msb                    : 1;  /**< Link Speed MSB [SPEED_MSB:SPEED]
4150                                                          10 = 10Mbs operation
4151                                                          00 = 100Mbs operation
4152                                                          01 = 1000Mbs operation
4153                                                          11 = Reserved
4154                                                          (SGMII/1000Base-X only) */
4155 	uint64_t reserved_4_7                 : 4;
4156 	uint64_t slottime                     : 1;  /**< Slot Time for Half-Duplex operation
4157                                                          0 = 512 bitimes (10/100Mbs operation)
4158                                                          1 = 4096 bitimes (1000Mbs operation)
4159                                                          (SGMII/1000Base-X only) */
4160 	uint64_t duplex                       : 1;  /**< Duplex
4161                                                          0 = Half Duplex (collisions/extentions/bursts)
4162                                                          1 = Full Duplex
4163                                                          (SGMII/1000Base-X only) */
4164 	uint64_t speed                        : 1;  /**< Link Speed LSB [SPEED_MSB:SPEED]
4165                                                          10 = 10Mbs operation
4166                                                          00 = 100Mbs operation
4167                                                          01 = 1000Mbs operation
4168                                                          11 = Reserved
4169                                                          (SGMII/1000Base-X only) */
4170 	uint64_t en                           : 1;  /**< Link Enable
4171                                                          When EN is clear, packets will not be received
4172                                                          or transmitted (including PAUSE and JAM packets).
4173                                                          If EN is cleared while a packet is currently
4174                                                          being received or transmitted, the packet will
4175                                                          be allowed to complete before the bus is idled.
4176                                                          On the RX side, subsequent packets in a burst
4177                                                          will be ignored. */
4178 #else
4179 	uint64_t en                           : 1;
4180 	uint64_t speed                        : 1;
4181 	uint64_t duplex                       : 1;
4182 	uint64_t slottime                     : 1;
4183 	uint64_t reserved_4_7                 : 4;
4184 	uint64_t speed_msb                    : 1;
4185 	uint64_t reserved_9_11                : 3;
4186 	uint64_t rx_idle                      : 1;
4187 	uint64_t tx_idle                      : 1;
4188 	uint64_t reserved_14_15               : 2;
4189 	uint64_t pknd                         : 6;
4190 	uint64_t reserved_22_63               : 42;
4191 #endif
4192 	} s;
4193 	struct cvmx_gmxx_prtx_cfg_cn30xx {
4194 #ifdef __BIG_ENDIAN_BITFIELD
4195 	uint64_t reserved_4_63                : 60;
4196 	uint64_t slottime                     : 1;  /**< Slot Time for Half-Duplex operation
4197                                                          0 = 512 bitimes (10/100Mbs operation)
4198                                                          1 = 4096 bitimes (1000Mbs operation) */
4199 	uint64_t duplex                       : 1;  /**< Duplex
4200                                                          0 = Half Duplex (collisions/extentions/bursts)
4201                                                          1 = Full Duplex */
4202 	uint64_t speed                        : 1;  /**< Link Speed
4203                                                          0 = 10/100Mbs operation
4204                                                              (in RGMII mode, GMX_TX_CLK[CLK_CNT] >  1)
4205                                                              (in MII   mode, GMX_TX_CLK[CLK_CNT] == 1)
4206                                                          1 = 1000Mbs operation */
4207 	uint64_t en                           : 1;  /**< Link Enable
4208                                                          When EN is clear, packets will not be received
4209                                                          or transmitted (including PAUSE and JAM packets).
4210                                                          If EN is cleared while a packet is currently
4211                                                          being received or transmitted, the packet will
4212                                                          be allowed to complete before the bus is idled.
4213                                                          On the RX side, subsequent packets in a burst
4214                                                          will be ignored. */
4215 #else
4216 	uint64_t en                           : 1;
4217 	uint64_t speed                        : 1;
4218 	uint64_t duplex                       : 1;
4219 	uint64_t slottime                     : 1;
4220 	uint64_t reserved_4_63                : 60;
4221 #endif
4222 	} cn30xx;
4223 	struct cvmx_gmxx_prtx_cfg_cn30xx      cn31xx;
4224 	struct cvmx_gmxx_prtx_cfg_cn30xx      cn38xx;
4225 	struct cvmx_gmxx_prtx_cfg_cn30xx      cn38xxp2;
4226 	struct cvmx_gmxx_prtx_cfg_cn30xx      cn50xx;
4227 	struct cvmx_gmxx_prtx_cfg_cn52xx {
4228 #ifdef __BIG_ENDIAN_BITFIELD
4229 	uint64_t reserved_14_63               : 50;
4230 	uint64_t tx_idle                      : 1;  /**< TX Machine is idle */
4231 	uint64_t rx_idle                      : 1;  /**< RX Machine is idle */
4232 	uint64_t reserved_9_11                : 3;
4233 	uint64_t speed_msb                    : 1;  /**< Link Speed MSB [SPEED_MSB:SPEED]
4234                                                          10 = 10Mbs operation
4235                                                          00 = 100Mbs operation
4236                                                          01 = 1000Mbs operation
4237                                                          11 = Reserved
4238                                                          (SGMII/1000Base-X only) */
4239 	uint64_t reserved_4_7                 : 4;
4240 	uint64_t slottime                     : 1;  /**< Slot Time for Half-Duplex operation
4241                                                          0 = 512 bitimes (10/100Mbs operation)
4242                                                          1 = 4096 bitimes (1000Mbs operation)
4243                                                          (SGMII/1000Base-X only) */
4244 	uint64_t duplex                       : 1;  /**< Duplex
4245                                                          0 = Half Duplex (collisions/extentions/bursts)
4246                                                          1 = Full Duplex
4247                                                          (SGMII/1000Base-X only) */
4248 	uint64_t speed                        : 1;  /**< Link Speed LSB [SPEED_MSB:SPEED]
4249                                                          10 = 10Mbs operation
4250                                                          00 = 100Mbs operation
4251                                                          01 = 1000Mbs operation
4252                                                          11 = Reserved
4253                                                          (SGMII/1000Base-X only) */
4254 	uint64_t en                           : 1;  /**< Link Enable
4255                                                          When EN is clear, packets will not be received
4256                                                          or transmitted (including PAUSE and JAM packets).
4257                                                          If EN is cleared while a packet is currently
4258                                                          being received or transmitted, the packet will
4259                                                          be allowed to complete before the bus is idled.
4260                                                          On the RX side, subsequent packets in a burst
4261                                                          will be ignored. */
4262 #else
4263 	uint64_t en                           : 1;
4264 	uint64_t speed                        : 1;
4265 	uint64_t duplex                       : 1;
4266 	uint64_t slottime                     : 1;
4267 	uint64_t reserved_4_7                 : 4;
4268 	uint64_t speed_msb                    : 1;
4269 	uint64_t reserved_9_11                : 3;
4270 	uint64_t rx_idle                      : 1;
4271 	uint64_t tx_idle                      : 1;
4272 	uint64_t reserved_14_63               : 50;
4273 #endif
4274 	} cn52xx;
4275 	struct cvmx_gmxx_prtx_cfg_cn52xx      cn52xxp1;
4276 	struct cvmx_gmxx_prtx_cfg_cn52xx      cn56xx;
4277 	struct cvmx_gmxx_prtx_cfg_cn52xx      cn56xxp1;
4278 	struct cvmx_gmxx_prtx_cfg_cn30xx      cn58xx;
4279 	struct cvmx_gmxx_prtx_cfg_cn30xx      cn58xxp1;
4280 	struct cvmx_gmxx_prtx_cfg_cn52xx      cn61xx;
4281 	struct cvmx_gmxx_prtx_cfg_cn52xx      cn63xx;
4282 	struct cvmx_gmxx_prtx_cfg_cn52xx      cn63xxp1;
4283 	struct cvmx_gmxx_prtx_cfg_cn52xx      cn66xx;
4284 	struct cvmx_gmxx_prtx_cfg_s           cn68xx;
4285 	struct cvmx_gmxx_prtx_cfg_s           cn68xxp1;
4286 	struct cvmx_gmxx_prtx_cfg_cn52xx      cnf71xx;
4287 };
4288 typedef union cvmx_gmxx_prtx_cfg cvmx_gmxx_prtx_cfg_t;
4289 
4290 /**
4291  * cvmx_gmx#_rx#_adr_cam0
4292  *
4293  * GMX_RX_ADR_CAM = Address Filtering Control
4294  *
4295  */
4296 union cvmx_gmxx_rxx_adr_cam0 {
4297 	uint64_t u64;
4298 	struct cvmx_gmxx_rxx_adr_cam0_s {
4299 #ifdef __BIG_ENDIAN_BITFIELD
4300 	uint64_t adr                          : 64; /**< The DMAC address to match on
4301 
4302                                                          Each entry contributes 8bits to one of 8 matchers.
4303                                                          The CAM matches against unicst or multicst DMAC
4304                                                          addresses.
4305 
4306                                                          ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
4307                                                          in either SGMII or XAUI mode such that any GMX
4308                                                          MAC can use any of the 32 common DMAC entries.
4309 
4310                                                          GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
4311                                                          registers used in XAUI mode. */
4312 #else
4313 	uint64_t adr                          : 64;
4314 #endif
4315 	} s;
4316 	struct cvmx_gmxx_rxx_adr_cam0_s       cn30xx;
4317 	struct cvmx_gmxx_rxx_adr_cam0_s       cn31xx;
4318 	struct cvmx_gmxx_rxx_adr_cam0_s       cn38xx;
4319 	struct cvmx_gmxx_rxx_adr_cam0_s       cn38xxp2;
4320 	struct cvmx_gmxx_rxx_adr_cam0_s       cn50xx;
4321 	struct cvmx_gmxx_rxx_adr_cam0_s       cn52xx;
4322 	struct cvmx_gmxx_rxx_adr_cam0_s       cn52xxp1;
4323 	struct cvmx_gmxx_rxx_adr_cam0_s       cn56xx;
4324 	struct cvmx_gmxx_rxx_adr_cam0_s       cn56xxp1;
4325 	struct cvmx_gmxx_rxx_adr_cam0_s       cn58xx;
4326 	struct cvmx_gmxx_rxx_adr_cam0_s       cn58xxp1;
4327 	struct cvmx_gmxx_rxx_adr_cam0_s       cn61xx;
4328 	struct cvmx_gmxx_rxx_adr_cam0_s       cn63xx;
4329 	struct cvmx_gmxx_rxx_adr_cam0_s       cn63xxp1;
4330 	struct cvmx_gmxx_rxx_adr_cam0_s       cn66xx;
4331 	struct cvmx_gmxx_rxx_adr_cam0_s       cn68xx;
4332 	struct cvmx_gmxx_rxx_adr_cam0_s       cn68xxp1;
4333 	struct cvmx_gmxx_rxx_adr_cam0_s       cnf71xx;
4334 };
4335 typedef union cvmx_gmxx_rxx_adr_cam0 cvmx_gmxx_rxx_adr_cam0_t;
4336 
4337 /**
4338  * cvmx_gmx#_rx#_adr_cam1
4339  *
4340  * GMX_RX_ADR_CAM = Address Filtering Control
4341  *
4342  */
4343 union cvmx_gmxx_rxx_adr_cam1 {
4344 	uint64_t u64;
4345 	struct cvmx_gmxx_rxx_adr_cam1_s {
4346 #ifdef __BIG_ENDIAN_BITFIELD
4347 	uint64_t adr                          : 64; /**< The DMAC address to match on
4348 
4349                                                          Each entry contributes 8bits to one of 8 matchers.
4350                                                          The CAM matches against unicst or multicst DMAC
4351                                                          addresses.
4352 
4353                                                          ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
4354                                                          in either SGMII or XAUI mode such that any GMX
4355                                                          MAC can use any of the 32 common DMAC entries.
4356 
4357                                                          GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
4358                                                          registers used in XAUI mode. */
4359 #else
4360 	uint64_t adr                          : 64;
4361 #endif
4362 	} s;
4363 	struct cvmx_gmxx_rxx_adr_cam1_s       cn30xx;
4364 	struct cvmx_gmxx_rxx_adr_cam1_s       cn31xx;
4365 	struct cvmx_gmxx_rxx_adr_cam1_s       cn38xx;
4366 	struct cvmx_gmxx_rxx_adr_cam1_s       cn38xxp2;
4367 	struct cvmx_gmxx_rxx_adr_cam1_s       cn50xx;
4368 	struct cvmx_gmxx_rxx_adr_cam1_s       cn52xx;
4369 	struct cvmx_gmxx_rxx_adr_cam1_s       cn52xxp1;
4370 	struct cvmx_gmxx_rxx_adr_cam1_s       cn56xx;
4371 	struct cvmx_gmxx_rxx_adr_cam1_s       cn56xxp1;
4372 	struct cvmx_gmxx_rxx_adr_cam1_s       cn58xx;
4373 	struct cvmx_gmxx_rxx_adr_cam1_s       cn58xxp1;
4374 	struct cvmx_gmxx_rxx_adr_cam1_s       cn61xx;
4375 	struct cvmx_gmxx_rxx_adr_cam1_s       cn63xx;
4376 	struct cvmx_gmxx_rxx_adr_cam1_s       cn63xxp1;
4377 	struct cvmx_gmxx_rxx_adr_cam1_s       cn66xx;
4378 	struct cvmx_gmxx_rxx_adr_cam1_s       cn68xx;
4379 	struct cvmx_gmxx_rxx_adr_cam1_s       cn68xxp1;
4380 	struct cvmx_gmxx_rxx_adr_cam1_s       cnf71xx;
4381 };
4382 typedef union cvmx_gmxx_rxx_adr_cam1 cvmx_gmxx_rxx_adr_cam1_t;
4383 
4384 /**
4385  * cvmx_gmx#_rx#_adr_cam2
4386  *
4387  * GMX_RX_ADR_CAM = Address Filtering Control
4388  *
4389  */
4390 union cvmx_gmxx_rxx_adr_cam2 {
4391 	uint64_t u64;
4392 	struct cvmx_gmxx_rxx_adr_cam2_s {
4393 #ifdef __BIG_ENDIAN_BITFIELD
4394 	uint64_t adr                          : 64; /**< The DMAC address to match on
4395 
4396                                                          Each entry contributes 8bits to one of 8 matchers.
4397                                                          The CAM matches against unicst or multicst DMAC
4398                                                          addresses.
4399 
4400                                                          ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
4401                                                          in either SGMII or XAUI mode such that any GMX
4402                                                          MAC can use any of the 32 common DMAC entries.
4403 
4404                                                          GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
4405                                                          registers used in XAUI mode. */
4406 #else
4407 	uint64_t adr                          : 64;
4408 #endif
4409 	} s;
4410 	struct cvmx_gmxx_rxx_adr_cam2_s       cn30xx;
4411 	struct cvmx_gmxx_rxx_adr_cam2_s       cn31xx;
4412 	struct cvmx_gmxx_rxx_adr_cam2_s       cn38xx;
4413 	struct cvmx_gmxx_rxx_adr_cam2_s       cn38xxp2;
4414 	struct cvmx_gmxx_rxx_adr_cam2_s       cn50xx;
4415 	struct cvmx_gmxx_rxx_adr_cam2_s       cn52xx;
4416 	struct cvmx_gmxx_rxx_adr_cam2_s       cn52xxp1;
4417 	struct cvmx_gmxx_rxx_adr_cam2_s       cn56xx;
4418 	struct cvmx_gmxx_rxx_adr_cam2_s       cn56xxp1;
4419 	struct cvmx_gmxx_rxx_adr_cam2_s       cn58xx;
4420 	struct cvmx_gmxx_rxx_adr_cam2_s       cn58xxp1;
4421 	struct cvmx_gmxx_rxx_adr_cam2_s       cn61xx;
4422 	struct cvmx_gmxx_rxx_adr_cam2_s       cn63xx;
4423 	struct cvmx_gmxx_rxx_adr_cam2_s       cn63xxp1;
4424 	struct cvmx_gmxx_rxx_adr_cam2_s       cn66xx;
4425 	struct cvmx_gmxx_rxx_adr_cam2_s       cn68xx;
4426 	struct cvmx_gmxx_rxx_adr_cam2_s       cn68xxp1;
4427 	struct cvmx_gmxx_rxx_adr_cam2_s       cnf71xx;
4428 };
4429 typedef union cvmx_gmxx_rxx_adr_cam2 cvmx_gmxx_rxx_adr_cam2_t;
4430 
4431 /**
4432  * cvmx_gmx#_rx#_adr_cam3
4433  *
4434  * GMX_RX_ADR_CAM = Address Filtering Control
4435  *
4436  */
4437 union cvmx_gmxx_rxx_adr_cam3 {
4438 	uint64_t u64;
4439 	struct cvmx_gmxx_rxx_adr_cam3_s {
4440 #ifdef __BIG_ENDIAN_BITFIELD
4441 	uint64_t adr                          : 64; /**< The DMAC address to match on
4442 
4443                                                          Each entry contributes 8bits to one of 8 matchers.
4444                                                          The CAM matches against unicst or multicst DMAC
4445                                                          addresses.
4446 
4447                                                          ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
4448                                                          in either SGMII or XAUI mode such that any GMX
4449                                                          MAC can use any of the 32 common DMAC entries.
4450 
4451                                                          GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
4452                                                          registers used in XAUI mode. */
4453 #else
4454 	uint64_t adr                          : 64;
4455 #endif
4456 	} s;
4457 	struct cvmx_gmxx_rxx_adr_cam3_s       cn30xx;
4458 	struct cvmx_gmxx_rxx_adr_cam3_s       cn31xx;
4459 	struct cvmx_gmxx_rxx_adr_cam3_s       cn38xx;
4460 	struct cvmx_gmxx_rxx_adr_cam3_s       cn38xxp2;
4461 	struct cvmx_gmxx_rxx_adr_cam3_s       cn50xx;
4462 	struct cvmx_gmxx_rxx_adr_cam3_s       cn52xx;
4463 	struct cvmx_gmxx_rxx_adr_cam3_s       cn52xxp1;
4464 	struct cvmx_gmxx_rxx_adr_cam3_s       cn56xx;
4465 	struct cvmx_gmxx_rxx_adr_cam3_s       cn56xxp1;
4466 	struct cvmx_gmxx_rxx_adr_cam3_s       cn58xx;
4467 	struct cvmx_gmxx_rxx_adr_cam3_s       cn58xxp1;
4468 	struct cvmx_gmxx_rxx_adr_cam3_s       cn61xx;
4469 	struct cvmx_gmxx_rxx_adr_cam3_s       cn63xx;
4470 	struct cvmx_gmxx_rxx_adr_cam3_s       cn63xxp1;
4471 	struct cvmx_gmxx_rxx_adr_cam3_s       cn66xx;
4472 	struct cvmx_gmxx_rxx_adr_cam3_s       cn68xx;
4473 	struct cvmx_gmxx_rxx_adr_cam3_s       cn68xxp1;
4474 	struct cvmx_gmxx_rxx_adr_cam3_s       cnf71xx;
4475 };
4476 typedef union cvmx_gmxx_rxx_adr_cam3 cvmx_gmxx_rxx_adr_cam3_t;
4477 
4478 /**
4479  * cvmx_gmx#_rx#_adr_cam4
4480  *
4481  * GMX_RX_ADR_CAM = Address Filtering Control
4482  *
4483  */
4484 union cvmx_gmxx_rxx_adr_cam4 {
4485 	uint64_t u64;
4486 	struct cvmx_gmxx_rxx_adr_cam4_s {
4487 #ifdef __BIG_ENDIAN_BITFIELD
4488 	uint64_t adr                          : 64; /**< The DMAC address to match on
4489 
4490                                                          Each entry contributes 8bits to one of 8 matchers.
4491                                                          The CAM matches against unicst or multicst DMAC
4492                                                          addresses.
4493 
4494                                                          ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
4495                                                          in either SGMII or XAUI mode such that any GMX
4496                                                          MAC can use any of the 32 common DMAC entries.
4497 
4498                                                          GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
4499                                                          registers used in XAUI mode. */
4500 #else
4501 	uint64_t adr                          : 64;
4502 #endif
4503 	} s;
4504 	struct cvmx_gmxx_rxx_adr_cam4_s       cn30xx;
4505 	struct cvmx_gmxx_rxx_adr_cam4_s       cn31xx;
4506 	struct cvmx_gmxx_rxx_adr_cam4_s       cn38xx;
4507 	struct cvmx_gmxx_rxx_adr_cam4_s       cn38xxp2;
4508 	struct cvmx_gmxx_rxx_adr_cam4_s       cn50xx;
4509 	struct cvmx_gmxx_rxx_adr_cam4_s       cn52xx;
4510 	struct cvmx_gmxx_rxx_adr_cam4_s       cn52xxp1;
4511 	struct cvmx_gmxx_rxx_adr_cam4_s       cn56xx;
4512 	struct cvmx_gmxx_rxx_adr_cam4_s       cn56xxp1;
4513 	struct cvmx_gmxx_rxx_adr_cam4_s       cn58xx;
4514 	struct cvmx_gmxx_rxx_adr_cam4_s       cn58xxp1;
4515 	struct cvmx_gmxx_rxx_adr_cam4_s       cn61xx;
4516 	struct cvmx_gmxx_rxx_adr_cam4_s       cn63xx;
4517 	struct cvmx_gmxx_rxx_adr_cam4_s       cn63xxp1;
4518 	struct cvmx_gmxx_rxx_adr_cam4_s       cn66xx;
4519 	struct cvmx_gmxx_rxx_adr_cam4_s       cn68xx;
4520 	struct cvmx_gmxx_rxx_adr_cam4_s       cn68xxp1;
4521 	struct cvmx_gmxx_rxx_adr_cam4_s       cnf71xx;
4522 };
4523 typedef union cvmx_gmxx_rxx_adr_cam4 cvmx_gmxx_rxx_adr_cam4_t;
4524 
4525 /**
4526  * cvmx_gmx#_rx#_adr_cam5
4527  *
4528  * GMX_RX_ADR_CAM = Address Filtering Control
4529  *
4530  */
4531 union cvmx_gmxx_rxx_adr_cam5 {
4532 	uint64_t u64;
4533 	struct cvmx_gmxx_rxx_adr_cam5_s {
4534 #ifdef __BIG_ENDIAN_BITFIELD
4535 	uint64_t adr                          : 64; /**< The DMAC address to match on
4536 
4537                                                          Each entry contributes 8bits to one of 8 matchers.
4538                                                          The CAM matches against unicst or multicst DMAC
4539                                                          addresses.
4540 
4541                                                          ALL GMX_RX[0..3]_ADR_CAM[0..5] CSRs may be used
4542                                                          in either SGMII or XAUI mode such that any GMX
4543                                                          MAC can use any of the 32 common DMAC entries.
4544 
4545                                                          GMX_RX[1..3]_ADR_CAM[0..5] are the only non-port0
4546                                                          registers used in XAUI mode. */
4547 #else
4548 	uint64_t adr                          : 64;
4549 #endif
4550 	} s;
4551 	struct cvmx_gmxx_rxx_adr_cam5_s       cn30xx;
4552 	struct cvmx_gmxx_rxx_adr_cam5_s       cn31xx;
4553 	struct cvmx_gmxx_rxx_adr_cam5_s       cn38xx;
4554 	struct cvmx_gmxx_rxx_adr_cam5_s       cn38xxp2;
4555 	struct cvmx_gmxx_rxx_adr_cam5_s       cn50xx;
4556 	struct cvmx_gmxx_rxx_adr_cam5_s       cn52xx;
4557 	struct cvmx_gmxx_rxx_adr_cam5_s       cn52xxp1;
4558 	struct cvmx_gmxx_rxx_adr_cam5_s       cn56xx;
4559 	struct cvmx_gmxx_rxx_adr_cam5_s       cn56xxp1;
4560 	struct cvmx_gmxx_rxx_adr_cam5_s       cn58xx;
4561 	struct cvmx_gmxx_rxx_adr_cam5_s       cn58xxp1;
4562 	struct cvmx_gmxx_rxx_adr_cam5_s       cn61xx;
4563 	struct cvmx_gmxx_rxx_adr_cam5_s       cn63xx;
4564 	struct cvmx_gmxx_rxx_adr_cam5_s       cn63xxp1;
4565 	struct cvmx_gmxx_rxx_adr_cam5_s       cn66xx;
4566 	struct cvmx_gmxx_rxx_adr_cam5_s       cn68xx;
4567 	struct cvmx_gmxx_rxx_adr_cam5_s       cn68xxp1;
4568 	struct cvmx_gmxx_rxx_adr_cam5_s       cnf71xx;
4569 };
4570 typedef union cvmx_gmxx_rxx_adr_cam5 cvmx_gmxx_rxx_adr_cam5_t;
4571 
4572 /**
4573  * cvmx_gmx#_rx#_adr_cam_all_en
4574  *
4575  * GMX_RX_ADR_CAM_ALL_EN = Address Filtering Control Enable
4576  *
4577  */
4578 union cvmx_gmxx_rxx_adr_cam_all_en {
4579 	uint64_t u64;
4580 	struct cvmx_gmxx_rxx_adr_cam_all_en_s {
4581 #ifdef __BIG_ENDIAN_BITFIELD
4582 	uint64_t reserved_32_63               : 32;
4583 	uint64_t en                           : 32; /**< CAM Entry Enables
4584 
4585                                                          GMX has 32 DMAC entries that can be accessed with
4586                                                          the GMX_RX[0..3]_ADR_CAM[0..5] CSRs.
4587                                                          These 32 DMAC entries can be used by any of the
4588                                                          four SGMII MACs or the XAUI MAC.
4589 
4590                                                          Each port interface has independent control of
4591                                                          which of the 32 DMAC entries to include in the
4592                                                          CAM lookup.
4593 
4594                                                          GMX_RXx_ADR_CAM_ALL_EN was not present in legacy
4595                                                          GMX implemenations which had only eight DMAC CAM
4596                                                          entries. New applications may choose to ignore
4597                                                          GMX_RXx_ADR_CAM_EN using GMX_RX_ADR_CAM_ALL_EN
4598                                                          instead.
4599 
4600                                                          EN represents the full 32 indepedent per MAC
4601                                                          enables.
4602 
4603                                                          Writes to EN will be reflected in
4604                                                          GMX_RXx_ADR_CAM_EN[EN] and writes to
4605                                                          GMX_RXx_ADR_CAM_EN[EN] will be reflected in EN.
4606                                                          Refer to GMX_RXx_ADR_CAM_EN for the CSR mapping.
4607 
4608                                                          In XAUI mode, only GMX_RX0_ADR_CAM_ALL_EN is used
4609                                                          and GMX_RX[1,2,3]_ADR_CAM_ALL_EN should not be
4610                                                          used. */
4611 #else
4612 	uint64_t en                           : 32;
4613 	uint64_t reserved_32_63               : 32;
4614 #endif
4615 	} s;
4616 	struct cvmx_gmxx_rxx_adr_cam_all_en_s cn61xx;
4617 	struct cvmx_gmxx_rxx_adr_cam_all_en_s cn66xx;
4618 	struct cvmx_gmxx_rxx_adr_cam_all_en_s cn68xx;
4619 	struct cvmx_gmxx_rxx_adr_cam_all_en_s cnf71xx;
4620 };
4621 typedef union cvmx_gmxx_rxx_adr_cam_all_en cvmx_gmxx_rxx_adr_cam_all_en_t;
4622 
4623 /**
4624  * cvmx_gmx#_rx#_adr_cam_en
4625  *
4626  * GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
4627  *
4628  */
4629 union cvmx_gmxx_rxx_adr_cam_en {
4630 	uint64_t u64;
4631 	struct cvmx_gmxx_rxx_adr_cam_en_s {
4632 #ifdef __BIG_ENDIAN_BITFIELD
4633 	uint64_t reserved_8_63                : 56;
4634 	uint64_t en                           : 8;  /**< CAM Entry Enables
4635 
4636                                                          GMX has 32 DMAC entries that can be accessed with
4637                                                          the GMX_RX[0..3]_ADR_CAM[0..5] CSRs.
4638                                                          These 32 DMAC entries can be used by any of the
4639                                                          four SGMII MACs or the XAUI MAC.
4640 
4641                                                          Each port interface has independent control of
4642                                                          which of the 32 DMAC entries to include in the
4643                                                          CAM lookup.
4644 
4645                                                          Legacy GMX implementations were able to CAM
4646                                                          against eight DMAC entries while current
4647                                                          implementations use 32 common entries.
4648                                                          This register is intended for legacy applications
4649                                                          that only require eight DMAC CAM entries per MAC.
4650                                                          New applications may choose to ignore
4651                                                          GMX_RXx_ADR_CAM_EN using GMX_RXx_ADR_CAM_ALL_EN
4652                                                          instead.
4653 
4654                                                          EN controls the enables for the eight legacy CAM
4655                                                          entries as follows:
4656                                                           port0, EN = GMX_RX0_ADR_CAM_ALL_EN[EN<7:0>]
4657                                                           port1, EN = GMX_RX1_ADR_CAM_ALL_EN[EN<15:8>]
4658                                                           port2, EN = GMX_RX2_ADR_CAM_ALL_EN[EN<23:16>]
4659                                                           port3, EN = GMX_RX3_ADR_CAM_ALL_EN[EN<31:24>]
4660 
4661                                                          The full 32 indepedent per MAC enables are in
4662                                                          GMX_RX_ADR_CAM_ALL_EN.
4663 
4664                                                          Therefore, writes to GMX_RXX_ADR_CAM_ALL_EN[EN]
4665                                                          will be reflected in EN and writes to EN will be
4666                                                          reflected in GMX_RXX_ADR_CAM_ALL_EN[EN].
4667 
4668                                                          In XAUI mode, only GMX_RX0_ADR_CAM_EN is used and
4669                                                          GMX_RX[1,2,3]_ADR_CAM_EN should not be used. */
4670 #else
4671 	uint64_t en                           : 8;
4672 	uint64_t reserved_8_63                : 56;
4673 #endif
4674 	} s;
4675 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn30xx;
4676 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn31xx;
4677 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn38xx;
4678 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn38xxp2;
4679 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn50xx;
4680 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn52xx;
4681 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn52xxp1;
4682 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn56xx;
4683 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn56xxp1;
4684 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn58xx;
4685 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn58xxp1;
4686 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn61xx;
4687 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn63xx;
4688 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn63xxp1;
4689 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn66xx;
4690 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn68xx;
4691 	struct cvmx_gmxx_rxx_adr_cam_en_s     cn68xxp1;
4692 	struct cvmx_gmxx_rxx_adr_cam_en_s     cnf71xx;
4693 };
4694 typedef union cvmx_gmxx_rxx_adr_cam_en cvmx_gmxx_rxx_adr_cam_en_t;
4695 
4696 /**
4697  * cvmx_gmx#_rx#_adr_ctl
4698  *
4699  * GMX_RX_ADR_CTL = Address Filtering Control
4700  *
4701  *
4702  * Notes:
4703  * * ALGORITHM
4704  *   Here is some pseudo code that represents the address filter behavior.
4705  *
4706  *      @verbatim
4707  *      bool dmac_addr_filter(uint8 prt, uint48 dmac) [
4708  *        ASSERT(prt >= 0 && prt <= 3);
4709  *        if (is_bcst(dmac))                               // broadcast accept
4710  *          return (GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
4711  *        if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 1)   // multicast reject
4712  *          return REJECT;
4713  *        if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 2)   // multicast accept
4714  *          return ACCEPT;
4715  *
4716  *        cam_hit = 0;
4717  *
4718  *        for (i=0; i<32; i++) [
4719  *          if (GMX_RX[prt]_ADR_CAM_ALL_EN[EN<i>] == 0)
4720  *            continue;
4721  *          uint48 unswizzled_mac_adr = 0x0;
4722  *          for (j=5; j>=0; j--) [
4723  *             unswizzled_mac_adr = (unswizzled_mac_adr << 8) | GMX_RX[i>>3]_ADR_CAM[j][ADR<(i&7)*8+7:(i&7)*8>];
4724  *          ]
4725  *          if (unswizzled_mac_adr == dmac) [
4726  *            cam_hit = 1;
4727  *            break;
4728  *          ]
4729  *        ]
4730  *
4731  *        if (cam_hit)
4732  *          return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
4733  *        else
4734  *          return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
4735  *      ]
4736  *      @endverbatim
4737  *
4738  * * XAUI Mode
4739  *
4740  *   In XAUI mode, only GMX_RX0_ADR_CTL is used.  GMX_RX[1,2,3]_ADR_CTL should not be used.
4741  */
4742 union cvmx_gmxx_rxx_adr_ctl {
4743 	uint64_t u64;
4744 	struct cvmx_gmxx_rxx_adr_ctl_s {
4745 #ifdef __BIG_ENDIAN_BITFIELD
4746 	uint64_t reserved_4_63                : 60;
4747 	uint64_t cam_mode                     : 1;  /**< Allow or deny DMAC address filter
4748                                                          0 = reject the packet on DMAC address match
4749                                                          1 = accept the packet on DMAC address match */
4750 	uint64_t mcst                         : 2;  /**< Multicast Mode
4751                                                          0 = Use the Address Filter CAM
4752                                                          1 = Force reject all multicast packets
4753                                                          2 = Force accept all multicast packets
4754                                                          3 = Reserved */
4755 	uint64_t bcst                         : 1;  /**< Accept All Broadcast Packets */
4756 #else
4757 	uint64_t bcst                         : 1;
4758 	uint64_t mcst                         : 2;
4759 	uint64_t cam_mode                     : 1;
4760 	uint64_t reserved_4_63                : 60;
4761 #endif
4762 	} s;
4763 	struct cvmx_gmxx_rxx_adr_ctl_s        cn30xx;
4764 	struct cvmx_gmxx_rxx_adr_ctl_s        cn31xx;
4765 	struct cvmx_gmxx_rxx_adr_ctl_s        cn38xx;
4766 	struct cvmx_gmxx_rxx_adr_ctl_s        cn38xxp2;
4767 	struct cvmx_gmxx_rxx_adr_ctl_s        cn50xx;
4768 	struct cvmx_gmxx_rxx_adr_ctl_s        cn52xx;
4769 	struct cvmx_gmxx_rxx_adr_ctl_s        cn52xxp1;
4770 	struct cvmx_gmxx_rxx_adr_ctl_s        cn56xx;
4771 	struct cvmx_gmxx_rxx_adr_ctl_s        cn56xxp1;
4772 	struct cvmx_gmxx_rxx_adr_ctl_s        cn58xx;
4773 	struct cvmx_gmxx_rxx_adr_ctl_s        cn58xxp1;
4774 	struct cvmx_gmxx_rxx_adr_ctl_s        cn61xx;
4775 	struct cvmx_gmxx_rxx_adr_ctl_s        cn63xx;
4776 	struct cvmx_gmxx_rxx_adr_ctl_s        cn63xxp1;
4777 	struct cvmx_gmxx_rxx_adr_ctl_s        cn66xx;
4778 	struct cvmx_gmxx_rxx_adr_ctl_s        cn68xx;
4779 	struct cvmx_gmxx_rxx_adr_ctl_s        cn68xxp1;
4780 	struct cvmx_gmxx_rxx_adr_ctl_s        cnf71xx;
4781 };
4782 typedef union cvmx_gmxx_rxx_adr_ctl cvmx_gmxx_rxx_adr_ctl_t;
4783 
4784 /**
4785  * cvmx_gmx#_rx#_decision
4786  *
4787  * GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
4788  *
4789  *
4790  * Notes:
4791  * As each byte in a packet is received by GMX, the L2 byte count is compared
4792  * against the GMX_RX_DECISION[CNT].  The L2 byte count is the number of bytes
4793  * from the beginning of the L2 header (DMAC).  In normal operation, the L2
4794  * header begins after the PREAMBLE+SFD (GMX_RX_FRM_CTL[PRE_CHK]=1) and any
4795  * optional UDD skip data (GMX_RX_UDD_SKP[LEN]).
4796  *
4797  * When GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
4798  * packet and would require UDD skip length to account for them.
4799  *
4800  *                                                 L2 Size
4801  * Port Mode             <GMX_RX_DECISION bytes (default=24)       >=GMX_RX_DECISION bytes (default=24)
4802  *
4803  * Full Duplex           accept packet                             apply filters
4804  *                       no filtering is applied                   accept packet based on DMAC and PAUSE packet filters
4805  *
4806  * Half Duplex           drop packet                               apply filters
4807  *                       packet is unconditionally dropped         accept packet based on DMAC
4808  *
4809  * where l2_size = MAX(0, total_packet_size - GMX_RX_UDD_SKP[LEN] - ((GMX_RX_FRM_CTL[PRE_CHK]==1)*8)
4810  */
4811 union cvmx_gmxx_rxx_decision {
4812 	uint64_t u64;
4813 	struct cvmx_gmxx_rxx_decision_s {
4814 #ifdef __BIG_ENDIAN_BITFIELD
4815 	uint64_t reserved_5_63                : 59;
4816 	uint64_t cnt                          : 5;  /**< The byte count to decide when to accept or filter
4817                                                          a packet. */
4818 #else
4819 	uint64_t cnt                          : 5;
4820 	uint64_t reserved_5_63                : 59;
4821 #endif
4822 	} s;
4823 	struct cvmx_gmxx_rxx_decision_s       cn30xx;
4824 	struct cvmx_gmxx_rxx_decision_s       cn31xx;
4825 	struct cvmx_gmxx_rxx_decision_s       cn38xx;
4826 	struct cvmx_gmxx_rxx_decision_s       cn38xxp2;
4827 	struct cvmx_gmxx_rxx_decision_s       cn50xx;
4828 	struct cvmx_gmxx_rxx_decision_s       cn52xx;
4829 	struct cvmx_gmxx_rxx_decision_s       cn52xxp1;
4830 	struct cvmx_gmxx_rxx_decision_s       cn56xx;
4831 	struct cvmx_gmxx_rxx_decision_s       cn56xxp1;
4832 	struct cvmx_gmxx_rxx_decision_s       cn58xx;
4833 	struct cvmx_gmxx_rxx_decision_s       cn58xxp1;
4834 	struct cvmx_gmxx_rxx_decision_s       cn61xx;
4835 	struct cvmx_gmxx_rxx_decision_s       cn63xx;
4836 	struct cvmx_gmxx_rxx_decision_s       cn63xxp1;
4837 	struct cvmx_gmxx_rxx_decision_s       cn66xx;
4838 	struct cvmx_gmxx_rxx_decision_s       cn68xx;
4839 	struct cvmx_gmxx_rxx_decision_s       cn68xxp1;
4840 	struct cvmx_gmxx_rxx_decision_s       cnf71xx;
4841 };
4842 typedef union cvmx_gmxx_rxx_decision cvmx_gmxx_rxx_decision_t;
4843 
4844 /**
4845  * cvmx_gmx#_rx#_frm_chk
4846  *
4847  * GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
4848  *
4849  *
4850  * Notes:
4851  * If GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
4852  *
4853  * In XAUI mode prt0 is used for checking.
4854  */
4855 union cvmx_gmxx_rxx_frm_chk {
4856 	uint64_t u64;
4857 	struct cvmx_gmxx_rxx_frm_chk_s {
4858 #ifdef __BIG_ENDIAN_BITFIELD
4859 	uint64_t reserved_10_63               : 54;
4860 	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
4861 	uint64_t skperr                       : 1;  /**< Skipper error */
4862 	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
4863 	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
4864 	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
4865 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
4866 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
4867 	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
4868 	uint64_t carext                       : 1;  /**< Carrier extend error
4869                                                          (SGMII/1000Base-X only) */
4870 	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize */
4871 #else
4872 	uint64_t minerr                       : 1;
4873 	uint64_t carext                       : 1;
4874 	uint64_t maxerr                       : 1;
4875 	uint64_t jabber                       : 1;
4876 	uint64_t fcserr                       : 1;
4877 	uint64_t alnerr                       : 1;
4878 	uint64_t lenerr                       : 1;
4879 	uint64_t rcverr                       : 1;
4880 	uint64_t skperr                       : 1;
4881 	uint64_t niberr                       : 1;
4882 	uint64_t reserved_10_63               : 54;
4883 #endif
4884 	} s;
4885 	struct cvmx_gmxx_rxx_frm_chk_s        cn30xx;
4886 	struct cvmx_gmxx_rxx_frm_chk_s        cn31xx;
4887 	struct cvmx_gmxx_rxx_frm_chk_s        cn38xx;
4888 	struct cvmx_gmxx_rxx_frm_chk_s        cn38xxp2;
4889 	struct cvmx_gmxx_rxx_frm_chk_cn50xx {
4890 #ifdef __BIG_ENDIAN_BITFIELD
4891 	uint64_t reserved_10_63               : 54;
4892 	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
4893 	uint64_t skperr                       : 1;  /**< Skipper error */
4894 	uint64_t rcverr                       : 1;  /**< Frame was received with RMGII Data reception error */
4895 	uint64_t reserved_6_6                 : 1;
4896 	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
4897 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
4898 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
4899 	uint64_t reserved_2_2                 : 1;
4900 	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
4901 	uint64_t reserved_0_0                 : 1;
4902 #else
4903 	uint64_t reserved_0_0                 : 1;
4904 	uint64_t carext                       : 1;
4905 	uint64_t reserved_2_2                 : 1;
4906 	uint64_t jabber                       : 1;
4907 	uint64_t fcserr                       : 1;
4908 	uint64_t alnerr                       : 1;
4909 	uint64_t reserved_6_6                 : 1;
4910 	uint64_t rcverr                       : 1;
4911 	uint64_t skperr                       : 1;
4912 	uint64_t niberr                       : 1;
4913 	uint64_t reserved_10_63               : 54;
4914 #endif
4915 	} cn50xx;
4916 	struct cvmx_gmxx_rxx_frm_chk_cn52xx {
4917 #ifdef __BIG_ENDIAN_BITFIELD
4918 	uint64_t reserved_9_63                : 55;
4919 	uint64_t skperr                       : 1;  /**< Skipper error */
4920 	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
4921 	uint64_t reserved_5_6                 : 2;
4922 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
4923 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
4924 	uint64_t reserved_2_2                 : 1;
4925 	uint64_t carext                       : 1;  /**< Carrier extend error
4926                                                          (SGMII/1000Base-X only) */
4927 	uint64_t reserved_0_0                 : 1;
4928 #else
4929 	uint64_t reserved_0_0                 : 1;
4930 	uint64_t carext                       : 1;
4931 	uint64_t reserved_2_2                 : 1;
4932 	uint64_t jabber                       : 1;
4933 	uint64_t fcserr                       : 1;
4934 	uint64_t reserved_5_6                 : 2;
4935 	uint64_t rcverr                       : 1;
4936 	uint64_t skperr                       : 1;
4937 	uint64_t reserved_9_63                : 55;
4938 #endif
4939 	} cn52xx;
4940 	struct cvmx_gmxx_rxx_frm_chk_cn52xx   cn52xxp1;
4941 	struct cvmx_gmxx_rxx_frm_chk_cn52xx   cn56xx;
4942 	struct cvmx_gmxx_rxx_frm_chk_cn52xx   cn56xxp1;
4943 	struct cvmx_gmxx_rxx_frm_chk_s        cn58xx;
4944 	struct cvmx_gmxx_rxx_frm_chk_s        cn58xxp1;
4945 	struct cvmx_gmxx_rxx_frm_chk_cn61xx {
4946 #ifdef __BIG_ENDIAN_BITFIELD
4947 	uint64_t reserved_9_63                : 55;
4948 	uint64_t skperr                       : 1;  /**< Skipper error */
4949 	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
4950 	uint64_t reserved_5_6                 : 2;
4951 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
4952 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
4953 	uint64_t reserved_2_2                 : 1;
4954 	uint64_t carext                       : 1;  /**< Carrier extend error
4955                                                          (SGMII/1000Base-X only) */
4956 	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize */
4957 #else
4958 	uint64_t minerr                       : 1;
4959 	uint64_t carext                       : 1;
4960 	uint64_t reserved_2_2                 : 1;
4961 	uint64_t jabber                       : 1;
4962 	uint64_t fcserr                       : 1;
4963 	uint64_t reserved_5_6                 : 2;
4964 	uint64_t rcverr                       : 1;
4965 	uint64_t skperr                       : 1;
4966 	uint64_t reserved_9_63                : 55;
4967 #endif
4968 	} cn61xx;
4969 	struct cvmx_gmxx_rxx_frm_chk_cn61xx   cn63xx;
4970 	struct cvmx_gmxx_rxx_frm_chk_cn61xx   cn63xxp1;
4971 	struct cvmx_gmxx_rxx_frm_chk_cn61xx   cn66xx;
4972 	struct cvmx_gmxx_rxx_frm_chk_cn61xx   cn68xx;
4973 	struct cvmx_gmxx_rxx_frm_chk_cn61xx   cn68xxp1;
4974 	struct cvmx_gmxx_rxx_frm_chk_cn61xx   cnf71xx;
4975 };
4976 typedef union cvmx_gmxx_rxx_frm_chk cvmx_gmxx_rxx_frm_chk_t;
4977 
4978 /**
4979  * cvmx_gmx#_rx#_frm_ctl
4980  *
4981  * GMX_RX_FRM_CTL = Frame Control
4982  *
4983  *
4984  * Notes:
4985  * * PRE_STRP
4986  *   When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
4987  *   determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
4988  *   core as part of the packet.
4989  *
4990  *   In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
4991  *   size when checking against the MIN and MAX bounds.  Furthermore, the bytes
4992  *   are skipped when locating the start of the L2 header for DMAC and Control
4993  *   frame recognition.
4994  *
4995  * * CTL_BCK/CTL_DRP
4996  *   These bits control how the HW handles incoming PAUSE packets.  Here are
4997  *   the most common modes of operation:
4998  *     CTL_BCK=1,CTL_DRP=1   - HW does it all
4999  *     CTL_BCK=0,CTL_DRP=0   - SW sees all pause frames
5000  *     CTL_BCK=0,CTL_DRP=1   - all pause frames are completely ignored
5001  *
5002  *   These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode.
5003  *   Since PAUSE packets only apply to fulldup operation, any PAUSE packet
5004  *   would constitute an exception which should be handled by the processing
5005  *   cores.  PAUSE packets should not be forwarded.
5006  */
5007 union cvmx_gmxx_rxx_frm_ctl {
5008 	uint64_t u64;
5009 	struct cvmx_gmxx_rxx_frm_ctl_s {
5010 #ifdef __BIG_ENDIAN_BITFIELD
5011 	uint64_t reserved_13_63               : 51;
5012 	uint64_t ptp_mode                     : 1;  /**< Timestamp mode
5013                                                          When PTP_MODE is set, a 64-bit timestamp will be
5014                                                          prepended to every incoming packet. The timestamp
5015                                                          bytes are added to the packet in such a way as to
5016                                                          not modify the packet's receive byte count.  This
5017                                                          implies that the GMX_RX_JABBER, MINERR,
5018                                                          GMX_RX_DECISION, GMX_RX_UDD_SKP, and the
5019                                                          GMX_RX_STATS_* do not require any adjustment as
5020                                                          they operate on the received packet size.
5021                                                          When the packet reaches PKI, its size will
5022                                                          reflect the additional bytes and is subject to
5023                                                          the restrictions below.
5024                                                          If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1.
5025                                                          If PTP_MODE=1,
5026                                                           PIP_PRT_CFGx[SKIP] should be increased by 8.
5027                                                           PIP_PRT_CFGx[HIGIG_EN] should be 0.
5028                                                           PIP_FRM_CHKx[MAXLEN] should be increased by 8.
5029                                                           PIP_FRM_CHKx[MINLEN] should be increased by 8.
5030                                                           PIP_TAG_INCx[EN] should be adjusted.
5031                                                           PIP_PRT_CFGBx[ALT_SKP_EN] should be 0. */
5032 	uint64_t reserved_11_11               : 1;
5033 	uint64_t null_dis                     : 1;  /**< When set, do not modify the MOD bits on NULL ticks
5034                                                          due to PARITAL packets */
5035 	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns the the SFD byte
5036                                                          regardless of the number of previous PREAMBLE
5037                                                          nibbles.  In this mode, PRE_STRP should be set to
5038                                                          account for the variable nature of the PREAMBLE.
5039                                                          PRE_CHK must be set to enable this and all
5040                                                          PREAMBLE features.
5041                                                          (SGMII at 10/100Mbs only) */
5042 	uint64_t pad_len                      : 1;  /**< When set, disables the length check for non-min
5043                                                          sized pkts with padding in the client data
5044                                                          (PASS3 Only) */
5045 	uint64_t vlan_len                     : 1;  /**< When set, disables the length check for VLAN pkts */
5046 	uint64_t pre_free                     : 1;  /**< When set, PREAMBLE checking is  less strict.
5047                                                          GMX will begin the frame at the first SFD.
5048                                                          PRE_CHK must be set to enable this and all
5049                                                          PREAMBLE features.
5050                                                          (SGMII/1000Base-X only) */
5051 	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
5052 	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
5053                                                          Multicast address */
5054 	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
5055 	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
5056 	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
5057                                                          0=PREAMBLE+SFD is sent to core as part of frame
5058                                                          1=PREAMBLE+SFD is dropped
5059                                                          PRE_CHK must be set to enable this and all
5060                                                          PREAMBLE features.
5061                                                          If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
5062 	uint64_t pre_chk                      : 1;  /**< This port is configured to send a valid 802.3
5063                                                          PREAMBLE to begin every frame. GMX checks that a
5064                                                          valid PREAMBLE is received (based on PRE_FREE).
5065                                                          When a problem does occur within the PREAMBLE
5066                                                          seqeunce, the frame is marked as bad and not sent
5067                                                          into the core.  The GMX_GMX_RX_INT_REG[PCTERR]
5068                                                          interrupt is also raised.
5069                                                          When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
5070                                                          must be zero.
5071                                                          If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
5072 #else
5073 	uint64_t pre_chk                      : 1;
5074 	uint64_t pre_strp                     : 1;
5075 	uint64_t ctl_drp                      : 1;
5076 	uint64_t ctl_bck                      : 1;
5077 	uint64_t ctl_mcst                     : 1;
5078 	uint64_t ctl_smac                     : 1;
5079 	uint64_t pre_free                     : 1;
5080 	uint64_t vlan_len                     : 1;
5081 	uint64_t pad_len                      : 1;
5082 	uint64_t pre_align                    : 1;
5083 	uint64_t null_dis                     : 1;
5084 	uint64_t reserved_11_11               : 1;
5085 	uint64_t ptp_mode                     : 1;
5086 	uint64_t reserved_13_63               : 51;
5087 #endif
5088 	} s;
5089 	struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
5090 #ifdef __BIG_ENDIAN_BITFIELD
5091 	uint64_t reserved_9_63                : 55;
5092 	uint64_t pad_len                      : 1;  /**< When set, disables the length check for non-min
5093                                                          sized pkts with padding in the client data */
5094 	uint64_t vlan_len                     : 1;  /**< When set, disables the length check for VLAN pkts */
5095 	uint64_t pre_free                     : 1;  /**< Allows for less strict PREAMBLE checking.
5096                                                          0-7 cycles of PREAMBLE followed by SFD (pass 1.0)
5097                                                          0-254 cycles of PREAMBLE followed by SFD (else) */
5098 	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
5099 	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
5100                                                          Multicast address */
5101 	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
5102 	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
5103 	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
5104                                                          0=PREAMBLE+SFD is sent to core as part of frame
5105                                                          1=PREAMBLE+SFD is dropped */
5106 	uint64_t pre_chk                      : 1;  /**< This port is configured to send PREAMBLE+SFD
5107                                                          to begin every frame.  GMX checks that the
5108                                                          PREAMBLE is sent correctly */
5109 #else
5110 	uint64_t pre_chk                      : 1;
5111 	uint64_t pre_strp                     : 1;
5112 	uint64_t ctl_drp                      : 1;
5113 	uint64_t ctl_bck                      : 1;
5114 	uint64_t ctl_mcst                     : 1;
5115 	uint64_t ctl_smac                     : 1;
5116 	uint64_t pre_free                     : 1;
5117 	uint64_t vlan_len                     : 1;
5118 	uint64_t pad_len                      : 1;
5119 	uint64_t reserved_9_63                : 55;
5120 #endif
5121 	} cn30xx;
5122 	struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
5123 #ifdef __BIG_ENDIAN_BITFIELD
5124 	uint64_t reserved_8_63                : 56;
5125 	uint64_t vlan_len                     : 1;  /**< When set, disables the length check for VLAN pkts */
5126 	uint64_t pre_free                     : 1;  /**< Allows for less strict PREAMBLE checking.
5127                                                          0 - 7 cycles of PREAMBLE followed by SFD (pass1.0)
5128                                                          0 - 254 cycles of PREAMBLE followed by SFD (else) */
5129 	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
5130 	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
5131                                                          Multicast address */
5132 	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
5133 	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
5134 	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
5135                                                          0=PREAMBLE+SFD is sent to core as part of frame
5136                                                          1=PREAMBLE+SFD is dropped */
5137 	uint64_t pre_chk                      : 1;  /**< This port is configured to send PREAMBLE+SFD
5138                                                          to begin every frame.  GMX checks that the
5139                                                          PREAMBLE is sent correctly */
5140 #else
5141 	uint64_t pre_chk                      : 1;
5142 	uint64_t pre_strp                     : 1;
5143 	uint64_t ctl_drp                      : 1;
5144 	uint64_t ctl_bck                      : 1;
5145 	uint64_t ctl_mcst                     : 1;
5146 	uint64_t ctl_smac                     : 1;
5147 	uint64_t pre_free                     : 1;
5148 	uint64_t vlan_len                     : 1;
5149 	uint64_t reserved_8_63                : 56;
5150 #endif
5151 	} cn31xx;
5152 	struct cvmx_gmxx_rxx_frm_ctl_cn30xx   cn38xx;
5153 	struct cvmx_gmxx_rxx_frm_ctl_cn31xx   cn38xxp2;
5154 	struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
5155 #ifdef __BIG_ENDIAN_BITFIELD
5156 	uint64_t reserved_11_63               : 53;
5157 	uint64_t null_dis                     : 1;  /**< When set, do not modify the MOD bits on NULL ticks
5158                                                          due to PARITAL packets */
5159 	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns the the SFD byte
5160                                                          regardless of the number of previous PREAMBLE
5161                                                          nibbles.  In this mode, PREAMBLE can be consumed
5162                                                          by the HW so when PRE_ALIGN is set, PRE_FREE,
5163                                                          PRE_STRP must be set for correct operation.
5164                                                          PRE_CHK must be set to enable this and all
5165                                                          PREAMBLE features. */
5166 	uint64_t reserved_7_8                 : 2;
5167 	uint64_t pre_free                     : 1;  /**< Allows for less strict PREAMBLE checking.
5168                                                          0-254 cycles of PREAMBLE followed by SFD */
5169 	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
5170 	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
5171                                                          Multicast address */
5172 	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
5173 	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
5174 	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
5175                                                          0=PREAMBLE+SFD is sent to core as part of frame
5176                                                          1=PREAMBLE+SFD is dropped */
5177 	uint64_t pre_chk                      : 1;  /**< This port is configured to send PREAMBLE+SFD
5178                                                          to begin every frame.  GMX checks that the
5179                                                          PREAMBLE is sent correctly */
5180 #else
5181 	uint64_t pre_chk                      : 1;
5182 	uint64_t pre_strp                     : 1;
5183 	uint64_t ctl_drp                      : 1;
5184 	uint64_t ctl_bck                      : 1;
5185 	uint64_t ctl_mcst                     : 1;
5186 	uint64_t ctl_smac                     : 1;
5187 	uint64_t pre_free                     : 1;
5188 	uint64_t reserved_7_8                 : 2;
5189 	uint64_t pre_align                    : 1;
5190 	uint64_t null_dis                     : 1;
5191 	uint64_t reserved_11_63               : 53;
5192 #endif
5193 	} cn50xx;
5194 	struct cvmx_gmxx_rxx_frm_ctl_cn50xx   cn52xx;
5195 	struct cvmx_gmxx_rxx_frm_ctl_cn50xx   cn52xxp1;
5196 	struct cvmx_gmxx_rxx_frm_ctl_cn50xx   cn56xx;
5197 	struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
5198 #ifdef __BIG_ENDIAN_BITFIELD
5199 	uint64_t reserved_10_63               : 54;
5200 	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns the the SFD byte
5201                                                          regardless of the number of previous PREAMBLE
5202                                                          nibbles.  In this mode, PRE_STRP should be set to
5203                                                          account for the variable nature of the PREAMBLE.
5204                                                          PRE_CHK must be set to enable this and all
5205                                                          PREAMBLE features.
5206                                                          (SGMII at 10/100Mbs only) */
5207 	uint64_t reserved_7_8                 : 2;
5208 	uint64_t pre_free                     : 1;  /**< When set, PREAMBLE checking is  less strict.
5209                                                          0 - 254 cycles of PREAMBLE followed by SFD
5210                                                          PRE_CHK must be set to enable this and all
5211                                                          PREAMBLE features.
5212                                                          (SGMII/1000Base-X only) */
5213 	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
5214 	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
5215                                                          Multicast address */
5216 	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
5217 	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
5218 	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
5219                                                          0=PREAMBLE+SFD is sent to core as part of frame
5220                                                          1=PREAMBLE+SFD is dropped
5221                                                          PRE_CHK must be set to enable this and all
5222                                                          PREAMBLE features. */
5223 	uint64_t pre_chk                      : 1;  /**< This port is configured to send PREAMBLE+SFD
5224                                                          to begin every frame.  GMX checks that the
5225                                                          PREAMBLE is sent correctly.
5226                                                          When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
5227                                                          must be zero. */
5228 #else
5229 	uint64_t pre_chk                      : 1;
5230 	uint64_t pre_strp                     : 1;
5231 	uint64_t ctl_drp                      : 1;
5232 	uint64_t ctl_bck                      : 1;
5233 	uint64_t ctl_mcst                     : 1;
5234 	uint64_t ctl_smac                     : 1;
5235 	uint64_t pre_free                     : 1;
5236 	uint64_t reserved_7_8                 : 2;
5237 	uint64_t pre_align                    : 1;
5238 	uint64_t reserved_10_63               : 54;
5239 #endif
5240 	} cn56xxp1;
5241 	struct cvmx_gmxx_rxx_frm_ctl_cn58xx {
5242 #ifdef __BIG_ENDIAN_BITFIELD
5243 	uint64_t reserved_11_63               : 53;
5244 	uint64_t null_dis                     : 1;  /**< When set, do not modify the MOD bits on NULL ticks
5245                                                          due to PARITAL packets
5246                                                          In spi4 mode, all ports use prt0 for checking. */
5247 	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns the the SFD byte
5248                                                          regardless of the number of previous PREAMBLE
5249                                                          nibbles.  In this mode, PREAMBLE can be consumed
5250                                                          by the HW so when PRE_ALIGN is set, PRE_FREE,
5251                                                          PRE_STRP must be set for correct operation.
5252                                                          PRE_CHK must be set to enable this and all
5253                                                          PREAMBLE features. */
5254 	uint64_t pad_len                      : 1;  /**< When set, disables the length check for non-min
5255                                                          sized pkts with padding in the client data
5256                                                          (PASS3 Only) */
5257 	uint64_t vlan_len                     : 1;  /**< When set, disables the length check for VLAN pkts */
5258 	uint64_t pre_free                     : 1;  /**< When set, PREAMBLE checking is  less strict.
5259                                                          0 - 254 cycles of PREAMBLE followed by SFD */
5260 	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
5261 	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
5262                                                          Multicast address */
5263 	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
5264 	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
5265 	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
5266                                                          0=PREAMBLE+SFD is sent to core as part of frame
5267                                                          1=PREAMBLE+SFD is dropped */
5268 	uint64_t pre_chk                      : 1;  /**< This port is configured to send PREAMBLE+SFD
5269                                                          to begin every frame.  GMX checks that the
5270                                                          PREAMBLE is sent correctly */
5271 #else
5272 	uint64_t pre_chk                      : 1;
5273 	uint64_t pre_strp                     : 1;
5274 	uint64_t ctl_drp                      : 1;
5275 	uint64_t ctl_bck                      : 1;
5276 	uint64_t ctl_mcst                     : 1;
5277 	uint64_t ctl_smac                     : 1;
5278 	uint64_t pre_free                     : 1;
5279 	uint64_t vlan_len                     : 1;
5280 	uint64_t pad_len                      : 1;
5281 	uint64_t pre_align                    : 1;
5282 	uint64_t null_dis                     : 1;
5283 	uint64_t reserved_11_63               : 53;
5284 #endif
5285 	} cn58xx;
5286 	struct cvmx_gmxx_rxx_frm_ctl_cn30xx   cn58xxp1;
5287 	struct cvmx_gmxx_rxx_frm_ctl_cn61xx {
5288 #ifdef __BIG_ENDIAN_BITFIELD
5289 	uint64_t reserved_13_63               : 51;
5290 	uint64_t ptp_mode                     : 1;  /**< Timestamp mode
5291                                                          When PTP_MODE is set, a 64-bit timestamp will be
5292                                                          prepended to every incoming packet. The timestamp
5293                                                          bytes are added to the packet in such a way as to
5294                                                          not modify the packet's receive byte count.  This
5295                                                          implies that the GMX_RX_JABBER, MINERR,
5296                                                          GMX_RX_DECISION, GMX_RX_UDD_SKP, and the
5297                                                          GMX_RX_STATS_* do not require any adjustment as
5298                                                          they operate on the received packet size.
5299                                                          When the packet reaches PKI, its size will
5300                                                          reflect the additional bytes and is subject to
5301                                                          the restrictions below.
5302                                                          If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1.
5303                                                          If PTP_MODE=1,
5304                                                           PIP_PRT_CFGx[SKIP] should be increased by 8.
5305                                                           PIP_PRT_CFGx[HIGIG_EN] should be 0.
5306                                                           PIP_FRM_CHKx[MAXLEN] should be increased by 8.
5307                                                           PIP_FRM_CHKx[MINLEN] should be increased by 8.
5308                                                           PIP_TAG_INCx[EN] should be adjusted.
5309                                                           PIP_PRT_CFGBx[ALT_SKP_EN] should be 0. */
5310 	uint64_t reserved_11_11               : 1;
5311 	uint64_t null_dis                     : 1;  /**< When set, do not modify the MOD bits on NULL ticks
5312                                                          due to PARITAL packets */
5313 	uint64_t pre_align                    : 1;  /**< When set, PREAMBLE parser aligns the the SFD byte
5314                                                          regardless of the number of previous PREAMBLE
5315                                                          nibbles.  In this mode, PRE_STRP should be set to
5316                                                          account for the variable nature of the PREAMBLE.
5317                                                          PRE_CHK must be set to enable this and all
5318                                                          PREAMBLE features.
5319                                                          (SGMII at 10/100Mbs only) */
5320 	uint64_t reserved_7_8                 : 2;
5321 	uint64_t pre_free                     : 1;  /**< When set, PREAMBLE checking is  less strict.
5322                                                          GMX will begin the frame at the first SFD.
5323                                                          PRE_CHK must be set to enable this and all
5324                                                          PREAMBLE features.
5325                                                          (SGMII/1000Base-X only) */
5326 	uint64_t ctl_smac                     : 1;  /**< Control Pause Frames can match station SMAC */
5327 	uint64_t ctl_mcst                     : 1;  /**< Control Pause Frames can match globally assign
5328                                                          Multicast address */
5329 	uint64_t ctl_bck                      : 1;  /**< Forward pause information to TX block */
5330 	uint64_t ctl_drp                      : 1;  /**< Drop Control Pause Frames */
5331 	uint64_t pre_strp                     : 1;  /**< Strip off the preamble (when present)
5332                                                          0=PREAMBLE+SFD is sent to core as part of frame
5333                                                          1=PREAMBLE+SFD is dropped
5334                                                          PRE_CHK must be set to enable this and all
5335                                                          PREAMBLE features.
5336                                                          If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
5337 	uint64_t pre_chk                      : 1;  /**< This port is configured to send a valid 802.3
5338                                                          PREAMBLE to begin every frame. GMX checks that a
5339                                                          valid PREAMBLE is received (based on PRE_FREE).
5340                                                          When a problem does occur within the PREAMBLE
5341                                                          seqeunce, the frame is marked as bad and not sent
5342                                                          into the core.  The GMX_GMX_RX_INT_REG[PCTERR]
5343                                                          interrupt is also raised.
5344                                                          When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
5345                                                          must be zero.
5346                                                          If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
5347 #else
5348 	uint64_t pre_chk                      : 1;
5349 	uint64_t pre_strp                     : 1;
5350 	uint64_t ctl_drp                      : 1;
5351 	uint64_t ctl_bck                      : 1;
5352 	uint64_t ctl_mcst                     : 1;
5353 	uint64_t ctl_smac                     : 1;
5354 	uint64_t pre_free                     : 1;
5355 	uint64_t reserved_7_8                 : 2;
5356 	uint64_t pre_align                    : 1;
5357 	uint64_t null_dis                     : 1;
5358 	uint64_t reserved_11_11               : 1;
5359 	uint64_t ptp_mode                     : 1;
5360 	uint64_t reserved_13_63               : 51;
5361 #endif
5362 	} cn61xx;
5363 	struct cvmx_gmxx_rxx_frm_ctl_cn61xx   cn63xx;
5364 	struct cvmx_gmxx_rxx_frm_ctl_cn61xx   cn63xxp1;
5365 	struct cvmx_gmxx_rxx_frm_ctl_cn61xx   cn66xx;
5366 	struct cvmx_gmxx_rxx_frm_ctl_cn61xx   cn68xx;
5367 	struct cvmx_gmxx_rxx_frm_ctl_cn61xx   cn68xxp1;
5368 	struct cvmx_gmxx_rxx_frm_ctl_cn61xx   cnf71xx;
5369 };
5370 typedef union cvmx_gmxx_rxx_frm_ctl cvmx_gmxx_rxx_frm_ctl_t;
5371 
5372 /**
5373  * cvmx_gmx#_rx#_frm_max
5374  *
5375  * GMX_RX_FRM_MAX = Frame Max length
5376  *
5377  *
5378  * Notes:
5379  * In spi4 mode, all spi4 ports use prt0 for checking.
5380  *
5381  * When changing the LEN field, be sure that LEN does not exceed
5382  * GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that
5383  * are within the maximum length parameter to be rejected because they exceed
5384  * the GMX_RX_JABBER[CNT] limit.
5385  */
5386 union cvmx_gmxx_rxx_frm_max {
5387 	uint64_t u64;
5388 	struct cvmx_gmxx_rxx_frm_max_s {
5389 #ifdef __BIG_ENDIAN_BITFIELD
5390 	uint64_t reserved_16_63               : 48;
5391 	uint64_t len                          : 16; /**< Byte count for Max-sized frame check
5392                                                          GMX_RXn_FRM_CHK[MAXERR] enables the check for
5393                                                          port n.
5394                                                          If enabled, failing packets set the MAXERR
5395                                                          interrupt and work-queue entry WORD2[opcode] is
5396                                                          set to OVER_FCS (0x3, if packet has bad FCS) or
5397                                                          OVER_ERR (0x4, if packet has good FCS).
5398                                                          LEN =< GMX_RX_JABBER[CNT] */
5399 #else
5400 	uint64_t len                          : 16;
5401 	uint64_t reserved_16_63               : 48;
5402 #endif
5403 	} s;
5404 	struct cvmx_gmxx_rxx_frm_max_s        cn30xx;
5405 	struct cvmx_gmxx_rxx_frm_max_s        cn31xx;
5406 	struct cvmx_gmxx_rxx_frm_max_s        cn38xx;
5407 	struct cvmx_gmxx_rxx_frm_max_s        cn38xxp2;
5408 	struct cvmx_gmxx_rxx_frm_max_s        cn58xx;
5409 	struct cvmx_gmxx_rxx_frm_max_s        cn58xxp1;
5410 };
5411 typedef union cvmx_gmxx_rxx_frm_max cvmx_gmxx_rxx_frm_max_t;
5412 
5413 /**
5414  * cvmx_gmx#_rx#_frm_min
5415  *
5416  * GMX_RX_FRM_MIN = Frame Min length
5417  *
5418  *
5419  * Notes:
5420  * In spi4 mode, all spi4 ports use prt0 for checking.
5421  *
5422  */
5423 union cvmx_gmxx_rxx_frm_min {
5424 	uint64_t u64;
5425 	struct cvmx_gmxx_rxx_frm_min_s {
5426 #ifdef __BIG_ENDIAN_BITFIELD
5427 	uint64_t reserved_16_63               : 48;
5428 	uint64_t len                          : 16; /**< Byte count for Min-sized frame check
5429                                                          GMX_RXn_FRM_CHK[MINERR] enables the check for
5430                                                          port n.
5431                                                          If enabled, failing packets set the MINERR
5432                                                          interrupt and work-queue entry WORD2[opcode] is
5433                                                          set to UNDER_FCS (0x6, if packet has bad FCS) or
5434                                                          UNDER_ERR (0x8, if packet has good FCS). */
5435 #else
5436 	uint64_t len                          : 16;
5437 	uint64_t reserved_16_63               : 48;
5438 #endif
5439 	} s;
5440 	struct cvmx_gmxx_rxx_frm_min_s        cn30xx;
5441 	struct cvmx_gmxx_rxx_frm_min_s        cn31xx;
5442 	struct cvmx_gmxx_rxx_frm_min_s        cn38xx;
5443 	struct cvmx_gmxx_rxx_frm_min_s        cn38xxp2;
5444 	struct cvmx_gmxx_rxx_frm_min_s        cn58xx;
5445 	struct cvmx_gmxx_rxx_frm_min_s        cn58xxp1;
5446 };
5447 typedef union cvmx_gmxx_rxx_frm_min cvmx_gmxx_rxx_frm_min_t;
5448 
5449 /**
5450  * cvmx_gmx#_rx#_ifg
5451  *
5452  * GMX_RX_IFG = RX Min IFG
5453  *
5454  */
5455 union cvmx_gmxx_rxx_ifg {
5456 	uint64_t u64;
5457 	struct cvmx_gmxx_rxx_ifg_s {
5458 #ifdef __BIG_ENDIAN_BITFIELD
5459 	uint64_t reserved_4_63                : 60;
5460 	uint64_t ifg                          : 4;  /**< Min IFG (in IFG*8 bits) between packets used to
5461                                                          determine IFGERR. Normally IFG is 96 bits.
5462                                                          Note in some operating modes, IFG cycles can be
5463                                                          inserted or removed in order to achieve clock rate
5464                                                          adaptation. For these reasons, the default value
5465                                                          is slightly conservative and does not check upto
5466                                                          the full 96 bits of IFG.
5467                                                          (SGMII/1000Base-X only) */
5468 #else
5469 	uint64_t ifg                          : 4;
5470 	uint64_t reserved_4_63                : 60;
5471 #endif
5472 	} s;
5473 	struct cvmx_gmxx_rxx_ifg_s            cn30xx;
5474 	struct cvmx_gmxx_rxx_ifg_s            cn31xx;
5475 	struct cvmx_gmxx_rxx_ifg_s            cn38xx;
5476 	struct cvmx_gmxx_rxx_ifg_s            cn38xxp2;
5477 	struct cvmx_gmxx_rxx_ifg_s            cn50xx;
5478 	struct cvmx_gmxx_rxx_ifg_s            cn52xx;
5479 	struct cvmx_gmxx_rxx_ifg_s            cn52xxp1;
5480 	struct cvmx_gmxx_rxx_ifg_s            cn56xx;
5481 	struct cvmx_gmxx_rxx_ifg_s            cn56xxp1;
5482 	struct cvmx_gmxx_rxx_ifg_s            cn58xx;
5483 	struct cvmx_gmxx_rxx_ifg_s            cn58xxp1;
5484 	struct cvmx_gmxx_rxx_ifg_s            cn61xx;
5485 	struct cvmx_gmxx_rxx_ifg_s            cn63xx;
5486 	struct cvmx_gmxx_rxx_ifg_s            cn63xxp1;
5487 	struct cvmx_gmxx_rxx_ifg_s            cn66xx;
5488 	struct cvmx_gmxx_rxx_ifg_s            cn68xx;
5489 	struct cvmx_gmxx_rxx_ifg_s            cn68xxp1;
5490 	struct cvmx_gmxx_rxx_ifg_s            cnf71xx;
5491 };
5492 typedef union cvmx_gmxx_rxx_ifg cvmx_gmxx_rxx_ifg_t;
5493 
5494 /**
5495  * cvmx_gmx#_rx#_int_en
5496  *
5497  * GMX_RX_INT_EN = Interrupt Enable
5498  *
5499  *
5500  * Notes:
5501  * In XAUI mode prt0 is used for checking.
5502  *
5503  */
5504 union cvmx_gmxx_rxx_int_en {
5505 	uint64_t u64;
5506 	struct cvmx_gmxx_rxx_int_en_s {
5507 #ifdef __BIG_ENDIAN_BITFIELD
5508 	uint64_t reserved_29_63               : 35;
5509 	uint64_t hg2cc                        : 1;  /**< HiGig2 CRC8 or Control char error interrupt enable */
5510 	uint64_t hg2fld                       : 1;  /**< HiGig2 Bad field error interrupt enable */
5511 	uint64_t undat                        : 1;  /**< Unexpected Data
5512                                                          (XAUI Mode only) */
5513 	uint64_t uneop                        : 1;  /**< Unexpected EOP
5514                                                          (XAUI Mode only) */
5515 	uint64_t unsop                        : 1;  /**< Unexpected SOP
5516                                                          (XAUI Mode only) */
5517 	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
5518                                                          than /T/.  The error propagation control
5519                                                          character /E/ will be included as part of the
5520                                                          frame and does not cause a frame termination.
5521                                                          (XAUI Mode only) */
5522 	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Deteted
5523                                                          (XAUI Mode only) */
5524 	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Deteted
5525                                                          (XAUI Mode only) */
5526 	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Deteted
5527                                                          (XAUI Mode only) */
5528 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
5529 	uint64_t phy_dupx                     : 1;  /**< Change in the RMGII inbound LinkDuplex */
5530 	uint64_t phy_spd                      : 1;  /**< Change in the RMGII inbound LinkSpeed */
5531 	uint64_t phy_link                     : 1;  /**< Change in the RMGII inbound LinkStatus */
5532 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
5533                                                          (SGMII/1000Base-X only) */
5534 	uint64_t coldet                       : 1;  /**< Collision Detection
5535                                                          (SGMII/1000Base-X half-duplex only) */
5536 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
5537                                                          (SGMII/1000Base-X only) */
5538 	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
5539 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
5540 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
5541                                                          (SGMII/1000Base-X only) */
5542 	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
5543 	uint64_t skperr                       : 1;  /**< Skipper error */
5544 	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
5545 	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
5546 	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
5547 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
5548 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
5549 	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
5550 	uint64_t carext                       : 1;  /**< Carrier extend error
5551                                                          (SGMII/1000Base-X only) */
5552 	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize */
5553 #else
5554 	uint64_t minerr                       : 1;
5555 	uint64_t carext                       : 1;
5556 	uint64_t maxerr                       : 1;
5557 	uint64_t jabber                       : 1;
5558 	uint64_t fcserr                       : 1;
5559 	uint64_t alnerr                       : 1;
5560 	uint64_t lenerr                       : 1;
5561 	uint64_t rcverr                       : 1;
5562 	uint64_t skperr                       : 1;
5563 	uint64_t niberr                       : 1;
5564 	uint64_t ovrerr                       : 1;
5565 	uint64_t pcterr                       : 1;
5566 	uint64_t rsverr                       : 1;
5567 	uint64_t falerr                       : 1;
5568 	uint64_t coldet                       : 1;
5569 	uint64_t ifgerr                       : 1;
5570 	uint64_t phy_link                     : 1;
5571 	uint64_t phy_spd                      : 1;
5572 	uint64_t phy_dupx                     : 1;
5573 	uint64_t pause_drp                    : 1;
5574 	uint64_t loc_fault                    : 1;
5575 	uint64_t rem_fault                    : 1;
5576 	uint64_t bad_seq                      : 1;
5577 	uint64_t bad_term                     : 1;
5578 	uint64_t unsop                        : 1;
5579 	uint64_t uneop                        : 1;
5580 	uint64_t undat                        : 1;
5581 	uint64_t hg2fld                       : 1;
5582 	uint64_t hg2cc                        : 1;
5583 	uint64_t reserved_29_63               : 35;
5584 #endif
5585 	} s;
5586 	struct cvmx_gmxx_rxx_int_en_cn30xx {
5587 #ifdef __BIG_ENDIAN_BITFIELD
5588 	uint64_t reserved_19_63               : 45;
5589 	uint64_t phy_dupx                     : 1;  /**< Change in the RMGII inbound LinkDuplex */
5590 	uint64_t phy_spd                      : 1;  /**< Change in the RMGII inbound LinkSpeed */
5591 	uint64_t phy_link                     : 1;  /**< Change in the RMGII inbound LinkStatus */
5592 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation */
5593 	uint64_t coldet                       : 1;  /**< Collision Detection */
5594 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
5595 	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
5596 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
5597 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow */
5598 	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
5599 	uint64_t skperr                       : 1;  /**< Skipper error */
5600 	uint64_t rcverr                       : 1;  /**< Frame was received with RMGII Data reception error */
5601 	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
5602 	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
5603 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
5604 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
5605 	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
5606 	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
5607 	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
5608 #else
5609 	uint64_t minerr                       : 1;
5610 	uint64_t carext                       : 1;
5611 	uint64_t maxerr                       : 1;
5612 	uint64_t jabber                       : 1;
5613 	uint64_t fcserr                       : 1;
5614 	uint64_t alnerr                       : 1;
5615 	uint64_t lenerr                       : 1;
5616 	uint64_t rcverr                       : 1;
5617 	uint64_t skperr                       : 1;
5618 	uint64_t niberr                       : 1;
5619 	uint64_t ovrerr                       : 1;
5620 	uint64_t pcterr                       : 1;
5621 	uint64_t rsverr                       : 1;
5622 	uint64_t falerr                       : 1;
5623 	uint64_t coldet                       : 1;
5624 	uint64_t ifgerr                       : 1;
5625 	uint64_t phy_link                     : 1;
5626 	uint64_t phy_spd                      : 1;
5627 	uint64_t phy_dupx                     : 1;
5628 	uint64_t reserved_19_63               : 45;
5629 #endif
5630 	} cn30xx;
5631 	struct cvmx_gmxx_rxx_int_en_cn30xx    cn31xx;
5632 	struct cvmx_gmxx_rxx_int_en_cn30xx    cn38xx;
5633 	struct cvmx_gmxx_rxx_int_en_cn30xx    cn38xxp2;
5634 	struct cvmx_gmxx_rxx_int_en_cn50xx {
5635 #ifdef __BIG_ENDIAN_BITFIELD
5636 	uint64_t reserved_20_63               : 44;
5637 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
5638 	uint64_t phy_dupx                     : 1;  /**< Change in the RMGII inbound LinkDuplex */
5639 	uint64_t phy_spd                      : 1;  /**< Change in the RMGII inbound LinkSpeed */
5640 	uint64_t phy_link                     : 1;  /**< Change in the RMGII inbound LinkStatus */
5641 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation */
5642 	uint64_t coldet                       : 1;  /**< Collision Detection */
5643 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
5644 	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
5645 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
5646 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow */
5647 	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
5648 	uint64_t skperr                       : 1;  /**< Skipper error */
5649 	uint64_t rcverr                       : 1;  /**< Frame was received with RMGII Data reception error */
5650 	uint64_t reserved_6_6                 : 1;
5651 	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
5652 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
5653 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
5654 	uint64_t reserved_2_2                 : 1;
5655 	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
5656 	uint64_t reserved_0_0                 : 1;
5657 #else
5658 	uint64_t reserved_0_0                 : 1;
5659 	uint64_t carext                       : 1;
5660 	uint64_t reserved_2_2                 : 1;
5661 	uint64_t jabber                       : 1;
5662 	uint64_t fcserr                       : 1;
5663 	uint64_t alnerr                       : 1;
5664 	uint64_t reserved_6_6                 : 1;
5665 	uint64_t rcverr                       : 1;
5666 	uint64_t skperr                       : 1;
5667 	uint64_t niberr                       : 1;
5668 	uint64_t ovrerr                       : 1;
5669 	uint64_t pcterr                       : 1;
5670 	uint64_t rsverr                       : 1;
5671 	uint64_t falerr                       : 1;
5672 	uint64_t coldet                       : 1;
5673 	uint64_t ifgerr                       : 1;
5674 	uint64_t phy_link                     : 1;
5675 	uint64_t phy_spd                      : 1;
5676 	uint64_t phy_dupx                     : 1;
5677 	uint64_t pause_drp                    : 1;
5678 	uint64_t reserved_20_63               : 44;
5679 #endif
5680 	} cn50xx;
5681 	struct cvmx_gmxx_rxx_int_en_cn52xx {
5682 #ifdef __BIG_ENDIAN_BITFIELD
5683 	uint64_t reserved_29_63               : 35;
5684 	uint64_t hg2cc                        : 1;  /**< HiGig2 CRC8 or Control char error interrupt enable */
5685 	uint64_t hg2fld                       : 1;  /**< HiGig2 Bad field error interrupt enable */
5686 	uint64_t undat                        : 1;  /**< Unexpected Data
5687                                                          (XAUI Mode only) */
5688 	uint64_t uneop                        : 1;  /**< Unexpected EOP
5689                                                          (XAUI Mode only) */
5690 	uint64_t unsop                        : 1;  /**< Unexpected SOP
5691                                                          (XAUI Mode only) */
5692 	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
5693                                                          than /T/.  The error propagation control
5694                                                          character /E/ will be included as part of the
5695                                                          frame and does not cause a frame termination.
5696                                                          (XAUI Mode only) */
5697 	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Deteted
5698                                                          (XAUI Mode only) */
5699 	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Deteted
5700                                                          (XAUI Mode only) */
5701 	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Deteted
5702                                                          (XAUI Mode only) */
5703 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
5704 	uint64_t reserved_16_18               : 3;
5705 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
5706                                                          (SGMII/1000Base-X only) */
5707 	uint64_t coldet                       : 1;  /**< Collision Detection
5708                                                          (SGMII/1000Base-X half-duplex only) */
5709 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
5710                                                          (SGMII/1000Base-X only) */
5711 	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
5712 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
5713 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
5714                                                          (SGMII/1000Base-X only) */
5715 	uint64_t reserved_9_9                 : 1;
5716 	uint64_t skperr                       : 1;  /**< Skipper error */
5717 	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
5718 	uint64_t reserved_5_6                 : 2;
5719 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
5720 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
5721 	uint64_t reserved_2_2                 : 1;
5722 	uint64_t carext                       : 1;  /**< Carrier extend error
5723                                                          (SGMII/1000Base-X only) */
5724 	uint64_t reserved_0_0                 : 1;
5725 #else
5726 	uint64_t reserved_0_0                 : 1;
5727 	uint64_t carext                       : 1;
5728 	uint64_t reserved_2_2                 : 1;
5729 	uint64_t jabber                       : 1;
5730 	uint64_t fcserr                       : 1;
5731 	uint64_t reserved_5_6                 : 2;
5732 	uint64_t rcverr                       : 1;
5733 	uint64_t skperr                       : 1;
5734 	uint64_t reserved_9_9                 : 1;
5735 	uint64_t ovrerr                       : 1;
5736 	uint64_t pcterr                       : 1;
5737 	uint64_t rsverr                       : 1;
5738 	uint64_t falerr                       : 1;
5739 	uint64_t coldet                       : 1;
5740 	uint64_t ifgerr                       : 1;
5741 	uint64_t reserved_16_18               : 3;
5742 	uint64_t pause_drp                    : 1;
5743 	uint64_t loc_fault                    : 1;
5744 	uint64_t rem_fault                    : 1;
5745 	uint64_t bad_seq                      : 1;
5746 	uint64_t bad_term                     : 1;
5747 	uint64_t unsop                        : 1;
5748 	uint64_t uneop                        : 1;
5749 	uint64_t undat                        : 1;
5750 	uint64_t hg2fld                       : 1;
5751 	uint64_t hg2cc                        : 1;
5752 	uint64_t reserved_29_63               : 35;
5753 #endif
5754 	} cn52xx;
5755 	struct cvmx_gmxx_rxx_int_en_cn52xx    cn52xxp1;
5756 	struct cvmx_gmxx_rxx_int_en_cn52xx    cn56xx;
5757 	struct cvmx_gmxx_rxx_int_en_cn56xxp1 {
5758 #ifdef __BIG_ENDIAN_BITFIELD
5759 	uint64_t reserved_27_63               : 37;
5760 	uint64_t undat                        : 1;  /**< Unexpected Data
5761                                                          (XAUI Mode only) */
5762 	uint64_t uneop                        : 1;  /**< Unexpected EOP
5763                                                          (XAUI Mode only) */
5764 	uint64_t unsop                        : 1;  /**< Unexpected SOP
5765                                                          (XAUI Mode only) */
5766 	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
5767                                                          than /T/.  The error propagation control
5768                                                          character /E/ will be included as part of the
5769                                                          frame and does not cause a frame termination.
5770                                                          (XAUI Mode only) */
5771 	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Deteted
5772                                                          (XAUI Mode only) */
5773 	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Deteted
5774                                                          (XAUI Mode only) */
5775 	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Deteted
5776                                                          (XAUI Mode only) */
5777 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
5778 	uint64_t reserved_16_18               : 3;
5779 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
5780                                                          (SGMII/1000Base-X only) */
5781 	uint64_t coldet                       : 1;  /**< Collision Detection
5782                                                          (SGMII/1000Base-X half-duplex only) */
5783 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
5784                                                          (SGMII/1000Base-X only) */
5785 	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
5786 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
5787 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
5788                                                          (SGMII/1000Base-X only) */
5789 	uint64_t reserved_9_9                 : 1;
5790 	uint64_t skperr                       : 1;  /**< Skipper error */
5791 	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
5792 	uint64_t reserved_5_6                 : 2;
5793 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
5794 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
5795 	uint64_t reserved_2_2                 : 1;
5796 	uint64_t carext                       : 1;  /**< Carrier extend error
5797                                                          (SGMII/1000Base-X only) */
5798 	uint64_t reserved_0_0                 : 1;
5799 #else
5800 	uint64_t reserved_0_0                 : 1;
5801 	uint64_t carext                       : 1;
5802 	uint64_t reserved_2_2                 : 1;
5803 	uint64_t jabber                       : 1;
5804 	uint64_t fcserr                       : 1;
5805 	uint64_t reserved_5_6                 : 2;
5806 	uint64_t rcverr                       : 1;
5807 	uint64_t skperr                       : 1;
5808 	uint64_t reserved_9_9                 : 1;
5809 	uint64_t ovrerr                       : 1;
5810 	uint64_t pcterr                       : 1;
5811 	uint64_t rsverr                       : 1;
5812 	uint64_t falerr                       : 1;
5813 	uint64_t coldet                       : 1;
5814 	uint64_t ifgerr                       : 1;
5815 	uint64_t reserved_16_18               : 3;
5816 	uint64_t pause_drp                    : 1;
5817 	uint64_t loc_fault                    : 1;
5818 	uint64_t rem_fault                    : 1;
5819 	uint64_t bad_seq                      : 1;
5820 	uint64_t bad_term                     : 1;
5821 	uint64_t unsop                        : 1;
5822 	uint64_t uneop                        : 1;
5823 	uint64_t undat                        : 1;
5824 	uint64_t reserved_27_63               : 37;
5825 #endif
5826 	} cn56xxp1;
5827 	struct cvmx_gmxx_rxx_int_en_cn58xx {
5828 #ifdef __BIG_ENDIAN_BITFIELD
5829 	uint64_t reserved_20_63               : 44;
5830 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
5831 	uint64_t phy_dupx                     : 1;  /**< Change in the RMGII inbound LinkDuplex */
5832 	uint64_t phy_spd                      : 1;  /**< Change in the RMGII inbound LinkSpeed */
5833 	uint64_t phy_link                     : 1;  /**< Change in the RMGII inbound LinkStatus */
5834 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation */
5835 	uint64_t coldet                       : 1;  /**< Collision Detection */
5836 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
5837 	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
5838 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
5839 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow */
5840 	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
5841 	uint64_t skperr                       : 1;  /**< Skipper error */
5842 	uint64_t rcverr                       : 1;  /**< Frame was received with RMGII Data reception error */
5843 	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
5844 	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
5845 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
5846 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
5847 	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
5848 	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
5849 	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
5850 #else
5851 	uint64_t minerr                       : 1;
5852 	uint64_t carext                       : 1;
5853 	uint64_t maxerr                       : 1;
5854 	uint64_t jabber                       : 1;
5855 	uint64_t fcserr                       : 1;
5856 	uint64_t alnerr                       : 1;
5857 	uint64_t lenerr                       : 1;
5858 	uint64_t rcverr                       : 1;
5859 	uint64_t skperr                       : 1;
5860 	uint64_t niberr                       : 1;
5861 	uint64_t ovrerr                       : 1;
5862 	uint64_t pcterr                       : 1;
5863 	uint64_t rsverr                       : 1;
5864 	uint64_t falerr                       : 1;
5865 	uint64_t coldet                       : 1;
5866 	uint64_t ifgerr                       : 1;
5867 	uint64_t phy_link                     : 1;
5868 	uint64_t phy_spd                      : 1;
5869 	uint64_t phy_dupx                     : 1;
5870 	uint64_t pause_drp                    : 1;
5871 	uint64_t reserved_20_63               : 44;
5872 #endif
5873 	} cn58xx;
5874 	struct cvmx_gmxx_rxx_int_en_cn58xx    cn58xxp1;
5875 	struct cvmx_gmxx_rxx_int_en_cn61xx {
5876 #ifdef __BIG_ENDIAN_BITFIELD
5877 	uint64_t reserved_29_63               : 35;
5878 	uint64_t hg2cc                        : 1;  /**< HiGig2 CRC8 or Control char error interrupt enable */
5879 	uint64_t hg2fld                       : 1;  /**< HiGig2 Bad field error interrupt enable */
5880 	uint64_t undat                        : 1;  /**< Unexpected Data
5881                                                          (XAUI Mode only) */
5882 	uint64_t uneop                        : 1;  /**< Unexpected EOP
5883                                                          (XAUI Mode only) */
5884 	uint64_t unsop                        : 1;  /**< Unexpected SOP
5885                                                          (XAUI Mode only) */
5886 	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
5887                                                          than /T/.  The error propagation control
5888                                                          character /E/ will be included as part of the
5889                                                          frame and does not cause a frame termination.
5890                                                          (XAUI Mode only) */
5891 	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Deteted
5892                                                          (XAUI Mode only) */
5893 	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Deteted
5894                                                          (XAUI Mode only) */
5895 	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Deteted
5896                                                          (XAUI Mode only) */
5897 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
5898 	uint64_t reserved_16_18               : 3;
5899 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
5900                                                          (SGMII/1000Base-X only) */
5901 	uint64_t coldet                       : 1;  /**< Collision Detection
5902                                                          (SGMII/1000Base-X half-duplex only) */
5903 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
5904                                                          (SGMII/1000Base-X only) */
5905 	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
5906 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
5907 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
5908                                                          (SGMII/1000Base-X only) */
5909 	uint64_t reserved_9_9                 : 1;
5910 	uint64_t skperr                       : 1;  /**< Skipper error */
5911 	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
5912 	uint64_t reserved_5_6                 : 2;
5913 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
5914 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
5915 	uint64_t reserved_2_2                 : 1;
5916 	uint64_t carext                       : 1;  /**< Carrier extend error
5917                                                          (SGMII/1000Base-X only) */
5918 	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize */
5919 #else
5920 	uint64_t minerr                       : 1;
5921 	uint64_t carext                       : 1;
5922 	uint64_t reserved_2_2                 : 1;
5923 	uint64_t jabber                       : 1;
5924 	uint64_t fcserr                       : 1;
5925 	uint64_t reserved_5_6                 : 2;
5926 	uint64_t rcverr                       : 1;
5927 	uint64_t skperr                       : 1;
5928 	uint64_t reserved_9_9                 : 1;
5929 	uint64_t ovrerr                       : 1;
5930 	uint64_t pcterr                       : 1;
5931 	uint64_t rsverr                       : 1;
5932 	uint64_t falerr                       : 1;
5933 	uint64_t coldet                       : 1;
5934 	uint64_t ifgerr                       : 1;
5935 	uint64_t reserved_16_18               : 3;
5936 	uint64_t pause_drp                    : 1;
5937 	uint64_t loc_fault                    : 1;
5938 	uint64_t rem_fault                    : 1;
5939 	uint64_t bad_seq                      : 1;
5940 	uint64_t bad_term                     : 1;
5941 	uint64_t unsop                        : 1;
5942 	uint64_t uneop                        : 1;
5943 	uint64_t undat                        : 1;
5944 	uint64_t hg2fld                       : 1;
5945 	uint64_t hg2cc                        : 1;
5946 	uint64_t reserved_29_63               : 35;
5947 #endif
5948 	} cn61xx;
5949 	struct cvmx_gmxx_rxx_int_en_cn61xx    cn63xx;
5950 	struct cvmx_gmxx_rxx_int_en_cn61xx    cn63xxp1;
5951 	struct cvmx_gmxx_rxx_int_en_cn61xx    cn66xx;
5952 	struct cvmx_gmxx_rxx_int_en_cn61xx    cn68xx;
5953 	struct cvmx_gmxx_rxx_int_en_cn61xx    cn68xxp1;
5954 	struct cvmx_gmxx_rxx_int_en_cn61xx    cnf71xx;
5955 };
5956 typedef union cvmx_gmxx_rxx_int_en cvmx_gmxx_rxx_int_en_t;
5957 
5958 /**
5959  * cvmx_gmx#_rx#_int_reg
5960  *
5961  * GMX_RX_INT_REG = Interrupt Register
5962  *
5963  *
5964  * Notes:
5965  * (1) exceptions will only be raised to the control processor if the
5966  *     corresponding bit in the GMX_RX_INT_EN register is set.
5967  *
5968  * (2) exception conditions 10:0 can also set the rcv/opcode in the received
5969  *     packet's workQ entry.  The GMX_RX_FRM_CHK register provides a bit mask
5970  *     for configuring which conditions set the error.
5971  *
5972  * (3) in half duplex operation, the expectation is that collisions will appear
5973  *     as either MINERR o r CAREXT errors.
5974  *
5975  * (4) JABBER - An RX Jabber error indicates that a packet was received which
5976  *              is longer than the maximum allowed packet as defined by the
5977  *              system.  GMX will truncate the packet at the JABBER count.
5978  *              Failure to do so could lead to system instabilty.
5979  *
5980  * (5) NIBERR - This error is illegal at 1000Mbs speeds
5981  *              (GMX_RX_PRT_CFG[SPEED]==0) and will never assert.
5982  *
5983  * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
5984  *              GMX_RX_FRM_MAX.  For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
5985  *              > GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
5986  *
5987  * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < 64
5988  *
5989  * (8) ALNERR - Indicates that the packet received was not an integer number of
5990  *              bytes.  If FCS checking is enabled, ALNERR will only assert if
5991  *              the FCS is bad.  If FCS checking is disabled, ALNERR will
5992  *              assert in all non-integer frame cases.
5993  *
5994  * (9) Collisions - Collisions can only occur in half-duplex mode.  A collision
5995  *                  is assumed by the receiver when the slottime
5996  *                  (GMX_PRT_CFG[SLOTTIME]) is not satisfied.  In 10/100 mode,
5997  *                  this will result in a frame < SLOTTIME.  In 1000 mode, it
5998  *                  could result either in frame < SLOTTIME or a carrier extend
5999  *                  error with the SLOTTIME.  These conditions are visible by...
6000  *
6001  *                  . transfer ended before slottime - COLDET
6002  *                  . carrier extend error           - CAREXT
6003  *
6004  * (A) LENERR - Length errors occur when the received packet does not match the
6005  *              length field.  LENERR is only checked for packets between 64
6006  *              and 1500 bytes.  For untagged frames, the length must exact
6007  *              match.  For tagged frames the length or length+4 must match.
6008  *
6009  * (B) PCTERR - checks that the frame begins with a valid PREAMBLE sequence.
6010  *              Does not check the number of PREAMBLE cycles.
6011  *
6012  * (C) OVRERR -
6013  *
6014  *              OVRERR is an architectural assertion check internal to GMX to
6015  *              make sure no assumption was violated.  In a correctly operating
6016  *              system, this interrupt can never fire.
6017  *
6018  *              GMX has an internal arbiter which selects which of 4 ports to
6019  *              buffer in the main RX FIFO.  If we normally buffer 8 bytes,
6020  *              then each port will typically push a tick every 8 cycles - if
6021  *              the packet interface is going as fast as possible.  If there
6022  *              are four ports, they push every two cycles.  So that's the
6023  *              assumption.  That the inbound module will always be able to
6024  *              consume the tick before another is produced.  If that doesn't
6025  *              happen - that's when OVRERR will assert.
6026  *
6027  * (D) In XAUI mode prt0 is used for interrupt logging.
6028  */
6029 union cvmx_gmxx_rxx_int_reg {
6030 	uint64_t u64;
6031 	struct cvmx_gmxx_rxx_int_reg_s {
6032 #ifdef __BIG_ENDIAN_BITFIELD
6033 	uint64_t reserved_29_63               : 35;
6034 	uint64_t hg2cc                        : 1;  /**< HiGig2 received message CRC or Control char  error
6035                                                          Set when either CRC8 error detected or when
6036                                                          a Control Character is found in the message
6037                                                          bytes after the K.SOM
6038                                                          NOTE: HG2CC has higher priority than HG2FLD
6039                                                                i.e. a HiGig2 message that results in HG2CC
6040                                                                getting set, will never set HG2FLD. */
6041 	uint64_t hg2fld                       : 1;  /**< HiGig2 received message field error, as below
6042                                                          1) MSG_TYPE field not 6'b00_0000
6043                                                             i.e. it is not a FLOW CONTROL message, which
6044                                                             is the only defined type for HiGig2
6045                                                          2) FWD_TYPE field not 2'b00 i.e. Link Level msg
6046                                                             which is the only defined type for HiGig2
6047                                                          3) FC_OBJECT field is neither 4'b0000 for
6048                                                             Physical Link nor 4'b0010 for Logical Link.
6049                                                             Those are the only two defined types in HiGig2 */
6050 	uint64_t undat                        : 1;  /**< Unexpected Data
6051                                                          (XAUI Mode only) */
6052 	uint64_t uneop                        : 1;  /**< Unexpected EOP
6053                                                          (XAUI Mode only) */
6054 	uint64_t unsop                        : 1;  /**< Unexpected SOP
6055                                                          (XAUI Mode only) */
6056 	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
6057                                                          than /T/.  The error propagation control
6058                                                          character /E/ will be included as part of the
6059                                                          frame and does not cause a frame termination.
6060                                                          (XAUI Mode only) */
6061 	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Deteted
6062                                                          (XAUI Mode only) */
6063 	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Deteted
6064                                                          (XAUI Mode only) */
6065 	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Deteted
6066                                                          (XAUI Mode only) */
6067 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
6068 	uint64_t phy_dupx                     : 1;  /**< Change in the RMGII inbound LinkDuplex */
6069 	uint64_t phy_spd                      : 1;  /**< Change in the RMGII inbound LinkSpeed */
6070 	uint64_t phy_link                     : 1;  /**< Change in the RMGII inbound LinkStatus */
6071 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
6072                                                          Does not necessarily indicate a failure
6073                                                          (SGMII/1000Base-X only) */
6074 	uint64_t coldet                       : 1;  /**< Collision Detection
6075                                                          (SGMII/1000Base-X half-duplex only) */
6076 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
6077                                                          (SGMII/1000Base-X only) */
6078 	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
6079 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol
6080                                                          In XAUI mode, the column of data that was bad
6081                                                          will be logged in GMX_RX_XAUI_BAD_COL */
6082 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
6083                                                          This interrupt should never assert
6084                                                          (SGMII/1000Base-X only) */
6085 	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
6086 	uint64_t skperr                       : 1;  /**< Skipper error */
6087 	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
6088 	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
6089 	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
6090 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
6091 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
6092 	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
6093 	uint64_t carext                       : 1;  /**< Carrier extend error
6094                                                          (SGMII/1000Base-X only) */
6095 	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize
6096                                                          Frame length checks are typically handled in PIP
6097                                                          (PIP_INT_REG[MINERR]), but pause frames are
6098                                                          normally discarded before being inspected by PIP. */
6099 #else
6100 	uint64_t minerr                       : 1;
6101 	uint64_t carext                       : 1;
6102 	uint64_t maxerr                       : 1;
6103 	uint64_t jabber                       : 1;
6104 	uint64_t fcserr                       : 1;
6105 	uint64_t alnerr                       : 1;
6106 	uint64_t lenerr                       : 1;
6107 	uint64_t rcverr                       : 1;
6108 	uint64_t skperr                       : 1;
6109 	uint64_t niberr                       : 1;
6110 	uint64_t ovrerr                       : 1;
6111 	uint64_t pcterr                       : 1;
6112 	uint64_t rsverr                       : 1;
6113 	uint64_t falerr                       : 1;
6114 	uint64_t coldet                       : 1;
6115 	uint64_t ifgerr                       : 1;
6116 	uint64_t phy_link                     : 1;
6117 	uint64_t phy_spd                      : 1;
6118 	uint64_t phy_dupx                     : 1;
6119 	uint64_t pause_drp                    : 1;
6120 	uint64_t loc_fault                    : 1;
6121 	uint64_t rem_fault                    : 1;
6122 	uint64_t bad_seq                      : 1;
6123 	uint64_t bad_term                     : 1;
6124 	uint64_t unsop                        : 1;
6125 	uint64_t uneop                        : 1;
6126 	uint64_t undat                        : 1;
6127 	uint64_t hg2fld                       : 1;
6128 	uint64_t hg2cc                        : 1;
6129 	uint64_t reserved_29_63               : 35;
6130 #endif
6131 	} s;
6132 	struct cvmx_gmxx_rxx_int_reg_cn30xx {
6133 #ifdef __BIG_ENDIAN_BITFIELD
6134 	uint64_t reserved_19_63               : 45;
6135 	uint64_t phy_dupx                     : 1;  /**< Change in the RMGII inbound LinkDuplex */
6136 	uint64_t phy_spd                      : 1;  /**< Change in the RMGII inbound LinkSpeed */
6137 	uint64_t phy_link                     : 1;  /**< Change in the RMGII inbound LinkStatus */
6138 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
6139                                                          Does not necessarily indicate a failure */
6140 	uint64_t coldet                       : 1;  /**< Collision Detection */
6141 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
6142 	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
6143 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
6144 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
6145                                                          This interrupt should never assert */
6146 	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
6147 	uint64_t skperr                       : 1;  /**< Skipper error */
6148 	uint64_t rcverr                       : 1;  /**< Frame was received with RMGII Data reception error */
6149 	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
6150 	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
6151 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
6152 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
6153 	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
6154 	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
6155 	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
6156 #else
6157 	uint64_t minerr                       : 1;
6158 	uint64_t carext                       : 1;
6159 	uint64_t maxerr                       : 1;
6160 	uint64_t jabber                       : 1;
6161 	uint64_t fcserr                       : 1;
6162 	uint64_t alnerr                       : 1;
6163 	uint64_t lenerr                       : 1;
6164 	uint64_t rcverr                       : 1;
6165 	uint64_t skperr                       : 1;
6166 	uint64_t niberr                       : 1;
6167 	uint64_t ovrerr                       : 1;
6168 	uint64_t pcterr                       : 1;
6169 	uint64_t rsverr                       : 1;
6170 	uint64_t falerr                       : 1;
6171 	uint64_t coldet                       : 1;
6172 	uint64_t ifgerr                       : 1;
6173 	uint64_t phy_link                     : 1;
6174 	uint64_t phy_spd                      : 1;
6175 	uint64_t phy_dupx                     : 1;
6176 	uint64_t reserved_19_63               : 45;
6177 #endif
6178 	} cn30xx;
6179 	struct cvmx_gmxx_rxx_int_reg_cn30xx   cn31xx;
6180 	struct cvmx_gmxx_rxx_int_reg_cn30xx   cn38xx;
6181 	struct cvmx_gmxx_rxx_int_reg_cn30xx   cn38xxp2;
6182 	struct cvmx_gmxx_rxx_int_reg_cn50xx {
6183 #ifdef __BIG_ENDIAN_BITFIELD
6184 	uint64_t reserved_20_63               : 44;
6185 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
6186 	uint64_t phy_dupx                     : 1;  /**< Change in the RMGII inbound LinkDuplex */
6187 	uint64_t phy_spd                      : 1;  /**< Change in the RMGII inbound LinkSpeed */
6188 	uint64_t phy_link                     : 1;  /**< Change in the RMGII inbound LinkStatus */
6189 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
6190                                                          Does not necessarily indicate a failure */
6191 	uint64_t coldet                       : 1;  /**< Collision Detection */
6192 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
6193 	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
6194 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
6195 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
6196                                                          This interrupt should never assert */
6197 	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
6198 	uint64_t skperr                       : 1;  /**< Skipper error */
6199 	uint64_t rcverr                       : 1;  /**< Frame was received with RMGII Data reception error */
6200 	uint64_t reserved_6_6                 : 1;
6201 	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
6202 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
6203 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
6204 	uint64_t reserved_2_2                 : 1;
6205 	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
6206 	uint64_t reserved_0_0                 : 1;
6207 #else
6208 	uint64_t reserved_0_0                 : 1;
6209 	uint64_t carext                       : 1;
6210 	uint64_t reserved_2_2                 : 1;
6211 	uint64_t jabber                       : 1;
6212 	uint64_t fcserr                       : 1;
6213 	uint64_t alnerr                       : 1;
6214 	uint64_t reserved_6_6                 : 1;
6215 	uint64_t rcverr                       : 1;
6216 	uint64_t skperr                       : 1;
6217 	uint64_t niberr                       : 1;
6218 	uint64_t ovrerr                       : 1;
6219 	uint64_t pcterr                       : 1;
6220 	uint64_t rsverr                       : 1;
6221 	uint64_t falerr                       : 1;
6222 	uint64_t coldet                       : 1;
6223 	uint64_t ifgerr                       : 1;
6224 	uint64_t phy_link                     : 1;
6225 	uint64_t phy_spd                      : 1;
6226 	uint64_t phy_dupx                     : 1;
6227 	uint64_t pause_drp                    : 1;
6228 	uint64_t reserved_20_63               : 44;
6229 #endif
6230 	} cn50xx;
6231 	struct cvmx_gmxx_rxx_int_reg_cn52xx {
6232 #ifdef __BIG_ENDIAN_BITFIELD
6233 	uint64_t reserved_29_63               : 35;
6234 	uint64_t hg2cc                        : 1;  /**< HiGig2 received message CRC or Control char  error
6235                                                          Set when either CRC8 error detected or when
6236                                                          a Control Character is found in the message
6237                                                          bytes after the K.SOM
6238                                                          NOTE: HG2CC has higher priority than HG2FLD
6239                                                                i.e. a HiGig2 message that results in HG2CC
6240                                                                getting set, will never set HG2FLD. */
6241 	uint64_t hg2fld                       : 1;  /**< HiGig2 received message field error, as below
6242                                                          1) MSG_TYPE field not 6'b00_0000
6243                                                             i.e. it is not a FLOW CONTROL message, which
6244                                                             is the only defined type for HiGig2
6245                                                          2) FWD_TYPE field not 2'b00 i.e. Link Level msg
6246                                                             which is the only defined type for HiGig2
6247                                                          3) FC_OBJECT field is neither 4'b0000 for
6248                                                             Physical Link nor 4'b0010 for Logical Link.
6249                                                             Those are the only two defined types in HiGig2 */
6250 	uint64_t undat                        : 1;  /**< Unexpected Data
6251                                                          (XAUI Mode only) */
6252 	uint64_t uneop                        : 1;  /**< Unexpected EOP
6253                                                          (XAUI Mode only) */
6254 	uint64_t unsop                        : 1;  /**< Unexpected SOP
6255                                                          (XAUI Mode only) */
6256 	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
6257                                                          than /T/.  The error propagation control
6258                                                          character /E/ will be included as part of the
6259                                                          frame and does not cause a frame termination.
6260                                                          (XAUI Mode only) */
6261 	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Deteted
6262                                                          (XAUI Mode only) */
6263 	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Deteted
6264                                                          (XAUI Mode only) */
6265 	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Deteted
6266                                                          (XAUI Mode only) */
6267 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
6268 	uint64_t reserved_16_18               : 3;
6269 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
6270                                                          Does not necessarily indicate a failure
6271                                                          (SGMII/1000Base-X only) */
6272 	uint64_t coldet                       : 1;  /**< Collision Detection
6273                                                          (SGMII/1000Base-X half-duplex only) */
6274 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
6275                                                          (SGMII/1000Base-X only) */
6276 	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
6277 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol
6278                                                          In XAUI mode, the column of data that was bad
6279                                                          will be logged in GMX_RX_XAUI_BAD_COL */
6280 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
6281                                                          This interrupt should never assert
6282                                                          (SGMII/1000Base-X only) */
6283 	uint64_t reserved_9_9                 : 1;
6284 	uint64_t skperr                       : 1;  /**< Skipper error */
6285 	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
6286 	uint64_t reserved_5_6                 : 2;
6287 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
6288 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
6289 	uint64_t reserved_2_2                 : 1;
6290 	uint64_t carext                       : 1;  /**< Carrier extend error
6291                                                          (SGMII/1000Base-X only) */
6292 	uint64_t reserved_0_0                 : 1;
6293 #else
6294 	uint64_t reserved_0_0                 : 1;
6295 	uint64_t carext                       : 1;
6296 	uint64_t reserved_2_2                 : 1;
6297 	uint64_t jabber                       : 1;
6298 	uint64_t fcserr                       : 1;
6299 	uint64_t reserved_5_6                 : 2;
6300 	uint64_t rcverr                       : 1;
6301 	uint64_t skperr                       : 1;
6302 	uint64_t reserved_9_9                 : 1;
6303 	uint64_t ovrerr                       : 1;
6304 	uint64_t pcterr                       : 1;
6305 	uint64_t rsverr                       : 1;
6306 	uint64_t falerr                       : 1;
6307 	uint64_t coldet                       : 1;
6308 	uint64_t ifgerr                       : 1;
6309 	uint64_t reserved_16_18               : 3;
6310 	uint64_t pause_drp                    : 1;
6311 	uint64_t loc_fault                    : 1;
6312 	uint64_t rem_fault                    : 1;
6313 	uint64_t bad_seq                      : 1;
6314 	uint64_t bad_term                     : 1;
6315 	uint64_t unsop                        : 1;
6316 	uint64_t uneop                        : 1;
6317 	uint64_t undat                        : 1;
6318 	uint64_t hg2fld                       : 1;
6319 	uint64_t hg2cc                        : 1;
6320 	uint64_t reserved_29_63               : 35;
6321 #endif
6322 	} cn52xx;
6323 	struct cvmx_gmxx_rxx_int_reg_cn52xx   cn52xxp1;
6324 	struct cvmx_gmxx_rxx_int_reg_cn52xx   cn56xx;
6325 	struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
6326 #ifdef __BIG_ENDIAN_BITFIELD
6327 	uint64_t reserved_27_63               : 37;
6328 	uint64_t undat                        : 1;  /**< Unexpected Data
6329                                                          (XAUI Mode only) */
6330 	uint64_t uneop                        : 1;  /**< Unexpected EOP
6331                                                          (XAUI Mode only) */
6332 	uint64_t unsop                        : 1;  /**< Unexpected SOP
6333                                                          (XAUI Mode only) */
6334 	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
6335                                                          than /T/.  The error propagation control
6336                                                          character /E/ will be included as part of the
6337                                                          frame and does not cause a frame termination.
6338                                                          (XAUI Mode only) */
6339 	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Deteted
6340                                                          (XAUI Mode only) */
6341 	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Deteted
6342                                                          (XAUI Mode only) */
6343 	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Deteted
6344                                                          (XAUI Mode only) */
6345 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
6346 	uint64_t reserved_16_18               : 3;
6347 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
6348                                                          Does not necessarily indicate a failure
6349                                                          (SGMII/1000Base-X only) */
6350 	uint64_t coldet                       : 1;  /**< Collision Detection
6351                                                          (SGMII/1000Base-X half-duplex only) */
6352 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
6353                                                          (SGMII/1000Base-X only) */
6354 	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
6355 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol
6356                                                          In XAUI mode, the column of data that was bad
6357                                                          will be logged in GMX_RX_XAUI_BAD_COL */
6358 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
6359                                                          This interrupt should never assert
6360                                                          (SGMII/1000Base-X only) */
6361 	uint64_t reserved_9_9                 : 1;
6362 	uint64_t skperr                       : 1;  /**< Skipper error */
6363 	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
6364 	uint64_t reserved_5_6                 : 2;
6365 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
6366 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
6367 	uint64_t reserved_2_2                 : 1;
6368 	uint64_t carext                       : 1;  /**< Carrier extend error
6369                                                          (SGMII/1000Base-X only) */
6370 	uint64_t reserved_0_0                 : 1;
6371 #else
6372 	uint64_t reserved_0_0                 : 1;
6373 	uint64_t carext                       : 1;
6374 	uint64_t reserved_2_2                 : 1;
6375 	uint64_t jabber                       : 1;
6376 	uint64_t fcserr                       : 1;
6377 	uint64_t reserved_5_6                 : 2;
6378 	uint64_t rcverr                       : 1;
6379 	uint64_t skperr                       : 1;
6380 	uint64_t reserved_9_9                 : 1;
6381 	uint64_t ovrerr                       : 1;
6382 	uint64_t pcterr                       : 1;
6383 	uint64_t rsverr                       : 1;
6384 	uint64_t falerr                       : 1;
6385 	uint64_t coldet                       : 1;
6386 	uint64_t ifgerr                       : 1;
6387 	uint64_t reserved_16_18               : 3;
6388 	uint64_t pause_drp                    : 1;
6389 	uint64_t loc_fault                    : 1;
6390 	uint64_t rem_fault                    : 1;
6391 	uint64_t bad_seq                      : 1;
6392 	uint64_t bad_term                     : 1;
6393 	uint64_t unsop                        : 1;
6394 	uint64_t uneop                        : 1;
6395 	uint64_t undat                        : 1;
6396 	uint64_t reserved_27_63               : 37;
6397 #endif
6398 	} cn56xxp1;
6399 	struct cvmx_gmxx_rxx_int_reg_cn58xx {
6400 #ifdef __BIG_ENDIAN_BITFIELD
6401 	uint64_t reserved_20_63               : 44;
6402 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
6403 	uint64_t phy_dupx                     : 1;  /**< Change in the RMGII inbound LinkDuplex */
6404 	uint64_t phy_spd                      : 1;  /**< Change in the RMGII inbound LinkSpeed */
6405 	uint64_t phy_link                     : 1;  /**< Change in the RMGII inbound LinkStatus */
6406 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
6407                                                          Does not necessarily indicate a failure */
6408 	uint64_t coldet                       : 1;  /**< Collision Detection */
6409 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime */
6410 	uint64_t rsverr                       : 1;  /**< RGMII reserved opcodes */
6411 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol */
6412 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
6413                                                          This interrupt should never assert */
6414 	uint64_t niberr                       : 1;  /**< Nibble error (hi_nibble != lo_nibble) */
6415 	uint64_t skperr                       : 1;  /**< Skipper error */
6416 	uint64_t rcverr                       : 1;  /**< Frame was received with RMGII Data reception error */
6417 	uint64_t lenerr                       : 1;  /**< Frame was received with length error */
6418 	uint64_t alnerr                       : 1;  /**< Frame was received with an alignment error */
6419 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
6420 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
6421 	uint64_t maxerr                       : 1;  /**< Frame was received with length > max_length */
6422 	uint64_t carext                       : 1;  /**< RGMII carrier extend error */
6423 	uint64_t minerr                       : 1;  /**< Frame was received with length < min_length */
6424 #else
6425 	uint64_t minerr                       : 1;
6426 	uint64_t carext                       : 1;
6427 	uint64_t maxerr                       : 1;
6428 	uint64_t jabber                       : 1;
6429 	uint64_t fcserr                       : 1;
6430 	uint64_t alnerr                       : 1;
6431 	uint64_t lenerr                       : 1;
6432 	uint64_t rcverr                       : 1;
6433 	uint64_t skperr                       : 1;
6434 	uint64_t niberr                       : 1;
6435 	uint64_t ovrerr                       : 1;
6436 	uint64_t pcterr                       : 1;
6437 	uint64_t rsverr                       : 1;
6438 	uint64_t falerr                       : 1;
6439 	uint64_t coldet                       : 1;
6440 	uint64_t ifgerr                       : 1;
6441 	uint64_t phy_link                     : 1;
6442 	uint64_t phy_spd                      : 1;
6443 	uint64_t phy_dupx                     : 1;
6444 	uint64_t pause_drp                    : 1;
6445 	uint64_t reserved_20_63               : 44;
6446 #endif
6447 	} cn58xx;
6448 	struct cvmx_gmxx_rxx_int_reg_cn58xx   cn58xxp1;
6449 	struct cvmx_gmxx_rxx_int_reg_cn61xx {
6450 #ifdef __BIG_ENDIAN_BITFIELD
6451 	uint64_t reserved_29_63               : 35;
6452 	uint64_t hg2cc                        : 1;  /**< HiGig2 received message CRC or Control char  error
6453                                                          Set when either CRC8 error detected or when
6454                                                          a Control Character is found in the message
6455                                                          bytes after the K.SOM
6456                                                          NOTE: HG2CC has higher priority than HG2FLD
6457                                                                i.e. a HiGig2 message that results in HG2CC
6458                                                                getting set, will never set HG2FLD. */
6459 	uint64_t hg2fld                       : 1;  /**< HiGig2 received message field error, as below
6460                                                          1) MSG_TYPE field not 6'b00_0000
6461                                                             i.e. it is not a FLOW CONTROL message, which
6462                                                             is the only defined type for HiGig2
6463                                                          2) FWD_TYPE field not 2'b00 i.e. Link Level msg
6464                                                             which is the only defined type for HiGig2
6465                                                          3) FC_OBJECT field is neither 4'b0000 for
6466                                                             Physical Link nor 4'b0010 for Logical Link.
6467                                                             Those are the only two defined types in HiGig2 */
6468 	uint64_t undat                        : 1;  /**< Unexpected Data
6469                                                          (XAUI Mode only) */
6470 	uint64_t uneop                        : 1;  /**< Unexpected EOP
6471                                                          (XAUI Mode only) */
6472 	uint64_t unsop                        : 1;  /**< Unexpected SOP
6473                                                          (XAUI Mode only) */
6474 	uint64_t bad_term                     : 1;  /**< Frame is terminated by control character other
6475                                                          than /T/.  The error propagation control
6476                                                          character /E/ will be included as part of the
6477                                                          frame and does not cause a frame termination.
6478                                                          (XAUI Mode only) */
6479 	uint64_t bad_seq                      : 1;  /**< Reserved Sequence Deteted
6480                                                          (XAUI Mode only) */
6481 	uint64_t rem_fault                    : 1;  /**< Remote Fault Sequence Deteted
6482                                                          (XAUI Mode only) */
6483 	uint64_t loc_fault                    : 1;  /**< Local Fault Sequence Deteted
6484                                                          (XAUI Mode only) */
6485 	uint64_t pause_drp                    : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
6486 	uint64_t reserved_16_18               : 3;
6487 	uint64_t ifgerr                       : 1;  /**< Interframe Gap Violation
6488                                                          Does not necessarily indicate a failure
6489                                                          (SGMII/1000Base-X only) */
6490 	uint64_t coldet                       : 1;  /**< Collision Detection
6491                                                          (SGMII/1000Base-X half-duplex only) */
6492 	uint64_t falerr                       : 1;  /**< False carrier error or extend error after slottime
6493                                                          (SGMII/1000Base-X only) */
6494 	uint64_t rsverr                       : 1;  /**< Reserved opcodes */
6495 	uint64_t pcterr                       : 1;  /**< Bad Preamble / Protocol
6496                                                          In XAUI mode, the column of data that was bad
6497                                                          will be logged in GMX_RX_XAUI_BAD_COL */
6498 	uint64_t ovrerr                       : 1;  /**< Internal Data Aggregation Overflow
6499                                                          This interrupt should never assert
6500                                                          (SGMII/1000Base-X only) */
6501 	uint64_t reserved_9_9                 : 1;
6502 	uint64_t skperr                       : 1;  /**< Skipper error */
6503 	uint64_t rcverr                       : 1;  /**< Frame was received with Data reception error */
6504 	uint64_t reserved_5_6                 : 2;
6505 	uint64_t fcserr                       : 1;  /**< Frame was received with FCS/CRC error */
6506 	uint64_t jabber                       : 1;  /**< Frame was received with length > sys_length */
6507 	uint64_t reserved_2_2                 : 1;
6508 	uint64_t carext                       : 1;  /**< Carrier extend error
6509                                                          (SGMII/1000Base-X only) */
6510 	uint64_t minerr                       : 1;  /**< Pause Frame was received with length<minFrameSize
6511                                                          Frame length checks are typically handled in PIP
6512                                                          (PIP_INT_REG[MINERR]), but pause frames are
6513                                                          normally discarded before being inspected by PIP. */
6514 #else
6515 	uint64_t minerr                       : 1;
6516 	uint64_t carext                       : 1;
6517 	uint64_t reserved_2_2                 : 1;
6518 	uint64_t jabber                       : 1;
6519 	uint64_t fcserr                       : 1;
6520 	uint64_t reserved_5_6                 : 2;
6521 	uint64_t rcverr                       : 1;
6522 	uint64_t skperr                       : 1;
6523 	uint64_t reserved_9_9                 : 1;
6524 	uint64_t ovrerr                       : 1;
6525 	uint64_t pcterr                       : 1;
6526 	uint64_t rsverr                       : 1;
6527 	uint64_t falerr                       : 1;
6528 	uint64_t coldet                       : 1;
6529 	uint64_t ifgerr                       : 1;
6530 	uint64_t reserved_16_18               : 3;
6531 	uint64_t pause_drp                    : 1;
6532 	uint64_t loc_fault                    : 1;
6533 	uint64_t rem_fault                    : 1;
6534 	uint64_t bad_seq                      : 1;
6535 	uint64_t bad_term                     : 1;
6536 	uint64_t unsop                        : 1;
6537 	uint64_t uneop                        : 1;
6538 	uint64_t undat                        : 1;
6539 	uint64_t hg2fld                       : 1;
6540 	uint64_t hg2cc                        : 1;
6541 	uint64_t reserved_29_63               : 35;
6542 #endif
6543 	} cn61xx;
6544 	struct cvmx_gmxx_rxx_int_reg_cn61xx   cn63xx;
6545 	struct cvmx_gmxx_rxx_int_reg_cn61xx   cn63xxp1;
6546 	struct cvmx_gmxx_rxx_int_reg_cn61xx   cn66xx;
6547 	struct cvmx_gmxx_rxx_int_reg_cn61xx   cn68xx;
6548 	struct cvmx_gmxx_rxx_int_reg_cn61xx   cn68xxp1;
6549 	struct cvmx_gmxx_rxx_int_reg_cn61xx   cnf71xx;
6550 };
6551 typedef union cvmx_gmxx_rxx_int_reg cvmx_gmxx_rxx_int_reg_t;
6552 
6553 /**
6554  * cvmx_gmx#_rx#_jabber
6555  *
6556  * GMX_RX_JABBER = The max size packet after which GMX will truncate
6557  *
6558  *
6559  * Notes:
6560  * CNT must be 8-byte aligned such that CNT[2:0] == 0
6561  *
6562  * The packet that will be sent to the packet input logic will have an
6563  * additionl 8 bytes if GMX_RX_FRM_CTL[PRE_CHK] is set and
6564  * GMX_RX_FRM_CTL[PRE_STRP] is clear.  The max packet that will be sent is
6565  * defined as...
6566  *
6567  *      max_sized_packet = GMX_RX_JABBER[CNT]+((GMX_RX_FRM_CTL[PRE_CHK] & !GMX_RX_FRM_CTL[PRE_STRP])*8)
6568  *
6569  * In XAUI mode prt0 is used for checking.
6570  */
6571 union cvmx_gmxx_rxx_jabber {
6572 	uint64_t u64;
6573 	struct cvmx_gmxx_rxx_jabber_s {
6574 #ifdef __BIG_ENDIAN_BITFIELD
6575 	uint64_t reserved_16_63               : 48;
6576 	uint64_t cnt                          : 16; /**< Byte count for jabber check
6577                                                          Failing packets set the JABBER interrupt and are
6578                                                          optionally sent with opcode==JABBER
6579                                                          GMX will truncate the packet to CNT bytes */
6580 #else
6581 	uint64_t cnt                          : 16;
6582 	uint64_t reserved_16_63               : 48;
6583 #endif
6584 	} s;
6585 	struct cvmx_gmxx_rxx_jabber_s         cn30xx;
6586 	struct cvmx_gmxx_rxx_jabber_s         cn31xx;
6587 	struct cvmx_gmxx_rxx_jabber_s         cn38xx;
6588 	struct cvmx_gmxx_rxx_jabber_s         cn38xxp2;
6589 	struct cvmx_gmxx_rxx_jabber_s         cn50xx;
6590 	struct cvmx_gmxx_rxx_jabber_s         cn52xx;
6591 	struct cvmx_gmxx_rxx_jabber_s         cn52xxp1;
6592 	struct cvmx_gmxx_rxx_jabber_s         cn56xx;
6593 	struct cvmx_gmxx_rxx_jabber_s         cn56xxp1;
6594 	struct cvmx_gmxx_rxx_jabber_s         cn58xx;
6595 	struct cvmx_gmxx_rxx_jabber_s         cn58xxp1;
6596 	struct cvmx_gmxx_rxx_jabber_s         cn61xx;
6597 	struct cvmx_gmxx_rxx_jabber_s         cn63xx;
6598 	struct cvmx_gmxx_rxx_jabber_s         cn63xxp1;
6599 	struct cvmx_gmxx_rxx_jabber_s         cn66xx;
6600 	struct cvmx_gmxx_rxx_jabber_s         cn68xx;
6601 	struct cvmx_gmxx_rxx_jabber_s         cn68xxp1;
6602 	struct cvmx_gmxx_rxx_jabber_s         cnf71xx;
6603 };
6604 typedef union cvmx_gmxx_rxx_jabber cvmx_gmxx_rxx_jabber_t;
6605 
6606 /**
6607  * cvmx_gmx#_rx#_pause_drop_time
6608  *
6609  * GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
6610  *
6611  */
6612 union cvmx_gmxx_rxx_pause_drop_time {
6613 	uint64_t u64;
6614 	struct cvmx_gmxx_rxx_pause_drop_time_s {
6615 #ifdef __BIG_ENDIAN_BITFIELD
6616 	uint64_t reserved_16_63               : 48;
6617 	uint64_t status                       : 16; /**< Time extracted from the dropped PAUSE packet */
6618 #else
6619 	uint64_t status                       : 16;
6620 	uint64_t reserved_16_63               : 48;
6621 #endif
6622 	} s;
6623 	struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx;
6624 	struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx;
6625 	struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1;
6626 	struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx;
6627 	struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1;
6628 	struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx;
6629 	struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1;
6630 	struct cvmx_gmxx_rxx_pause_drop_time_s cn61xx;
6631 	struct cvmx_gmxx_rxx_pause_drop_time_s cn63xx;
6632 	struct cvmx_gmxx_rxx_pause_drop_time_s cn63xxp1;
6633 	struct cvmx_gmxx_rxx_pause_drop_time_s cn66xx;
6634 	struct cvmx_gmxx_rxx_pause_drop_time_s cn68xx;
6635 	struct cvmx_gmxx_rxx_pause_drop_time_s cn68xxp1;
6636 	struct cvmx_gmxx_rxx_pause_drop_time_s cnf71xx;
6637 };
6638 typedef union cvmx_gmxx_rxx_pause_drop_time cvmx_gmxx_rxx_pause_drop_time_t;
6639 
6640 /**
6641  * cvmx_gmx#_rx#_rx_inbnd
6642  *
6643  * GMX_RX_INBND = RGMII InBand Link Status
6644  *
6645  *
6646  * Notes:
6647  * These fields are only valid if the attached PHY is operating in RGMII mode
6648  * and supports the optional in-band status (see section 3.4.1 of the RGMII
6649  * specification, version 1.3 for more information).
6650  */
6651 union cvmx_gmxx_rxx_rx_inbnd {
6652 	uint64_t u64;
6653 	struct cvmx_gmxx_rxx_rx_inbnd_s {
6654 #ifdef __BIG_ENDIAN_BITFIELD
6655 	uint64_t reserved_4_63                : 60;
6656 	uint64_t duplex                       : 1;  /**< RGMII Inbound LinkDuplex
6657                                                          0=half-duplex
6658                                                          1=full-duplex */
6659 	uint64_t speed                        : 2;  /**< RGMII Inbound LinkSpeed
6660                                                          00=2.5MHz
6661                                                          01=25MHz
6662                                                          10=125MHz
6663                                                          11=Reserved */
6664 	uint64_t status                       : 1;  /**< RGMII Inbound LinkStatus
6665                                                          0=down
6666                                                          1=up */
6667 #else
6668 	uint64_t status                       : 1;
6669 	uint64_t speed                        : 2;
6670 	uint64_t duplex                       : 1;
6671 	uint64_t reserved_4_63                : 60;
6672 #endif
6673 	} s;
6674 	struct cvmx_gmxx_rxx_rx_inbnd_s       cn30xx;
6675 	struct cvmx_gmxx_rxx_rx_inbnd_s       cn31xx;
6676 	struct cvmx_gmxx_rxx_rx_inbnd_s       cn38xx;
6677 	struct cvmx_gmxx_rxx_rx_inbnd_s       cn38xxp2;
6678 	struct cvmx_gmxx_rxx_rx_inbnd_s       cn50xx;
6679 	struct cvmx_gmxx_rxx_rx_inbnd_s       cn58xx;
6680 	struct cvmx_gmxx_rxx_rx_inbnd_s       cn58xxp1;
6681 };
6682 typedef union cvmx_gmxx_rxx_rx_inbnd cvmx_gmxx_rxx_rx_inbnd_t;
6683 
6684 /**
6685  * cvmx_gmx#_rx#_stats_ctl
6686  *
6687  * GMX_RX_STATS_CTL = RX Stats Control register
6688  *
6689  */
6690 union cvmx_gmxx_rxx_stats_ctl {
6691 	uint64_t u64;
6692 	struct cvmx_gmxx_rxx_stats_ctl_s {
6693 #ifdef __BIG_ENDIAN_BITFIELD
6694 	uint64_t reserved_1_63                : 63;
6695 	uint64_t rd_clr                       : 1;  /**< RX Stats registers will clear on reads */
6696 #else
6697 	uint64_t rd_clr                       : 1;
6698 	uint64_t reserved_1_63                : 63;
6699 #endif
6700 	} s;
6701 	struct cvmx_gmxx_rxx_stats_ctl_s      cn30xx;
6702 	struct cvmx_gmxx_rxx_stats_ctl_s      cn31xx;
6703 	struct cvmx_gmxx_rxx_stats_ctl_s      cn38xx;
6704 	struct cvmx_gmxx_rxx_stats_ctl_s      cn38xxp2;
6705 	struct cvmx_gmxx_rxx_stats_ctl_s      cn50xx;
6706 	struct cvmx_gmxx_rxx_stats_ctl_s      cn52xx;
6707 	struct cvmx_gmxx_rxx_stats_ctl_s      cn52xxp1;
6708 	struct cvmx_gmxx_rxx_stats_ctl_s      cn56xx;
6709 	struct cvmx_gmxx_rxx_stats_ctl_s      cn56xxp1;
6710 	struct cvmx_gmxx_rxx_stats_ctl_s      cn58xx;
6711 	struct cvmx_gmxx_rxx_stats_ctl_s      cn58xxp1;
6712 	struct cvmx_gmxx_rxx_stats_ctl_s      cn61xx;
6713 	struct cvmx_gmxx_rxx_stats_ctl_s      cn63xx;
6714 	struct cvmx_gmxx_rxx_stats_ctl_s      cn63xxp1;
6715 	struct cvmx_gmxx_rxx_stats_ctl_s      cn66xx;
6716 	struct cvmx_gmxx_rxx_stats_ctl_s      cn68xx;
6717 	struct cvmx_gmxx_rxx_stats_ctl_s      cn68xxp1;
6718 	struct cvmx_gmxx_rxx_stats_ctl_s      cnf71xx;
6719 };
6720 typedef union cvmx_gmxx_rxx_stats_ctl cvmx_gmxx_rxx_stats_ctl_t;
6721 
6722 /**
6723  * cvmx_gmx#_rx#_stats_octs
6724  *
6725  * Notes:
6726  * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
6727  * - Counters will wrap
6728  */
6729 union cvmx_gmxx_rxx_stats_octs {
6730 	uint64_t u64;
6731 	struct cvmx_gmxx_rxx_stats_octs_s {
6732 #ifdef __BIG_ENDIAN_BITFIELD
6733 	uint64_t reserved_48_63               : 16;
6734 	uint64_t cnt                          : 48; /**< Octet count of received good packets */
6735 #else
6736 	uint64_t cnt                          : 48;
6737 	uint64_t reserved_48_63               : 16;
6738 #endif
6739 	} s;
6740 	struct cvmx_gmxx_rxx_stats_octs_s     cn30xx;
6741 	struct cvmx_gmxx_rxx_stats_octs_s     cn31xx;
6742 	struct cvmx_gmxx_rxx_stats_octs_s     cn38xx;
6743 	struct cvmx_gmxx_rxx_stats_octs_s     cn38xxp2;
6744 	struct cvmx_gmxx_rxx_stats_octs_s     cn50xx;
6745 	struct cvmx_gmxx_rxx_stats_octs_s     cn52xx;
6746 	struct cvmx_gmxx_rxx_stats_octs_s     cn52xxp1;
6747 	struct cvmx_gmxx_rxx_stats_octs_s     cn56xx;
6748 	struct cvmx_gmxx_rxx_stats_octs_s     cn56xxp1;
6749 	struct cvmx_gmxx_rxx_stats_octs_s     cn58xx;
6750 	struct cvmx_gmxx_rxx_stats_octs_s     cn58xxp1;
6751 	struct cvmx_gmxx_rxx_stats_octs_s     cn61xx;
6752 	struct cvmx_gmxx_rxx_stats_octs_s     cn63xx;
6753 	struct cvmx_gmxx_rxx_stats_octs_s     cn63xxp1;
6754 	struct cvmx_gmxx_rxx_stats_octs_s     cn66xx;
6755 	struct cvmx_gmxx_rxx_stats_octs_s     cn68xx;
6756 	struct cvmx_gmxx_rxx_stats_octs_s     cn68xxp1;
6757 	struct cvmx_gmxx_rxx_stats_octs_s     cnf71xx;
6758 };
6759 typedef union cvmx_gmxx_rxx_stats_octs cvmx_gmxx_rxx_stats_octs_t;
6760 
6761 /**
6762  * cvmx_gmx#_rx#_stats_octs_ctl
6763  *
6764  * Notes:
6765  * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
6766  * - Counters will wrap
6767  */
6768 union cvmx_gmxx_rxx_stats_octs_ctl {
6769 	uint64_t u64;
6770 	struct cvmx_gmxx_rxx_stats_octs_ctl_s {
6771 #ifdef __BIG_ENDIAN_BITFIELD
6772 	uint64_t reserved_48_63               : 16;
6773 	uint64_t cnt                          : 48; /**< Octet count of received pause packets */
6774 #else
6775 	uint64_t cnt                          : 48;
6776 	uint64_t reserved_48_63               : 16;
6777 #endif
6778 	} s;
6779 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx;
6780 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx;
6781 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx;
6782 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2;
6783 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx;
6784 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx;
6785 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1;
6786 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx;
6787 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1;
6788 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx;
6789 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1;
6790 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn61xx;
6791 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xx;
6792 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xxp1;
6793 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn66xx;
6794 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn68xx;
6795 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn68xxp1;
6796 	struct cvmx_gmxx_rxx_stats_octs_ctl_s cnf71xx;
6797 };
6798 typedef union cvmx_gmxx_rxx_stats_octs_ctl cvmx_gmxx_rxx_stats_octs_ctl_t;
6799 
6800 /**
6801  * cvmx_gmx#_rx#_stats_octs_dmac
6802  *
6803  * Notes:
6804  * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
6805  * - Counters will wrap
6806  */
6807 union cvmx_gmxx_rxx_stats_octs_dmac {
6808 	uint64_t u64;
6809 	struct cvmx_gmxx_rxx_stats_octs_dmac_s {
6810 #ifdef __BIG_ENDIAN_BITFIELD
6811 	uint64_t reserved_48_63               : 16;
6812 	uint64_t cnt                          : 48; /**< Octet count of filtered dmac packets */
6813 #else
6814 	uint64_t cnt                          : 48;
6815 	uint64_t reserved_48_63               : 16;
6816 #endif
6817 	} s;
6818 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx;
6819 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx;
6820 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx;
6821 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2;
6822 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx;
6823 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx;
6824 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1;
6825 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx;
6826 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1;
6827 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx;
6828 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1;
6829 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn61xx;
6830 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xx;
6831 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xxp1;
6832 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn66xx;
6833 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn68xx;
6834 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn68xxp1;
6835 	struct cvmx_gmxx_rxx_stats_octs_dmac_s cnf71xx;
6836 };
6837 typedef union cvmx_gmxx_rxx_stats_octs_dmac cvmx_gmxx_rxx_stats_octs_dmac_t;
6838 
6839 /**
6840  * cvmx_gmx#_rx#_stats_octs_drp
6841  *
6842  * Notes:
6843  * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
6844  * - Counters will wrap
6845  */
6846 union cvmx_gmxx_rxx_stats_octs_drp {
6847 	uint64_t u64;
6848 	struct cvmx_gmxx_rxx_stats_octs_drp_s {
6849 #ifdef __BIG_ENDIAN_BITFIELD
6850 	uint64_t reserved_48_63               : 16;
6851 	uint64_t cnt                          : 48; /**< Octet count of dropped packets */
6852 #else
6853 	uint64_t cnt                          : 48;
6854 	uint64_t reserved_48_63               : 16;
6855 #endif
6856 	} s;
6857 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx;
6858 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx;
6859 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx;
6860 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2;
6861 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx;
6862 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx;
6863 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1;
6864 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx;
6865 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1;
6866 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx;
6867 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1;
6868 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn61xx;
6869 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xx;
6870 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xxp1;
6871 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn66xx;
6872 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn68xx;
6873 	struct cvmx_gmxx_rxx_stats_octs_drp_s cn68xxp1;
6874 	struct cvmx_gmxx_rxx_stats_octs_drp_s cnf71xx;
6875 };
6876 typedef union cvmx_gmxx_rxx_stats_octs_drp cvmx_gmxx_rxx_stats_octs_drp_t;
6877 
6878 /**
6879  * cvmx_gmx#_rx#_stats_pkts
6880  *
6881  * GMX_RX_STATS_PKTS
6882  *
6883  * Count of good received packets - packets that are not recognized as PAUSE
6884  * packets, dropped due the DMAC filter, dropped due FIFO full status, or
6885  * have any other OPCODE (FCS, Length, etc).
6886  *
6887  * Notes:
6888  * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
6889  * - Counters will wrap
6890  */
6891 union cvmx_gmxx_rxx_stats_pkts {
6892 	uint64_t u64;
6893 	struct cvmx_gmxx_rxx_stats_pkts_s {
6894 #ifdef __BIG_ENDIAN_BITFIELD
6895 	uint64_t reserved_32_63               : 32;
6896 	uint64_t cnt                          : 32; /**< Count of received good packets */
6897 #else
6898 	uint64_t cnt                          : 32;
6899 	uint64_t reserved_32_63               : 32;
6900 #endif
6901 	} s;
6902 	struct cvmx_gmxx_rxx_stats_pkts_s     cn30xx;
6903 	struct cvmx_gmxx_rxx_stats_pkts_s     cn31xx;
6904 	struct cvmx_gmxx_rxx_stats_pkts_s     cn38xx;
6905 	struct cvmx_gmxx_rxx_stats_pkts_s     cn38xxp2;
6906 	struct cvmx_gmxx_rxx_stats_pkts_s     cn50xx;
6907 	struct cvmx_gmxx_rxx_stats_pkts_s     cn52xx;
6908 	struct cvmx_gmxx_rxx_stats_pkts_s     cn52xxp1;
6909 	struct cvmx_gmxx_rxx_stats_pkts_s     cn56xx;
6910 	struct cvmx_gmxx_rxx_stats_pkts_s     cn56xxp1;
6911 	struct cvmx_gmxx_rxx_stats_pkts_s     cn58xx;
6912 	struct cvmx_gmxx_rxx_stats_pkts_s     cn58xxp1;
6913 	struct cvmx_gmxx_rxx_stats_pkts_s     cn61xx;
6914 	struct cvmx_gmxx_rxx_stats_pkts_s     cn63xx;
6915 	struct cvmx_gmxx_rxx_stats_pkts_s     cn63xxp1;
6916 	struct cvmx_gmxx_rxx_stats_pkts_s     cn66xx;
6917 	struct cvmx_gmxx_rxx_stats_pkts_s     cn68xx;
6918 	struct cvmx_gmxx_rxx_stats_pkts_s     cn68xxp1;
6919 	struct cvmx_gmxx_rxx_stats_pkts_s     cnf71xx;
6920 };
6921 typedef union cvmx_gmxx_rxx_stats_pkts cvmx_gmxx_rxx_stats_pkts_t;
6922 
6923 /**
6924  * cvmx_gmx#_rx#_stats_pkts_bad
6925  *
6926  * GMX_RX_STATS_PKTS_BAD
6927  *
6928  * Count of all packets received with some error that were not dropped
6929  * either due to the dmac filter or lack of room in the receive FIFO.
6930  *
6931  * Notes:
6932  * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
6933  * - Counters will wrap
6934  */
6935 union cvmx_gmxx_rxx_stats_pkts_bad {
6936 	uint64_t u64;
6937 	struct cvmx_gmxx_rxx_stats_pkts_bad_s {
6938 #ifdef __BIG_ENDIAN_BITFIELD
6939 	uint64_t reserved_32_63               : 32;
6940 	uint64_t cnt                          : 32; /**< Count of bad packets */
6941 #else
6942 	uint64_t cnt                          : 32;
6943 	uint64_t reserved_32_63               : 32;
6944 #endif
6945 	} s;
6946 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx;
6947 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx;
6948 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx;
6949 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2;
6950 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx;
6951 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx;
6952 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1;
6953 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx;
6954 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1;
6955 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx;
6956 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1;
6957 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn61xx;
6958 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xx;
6959 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xxp1;
6960 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn66xx;
6961 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn68xx;
6962 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn68xxp1;
6963 	struct cvmx_gmxx_rxx_stats_pkts_bad_s cnf71xx;
6964 };
6965 typedef union cvmx_gmxx_rxx_stats_pkts_bad cvmx_gmxx_rxx_stats_pkts_bad_t;
6966 
6967 /**
6968  * cvmx_gmx#_rx#_stats_pkts_ctl
6969  *
6970  * GMX_RX_STATS_PKTS_CTL
6971  *
6972  * Count of all packets received that were recognized as Flow Control or
6973  * PAUSE packets.  PAUSE packets with any kind of error are counted in
6974  * GMX_RX_STATS_PKTS_BAD.  Pause packets can be optionally dropped or
6975  * forwarded based on the GMX_RX_FRM_CTL[CTL_DRP] bit.  This count
6976  * increments regardless of whether the packet is dropped.  Pause packets
6977  * will never be counted in GMX_RX_STATS_PKTS.  Packets dropped due the dmac
6978  * filter will be counted in GMX_RX_STATS_PKTS_DMAC and not here.
6979  *
6980  * Notes:
6981  * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
6982  * - Counters will wrap
6983  */
6984 union cvmx_gmxx_rxx_stats_pkts_ctl {
6985 	uint64_t u64;
6986 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s {
6987 #ifdef __BIG_ENDIAN_BITFIELD
6988 	uint64_t reserved_32_63               : 32;
6989 	uint64_t cnt                          : 32; /**< Count of received pause packets */
6990 #else
6991 	uint64_t cnt                          : 32;
6992 	uint64_t reserved_32_63               : 32;
6993 #endif
6994 	} s;
6995 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx;
6996 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx;
6997 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx;
6998 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2;
6999 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx;
7000 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx;
7001 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1;
7002 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx;
7003 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1;
7004 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx;
7005 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1;
7006 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn61xx;
7007 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xx;
7008 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xxp1;
7009 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn66xx;
7010 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn68xx;
7011 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn68xxp1;
7012 	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cnf71xx;
7013 };
7014 typedef union cvmx_gmxx_rxx_stats_pkts_ctl cvmx_gmxx_rxx_stats_pkts_ctl_t;
7015 
7016 /**
7017  * cvmx_gmx#_rx#_stats_pkts_dmac
7018  *
7019  * GMX_RX_STATS_PKTS_DMAC
7020  *
7021  * Count of all packets received that were dropped by the dmac filter.
7022  * Packets that match the DMAC will be dropped and counted here regardless
7023  * of if they were bad packets.  These packets will never be counted in
7024  * GMX_RX_STATS_PKTS.
7025  *
7026  * Some packets that were not able to satisify the DECISION_CNT may not
7027  * actually be dropped by Octeon, but they will be counted here as if they
7028  * were dropped.
7029  *
7030  * Notes:
7031  * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
7032  * - Counters will wrap
7033  */
7034 union cvmx_gmxx_rxx_stats_pkts_dmac {
7035 	uint64_t u64;
7036 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s {
7037 #ifdef __BIG_ENDIAN_BITFIELD
7038 	uint64_t reserved_32_63               : 32;
7039 	uint64_t cnt                          : 32; /**< Count of filtered dmac packets */
7040 #else
7041 	uint64_t cnt                          : 32;
7042 	uint64_t reserved_32_63               : 32;
7043 #endif
7044 	} s;
7045 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx;
7046 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx;
7047 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx;
7048 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2;
7049 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx;
7050 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx;
7051 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1;
7052 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx;
7053 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1;
7054 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx;
7055 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1;
7056 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn61xx;
7057 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xx;
7058 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xxp1;
7059 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn66xx;
7060 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn68xx;
7061 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn68xxp1;
7062 	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cnf71xx;
7063 };
7064 typedef union cvmx_gmxx_rxx_stats_pkts_dmac cvmx_gmxx_rxx_stats_pkts_dmac_t;
7065 
7066 /**
7067  * cvmx_gmx#_rx#_stats_pkts_drp
7068  *
7069  * GMX_RX_STATS_PKTS_DRP
7070  *
7071  * Count of all packets received that were dropped due to a full receive FIFO.
7072  * This counts both partial packets in which there was enough space in the RX
7073  * FIFO to begin to buffer and the packet and total drops in which no packet was
7074  * sent to PKI.  This counts good and bad packets received - all packets dropped
7075  * by the FIFO.  It does not count packets dropped by the dmac or pause packet
7076  * filters.
7077  *
7078  * Notes:
7079  * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
7080  * - Counters will wrap
7081  */
7082 union cvmx_gmxx_rxx_stats_pkts_drp {
7083 	uint64_t u64;
7084 	struct cvmx_gmxx_rxx_stats_pkts_drp_s {
7085 #ifdef __BIG_ENDIAN_BITFIELD
7086 	uint64_t reserved_32_63               : 32;
7087 	uint64_t cnt                          : 32; /**< Count of dropped packets */
7088 #else
7089 	uint64_t cnt                          : 32;
7090 	uint64_t reserved_32_63               : 32;
7091 #endif
7092 	} s;
7093 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx;
7094 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx;
7095 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx;
7096 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2;
7097 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx;
7098 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx;
7099 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1;
7100 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx;
7101 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1;
7102 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx;
7103 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1;
7104 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn61xx;
7105 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xx;
7106 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xxp1;
7107 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn66xx;
7108 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn68xx;
7109 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn68xxp1;
7110 	struct cvmx_gmxx_rxx_stats_pkts_drp_s cnf71xx;
7111 };
7112 typedef union cvmx_gmxx_rxx_stats_pkts_drp cvmx_gmxx_rxx_stats_pkts_drp_t;
7113 
7114 /**
7115  * cvmx_gmx#_rx#_udd_skp
7116  *
7117  * GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
7118  *
7119  *
7120  * Notes:
7121  * (1) The skip bytes are part of the packet and will be sent down the NCB
7122  *     packet interface and will be handled by PKI.
7123  *
7124  * (2) The system can determine if the UDD bytes are included in the FCS check
7125  *     by using the FCSSEL field - if the FCS check is enabled.
7126  *
7127  * (3) Assume that the preamble/sfd is always at the start of the frame - even
7128  *     before UDD bytes.  In most cases, there will be no preamble in these
7129  *     cases since it will be packet interface in direct communication to
7130  *     another packet interface (MAC to MAC) without a PHY involved.
7131  *
7132  * (4) We can still do address filtering and control packet filtering is the
7133  *     user desires.
7134  *
7135  * (5) UDD_SKP must be 0 in half-duplex operation unless
7136  *     GMX_RX_FRM_CTL[PRE_CHK] is clear.  If GMX_RX_FRM_CTL[PRE_CHK] is clear,
7137  *     then UDD_SKP will normally be 8.
7138  *
7139  * (6) In all cases, the UDD bytes will be sent down the packet interface as
7140  *     part of the packet.  The UDD bytes are never stripped from the actual
7141  *     packet.
7142  *
7143  * (7) If LEN != 0, then GMX_RX_FRM_CHK[LENERR] will be disabled and GMX_RX_INT_REG[LENERR] will be zero
7144  */
7145 union cvmx_gmxx_rxx_udd_skp {
7146 	uint64_t u64;
7147 	struct cvmx_gmxx_rxx_udd_skp_s {
7148 #ifdef __BIG_ENDIAN_BITFIELD
7149 	uint64_t reserved_9_63                : 55;
7150 	uint64_t fcssel                       : 1;  /**< Include the skip bytes in the FCS calculation
7151                                                          0 = all skip bytes are included in FCS
7152                                                          1 = the skip bytes are not included in FCS
7153                                                          When GMX_TX_XAUI_CTL[HG_EN] is set, FCSSEL must
7154                                                          be zero. */
7155 	uint64_t reserved_7_7                 : 1;
7156 	uint64_t len                          : 7;  /**< Amount of User-defined data before the start of
7157                                                          the L2 data.  Zero means L2 comes first.
7158                                                          Max value is 64.
7159                                                          When GMX_TX_XAUI_CTL[HG_EN] is set, LEN must be
7160                                                          set to 12 or 16 (depending on HiGig header size)
7161                                                          to account for the HiGig header. LEN=12 selects
7162                                                          HiGig/HiGig+, and LEN=16 selects HiGig2. */
7163 #else
7164 	uint64_t len                          : 7;
7165 	uint64_t reserved_7_7                 : 1;
7166 	uint64_t fcssel                       : 1;
7167 	uint64_t reserved_9_63                : 55;
7168 #endif
7169 	} s;
7170 	struct cvmx_gmxx_rxx_udd_skp_s        cn30xx;
7171 	struct cvmx_gmxx_rxx_udd_skp_s        cn31xx;
7172 	struct cvmx_gmxx_rxx_udd_skp_s        cn38xx;
7173 	struct cvmx_gmxx_rxx_udd_skp_s        cn38xxp2;
7174 	struct cvmx_gmxx_rxx_udd_skp_s        cn50xx;
7175 	struct cvmx_gmxx_rxx_udd_skp_s        cn52xx;
7176 	struct cvmx_gmxx_rxx_udd_skp_s        cn52xxp1;
7177 	struct cvmx_gmxx_rxx_udd_skp_s        cn56xx;
7178 	struct cvmx_gmxx_rxx_udd_skp_s        cn56xxp1;
7179 	struct cvmx_gmxx_rxx_udd_skp_s        cn58xx;
7180 	struct cvmx_gmxx_rxx_udd_skp_s        cn58xxp1;
7181 	struct cvmx_gmxx_rxx_udd_skp_s        cn61xx;
7182 	struct cvmx_gmxx_rxx_udd_skp_s        cn63xx;
7183 	struct cvmx_gmxx_rxx_udd_skp_s        cn63xxp1;
7184 	struct cvmx_gmxx_rxx_udd_skp_s        cn66xx;
7185 	struct cvmx_gmxx_rxx_udd_skp_s        cn68xx;
7186 	struct cvmx_gmxx_rxx_udd_skp_s        cn68xxp1;
7187 	struct cvmx_gmxx_rxx_udd_skp_s        cnf71xx;
7188 };
7189 typedef union cvmx_gmxx_rxx_udd_skp cvmx_gmxx_rxx_udd_skp_t;
7190 
7191 /**
7192  * cvmx_gmx#_rx_bp_drop#
7193  *
7194  * GMX_RX_BP_DROP = FIFO mark for packet drop
7195  *
7196  *
7197  * Notes:
7198  * The actual watermark is dynamic with respect to the GMX_RX_PRTS
7199  * register.  The GMX_RX_PRTS controls the depth of the port's
7200  * FIFO so as ports are added or removed, the drop point may change.
7201  *
7202  * In XAUI mode prt0 is used for checking.
7203  */
7204 union cvmx_gmxx_rx_bp_dropx {
7205 	uint64_t u64;
7206 	struct cvmx_gmxx_rx_bp_dropx_s {
7207 #ifdef __BIG_ENDIAN_BITFIELD
7208 	uint64_t reserved_6_63                : 58;
7209 	uint64_t mark                         : 6;  /**< Number of 8B ticks to reserve in the RX FIFO.
7210                                                          When the FIFO exceeds this count, packets will
7211                                                          be dropped and not buffered.
7212                                                          MARK should typically be programmed to ports+1.
7213                                                          Failure to program correctly can lead to system
7214                                                          instability. */
7215 #else
7216 	uint64_t mark                         : 6;
7217 	uint64_t reserved_6_63                : 58;
7218 #endif
7219 	} s;
7220 	struct cvmx_gmxx_rx_bp_dropx_s        cn30xx;
7221 	struct cvmx_gmxx_rx_bp_dropx_s        cn31xx;
7222 	struct cvmx_gmxx_rx_bp_dropx_s        cn38xx;
7223 	struct cvmx_gmxx_rx_bp_dropx_s        cn38xxp2;
7224 	struct cvmx_gmxx_rx_bp_dropx_s        cn50xx;
7225 	struct cvmx_gmxx_rx_bp_dropx_s        cn52xx;
7226 	struct cvmx_gmxx_rx_bp_dropx_s        cn52xxp1;
7227 	struct cvmx_gmxx_rx_bp_dropx_s        cn56xx;
7228 	struct cvmx_gmxx_rx_bp_dropx_s        cn56xxp1;
7229 	struct cvmx_gmxx_rx_bp_dropx_s        cn58xx;
7230 	struct cvmx_gmxx_rx_bp_dropx_s        cn58xxp1;
7231 	struct cvmx_gmxx_rx_bp_dropx_s        cn61xx;
7232 	struct cvmx_gmxx_rx_bp_dropx_s        cn63xx;
7233 	struct cvmx_gmxx_rx_bp_dropx_s        cn63xxp1;
7234 	struct cvmx_gmxx_rx_bp_dropx_s        cn66xx;
7235 	struct cvmx_gmxx_rx_bp_dropx_s        cn68xx;
7236 	struct cvmx_gmxx_rx_bp_dropx_s        cn68xxp1;
7237 	struct cvmx_gmxx_rx_bp_dropx_s        cnf71xx;
7238 };
7239 typedef union cvmx_gmxx_rx_bp_dropx cvmx_gmxx_rx_bp_dropx_t;
7240 
7241 /**
7242  * cvmx_gmx#_rx_bp_off#
7243  *
7244  * GMX_RX_BP_OFF = Lowater mark for packet drop
7245  *
7246  *
7247  * Notes:
7248  * In XAUI mode, prt0 is used for checking.
7249  *
7250  */
7251 union cvmx_gmxx_rx_bp_offx {
7252 	uint64_t u64;
7253 	struct cvmx_gmxx_rx_bp_offx_s {
7254 #ifdef __BIG_ENDIAN_BITFIELD
7255 	uint64_t reserved_6_63                : 58;
7256 	uint64_t mark                         : 6;  /**< Water mark (8B ticks) to deassert backpressure */
7257 #else
7258 	uint64_t mark                         : 6;
7259 	uint64_t reserved_6_63                : 58;
7260 #endif
7261 	} s;
7262 	struct cvmx_gmxx_rx_bp_offx_s         cn30xx;
7263 	struct cvmx_gmxx_rx_bp_offx_s         cn31xx;
7264 	struct cvmx_gmxx_rx_bp_offx_s         cn38xx;
7265 	struct cvmx_gmxx_rx_bp_offx_s         cn38xxp2;
7266 	struct cvmx_gmxx_rx_bp_offx_s         cn50xx;
7267 	struct cvmx_gmxx_rx_bp_offx_s         cn52xx;
7268 	struct cvmx_gmxx_rx_bp_offx_s         cn52xxp1;
7269 	struct cvmx_gmxx_rx_bp_offx_s         cn56xx;
7270 	struct cvmx_gmxx_rx_bp_offx_s         cn56xxp1;
7271 	struct cvmx_gmxx_rx_bp_offx_s         cn58xx;
7272 	struct cvmx_gmxx_rx_bp_offx_s         cn58xxp1;
7273 	struct cvmx_gmxx_rx_bp_offx_s         cn61xx;
7274 	struct cvmx_gmxx_rx_bp_offx_s         cn63xx;
7275 	struct cvmx_gmxx_rx_bp_offx_s         cn63xxp1;
7276 	struct cvmx_gmxx_rx_bp_offx_s         cn66xx;
7277 	struct cvmx_gmxx_rx_bp_offx_s         cn68xx;
7278 	struct cvmx_gmxx_rx_bp_offx_s         cn68xxp1;
7279 	struct cvmx_gmxx_rx_bp_offx_s         cnf71xx;
7280 };
7281 typedef union cvmx_gmxx_rx_bp_offx cvmx_gmxx_rx_bp_offx_t;
7282 
7283 /**
7284  * cvmx_gmx#_rx_bp_on#
7285  *
7286  * GMX_RX_BP_ON = Hiwater mark for port/interface backpressure
7287  *
7288  *
7289  * Notes:
7290  * In XAUI mode, prt0 is used for checking.
7291  *
7292  */
7293 union cvmx_gmxx_rx_bp_onx {
7294 	uint64_t u64;
7295 	struct cvmx_gmxx_rx_bp_onx_s {
7296 #ifdef __BIG_ENDIAN_BITFIELD
7297 	uint64_t reserved_11_63               : 53;
7298 	uint64_t mark                         : 11; /**< Hiwater mark (8B ticks) for backpressure.
7299                                                          Each register is for an individual port.  In XAUI
7300                                                          mode, prt0 is used for the unified RX FIFO
7301                                                          GMX_RX_BP_ON must satisfy
7302                                                          BP_OFF <= BP_ON < (FIFO_SIZE - BP_DROP)
7303                                                          A value of zero will immediately assert back
7304                                                          pressure. */
7305 #else
7306 	uint64_t mark                         : 11;
7307 	uint64_t reserved_11_63               : 53;
7308 #endif
7309 	} s;
7310 	struct cvmx_gmxx_rx_bp_onx_cn30xx {
7311 #ifdef __BIG_ENDIAN_BITFIELD
7312 	uint64_t reserved_9_63                : 55;
7313 	uint64_t mark                         : 9;  /**< Hiwater mark (8B ticks) for backpressure.
7314                                                          In RGMII mode, the backpressure is given per
7315                                                          port.  In Spi4 mode, the backpressure is for the
7316                                                          entire interface.  GMX_RX_BP_ON must satisfy
7317                                                          BP_OFF <= BP_ON < (FIFO_SIZE - BP_DROP)
7318                                                          The reset value is half the FIFO.
7319                                                          Reset value RGMII mode = 0x40  (512bytes)
7320                                                          Reset value Spi4 mode  = 0x100 (2048bytes)
7321                                                          A value of zero will immediately assert back
7322                                                          pressure. */
7323 #else
7324 	uint64_t mark                         : 9;
7325 	uint64_t reserved_9_63                : 55;
7326 #endif
7327 	} cn30xx;
7328 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn31xx;
7329 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn38xx;
7330 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn38xxp2;
7331 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn50xx;
7332 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn52xx;
7333 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn52xxp1;
7334 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn56xx;
7335 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn56xxp1;
7336 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn58xx;
7337 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn58xxp1;
7338 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn61xx;
7339 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn63xx;
7340 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn63xxp1;
7341 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cn66xx;
7342 	struct cvmx_gmxx_rx_bp_onx_s          cn68xx;
7343 	struct cvmx_gmxx_rx_bp_onx_s          cn68xxp1;
7344 	struct cvmx_gmxx_rx_bp_onx_cn30xx     cnf71xx;
7345 };
7346 typedef union cvmx_gmxx_rx_bp_onx cvmx_gmxx_rx_bp_onx_t;
7347 
7348 /**
7349  * cvmx_gmx#_rx_hg2_status
7350  *
7351  * ** HG2 message CSRs
7352  *
7353  */
7354 union cvmx_gmxx_rx_hg2_status {
7355 	uint64_t u64;
7356 	struct cvmx_gmxx_rx_hg2_status_s {
7357 #ifdef __BIG_ENDIAN_BITFIELD
7358 	uint64_t reserved_48_63               : 16;
7359 	uint64_t phtim2go                     : 16; /**< Physical time to go for removal of physical link
7360                                                          pause. Initial value from received HiGig2 msg pkt
7361                                                          Non-zero only when physical back pressure active */
7362 	uint64_t xof                          : 16; /**< 16 bit xof back pressure vector from HiGig2 msg pkt
7363                                                          or from CBFC packets.
7364                                                          Non-zero only when logical back pressure is active
7365                                                          All bits will be 0 when LGTIM2GO=0 */
7366 	uint64_t lgtim2go                     : 16; /**< Logical packet flow back pressure time remaining
7367                                                          Initial value set from xof time field of HiGig2
7368                                                          message packet received or a function of the
7369                                                          enabled and current timers for CBFC packets.
7370                                                          Non-zero only when logical back pressure is active */
7371 #else
7372 	uint64_t lgtim2go                     : 16;
7373 	uint64_t xof                          : 16;
7374 	uint64_t phtim2go                     : 16;
7375 	uint64_t reserved_48_63               : 16;
7376 #endif
7377 	} s;
7378 	struct cvmx_gmxx_rx_hg2_status_s      cn52xx;
7379 	struct cvmx_gmxx_rx_hg2_status_s      cn52xxp1;
7380 	struct cvmx_gmxx_rx_hg2_status_s      cn56xx;
7381 	struct cvmx_gmxx_rx_hg2_status_s      cn61xx;
7382 	struct cvmx_gmxx_rx_hg2_status_s      cn63xx;
7383 	struct cvmx_gmxx_rx_hg2_status_s      cn63xxp1;
7384 	struct cvmx_gmxx_rx_hg2_status_s      cn66xx;
7385 	struct cvmx_gmxx_rx_hg2_status_s      cn68xx;
7386 	struct cvmx_gmxx_rx_hg2_status_s      cn68xxp1;
7387 	struct cvmx_gmxx_rx_hg2_status_s      cnf71xx;
7388 };
7389 typedef union cvmx_gmxx_rx_hg2_status cvmx_gmxx_rx_hg2_status_t;
7390 
7391 /**
7392  * cvmx_gmx#_rx_pass_en
7393  *
7394  * GMX_RX_PASS_EN = Packet pass through mode enable
7395  *
7396  * When both Octane ports are running in Spi4 mode, packets can be directly
7397  * passed from one SPX interface to the other without being processed by the
7398  * core or PP's.  The register has one bit for each port to enable the pass
7399  * through feature.
7400  *
7401  * Notes:
7402  * (1) Can only be used in dual Spi4 configs
7403  *
7404  * (2) The mapped pass through output port cannot be the destination port for
7405  *     any Octane core traffic.
7406  */
7407 union cvmx_gmxx_rx_pass_en {
7408 	uint64_t u64;
7409 	struct cvmx_gmxx_rx_pass_en_s {
7410 #ifdef __BIG_ENDIAN_BITFIELD
7411 	uint64_t reserved_16_63               : 48;
7412 	uint64_t en                           : 16; /**< Which ports to configure in pass through mode */
7413 #else
7414 	uint64_t en                           : 16;
7415 	uint64_t reserved_16_63               : 48;
7416 #endif
7417 	} s;
7418 	struct cvmx_gmxx_rx_pass_en_s         cn38xx;
7419 	struct cvmx_gmxx_rx_pass_en_s         cn38xxp2;
7420 	struct cvmx_gmxx_rx_pass_en_s         cn58xx;
7421 	struct cvmx_gmxx_rx_pass_en_s         cn58xxp1;
7422 };
7423 typedef union cvmx_gmxx_rx_pass_en cvmx_gmxx_rx_pass_en_t;
7424 
7425 /**
7426  * cvmx_gmx#_rx_pass_map#
7427  *
7428  * GMX_RX_PASS_MAP = Packet pass through port map
7429  *
7430  */
7431 union cvmx_gmxx_rx_pass_mapx {
7432 	uint64_t u64;
7433 	struct cvmx_gmxx_rx_pass_mapx_s {
7434 #ifdef __BIG_ENDIAN_BITFIELD
7435 	uint64_t reserved_4_63                : 60;
7436 	uint64_t dprt                         : 4;  /**< Destination port to map Spi pass through traffic */
7437 #else
7438 	uint64_t dprt                         : 4;
7439 	uint64_t reserved_4_63                : 60;
7440 #endif
7441 	} s;
7442 	struct cvmx_gmxx_rx_pass_mapx_s       cn38xx;
7443 	struct cvmx_gmxx_rx_pass_mapx_s       cn38xxp2;
7444 	struct cvmx_gmxx_rx_pass_mapx_s       cn58xx;
7445 	struct cvmx_gmxx_rx_pass_mapx_s       cn58xxp1;
7446 };
7447 typedef union cvmx_gmxx_rx_pass_mapx cvmx_gmxx_rx_pass_mapx_t;
7448 
7449 /**
7450  * cvmx_gmx#_rx_prt_info
7451  *
7452  * GMX_RX_PRT_INFO = Report the RX status for port
7453  *
7454  *
7455  * Notes:
7456  * In XAUI mode, only the lsb (corresponding to port0) of DROP and COMMIT are used.
7457  *
7458  */
7459 union cvmx_gmxx_rx_prt_info {
7460 	uint64_t u64;
7461 	struct cvmx_gmxx_rx_prt_info_s {
7462 #ifdef __BIG_ENDIAN_BITFIELD
7463 	uint64_t reserved_32_63               : 32;
7464 	uint64_t drop                         : 16; /**< Per port indication that data was dropped */
7465 	uint64_t commit                       : 16; /**< Per port indication that SOP was accepted */
7466 #else
7467 	uint64_t commit                       : 16;
7468 	uint64_t drop                         : 16;
7469 	uint64_t reserved_32_63               : 32;
7470 #endif
7471 	} s;
7472 	struct cvmx_gmxx_rx_prt_info_cn30xx {
7473 #ifdef __BIG_ENDIAN_BITFIELD
7474 	uint64_t reserved_19_63               : 45;
7475 	uint64_t drop                         : 3;  /**< Per port indication that data was dropped */
7476 	uint64_t reserved_3_15                : 13;
7477 	uint64_t commit                       : 3;  /**< Per port indication that SOP was accepted */
7478 #else
7479 	uint64_t commit                       : 3;
7480 	uint64_t reserved_3_15                : 13;
7481 	uint64_t drop                         : 3;
7482 	uint64_t reserved_19_63               : 45;
7483 #endif
7484 	} cn30xx;
7485 	struct cvmx_gmxx_rx_prt_info_cn30xx   cn31xx;
7486 	struct cvmx_gmxx_rx_prt_info_s        cn38xx;
7487 	struct cvmx_gmxx_rx_prt_info_cn30xx   cn50xx;
7488 	struct cvmx_gmxx_rx_prt_info_cn52xx {
7489 #ifdef __BIG_ENDIAN_BITFIELD
7490 	uint64_t reserved_20_63               : 44;
7491 	uint64_t drop                         : 4;  /**< Per port indication that data was dropped */
7492 	uint64_t reserved_4_15                : 12;
7493 	uint64_t commit                       : 4;  /**< Per port indication that SOP was accepted */
7494 #else
7495 	uint64_t commit                       : 4;
7496 	uint64_t reserved_4_15                : 12;
7497 	uint64_t drop                         : 4;
7498 	uint64_t reserved_20_63               : 44;
7499 #endif
7500 	} cn52xx;
7501 	struct cvmx_gmxx_rx_prt_info_cn52xx   cn52xxp1;
7502 	struct cvmx_gmxx_rx_prt_info_cn52xx   cn56xx;
7503 	struct cvmx_gmxx_rx_prt_info_cn52xx   cn56xxp1;
7504 	struct cvmx_gmxx_rx_prt_info_s        cn58xx;
7505 	struct cvmx_gmxx_rx_prt_info_s        cn58xxp1;
7506 	struct cvmx_gmxx_rx_prt_info_cn52xx   cn61xx;
7507 	struct cvmx_gmxx_rx_prt_info_cn52xx   cn63xx;
7508 	struct cvmx_gmxx_rx_prt_info_cn52xx   cn63xxp1;
7509 	struct cvmx_gmxx_rx_prt_info_cn52xx   cn66xx;
7510 	struct cvmx_gmxx_rx_prt_info_cn52xx   cn68xx;
7511 	struct cvmx_gmxx_rx_prt_info_cn52xx   cn68xxp1;
7512 	struct cvmx_gmxx_rx_prt_info_cnf71xx {
7513 #ifdef __BIG_ENDIAN_BITFIELD
7514 	uint64_t reserved_18_63               : 46;
7515 	uint64_t drop                         : 2;  /**< Per port indication that data was dropped */
7516 	uint64_t reserved_2_15                : 14;
7517 	uint64_t commit                       : 2;  /**< Per port indication that SOP was accepted */
7518 #else
7519 	uint64_t commit                       : 2;
7520 	uint64_t reserved_2_15                : 14;
7521 	uint64_t drop                         : 2;
7522 	uint64_t reserved_18_63               : 46;
7523 #endif
7524 	} cnf71xx;
7525 };
7526 typedef union cvmx_gmxx_rx_prt_info cvmx_gmxx_rx_prt_info_t;
7527 
7528 /**
7529  * cvmx_gmx#_rx_prts
7530  *
7531  * GMX_RX_PRTS = Number of FIFOs to carve the RX buffer into
7532  *
7533  *
7534  * Notes:
7535  * GMX_RX_PRTS[PRTS] must be set to '1' in XAUI mode.
7536  *
7537  */
7538 union cvmx_gmxx_rx_prts {
7539 	uint64_t u64;
7540 	struct cvmx_gmxx_rx_prts_s {
7541 #ifdef __BIG_ENDIAN_BITFIELD
7542 	uint64_t reserved_3_63                : 61;
7543 	uint64_t prts                         : 3;  /**< In SGMII/1000Base-X mode, the RX buffer can be
7544                                                          carved into several logical buffers depending on
7545                                                          the number or implemented ports.
7546                                                          0 or 1 port  = 512ticks / 4096bytes
7547                                                          2 ports      = 256ticks / 2048bytes
7548                                                          3 or 4 ports = 128ticks / 1024bytes */
7549 #else
7550 	uint64_t prts                         : 3;
7551 	uint64_t reserved_3_63                : 61;
7552 #endif
7553 	} s;
7554 	struct cvmx_gmxx_rx_prts_s            cn30xx;
7555 	struct cvmx_gmxx_rx_prts_s            cn31xx;
7556 	struct cvmx_gmxx_rx_prts_s            cn38xx;
7557 	struct cvmx_gmxx_rx_prts_s            cn38xxp2;
7558 	struct cvmx_gmxx_rx_prts_s            cn50xx;
7559 	struct cvmx_gmxx_rx_prts_s            cn52xx;
7560 	struct cvmx_gmxx_rx_prts_s            cn52xxp1;
7561 	struct cvmx_gmxx_rx_prts_s            cn56xx;
7562 	struct cvmx_gmxx_rx_prts_s            cn56xxp1;
7563 	struct cvmx_gmxx_rx_prts_s            cn58xx;
7564 	struct cvmx_gmxx_rx_prts_s            cn58xxp1;
7565 	struct cvmx_gmxx_rx_prts_s            cn61xx;
7566 	struct cvmx_gmxx_rx_prts_s            cn63xx;
7567 	struct cvmx_gmxx_rx_prts_s            cn63xxp1;
7568 	struct cvmx_gmxx_rx_prts_s            cn66xx;
7569 	struct cvmx_gmxx_rx_prts_s            cn68xx;
7570 	struct cvmx_gmxx_rx_prts_s            cn68xxp1;
7571 	struct cvmx_gmxx_rx_prts_s            cnf71xx;
7572 };
7573 typedef union cvmx_gmxx_rx_prts cvmx_gmxx_rx_prts_t;
7574 
7575 /**
7576  * cvmx_gmx#_rx_tx_status
7577  *
7578  * GMX_RX_TX_STATUS = GMX RX/TX Status
7579  *
7580  */
7581 union cvmx_gmxx_rx_tx_status {
7582 	uint64_t u64;
7583 	struct cvmx_gmxx_rx_tx_status_s {
7584 #ifdef __BIG_ENDIAN_BITFIELD
7585 	uint64_t reserved_7_63                : 57;
7586 	uint64_t tx                           : 3;  /**< Transmit data since last read */
7587 	uint64_t reserved_3_3                 : 1;
7588 	uint64_t rx                           : 3;  /**< Receive data since last read */
7589 #else
7590 	uint64_t rx                           : 3;
7591 	uint64_t reserved_3_3                 : 1;
7592 	uint64_t tx                           : 3;
7593 	uint64_t reserved_7_63                : 57;
7594 #endif
7595 	} s;
7596 	struct cvmx_gmxx_rx_tx_status_s       cn30xx;
7597 	struct cvmx_gmxx_rx_tx_status_s       cn31xx;
7598 	struct cvmx_gmxx_rx_tx_status_s       cn50xx;
7599 };
7600 typedef union cvmx_gmxx_rx_tx_status cvmx_gmxx_rx_tx_status_t;
7601 
7602 /**
7603  * cvmx_gmx#_rx_xaui_bad_col
7604  */
7605 union cvmx_gmxx_rx_xaui_bad_col {
7606 	uint64_t u64;
7607 	struct cvmx_gmxx_rx_xaui_bad_col_s {
7608 #ifdef __BIG_ENDIAN_BITFIELD
7609 	uint64_t reserved_40_63               : 24;
7610 	uint64_t val                          : 1;  /**< Set when GMX_RX_INT_REG[PCTERR] is set.
7611                                                          (XAUI mode only) */
7612 	uint64_t state                        : 3;  /**< When GMX_RX_INT_REG[PCTERR] is set, STATE will
7613                                                          conatin the receive state at the time of the
7614                                                          error.
7615                                                          (XAUI mode only) */
7616 	uint64_t lane_rxc                     : 4;  /**< When GMX_RX_INT_REG[PCTERR] is set, LANE_RXC will
7617                                                          conatin the XAUI column at the time of the error.
7618                                                          (XAUI mode only) */
7619 	uint64_t lane_rxd                     : 32; /**< When GMX_RX_INT_REG[PCTERR] is set, LANE_RXD will
7620                                                          conatin the XAUI column at the time of the error.
7621                                                          (XAUI mode only) */
7622 #else
7623 	uint64_t lane_rxd                     : 32;
7624 	uint64_t lane_rxc                     : 4;
7625 	uint64_t state                        : 3;
7626 	uint64_t val                          : 1;
7627 	uint64_t reserved_40_63               : 24;
7628 #endif
7629 	} s;
7630 	struct cvmx_gmxx_rx_xaui_bad_col_s    cn52xx;
7631 	struct cvmx_gmxx_rx_xaui_bad_col_s    cn52xxp1;
7632 	struct cvmx_gmxx_rx_xaui_bad_col_s    cn56xx;
7633 	struct cvmx_gmxx_rx_xaui_bad_col_s    cn56xxp1;
7634 	struct cvmx_gmxx_rx_xaui_bad_col_s    cn61xx;
7635 	struct cvmx_gmxx_rx_xaui_bad_col_s    cn63xx;
7636 	struct cvmx_gmxx_rx_xaui_bad_col_s    cn63xxp1;
7637 	struct cvmx_gmxx_rx_xaui_bad_col_s    cn66xx;
7638 	struct cvmx_gmxx_rx_xaui_bad_col_s    cn68xx;
7639 	struct cvmx_gmxx_rx_xaui_bad_col_s    cn68xxp1;
7640 	struct cvmx_gmxx_rx_xaui_bad_col_s    cnf71xx;
7641 };
7642 typedef union cvmx_gmxx_rx_xaui_bad_col cvmx_gmxx_rx_xaui_bad_col_t;
7643 
7644 /**
7645  * cvmx_gmx#_rx_xaui_ctl
7646  */
7647 union cvmx_gmxx_rx_xaui_ctl {
7648 	uint64_t u64;
7649 	struct cvmx_gmxx_rx_xaui_ctl_s {
7650 #ifdef __BIG_ENDIAN_BITFIELD
7651 	uint64_t reserved_2_63                : 62;
7652 	uint64_t status                       : 2;  /**< Link Status
7653                                                          0=Link OK
7654                                                          1=Local Fault
7655                                                          2=Remote Fault
7656                                                          3=Reserved
7657                                                          (XAUI mode only) */
7658 #else
7659 	uint64_t status                       : 2;
7660 	uint64_t reserved_2_63                : 62;
7661 #endif
7662 	} s;
7663 	struct cvmx_gmxx_rx_xaui_ctl_s        cn52xx;
7664 	struct cvmx_gmxx_rx_xaui_ctl_s        cn52xxp1;
7665 	struct cvmx_gmxx_rx_xaui_ctl_s        cn56xx;
7666 	struct cvmx_gmxx_rx_xaui_ctl_s        cn56xxp1;
7667 	struct cvmx_gmxx_rx_xaui_ctl_s        cn61xx;
7668 	struct cvmx_gmxx_rx_xaui_ctl_s        cn63xx;
7669 	struct cvmx_gmxx_rx_xaui_ctl_s        cn63xxp1;
7670 	struct cvmx_gmxx_rx_xaui_ctl_s        cn66xx;
7671 	struct cvmx_gmxx_rx_xaui_ctl_s        cn68xx;
7672 	struct cvmx_gmxx_rx_xaui_ctl_s        cn68xxp1;
7673 	struct cvmx_gmxx_rx_xaui_ctl_s        cnf71xx;
7674 };
7675 typedef union cvmx_gmxx_rx_xaui_ctl cvmx_gmxx_rx_xaui_ctl_t;
7676 
7677 /**
7678  * cvmx_gmx#_rxaui_ctl
7679  */
7680 union cvmx_gmxx_rxaui_ctl {
7681 	uint64_t u64;
7682 	struct cvmx_gmxx_rxaui_ctl_s {
7683 #ifdef __BIG_ENDIAN_BITFIELD
7684 	uint64_t reserved_1_63                : 63;
7685 	uint64_t disparity                    : 1;  /**< Selects which disparity calculation to use when
7686                                                          combining or splitting the RXAUI lanes.
7687                                                          0=Interleave lanes before PCS layer
7688                                                            As described in the Dune Networks/Broadcom
7689                                                            RXAUI v2.1 specification.
7690                                                            (obeys 6.25GHz SERDES disparity)
7691                                                          1=Interleave lanes after PCS layer
7692                                                            As described in the Marvell RXAUI Interface
7693                                                            specification.
7694                                                            (does not obey 6.25GHz SERDES disparity)
7695                                                          (RXAUI mode only) */
7696 #else
7697 	uint64_t disparity                    : 1;
7698 	uint64_t reserved_1_63                : 63;
7699 #endif
7700 	} s;
7701 	struct cvmx_gmxx_rxaui_ctl_s          cn68xx;
7702 	struct cvmx_gmxx_rxaui_ctl_s          cn68xxp1;
7703 };
7704 typedef union cvmx_gmxx_rxaui_ctl cvmx_gmxx_rxaui_ctl_t;
7705 
7706 /**
7707  * cvmx_gmx#_smac#
7708  *
7709  * GMX_SMAC = Packet SMAC
7710  *
7711  */
7712 union cvmx_gmxx_smacx {
7713 	uint64_t u64;
7714 	struct cvmx_gmxx_smacx_s {
7715 #ifdef __BIG_ENDIAN_BITFIELD
7716 	uint64_t reserved_48_63               : 16;
7717 	uint64_t smac                         : 48; /**< The SMAC field is used for generating and
7718                                                          accepting Control Pause packets */
7719 #else
7720 	uint64_t smac                         : 48;
7721 	uint64_t reserved_48_63               : 16;
7722 #endif
7723 	} s;
7724 	struct cvmx_gmxx_smacx_s              cn30xx;
7725 	struct cvmx_gmxx_smacx_s              cn31xx;
7726 	struct cvmx_gmxx_smacx_s              cn38xx;
7727 	struct cvmx_gmxx_smacx_s              cn38xxp2;
7728 	struct cvmx_gmxx_smacx_s              cn50xx;
7729 	struct cvmx_gmxx_smacx_s              cn52xx;
7730 	struct cvmx_gmxx_smacx_s              cn52xxp1;
7731 	struct cvmx_gmxx_smacx_s              cn56xx;
7732 	struct cvmx_gmxx_smacx_s              cn56xxp1;
7733 	struct cvmx_gmxx_smacx_s              cn58xx;
7734 	struct cvmx_gmxx_smacx_s              cn58xxp1;
7735 	struct cvmx_gmxx_smacx_s              cn61xx;
7736 	struct cvmx_gmxx_smacx_s              cn63xx;
7737 	struct cvmx_gmxx_smacx_s              cn63xxp1;
7738 	struct cvmx_gmxx_smacx_s              cn66xx;
7739 	struct cvmx_gmxx_smacx_s              cn68xx;
7740 	struct cvmx_gmxx_smacx_s              cn68xxp1;
7741 	struct cvmx_gmxx_smacx_s              cnf71xx;
7742 };
7743 typedef union cvmx_gmxx_smacx cvmx_gmxx_smacx_t;
7744 
7745 /**
7746  * cvmx_gmx#_soft_bist
7747  *
7748  * GMX_SOFT_BIST = Software BIST Control
7749  *
7750  */
7751 union cvmx_gmxx_soft_bist {
7752 	uint64_t u64;
7753 	struct cvmx_gmxx_soft_bist_s {
7754 #ifdef __BIG_ENDIAN_BITFIELD
7755 	uint64_t reserved_2_63                : 62;
7756 	uint64_t start_bist                   : 1;  /**< Run BIST on all memories in the XAUI/RXAUI
7757                                                          CLK domain */
7758 	uint64_t clear_bist                   : 1;  /**< Choose between full BIST and CLEAR bist
7759                                                          0=Run full BIST
7760                                                          1=Only run clear BIST */
7761 #else
7762 	uint64_t clear_bist                   : 1;
7763 	uint64_t start_bist                   : 1;
7764 	uint64_t reserved_2_63                : 62;
7765 #endif
7766 	} s;
7767 	struct cvmx_gmxx_soft_bist_s          cn63xx;
7768 	struct cvmx_gmxx_soft_bist_s          cn63xxp1;
7769 	struct cvmx_gmxx_soft_bist_s          cn66xx;
7770 	struct cvmx_gmxx_soft_bist_s          cn68xx;
7771 	struct cvmx_gmxx_soft_bist_s          cn68xxp1;
7772 };
7773 typedef union cvmx_gmxx_soft_bist cvmx_gmxx_soft_bist_t;
7774 
7775 /**
7776  * cvmx_gmx#_stat_bp
7777  *
7778  * GMX_STAT_BP = Number of cycles that the TX/Stats block has help up operation
7779  *
7780  *
7781  * Notes:
7782  * It has no relationship with the TX FIFO per se.  The TX engine sends packets
7783  * from PKO and upon completion, sends a command to the TX stats block for an
7784  * update based on the packet size.  The stats operation can take a few cycles -
7785  * normally not enough to be visible considering the 64B min packet size that is
7786  * ethernet convention.
7787  *
7788  * In the rare case in which SW attempted to schedule really, really, small packets
7789  * or the sclk (6xxx) is running ass-slow, then the stats updates may not happen in
7790  * real time and can back up the TX engine.
7791  *
7792  * This counter is the number of cycles in which the TX engine was stalled.  In
7793  * normal operation, it should always be zeros.
7794  */
7795 union cvmx_gmxx_stat_bp {
7796 	uint64_t u64;
7797 	struct cvmx_gmxx_stat_bp_s {
7798 #ifdef __BIG_ENDIAN_BITFIELD
7799 	uint64_t reserved_17_63               : 47;
7800 	uint64_t bp                           : 1;  /**< Current TX stats BP state
7801                                                          When the TX stats machine cannot update the stats
7802                                                          registers quickly enough, the machine has the
7803                                                          ability to BP TX datapath.  This is a rare event
7804                                                          and will not occur in normal operation.
7805                                                          0 = no backpressure is applied
7806                                                          1 = backpressure is applied to TX datapath to
7807                                                              allow stat update operations to complete */
7808 	uint64_t cnt                          : 16; /**< Number of cycles that BP has been asserted
7809                                                          Saturating counter */
7810 #else
7811 	uint64_t cnt                          : 16;
7812 	uint64_t bp                           : 1;
7813 	uint64_t reserved_17_63               : 47;
7814 #endif
7815 	} s;
7816 	struct cvmx_gmxx_stat_bp_s            cn30xx;
7817 	struct cvmx_gmxx_stat_bp_s            cn31xx;
7818 	struct cvmx_gmxx_stat_bp_s            cn38xx;
7819 	struct cvmx_gmxx_stat_bp_s            cn38xxp2;
7820 	struct cvmx_gmxx_stat_bp_s            cn50xx;
7821 	struct cvmx_gmxx_stat_bp_s            cn52xx;
7822 	struct cvmx_gmxx_stat_bp_s            cn52xxp1;
7823 	struct cvmx_gmxx_stat_bp_s            cn56xx;
7824 	struct cvmx_gmxx_stat_bp_s            cn56xxp1;
7825 	struct cvmx_gmxx_stat_bp_s            cn58xx;
7826 	struct cvmx_gmxx_stat_bp_s            cn58xxp1;
7827 	struct cvmx_gmxx_stat_bp_s            cn61xx;
7828 	struct cvmx_gmxx_stat_bp_s            cn63xx;
7829 	struct cvmx_gmxx_stat_bp_s            cn63xxp1;
7830 	struct cvmx_gmxx_stat_bp_s            cn66xx;
7831 	struct cvmx_gmxx_stat_bp_s            cn68xx;
7832 	struct cvmx_gmxx_stat_bp_s            cn68xxp1;
7833 	struct cvmx_gmxx_stat_bp_s            cnf71xx;
7834 };
7835 typedef union cvmx_gmxx_stat_bp cvmx_gmxx_stat_bp_t;
7836 
7837 /**
7838  * cvmx_gmx#_tb_reg
7839  *
7840  * DON'T PUT IN HRM*
7841  *
7842  */
7843 union cvmx_gmxx_tb_reg {
7844 	uint64_t u64;
7845 	struct cvmx_gmxx_tb_reg_s {
7846 #ifdef __BIG_ENDIAN_BITFIELD
7847 	uint64_t reserved_1_63                : 63;
7848 	uint64_t wr_magic                     : 1;  /**< Enter stats model magic mode */
7849 #else
7850 	uint64_t wr_magic                     : 1;
7851 	uint64_t reserved_1_63                : 63;
7852 #endif
7853 	} s;
7854 	struct cvmx_gmxx_tb_reg_s             cn61xx;
7855 	struct cvmx_gmxx_tb_reg_s             cn66xx;
7856 	struct cvmx_gmxx_tb_reg_s             cn68xx;
7857 	struct cvmx_gmxx_tb_reg_s             cnf71xx;
7858 };
7859 typedef union cvmx_gmxx_tb_reg cvmx_gmxx_tb_reg_t;
7860 
7861 /**
7862  * cvmx_gmx#_tx#_append
7863  *
7864  * GMX_TX_APPEND = Packet TX Append Control
7865  *
7866  */
7867 union cvmx_gmxx_txx_append {
7868 	uint64_t u64;
7869 	struct cvmx_gmxx_txx_append_s {
7870 #ifdef __BIG_ENDIAN_BITFIELD
7871 	uint64_t reserved_4_63                : 60;
7872 	uint64_t force_fcs                    : 1;  /**< Append the Ethernet FCS on each pause packet
7873                                                          when FCS is clear.  Pause packets are normally
7874                                                          padded to 60 bytes.  If GMX_TX_MIN_PKT[MIN_SIZE]
7875                                                          exceeds 59, then FORCE_FCS will not be used. */
7876 	uint64_t fcs                          : 1;  /**< Append the Ethernet FCS on each packet */
7877 	uint64_t pad                          : 1;  /**< Append PAD bytes such that min sized */
7878 	uint64_t preamble                     : 1;  /**< Prepend the Ethernet preamble on each transfer
7879                                                          When GMX_TX_XAUI_CTL[HG_EN] is set, PREAMBLE
7880                                                          must be zero. */
7881 #else
7882 	uint64_t preamble                     : 1;
7883 	uint64_t pad                          : 1;
7884 	uint64_t fcs                          : 1;
7885 	uint64_t force_fcs                    : 1;
7886 	uint64_t reserved_4_63                : 60;
7887 #endif
7888 	} s;
7889 	struct cvmx_gmxx_txx_append_s         cn30xx;
7890 	struct cvmx_gmxx_txx_append_s         cn31xx;
7891 	struct cvmx_gmxx_txx_append_s         cn38xx;
7892 	struct cvmx_gmxx_txx_append_s         cn38xxp2;
7893 	struct cvmx_gmxx_txx_append_s         cn50xx;
7894 	struct cvmx_gmxx_txx_append_s         cn52xx;
7895 	struct cvmx_gmxx_txx_append_s         cn52xxp1;
7896 	struct cvmx_gmxx_txx_append_s         cn56xx;
7897 	struct cvmx_gmxx_txx_append_s         cn56xxp1;
7898 	struct cvmx_gmxx_txx_append_s         cn58xx;
7899 	struct cvmx_gmxx_txx_append_s         cn58xxp1;
7900 	struct cvmx_gmxx_txx_append_s         cn61xx;
7901 	struct cvmx_gmxx_txx_append_s         cn63xx;
7902 	struct cvmx_gmxx_txx_append_s         cn63xxp1;
7903 	struct cvmx_gmxx_txx_append_s         cn66xx;
7904 	struct cvmx_gmxx_txx_append_s         cn68xx;
7905 	struct cvmx_gmxx_txx_append_s         cn68xxp1;
7906 	struct cvmx_gmxx_txx_append_s         cnf71xx;
7907 };
7908 typedef union cvmx_gmxx_txx_append cvmx_gmxx_txx_append_t;
7909 
7910 /**
7911  * cvmx_gmx#_tx#_burst
7912  *
7913  * GMX_TX_BURST = Packet TX Burst Counter
7914  *
7915  */
7916 union cvmx_gmxx_txx_burst {
7917 	uint64_t u64;
7918 	struct cvmx_gmxx_txx_burst_s {
7919 #ifdef __BIG_ENDIAN_BITFIELD
7920 	uint64_t reserved_16_63               : 48;
7921 	uint64_t burst                        : 16; /**< Burst (refer to 802.3 to set correctly)
7922                                                          Only valid for 1000Mbs half-duplex operation
7923                                                           halfdup / 1000Mbs: 0x2000
7924                                                           all other modes:   0x0
7925                                                          (SGMII/1000Base-X only) */
7926 #else
7927 	uint64_t burst                        : 16;
7928 	uint64_t reserved_16_63               : 48;
7929 #endif
7930 	} s;
7931 	struct cvmx_gmxx_txx_burst_s          cn30xx;
7932 	struct cvmx_gmxx_txx_burst_s          cn31xx;
7933 	struct cvmx_gmxx_txx_burst_s          cn38xx;
7934 	struct cvmx_gmxx_txx_burst_s          cn38xxp2;
7935 	struct cvmx_gmxx_txx_burst_s          cn50xx;
7936 	struct cvmx_gmxx_txx_burst_s          cn52xx;
7937 	struct cvmx_gmxx_txx_burst_s          cn52xxp1;
7938 	struct cvmx_gmxx_txx_burst_s          cn56xx;
7939 	struct cvmx_gmxx_txx_burst_s          cn56xxp1;
7940 	struct cvmx_gmxx_txx_burst_s          cn58xx;
7941 	struct cvmx_gmxx_txx_burst_s          cn58xxp1;
7942 	struct cvmx_gmxx_txx_burst_s          cn61xx;
7943 	struct cvmx_gmxx_txx_burst_s          cn63xx;
7944 	struct cvmx_gmxx_txx_burst_s          cn63xxp1;
7945 	struct cvmx_gmxx_txx_burst_s          cn66xx;
7946 	struct cvmx_gmxx_txx_burst_s          cn68xx;
7947 	struct cvmx_gmxx_txx_burst_s          cn68xxp1;
7948 	struct cvmx_gmxx_txx_burst_s          cnf71xx;
7949 };
7950 typedef union cvmx_gmxx_txx_burst cvmx_gmxx_txx_burst_t;
7951 
7952 /**
7953  * cvmx_gmx#_tx#_cbfc_xoff
7954  */
7955 union cvmx_gmxx_txx_cbfc_xoff {
7956 	uint64_t u64;
7957 	struct cvmx_gmxx_txx_cbfc_xoff_s {
7958 #ifdef __BIG_ENDIAN_BITFIELD
7959 	uint64_t reserved_16_63               : 48;
7960 	uint64_t xoff                         : 16; /**< Which ports to backpressure
7961                                                          Do not write in HiGig2 mode i.e. when
7962                                                          GMX_TX_XAUI_CTL[HG_EN]=1 and
7963                                                          GMX_RX_UDD_SKP[SKIP]=16. */
7964 #else
7965 	uint64_t xoff                         : 16;
7966 	uint64_t reserved_16_63               : 48;
7967 #endif
7968 	} s;
7969 	struct cvmx_gmxx_txx_cbfc_xoff_s      cn52xx;
7970 	struct cvmx_gmxx_txx_cbfc_xoff_s      cn56xx;
7971 	struct cvmx_gmxx_txx_cbfc_xoff_s      cn61xx;
7972 	struct cvmx_gmxx_txx_cbfc_xoff_s      cn63xx;
7973 	struct cvmx_gmxx_txx_cbfc_xoff_s      cn63xxp1;
7974 	struct cvmx_gmxx_txx_cbfc_xoff_s      cn66xx;
7975 	struct cvmx_gmxx_txx_cbfc_xoff_s      cn68xx;
7976 	struct cvmx_gmxx_txx_cbfc_xoff_s      cn68xxp1;
7977 	struct cvmx_gmxx_txx_cbfc_xoff_s      cnf71xx;
7978 };
7979 typedef union cvmx_gmxx_txx_cbfc_xoff cvmx_gmxx_txx_cbfc_xoff_t;
7980 
7981 /**
7982  * cvmx_gmx#_tx#_cbfc_xon
7983  */
7984 union cvmx_gmxx_txx_cbfc_xon {
7985 	uint64_t u64;
7986 	struct cvmx_gmxx_txx_cbfc_xon_s {
7987 #ifdef __BIG_ENDIAN_BITFIELD
7988 	uint64_t reserved_16_63               : 48;
7989 	uint64_t xon                          : 16; /**< Which ports to stop backpressure
7990                                                          Do not write in HiGig2 mode i.e. when
7991                                                          GMX_TX_XAUI_CTL[HG_EN]=1 and
7992                                                          GMX_RX_UDD_SKP[SKIP]=16. */
7993 #else
7994 	uint64_t xon                          : 16;
7995 	uint64_t reserved_16_63               : 48;
7996 #endif
7997 	} s;
7998 	struct cvmx_gmxx_txx_cbfc_xon_s       cn52xx;
7999 	struct cvmx_gmxx_txx_cbfc_xon_s       cn56xx;
8000 	struct cvmx_gmxx_txx_cbfc_xon_s       cn61xx;
8001 	struct cvmx_gmxx_txx_cbfc_xon_s       cn63xx;
8002 	struct cvmx_gmxx_txx_cbfc_xon_s       cn63xxp1;
8003 	struct cvmx_gmxx_txx_cbfc_xon_s       cn66xx;
8004 	struct cvmx_gmxx_txx_cbfc_xon_s       cn68xx;
8005 	struct cvmx_gmxx_txx_cbfc_xon_s       cn68xxp1;
8006 	struct cvmx_gmxx_txx_cbfc_xon_s       cnf71xx;
8007 };
8008 typedef union cvmx_gmxx_txx_cbfc_xon cvmx_gmxx_txx_cbfc_xon_t;
8009 
8010 /**
8011  * cvmx_gmx#_tx#_clk
8012  *
8013  * Per Port
8014  *
8015  *
8016  * GMX_TX_CLK = RGMII TX Clock Generation Register
8017  *
8018  * Notes:
8019  * Programming Restrictions:
8020  *  (1) In RGMII mode, if GMX_PRT_CFG[SPEED]==0, then CLK_CNT must be > 1.
8021  *  (2) In MII mode, CLK_CNT == 1
8022  *  (3) In RGMII or GMII mode, if CLK_CNT==0, Octeon will not generate a tx clock.
8023  *
8024  * RGMII Example:
8025  *  Given a 125MHz PLL reference clock...
8026  *   CLK_CNT ==  1 ==> 125.0MHz TXC clock period (8ns* 1)
8027  *   CLK_CNT ==  5 ==>  25.0MHz TXC clock period (8ns* 5)
8028  *   CLK_CNT == 50 ==>   2.5MHz TXC clock period (8ns*50)
8029  */
8030 union cvmx_gmxx_txx_clk {
8031 	uint64_t u64;
8032 	struct cvmx_gmxx_txx_clk_s {
8033 #ifdef __BIG_ENDIAN_BITFIELD
8034 	uint64_t reserved_6_63                : 58;
8035 	uint64_t clk_cnt                      : 6;  /**< Controls the RGMII TXC frequency
8036                                                          When PLL is used, TXC(phase) =
8037                                                           spi4_tx_pll_ref_clk(period)/2*CLK_CNT
8038                                                          When PLL bypass is used, TXC(phase) =
8039                                                           spi4_tx_pll_ref_clk(period)*2*CLK_CNT
8040                                                          NOTE: CLK_CNT==0 will not generate any clock
8041                                                          if CLK_CNT > 1 if GMX_PRT_CFG[SPEED]==0 */
8042 #else
8043 	uint64_t clk_cnt                      : 6;
8044 	uint64_t reserved_6_63                : 58;
8045 #endif
8046 	} s;
8047 	struct cvmx_gmxx_txx_clk_s            cn30xx;
8048 	struct cvmx_gmxx_txx_clk_s            cn31xx;
8049 	struct cvmx_gmxx_txx_clk_s            cn38xx;
8050 	struct cvmx_gmxx_txx_clk_s            cn38xxp2;
8051 	struct cvmx_gmxx_txx_clk_s            cn50xx;
8052 	struct cvmx_gmxx_txx_clk_s            cn58xx;
8053 	struct cvmx_gmxx_txx_clk_s            cn58xxp1;
8054 };
8055 typedef union cvmx_gmxx_txx_clk cvmx_gmxx_txx_clk_t;
8056 
8057 /**
8058  * cvmx_gmx#_tx#_ctl
8059  *
8060  * GMX_TX_CTL = TX Control register
8061  *
8062  */
8063 union cvmx_gmxx_txx_ctl {
8064 	uint64_t u64;
8065 	struct cvmx_gmxx_txx_ctl_s {
8066 #ifdef __BIG_ENDIAN_BITFIELD
8067 	uint64_t reserved_2_63                : 62;
8068 	uint64_t xsdef_en                     : 1;  /**< Enables the excessive deferral check for stats
8069                                                          and interrupts
8070                                                          (SGMII/1000Base-X half-duplex only) */
8071 	uint64_t xscol_en                     : 1;  /**< Enables the excessive collision check for stats
8072                                                          and interrupts
8073                                                          (SGMII/1000Base-X half-duplex only) */
8074 #else
8075 	uint64_t xscol_en                     : 1;
8076 	uint64_t xsdef_en                     : 1;
8077 	uint64_t reserved_2_63                : 62;
8078 #endif
8079 	} s;
8080 	struct cvmx_gmxx_txx_ctl_s            cn30xx;
8081 	struct cvmx_gmxx_txx_ctl_s            cn31xx;
8082 	struct cvmx_gmxx_txx_ctl_s            cn38xx;
8083 	struct cvmx_gmxx_txx_ctl_s            cn38xxp2;
8084 	struct cvmx_gmxx_txx_ctl_s            cn50xx;
8085 	struct cvmx_gmxx_txx_ctl_s            cn52xx;
8086 	struct cvmx_gmxx_txx_ctl_s            cn52xxp1;
8087 	struct cvmx_gmxx_txx_ctl_s            cn56xx;
8088 	struct cvmx_gmxx_txx_ctl_s            cn56xxp1;
8089 	struct cvmx_gmxx_txx_ctl_s            cn58xx;
8090 	struct cvmx_gmxx_txx_ctl_s            cn58xxp1;
8091 	struct cvmx_gmxx_txx_ctl_s            cn61xx;
8092 	struct cvmx_gmxx_txx_ctl_s            cn63xx;
8093 	struct cvmx_gmxx_txx_ctl_s            cn63xxp1;
8094 	struct cvmx_gmxx_txx_ctl_s            cn66xx;
8095 	struct cvmx_gmxx_txx_ctl_s            cn68xx;
8096 	struct cvmx_gmxx_txx_ctl_s            cn68xxp1;
8097 	struct cvmx_gmxx_txx_ctl_s            cnf71xx;
8098 };
8099 typedef union cvmx_gmxx_txx_ctl cvmx_gmxx_txx_ctl_t;
8100 
8101 /**
8102  * cvmx_gmx#_tx#_min_pkt
8103  *
8104  * GMX_TX_MIN_PKT = Packet TX Min Size Packet (PAD upto min size)
8105  *
8106  */
8107 union cvmx_gmxx_txx_min_pkt {
8108 	uint64_t u64;
8109 	struct cvmx_gmxx_txx_min_pkt_s {
8110 #ifdef __BIG_ENDIAN_BITFIELD
8111 	uint64_t reserved_8_63                : 56;
8112 	uint64_t min_size                     : 8;  /**< Min frame in bytes before the FCS is applied
8113                                                          Padding is only appened when GMX_TX_APPEND[PAD]
8114                                                          for the coresponding port is set.
8115                                                          In SGMII mode, packets will be padded to
8116                                                           MIN_SIZE+1. The reset value will pad to 60 bytes.
8117                                                          In XAUI mode, packets will be padded to
8118                                                           MIN(252,(MIN_SIZE+1 & ~0x3))
8119                                                          When GMX_TX_XAUI_CTL[HG_EN] is set, the HiGig
8120                                                           header (12B or 16B) is normally added to the
8121                                                           packet, so MIN_SIZE should be 59+12=71B for
8122                                                           HiGig or 59+16=75B for HiGig2. */
8123 #else
8124 	uint64_t min_size                     : 8;
8125 	uint64_t reserved_8_63                : 56;
8126 #endif
8127 	} s;
8128 	struct cvmx_gmxx_txx_min_pkt_s        cn30xx;
8129 	struct cvmx_gmxx_txx_min_pkt_s        cn31xx;
8130 	struct cvmx_gmxx_txx_min_pkt_s        cn38xx;
8131 	struct cvmx_gmxx_txx_min_pkt_s        cn38xxp2;
8132 	struct cvmx_gmxx_txx_min_pkt_s        cn50xx;
8133 	struct cvmx_gmxx_txx_min_pkt_s        cn52xx;
8134 	struct cvmx_gmxx_txx_min_pkt_s        cn52xxp1;
8135 	struct cvmx_gmxx_txx_min_pkt_s        cn56xx;
8136 	struct cvmx_gmxx_txx_min_pkt_s        cn56xxp1;
8137 	struct cvmx_gmxx_txx_min_pkt_s        cn58xx;
8138 	struct cvmx_gmxx_txx_min_pkt_s        cn58xxp1;
8139 	struct cvmx_gmxx_txx_min_pkt_s        cn61xx;
8140 	struct cvmx_gmxx_txx_min_pkt_s        cn63xx;
8141 	struct cvmx_gmxx_txx_min_pkt_s        cn63xxp1;
8142 	struct cvmx_gmxx_txx_min_pkt_s        cn66xx;
8143 	struct cvmx_gmxx_txx_min_pkt_s        cn68xx;
8144 	struct cvmx_gmxx_txx_min_pkt_s        cn68xxp1;
8145 	struct cvmx_gmxx_txx_min_pkt_s        cnf71xx;
8146 };
8147 typedef union cvmx_gmxx_txx_min_pkt cvmx_gmxx_txx_min_pkt_t;
8148 
8149 /**
8150  * cvmx_gmx#_tx#_pause_pkt_interval
8151  *
8152  * GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets will be sent
8153  *
8154  *
8155  * Notes:
8156  * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and
8157  * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
8158  * designer.  It is suggested that TIME be much greater than INTERVAL and
8159  * GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
8160  * count and then when the backpressure condition is lifted, a PAUSE packet
8161  * with TIME==0 will be sent indicating that Octane is ready for additional
8162  * data.
8163  *
8164  * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is
8165  * suggested that TIME and INTERVAL are programmed such that they satisify the
8166  * following rule...
8167  *
8168  *    INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
8169  *
8170  * where largest_pkt_size is that largest packet that the system can send
8171  * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
8172  * of the PAUSE packet (normally 64B).
8173  */
8174 union cvmx_gmxx_txx_pause_pkt_interval {
8175 	uint64_t u64;
8176 	struct cvmx_gmxx_txx_pause_pkt_interval_s {
8177 #ifdef __BIG_ENDIAN_BITFIELD
8178 	uint64_t reserved_16_63               : 48;
8179 	uint64_t interval                     : 16; /**< Arbitrate for a 802.3 pause packet, HiGig2 message,
8180                                                          or CBFC pause packet every (INTERVAL*512)
8181                                                          bit-times.
8182                                                          Normally, 0 < INTERVAL < GMX_TX_PAUSE_PKT_TIME
8183                                                          INTERVAL=0, will only send a single PAUSE packet
8184                                                          for each backpressure event */
8185 #else
8186 	uint64_t interval                     : 16;
8187 	uint64_t reserved_16_63               : 48;
8188 #endif
8189 	} s;
8190 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx;
8191 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx;
8192 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx;
8193 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2;
8194 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx;
8195 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx;
8196 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1;
8197 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx;
8198 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1;
8199 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx;
8200 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1;
8201 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn61xx;
8202 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xx;
8203 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xxp1;
8204 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn66xx;
8205 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn68xx;
8206 	struct cvmx_gmxx_txx_pause_pkt_interval_s cn68xxp1;
8207 	struct cvmx_gmxx_txx_pause_pkt_interval_s cnf71xx;
8208 };
8209 typedef union cvmx_gmxx_txx_pause_pkt_interval cvmx_gmxx_txx_pause_pkt_interval_t;
8210 
8211 /**
8212  * cvmx_gmx#_tx#_pause_pkt_time
8213  *
8214  * GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field
8215  *
8216  *
8217  * Notes:
8218  * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and
8219  * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
8220  * designer.  It is suggested that TIME be much greater than INTERVAL and
8221  * GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
8222  * count and then when the backpressure condition is lifted, a PAUSE packet
8223  * with TIME==0 will be sent indicating that Octane is ready for additional
8224  * data.
8225  *
8226  * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is
8227  * suggested that TIME and INTERVAL are programmed such that they satisify the
8228  * following rule...
8229  *
8230  *    INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
8231  *
8232  * where largest_pkt_size is that largest packet that the system can send
8233  * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
8234  * of the PAUSE packet (normally 64B).
8235  */
8236 union cvmx_gmxx_txx_pause_pkt_time {
8237 	uint64_t u64;
8238 	struct cvmx_gmxx_txx_pause_pkt_time_s {
8239 #ifdef __BIG_ENDIAN_BITFIELD
8240 	uint64_t reserved_16_63               : 48;
8241 	uint64_t time                         : 16; /**< The pause_time field placed in outbnd 802.3 pause
8242                                                          packets, HiGig2 messages, or CBFC pause packets.
8243                                                          pause_time is in 512 bit-times
8244                                                          Normally, TIME > GMX_TX_PAUSE_PKT_INTERVAL */
8245 #else
8246 	uint64_t time                         : 16;
8247 	uint64_t reserved_16_63               : 48;
8248 #endif
8249 	} s;
8250 	struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx;
8251 	struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx;
8252 	struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx;
8253 	struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2;
8254 	struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx;
8255 	struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx;
8256 	struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1;
8257 	struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx;
8258 	struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1;
8259 	struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx;
8260 	struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1;
8261 	struct cvmx_gmxx_txx_pause_pkt_time_s cn61xx;
8262 	struct cvmx_gmxx_txx_pause_pkt_time_s cn63xx;
8263 	struct cvmx_gmxx_txx_pause_pkt_time_s cn63xxp1;
8264 	struct cvmx_gmxx_txx_pause_pkt_time_s cn66xx;
8265 	struct cvmx_gmxx_txx_pause_pkt_time_s cn68xx;
8266 	struct cvmx_gmxx_txx_pause_pkt_time_s cn68xxp1;
8267 	struct cvmx_gmxx_txx_pause_pkt_time_s cnf71xx;
8268 };
8269 typedef union cvmx_gmxx_txx_pause_pkt_time cvmx_gmxx_txx_pause_pkt_time_t;
8270 
8271 /**
8272  * cvmx_gmx#_tx#_pause_togo
8273  *
8274  * GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure
8275  *
8276  */
8277 union cvmx_gmxx_txx_pause_togo {
8278 	uint64_t u64;
8279 	struct cvmx_gmxx_txx_pause_togo_s {
8280 #ifdef __BIG_ENDIAN_BITFIELD
8281 	uint64_t reserved_32_63               : 32;
8282 	uint64_t msg_time                     : 16; /**< Amount of time remaining to backpressure
8283                                                          From the higig2 physical message pause timer
8284                                                          (only valid on port0) */
8285 	uint64_t time                         : 16; /**< Amount of time remaining to backpressure
8286                                                          From the standard 802.3 pause timer */
8287 #else
8288 	uint64_t time                         : 16;
8289 	uint64_t msg_time                     : 16;
8290 	uint64_t reserved_32_63               : 32;
8291 #endif
8292 	} s;
8293 	struct cvmx_gmxx_txx_pause_togo_cn30xx {
8294 #ifdef __BIG_ENDIAN_BITFIELD
8295 	uint64_t reserved_16_63               : 48;
8296 	uint64_t time                         : 16; /**< Amount of time remaining to backpressure */
8297 #else
8298 	uint64_t time                         : 16;
8299 	uint64_t reserved_16_63               : 48;
8300 #endif
8301 	} cn30xx;
8302 	struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx;
8303 	struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx;
8304 	struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2;
8305 	struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx;
8306 	struct cvmx_gmxx_txx_pause_togo_s     cn52xx;
8307 	struct cvmx_gmxx_txx_pause_togo_s     cn52xxp1;
8308 	struct cvmx_gmxx_txx_pause_togo_s     cn56xx;
8309 	struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1;
8310 	struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx;
8311 	struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1;
8312 	struct cvmx_gmxx_txx_pause_togo_s     cn61xx;
8313 	struct cvmx_gmxx_txx_pause_togo_s     cn63xx;
8314 	struct cvmx_gmxx_txx_pause_togo_s     cn63xxp1;
8315 	struct cvmx_gmxx_txx_pause_togo_s     cn66xx;
8316 	struct cvmx_gmxx_txx_pause_togo_s     cn68xx;
8317 	struct cvmx_gmxx_txx_pause_togo_s     cn68xxp1;
8318 	struct cvmx_gmxx_txx_pause_togo_s     cnf71xx;
8319 };
8320 typedef union cvmx_gmxx_txx_pause_togo cvmx_gmxx_txx_pause_togo_t;
8321 
8322 /**
8323  * cvmx_gmx#_tx#_pause_zero
8324  *
8325  * GMX_TX_PAUSE_ZERO = Packet TX Amount of time remaining to backpressure
8326  *
8327  */
8328 union cvmx_gmxx_txx_pause_zero {
8329 	uint64_t u64;
8330 	struct cvmx_gmxx_txx_pause_zero_s {
8331 #ifdef __BIG_ENDIAN_BITFIELD
8332 	uint64_t reserved_1_63                : 63;
8333 	uint64_t send                         : 1;  /**< When backpressure condition clear, send PAUSE
8334                                                          packet with pause_time of zero to enable the
8335                                                          channel */
8336 #else
8337 	uint64_t send                         : 1;
8338 	uint64_t reserved_1_63                : 63;
8339 #endif
8340 	} s;
8341 	struct cvmx_gmxx_txx_pause_zero_s     cn30xx;
8342 	struct cvmx_gmxx_txx_pause_zero_s     cn31xx;
8343 	struct cvmx_gmxx_txx_pause_zero_s     cn38xx;
8344 	struct cvmx_gmxx_txx_pause_zero_s     cn38xxp2;
8345 	struct cvmx_gmxx_txx_pause_zero_s     cn50xx;
8346 	struct cvmx_gmxx_txx_pause_zero_s     cn52xx;
8347 	struct cvmx_gmxx_txx_pause_zero_s     cn52xxp1;
8348 	struct cvmx_gmxx_txx_pause_zero_s     cn56xx;
8349 	struct cvmx_gmxx_txx_pause_zero_s     cn56xxp1;
8350 	struct cvmx_gmxx_txx_pause_zero_s     cn58xx;
8351 	struct cvmx_gmxx_txx_pause_zero_s     cn58xxp1;
8352 	struct cvmx_gmxx_txx_pause_zero_s     cn61xx;
8353 	struct cvmx_gmxx_txx_pause_zero_s     cn63xx;
8354 	struct cvmx_gmxx_txx_pause_zero_s     cn63xxp1;
8355 	struct cvmx_gmxx_txx_pause_zero_s     cn66xx;
8356 	struct cvmx_gmxx_txx_pause_zero_s     cn68xx;
8357 	struct cvmx_gmxx_txx_pause_zero_s     cn68xxp1;
8358 	struct cvmx_gmxx_txx_pause_zero_s     cnf71xx;
8359 };
8360 typedef union cvmx_gmxx_txx_pause_zero cvmx_gmxx_txx_pause_zero_t;
8361 
8362 /**
8363  * cvmx_gmx#_tx#_pipe
8364  */
8365 union cvmx_gmxx_txx_pipe {
8366 	uint64_t u64;
8367 	struct cvmx_gmxx_txx_pipe_s {
8368 #ifdef __BIG_ENDIAN_BITFIELD
8369 	uint64_t reserved_33_63               : 31;
8370 	uint64_t ign_bp                       : 1;  /**< When set, GMX will not throttle the TX machines
8371                                                          if the PIPE return FIFO fills up.
8372                                                          IGN_BP should be clear in normal operation. */
8373 	uint64_t reserved_21_31               : 11;
8374 	uint64_t nump                         : 5;  /**< Number of pipes this port|channel supports.
8375                                                          In SGMII mode, each port binds to one pipe.
8376                                                          In XAUI/RXAUI mode, the port can bind upto 16
8377                                                          consecutive pipes.
8378                                                          SGMII      mode, NUMP = 0 or 1.
8379                                                          XAUI/RXAUI mode, NUMP = 0 or 1-16.
8380                                                          0 = Disabled */
8381 	uint64_t reserved_7_15                : 9;
8382 	uint64_t base                         : 7;  /**< When NUMP is non-zero, indicates the base pipe
8383                                                          number this port|channel will accept.
8384                                                          This port will accept pko packets from pipes in
8385                                                          the range of:
8386                                                            BASE .. (BASE+(NUMP-1))
8387                                                          BASE and NUMP must be constrained such that
8388                                                            1) BASE+(NUMP-1) < 127
8389                                                            2) Each used PKO pipe must map to exactly
8390                                                               one port|channel
8391                                                            3) The pipe ranges must be consistent with
8392                                                               the PKO configuration. */
8393 #else
8394 	uint64_t base                         : 7;
8395 	uint64_t reserved_7_15                : 9;
8396 	uint64_t nump                         : 5;
8397 	uint64_t reserved_21_31               : 11;
8398 	uint64_t ign_bp                       : 1;
8399 	uint64_t reserved_33_63               : 31;
8400 #endif
8401 	} s;
8402 	struct cvmx_gmxx_txx_pipe_s           cn68xx;
8403 	struct cvmx_gmxx_txx_pipe_s           cn68xxp1;
8404 };
8405 typedef union cvmx_gmxx_txx_pipe cvmx_gmxx_txx_pipe_t;
8406 
8407 /**
8408  * cvmx_gmx#_tx#_sgmii_ctl
8409  */
8410 union cvmx_gmxx_txx_sgmii_ctl {
8411 	uint64_t u64;
8412 	struct cvmx_gmxx_txx_sgmii_ctl_s {
8413 #ifdef __BIG_ENDIAN_BITFIELD
8414 	uint64_t reserved_1_63                : 63;
8415 	uint64_t align                        : 1;  /**< Align the transmission to even cycles
8416 
8417                                                          Recommended value is:
8418                                                             ALIGN = !GMX_TX_APPEND[PREAMBLE]
8419 
8420                                                          (See the Transmit Conversion to Code groups
8421                                                           section in the SGMII Interface chapter of the
8422                                                           HRM for a complete discussion)
8423 
8424                                                          0 = Data can be sent on any cycle
8425                                                              In this mode, the interface will function at
8426                                                              maximum bandwidth. It is possible to for the
8427                                                              TX PCS machine to drop first byte of the TX
8428                                                              frame.  When GMX_TX_APPEND[PREAMBLE] is set,
8429                                                              the first byte will be a preamble byte which
8430                                                              can be dropped to compensate for an extended
8431                                                              IPG.
8432 
8433                                                          1 = Data will only be sent on even cycles.
8434                                                              In this mode, there can be bandwidth
8435                                                              implications when sending odd-byte packets as
8436                                                              the IPG can extend an extra cycle.
8437                                                              There will be no loss of data.
8438 
8439                                                          (SGMII/1000Base-X only) */
8440 #else
8441 	uint64_t align                        : 1;
8442 	uint64_t reserved_1_63                : 63;
8443 #endif
8444 	} s;
8445 	struct cvmx_gmxx_txx_sgmii_ctl_s      cn52xx;
8446 	struct cvmx_gmxx_txx_sgmii_ctl_s      cn52xxp1;
8447 	struct cvmx_gmxx_txx_sgmii_ctl_s      cn56xx;
8448 	struct cvmx_gmxx_txx_sgmii_ctl_s      cn56xxp1;
8449 	struct cvmx_gmxx_txx_sgmii_ctl_s      cn61xx;
8450 	struct cvmx_gmxx_txx_sgmii_ctl_s      cn63xx;
8451 	struct cvmx_gmxx_txx_sgmii_ctl_s      cn63xxp1;
8452 	struct cvmx_gmxx_txx_sgmii_ctl_s      cn66xx;
8453 	struct cvmx_gmxx_txx_sgmii_ctl_s      cn68xx;
8454 	struct cvmx_gmxx_txx_sgmii_ctl_s      cn68xxp1;
8455 	struct cvmx_gmxx_txx_sgmii_ctl_s      cnf71xx;
8456 };
8457 typedef union cvmx_gmxx_txx_sgmii_ctl cvmx_gmxx_txx_sgmii_ctl_t;
8458 
8459 /**
8460  * cvmx_gmx#_tx#_slot
8461  *
8462  * GMX_TX_SLOT = Packet TX Slottime Counter
8463  *
8464  */
8465 union cvmx_gmxx_txx_slot {
8466 	uint64_t u64;
8467 	struct cvmx_gmxx_txx_slot_s {
8468 #ifdef __BIG_ENDIAN_BITFIELD
8469 	uint64_t reserved_10_63               : 54;
8470 	uint64_t slot                         : 10; /**< Slottime (refer to 802.3 to set correctly)
8471                                                          10/100Mbs: 0x40
8472                                                          1000Mbs:   0x200
8473                                                          (SGMII/1000Base-X only) */
8474 #else
8475 	uint64_t slot                         : 10;
8476 	uint64_t reserved_10_63               : 54;
8477 #endif
8478 	} s;
8479 	struct cvmx_gmxx_txx_slot_s           cn30xx;
8480 	struct cvmx_gmxx_txx_slot_s           cn31xx;
8481 	struct cvmx_gmxx_txx_slot_s           cn38xx;
8482 	struct cvmx_gmxx_txx_slot_s           cn38xxp2;
8483 	struct cvmx_gmxx_txx_slot_s           cn50xx;
8484 	struct cvmx_gmxx_txx_slot_s           cn52xx;
8485 	struct cvmx_gmxx_txx_slot_s           cn52xxp1;
8486 	struct cvmx_gmxx_txx_slot_s           cn56xx;
8487 	struct cvmx_gmxx_txx_slot_s           cn56xxp1;
8488 	struct cvmx_gmxx_txx_slot_s           cn58xx;
8489 	struct cvmx_gmxx_txx_slot_s           cn58xxp1;
8490 	struct cvmx_gmxx_txx_slot_s           cn61xx;
8491 	struct cvmx_gmxx_txx_slot_s           cn63xx;
8492 	struct cvmx_gmxx_txx_slot_s           cn63xxp1;
8493 	struct cvmx_gmxx_txx_slot_s           cn66xx;
8494 	struct cvmx_gmxx_txx_slot_s           cn68xx;
8495 	struct cvmx_gmxx_txx_slot_s           cn68xxp1;
8496 	struct cvmx_gmxx_txx_slot_s           cnf71xx;
8497 };
8498 typedef union cvmx_gmxx_txx_slot cvmx_gmxx_txx_slot_t;
8499 
8500 /**
8501  * cvmx_gmx#_tx#_soft_pause
8502  *
8503  * GMX_TX_SOFT_PAUSE = Packet TX Software Pause
8504  *
8505  */
8506 union cvmx_gmxx_txx_soft_pause {
8507 	uint64_t u64;
8508 	struct cvmx_gmxx_txx_soft_pause_s {
8509 #ifdef __BIG_ENDIAN_BITFIELD
8510 	uint64_t reserved_16_63               : 48;
8511 	uint64_t time                         : 16; /**< Back off the TX bus for (TIME*512) bit-times */
8512 #else
8513 	uint64_t time                         : 16;
8514 	uint64_t reserved_16_63               : 48;
8515 #endif
8516 	} s;
8517 	struct cvmx_gmxx_txx_soft_pause_s     cn30xx;
8518 	struct cvmx_gmxx_txx_soft_pause_s     cn31xx;
8519 	struct cvmx_gmxx_txx_soft_pause_s     cn38xx;
8520 	struct cvmx_gmxx_txx_soft_pause_s     cn38xxp2;
8521 	struct cvmx_gmxx_txx_soft_pause_s     cn50xx;
8522 	struct cvmx_gmxx_txx_soft_pause_s     cn52xx;
8523 	struct cvmx_gmxx_txx_soft_pause_s     cn52xxp1;
8524 	struct cvmx_gmxx_txx_soft_pause_s     cn56xx;
8525 	struct cvmx_gmxx_txx_soft_pause_s     cn56xxp1;
8526 	struct cvmx_gmxx_txx_soft_pause_s     cn58xx;
8527 	struct cvmx_gmxx_txx_soft_pause_s     cn58xxp1;
8528 	struct cvmx_gmxx_txx_soft_pause_s     cn61xx;
8529 	struct cvmx_gmxx_txx_soft_pause_s     cn63xx;
8530 	struct cvmx_gmxx_txx_soft_pause_s     cn63xxp1;
8531 	struct cvmx_gmxx_txx_soft_pause_s     cn66xx;
8532 	struct cvmx_gmxx_txx_soft_pause_s     cn68xx;
8533 	struct cvmx_gmxx_txx_soft_pause_s     cn68xxp1;
8534 	struct cvmx_gmxx_txx_soft_pause_s     cnf71xx;
8535 };
8536 typedef union cvmx_gmxx_txx_soft_pause cvmx_gmxx_txx_soft_pause_t;
8537 
8538 /**
8539  * cvmx_gmx#_tx#_stat0
8540  *
8541  * GMX_TX_STAT0 = GMX_TX_STATS_XSDEF / GMX_TX_STATS_XSCOL
8542  *
8543  *
8544  * Notes:
8545  * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
8546  * - Counters will wrap
8547  */
8548 union cvmx_gmxx_txx_stat0 {
8549 	uint64_t u64;
8550 	struct cvmx_gmxx_txx_stat0_s {
8551 #ifdef __BIG_ENDIAN_BITFIELD
8552 	uint64_t xsdef                        : 32; /**< Number of packets dropped (never successfully
8553                                                          sent) due to excessive deferal
8554                                                          (SGMII/1000Base-X half-duplex only) */
8555 	uint64_t xscol                        : 32; /**< Number of packets dropped (never successfully
8556                                                          sent) due to excessive collision.  Defined by
8557                                                          GMX_TX_COL_ATTEMPT[LIMIT].
8558                                                          (SGMII/1000Base-X half-duplex only) */
8559 #else
8560 	uint64_t xscol                        : 32;
8561 	uint64_t xsdef                        : 32;
8562 #endif
8563 	} s;
8564 	struct cvmx_gmxx_txx_stat0_s          cn30xx;
8565 	struct cvmx_gmxx_txx_stat0_s          cn31xx;
8566 	struct cvmx_gmxx_txx_stat0_s          cn38xx;
8567 	struct cvmx_gmxx_txx_stat0_s          cn38xxp2;
8568 	struct cvmx_gmxx_txx_stat0_s          cn50xx;
8569 	struct cvmx_gmxx_txx_stat0_s          cn52xx;
8570 	struct cvmx_gmxx_txx_stat0_s          cn52xxp1;
8571 	struct cvmx_gmxx_txx_stat0_s          cn56xx;
8572 	struct cvmx_gmxx_txx_stat0_s          cn56xxp1;
8573 	struct cvmx_gmxx_txx_stat0_s          cn58xx;
8574 	struct cvmx_gmxx_txx_stat0_s          cn58xxp1;
8575 	struct cvmx_gmxx_txx_stat0_s          cn61xx;
8576 	struct cvmx_gmxx_txx_stat0_s          cn63xx;
8577 	struct cvmx_gmxx_txx_stat0_s          cn63xxp1;
8578 	struct cvmx_gmxx_txx_stat0_s          cn66xx;
8579 	struct cvmx_gmxx_txx_stat0_s          cn68xx;
8580 	struct cvmx_gmxx_txx_stat0_s          cn68xxp1;
8581 	struct cvmx_gmxx_txx_stat0_s          cnf71xx;
8582 };
8583 typedef union cvmx_gmxx_txx_stat0 cvmx_gmxx_txx_stat0_t;
8584 
8585 /**
8586  * cvmx_gmx#_tx#_stat1
8587  *
8588  * GMX_TX_STAT1 = GMX_TX_STATS_SCOL  / GMX_TX_STATS_MCOL
8589  *
8590  *
8591  * Notes:
8592  * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
8593  * - Counters will wrap
8594  */
8595 union cvmx_gmxx_txx_stat1 {
8596 	uint64_t u64;
8597 	struct cvmx_gmxx_txx_stat1_s {
8598 #ifdef __BIG_ENDIAN_BITFIELD
8599 	uint64_t scol                         : 32; /**< Number of packets sent with a single collision
8600                                                          (SGMII/1000Base-X half-duplex only) */
8601 	uint64_t mcol                         : 32; /**< Number of packets sent with multiple collisions
8602                                                          but < GMX_TX_COL_ATTEMPT[LIMIT].
8603                                                          (SGMII/1000Base-X half-duplex only) */
8604 #else
8605 	uint64_t mcol                         : 32;
8606 	uint64_t scol                         : 32;
8607 #endif
8608 	} s;
8609 	struct cvmx_gmxx_txx_stat1_s          cn30xx;
8610 	struct cvmx_gmxx_txx_stat1_s          cn31xx;
8611 	struct cvmx_gmxx_txx_stat1_s          cn38xx;
8612 	struct cvmx_gmxx_txx_stat1_s          cn38xxp2;
8613 	struct cvmx_gmxx_txx_stat1_s          cn50xx;
8614 	struct cvmx_gmxx_txx_stat1_s          cn52xx;
8615 	struct cvmx_gmxx_txx_stat1_s          cn52xxp1;
8616 	struct cvmx_gmxx_txx_stat1_s          cn56xx;
8617 	struct cvmx_gmxx_txx_stat1_s          cn56xxp1;
8618 	struct cvmx_gmxx_txx_stat1_s          cn58xx;
8619 	struct cvmx_gmxx_txx_stat1_s          cn58xxp1;
8620 	struct cvmx_gmxx_txx_stat1_s          cn61xx;
8621 	struct cvmx_gmxx_txx_stat1_s          cn63xx;
8622 	struct cvmx_gmxx_txx_stat1_s          cn63xxp1;
8623 	struct cvmx_gmxx_txx_stat1_s          cn66xx;
8624 	struct cvmx_gmxx_txx_stat1_s          cn68xx;
8625 	struct cvmx_gmxx_txx_stat1_s          cn68xxp1;
8626 	struct cvmx_gmxx_txx_stat1_s          cnf71xx;
8627 };
8628 typedef union cvmx_gmxx_txx_stat1 cvmx_gmxx_txx_stat1_t;
8629 
8630 /**
8631  * cvmx_gmx#_tx#_stat2
8632  *
8633  * GMX_TX_STAT2 = GMX_TX_STATS_OCTS
8634  *
8635  *
8636  * Notes:
8637  * - Octect counts are the sum of all data transmitted on the wire including
8638  *   packet data, pad bytes, fcs bytes, pause bytes, and jam bytes.  The octect
8639  *   counts do not include PREAMBLE byte or EXTEND cycles.
8640  * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
8641  * - Counters will wrap
8642  */
8643 union cvmx_gmxx_txx_stat2 {
8644 	uint64_t u64;
8645 	struct cvmx_gmxx_txx_stat2_s {
8646 #ifdef __BIG_ENDIAN_BITFIELD
8647 	uint64_t reserved_48_63               : 16;
8648 	uint64_t octs                         : 48; /**< Number of total octets sent on the interface.
8649                                                          Does not count octets from frames that were
8650                                                          truncated due to collisions in halfdup mode. */
8651 #else
8652 	uint64_t octs                         : 48;
8653 	uint64_t reserved_48_63               : 16;
8654 #endif
8655 	} s;
8656 	struct cvmx_gmxx_txx_stat2_s          cn30xx;
8657 	struct cvmx_gmxx_txx_stat2_s          cn31xx;
8658 	struct cvmx_gmxx_txx_stat2_s          cn38xx;
8659 	struct cvmx_gmxx_txx_stat2_s          cn38xxp2;
8660 	struct cvmx_gmxx_txx_stat2_s          cn50xx;
8661 	struct cvmx_gmxx_txx_stat2_s          cn52xx;
8662 	struct cvmx_gmxx_txx_stat2_s          cn52xxp1;
8663 	struct cvmx_gmxx_txx_stat2_s          cn56xx;
8664 	struct cvmx_gmxx_txx_stat2_s          cn56xxp1;
8665 	struct cvmx_gmxx_txx_stat2_s          cn58xx;
8666 	struct cvmx_gmxx_txx_stat2_s          cn58xxp1;
8667 	struct cvmx_gmxx_txx_stat2_s          cn61xx;
8668 	struct cvmx_gmxx_txx_stat2_s          cn63xx;
8669 	struct cvmx_gmxx_txx_stat2_s          cn63xxp1;
8670 	struct cvmx_gmxx_txx_stat2_s          cn66xx;
8671 	struct cvmx_gmxx_txx_stat2_s          cn68xx;
8672 	struct cvmx_gmxx_txx_stat2_s          cn68xxp1;
8673 	struct cvmx_gmxx_txx_stat2_s          cnf71xx;
8674 };
8675 typedef union cvmx_gmxx_txx_stat2 cvmx_gmxx_txx_stat2_t;
8676 
8677 /**
8678  * cvmx_gmx#_tx#_stat3
8679  *
8680  * GMX_TX_STAT3 = GMX_TX_STATS_PKTS
8681  *
8682  *
8683  * Notes:
8684  * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
8685  * - Counters will wrap
8686  */
8687 union cvmx_gmxx_txx_stat3 {
8688 	uint64_t u64;
8689 	struct cvmx_gmxx_txx_stat3_s {
8690 #ifdef __BIG_ENDIAN_BITFIELD
8691 	uint64_t reserved_32_63               : 32;
8692 	uint64_t pkts                         : 32; /**< Number of total frames sent on the interface.
8693                                                          Does not count frames that were truncated due to
8694                                                           collisions in halfdup mode. */
8695 #else
8696 	uint64_t pkts                         : 32;
8697 	uint64_t reserved_32_63               : 32;
8698 #endif
8699 	} s;
8700 	struct cvmx_gmxx_txx_stat3_s          cn30xx;
8701 	struct cvmx_gmxx_txx_stat3_s          cn31xx;
8702 	struct cvmx_gmxx_txx_stat3_s          cn38xx;
8703 	struct cvmx_gmxx_txx_stat3_s          cn38xxp2;
8704 	struct cvmx_gmxx_txx_stat3_s          cn50xx;
8705 	struct cvmx_gmxx_txx_stat3_s          cn52xx;
8706 	struct cvmx_gmxx_txx_stat3_s          cn52xxp1;
8707 	struct cvmx_gmxx_txx_stat3_s          cn56xx;
8708 	struct cvmx_gmxx_txx_stat3_s          cn56xxp1;
8709 	struct cvmx_gmxx_txx_stat3_s          cn58xx;
8710 	struct cvmx_gmxx_txx_stat3_s          cn58xxp1;
8711 	struct cvmx_gmxx_txx_stat3_s          cn61xx;
8712 	struct cvmx_gmxx_txx_stat3_s          cn63xx;
8713 	struct cvmx_gmxx_txx_stat3_s          cn63xxp1;
8714 	struct cvmx_gmxx_txx_stat3_s          cn66xx;
8715 	struct cvmx_gmxx_txx_stat3_s          cn68xx;
8716 	struct cvmx_gmxx_txx_stat3_s          cn68xxp1;
8717 	struct cvmx_gmxx_txx_stat3_s          cnf71xx;
8718 };
8719 typedef union cvmx_gmxx_txx_stat3 cvmx_gmxx_txx_stat3_t;
8720 
8721 /**
8722  * cvmx_gmx#_tx#_stat4
8723  *
8724  * GMX_TX_STAT4 = GMX_TX_STATS_HIST1 (64) / GMX_TX_STATS_HIST0 (<64)
8725  *
8726  *
8727  * Notes:
8728  * - Packet length is the sum of all data transmitted on the wire for the given
8729  *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
8730  *   bytes.  The octect counts do not include PREAMBLE byte or EXTEND cycles.
8731  * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
8732  * - Counters will wrap
8733  */
8734 union cvmx_gmxx_txx_stat4 {
8735 	uint64_t u64;
8736 	struct cvmx_gmxx_txx_stat4_s {
8737 #ifdef __BIG_ENDIAN_BITFIELD
8738 	uint64_t hist1                        : 32; /**< Number of packets sent with an octet count of 64. */
8739 	uint64_t hist0                        : 32; /**< Number of packets sent with an octet count
8740                                                          of < 64. */
8741 #else
8742 	uint64_t hist0                        : 32;
8743 	uint64_t hist1                        : 32;
8744 #endif
8745 	} s;
8746 	struct cvmx_gmxx_txx_stat4_s          cn30xx;
8747 	struct cvmx_gmxx_txx_stat4_s          cn31xx;
8748 	struct cvmx_gmxx_txx_stat4_s          cn38xx;
8749 	struct cvmx_gmxx_txx_stat4_s          cn38xxp2;
8750 	struct cvmx_gmxx_txx_stat4_s          cn50xx;
8751 	struct cvmx_gmxx_txx_stat4_s          cn52xx;
8752 	struct cvmx_gmxx_txx_stat4_s          cn52xxp1;
8753 	struct cvmx_gmxx_txx_stat4_s          cn56xx;
8754 	struct cvmx_gmxx_txx_stat4_s          cn56xxp1;
8755 	struct cvmx_gmxx_txx_stat4_s          cn58xx;
8756 	struct cvmx_gmxx_txx_stat4_s          cn58xxp1;
8757 	struct cvmx_gmxx_txx_stat4_s          cn61xx;
8758 	struct cvmx_gmxx_txx_stat4_s          cn63xx;
8759 	struct cvmx_gmxx_txx_stat4_s          cn63xxp1;
8760 	struct cvmx_gmxx_txx_stat4_s          cn66xx;
8761 	struct cvmx_gmxx_txx_stat4_s          cn68xx;
8762 	struct cvmx_gmxx_txx_stat4_s          cn68xxp1;
8763 	struct cvmx_gmxx_txx_stat4_s          cnf71xx;
8764 };
8765 typedef union cvmx_gmxx_txx_stat4 cvmx_gmxx_txx_stat4_t;
8766 
8767 /**
8768  * cvmx_gmx#_tx#_stat5
8769  *
8770  * GMX_TX_STAT5 = GMX_TX_STATS_HIST3 (128- 255) / GMX_TX_STATS_HIST2 (65- 127)
8771  *
8772  *
8773  * Notes:
8774  * - Packet length is the sum of all data transmitted on the wire for the given
8775  *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
8776  *   bytes.  The octect counts do not include PREAMBLE byte or EXTEND cycles.
8777  * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
8778  * - Counters will wrap
8779  */
8780 union cvmx_gmxx_txx_stat5 {
8781 	uint64_t u64;
8782 	struct cvmx_gmxx_txx_stat5_s {
8783 #ifdef __BIG_ENDIAN_BITFIELD
8784 	uint64_t hist3                        : 32; /**< Number of packets sent with an octet count of
8785                                                          128 - 255. */
8786 	uint64_t hist2                        : 32; /**< Number of packets sent with an octet count of
8787                                                          65 - 127. */
8788 #else
8789 	uint64_t hist2                        : 32;
8790 	uint64_t hist3                        : 32;
8791 #endif
8792 	} s;
8793 	struct cvmx_gmxx_txx_stat5_s          cn30xx;
8794 	struct cvmx_gmxx_txx_stat5_s          cn31xx;
8795 	struct cvmx_gmxx_txx_stat5_s          cn38xx;
8796 	struct cvmx_gmxx_txx_stat5_s          cn38xxp2;
8797 	struct cvmx_gmxx_txx_stat5_s          cn50xx;
8798 	struct cvmx_gmxx_txx_stat5_s          cn52xx;
8799 	struct cvmx_gmxx_txx_stat5_s          cn52xxp1;
8800 	struct cvmx_gmxx_txx_stat5_s          cn56xx;
8801 	struct cvmx_gmxx_txx_stat5_s          cn56xxp1;
8802 	struct cvmx_gmxx_txx_stat5_s          cn58xx;
8803 	struct cvmx_gmxx_txx_stat5_s          cn58xxp1;
8804 	struct cvmx_gmxx_txx_stat5_s          cn61xx;
8805 	struct cvmx_gmxx_txx_stat5_s          cn63xx;
8806 	struct cvmx_gmxx_txx_stat5_s          cn63xxp1;
8807 	struct cvmx_gmxx_txx_stat5_s          cn66xx;
8808 	struct cvmx_gmxx_txx_stat5_s          cn68xx;
8809 	struct cvmx_gmxx_txx_stat5_s          cn68xxp1;
8810 	struct cvmx_gmxx_txx_stat5_s          cnf71xx;
8811 };
8812 typedef union cvmx_gmxx_txx_stat5 cvmx_gmxx_txx_stat5_t;
8813 
8814 /**
8815  * cvmx_gmx#_tx#_stat6
8816  *
8817  * GMX_TX_STAT6 = GMX_TX_STATS_HIST5 (512-1023) / GMX_TX_STATS_HIST4 (256-511)
8818  *
8819  *
8820  * Notes:
8821  * - Packet length is the sum of all data transmitted on the wire for the given
8822  *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
8823  *   bytes.  The octect counts do not include PREAMBLE byte or EXTEND cycles.
8824  * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
8825  * - Counters will wrap
8826  */
8827 union cvmx_gmxx_txx_stat6 {
8828 	uint64_t u64;
8829 	struct cvmx_gmxx_txx_stat6_s {
8830 #ifdef __BIG_ENDIAN_BITFIELD
8831 	uint64_t hist5                        : 32; /**< Number of packets sent with an octet count of
8832                                                          512 - 1023. */
8833 	uint64_t hist4                        : 32; /**< Number of packets sent with an octet count of
8834                                                          256 - 511. */
8835 #else
8836 	uint64_t hist4                        : 32;
8837 	uint64_t hist5                        : 32;
8838 #endif
8839 	} s;
8840 	struct cvmx_gmxx_txx_stat6_s          cn30xx;
8841 	struct cvmx_gmxx_txx_stat6_s          cn31xx;
8842 	struct cvmx_gmxx_txx_stat6_s          cn38xx;
8843 	struct cvmx_gmxx_txx_stat6_s          cn38xxp2;
8844 	struct cvmx_gmxx_txx_stat6_s          cn50xx;
8845 	struct cvmx_gmxx_txx_stat6_s          cn52xx;
8846 	struct cvmx_gmxx_txx_stat6_s          cn52xxp1;
8847 	struct cvmx_gmxx_txx_stat6_s          cn56xx;
8848 	struct cvmx_gmxx_txx_stat6_s          cn56xxp1;
8849 	struct cvmx_gmxx_txx_stat6_s          cn58xx;
8850 	struct cvmx_gmxx_txx_stat6_s          cn58xxp1;
8851 	struct cvmx_gmxx_txx_stat6_s          cn61xx;
8852 	struct cvmx_gmxx_txx_stat6_s          cn63xx;
8853 	struct cvmx_gmxx_txx_stat6_s          cn63xxp1;
8854 	struct cvmx_gmxx_txx_stat6_s          cn66xx;
8855 	struct cvmx_gmxx_txx_stat6_s          cn68xx;
8856 	struct cvmx_gmxx_txx_stat6_s          cn68xxp1;
8857 	struct cvmx_gmxx_txx_stat6_s          cnf71xx;
8858 };
8859 typedef union cvmx_gmxx_txx_stat6 cvmx_gmxx_txx_stat6_t;
8860 
8861 /**
8862  * cvmx_gmx#_tx#_stat7
8863  *
8864  * GMX_TX_STAT7 = GMX_TX_STATS_HIST7 (1024-1518) / GMX_TX_STATS_HIST6 (>1518)
8865  *
8866  *
8867  * Notes:
8868  * - Packet length is the sum of all data transmitted on the wire for the given
8869  *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
8870  *   bytes.  The octect counts do not include PREAMBLE byte or EXTEND cycles.
8871  * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
8872  * - Counters will wrap
8873  */
8874 union cvmx_gmxx_txx_stat7 {
8875 	uint64_t u64;
8876 	struct cvmx_gmxx_txx_stat7_s {
8877 #ifdef __BIG_ENDIAN_BITFIELD
8878 	uint64_t hist7                        : 32; /**< Number of packets sent with an octet count
8879                                                          of > 1518. */
8880 	uint64_t hist6                        : 32; /**< Number of packets sent with an octet count of
8881                                                          1024 - 1518. */
8882 #else
8883 	uint64_t hist6                        : 32;
8884 	uint64_t hist7                        : 32;
8885 #endif
8886 	} s;
8887 	struct cvmx_gmxx_txx_stat7_s          cn30xx;
8888 	struct cvmx_gmxx_txx_stat7_s          cn31xx;
8889 	struct cvmx_gmxx_txx_stat7_s          cn38xx;
8890 	struct cvmx_gmxx_txx_stat7_s          cn38xxp2;
8891 	struct cvmx_gmxx_txx_stat7_s          cn50xx;
8892 	struct cvmx_gmxx_txx_stat7_s          cn52xx;
8893 	struct cvmx_gmxx_txx_stat7_s          cn52xxp1;
8894 	struct cvmx_gmxx_txx_stat7_s          cn56xx;
8895 	struct cvmx_gmxx_txx_stat7_s          cn56xxp1;
8896 	struct cvmx_gmxx_txx_stat7_s          cn58xx;
8897 	struct cvmx_gmxx_txx_stat7_s          cn58xxp1;
8898 	struct cvmx_gmxx_txx_stat7_s          cn61xx;
8899 	struct cvmx_gmxx_txx_stat7_s          cn63xx;
8900 	struct cvmx_gmxx_txx_stat7_s          cn63xxp1;
8901 	struct cvmx_gmxx_txx_stat7_s          cn66xx;
8902 	struct cvmx_gmxx_txx_stat7_s          cn68xx;
8903 	struct cvmx_gmxx_txx_stat7_s          cn68xxp1;
8904 	struct cvmx_gmxx_txx_stat7_s          cnf71xx;
8905 };
8906 typedef union cvmx_gmxx_txx_stat7 cvmx_gmxx_txx_stat7_t;
8907 
8908 /**
8909  * cvmx_gmx#_tx#_stat8
8910  *
8911  * GMX_TX_STAT8 = GMX_TX_STATS_MCST  / GMX_TX_STATS_BCST
8912  *
8913  *
8914  * Notes:
8915  * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
8916  * - Counters will wrap
8917  * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the
8918  *   packet.  GMX assumes that the DMAC lies in the first 6 bytes of the packet
8919  *   as per the 802.3 frame definition.  If the system requires additional data
8920  *   before the L2 header, then the MCST and BCST counters may not reflect
8921  *   reality and should be ignored by software.
8922  */
8923 union cvmx_gmxx_txx_stat8 {
8924 	uint64_t u64;
8925 	struct cvmx_gmxx_txx_stat8_s {
8926 #ifdef __BIG_ENDIAN_BITFIELD
8927 	uint64_t mcst                         : 32; /**< Number of packets sent to multicast DMAC.
8928                                                          Does not include BCST packets. */
8929 	uint64_t bcst                         : 32; /**< Number of packets sent to broadcast DMAC.
8930                                                          Does not include MCST packets. */
8931 #else
8932 	uint64_t bcst                         : 32;
8933 	uint64_t mcst                         : 32;
8934 #endif
8935 	} s;
8936 	struct cvmx_gmxx_txx_stat8_s          cn30xx;
8937 	struct cvmx_gmxx_txx_stat8_s          cn31xx;
8938 	struct cvmx_gmxx_txx_stat8_s          cn38xx;
8939 	struct cvmx_gmxx_txx_stat8_s          cn38xxp2;
8940 	struct cvmx_gmxx_txx_stat8_s          cn50xx;
8941 	struct cvmx_gmxx_txx_stat8_s          cn52xx;
8942 	struct cvmx_gmxx_txx_stat8_s          cn52xxp1;
8943 	struct cvmx_gmxx_txx_stat8_s          cn56xx;
8944 	struct cvmx_gmxx_txx_stat8_s          cn56xxp1;
8945 	struct cvmx_gmxx_txx_stat8_s          cn58xx;
8946 	struct cvmx_gmxx_txx_stat8_s          cn58xxp1;
8947 	struct cvmx_gmxx_txx_stat8_s          cn61xx;
8948 	struct cvmx_gmxx_txx_stat8_s          cn63xx;
8949 	struct cvmx_gmxx_txx_stat8_s          cn63xxp1;
8950 	struct cvmx_gmxx_txx_stat8_s          cn66xx;
8951 	struct cvmx_gmxx_txx_stat8_s          cn68xx;
8952 	struct cvmx_gmxx_txx_stat8_s          cn68xxp1;
8953 	struct cvmx_gmxx_txx_stat8_s          cnf71xx;
8954 };
8955 typedef union cvmx_gmxx_txx_stat8 cvmx_gmxx_txx_stat8_t;
8956 
8957 /**
8958  * cvmx_gmx#_tx#_stat9
8959  *
8960  * GMX_TX_STAT9 = GMX_TX_STATS_UNDFLW / GMX_TX_STATS_CTL
8961  *
8962  *
8963  * Notes:
8964  * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
8965  * - Counters will wrap
8966  */
8967 union cvmx_gmxx_txx_stat9 {
8968 	uint64_t u64;
8969 	struct cvmx_gmxx_txx_stat9_s {
8970 #ifdef __BIG_ENDIAN_BITFIELD
8971 	uint64_t undflw                       : 32; /**< Number of underflow packets */
8972 	uint64_t ctl                          : 32; /**< Number of Control packets (PAUSE flow control)
8973                                                          generated by GMX.  It does not include control
8974                                                          packets forwarded or generated by the PP's.
8975                                                          CTL will count the number of generated PFC frames.
8976                                                          CTL will not track the number of generated HG2
8977                                                          messages. */
8978 #else
8979 	uint64_t ctl                          : 32;
8980 	uint64_t undflw                       : 32;
8981 #endif
8982 	} s;
8983 	struct cvmx_gmxx_txx_stat9_s          cn30xx;
8984 	struct cvmx_gmxx_txx_stat9_s          cn31xx;
8985 	struct cvmx_gmxx_txx_stat9_s          cn38xx;
8986 	struct cvmx_gmxx_txx_stat9_s          cn38xxp2;
8987 	struct cvmx_gmxx_txx_stat9_s          cn50xx;
8988 	struct cvmx_gmxx_txx_stat9_s          cn52xx;
8989 	struct cvmx_gmxx_txx_stat9_s          cn52xxp1;
8990 	struct cvmx_gmxx_txx_stat9_s          cn56xx;
8991 	struct cvmx_gmxx_txx_stat9_s          cn56xxp1;
8992 	struct cvmx_gmxx_txx_stat9_s          cn58xx;
8993 	struct cvmx_gmxx_txx_stat9_s          cn58xxp1;
8994 	struct cvmx_gmxx_txx_stat9_s          cn61xx;
8995 	struct cvmx_gmxx_txx_stat9_s          cn63xx;
8996 	struct cvmx_gmxx_txx_stat9_s          cn63xxp1;
8997 	struct cvmx_gmxx_txx_stat9_s          cn66xx;
8998 	struct cvmx_gmxx_txx_stat9_s          cn68xx;
8999 	struct cvmx_gmxx_txx_stat9_s          cn68xxp1;
9000 	struct cvmx_gmxx_txx_stat9_s          cnf71xx;
9001 };
9002 typedef union cvmx_gmxx_txx_stat9 cvmx_gmxx_txx_stat9_t;
9003 
9004 /**
9005  * cvmx_gmx#_tx#_stats_ctl
9006  *
9007  * GMX_TX_STATS_CTL = TX Stats Control register
9008  *
9009  */
9010 union cvmx_gmxx_txx_stats_ctl {
9011 	uint64_t u64;
9012 	struct cvmx_gmxx_txx_stats_ctl_s {
9013 #ifdef __BIG_ENDIAN_BITFIELD
9014 	uint64_t reserved_1_63                : 63;
9015 	uint64_t rd_clr                       : 1;  /**< Stats registers will clear on reads */
9016 #else
9017 	uint64_t rd_clr                       : 1;
9018 	uint64_t reserved_1_63                : 63;
9019 #endif
9020 	} s;
9021 	struct cvmx_gmxx_txx_stats_ctl_s      cn30xx;
9022 	struct cvmx_gmxx_txx_stats_ctl_s      cn31xx;
9023 	struct cvmx_gmxx_txx_stats_ctl_s      cn38xx;
9024 	struct cvmx_gmxx_txx_stats_ctl_s      cn38xxp2;
9025 	struct cvmx_gmxx_txx_stats_ctl_s      cn50xx;
9026 	struct cvmx_gmxx_txx_stats_ctl_s      cn52xx;
9027 	struct cvmx_gmxx_txx_stats_ctl_s      cn52xxp1;
9028 	struct cvmx_gmxx_txx_stats_ctl_s      cn56xx;
9029 	struct cvmx_gmxx_txx_stats_ctl_s      cn56xxp1;
9030 	struct cvmx_gmxx_txx_stats_ctl_s      cn58xx;
9031 	struct cvmx_gmxx_txx_stats_ctl_s      cn58xxp1;
9032 	struct cvmx_gmxx_txx_stats_ctl_s      cn61xx;
9033 	struct cvmx_gmxx_txx_stats_ctl_s      cn63xx;
9034 	struct cvmx_gmxx_txx_stats_ctl_s      cn63xxp1;
9035 	struct cvmx_gmxx_txx_stats_ctl_s      cn66xx;
9036 	struct cvmx_gmxx_txx_stats_ctl_s      cn68xx;
9037 	struct cvmx_gmxx_txx_stats_ctl_s      cn68xxp1;
9038 	struct cvmx_gmxx_txx_stats_ctl_s      cnf71xx;
9039 };
9040 typedef union cvmx_gmxx_txx_stats_ctl cvmx_gmxx_txx_stats_ctl_t;
9041 
9042 /**
9043  * cvmx_gmx#_tx#_thresh
9044  *
9045  * Per Port
9046  *
9047  *
9048  * GMX_TX_THRESH = Packet TX Threshold
9049  *
9050  * Notes:
9051  * In XAUI mode, prt0 is used for checking.  Since XAUI mode uses a single TX FIFO and is higher data rate, recommended value is 0x100.
9052  *
9053  */
9054 union cvmx_gmxx_txx_thresh {
9055 	uint64_t u64;
9056 	struct cvmx_gmxx_txx_thresh_s {
9057 #ifdef __BIG_ENDIAN_BITFIELD
9058 	uint64_t reserved_10_63               : 54;
9059 	uint64_t cnt                          : 10; /**< Number of 16B ticks to accumulate in the TX FIFO
9060                                                          before sending on the packet interface
9061                                                          This register should be large enough to prevent
9062                                                          underflow on the packet interface and must never
9063                                                          be set to zero.  This register cannot exceed the
9064                                                          the TX FIFO depth which is...
9065                                                           GMX_TX_PRTS==0,1:  CNT MAX = 0x100
9066                                                           GMX_TX_PRTS==2  :  CNT MAX = 0x080
9067                                                           GMX_TX_PRTS==3,4:  CNT MAX = 0x040 */
9068 #else
9069 	uint64_t cnt                          : 10;
9070 	uint64_t reserved_10_63               : 54;
9071 #endif
9072 	} s;
9073 	struct cvmx_gmxx_txx_thresh_cn30xx {
9074 #ifdef __BIG_ENDIAN_BITFIELD
9075 	uint64_t reserved_7_63                : 57;
9076 	uint64_t cnt                          : 7;  /**< Number of 16B ticks to accumulate in the TX FIFO
9077                                                          before sending on the RGMII interface
9078                                                          This register should be large enough to prevent
9079                                                          underflow on the RGMII interface and must never
9080                                                          be set below 4.  This register cannot exceed the
9081                                                          the TX FIFO depth which is 64 16B entries. */
9082 #else
9083 	uint64_t cnt                          : 7;
9084 	uint64_t reserved_7_63                : 57;
9085 #endif
9086 	} cn30xx;
9087 	struct cvmx_gmxx_txx_thresh_cn30xx    cn31xx;
9088 	struct cvmx_gmxx_txx_thresh_cn38xx {
9089 #ifdef __BIG_ENDIAN_BITFIELD
9090 	uint64_t reserved_9_63                : 55;
9091 	uint64_t cnt                          : 9;  /**< Number of 16B ticks to accumulate in the TX FIFO
9092                                                           before sending on the RGMII interface
9093                                                           This register should be large enough to prevent
9094                                                           underflow on the RGMII interface and must never
9095                                                           be set to zero.  This register cannot exceed the
9096                                                           the TX FIFO depth which is...
9097                                                            GMX_TX_PRTS==0,1:  CNT MAX = 0x100
9098                                                            GMX_TX_PRTS==2  :  CNT MAX = 0x080
9099                                                            GMX_TX_PRTS==3,4:  CNT MAX = 0x040
9100                                                          (PASS2 expands from 6 to 9 bits) */
9101 #else
9102 	uint64_t cnt                          : 9;
9103 	uint64_t reserved_9_63                : 55;
9104 #endif
9105 	} cn38xx;
9106 	struct cvmx_gmxx_txx_thresh_cn38xx    cn38xxp2;
9107 	struct cvmx_gmxx_txx_thresh_cn30xx    cn50xx;
9108 	struct cvmx_gmxx_txx_thresh_cn38xx    cn52xx;
9109 	struct cvmx_gmxx_txx_thresh_cn38xx    cn52xxp1;
9110 	struct cvmx_gmxx_txx_thresh_cn38xx    cn56xx;
9111 	struct cvmx_gmxx_txx_thresh_cn38xx    cn56xxp1;
9112 	struct cvmx_gmxx_txx_thresh_cn38xx    cn58xx;
9113 	struct cvmx_gmxx_txx_thresh_cn38xx    cn58xxp1;
9114 	struct cvmx_gmxx_txx_thresh_cn38xx    cn61xx;
9115 	struct cvmx_gmxx_txx_thresh_cn38xx    cn63xx;
9116 	struct cvmx_gmxx_txx_thresh_cn38xx    cn63xxp1;
9117 	struct cvmx_gmxx_txx_thresh_cn38xx    cn66xx;
9118 	struct cvmx_gmxx_txx_thresh_s         cn68xx;
9119 	struct cvmx_gmxx_txx_thresh_s         cn68xxp1;
9120 	struct cvmx_gmxx_txx_thresh_cn38xx    cnf71xx;
9121 };
9122 typedef union cvmx_gmxx_txx_thresh cvmx_gmxx_txx_thresh_t;
9123 
9124 /**
9125  * cvmx_gmx#_tx_bp
9126  *
9127  * GMX_TX_BP = Packet Interface TX BackPressure Register
9128  *
9129  *
9130  * Notes:
9131  * In XAUI mode, only the lsb (corresponding to port0) of BP is used.
9132  *
9133  */
9134 union cvmx_gmxx_tx_bp {
9135 	uint64_t u64;
9136 	struct cvmx_gmxx_tx_bp_s {
9137 #ifdef __BIG_ENDIAN_BITFIELD
9138 	uint64_t reserved_4_63                : 60;
9139 	uint64_t bp                           : 4;  /**< Per port BackPressure status
9140                                                          0=Port is available
9141                                                          1=Port should be back pressured */
9142 #else
9143 	uint64_t bp                           : 4;
9144 	uint64_t reserved_4_63                : 60;
9145 #endif
9146 	} s;
9147 	struct cvmx_gmxx_tx_bp_cn30xx {
9148 #ifdef __BIG_ENDIAN_BITFIELD
9149 	uint64_t reserved_3_63                : 61;
9150 	uint64_t bp                           : 3;  /**< Per port BackPressure status
9151                                                          0=Port is available
9152                                                          1=Port should be back pressured */
9153 #else
9154 	uint64_t bp                           : 3;
9155 	uint64_t reserved_3_63                : 61;
9156 #endif
9157 	} cn30xx;
9158 	struct cvmx_gmxx_tx_bp_cn30xx         cn31xx;
9159 	struct cvmx_gmxx_tx_bp_s              cn38xx;
9160 	struct cvmx_gmxx_tx_bp_s              cn38xxp2;
9161 	struct cvmx_gmxx_tx_bp_cn30xx         cn50xx;
9162 	struct cvmx_gmxx_tx_bp_s              cn52xx;
9163 	struct cvmx_gmxx_tx_bp_s              cn52xxp1;
9164 	struct cvmx_gmxx_tx_bp_s              cn56xx;
9165 	struct cvmx_gmxx_tx_bp_s              cn56xxp1;
9166 	struct cvmx_gmxx_tx_bp_s              cn58xx;
9167 	struct cvmx_gmxx_tx_bp_s              cn58xxp1;
9168 	struct cvmx_gmxx_tx_bp_s              cn61xx;
9169 	struct cvmx_gmxx_tx_bp_s              cn63xx;
9170 	struct cvmx_gmxx_tx_bp_s              cn63xxp1;
9171 	struct cvmx_gmxx_tx_bp_s              cn66xx;
9172 	struct cvmx_gmxx_tx_bp_s              cn68xx;
9173 	struct cvmx_gmxx_tx_bp_s              cn68xxp1;
9174 	struct cvmx_gmxx_tx_bp_cnf71xx {
9175 #ifdef __BIG_ENDIAN_BITFIELD
9176 	uint64_t reserved_2_63                : 62;
9177 	uint64_t bp                           : 2;  /**< Per port BackPressure status
9178                                                          0=Port is available
9179                                                          1=Port should be back pressured */
9180 #else
9181 	uint64_t bp                           : 2;
9182 	uint64_t reserved_2_63                : 62;
9183 #endif
9184 	} cnf71xx;
9185 };
9186 typedef union cvmx_gmxx_tx_bp cvmx_gmxx_tx_bp_t;
9187 
9188 /**
9189  * cvmx_gmx#_tx_clk_msk#
9190  *
9191  * GMX_TX_CLK_MSK = GMX Clock Select
9192  *
9193  */
9194 union cvmx_gmxx_tx_clk_mskx {
9195 	uint64_t u64;
9196 	struct cvmx_gmxx_tx_clk_mskx_s {
9197 #ifdef __BIG_ENDIAN_BITFIELD
9198 	uint64_t reserved_1_63                : 63;
9199 	uint64_t msk                          : 1;  /**< Write this bit to a 1 when switching clks */
9200 #else
9201 	uint64_t msk                          : 1;
9202 	uint64_t reserved_1_63                : 63;
9203 #endif
9204 	} s;
9205 	struct cvmx_gmxx_tx_clk_mskx_s        cn30xx;
9206 	struct cvmx_gmxx_tx_clk_mskx_s        cn50xx;
9207 };
9208 typedef union cvmx_gmxx_tx_clk_mskx cvmx_gmxx_tx_clk_mskx_t;
9209 
9210 /**
9211  * cvmx_gmx#_tx_col_attempt
9212  *
9213  * GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame
9214  *
9215  */
9216 union cvmx_gmxx_tx_col_attempt {
9217 	uint64_t u64;
9218 	struct cvmx_gmxx_tx_col_attempt_s {
9219 #ifdef __BIG_ENDIAN_BITFIELD
9220 	uint64_t reserved_5_63                : 59;
9221 	uint64_t limit                        : 5;  /**< Collision Attempts
9222                                                          (SGMII/1000Base-X half-duplex only) */
9223 #else
9224 	uint64_t limit                        : 5;
9225 	uint64_t reserved_5_63                : 59;
9226 #endif
9227 	} s;
9228 	struct cvmx_gmxx_tx_col_attempt_s     cn30xx;
9229 	struct cvmx_gmxx_tx_col_attempt_s     cn31xx;
9230 	struct cvmx_gmxx_tx_col_attempt_s     cn38xx;
9231 	struct cvmx_gmxx_tx_col_attempt_s     cn38xxp2;
9232 	struct cvmx_gmxx_tx_col_attempt_s     cn50xx;
9233 	struct cvmx_gmxx_tx_col_attempt_s     cn52xx;
9234 	struct cvmx_gmxx_tx_col_attempt_s     cn52xxp1;
9235 	struct cvmx_gmxx_tx_col_attempt_s     cn56xx;
9236 	struct cvmx_gmxx_tx_col_attempt_s     cn56xxp1;
9237 	struct cvmx_gmxx_tx_col_attempt_s     cn58xx;
9238 	struct cvmx_gmxx_tx_col_attempt_s     cn58xxp1;
9239 	struct cvmx_gmxx_tx_col_attempt_s     cn61xx;
9240 	struct cvmx_gmxx_tx_col_attempt_s     cn63xx;
9241 	struct cvmx_gmxx_tx_col_attempt_s     cn63xxp1;
9242 	struct cvmx_gmxx_tx_col_attempt_s     cn66xx;
9243 	struct cvmx_gmxx_tx_col_attempt_s     cn68xx;
9244 	struct cvmx_gmxx_tx_col_attempt_s     cn68xxp1;
9245 	struct cvmx_gmxx_tx_col_attempt_s     cnf71xx;
9246 };
9247 typedef union cvmx_gmxx_tx_col_attempt cvmx_gmxx_tx_col_attempt_t;
9248 
9249 /**
9250  * cvmx_gmx#_tx_corrupt
9251  *
9252  * GMX_TX_CORRUPT = TX - Corrupt TX packets with the ERR bit set
9253  *
9254  *
9255  * Notes:
9256  * Packets sent from PKO with the ERR wire asserted will be corrupted by
9257  * the transmitter if CORRUPT[prt] is set (XAUI uses prt==0).
9258  *
9259  * Corruption means that GMX will send a bad FCS value.  If GMX_TX_APPEND[FCS]
9260  * is clear then no FCS is sent and the GMX cannot corrupt it.  The corrupt FCS
9261  * value is 0xeeeeeeee for SGMII/1000Base-X and 4 bytes of the error
9262  * propagation code in XAUI mode.
9263  */
9264 union cvmx_gmxx_tx_corrupt {
9265 	uint64_t u64;
9266 	struct cvmx_gmxx_tx_corrupt_s {
9267 #ifdef __BIG_ENDIAN_BITFIELD
9268 	uint64_t reserved_4_63                : 60;
9269 	uint64_t corrupt                      : 4;  /**< Per port error propagation
9270                                                          0=Never corrupt packets
9271                                                          1=Corrupt packets with ERR */
9272 #else
9273 	uint64_t corrupt                      : 4;
9274 	uint64_t reserved_4_63                : 60;
9275 #endif
9276 	} s;
9277 	struct cvmx_gmxx_tx_corrupt_cn30xx {
9278 #ifdef __BIG_ENDIAN_BITFIELD
9279 	uint64_t reserved_3_63                : 61;
9280 	uint64_t corrupt                      : 3;  /**< Per port error propagation
9281                                                          0=Never corrupt packets
9282                                                          1=Corrupt packets with ERR */
9283 #else
9284 	uint64_t corrupt                      : 3;
9285 	uint64_t reserved_3_63                : 61;
9286 #endif
9287 	} cn30xx;
9288 	struct cvmx_gmxx_tx_corrupt_cn30xx    cn31xx;
9289 	struct cvmx_gmxx_tx_corrupt_s         cn38xx;
9290 	struct cvmx_gmxx_tx_corrupt_s         cn38xxp2;
9291 	struct cvmx_gmxx_tx_corrupt_cn30xx    cn50xx;
9292 	struct cvmx_gmxx_tx_corrupt_s         cn52xx;
9293 	struct cvmx_gmxx_tx_corrupt_s         cn52xxp1;
9294 	struct cvmx_gmxx_tx_corrupt_s         cn56xx;
9295 	struct cvmx_gmxx_tx_corrupt_s         cn56xxp1;
9296 	struct cvmx_gmxx_tx_corrupt_s         cn58xx;
9297 	struct cvmx_gmxx_tx_corrupt_s         cn58xxp1;
9298 	struct cvmx_gmxx_tx_corrupt_s         cn61xx;
9299 	struct cvmx_gmxx_tx_corrupt_s         cn63xx;
9300 	struct cvmx_gmxx_tx_corrupt_s         cn63xxp1;
9301 	struct cvmx_gmxx_tx_corrupt_s         cn66xx;
9302 	struct cvmx_gmxx_tx_corrupt_s         cn68xx;
9303 	struct cvmx_gmxx_tx_corrupt_s         cn68xxp1;
9304 	struct cvmx_gmxx_tx_corrupt_cnf71xx {
9305 #ifdef __BIG_ENDIAN_BITFIELD
9306 	uint64_t reserved_2_63                : 62;
9307 	uint64_t corrupt                      : 2;  /**< Per port error propagation
9308                                                          0=Never corrupt packets
9309                                                          1=Corrupt packets with ERR */
9310 #else
9311 	uint64_t corrupt                      : 2;
9312 	uint64_t reserved_2_63                : 62;
9313 #endif
9314 	} cnf71xx;
9315 };
9316 typedef union cvmx_gmxx_tx_corrupt cvmx_gmxx_tx_corrupt_t;
9317 
9318 /**
9319  * cvmx_gmx#_tx_hg2_reg1
9320  *
9321  * Notes:
9322  * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in
9323  * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address of
9324  * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of
9325  * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior.
9326  * For reads, either address will return the $GMX(0)_TX_HG2_REG1 values.
9327  */
9328 union cvmx_gmxx_tx_hg2_reg1 {
9329 	uint64_t u64;
9330 	struct cvmx_gmxx_tx_hg2_reg1_s {
9331 #ifdef __BIG_ENDIAN_BITFIELD
9332 	uint64_t reserved_16_63               : 48;
9333 	uint64_t tx_xof                       : 16; /**< TX HiGig2 message for logical link pause when any
9334                                                          bit value changes
9335                                                           Only write in HiGig2 mode i.e. when
9336                                                           GMX_TX_XAUI_CTL[HG_EN]=1 and
9337                                                           GMX_RX_UDD_SKP[SKIP]=16. */
9338 #else
9339 	uint64_t tx_xof                       : 16;
9340 	uint64_t reserved_16_63               : 48;
9341 #endif
9342 	} s;
9343 	struct cvmx_gmxx_tx_hg2_reg1_s        cn52xx;
9344 	struct cvmx_gmxx_tx_hg2_reg1_s        cn52xxp1;
9345 	struct cvmx_gmxx_tx_hg2_reg1_s        cn56xx;
9346 	struct cvmx_gmxx_tx_hg2_reg1_s        cn61xx;
9347 	struct cvmx_gmxx_tx_hg2_reg1_s        cn63xx;
9348 	struct cvmx_gmxx_tx_hg2_reg1_s        cn63xxp1;
9349 	struct cvmx_gmxx_tx_hg2_reg1_s        cn66xx;
9350 	struct cvmx_gmxx_tx_hg2_reg1_s        cn68xx;
9351 	struct cvmx_gmxx_tx_hg2_reg1_s        cn68xxp1;
9352 	struct cvmx_gmxx_tx_hg2_reg1_s        cnf71xx;
9353 };
9354 typedef union cvmx_gmxx_tx_hg2_reg1 cvmx_gmxx_tx_hg2_reg1_t;
9355 
9356 /**
9357  * cvmx_gmx#_tx_hg2_reg2
9358  *
9359  * Notes:
9360  * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in
9361  * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address  of
9362  * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of
9363  * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior.
9364  * For reads, either address will return the $GMX(0)_TX_HG2_REG1 values.
9365  */
9366 union cvmx_gmxx_tx_hg2_reg2 {
9367 	uint64_t u64;
9368 	struct cvmx_gmxx_tx_hg2_reg2_s {
9369 #ifdef __BIG_ENDIAN_BITFIELD
9370 	uint64_t reserved_16_63               : 48;
9371 	uint64_t tx_xon                       : 16; /**< TX HiGig2 message for logical link pause when any
9372                                                          bit value changes
9373                                                           Only write in HiGig2 mode i.e. when
9374                                                           GMX_TX_XAUI_CTL[HG_EN]=1 and
9375                                                           GMX_RX_UDD_SKP[SKIP]=16. */
9376 #else
9377 	uint64_t tx_xon                       : 16;
9378 	uint64_t reserved_16_63               : 48;
9379 #endif
9380 	} s;
9381 	struct cvmx_gmxx_tx_hg2_reg2_s        cn52xx;
9382 	struct cvmx_gmxx_tx_hg2_reg2_s        cn52xxp1;
9383 	struct cvmx_gmxx_tx_hg2_reg2_s        cn56xx;
9384 	struct cvmx_gmxx_tx_hg2_reg2_s        cn61xx;
9385 	struct cvmx_gmxx_tx_hg2_reg2_s        cn63xx;
9386 	struct cvmx_gmxx_tx_hg2_reg2_s        cn63xxp1;
9387 	struct cvmx_gmxx_tx_hg2_reg2_s        cn66xx;
9388 	struct cvmx_gmxx_tx_hg2_reg2_s        cn68xx;
9389 	struct cvmx_gmxx_tx_hg2_reg2_s        cn68xxp1;
9390 	struct cvmx_gmxx_tx_hg2_reg2_s        cnf71xx;
9391 };
9392 typedef union cvmx_gmxx_tx_hg2_reg2 cvmx_gmxx_tx_hg2_reg2_t;
9393 
9394 /**
9395  * cvmx_gmx#_tx_ifg
9396  *
9397  * GMX_TX_IFG = Packet TX Interframe Gap
9398  *
9399  *
9400  * Notes:
9401  * * Programming IFG1 and IFG2.
9402  *
9403  * For 10/100/1000Mbs half-duplex systems that require IEEE 802.3
9404  * compatibility, IFG1 must be in the range of 1-8, IFG2 must be in the range
9405  * of 4-12, and the IFG1+IFG2 sum must be 12.
9406  *
9407  * For 10/100/1000Mbs full-duplex systems that require IEEE 802.3
9408  * compatibility, IFG1 must be in the range of 1-11, IFG2 must be in the range
9409  * of 1-11, and the IFG1+IFG2 sum must be 12.
9410  *
9411  * For XAUI/10Gbs systems that require IEEE 802.3 compatibility, the
9412  * IFG1+IFG2 sum must be 12.  IFG1[1:0] and IFG2[1:0] must be zero.
9413  *
9414  * For all other systems, IFG1 and IFG2 can be any value in the range of
9415  * 1-15.  Allowing for a total possible IFG sum of 2-30.
9416  */
9417 union cvmx_gmxx_tx_ifg {
9418 	uint64_t u64;
9419 	struct cvmx_gmxx_tx_ifg_s {
9420 #ifdef __BIG_ENDIAN_BITFIELD
9421 	uint64_t reserved_8_63                : 56;
9422 	uint64_t ifg2                         : 4;  /**< 1/3 of the interframe gap timing (in IFG2*8 bits)
9423                                                          If CRS is detected during IFG2, then the
9424                                                          interFrameSpacing timer is not reset and a frame
9425                                                          is transmited once the timer expires. */
9426 	uint64_t ifg1                         : 4;  /**< 2/3 of the interframe gap timing (in IFG1*8 bits)
9427                                                          If CRS is detected during IFG1, then the
9428                                                          interFrameSpacing timer is reset and a frame is
9429                                                          not transmited. */
9430 #else
9431 	uint64_t ifg1                         : 4;
9432 	uint64_t ifg2                         : 4;
9433 	uint64_t reserved_8_63                : 56;
9434 #endif
9435 	} s;
9436 	struct cvmx_gmxx_tx_ifg_s             cn30xx;
9437 	struct cvmx_gmxx_tx_ifg_s             cn31xx;
9438 	struct cvmx_gmxx_tx_ifg_s             cn38xx;
9439 	struct cvmx_gmxx_tx_ifg_s             cn38xxp2;
9440 	struct cvmx_gmxx_tx_ifg_s             cn50xx;
9441 	struct cvmx_gmxx_tx_ifg_s             cn52xx;
9442 	struct cvmx_gmxx_tx_ifg_s             cn52xxp1;
9443 	struct cvmx_gmxx_tx_ifg_s             cn56xx;
9444 	struct cvmx_gmxx_tx_ifg_s             cn56xxp1;
9445 	struct cvmx_gmxx_tx_ifg_s             cn58xx;
9446 	struct cvmx_gmxx_tx_ifg_s             cn58xxp1;
9447 	struct cvmx_gmxx_tx_ifg_s             cn61xx;
9448 	struct cvmx_gmxx_tx_ifg_s             cn63xx;
9449 	struct cvmx_gmxx_tx_ifg_s             cn63xxp1;
9450 	struct cvmx_gmxx_tx_ifg_s             cn66xx;
9451 	struct cvmx_gmxx_tx_ifg_s             cn68xx;
9452 	struct cvmx_gmxx_tx_ifg_s             cn68xxp1;
9453 	struct cvmx_gmxx_tx_ifg_s             cnf71xx;
9454 };
9455 typedef union cvmx_gmxx_tx_ifg cvmx_gmxx_tx_ifg_t;
9456 
9457 /**
9458  * cvmx_gmx#_tx_int_en
9459  *
9460  * GMX_TX_INT_EN = Interrupt Enable
9461  *
9462  *
9463  * Notes:
9464  * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used.
9465  *
9466  */
9467 union cvmx_gmxx_tx_int_en {
9468 	uint64_t u64;
9469 	struct cvmx_gmxx_tx_int_en_s {
9470 #ifdef __BIG_ENDIAN_BITFIELD
9471 	uint64_t reserved_25_63               : 39;
9472 	uint64_t xchange                      : 1;  /**< XAUI link status changed - this denotes a change
9473                                                          to GMX_RX_XAUI_CTL[STATUS]
9474                                                          (XAUI mode only) */
9475 	uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
9476                                                          sent due to XSCOL */
9477 	uint64_t late_col                     : 4;  /**< TX Late Collision
9478                                                          (SGMII/1000Base-X half-duplex only) */
9479 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
9480                                                          (SGMII/1000Base-X half-duplex only) */
9481 	uint64_t xscol                        : 4;  /**< TX Excessive collisions
9482                                                          (SGMII/1000Base-X half-duplex only) */
9483 	uint64_t reserved_6_7                 : 2;
9484 	uint64_t undflw                       : 4;  /**< TX Underflow */
9485 	uint64_t reserved_1_1                 : 1;
9486 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9487 #else
9488 	uint64_t pko_nxa                      : 1;
9489 	uint64_t reserved_1_1                 : 1;
9490 	uint64_t undflw                       : 4;
9491 	uint64_t reserved_6_7                 : 2;
9492 	uint64_t xscol                        : 4;
9493 	uint64_t xsdef                        : 4;
9494 	uint64_t late_col                     : 4;
9495 	uint64_t ptp_lost                     : 4;
9496 	uint64_t xchange                      : 1;
9497 	uint64_t reserved_25_63               : 39;
9498 #endif
9499 	} s;
9500 	struct cvmx_gmxx_tx_int_en_cn30xx {
9501 #ifdef __BIG_ENDIAN_BITFIELD
9502 	uint64_t reserved_19_63               : 45;
9503 	uint64_t late_col                     : 3;  /**< TX Late Collision */
9504 	uint64_t reserved_15_15               : 1;
9505 	uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
9506 	uint64_t reserved_11_11               : 1;
9507 	uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
9508 	uint64_t reserved_5_7                 : 3;
9509 	uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
9510 	uint64_t reserved_1_1                 : 1;
9511 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9512 #else
9513 	uint64_t pko_nxa                      : 1;
9514 	uint64_t reserved_1_1                 : 1;
9515 	uint64_t undflw                       : 3;
9516 	uint64_t reserved_5_7                 : 3;
9517 	uint64_t xscol                        : 3;
9518 	uint64_t reserved_11_11               : 1;
9519 	uint64_t xsdef                        : 3;
9520 	uint64_t reserved_15_15               : 1;
9521 	uint64_t late_col                     : 3;
9522 	uint64_t reserved_19_63               : 45;
9523 #endif
9524 	} cn30xx;
9525 	struct cvmx_gmxx_tx_int_en_cn31xx {
9526 #ifdef __BIG_ENDIAN_BITFIELD
9527 	uint64_t reserved_15_63               : 49;
9528 	uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
9529 	uint64_t reserved_11_11               : 1;
9530 	uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
9531 	uint64_t reserved_5_7                 : 3;
9532 	uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
9533 	uint64_t reserved_1_1                 : 1;
9534 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9535 #else
9536 	uint64_t pko_nxa                      : 1;
9537 	uint64_t reserved_1_1                 : 1;
9538 	uint64_t undflw                       : 3;
9539 	uint64_t reserved_5_7                 : 3;
9540 	uint64_t xscol                        : 3;
9541 	uint64_t reserved_11_11               : 1;
9542 	uint64_t xsdef                        : 3;
9543 	uint64_t reserved_15_63               : 49;
9544 #endif
9545 	} cn31xx;
9546 	struct cvmx_gmxx_tx_int_en_cn38xx {
9547 #ifdef __BIG_ENDIAN_BITFIELD
9548 	uint64_t reserved_20_63               : 44;
9549 	uint64_t late_col                     : 4;  /**< TX Late Collision
9550                                                          (PASS3 only) */
9551 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
9552 	uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
9553 	uint64_t reserved_6_7                 : 2;
9554 	uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
9555 	uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
9556 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9557 #else
9558 	uint64_t pko_nxa                      : 1;
9559 	uint64_t ncb_nxa                      : 1;
9560 	uint64_t undflw                       : 4;
9561 	uint64_t reserved_6_7                 : 2;
9562 	uint64_t xscol                        : 4;
9563 	uint64_t xsdef                        : 4;
9564 	uint64_t late_col                     : 4;
9565 	uint64_t reserved_20_63               : 44;
9566 #endif
9567 	} cn38xx;
9568 	struct cvmx_gmxx_tx_int_en_cn38xxp2 {
9569 #ifdef __BIG_ENDIAN_BITFIELD
9570 	uint64_t reserved_16_63               : 48;
9571 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
9572 	uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
9573 	uint64_t reserved_6_7                 : 2;
9574 	uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
9575 	uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
9576 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9577 #else
9578 	uint64_t pko_nxa                      : 1;
9579 	uint64_t ncb_nxa                      : 1;
9580 	uint64_t undflw                       : 4;
9581 	uint64_t reserved_6_7                 : 2;
9582 	uint64_t xscol                        : 4;
9583 	uint64_t xsdef                        : 4;
9584 	uint64_t reserved_16_63               : 48;
9585 #endif
9586 	} cn38xxp2;
9587 	struct cvmx_gmxx_tx_int_en_cn30xx     cn50xx;
9588 	struct cvmx_gmxx_tx_int_en_cn52xx {
9589 #ifdef __BIG_ENDIAN_BITFIELD
9590 	uint64_t reserved_20_63               : 44;
9591 	uint64_t late_col                     : 4;  /**< TX Late Collision
9592                                                          (SGMII/1000Base-X half-duplex only) */
9593 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
9594                                                          (SGMII/1000Base-X half-duplex only) */
9595 	uint64_t xscol                        : 4;  /**< TX Excessive collisions
9596                                                          (SGMII/1000Base-X half-duplex only) */
9597 	uint64_t reserved_6_7                 : 2;
9598 	uint64_t undflw                       : 4;  /**< TX Underflow */
9599 	uint64_t reserved_1_1                 : 1;
9600 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9601 #else
9602 	uint64_t pko_nxa                      : 1;
9603 	uint64_t reserved_1_1                 : 1;
9604 	uint64_t undflw                       : 4;
9605 	uint64_t reserved_6_7                 : 2;
9606 	uint64_t xscol                        : 4;
9607 	uint64_t xsdef                        : 4;
9608 	uint64_t late_col                     : 4;
9609 	uint64_t reserved_20_63               : 44;
9610 #endif
9611 	} cn52xx;
9612 	struct cvmx_gmxx_tx_int_en_cn52xx     cn52xxp1;
9613 	struct cvmx_gmxx_tx_int_en_cn52xx     cn56xx;
9614 	struct cvmx_gmxx_tx_int_en_cn52xx     cn56xxp1;
9615 	struct cvmx_gmxx_tx_int_en_cn38xx     cn58xx;
9616 	struct cvmx_gmxx_tx_int_en_cn38xx     cn58xxp1;
9617 	struct cvmx_gmxx_tx_int_en_s          cn61xx;
9618 	struct cvmx_gmxx_tx_int_en_cn63xx {
9619 #ifdef __BIG_ENDIAN_BITFIELD
9620 	uint64_t reserved_24_63               : 40;
9621 	uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
9622                                                          sent due to XSCOL */
9623 	uint64_t late_col                     : 4;  /**< TX Late Collision
9624                                                          (SGMII/1000Base-X half-duplex only) */
9625 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
9626                                                          (SGMII/1000Base-X half-duplex only) */
9627 	uint64_t xscol                        : 4;  /**< TX Excessive collisions
9628                                                          (SGMII/1000Base-X half-duplex only) */
9629 	uint64_t reserved_6_7                 : 2;
9630 	uint64_t undflw                       : 4;  /**< TX Underflow */
9631 	uint64_t reserved_1_1                 : 1;
9632 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9633 #else
9634 	uint64_t pko_nxa                      : 1;
9635 	uint64_t reserved_1_1                 : 1;
9636 	uint64_t undflw                       : 4;
9637 	uint64_t reserved_6_7                 : 2;
9638 	uint64_t xscol                        : 4;
9639 	uint64_t xsdef                        : 4;
9640 	uint64_t late_col                     : 4;
9641 	uint64_t ptp_lost                     : 4;
9642 	uint64_t reserved_24_63               : 40;
9643 #endif
9644 	} cn63xx;
9645 	struct cvmx_gmxx_tx_int_en_cn63xx     cn63xxp1;
9646 	struct cvmx_gmxx_tx_int_en_s          cn66xx;
9647 	struct cvmx_gmxx_tx_int_en_cn68xx {
9648 #ifdef __BIG_ENDIAN_BITFIELD
9649 	uint64_t reserved_25_63               : 39;
9650 	uint64_t xchange                      : 1;  /**< XAUI/RXAUI link status changed - this denotes a
9651                                                          change to GMX_RX_XAUI_CTL[STATUS]
9652                                                          (XAUI/RXAUI mode only) */
9653 	uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
9654                                                          sent due to XSCOL */
9655 	uint64_t late_col                     : 4;  /**< TX Late Collision
9656                                                          (SGMII/1000Base-X half-duplex only) */
9657 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
9658                                                          (SGMII/1000Base-X half-duplex only) */
9659 	uint64_t xscol                        : 4;  /**< TX Excessive collisions
9660                                                          (SGMII/1000Base-X half-duplex only) */
9661 	uint64_t reserved_6_7                 : 2;
9662 	uint64_t undflw                       : 4;  /**< TX Underflow */
9663 	uint64_t pko_nxp                      : 1;  /**< Port pipe out-of-range from PKO Interface */
9664 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9665 #else
9666 	uint64_t pko_nxa                      : 1;
9667 	uint64_t pko_nxp                      : 1;
9668 	uint64_t undflw                       : 4;
9669 	uint64_t reserved_6_7                 : 2;
9670 	uint64_t xscol                        : 4;
9671 	uint64_t xsdef                        : 4;
9672 	uint64_t late_col                     : 4;
9673 	uint64_t ptp_lost                     : 4;
9674 	uint64_t xchange                      : 1;
9675 	uint64_t reserved_25_63               : 39;
9676 #endif
9677 	} cn68xx;
9678 	struct cvmx_gmxx_tx_int_en_cn68xx     cn68xxp1;
9679 	struct cvmx_gmxx_tx_int_en_cnf71xx {
9680 #ifdef __BIG_ENDIAN_BITFIELD
9681 	uint64_t reserved_25_63               : 39;
9682 	uint64_t xchange                      : 1;  /**< XAUI link status changed - this denotes a change
9683                                                          to GMX_RX_XAUI_CTL[STATUS]
9684                                                          (XAUI mode only) */
9685 	uint64_t reserved_22_23               : 2;
9686 	uint64_t ptp_lost                     : 2;  /**< A packet with a PTP request was not able to be
9687                                                          sent due to XSCOL */
9688 	uint64_t reserved_18_19               : 2;
9689 	uint64_t late_col                     : 2;  /**< TX Late Collision
9690                                                          (SGMII/1000Base-X half-duplex only) */
9691 	uint64_t reserved_14_15               : 2;
9692 	uint64_t xsdef                        : 2;  /**< TX Excessive deferral
9693                                                          (SGMII/1000Base-X half-duplex only) */
9694 	uint64_t reserved_10_11               : 2;
9695 	uint64_t xscol                        : 2;  /**< TX Excessive collisions
9696                                                          (SGMII/1000Base-X half-duplex only) */
9697 	uint64_t reserved_4_7                 : 4;
9698 	uint64_t undflw                       : 2;  /**< TX Underflow */
9699 	uint64_t reserved_1_1                 : 1;
9700 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9701 #else
9702 	uint64_t pko_nxa                      : 1;
9703 	uint64_t reserved_1_1                 : 1;
9704 	uint64_t undflw                       : 2;
9705 	uint64_t reserved_4_7                 : 4;
9706 	uint64_t xscol                        : 2;
9707 	uint64_t reserved_10_11               : 2;
9708 	uint64_t xsdef                        : 2;
9709 	uint64_t reserved_14_15               : 2;
9710 	uint64_t late_col                     : 2;
9711 	uint64_t reserved_18_19               : 2;
9712 	uint64_t ptp_lost                     : 2;
9713 	uint64_t reserved_22_23               : 2;
9714 	uint64_t xchange                      : 1;
9715 	uint64_t reserved_25_63               : 39;
9716 #endif
9717 	} cnf71xx;
9718 };
9719 typedef union cvmx_gmxx_tx_int_en cvmx_gmxx_tx_int_en_t;
9720 
9721 /**
9722  * cvmx_gmx#_tx_int_reg
9723  *
9724  * GMX_TX_INT_REG = Interrupt Register
9725  *
9726  *
9727  * Notes:
9728  * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used.
9729  *
9730  */
9731 union cvmx_gmxx_tx_int_reg {
9732 	uint64_t u64;
9733 	struct cvmx_gmxx_tx_int_reg_s {
9734 #ifdef __BIG_ENDIAN_BITFIELD
9735 	uint64_t reserved_25_63               : 39;
9736 	uint64_t xchange                      : 1;  /**< XAUI link status changed - this denotes a change
9737                                                          to GMX_RX_XAUI_CTL[STATUS]
9738                                                          (XAUI mode only) */
9739 	uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
9740                                                          sent due to XSCOL */
9741 	uint64_t late_col                     : 4;  /**< TX Late Collision
9742                                                          (SGMII/1000Base-X half-duplex only) */
9743 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
9744                                                          (SGMII/1000Base-X half-duplex only) */
9745 	uint64_t xscol                        : 4;  /**< TX Excessive collisions
9746                                                          (SGMII/1000Base-X half-duplex only) */
9747 	uint64_t reserved_6_7                 : 2;
9748 	uint64_t undflw                       : 4;  /**< TX Underflow */
9749 	uint64_t reserved_1_1                 : 1;
9750 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9751 #else
9752 	uint64_t pko_nxa                      : 1;
9753 	uint64_t reserved_1_1                 : 1;
9754 	uint64_t undflw                       : 4;
9755 	uint64_t reserved_6_7                 : 2;
9756 	uint64_t xscol                        : 4;
9757 	uint64_t xsdef                        : 4;
9758 	uint64_t late_col                     : 4;
9759 	uint64_t ptp_lost                     : 4;
9760 	uint64_t xchange                      : 1;
9761 	uint64_t reserved_25_63               : 39;
9762 #endif
9763 	} s;
9764 	struct cvmx_gmxx_tx_int_reg_cn30xx {
9765 #ifdef __BIG_ENDIAN_BITFIELD
9766 	uint64_t reserved_19_63               : 45;
9767 	uint64_t late_col                     : 3;  /**< TX Late Collision */
9768 	uint64_t reserved_15_15               : 1;
9769 	uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
9770 	uint64_t reserved_11_11               : 1;
9771 	uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
9772 	uint64_t reserved_5_7                 : 3;
9773 	uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
9774 	uint64_t reserved_1_1                 : 1;
9775 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9776 #else
9777 	uint64_t pko_nxa                      : 1;
9778 	uint64_t reserved_1_1                 : 1;
9779 	uint64_t undflw                       : 3;
9780 	uint64_t reserved_5_7                 : 3;
9781 	uint64_t xscol                        : 3;
9782 	uint64_t reserved_11_11               : 1;
9783 	uint64_t xsdef                        : 3;
9784 	uint64_t reserved_15_15               : 1;
9785 	uint64_t late_col                     : 3;
9786 	uint64_t reserved_19_63               : 45;
9787 #endif
9788 	} cn30xx;
9789 	struct cvmx_gmxx_tx_int_reg_cn31xx {
9790 #ifdef __BIG_ENDIAN_BITFIELD
9791 	uint64_t reserved_15_63               : 49;
9792 	uint64_t xsdef                        : 3;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
9793 	uint64_t reserved_11_11               : 1;
9794 	uint64_t xscol                        : 3;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
9795 	uint64_t reserved_5_7                 : 3;
9796 	uint64_t undflw                       : 3;  /**< TX Underflow (RGMII mode only) */
9797 	uint64_t reserved_1_1                 : 1;
9798 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9799 #else
9800 	uint64_t pko_nxa                      : 1;
9801 	uint64_t reserved_1_1                 : 1;
9802 	uint64_t undflw                       : 3;
9803 	uint64_t reserved_5_7                 : 3;
9804 	uint64_t xscol                        : 3;
9805 	uint64_t reserved_11_11               : 1;
9806 	uint64_t xsdef                        : 3;
9807 	uint64_t reserved_15_63               : 49;
9808 #endif
9809 	} cn31xx;
9810 	struct cvmx_gmxx_tx_int_reg_cn38xx {
9811 #ifdef __BIG_ENDIAN_BITFIELD
9812 	uint64_t reserved_20_63               : 44;
9813 	uint64_t late_col                     : 4;  /**< TX Late Collision
9814                                                          (PASS3 only) */
9815 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
9816 	uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
9817 	uint64_t reserved_6_7                 : 2;
9818 	uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
9819 	uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
9820 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9821 #else
9822 	uint64_t pko_nxa                      : 1;
9823 	uint64_t ncb_nxa                      : 1;
9824 	uint64_t undflw                       : 4;
9825 	uint64_t reserved_6_7                 : 2;
9826 	uint64_t xscol                        : 4;
9827 	uint64_t xsdef                        : 4;
9828 	uint64_t late_col                     : 4;
9829 	uint64_t reserved_20_63               : 44;
9830 #endif
9831 	} cn38xx;
9832 	struct cvmx_gmxx_tx_int_reg_cn38xxp2 {
9833 #ifdef __BIG_ENDIAN_BITFIELD
9834 	uint64_t reserved_16_63               : 48;
9835 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral (RGMII/halfdup mode only) */
9836 	uint64_t xscol                        : 4;  /**< TX Excessive collisions (RGMII/halfdup mode only) */
9837 	uint64_t reserved_6_7                 : 2;
9838 	uint64_t undflw                       : 4;  /**< TX Underflow (RGMII mode only) */
9839 	uint64_t ncb_nxa                      : 1;  /**< Port address out-of-range from NCB Interface */
9840 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9841 #else
9842 	uint64_t pko_nxa                      : 1;
9843 	uint64_t ncb_nxa                      : 1;
9844 	uint64_t undflw                       : 4;
9845 	uint64_t reserved_6_7                 : 2;
9846 	uint64_t xscol                        : 4;
9847 	uint64_t xsdef                        : 4;
9848 	uint64_t reserved_16_63               : 48;
9849 #endif
9850 	} cn38xxp2;
9851 	struct cvmx_gmxx_tx_int_reg_cn30xx    cn50xx;
9852 	struct cvmx_gmxx_tx_int_reg_cn52xx {
9853 #ifdef __BIG_ENDIAN_BITFIELD
9854 	uint64_t reserved_20_63               : 44;
9855 	uint64_t late_col                     : 4;  /**< TX Late Collision
9856                                                          (SGMII/1000Base-X half-duplex only) */
9857 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
9858                                                          (SGMII/1000Base-X half-duplex only) */
9859 	uint64_t xscol                        : 4;  /**< TX Excessive collisions
9860                                                          (SGMII/1000Base-X half-duplex only) */
9861 	uint64_t reserved_6_7                 : 2;
9862 	uint64_t undflw                       : 4;  /**< TX Underflow */
9863 	uint64_t reserved_1_1                 : 1;
9864 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9865 #else
9866 	uint64_t pko_nxa                      : 1;
9867 	uint64_t reserved_1_1                 : 1;
9868 	uint64_t undflw                       : 4;
9869 	uint64_t reserved_6_7                 : 2;
9870 	uint64_t xscol                        : 4;
9871 	uint64_t xsdef                        : 4;
9872 	uint64_t late_col                     : 4;
9873 	uint64_t reserved_20_63               : 44;
9874 #endif
9875 	} cn52xx;
9876 	struct cvmx_gmxx_tx_int_reg_cn52xx    cn52xxp1;
9877 	struct cvmx_gmxx_tx_int_reg_cn52xx    cn56xx;
9878 	struct cvmx_gmxx_tx_int_reg_cn52xx    cn56xxp1;
9879 	struct cvmx_gmxx_tx_int_reg_cn38xx    cn58xx;
9880 	struct cvmx_gmxx_tx_int_reg_cn38xx    cn58xxp1;
9881 	struct cvmx_gmxx_tx_int_reg_s         cn61xx;
9882 	struct cvmx_gmxx_tx_int_reg_cn63xx {
9883 #ifdef __BIG_ENDIAN_BITFIELD
9884 	uint64_t reserved_24_63               : 40;
9885 	uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
9886                                                          sent due to XSCOL */
9887 	uint64_t late_col                     : 4;  /**< TX Late Collision
9888                                                          (SGMII/1000Base-X half-duplex only) */
9889 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
9890                                                          (SGMII/1000Base-X half-duplex only) */
9891 	uint64_t xscol                        : 4;  /**< TX Excessive collisions
9892                                                          (SGMII/1000Base-X half-duplex only) */
9893 	uint64_t reserved_6_7                 : 2;
9894 	uint64_t undflw                       : 4;  /**< TX Underflow */
9895 	uint64_t reserved_1_1                 : 1;
9896 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9897 #else
9898 	uint64_t pko_nxa                      : 1;
9899 	uint64_t reserved_1_1                 : 1;
9900 	uint64_t undflw                       : 4;
9901 	uint64_t reserved_6_7                 : 2;
9902 	uint64_t xscol                        : 4;
9903 	uint64_t xsdef                        : 4;
9904 	uint64_t late_col                     : 4;
9905 	uint64_t ptp_lost                     : 4;
9906 	uint64_t reserved_24_63               : 40;
9907 #endif
9908 	} cn63xx;
9909 	struct cvmx_gmxx_tx_int_reg_cn63xx    cn63xxp1;
9910 	struct cvmx_gmxx_tx_int_reg_s         cn66xx;
9911 	struct cvmx_gmxx_tx_int_reg_cn68xx {
9912 #ifdef __BIG_ENDIAN_BITFIELD
9913 	uint64_t reserved_25_63               : 39;
9914 	uint64_t xchange                      : 1;  /**< XAUI/RXAUI link status changed - this denotes ae
9915                                                          change to GMX_RX_XAUI_CTL[STATUS]
9916                                                          (XAUI/RXAUI mode only) */
9917 	uint64_t ptp_lost                     : 4;  /**< A packet with a PTP request was not able to be
9918                                                          sent due to XSCOL */
9919 	uint64_t late_col                     : 4;  /**< TX Late Collision
9920                                                          (SGMII/1000Base-X half-duplex only) */
9921 	uint64_t xsdef                        : 4;  /**< TX Excessive deferral
9922                                                          (SGMII/1000Base-X half-duplex only) */
9923 	uint64_t xscol                        : 4;  /**< TX Excessive collisions
9924                                                          (SGMII/1000Base-X half-duplex only) */
9925 	uint64_t reserved_6_7                 : 2;
9926 	uint64_t undflw                       : 4;  /**< TX Underflow */
9927 	uint64_t pko_nxp                      : 1;  /**< Port pipe out-of-range from PKO Interface */
9928 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9929 #else
9930 	uint64_t pko_nxa                      : 1;
9931 	uint64_t pko_nxp                      : 1;
9932 	uint64_t undflw                       : 4;
9933 	uint64_t reserved_6_7                 : 2;
9934 	uint64_t xscol                        : 4;
9935 	uint64_t xsdef                        : 4;
9936 	uint64_t late_col                     : 4;
9937 	uint64_t ptp_lost                     : 4;
9938 	uint64_t xchange                      : 1;
9939 	uint64_t reserved_25_63               : 39;
9940 #endif
9941 	} cn68xx;
9942 	struct cvmx_gmxx_tx_int_reg_cn68xx    cn68xxp1;
9943 	struct cvmx_gmxx_tx_int_reg_cnf71xx {
9944 #ifdef __BIG_ENDIAN_BITFIELD
9945 	uint64_t reserved_25_63               : 39;
9946 	uint64_t xchange                      : 1;  /**< XAUI link status changed - this denotes a change
9947                                                          to GMX_RX_XAUI_CTL[STATUS]
9948                                                          (XAUI mode only) */
9949 	uint64_t reserved_22_23               : 2;
9950 	uint64_t ptp_lost                     : 2;  /**< A packet with a PTP request was not able to be
9951                                                          sent due to XSCOL */
9952 	uint64_t reserved_18_19               : 2;
9953 	uint64_t late_col                     : 2;  /**< TX Late Collision
9954                                                          (SGMII/1000Base-X half-duplex only) */
9955 	uint64_t reserved_14_15               : 2;
9956 	uint64_t xsdef                        : 2;  /**< TX Excessive deferral
9957                                                          (SGMII/1000Base-X half-duplex only) */
9958 	uint64_t reserved_10_11               : 2;
9959 	uint64_t xscol                        : 2;  /**< TX Excessive collisions
9960                                                          (SGMII/1000Base-X half-duplex only) */
9961 	uint64_t reserved_4_7                 : 4;
9962 	uint64_t undflw                       : 2;  /**< TX Underflow */
9963 	uint64_t reserved_1_1                 : 1;
9964 	uint64_t pko_nxa                      : 1;  /**< Port address out-of-range from PKO Interface */
9965 #else
9966 	uint64_t pko_nxa                      : 1;
9967 	uint64_t reserved_1_1                 : 1;
9968 	uint64_t undflw                       : 2;
9969 	uint64_t reserved_4_7                 : 4;
9970 	uint64_t xscol                        : 2;
9971 	uint64_t reserved_10_11               : 2;
9972 	uint64_t xsdef                        : 2;
9973 	uint64_t reserved_14_15               : 2;
9974 	uint64_t late_col                     : 2;
9975 	uint64_t reserved_18_19               : 2;
9976 	uint64_t ptp_lost                     : 2;
9977 	uint64_t reserved_22_23               : 2;
9978 	uint64_t xchange                      : 1;
9979 	uint64_t reserved_25_63               : 39;
9980 #endif
9981 	} cnf71xx;
9982 };
9983 typedef union cvmx_gmxx_tx_int_reg cvmx_gmxx_tx_int_reg_t;
9984 
9985 /**
9986  * cvmx_gmx#_tx_jam
9987  *
9988  * GMX_TX_JAM = Packet TX Jam Pattern
9989  *
9990  */
9991 union cvmx_gmxx_tx_jam {
9992 	uint64_t u64;
9993 	struct cvmx_gmxx_tx_jam_s {
9994 #ifdef __BIG_ENDIAN_BITFIELD
9995 	uint64_t reserved_8_63                : 56;
9996 	uint64_t jam                          : 8;  /**< Jam pattern
9997                                                          (SGMII/1000Base-X half-duplex only) */
9998 #else
9999 	uint64_t jam                          : 8;
10000 	uint64_t reserved_8_63                : 56;
10001 #endif
10002 	} s;
10003 	struct cvmx_gmxx_tx_jam_s             cn30xx;
10004 	struct cvmx_gmxx_tx_jam_s             cn31xx;
10005 	struct cvmx_gmxx_tx_jam_s             cn38xx;
10006 	struct cvmx_gmxx_tx_jam_s             cn38xxp2;
10007 	struct cvmx_gmxx_tx_jam_s             cn50xx;
10008 	struct cvmx_gmxx_tx_jam_s             cn52xx;
10009 	struct cvmx_gmxx_tx_jam_s             cn52xxp1;
10010 	struct cvmx_gmxx_tx_jam_s             cn56xx;
10011 	struct cvmx_gmxx_tx_jam_s             cn56xxp1;
10012 	struct cvmx_gmxx_tx_jam_s             cn58xx;
10013 	struct cvmx_gmxx_tx_jam_s             cn58xxp1;
10014 	struct cvmx_gmxx_tx_jam_s             cn61xx;
10015 	struct cvmx_gmxx_tx_jam_s             cn63xx;
10016 	struct cvmx_gmxx_tx_jam_s             cn63xxp1;
10017 	struct cvmx_gmxx_tx_jam_s             cn66xx;
10018 	struct cvmx_gmxx_tx_jam_s             cn68xx;
10019 	struct cvmx_gmxx_tx_jam_s             cn68xxp1;
10020 	struct cvmx_gmxx_tx_jam_s             cnf71xx;
10021 };
10022 typedef union cvmx_gmxx_tx_jam cvmx_gmxx_tx_jam_t;
10023 
10024 /**
10025  * cvmx_gmx#_tx_lfsr
10026  *
10027  * GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
10028  *
10029  */
10030 union cvmx_gmxx_tx_lfsr {
10031 	uint64_t u64;
10032 	struct cvmx_gmxx_tx_lfsr_s {
10033 #ifdef __BIG_ENDIAN_BITFIELD
10034 	uint64_t reserved_16_63               : 48;
10035 	uint64_t lfsr                         : 16; /**< The current state of the LFSR used to feed random
10036                                                          numbers to compute truncated binary exponential
10037                                                          backoff.
10038                                                          (SGMII/1000Base-X half-duplex only) */
10039 #else
10040 	uint64_t lfsr                         : 16;
10041 	uint64_t reserved_16_63               : 48;
10042 #endif
10043 	} s;
10044 	struct cvmx_gmxx_tx_lfsr_s            cn30xx;
10045 	struct cvmx_gmxx_tx_lfsr_s            cn31xx;
10046 	struct cvmx_gmxx_tx_lfsr_s            cn38xx;
10047 	struct cvmx_gmxx_tx_lfsr_s            cn38xxp2;
10048 	struct cvmx_gmxx_tx_lfsr_s            cn50xx;
10049 	struct cvmx_gmxx_tx_lfsr_s            cn52xx;
10050 	struct cvmx_gmxx_tx_lfsr_s            cn52xxp1;
10051 	struct cvmx_gmxx_tx_lfsr_s            cn56xx;
10052 	struct cvmx_gmxx_tx_lfsr_s            cn56xxp1;
10053 	struct cvmx_gmxx_tx_lfsr_s            cn58xx;
10054 	struct cvmx_gmxx_tx_lfsr_s            cn58xxp1;
10055 	struct cvmx_gmxx_tx_lfsr_s            cn61xx;
10056 	struct cvmx_gmxx_tx_lfsr_s            cn63xx;
10057 	struct cvmx_gmxx_tx_lfsr_s            cn63xxp1;
10058 	struct cvmx_gmxx_tx_lfsr_s            cn66xx;
10059 	struct cvmx_gmxx_tx_lfsr_s            cn68xx;
10060 	struct cvmx_gmxx_tx_lfsr_s            cn68xxp1;
10061 	struct cvmx_gmxx_tx_lfsr_s            cnf71xx;
10062 };
10063 typedef union cvmx_gmxx_tx_lfsr cvmx_gmxx_tx_lfsr_t;
10064 
10065 /**
10066  * cvmx_gmx#_tx_ovr_bp
10067  *
10068  * GMX_TX_OVR_BP = Packet Interface TX Override BackPressure
10069  *
10070  *
10071  * Notes:
10072  * In XAUI mode, only the lsb (corresponding to port0) of EN, BP, and IGN_FULL are used.
10073  *
10074  * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero
10075  * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol
10076  * when GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by
10077  * GMX*_TX_XAUI_CTL[HG_EN]=1 and GMX*_RX0_UDD_SKP[LEN]=16.) HW can only auto-generate backpressure
10078  * through HiGig2 messages (optionally, when GMX*_HG2_CONTROL[HG2TX_EN]=1) with the HiGig2
10079  * protocol.
10080  */
10081 union cvmx_gmxx_tx_ovr_bp {
10082 	uint64_t u64;
10083 	struct cvmx_gmxx_tx_ovr_bp_s {
10084 #ifdef __BIG_ENDIAN_BITFIELD
10085 	uint64_t reserved_48_63               : 16;
10086 	uint64_t tx_prt_bp                    : 16; /**< Per port BP sent to PKO
10087                                                          0=Port is available
10088                                                          1=Port should be back pressured
10089                                                          TX_PRT_BP should not be set until
10090                                                          GMX_INF_MODE[EN] has been enabled */
10091 	uint64_t reserved_12_31               : 20;
10092 	uint64_t en                           : 4;  /**< Per port Enable back pressure override */
10093 	uint64_t bp                           : 4;  /**< Per port BackPressure status to use
10094                                                          0=Port is available
10095                                                          1=Port should be back pressured */
10096 	uint64_t ign_full                     : 4;  /**< Ignore the RX FIFO full when computing BP */
10097 #else
10098 	uint64_t ign_full                     : 4;
10099 	uint64_t bp                           : 4;
10100 	uint64_t en                           : 4;
10101 	uint64_t reserved_12_31               : 20;
10102 	uint64_t tx_prt_bp                    : 16;
10103 	uint64_t reserved_48_63               : 16;
10104 #endif
10105 	} s;
10106 	struct cvmx_gmxx_tx_ovr_bp_cn30xx {
10107 #ifdef __BIG_ENDIAN_BITFIELD
10108 	uint64_t reserved_11_63               : 53;
10109 	uint64_t en                           : 3;  /**< Per port Enable back pressure override */
10110 	uint64_t reserved_7_7                 : 1;
10111 	uint64_t bp                           : 3;  /**< Per port BackPressure status to use
10112                                                          0=Port is available
10113                                                          1=Port should be back pressured */
10114 	uint64_t reserved_3_3                 : 1;
10115 	uint64_t ign_full                     : 3;  /**< Ignore the RX FIFO full when computing BP */
10116 #else
10117 	uint64_t ign_full                     : 3;
10118 	uint64_t reserved_3_3                 : 1;
10119 	uint64_t bp                           : 3;
10120 	uint64_t reserved_7_7                 : 1;
10121 	uint64_t en                           : 3;
10122 	uint64_t reserved_11_63               : 53;
10123 #endif
10124 	} cn30xx;
10125 	struct cvmx_gmxx_tx_ovr_bp_cn30xx     cn31xx;
10126 	struct cvmx_gmxx_tx_ovr_bp_cn38xx {
10127 #ifdef __BIG_ENDIAN_BITFIELD
10128 	uint64_t reserved_12_63               : 52;
10129 	uint64_t en                           : 4;  /**< Per port Enable back pressure override */
10130 	uint64_t bp                           : 4;  /**< Per port BackPressure status to use
10131                                                          0=Port is available
10132                                                          1=Port should be back pressured */
10133 	uint64_t ign_full                     : 4;  /**< Ignore the RX FIFO full when computing BP */
10134 #else
10135 	uint64_t ign_full                     : 4;
10136 	uint64_t bp                           : 4;
10137 	uint64_t en                           : 4;
10138 	uint64_t reserved_12_63               : 52;
10139 #endif
10140 	} cn38xx;
10141 	struct cvmx_gmxx_tx_ovr_bp_cn38xx     cn38xxp2;
10142 	struct cvmx_gmxx_tx_ovr_bp_cn30xx     cn50xx;
10143 	struct cvmx_gmxx_tx_ovr_bp_s          cn52xx;
10144 	struct cvmx_gmxx_tx_ovr_bp_s          cn52xxp1;
10145 	struct cvmx_gmxx_tx_ovr_bp_s          cn56xx;
10146 	struct cvmx_gmxx_tx_ovr_bp_s          cn56xxp1;
10147 	struct cvmx_gmxx_tx_ovr_bp_cn38xx     cn58xx;
10148 	struct cvmx_gmxx_tx_ovr_bp_cn38xx     cn58xxp1;
10149 	struct cvmx_gmxx_tx_ovr_bp_s          cn61xx;
10150 	struct cvmx_gmxx_tx_ovr_bp_s          cn63xx;
10151 	struct cvmx_gmxx_tx_ovr_bp_s          cn63xxp1;
10152 	struct cvmx_gmxx_tx_ovr_bp_s          cn66xx;
10153 	struct cvmx_gmxx_tx_ovr_bp_s          cn68xx;
10154 	struct cvmx_gmxx_tx_ovr_bp_s          cn68xxp1;
10155 	struct cvmx_gmxx_tx_ovr_bp_cnf71xx {
10156 #ifdef __BIG_ENDIAN_BITFIELD
10157 	uint64_t reserved_48_63               : 16;
10158 	uint64_t tx_prt_bp                    : 16; /**< Per port BP sent to PKO
10159                                                          0=Port is available
10160                                                          1=Port should be back pressured
10161                                                          TX_PRT_BP should not be set until
10162                                                          GMX_INF_MODE[EN] has been enabled */
10163 	uint64_t reserved_10_31               : 22;
10164 	uint64_t en                           : 2;  /**< Per port Enable back pressure override */
10165 	uint64_t reserved_6_7                 : 2;
10166 	uint64_t bp                           : 2;  /**< Per port BackPressure status to use
10167                                                          0=Port is available
10168                                                          1=Port should be back pressured */
10169 	uint64_t reserved_2_3                 : 2;
10170 	uint64_t ign_full                     : 2;  /**< Ignore the RX FIFO full when computing BP */
10171 #else
10172 	uint64_t ign_full                     : 2;
10173 	uint64_t reserved_2_3                 : 2;
10174 	uint64_t bp                           : 2;
10175 	uint64_t reserved_6_7                 : 2;
10176 	uint64_t en                           : 2;
10177 	uint64_t reserved_10_31               : 22;
10178 	uint64_t tx_prt_bp                    : 16;
10179 	uint64_t reserved_48_63               : 16;
10180 #endif
10181 	} cnf71xx;
10182 };
10183 typedef union cvmx_gmxx_tx_ovr_bp cvmx_gmxx_tx_ovr_bp_t;
10184 
10185 /**
10186  * cvmx_gmx#_tx_pause_pkt_dmac
10187  *
10188  * GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field
10189  *
10190  */
10191 union cvmx_gmxx_tx_pause_pkt_dmac {
10192 	uint64_t u64;
10193 	struct cvmx_gmxx_tx_pause_pkt_dmac_s {
10194 #ifdef __BIG_ENDIAN_BITFIELD
10195 	uint64_t reserved_48_63               : 16;
10196 	uint64_t dmac                         : 48; /**< The DMAC field placed is outbnd pause pkts */
10197 #else
10198 	uint64_t dmac                         : 48;
10199 	uint64_t reserved_48_63               : 16;
10200 #endif
10201 	} s;
10202 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn30xx;
10203 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn31xx;
10204 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn38xx;
10205 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn38xxp2;
10206 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn50xx;
10207 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn52xx;
10208 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn52xxp1;
10209 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn56xx;
10210 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn56xxp1;
10211 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn58xx;
10212 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn58xxp1;
10213 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn61xx;
10214 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn63xx;
10215 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn63xxp1;
10216 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn66xx;
10217 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn68xx;
10218 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cn68xxp1;
10219 	struct cvmx_gmxx_tx_pause_pkt_dmac_s  cnf71xx;
10220 };
10221 typedef union cvmx_gmxx_tx_pause_pkt_dmac cvmx_gmxx_tx_pause_pkt_dmac_t;
10222 
10223 /**
10224  * cvmx_gmx#_tx_pause_pkt_type
10225  *
10226  * GMX_TX_PAUSE_PKT_TYPE = Packet Interface TX Pause Packet TYPE field
10227  *
10228  */
10229 union cvmx_gmxx_tx_pause_pkt_type {
10230 	uint64_t u64;
10231 	struct cvmx_gmxx_tx_pause_pkt_type_s {
10232 #ifdef __BIG_ENDIAN_BITFIELD
10233 	uint64_t reserved_16_63               : 48;
10234 	uint64_t type                         : 16; /**< The TYPE field placed is outbnd pause pkts */
10235 #else
10236 	uint64_t type                         : 16;
10237 	uint64_t reserved_16_63               : 48;
10238 #endif
10239 	} s;
10240 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn30xx;
10241 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn31xx;
10242 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn38xx;
10243 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn38xxp2;
10244 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn50xx;
10245 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn52xx;
10246 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn52xxp1;
10247 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn56xx;
10248 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn56xxp1;
10249 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn58xx;
10250 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn58xxp1;
10251 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn61xx;
10252 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn63xx;
10253 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn63xxp1;
10254 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn66xx;
10255 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn68xx;
10256 	struct cvmx_gmxx_tx_pause_pkt_type_s  cn68xxp1;
10257 	struct cvmx_gmxx_tx_pause_pkt_type_s  cnf71xx;
10258 };
10259 typedef union cvmx_gmxx_tx_pause_pkt_type cvmx_gmxx_tx_pause_pkt_type_t;
10260 
10261 /**
10262  * cvmx_gmx#_tx_prts
10263  *
10264  * Common
10265  *
10266  *
10267  * GMX_TX_PRTS = TX Ports
10268  *
10269  * Notes:
10270  * * The value programmed for PRTS is the number of the highest architected
10271  * port number on the interface, plus 1.  For example, if port 2 is the
10272  * highest architected port, then the programmed value should be 3 since
10273  * there are 3 ports in the system - 0, 1, and 2.
10274  */
10275 union cvmx_gmxx_tx_prts {
10276 	uint64_t u64;
10277 	struct cvmx_gmxx_tx_prts_s {
10278 #ifdef __BIG_ENDIAN_BITFIELD
10279 	uint64_t reserved_5_63                : 59;
10280 	uint64_t prts                         : 5;  /**< Number of ports allowed on the interface
10281                                                          (SGMII/1000Base-X only) */
10282 #else
10283 	uint64_t prts                         : 5;
10284 	uint64_t reserved_5_63                : 59;
10285 #endif
10286 	} s;
10287 	struct cvmx_gmxx_tx_prts_s            cn30xx;
10288 	struct cvmx_gmxx_tx_prts_s            cn31xx;
10289 	struct cvmx_gmxx_tx_prts_s            cn38xx;
10290 	struct cvmx_gmxx_tx_prts_s            cn38xxp2;
10291 	struct cvmx_gmxx_tx_prts_s            cn50xx;
10292 	struct cvmx_gmxx_tx_prts_s            cn52xx;
10293 	struct cvmx_gmxx_tx_prts_s            cn52xxp1;
10294 	struct cvmx_gmxx_tx_prts_s            cn56xx;
10295 	struct cvmx_gmxx_tx_prts_s            cn56xxp1;
10296 	struct cvmx_gmxx_tx_prts_s            cn58xx;
10297 	struct cvmx_gmxx_tx_prts_s            cn58xxp1;
10298 	struct cvmx_gmxx_tx_prts_s            cn61xx;
10299 	struct cvmx_gmxx_tx_prts_s            cn63xx;
10300 	struct cvmx_gmxx_tx_prts_s            cn63xxp1;
10301 	struct cvmx_gmxx_tx_prts_s            cn66xx;
10302 	struct cvmx_gmxx_tx_prts_s            cn68xx;
10303 	struct cvmx_gmxx_tx_prts_s            cn68xxp1;
10304 	struct cvmx_gmxx_tx_prts_s            cnf71xx;
10305 };
10306 typedef union cvmx_gmxx_tx_prts cvmx_gmxx_tx_prts_t;
10307 
10308 /**
10309  * cvmx_gmx#_tx_spi_ctl
10310  *
10311  * GMX_TX_SPI_CTL = Spi4 TX ModesSpi4
10312  *
10313  */
10314 union cvmx_gmxx_tx_spi_ctl {
10315 	uint64_t u64;
10316 	struct cvmx_gmxx_tx_spi_ctl_s {
10317 #ifdef __BIG_ENDIAN_BITFIELD
10318 	uint64_t reserved_2_63                : 62;
10319 	uint64_t tpa_clr                      : 1;  /**< TPA Clear Mode
10320                                                          Clear credit counter when satisifed status */
10321 	uint64_t cont_pkt                     : 1;  /**< Contiguous Packet Mode
10322                                                          Finish one packet before switching to another
10323                                                          Cannot be set in Spi4 pass-through mode */
10324 #else
10325 	uint64_t cont_pkt                     : 1;
10326 	uint64_t tpa_clr                      : 1;
10327 	uint64_t reserved_2_63                : 62;
10328 #endif
10329 	} s;
10330 	struct cvmx_gmxx_tx_spi_ctl_s         cn38xx;
10331 	struct cvmx_gmxx_tx_spi_ctl_s         cn38xxp2;
10332 	struct cvmx_gmxx_tx_spi_ctl_s         cn58xx;
10333 	struct cvmx_gmxx_tx_spi_ctl_s         cn58xxp1;
10334 };
10335 typedef union cvmx_gmxx_tx_spi_ctl cvmx_gmxx_tx_spi_ctl_t;
10336 
10337 /**
10338  * cvmx_gmx#_tx_spi_drain
10339  *
10340  * GMX_TX_SPI_DRAIN = Drain out Spi TX FIFO
10341  *
10342  */
10343 union cvmx_gmxx_tx_spi_drain {
10344 	uint64_t u64;
10345 	struct cvmx_gmxx_tx_spi_drain_s {
10346 #ifdef __BIG_ENDIAN_BITFIELD
10347 	uint64_t reserved_16_63               : 48;
10348 	uint64_t drain                        : 16; /**< Per port drain control
10349                                                          0=Normal operation
10350                                                          1=GMX TX will be popped, but no valid data will
10351                                                            be sent to SPX.  Credits are correctly returned
10352                                                            to PKO.  STX_IGN_CAL should be set to ignore
10353                                                            TPA and not stall due to back-pressure.
10354                                                          (PASS3 only) */
10355 #else
10356 	uint64_t drain                        : 16;
10357 	uint64_t reserved_16_63               : 48;
10358 #endif
10359 	} s;
10360 	struct cvmx_gmxx_tx_spi_drain_s       cn38xx;
10361 	struct cvmx_gmxx_tx_spi_drain_s       cn58xx;
10362 	struct cvmx_gmxx_tx_spi_drain_s       cn58xxp1;
10363 };
10364 typedef union cvmx_gmxx_tx_spi_drain cvmx_gmxx_tx_spi_drain_t;
10365 
10366 /**
10367  * cvmx_gmx#_tx_spi_max
10368  *
10369  * GMX_TX_SPI_MAX = RGMII TX Spi4 MAX
10370  *
10371  */
10372 union cvmx_gmxx_tx_spi_max {
10373 	uint64_t u64;
10374 	struct cvmx_gmxx_tx_spi_max_s {
10375 #ifdef __BIG_ENDIAN_BITFIELD
10376 	uint64_t reserved_23_63               : 41;
10377 	uint64_t slice                        : 7;  /**< Number of 16B blocks to transmit in a burst before
10378                                                          switching to the next port. SLICE does not always
10379                                                          limit the burst length transmitted by OCTEON.
10380                                                          Depending on the traffic pattern and
10381                                                          GMX_TX_SPI_ROUND programming, the next port could
10382                                                          be the same as the current port. In this case,
10383                                                          OCTEON may merge multiple sub-SLICE bursts into
10384                                                          one contiguous burst that is longer than SLICE
10385                                                          (as long as the burst does not cross a packet
10386                                                          boundary).
10387                                                          SLICE must be programmed to be >=
10388                                                            GMX_TX_SPI_THRESH[THRESH]
10389                                                          If SLICE==0, then the transmitter will tend to
10390                                                          send the complete packet. The port will only
10391                                                          switch if credits are exhausted or PKO cannot
10392                                                          keep up.
10393                                                          (90nm ONLY) */
10394 	uint64_t max2                         : 8;  /**< MAX2 (per Spi4.2 spec) */
10395 	uint64_t max1                         : 8;  /**< MAX1 (per Spi4.2 spec)
10396                                                          MAX1 >= GMX_TX_SPI_THRESH[THRESH] */
10397 #else
10398 	uint64_t max1                         : 8;
10399 	uint64_t max2                         : 8;
10400 	uint64_t slice                        : 7;
10401 	uint64_t reserved_23_63               : 41;
10402 #endif
10403 	} s;
10404 	struct cvmx_gmxx_tx_spi_max_cn38xx {
10405 #ifdef __BIG_ENDIAN_BITFIELD
10406 	uint64_t reserved_16_63               : 48;
10407 	uint64_t max2                         : 8;  /**< MAX2 (per Spi4.2 spec) */
10408 	uint64_t max1                         : 8;  /**< MAX1 (per Spi4.2 spec)
10409                                                          MAX1 >= GMX_TX_SPI_THRESH[THRESH] */
10410 #else
10411 	uint64_t max1                         : 8;
10412 	uint64_t max2                         : 8;
10413 	uint64_t reserved_16_63               : 48;
10414 #endif
10415 	} cn38xx;
10416 	struct cvmx_gmxx_tx_spi_max_cn38xx    cn38xxp2;
10417 	struct cvmx_gmxx_tx_spi_max_s         cn58xx;
10418 	struct cvmx_gmxx_tx_spi_max_s         cn58xxp1;
10419 };
10420 typedef union cvmx_gmxx_tx_spi_max cvmx_gmxx_tx_spi_max_t;
10421 
10422 /**
10423  * cvmx_gmx#_tx_spi_round#
10424  *
10425  * GMX_TX_SPI_ROUND = Controls SPI4 TX Arbitration
10426  *
10427  */
10428 union cvmx_gmxx_tx_spi_roundx {
10429 	uint64_t u64;
10430 	struct cvmx_gmxx_tx_spi_roundx_s {
10431 #ifdef __BIG_ENDIAN_BITFIELD
10432 	uint64_t reserved_16_63               : 48;
10433 	uint64_t round                        : 16; /**< Which Spi ports participate in each arbitration
10434                                                           round.  Each bit corresponds to a spi port
10435                                                          - 0: this port will arb in this round
10436                                                          - 1: this port will not arb in this round
10437                                                           (90nm ONLY) */
10438 #else
10439 	uint64_t round                        : 16;
10440 	uint64_t reserved_16_63               : 48;
10441 #endif
10442 	} s;
10443 	struct cvmx_gmxx_tx_spi_roundx_s      cn58xx;
10444 	struct cvmx_gmxx_tx_spi_roundx_s      cn58xxp1;
10445 };
10446 typedef union cvmx_gmxx_tx_spi_roundx cvmx_gmxx_tx_spi_roundx_t;
10447 
10448 /**
10449  * cvmx_gmx#_tx_spi_thresh
10450  *
10451  * GMX_TX_SPI_THRESH = RGMII TX Spi4 Transmit Threshold
10452  *
10453  *
10454  * Notes:
10455  * Note: zero will map to 0x20
10456  *
10457  * This will normally creates Spi4 traffic bursts at least THRESH in length.
10458  * If dclk > eclk, then this rule may not always hold and Octeon may split
10459  * transfers into smaller bursts - some of which could be as short as 16B.
10460  * Octeon will never violate the Spi4.2 spec and send a non-EOP burst that is
10461  * not a multiple of 16B.
10462  */
10463 union cvmx_gmxx_tx_spi_thresh {
10464 	uint64_t u64;
10465 	struct cvmx_gmxx_tx_spi_thresh_s {
10466 #ifdef __BIG_ENDIAN_BITFIELD
10467 	uint64_t reserved_6_63                : 58;
10468 	uint64_t thresh                       : 6;  /**< Transmit threshold in 16B blocks - cannot be zero
10469                                                          THRESH <= TX_FIFO size   (in non-passthrough mode)
10470                                                          THRESH <= TX_FIFO size-2 (in passthrough mode)
10471                                                          THRESH <= GMX_TX_SPI_MAX[MAX1]
10472                                                          THRESH <= GMX_TX_SPI_MAX[MAX2], if not then is it
10473                                                           possible for Octeon to send a Spi4 data burst of
10474                                                           MAX2 <= burst <= THRESH 16B ticks
10475                                                          GMX_TX_SPI_MAX[SLICE] must be programmed to be >=
10476                                                            THRESH */
10477 #else
10478 	uint64_t thresh                       : 6;
10479 	uint64_t reserved_6_63                : 58;
10480 #endif
10481 	} s;
10482 	struct cvmx_gmxx_tx_spi_thresh_s      cn38xx;
10483 	struct cvmx_gmxx_tx_spi_thresh_s      cn38xxp2;
10484 	struct cvmx_gmxx_tx_spi_thresh_s      cn58xx;
10485 	struct cvmx_gmxx_tx_spi_thresh_s      cn58xxp1;
10486 };
10487 typedef union cvmx_gmxx_tx_spi_thresh cvmx_gmxx_tx_spi_thresh_t;
10488 
10489 /**
10490  * cvmx_gmx#_tx_xaui_ctl
10491  */
10492 union cvmx_gmxx_tx_xaui_ctl {
10493 	uint64_t u64;
10494 	struct cvmx_gmxx_tx_xaui_ctl_s {
10495 #ifdef __BIG_ENDIAN_BITFIELD
10496 	uint64_t reserved_11_63               : 53;
10497 	uint64_t hg_pause_hgi                 : 2;  /**< HGI Field for HW generated HiGig pause packets
10498                                                          (XAUI mode only) */
10499 	uint64_t hg_en                        : 1;  /**< Enable HiGig Mode
10500                                                          When HG_EN is set and GMX_RX_UDD_SKP[SKIP]=12
10501                                                           the interface is in HiGig/HiGig+ mode and the
10502                                                           following must be set:
10503                                                           GMX_RX_FRM_CTL[PRE_CHK] == 0
10504                                                           GMX_RX_UDD_SKP[FCSSEL] == 0
10505                                                           GMX_RX_UDD_SKP[SKIP] == 12
10506                                                           GMX_TX_APPEND[PREAMBLE] == 0
10507                                                          When HG_EN is set and GMX_RX_UDD_SKP[SKIP]=16
10508                                                           the interface is in HiGig2 mode and the
10509                                                           following must be set:
10510                                                           GMX_RX_FRM_CTL[PRE_CHK] == 0
10511                                                           GMX_RX_UDD_SKP[FCSSEL] == 0
10512                                                           GMX_RX_UDD_SKP[SKIP] == 16
10513                                                           GMX_TX_APPEND[PREAMBLE] == 0
10514                                                           GMX_PRT0_CBFC_CTL[RX_EN] == 0
10515                                                           GMX_PRT0_CBFC_CTL[TX_EN] == 0
10516                                                          (XAUI mode only) */
10517 	uint64_t reserved_7_7                 : 1;
10518 	uint64_t ls_byp                       : 1;  /**< Bypass the link status as determined by the XGMII
10519                                                          receiver and set the link status of the
10520                                                          transmitter to LS.
10521                                                          (XAUI mode only) */
10522 	uint64_t ls                           : 2;  /**< Link Status
10523                                                          0 = Link Ok
10524                                                              Link runs normally. RS passes MAC data to PCS
10525                                                          1 = Local Fault
10526                                                              RS layer sends continuous remote fault
10527                                                               sequences.
10528                                                          2 = Remote Fault
10529                                                              RS layer sends continuous idles sequences
10530                                                          3 = Link Drain
10531                                                              RS layer drops full packets to allow GMX and
10532                                                               PKO to drain their FIFOs
10533                                                          (XAUI mode only) */
10534 	uint64_t reserved_2_3                 : 2;
10535 	uint64_t uni_en                       : 1;  /**< Enable Unidirectional Mode (IEEE Clause 66)
10536                                                          (XAUI mode only) */
10537 	uint64_t dic_en                       : 1;  /**< Enable the deficit idle counter for IFG averaging
10538                                                          (XAUI mode only) */
10539 #else
10540 	uint64_t dic_en                       : 1;
10541 	uint64_t uni_en                       : 1;
10542 	uint64_t reserved_2_3                 : 2;
10543 	uint64_t ls                           : 2;
10544 	uint64_t ls_byp                       : 1;
10545 	uint64_t reserved_7_7                 : 1;
10546 	uint64_t hg_en                        : 1;
10547 	uint64_t hg_pause_hgi                 : 2;
10548 	uint64_t reserved_11_63               : 53;
10549 #endif
10550 	} s;
10551 	struct cvmx_gmxx_tx_xaui_ctl_s        cn52xx;
10552 	struct cvmx_gmxx_tx_xaui_ctl_s        cn52xxp1;
10553 	struct cvmx_gmxx_tx_xaui_ctl_s        cn56xx;
10554 	struct cvmx_gmxx_tx_xaui_ctl_s        cn56xxp1;
10555 	struct cvmx_gmxx_tx_xaui_ctl_s        cn61xx;
10556 	struct cvmx_gmxx_tx_xaui_ctl_s        cn63xx;
10557 	struct cvmx_gmxx_tx_xaui_ctl_s        cn63xxp1;
10558 	struct cvmx_gmxx_tx_xaui_ctl_s        cn66xx;
10559 	struct cvmx_gmxx_tx_xaui_ctl_s        cn68xx;
10560 	struct cvmx_gmxx_tx_xaui_ctl_s        cn68xxp1;
10561 	struct cvmx_gmxx_tx_xaui_ctl_s        cnf71xx;
10562 };
10563 typedef union cvmx_gmxx_tx_xaui_ctl cvmx_gmxx_tx_xaui_ctl_t;
10564 
10565 /**
10566  * cvmx_gmx#_xaui_ext_loopback
10567  */
10568 union cvmx_gmxx_xaui_ext_loopback {
10569 	uint64_t u64;
10570 	struct cvmx_gmxx_xaui_ext_loopback_s {
10571 #ifdef __BIG_ENDIAN_BITFIELD
10572 	uint64_t reserved_5_63                : 59;
10573 	uint64_t en                           : 1;  /**< Loopback enable
10574                                                          Puts the packet interface in external loopback
10575                                                          mode on the XAUI bus in which the RX lines are
10576                                                          reflected on the TX lines.
10577                                                          (XAUI mode only) */
10578 	uint64_t thresh                       : 4;  /**< Threshhold on the TX FIFO
10579                                                          SW must only write the typical value.  Any other
10580                                                          value will cause loopback mode not to function
10581                                                          correctly.
10582                                                          (XAUI mode only) */
10583 #else
10584 	uint64_t thresh                       : 4;
10585 	uint64_t en                           : 1;
10586 	uint64_t reserved_5_63                : 59;
10587 #endif
10588 	} s;
10589 	struct cvmx_gmxx_xaui_ext_loopback_s  cn52xx;
10590 	struct cvmx_gmxx_xaui_ext_loopback_s  cn52xxp1;
10591 	struct cvmx_gmxx_xaui_ext_loopback_s  cn56xx;
10592 	struct cvmx_gmxx_xaui_ext_loopback_s  cn56xxp1;
10593 	struct cvmx_gmxx_xaui_ext_loopback_s  cn61xx;
10594 	struct cvmx_gmxx_xaui_ext_loopback_s  cn63xx;
10595 	struct cvmx_gmxx_xaui_ext_loopback_s  cn63xxp1;
10596 	struct cvmx_gmxx_xaui_ext_loopback_s  cn66xx;
10597 	struct cvmx_gmxx_xaui_ext_loopback_s  cn68xx;
10598 	struct cvmx_gmxx_xaui_ext_loopback_s  cn68xxp1;
10599 	struct cvmx_gmxx_xaui_ext_loopback_s  cnf71xx;
10600 };
10601 typedef union cvmx_gmxx_xaui_ext_loopback cvmx_gmxx_xaui_ext_loopback_t;
10602 
10603 #endif
10604