xref: /dpdk/drivers/common/cnxk/roc_mbox.h (revision e746aec1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #ifndef __ROC_MBOX_H__
6 #define __ROC_MBOX_H__
7 
8 #include <errno.h>
9 #include <stdbool.h>
10 #include <stdint.h>
11 
12 /* Device memory does not support unaligned access, instruct compiler to
13  * not optimize the memory access when working with mailbox memory.
14  */
15 #define __io volatile
16 
17 /* Header which precedes all mbox messages */
18 struct mbox_hdr {
19 	uint64_t __io msg_size; /* Total msgs size embedded */
20 	uint16_t __io num_msgs; /* No of msgs embedded */
21 };
22 
23 /* Header which precedes every msg and is also part of it */
24 struct mbox_msghdr {
25 	uint16_t __io pcifunc; /* Who's sending this msg */
26 	uint16_t __io id;      /* Mbox message ID */
27 #define MBOX_REQ_SIG (0xdead)
28 #define MBOX_RSP_SIG (0xbeef)
29 	/* Signature, for validating corrupted msgs */
30 	uint16_t __io sig;
31 #define MBOX_VERSION (0x000b)
32 	/* Version of msg's structure for this ID */
33 	uint16_t __io ver;
34 	/* Offset of next msg within mailbox region */
35 	uint16_t __io next_msgoff;
36 	int __io rc; /* Msg processed response code */
37 };
38 
39 /* Mailbox message types */
40 #define MBOX_MSG_MASK	 0xFFFF
41 #define MBOX_MSG_INVALID 0xFFFE
42 #define MBOX_MSG_MAX	 0xFFFF
43 
44 #define MBOX_MESSAGES                                                          \
45 	/* Generic mbox IDs (range 0x000 - 0x1FF) */                           \
46 	M(READY, 0x001, ready, msg_req, ready_msg_rsp)                         \
47 	M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach_req, msg_rsp) \
48 	M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach_req, msg_rsp) \
49 	M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp)        \
50 	M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp)           \
51 	M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp)                             \
52 	M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp)                             \
53 	M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp)              \
54 	M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp)               \
55 	M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req,        \
56 	  msg_rsp)                                                             \
57 	/* CGX mbox IDs (range 0x200 - 0x3FF) */                               \
58 	M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp)             \
59 	M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp)               \
60 	M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp)                 \
61 	M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get,  \
62 	  cgx_mac_addr_set_or_get)                                             \
63 	M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get,  \
64 	  cgx_mac_addr_set_or_get)                                             \
65 	M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp)     \
66 	M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp)   \
67 	M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp) \
68 	M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp)   \
69 	M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req,                  \
70 	  cgx_link_info_msg)                                                   \
71 	M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp)       \
72 	M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp)     \
73 	M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp)       \
74 	M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp)     \
75 	M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg,      \
76 	  cgx_pause_frm_cfg)                                                   \
77 	M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
78 	M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode)           \
79 	M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req,     \
80 	  cgx_mac_addr_add_rsp)                                                \
81 	M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req,     \
82 	  msg_rsp)                                                             \
83 	M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req,    \
84 	  cgx_max_dmac_entries_get_rsp)                                        \
85 	M(CGX_SET_LINK_STATE, 0x214, cgx_set_link_state,                       \
86 	  cgx_set_link_state_msg, msg_rsp)                                     \
87 	M(CGX_GET_PHY_MOD_TYPE, 0x215, cgx_get_phy_mod_type, msg_req,          \
88 	  cgx_phy_mod_type)                                                    \
89 	M(CGX_SET_PHY_MOD_TYPE, 0x216, cgx_set_phy_mod_type, cgx_phy_mod_type, \
90 	  msg_rsp)                                                             \
91 	M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp)     \
92 	M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,  \
93 	  cgx_set_link_mode_rsp)                                               \
94 	M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req,        \
95 	  msg_rsp)                                                             \
96 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
97 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
98 	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg,  \
99 	  cgx_pfc_rsp)                                                         \
100 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
101 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
102 	  npa_lf_alloc_rsp)                                                    \
103 	M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp)                   \
104 	M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp)       \
105 	M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req,      \
106 	  msg_rsp)                                                             \
107 	/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */                          \
108 	M(SSO_LF_ALLOC, 0x600, sso_lf_alloc, sso_lf_alloc_req,                 \
109 	  sso_lf_alloc_rsp)                                                    \
110 	M(SSO_LF_FREE, 0x601, sso_lf_free, sso_lf_free_req, msg_rsp)           \
111 	M(SSOW_LF_ALLOC, 0x602, ssow_lf_alloc, ssow_lf_alloc_req, msg_rsp)     \
112 	M(SSOW_LF_FREE, 0x603, ssow_lf_free, ssow_lf_free_req, msg_rsp)        \
113 	M(SSO_HW_SETCONFIG, 0x604, sso_hw_setconfig, sso_hw_setconfig,         \
114 	  msg_rsp)                                                             \
115 	M(SSO_GRP_SET_PRIORITY, 0x605, sso_grp_set_priority, sso_grp_priority, \
116 	  msg_rsp)                                                             \
117 	M(SSO_GRP_GET_PRIORITY, 0x606, sso_grp_get_priority, sso_info_req,     \
118 	  sso_grp_priority)                                                    \
119 	M(SSO_WS_CACHE_INV, 0x607, sso_ws_cache_inv, ssow_lf_inv_req, msg_rsp) \
120 	M(SSO_GRP_QOS_CONFIG, 0x608, sso_grp_qos_config, sso_grp_qos_cfg,      \
121 	  msg_rsp)                                                             \
122 	M(SSO_GRP_GET_STATS, 0x609, sso_grp_get_stats, sso_info_req,           \
123 	  sso_grp_stats)                                                       \
124 	M(SSO_HWS_GET_STATS, 0x610, sso_hws_get_stats, sso_info_req,           \
125 	  sso_hws_stats)                                                       \
126 	M(SSO_HW_RELEASE_XAQ, 0x611, sso_hw_release_xaq_aura,                  \
127 	  sso_hw_xaq_release, msg_rsp)                                         \
128 	M(SSO_CONFIG_LSW, 0x612, ssow_config_lsw, ssow_config_lsw, msg_rsp)    \
129 	M(SSO_HWS_CHNG_MSHIP, 0x613, ssow_chng_mship, ssow_chng_mship,         \
130 	  msg_rsp)                                                             \
131 	/* TIM mbox IDs (range 0x800 - 0x9FF) */                               \
132 	M(TIM_LF_ALLOC, 0x800, tim_lf_alloc, tim_lf_alloc_req,                 \
133 	  tim_lf_alloc_rsp)                                                    \
134 	M(TIM_LF_FREE, 0x801, tim_lf_free, tim_ring_req, msg_rsp)              \
135 	M(TIM_CONFIG_RING, 0x802, tim_config_ring, tim_config_req, msg_rsp)    \
136 	M(TIM_ENABLE_RING, 0x803, tim_enable_ring, tim_ring_req,               \
137 	  tim_enable_rsp)                                                      \
138 	M(TIM_DISABLE_RING, 0x804, tim_disable_ring, tim_ring_req, msg_rsp)    \
139 	M(TIM_GET_MIN_INTVL, 0x805, tim_get_min_intvl, tim_intvl_req,          \
140 	  tim_intvl_rsp)                                                       \
141 	/* CPT mbox IDs (range 0xA00 - 0xBFF) */                               \
142 	M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, msg_rsp)    \
143 	M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp)                   \
144 	M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg,    \
145 	  cpt_rd_wr_reg_msg)                                                   \
146 	M(CPT_SET_CRYPTO_GRP, 0xA03, cpt_set_crypto_grp,                       \
147 	  cpt_set_crypto_grp_req_msg, msg_rsp)                                 \
148 	M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg,                   \
149 	  cpt_inline_ipsec_cfg_msg, msg_rsp)                                   \
150 	M(CPT_STATS, 0xA05, cpt_sts_get, cpt_sts_req, cpt_sts_rsp)             \
151 	M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req,     \
152 	  msg_rsp)                                                             \
153 	M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp)     \
154 	M(CPT_RX_INLINE_LF_CFG, 0xBFE, cpt_rx_inline_lf_cfg,                   \
155 	  cpt_rx_inline_lf_cfg_msg, msg_rsp)                                   \
156 	M(CPT_GET_CAPS, 0xBFD, cpt_caps_get, msg_req, cpt_caps_rsp_msg)        \
157 	M(CPT_GET_ENG_GRP, 0xBFF, cpt_eng_grp_get, cpt_eng_grp_req,            \
158 	  cpt_eng_grp_rsp)                                                     \
159 	/* REE mbox IDs (range 0xE00 - 0xFFF) */                               \
160 	M(REE_CONFIG_LF, 0xE01, ree_config_lf, ree_lf_req_msg, msg_rsp)        \
161 	M(REE_RD_WR_REGISTER, 0xE02, ree_rd_wr_register, ree_rd_wr_reg_msg,    \
162 	  ree_rd_wr_reg_msg)                                                   \
163 	M(REE_RULE_DB_PROG, 0xE03, ree_rule_db_prog, ree_rule_db_prog_req_msg, \
164 	  msg_rsp)                                                             \
165 	M(REE_RULE_DB_LEN_GET, 0xE04, ree_rule_db_len_get, ree_req_msg,        \
166 	  ree_rule_db_len_rsp_msg)                                             \
167 	M(REE_RULE_DB_GET, 0xE05, ree_rule_db_get, ree_rule_db_get_req_msg,    \
168 	  ree_rule_db_get_rsp_msg)                                             \
169 	/* SDP mbox IDs (range 0x1000 - 0x11FF) */                             \
170 	M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg,     \
171 	  msg_rsp)                                                             \
172 	/* NPC mbox IDs (range 0x6000 - 0x7FFF) */                             \
173 	M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry,                  \
174 	  npc_mcam_alloc_entry_req, npc_mcam_alloc_entry_rsp)                  \
175 	M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry,                    \
176 	  npc_mcam_free_entry_req, msg_rsp)                                    \
177 	M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry,                  \
178 	  npc_mcam_write_entry_req, msg_rsp)                                   \
179 	M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry,                      \
180 	  npc_mcam_ena_dis_entry_req, msg_rsp)                                 \
181 	M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry,                      \
182 	  npc_mcam_ena_dis_entry_req, msg_rsp)                                 \
183 	M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry,                  \
184 	  npc_mcam_shift_entry_req, npc_mcam_shift_entry_rsp)                  \
185 	M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter,              \
186 	  npc_mcam_alloc_counter_req, npc_mcam_alloc_counter_rsp)              \
187 	M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter,                \
188 	  npc_mcam_oper_counter_req, msg_rsp)                                  \
189 	M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter,              \
190 	  npc_mcam_unmap_counter_req, msg_rsp)                                 \
191 	M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter,              \
192 	  npc_mcam_oper_counter_req, msg_rsp)                                  \
193 	M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats,              \
194 	  npc_mcam_oper_counter_req, npc_mcam_oper_counter_rsp)                \
195 	M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b,                              \
196 	  npc_mcam_alloc_and_write_entry, npc_mcam_alloc_and_write_entry_req,  \
197 	  npc_mcam_alloc_and_write_entry_rsp)                                  \
198 	M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, msg_req,                   \
199 	  npc_get_kex_cfg_rsp)                                                 \
200 	M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, npc_install_flow_req,    \
201 	  npc_install_flow_rsp)                                                \
202 	M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, npc_delete_flow_req,       \
203 	  msg_rsp)                                                             \
204 	M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry,                    \
205 	  npc_mcam_read_entry_req, npc_mcam_read_entry_rsp)                    \
206 	M(NPC_SET_PKIND, 0x6010, npc_set_pkind, npc_set_pkind, msg_rsp)        \
207 	M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, msg_req,  \
208 	  npc_mcam_read_base_rule_rsp)                                         \
209 	M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats,                    \
210 	  npc_mcam_get_stats_req, npc_mcam_get_stats_rsp)                      \
211 	/* NIX mbox IDs (range 0x8000 - 0xFFFF) */                             \
212 	M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, nix_lf_alloc_req,                \
213 	  nix_lf_alloc_rsp)                                                    \
214 	M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp)          \
215 	M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp)      \
216 	M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, hwctx_disable_req,     \
217 	  msg_rsp)                                                             \
218 	M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, nix_txsch_alloc_req,       \
219 	  nix_txsch_alloc_rsp)                                                 \
220 	M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
221 	M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config,           \
222 	  nix_txschq_config)                                                   \
223 	M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp)              \
224 	M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp)        \
225 	M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg,                    \
226 	  nix_rss_flowkey_cfg, nix_rss_flowkey_cfg_rsp)                        \
227 	M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr,        \
228 	  msg_rsp)                                                             \
229 	M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp)      \
230 	M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp)        \
231 	M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp)          \
232 	M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp)            \
233 	M(NIX_MARK_FORMAT_CFG, 0x800f, nix_mark_format_cfg,                    \
234 	  nix_mark_format_cfg, nix_mark_format_cfg_rsp)                        \
235 	M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp)         \
236 	M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, nix_lso_format_cfg,  \
237 	  nix_lso_format_cfg_rsp)                                              \
238 	M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req,         \
239 	  msg_rsp)                                                             \
240 	M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req,       \
241 	  msg_rsp)                                                             \
242 	M(NIX_SET_VLAN_TPID, 0x8015, nix_set_vlan_tpid, nix_set_vlan_tpid,     \
243 	  msg_rsp)                                                             \
244 	M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req,                \
245 	  nix_bp_cfg_rsp)                                                      \
246 	M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp)     \
247 	M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req,                 \
248 	  nix_get_mac_addr_rsp)                                                \
249 	M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg,                  \
250 	  nix_inline_ipsec_cfg, msg_rsp)                                       \
251 	M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg,            \
252 	  nix_inline_ipsec_lf_cfg, msg_rsp)                                    \
253 	M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req,    \
254 	  nix_cn10k_aq_enq_rsp)                                                \
255 	M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info)      \
256 	M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc,                      \
257 	  nix_bandprof_alloc_req, nix_bandprof_alloc_rsp)                      \
258 	M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
259 	  msg_rsp)                                                             \
260 	M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req,   \
261 	  nix_bandprof_get_hwinfo_rsp)                                         \
262 	M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req,        \
263 	  nix_bp_cfg_rsp)                                                      \
264 	M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req,      \
265 	  msg_rsp)                                                             \
266 	M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)
267 
268 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
269 #define MBOX_UP_CGX_MESSAGES                                                   \
270 	M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)   \
271 	M(CGX_PTP_RX_INFO, 0xC01, cgx_ptp_rx_info, cgx_ptp_rx_info_msg, msg_rsp)
272 
273 enum {
274 #define M(_name, _id, _1, _2, _3) MBOX_MSG_##_name = _id,
275 	MBOX_MESSAGES MBOX_UP_CGX_MESSAGES
276 #undef M
277 };
278 
279 /* Mailbox message formats */
280 
281 #define RVU_DEFAULT_PF_FUNC 0xFFFF
282 
283 /* Generic request msg used for those mbox messages which
284  * don't send any data in the request.
285  */
286 struct msg_req {
287 	struct mbox_msghdr hdr;
288 };
289 
290 /* Generic response msg used a ack or response for those mbox
291  * messages which does not have a specific rsp msg format.
292  */
293 struct msg_rsp {
294 	struct mbox_msghdr hdr;
295 };
296 
297 /* RVU mailbox error codes
298  * Range 256 - 300.
299  */
300 enum rvu_af_status {
301 	RVU_INVALID_VF_ID = -256,
302 };
303 
304 struct ready_msg_rsp {
305 	struct mbox_msghdr hdr;
306 	uint16_t __io sclk_freq; /* SCLK frequency */
307 	uint16_t __io rclk_freq; /* RCLK frequency */
308 };
309 
310 enum npc_pkind_type {
311 	NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
312 	NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
313 	NPC_RX_CHLEN24B_PKIND,
314 	NPC_RX_CPT_HDR_PKIND,
315 	NPC_RX_CHLEN90B_PKIND,
316 	NPC_TX_HIGIG_PKIND,
317 	NPC_RX_HIGIG_PKIND,
318 	NPC_RX_EXDSA_PKIND,
319 	NPC_RX_EDSA_PKIND,
320 	NPC_TX_DEF_PKIND,
321 };
322 
323 /* Struct to set pkind */
324 struct npc_set_pkind {
325 	struct mbox_msghdr hdr;
326 #define ROC_PRIV_FLAGS_DEFAULT	  BIT_ULL(0)
327 #define ROC_PRIV_FLAGS_EDSA	  BIT_ULL(1)
328 #define ROC_PRIV_FLAGS_HIGIG	  BIT_ULL(2)
329 #define ROC_PRIV_FLAGS_LEN_90B	  BIT_ULL(3)
330 #define ROC_PRIV_FLAGS_EXDSA	  BIT_ULL(4)
331 #define ROC_PRIV_FLAGS_VLAN_EXDSA BIT_ULL(5)
332 #define ROC_PRIV_FLAGS_PRE_L2	  BIT_ULL(6)
333 #define ROC_PRIV_FLAGS_CUSTOM	  BIT_ULL(63)
334 	uint64_t __io mode;
335 #define PKIND_TX BIT_ULL(0)
336 #define PKIND_RX BIT_ULL(1)
337 	uint8_t __io dir;
338 	uint8_t __io pkind; /* valid only in case custom flag */
339 	uint8_t __io var_len_off;
340 	/* Offset of custom header length field.
341 	 * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND
342 	 */
343 	uint8_t __io var_len_off_mask; /* Mask for length with in offset */
344 	uint8_t __io shift_dir;
345 	/* Shift direction to get length of the
346 	 * header at var_len_off
347 	 */
348 };
349 
350 /* Structure for requesting resource provisioning.
351  * 'modify' flag to be used when either requesting more
352  * or to detach partial of a certain resource type.
353  * Rest of the fields specify how many of what type to
354  * be attached.
355  * To request LFs from two blocks of same type this mailbox
356  * can be sent twice as below:
357  *      struct rsrc_attach *attach;
358  *       .. Allocate memory for message ..
359  *       attach->cptlfs = 3; <3 LFs from CPT0>
360  *       .. Send message ..
361  *       .. Allocate memory for message ..
362  *       attach->modify = 1;
363  *       attach->cpt_blkaddr = BLKADDR_CPT1;
364  *       attach->cptlfs = 2; <2 LFs from CPT1>
365  *       .. Send message ..
366  */
367 struct rsrc_attach_req {
368 	struct mbox_msghdr hdr;
369 	uint8_t __io modify : 1;
370 	uint8_t __io npalf : 1;
371 	uint8_t __io nixlf : 1;
372 	uint16_t __io sso;
373 	uint16_t __io ssow;
374 	uint16_t __io timlfs;
375 	uint16_t __io cptlfs;
376 	uint16_t __io reelfs;
377 	/* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
378 	int __io cpt_blkaddr;
379 	/* BLKADDR_REE0/BLKADDR_REE1 or 0 for BLKADDR_REE0 */
380 	int __io ree_blkaddr;
381 };
382 
383 /* Structure for relinquishing resources.
384  * 'partial' flag to be used when relinquishing all resources
385  * but only of a certain type. If not set, all resources of all
386  * types provisioned to the RVU function will be detached.
387  */
388 struct rsrc_detach_req {
389 	struct mbox_msghdr hdr;
390 	uint8_t __io partial : 1;
391 	uint8_t __io npalf : 1;
392 	uint8_t __io nixlf : 1;
393 	uint8_t __io sso : 1;
394 	uint8_t __io ssow : 1;
395 	uint8_t __io timlfs : 1;
396 	uint8_t __io cptlfs : 1;
397 	uint8_t __io reelfs : 1;
398 };
399 
400 /* NIX Transmit schedulers */
401 #define NIX_TXSCH_LVL_SMQ 0x0
402 #define NIX_TXSCH_LVL_MDQ 0x0
403 #define NIX_TXSCH_LVL_TL4 0x1
404 #define NIX_TXSCH_LVL_TL3 0x2
405 #define NIX_TXSCH_LVL_TL2 0x3
406 #define NIX_TXSCH_LVL_TL1 0x4
407 #define NIX_TXSCH_LVL_CNT 0x5
408 
409 /*
410  * Number of resources available to the caller.
411  * In reply to MBOX_MSG_FREE_RSRC_CNT.
412  */
413 struct free_rsrcs_rsp {
414 	struct mbox_msghdr hdr;
415 	uint16_t __io schq[NIX_TXSCH_LVL_CNT];
416 	uint16_t __io sso;
417 	uint16_t __io tim;
418 	uint16_t __io ssow;
419 	uint16_t __io cpt;
420 	uint8_t __io npa;
421 	uint8_t __io nix;
422 	uint16_t __io schq_nix1[NIX_TXSCH_LVL_CNT];
423 	uint8_t __io nix1;
424 	uint8_t __io cpt1;
425 	uint8_t __io ree0;
426 	uint8_t __io ree1;
427 };
428 
429 #define MSIX_VECTOR_INVALID 0xFFFF
430 #define MAX_RVU_BLKLF_CNT   256
431 
432 struct msix_offset_rsp {
433 	struct mbox_msghdr hdr;
434 	uint16_t __io npa_msixoff;
435 	uint16_t __io nix_msixoff;
436 	uint16_t __io sso;
437 	uint16_t __io ssow;
438 	uint16_t __io timlfs;
439 	uint16_t __io cptlfs;
440 	uint16_t __io sso_msixoff[MAX_RVU_BLKLF_CNT];
441 	uint16_t __io ssow_msixoff[MAX_RVU_BLKLF_CNT];
442 	uint16_t __io timlf_msixoff[MAX_RVU_BLKLF_CNT];
443 	uint16_t __io cptlf_msixoff[MAX_RVU_BLKLF_CNT];
444 	uint16_t __io cpt1_lfs;
445 	uint16_t __io ree0_lfs;
446 	uint16_t __io ree1_lfs;
447 	uint16_t __io cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT];
448 	uint16_t __io ree0_lf_msixoff[MAX_RVU_BLKLF_CNT];
449 	uint16_t __io ree1_lf_msixoff[MAX_RVU_BLKLF_CNT];
450 };
451 
452 struct lmtst_tbl_setup_req {
453 	struct mbox_msghdr hdr;
454 
455 	uint64_t __io dis_sched_early_comp : 1;
456 	uint64_t __io sched_ena : 1;
457 	uint64_t __io dis_line_pref : 1;
458 	uint64_t __io ssow_pf_func : 13;
459 	uint16_t __io pcifunc;
460 	uint8_t __io use_local_lmt_region;
461 	uint64_t __io lmt_iova;
462 	uint64_t __io rsvd[2]; /* Future use */
463 };
464 
465 /* CGX mbox message formats */
466 
467 struct cgx_stats_rsp {
468 	struct mbox_msghdr hdr;
469 #define CGX_RX_STATS_COUNT 13
470 #define CGX_TX_STATS_COUNT 18
471 	uint64_t __io rx_stats[CGX_RX_STATS_COUNT];
472 	uint64_t __io tx_stats[CGX_TX_STATS_COUNT];
473 };
474 
475 struct rpm_stats_rsp {
476 	struct mbox_msghdr hdr;
477 #define RPM_RX_STATS_COUNT 43
478 #define RPM_TX_STATS_COUNT 34
479 	uint64_t __io rx_stats[RPM_RX_STATS_COUNT];
480 	uint64_t __io tx_stats[RPM_TX_STATS_COUNT];
481 };
482 
483 struct cgx_fec_stats_rsp {
484 	struct mbox_msghdr hdr;
485 	uint64_t __io fec_corr_blks;
486 	uint64_t __io fec_uncorr_blks;
487 };
488 
489 /* Structure for requesting the operation for
490  * setting/getting mac address in the CGX interface
491  */
492 struct cgx_mac_addr_set_or_get {
493 	struct mbox_msghdr hdr;
494 	uint8_t __io mac_addr[PLT_ETHER_ADDR_LEN];
495 };
496 
497 /* Structure for requesting the operation to
498  * add DMAC filter entry into CGX interface
499  */
500 struct cgx_mac_addr_add_req {
501 	struct mbox_msghdr hdr;
502 	uint8_t __io mac_addr[PLT_ETHER_ADDR_LEN];
503 };
504 
505 /* Structure for response against the operation to
506  * add DMAC filter entry into CGX interface
507  */
508 struct cgx_mac_addr_add_rsp {
509 	struct mbox_msghdr hdr;
510 	uint8_t __io index;
511 };
512 
513 /* Structure for requesting the operation to
514  * delete DMAC filter entry from CGX interface
515  */
516 struct cgx_mac_addr_del_req {
517 	struct mbox_msghdr hdr;
518 	uint8_t __io index;
519 };
520 
521 /* Structure for response against the operation to
522  * get maximum supported DMAC filter entries
523  */
524 struct cgx_max_dmac_entries_get_rsp {
525 	struct mbox_msghdr hdr;
526 	uint8_t __io max_dmac_filters;
527 };
528 
529 struct cgx_link_user_info {
530 	uint64_t __io link_up : 1;
531 	uint64_t __io full_duplex : 1;
532 	uint64_t __io lmac_type_id : 4;
533 	uint64_t __io speed : 20; /* speed in Mbps */
534 	uint64_t __io an : 1;	  /* AN supported or not */
535 	uint64_t __io fec : 2;	  /* FEC type if enabled else 0 */
536 	uint64_t __io port : 8;
537 #define LMACTYPE_STR_LEN 16
538 	char lmac_type[LMACTYPE_STR_LEN];
539 };
540 
541 struct cgx_link_info_msg {
542 	struct mbox_msghdr hdr;
543 	struct cgx_link_user_info link_info;
544 };
545 
546 struct cgx_ptp_rx_info_msg {
547 	struct mbox_msghdr hdr;
548 	uint8_t __io ptp_en;
549 };
550 
551 struct cgx_pause_frm_cfg {
552 	struct mbox_msghdr hdr;
553 	uint8_t __io set;
554 	/* set = 1 if the request is to config pause frames */
555 	/* set = 0 if the request is to fetch pause frames config */
556 	uint8_t __io rx_pause;
557 	uint8_t __io tx_pause;
558 };
559 
560 struct cgx_pfc_cfg {
561 	struct mbox_msghdr hdr;
562 	uint8_t __io rx_pause;
563 	uint8_t __io tx_pause;
564 	uint16_t __io pfc_en; /*  bitmap indicating enabled traffic classes */
565 };
566 
567 struct cgx_pfc_rsp {
568 	struct mbox_msghdr hdr;
569 	uint8_t __io rx_pause;
570 	uint8_t __io tx_pause;
571 };
572 
573 struct sfp_eeprom_s {
574 #define SFP_EEPROM_SIZE 256
575 	uint16_t __io sff_id;
576 	uint8_t __io buf[SFP_EEPROM_SIZE];
577 	uint64_t __io reserved;
578 };
579 
580 enum fec_type {
581 	ROC_FEC_NONE,
582 	ROC_FEC_BASER,
583 	ROC_FEC_RS,
584 };
585 
586 struct phy_s {
587 	uint64_t __io can_change_mod_type : 1;
588 	uint64_t __io mod_type : 1;
589 };
590 
591 struct cgx_lmac_fwdata_s {
592 	uint16_t __io rw_valid;
593 	uint64_t __io supported_fec;
594 	uint64_t __io supported_an;
595 	uint64_t __io supported_link_modes;
596 	/* Only applicable if AN is supported */
597 	uint64_t __io advertised_fec;
598 	uint64_t __io advertised_link_modes;
599 	/* Only applicable if SFP/QSFP slot is present */
600 	struct sfp_eeprom_s sfp_eeprom;
601 	struct phy_s phy;
602 #define LMAC_FWDATA_RESERVED_MEM 1023
603 	uint64_t __io reserved[LMAC_FWDATA_RESERVED_MEM];
604 };
605 
606 struct cgx_fw_data {
607 	struct mbox_msghdr hdr;
608 	struct cgx_lmac_fwdata_s fwdata;
609 };
610 
611 struct fec_mode {
612 	struct mbox_msghdr hdr;
613 	int __io fec;
614 };
615 
616 struct cgx_set_link_state_msg {
617 	struct mbox_msghdr hdr;
618 	uint8_t __io enable;
619 };
620 
621 struct cgx_phy_mod_type {
622 	struct mbox_msghdr hdr;
623 	int __io mod;
624 };
625 
626 struct cgx_set_link_mode_args {
627 	uint32_t __io speed;
628 	uint8_t __io duplex;
629 	uint8_t __io an;
630 	uint8_t __io ports;
631 	uint64_t __io mode;
632 };
633 
634 struct cgx_set_link_mode_req {
635 	struct mbox_msghdr hdr;
636 	struct cgx_set_link_mode_args args;
637 };
638 
639 struct cgx_set_link_mode_rsp {
640 	struct mbox_msghdr hdr;
641 	int __io status;
642 };
643 
644 /* NPA mbox message formats */
645 
646 /* NPA mailbox error codes
647  * Range 301 - 400.
648  */
649 enum npa_af_status {
650 	NPA_AF_ERR_PARAM = -301,
651 	NPA_AF_ERR_AQ_FULL = -302,
652 	NPA_AF_ERR_AQ_ENQUEUE = -303,
653 	NPA_AF_ERR_AF_LF_INVALID = -304,
654 	NPA_AF_ERR_AF_LF_ALLOC = -305,
655 	NPA_AF_ERR_LF_RESET = -306,
656 };
657 
658 #define NPA_AURA_SZ_0	 0
659 #define NPA_AURA_SZ_128	 1
660 #define NPA_AURA_SZ_256	 2
661 #define NPA_AURA_SZ_512	 3
662 #define NPA_AURA_SZ_1K	 4
663 #define NPA_AURA_SZ_2K	 5
664 #define NPA_AURA_SZ_4K	 6
665 #define NPA_AURA_SZ_8K	 7
666 #define NPA_AURA_SZ_16K	 8
667 #define NPA_AURA_SZ_32K	 9
668 #define NPA_AURA_SZ_64K	 10
669 #define NPA_AURA_SZ_128K 11
670 #define NPA_AURA_SZ_256K 12
671 #define NPA_AURA_SZ_512K 13
672 #define NPA_AURA_SZ_1M	 14
673 #define NPA_AURA_SZ_MAX	 15
674 
675 /* For NPA LF context alloc and init */
676 struct npa_lf_alloc_req {
677 	struct mbox_msghdr hdr;
678 	int __io node;
679 	int __io aura_sz;	/* No of auras. See NPA_AURA_SZ_* */
680 	uint32_t __io nr_pools; /* No of pools */
681 	uint64_t __io way_mask;
682 };
683 
684 struct npa_lf_alloc_rsp {
685 	struct mbox_msghdr hdr;
686 	uint32_t __io stack_pg_ptrs;  /* No of ptrs per stack page */
687 	uint32_t __io stack_pg_bytes; /* Size of stack page */
688 	uint16_t __io qints;	      /* NPA_AF_CONST::QINTS */
689 	uint8_t __io cache_lines;     /* Batch Alloc DMA */
690 };
691 
692 /* NPA AQ enqueue msg */
693 struct npa_aq_enq_req {
694 	struct mbox_msghdr hdr;
695 	uint32_t __io aura_id;
696 	uint8_t __io ctype;
697 	uint8_t __io op;
698 	union {
699 		/* Valid when op == WRITE/INIT and ctype == AURA.
700 		 * LF fills the pool_id in aura.pool_addr. AF will translate
701 		 * the pool_id to pool context pointer.
702 		 */
703 		__io struct npa_aura_s aura;
704 		/* Valid when op == WRITE/INIT and ctype == POOL */
705 		__io struct npa_pool_s pool;
706 	};
707 	/* Mask data when op == WRITE (1=write, 0=don't write) */
708 	union {
709 		/* Valid when op == WRITE and ctype == AURA */
710 		__io struct npa_aura_s aura_mask;
711 		/* Valid when op == WRITE and ctype == POOL */
712 		__io struct npa_pool_s pool_mask;
713 	};
714 };
715 
716 struct npa_aq_enq_rsp {
717 	struct mbox_msghdr hdr;
718 	union {
719 		/* Valid when op == READ and ctype == AURA */
720 		__io struct npa_aura_s aura;
721 		/* Valid when op == READ and ctype == POOL */
722 		__io struct npa_pool_s pool;
723 	};
724 };
725 
726 /* Disable all contexts of type 'ctype' */
727 struct hwctx_disable_req {
728 	struct mbox_msghdr hdr;
729 	uint8_t __io ctype;
730 };
731 
732 /* NIX mbox message formats */
733 
734 /* NIX mailbox error codes
735  * Range 401 - 500.
736  */
737 enum nix_af_status {
738 	NIX_AF_ERR_PARAM = -401,
739 	NIX_AF_ERR_AQ_FULL = -402,
740 	NIX_AF_ERR_AQ_ENQUEUE = -403,
741 	NIX_AF_ERR_AF_LF_INVALID = -404,
742 	NIX_AF_ERR_AF_LF_ALLOC = -405,
743 	NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
744 	NIX_AF_ERR_TLX_INVALID = -407,
745 	NIX_AF_ERR_RSS_SIZE_INVALID = -408,
746 	NIX_AF_ERR_RSS_GRPS_INVALID = -409,
747 	NIX_AF_ERR_FRS_INVALID = -410,
748 	NIX_AF_ERR_RX_LINK_INVALID = -411,
749 	NIX_AF_INVAL_TXSCHQ_CFG = -412,
750 	NIX_AF_SMQ_FLUSH_FAILED = -413,
751 	NIX_AF_ERR_LF_RESET = -414,
752 	NIX_AF_ERR_RSS_NOSPC_FIELD = -415,
753 	NIX_AF_ERR_RSS_NOSPC_ALGO = -416,
754 	NIX_AF_ERR_MARK_CFG_FAIL = -417,
755 	NIX_AF_ERR_LSO_CFG_FAIL = -418,
756 	NIX_AF_INVAL_NPA_PF_FUNC = -419,
757 	NIX_AF_INVAL_SSO_PF_FUNC = -420,
758 	NIX_AF_ERR_TX_VTAG_NOSPC = -421,
759 	NIX_AF_ERR_RX_VTAG_INUSE = -422,
760 	NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
761 };
762 
763 /* For NIX LF context alloc and init */
764 struct nix_lf_alloc_req {
765 	struct mbox_msghdr hdr;
766 	int __io node;
767 	uint32_t __io rq_cnt; /* No of receive queues */
768 	uint32_t __io sq_cnt; /* No of send queues */
769 	uint32_t __io cq_cnt; /* No of completion queues */
770 	uint8_t __io xqe_sz;
771 	uint16_t __io rss_sz;
772 	uint8_t __io rss_grps;
773 	uint16_t __io npa_func;
774 	/* RVU_DEFAULT_PF_FUNC == default pf_func associated with lf */
775 	uint16_t __io sso_func;
776 	uint64_t __io rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
777 	uint64_t __io way_mask;
778 #define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0)
779 #define NIX_LF_LBK_BLK_SEL	    BIT_ULL(1)
780 	uint64_t flags;
781 };
782 
783 struct nix_lf_alloc_rsp {
784 	struct mbox_msghdr hdr;
785 	uint16_t __io sqb_size;
786 	uint16_t __io rx_chan_base;
787 	uint16_t __io tx_chan_base;
788 	uint8_t __io rx_chan_cnt; /* Total number of RX channels */
789 	uint8_t __io tx_chan_cnt; /* Total number of TX channels */
790 	uint8_t __io lso_tsov4_idx;
791 	uint8_t __io lso_tsov6_idx;
792 	uint8_t __io mac_addr[PLT_ETHER_ADDR_LEN];
793 	uint8_t __io lf_rx_stats;     /* NIX_AF_CONST1::LF_RX_STATS */
794 	uint8_t __io lf_tx_stats;     /* NIX_AF_CONST1::LF_TX_STATS */
795 	uint16_t __io cints;	      /* NIX_AF_CONST2::CINTS */
796 	uint16_t __io qints;	      /* NIX_AF_CONST2::QINTS */
797 	uint8_t __io hw_rx_tstamp_en; /*set if rx timestamping enabled */
798 	uint8_t __io cgx_links;	      /* No. of CGX links present in HW */
799 	uint8_t __io lbk_links;	      /* No. of LBK links present in HW */
800 	uint8_t __io sdp_links;	      /* No. of SDP links present in HW */
801 	uint8_t tx_link;	      /* Transmit channel link number */
802 };
803 
804 struct nix_lf_free_req {
805 	struct mbox_msghdr hdr;
806 #define NIX_LF_DISABLE_FLOWS	 BIT_ULL(0)
807 #define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
808 	uint64_t __io flags;
809 };
810 
811 /* CN10x NIX AQ enqueue msg */
812 struct nix_cn10k_aq_enq_req {
813 	struct mbox_msghdr hdr;
814 	uint32_t __io qidx;
815 	uint8_t __io ctype;
816 	uint8_t __io op;
817 	union {
818 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RQ */
819 		__io struct nix_cn10k_rq_ctx_s rq;
820 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_SQ */
821 		__io struct nix_cn10k_sq_ctx_s sq;
822 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_CQ */
823 		__io struct nix_cq_ctx_s cq;
824 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RSS */
825 		__io struct nix_rsse_s rss;
826 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_MCE */
827 		__io struct nix_rx_mce_s mce;
828 		/* Valid when op == WRITE/INIT and
829 		 * ctype == NIX_AQ_CTYPE_BAND_PROF
830 		 */
831 		__io struct nix_band_prof_s prof;
832 	};
833 	/* Mask data when op == WRITE (1=write, 0=don't write) */
834 	union {
835 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RQ */
836 		__io struct nix_cn10k_rq_ctx_s rq_mask;
837 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_SQ */
838 		__io struct nix_cn10k_sq_ctx_s sq_mask;
839 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_CQ */
840 		__io struct nix_cq_ctx_s cq_mask;
841 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RSS */
842 		__io struct nix_rsse_s rss_mask;
843 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_MCE */
844 		__io struct nix_rx_mce_s mce_mask;
845 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_BAND_PROF */
846 		__io struct nix_band_prof_s prof_mask;
847 	};
848 };
849 
850 struct nix_cn10k_aq_enq_rsp {
851 	struct mbox_msghdr hdr;
852 	union {
853 		struct nix_cn10k_rq_ctx_s rq;
854 		struct nix_cn10k_sq_ctx_s sq;
855 		struct nix_cq_ctx_s cq;
856 		struct nix_rsse_s rss;
857 		struct nix_rx_mce_s mce;
858 		struct nix_band_prof_s prof;
859 	};
860 };
861 
862 /* NIX AQ enqueue msg */
863 struct nix_aq_enq_req {
864 	struct mbox_msghdr hdr;
865 	uint32_t __io qidx;
866 	uint8_t __io ctype;
867 	uint8_t __io op;
868 	union {
869 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RQ */
870 		__io struct nix_rq_ctx_s rq;
871 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_SQ */
872 		__io struct nix_sq_ctx_s sq;
873 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_CQ */
874 		__io struct nix_cq_ctx_s cq;
875 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RSS */
876 		__io struct nix_rsse_s rss;
877 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_MCE */
878 		__io struct nix_rx_mce_s mce;
879 	};
880 	/* Mask data when op == WRITE (1=write, 0=don't write) */
881 	union {
882 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RQ */
883 		__io struct nix_rq_ctx_s rq_mask;
884 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_SQ */
885 		__io struct nix_sq_ctx_s sq_mask;
886 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_CQ */
887 		__io struct nix_cq_ctx_s cq_mask;
888 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RSS */
889 		__io struct nix_rsse_s rss_mask;
890 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_MCE */
891 		__io struct nix_rx_mce_s mce_mask;
892 	};
893 };
894 
895 struct nix_aq_enq_rsp {
896 	struct mbox_msghdr hdr;
897 	union {
898 		__io struct nix_rq_ctx_s rq;
899 		__io struct nix_sq_ctx_s sq;
900 		__io struct nix_cq_ctx_s cq;
901 		__io struct nix_rsse_s rss;
902 		__io struct nix_rx_mce_s mce;
903 	};
904 };
905 
906 /* Tx scheduler/shaper mailbox messages */
907 
908 #define MAX_TXSCHQ_PER_FUNC 128
909 
910 struct nix_txsch_alloc_req {
911 	struct mbox_msghdr hdr;
912 	/* Scheduler queue count request at each level */
913 	uint16_t __io schq_contig[NIX_TXSCH_LVL_CNT]; /* Contig. queues */
914 	uint16_t __io schq[NIX_TXSCH_LVL_CNT];	      /* Non-Contig. queues */
915 };
916 
917 struct nix_txsch_alloc_rsp {
918 	struct mbox_msghdr hdr;
919 	/* Scheduler queue count allocated at each level */
920 	uint16_t __io schq_contig[NIX_TXSCH_LVL_CNT]; /* Contig. queues */
921 	uint16_t __io schq[NIX_TXSCH_LVL_CNT];	      /* Non-Contig. queues */
922 	/* Scheduler queue list allocated at each level */
923 	uint16_t __io schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
924 	uint16_t __io schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
925 	/* Traffic aggregation scheduler level */
926 	uint8_t __io aggr_level;
927 	/* Aggregation lvl's RR_PRIO config */
928 	uint8_t __io aggr_lvl_rr_prio;
929 	/* LINKX_CFG CSRs mapped to TL3 or TL2's index ? */
930 	uint8_t __io link_cfg_lvl;
931 };
932 
933 struct nix_txsch_free_req {
934 	struct mbox_msghdr hdr;
935 #define TXSCHQ_FREE_ALL BIT_ULL(0)
936 	uint16_t __io flags;
937 	/* Scheduler queue level to be freed */
938 	uint16_t __io schq_lvl;
939 	/* List of scheduler queues to be freed */
940 	uint16_t __io schq;
941 };
942 
943 struct nix_txschq_config {
944 	struct mbox_msghdr hdr;
945 	uint8_t __io lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
946 	uint8_t __io read;
947 #define TXSCHQ_IDX_SHIFT       16
948 #define TXSCHQ_IDX_MASK	       (BIT_ULL(10) - 1)
949 #define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
950 	uint8_t __io num_regs;
951 #define MAX_REGS_PER_MBOX_MSG 20
952 	uint64_t __io reg[MAX_REGS_PER_MBOX_MSG];
953 	uint64_t __io regval[MAX_REGS_PER_MBOX_MSG];
954 	/* All 0's => overwrite with new value */
955 	uint64_t __io regval_mask[MAX_REGS_PER_MBOX_MSG];
956 };
957 
958 struct nix_vtag_config {
959 	struct mbox_msghdr hdr;
960 	/* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
961 	uint8_t __io vtag_size;
962 	/* cfg_type is '0' for tx vlan cfg
963 	 * cfg_type is '1' for rx vlan cfg
964 	 */
965 	uint8_t __io cfg_type;
966 	union {
967 		/* Valid when cfg_type is '0' */
968 		struct {
969 			uint64_t __io vtag0;
970 			uint64_t __io vtag1;
971 
972 			/* cfg_vtag0 & cfg_vtag1 fields are valid
973 			 * when free_vtag0 & free_vtag1 are '0's.
974 			 */
975 			/* cfg_vtag0 = 1 to configure vtag0 */
976 			uint8_t __io cfg_vtag0 : 1;
977 			/* cfg_vtag1 = 1 to configure vtag1 */
978 			uint8_t __io cfg_vtag1 : 1;
979 
980 			/* vtag0_idx & vtag1_idx are only valid when
981 			 * both cfg_vtag0 & cfg_vtag1 are '0's,
982 			 * these fields are used along with free_vtag0
983 			 * & free_vtag1 to free the nix lf's tx_vlan
984 			 * configuration.
985 			 *
986 			 * Denotes the indices of tx_vtag def registers
987 			 * that needs to be cleared and freed.
988 			 */
989 			int __io vtag0_idx;
990 			int __io vtag1_idx;
991 
992 			/* Free_vtag0 & free_vtag1 fields are valid
993 			 * when cfg_vtag0 & cfg_vtag1 are '0's.
994 			 */
995 			/* Free_vtag0 = 1 clears vtag0 configuration
996 			 * vtag0_idx denotes the index to be cleared.
997 			 */
998 			uint8_t __io free_vtag0 : 1;
999 			/* Free_vtag1 = 1 clears vtag1 configuration
1000 			 * vtag1_idx denotes the index to be cleared.
1001 			 */
1002 			uint8_t __io free_vtag1 : 1;
1003 		} tx;
1004 
1005 		/* Valid when cfg_type is '1' */
1006 		struct {
1007 			/* Rx vtag type index, valid values are in 0..7 range */
1008 			uint8_t __io vtag_type;
1009 			/* Rx vtag strip */
1010 			uint8_t __io strip_vtag : 1;
1011 			/* Rx vtag capture */
1012 			uint8_t __io capture_vtag : 1;
1013 		} rx;
1014 	};
1015 };
1016 
1017 struct nix_vtag_config_rsp {
1018 	struct mbox_msghdr hdr;
1019 	/* Indices of tx_vtag def registers used to configure
1020 	 * tx vtag0 & vtag1 headers, these indices are valid
1021 	 * when nix_vtag_config mbox requested for vtag0 and/
1022 	 * or vtag1 configuration.
1023 	 */
1024 	int __io vtag0_idx;
1025 	int __io vtag1_idx;
1026 };
1027 
1028 struct nix_rss_flowkey_cfg {
1029 	struct mbox_msghdr hdr;
1030 	int __io mcam_index;	   /* MCAM entry index to modify */
1031 	uint32_t __io flowkey_cfg; /* Flowkey types selected */
1032 #define FLOW_KEY_TYPE_PORT	    BIT(0)
1033 #define FLOW_KEY_TYPE_IPV4	    BIT(1)
1034 #define FLOW_KEY_TYPE_IPV6	    BIT(2)
1035 #define FLOW_KEY_TYPE_TCP	    BIT(3)
1036 #define FLOW_KEY_TYPE_UDP	    BIT(4)
1037 #define FLOW_KEY_TYPE_SCTP	    BIT(5)
1038 #define FLOW_KEY_TYPE_NVGRE	    BIT(6)
1039 #define FLOW_KEY_TYPE_VXLAN	    BIT(7)
1040 #define FLOW_KEY_TYPE_GENEVE	    BIT(8)
1041 #define FLOW_KEY_TYPE_ETH_DMAC	    BIT(9)
1042 #define FLOW_KEY_TYPE_IPV6_EXT	    BIT(10)
1043 #define FLOW_KEY_TYPE_GTPU	    BIT(11)
1044 #define FLOW_KEY_TYPE_INNR_IPV4	    BIT(12)
1045 #define FLOW_KEY_TYPE_INNR_IPV6	    BIT(13)
1046 #define FLOW_KEY_TYPE_INNR_TCP	    BIT(14)
1047 #define FLOW_KEY_TYPE_INNR_UDP	    BIT(15)
1048 #define FLOW_KEY_TYPE_INNR_SCTP	    BIT(16)
1049 #define FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
1050 #define FLOW_KEY_TYPE_CH_LEN_90B    BIT(18)
1051 #define FLOW_KEY_TYPE_CUSTOM0	    BIT(19)
1052 #define FLOW_KEY_TYPE_VLAN	    BIT(20)
1053 #define FLOW_KEY_TYPE_L4_DST	    BIT(28)
1054 #define FLOW_KEY_TYPE_L4_SRC	    BIT(29)
1055 #define FLOW_KEY_TYPE_L3_DST	    BIT(30)
1056 #define FLOW_KEY_TYPE_L3_SRC	    BIT(31)
1057 	uint8_t __io group; /* RSS context or group */
1058 };
1059 
1060 struct nix_rss_flowkey_cfg_rsp {
1061 	struct mbox_msghdr hdr;
1062 	uint8_t __io alg_idx; /* Selected algo index */
1063 };
1064 
1065 struct nix_set_mac_addr {
1066 	struct mbox_msghdr hdr;
1067 	uint8_t __io mac_addr[PLT_ETHER_ADDR_LEN];
1068 };
1069 
1070 struct nix_get_mac_addr_rsp {
1071 	struct mbox_msghdr hdr;
1072 	uint8_t __io mac_addr[PLT_ETHER_ADDR_LEN];
1073 };
1074 
1075 struct nix_mark_format_cfg {
1076 	struct mbox_msghdr hdr;
1077 	uint8_t __io offset;
1078 	uint8_t __io y_mask;
1079 	uint8_t __io y_val;
1080 	uint8_t __io r_mask;
1081 	uint8_t __io r_val;
1082 };
1083 
1084 struct nix_mark_format_cfg_rsp {
1085 	struct mbox_msghdr hdr;
1086 	uint8_t __io mark_format_idx;
1087 };
1088 
1089 struct nix_lso_format_cfg {
1090 	struct mbox_msghdr hdr;
1091 	uint64_t __io field_mask;
1092 	uint64_t __io fields[NIX_LSO_FIELD_MAX];
1093 };
1094 
1095 struct nix_lso_format_cfg_rsp {
1096 	struct mbox_msghdr hdr;
1097 	uint8_t __io lso_format_idx;
1098 };
1099 
1100 struct nix_rx_mode {
1101 	struct mbox_msghdr hdr;
1102 #define NIX_RX_MODE_UCAST    BIT(0)
1103 #define NIX_RX_MODE_PROMISC  BIT(1)
1104 #define NIX_RX_MODE_ALLMULTI BIT(2)
1105 	uint16_t __io mode;
1106 };
1107 
1108 struct nix_rx_cfg {
1109 	struct mbox_msghdr hdr;
1110 #define NIX_RX_OL3_VERIFY BIT(0)
1111 #define NIX_RX_OL4_VERIFY BIT(1)
1112 #define NIX_RX_DROP_RE	  BIT(2)
1113 	uint8_t __io len_verify; /* Outer L3/L4 len check */
1114 #define NIX_RX_CSUM_OL4_VERIFY BIT(0)
1115 	uint8_t __io csum_verify; /* Outer L4 checksum verification */
1116 };
1117 
1118 struct nix_frs_cfg {
1119 	struct mbox_msghdr hdr;
1120 	uint8_t __io update_smq;    /* Update SMQ's min/max lens */
1121 	uint8_t __io update_minlen; /* Set minlen also */
1122 	uint8_t __io sdp_link;	    /* Set SDP RX link */
1123 	uint16_t __io maxlen;
1124 	uint16_t __io minlen;
1125 };
1126 
1127 struct nix_set_vlan_tpid {
1128 	struct mbox_msghdr hdr;
1129 #define NIX_VLAN_TYPE_INNER 0
1130 #define NIX_VLAN_TYPE_OUTER 1
1131 	uint8_t __io vlan_type;
1132 	uint16_t __io tpid;
1133 };
1134 
1135 struct nix_bp_cfg_req {
1136 	struct mbox_msghdr hdr;
1137 	uint16_t __io chan_base; /* Starting channel number */
1138 	uint8_t __io chan_cnt;	 /* Number of channels */
1139 	uint8_t __io bpid_per_chan;
1140 	/* bpid_per_chan = 0  assigns single bp id for range of channels */
1141 	/* bpid_per_chan = 1 assigns separate bp id for each channel */
1142 };
1143 
1144 /* PF can be mapped to either CGX or LBK interface,
1145  * so maximum 64 channels are possible.
1146  */
1147 #define NIX_MAX_CHAN	 64
1148 #define NIX_CGX_MAX_CHAN 16
1149 #define NIX_LBK_MAX_CHAN 1
1150 struct nix_bp_cfg_rsp {
1151 	struct mbox_msghdr hdr;
1152 	/* Channel and bpid mapping */
1153 	uint16_t __io chan_bpid[NIX_MAX_CHAN];
1154 	/* Number of channel for which bpids are assigned */
1155 	uint8_t __io chan_cnt;
1156 };
1157 
1158 /* Global NIX inline IPSec configuration */
1159 struct nix_inline_ipsec_cfg {
1160 	struct mbox_msghdr hdr;
1161 	uint32_t __io cpt_credit;
1162 	struct {
1163 		uint8_t __io egrp;
1164 		uint8_t __io opcode;
1165 	} gen_cfg;
1166 	struct {
1167 		uint16_t __io cpt_pf_func;
1168 		uint8_t __io cpt_slot;
1169 	} inst_qsel;
1170 	uint8_t __io enable;
1171 };
1172 
1173 /* Per NIX LF inline IPSec configuration */
1174 struct nix_inline_ipsec_lf_cfg {
1175 	struct mbox_msghdr hdr;
1176 	uint64_t __io sa_base_addr;
1177 	struct {
1178 		uint32_t __io tag_const;
1179 		uint16_t __io lenm1_max;
1180 		uint8_t __io sa_pow2_size;
1181 		uint8_t __io tt;
1182 	} ipsec_cfg0;
1183 	struct {
1184 		uint32_t __io sa_idx_max;
1185 		uint8_t __io sa_idx_w;
1186 	} ipsec_cfg1;
1187 	uint8_t __io enable;
1188 };
1189 
1190 struct nix_hw_info {
1191 	struct mbox_msghdr hdr;
1192 	uint16_t __io vwqe_delay;
1193 	uint16_t __io rsvd[15];
1194 };
1195 
1196 struct nix_bandprof_alloc_req {
1197 	struct mbox_msghdr hdr;
1198 	/* Count of profiles needed per layer */
1199 	uint16_t __io prof_count[NIX_RX_BAND_PROF_LAYER_MAX];
1200 };
1201 
1202 struct nix_bandprof_alloc_rsp {
1203 	struct mbox_msghdr hdr;
1204 	uint16_t __io prof_count[NIX_RX_BAND_PROF_LAYER_MAX];
1205 
1206 #define BANDPROF_PER_PFFUNC 64
1207 	uint16_t __io prof_idx[NIX_RX_BAND_PROF_LAYER_MAX][BANDPROF_PER_PFFUNC];
1208 };
1209 
1210 struct nix_bandprof_free_req {
1211 	struct mbox_msghdr hdr;
1212 	uint8_t __io free_all;
1213 	uint16_t __io prof_count[NIX_RX_BAND_PROF_LAYER_MAX];
1214 	uint16_t __io prof_idx[NIX_RX_BAND_PROF_LAYER_MAX][BANDPROF_PER_PFFUNC];
1215 };
1216 
1217 struct nix_bandprof_get_hwinfo_rsp {
1218 	struct mbox_msghdr hdr;
1219 	uint16_t __io prof_count[NIX_RX_BAND_PROF_LAYER_MAX];
1220 	uint32_t __io policer_timeunit;
1221 };
1222 
1223 /* SSO mailbox error codes
1224  * Range 501 - 600.
1225  */
1226 enum sso_af_status {
1227 	SSO_AF_ERR_PARAM = -501,
1228 	SSO_AF_ERR_LF_INVALID = -502,
1229 	SSO_AF_ERR_AF_LF_ALLOC = -503,
1230 	SSO_AF_ERR_GRP_EBUSY = -504,
1231 	SSO_AF_INVAL_NPA_PF_FUNC = -505,
1232 };
1233 
1234 struct sso_lf_alloc_req {
1235 	struct mbox_msghdr hdr;
1236 	int __io node;
1237 	uint16_t __io hwgrps;
1238 };
1239 
1240 struct sso_lf_alloc_rsp {
1241 	struct mbox_msghdr hdr;
1242 	uint32_t __io xaq_buf_size;
1243 	uint32_t __io xaq_wq_entries;
1244 	uint32_t __io in_unit_entries;
1245 	uint16_t __io hwgrps;
1246 };
1247 
1248 struct sso_lf_free_req {
1249 	struct mbox_msghdr hdr;
1250 	int __io node;
1251 	uint16_t __io hwgrps;
1252 };
1253 
1254 /* SSOW mailbox error codes
1255  * Range 601 - 700.
1256  */
1257 enum ssow_af_status {
1258 	SSOW_AF_ERR_PARAM = -601,
1259 	SSOW_AF_ERR_LF_INVALID = -602,
1260 	SSOW_AF_ERR_AF_LF_ALLOC = -603,
1261 };
1262 
1263 struct ssow_lf_alloc_req {
1264 	struct mbox_msghdr hdr;
1265 	int __io node;
1266 	uint16_t __io hws;
1267 };
1268 
1269 struct ssow_lf_free_req {
1270 	struct mbox_msghdr hdr;
1271 	int __io node;
1272 	uint16_t __io hws;
1273 };
1274 
1275 #define SSOW_INVAL_SELECTIVE_VER 0x1000
1276 struct ssow_lf_inv_req {
1277 	struct mbox_msghdr hdr;
1278 	uint16_t nb_hws;		 /* Number of HWS to invalidate*/
1279 	uint16_t hws[MAX_RVU_BLKLF_CNT]; /* Array of HWS */
1280 };
1281 
1282 struct ssow_config_lsw {
1283 	struct mbox_msghdr hdr;
1284 #define SSOW_LSW_DIS	 0
1285 #define SSOW_LSW_GW_WAIT 1
1286 #define SSOW_LSW_GW_IMM	 2
1287 	uint8_t __io lsw_mode;
1288 #define SSOW_WQE_REL_LSW_WAIT 0
1289 #define SSOW_WQE_REL_IMM      1
1290 	uint8_t __io wqe_release;
1291 };
1292 
1293 struct ssow_chng_mship {
1294 	struct mbox_msghdr hdr;
1295 	uint8_t __io set;	 /* Membership set to modify. */
1296 	uint8_t __io enable;	 /* Enable/Disable the hwgrps. */
1297 	uint8_t __io hws;	 /* HWS to modify. */
1298 	uint16_t __io nb_hwgrps; /* Number of hwgrps in the array */
1299 	uint16_t __io hwgrps[MAX_RVU_BLKLF_CNT]; /* Array of hwgrps. */
1300 };
1301 
1302 struct sso_hw_setconfig {
1303 	struct mbox_msghdr hdr;
1304 	uint32_t __io npa_aura_id;
1305 	uint16_t __io npa_pf_func;
1306 	uint16_t __io hwgrps;
1307 };
1308 
1309 struct sso_hw_xaq_release {
1310 	struct mbox_msghdr hdr;
1311 	uint16_t __io hwgrps;
1312 };
1313 
1314 struct sso_info_req {
1315 	struct mbox_msghdr hdr;
1316 	union {
1317 		uint16_t __io grp;
1318 		uint16_t __io hws;
1319 	};
1320 };
1321 
1322 struct sso_grp_priority {
1323 	struct mbox_msghdr hdr;
1324 	uint16_t __io grp;
1325 	uint8_t __io priority;
1326 	uint8_t __io affinity;
1327 	uint8_t __io weight;
1328 };
1329 
1330 struct sso_grp_qos_cfg {
1331 	struct mbox_msghdr hdr;
1332 	uint16_t __io grp;
1333 	uint32_t __io xaq_limit;
1334 	uint16_t __io taq_thr;
1335 	uint16_t __io iaq_thr;
1336 };
1337 
1338 struct sso_grp_stats {
1339 	struct mbox_msghdr hdr;
1340 	uint16_t __io grp;
1341 	uint64_t __io ws_pc;
1342 	uint64_t __io ext_pc;
1343 	uint64_t __io wa_pc;
1344 	uint64_t __io ts_pc;
1345 	uint64_t __io ds_pc;
1346 	uint64_t __io dq_pc;
1347 	uint64_t __io aw_status;
1348 	uint64_t __io page_cnt;
1349 };
1350 
1351 struct sso_hws_stats {
1352 	struct mbox_msghdr hdr;
1353 	uint16_t __io hws;
1354 	uint64_t __io arbitration;
1355 };
1356 
1357 /* CPT mailbox error codes
1358  * Range 901 - 1000.
1359  */
1360 enum cpt_af_status {
1361 	CPT_AF_ERR_PARAM = -901,
1362 	CPT_AF_ERR_GRP_INVALID = -902,
1363 	CPT_AF_ERR_LF_INVALID = -903,
1364 	CPT_AF_ERR_ACCESS_DENIED = -904,
1365 	CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
1366 	CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906,
1367 	CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907,
1368 	CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908
1369 };
1370 
1371 /* CPT mbox message formats */
1372 
1373 struct cpt_rd_wr_reg_msg {
1374 	struct mbox_msghdr hdr;
1375 	uint64_t __io reg_offset;
1376 	uint64_t __io *ret_val;
1377 	uint64_t __io val;
1378 	uint8_t __io is_write;
1379 };
1380 
1381 struct cpt_set_crypto_grp_req_msg {
1382 	struct mbox_msghdr hdr;
1383 	uint8_t __io crypto_eng_grp;
1384 };
1385 
1386 struct cpt_lf_alloc_req_msg {
1387 	struct mbox_msghdr hdr;
1388 	uint16_t __io nix_pf_func;
1389 	uint16_t __io sso_pf_func;
1390 	uint16_t __io eng_grpmsk;
1391 	uint8_t __io blkaddr;
1392 };
1393 
1394 #define CPT_INLINE_INBOUND  0
1395 #define CPT_INLINE_OUTBOUND 1
1396 
1397 struct cpt_inline_ipsec_cfg_msg {
1398 	struct mbox_msghdr hdr;
1399 	uint8_t __io enable;
1400 	uint8_t __io slot;
1401 	uint8_t __io dir;
1402 	uint8_t __io sso_pf_func_ovrd;
1403 	uint16_t __io sso_pf_func; /* Inbound path SSO_PF_FUNC */
1404 	uint16_t __io nix_pf_func; /* Outbound path NIX_PF_FUNC */
1405 };
1406 
1407 struct cpt_sts_req {
1408 	struct mbox_msghdr hdr;
1409 	uint8_t __io blkaddr;
1410 };
1411 
1412 struct cpt_sts_rsp {
1413 	struct mbox_msghdr hdr;
1414 	uint64_t __io inst_req_pc;
1415 	uint64_t __io inst_lat_pc;
1416 	uint64_t __io rd_req_pc;
1417 	uint64_t __io rd_lat_pc;
1418 	uint64_t __io rd_uc_pc;
1419 	uint64_t __io active_cycles_pc;
1420 	uint64_t __io ctx_mis_pc;
1421 	uint64_t __io ctx_hit_pc;
1422 	uint64_t __io ctx_aop_pc;
1423 	uint64_t __io ctx_aop_lat_pc;
1424 	uint64_t __io ctx_ifetch_pc;
1425 	uint64_t __io ctx_ifetch_lat_pc;
1426 	uint64_t __io ctx_ffetch_pc;
1427 	uint64_t __io ctx_ffetch_lat_pc;
1428 	uint64_t __io ctx_wback_pc;
1429 	uint64_t __io ctx_wback_lat_pc;
1430 	uint64_t __io ctx_psh_pc;
1431 	uint64_t __io ctx_psh_lat_pc;
1432 	uint64_t __io ctx_err;
1433 	uint64_t __io ctx_enc_id;
1434 	uint64_t __io ctx_flush_timer;
1435 	uint64_t __io rxc_time;
1436 	uint64_t __io rxc_time_cfg;
1437 	uint64_t __io rxc_active_sts;
1438 	uint64_t __io rxc_zombie_sts;
1439 	uint64_t __io busy_sts_ae;
1440 	uint64_t __io free_sts_ae;
1441 	uint64_t __io busy_sts_se;
1442 	uint64_t __io free_sts_se;
1443 	uint64_t __io busy_sts_ie;
1444 	uint64_t __io free_sts_ie;
1445 	uint64_t __io exe_err_info;
1446 	uint64_t __io cptclk_cnt;
1447 	uint64_t __io diag;
1448 	uint64_t __io rxc_dfrg;
1449 	uint64_t __io x2p_link_cfg0;
1450 	uint64_t __io x2p_link_cfg1;
1451 };
1452 
1453 struct cpt_rxc_time_cfg_req {
1454 	struct mbox_msghdr hdr;
1455 	int blkaddr;
1456 	uint32_t step;
1457 	uint16_t zombie_thres;
1458 	uint16_t zombie_limit;
1459 	uint16_t active_thres;
1460 	uint16_t active_limit;
1461 };
1462 
1463 struct cpt_rx_inline_lf_cfg_msg {
1464 	struct mbox_msghdr hdr;
1465 	uint16_t __io sso_pf_func;
1466 	uint16_t __io param1;
1467 	uint16_t __io param2;
1468 	uint16_t __io reserved;
1469 };
1470 
1471 enum cpt_eng_type {
1472 	CPT_ENG_TYPE_AE = 1,
1473 	CPT_ENG_TYPE_SE = 2,
1474 	CPT_ENG_TYPE_IE = 3,
1475 	CPT_MAX_ENG_TYPES,
1476 };
1477 
1478 /* CPT HW capabilities */
1479 union cpt_eng_caps {
1480 	uint64_t __io u;
1481 	struct {
1482 		uint64_t __io reserved_0_4 : 5;
1483 		uint64_t __io mul : 1;
1484 		uint64_t __io sha1_sha2 : 1;
1485 		uint64_t __io chacha20 : 1;
1486 		uint64_t __io zuc_snow3g : 1;
1487 		uint64_t __io sha3 : 1;
1488 		uint64_t __io aes : 1;
1489 		uint64_t __io kasumi : 1;
1490 		uint64_t __io des : 1;
1491 		uint64_t __io crc : 1;
1492 		uint64_t __io reserved_14_63 : 50;
1493 	};
1494 };
1495 
1496 struct cpt_caps_rsp_msg {
1497 	struct mbox_msghdr hdr;
1498 	uint16_t __io cpt_pf_drv_version;
1499 	uint8_t __io cpt_revision;
1500 	union cpt_eng_caps eng_caps[CPT_MAX_ENG_TYPES];
1501 };
1502 
1503 struct cpt_eng_grp_req {
1504 	struct mbox_msghdr hdr;
1505 	uint8_t __io eng_type;
1506 };
1507 
1508 struct cpt_eng_grp_rsp {
1509 	struct mbox_msghdr hdr;
1510 	uint8_t __io eng_type;
1511 	uint8_t __io eng_grp_num;
1512 };
1513 
1514 /* REE mailbox error codes
1515  * Range 1001 - 1100.
1516  */
1517 enum ree_af_status {
1518 	REE_AF_ERR_RULE_UNKNOWN_VALUE = -1001,
1519 	REE_AF_ERR_LF_NO_MORE_RESOURCES = -1002,
1520 	REE_AF_ERR_LF_INVALID = -1003,
1521 	REE_AF_ERR_ACCESS_DENIED = -1004,
1522 	REE_AF_ERR_RULE_DB_PARTIAL = -1005,
1523 	REE_AF_ERR_RULE_DB_EQ_BAD_VALUE = -1006,
1524 	REE_AF_ERR_RULE_DB_BLOCK_ALLOC_FAILED = -1007,
1525 	REE_AF_ERR_BLOCK_NOT_IMPLEMENTED = -1008,
1526 	REE_AF_ERR_RULE_DB_INC_OFFSET_TOO_BIG = -1009,
1527 	REE_AF_ERR_RULE_DB_OFFSET_TOO_BIG = -1010,
1528 	REE_AF_ERR_Q_IS_GRACEFUL_DIS = -1011,
1529 	REE_AF_ERR_Q_NOT_GRACEFUL_DIS = -1012,
1530 	REE_AF_ERR_RULE_DB_ALLOC_FAILED = -1013,
1531 	REE_AF_ERR_RULE_DB_TOO_BIG = -1014,
1532 	REE_AF_ERR_RULE_DB_GEQ_BAD_VALUE = -1015,
1533 	REE_AF_ERR_RULE_DB_LEQ_BAD_VALUE = -1016,
1534 	REE_AF_ERR_RULE_DB_WRONG_LENGTH = -1017,
1535 	REE_AF_ERR_RULE_DB_WRONG_OFFSET = -1018,
1536 	REE_AF_ERR_RULE_DB_BLOCK_TOO_BIG = -1019,
1537 	REE_AF_ERR_RULE_DB_SHOULD_FILL_REQUEST = -1020,
1538 	REE_AF_ERR_RULE_DBI_ALLOC_FAILED = -1021,
1539 	REE_AF_ERR_LF_WRONG_PRIORITY = -1022,
1540 	REE_AF_ERR_LF_SIZE_TOO_BIG = -1023,
1541 };
1542 
1543 /* REE mbox message formats */
1544 
1545 struct ree_req_msg {
1546 	struct mbox_msghdr hdr;
1547 	uint32_t __io blkaddr;
1548 };
1549 
1550 struct ree_lf_req_msg {
1551 	struct mbox_msghdr hdr;
1552 	uint32_t __io blkaddr;
1553 	uint32_t __io size;
1554 	uint8_t __io lf;
1555 	uint8_t __io pri;
1556 };
1557 
1558 struct ree_rule_db_prog_req_msg {
1559 	struct mbox_msghdr hdr;
1560 #define REE_RULE_DB_REQ_BLOCK_SIZE ((64ULL * 1024ULL) >> 1)
1561 	uint8_t __io rule_db[REE_RULE_DB_REQ_BLOCK_SIZE];
1562 	uint32_t __io blkaddr;	     /* REE0 or REE1 */
1563 	uint32_t __io total_len;     /* total len of rule db */
1564 	uint32_t __io offset;	     /* offset of current rule db block */
1565 	uint16_t __io len;	     /* length of rule db block */
1566 	uint8_t __io is_last;	     /* is this the last block */
1567 	uint8_t __io is_incremental; /* is incremental flow */
1568 	uint8_t __io is_dbi;	     /* is rule db incremental */
1569 };
1570 
1571 struct ree_rule_db_get_req_msg {
1572 	struct mbox_msghdr hdr;
1573 	uint32_t __io blkaddr;
1574 	uint32_t __io offset; /* retrieve db from this offset */
1575 	uint8_t __io is_dbi;  /* is request for rule db incremental */
1576 };
1577 
1578 struct ree_rd_wr_reg_msg {
1579 	struct mbox_msghdr hdr;
1580 	uint64_t __io reg_offset;
1581 	uint64_t __io *ret_val;
1582 	uint64_t __io val;
1583 	uint32_t __io blkaddr;
1584 	uint8_t __io is_write;
1585 };
1586 
1587 struct ree_rule_db_len_rsp_msg {
1588 	struct mbox_msghdr hdr;
1589 	uint32_t __io blkaddr;
1590 	uint32_t __io len;
1591 	uint32_t __io inc_len;
1592 };
1593 
1594 struct ree_rule_db_get_rsp_msg {
1595 	struct mbox_msghdr hdr;
1596 #define REE_RULE_DB_RSP_BLOCK_SIZE (15ULL * 1024ULL)
1597 	uint8_t __io rule_db[REE_RULE_DB_RSP_BLOCK_SIZE];
1598 	uint32_t __io total_len; /* total len of rule db */
1599 	uint32_t __io offset;	 /* offset of current rule db block */
1600 	uint16_t __io len;	 /* length of rule db block */
1601 	uint8_t __io is_last;	 /* is this the last block */
1602 };
1603 
1604 /* NPC mbox message structs */
1605 
1606 #define NPC_MCAM_ENTRY_INVALID 0xFFFF
1607 #define NPC_MCAM_INVALID_MAP   0xFFFF
1608 
1609 /* NPC mailbox error codes
1610  * Range 701 - 800.
1611  */
1612 enum npc_af_status {
1613 	NPC_MCAM_INVALID_REQ = -701,
1614 	NPC_MCAM_ALLOC_DENIED = -702,
1615 	NPC_MCAM_ALLOC_FAILED = -703,
1616 	NPC_MCAM_PERM_DENIED = -704,
1617 	NPC_AF_ERR_HIGIG_CONFIG_FAIL = -705,
1618 };
1619 
1620 struct npc_mcam_alloc_entry_req {
1621 	struct mbox_msghdr hdr;
1622 #define NPC_MAX_NONCONTIG_ENTRIES 256
1623 	uint8_t __io contig; /* Contiguous entries ? */
1624 #define NPC_MCAM_ANY_PRIO    0
1625 #define NPC_MCAM_LOWER_PRIO  1
1626 #define NPC_MCAM_HIGHER_PRIO 2
1627 	uint8_t __io priority; /* Lower or higher w.r.t ref_entry */
1628 	uint16_t __io ref_entry;
1629 	uint16_t __io count; /* Number of entries requested */
1630 };
1631 
1632 struct npc_mcam_alloc_entry_rsp {
1633 	struct mbox_msghdr hdr;
1634 	/* Entry alloc'ed or start index if contiguous.
1635 	 * Invalid in case of non-contiguous.
1636 	 */
1637 	uint16_t __io entry;
1638 	uint16_t __io count;	  /* Number of entries allocated */
1639 	uint16_t __io free_count; /* Number of entries available */
1640 	uint16_t __io entry_list[NPC_MAX_NONCONTIG_ENTRIES];
1641 };
1642 
1643 struct npc_mcam_free_entry_req {
1644 	struct mbox_msghdr hdr;
1645 	uint16_t __io entry; /* Entry index to be freed */
1646 	uint8_t __io all;    /* Free all entries alloc'ed to this PFVF */
1647 };
1648 
1649 struct mcam_entry {
1650 #define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max key width */
1651 	uint64_t __io kw[NPC_MAX_KWS_IN_KEY];
1652 	uint64_t __io kw_mask[NPC_MAX_KWS_IN_KEY];
1653 	uint64_t __io action;
1654 	uint64_t __io vtag_action;
1655 };
1656 
1657 struct npc_mcam_write_entry_req {
1658 	struct mbox_msghdr hdr;
1659 	struct mcam_entry entry_data;
1660 	uint16_t __io entry;	   /* MCAM entry to write this match key */
1661 	uint16_t __io cntr;	   /* Counter for this MCAM entry */
1662 	uint8_t __io intf;	   /* Rx or Tx interface */
1663 	uint8_t __io enable_entry; /* Enable this MCAM entry ? */
1664 	uint8_t __io set_cntr;	   /* Set counter for this entry ? */
1665 };
1666 
1667 /* Enable/Disable a given entry */
1668 struct npc_mcam_ena_dis_entry_req {
1669 	struct mbox_msghdr hdr;
1670 	uint16_t __io entry;
1671 };
1672 
1673 struct npc_mcam_shift_entry_req {
1674 	struct mbox_msghdr hdr;
1675 #define NPC_MCAM_MAX_SHIFTS 64
1676 	uint16_t __io curr_entry[NPC_MCAM_MAX_SHIFTS];
1677 	uint16_t __io new_entry[NPC_MCAM_MAX_SHIFTS];
1678 	uint16_t __io shift_count; /* Number of entries to shift */
1679 };
1680 
1681 struct npc_mcam_shift_entry_rsp {
1682 	struct mbox_msghdr hdr;
1683 	/* Index in 'curr_entry', not entry itself */
1684 	uint16_t __io failed_entry_idx;
1685 };
1686 
1687 struct npc_mcam_alloc_counter_req {
1688 	struct mbox_msghdr hdr;
1689 	uint8_t __io contig; /* Contiguous counters ? */
1690 #define NPC_MAX_NONCONTIG_COUNTERS 64
1691 	uint16_t __io count; /* Number of counters requested */
1692 };
1693 
1694 struct npc_mcam_alloc_counter_rsp {
1695 	struct mbox_msghdr hdr;
1696 	/* Counter alloc'ed or start idx if contiguous.
1697 	 * Invalid in case of non-contiguous.
1698 	 */
1699 	uint16_t __io cntr;
1700 	uint16_t __io count; /* Number of counters allocated */
1701 	uint16_t __io cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
1702 };
1703 
1704 struct npc_mcam_oper_counter_req {
1705 	struct mbox_msghdr hdr;
1706 	uint16_t __io cntr; /* Free a counter or clear/fetch it's stats */
1707 };
1708 
1709 struct npc_mcam_oper_counter_rsp {
1710 	struct mbox_msghdr hdr;
1711 	/* valid only while fetching counter's stats */
1712 	uint64_t __io stat;
1713 };
1714 
1715 struct npc_mcam_unmap_counter_req {
1716 	struct mbox_msghdr hdr;
1717 	uint16_t __io cntr;
1718 	uint16_t __io entry; /* Entry and counter to be unmapped */
1719 	uint8_t __io all;    /* Unmap all entries using this counter ? */
1720 };
1721 
1722 struct npc_mcam_alloc_and_write_entry_req {
1723 	struct mbox_msghdr hdr;
1724 	struct mcam_entry entry_data;
1725 	uint16_t __io ref_entry;
1726 	uint8_t __io priority;	   /* Lower or higher w.r.t ref_entry */
1727 	uint8_t __io intf;	   /* Rx or Tx interface */
1728 	uint8_t __io enable_entry; /* Enable this MCAM entry ? */
1729 	uint8_t __io alloc_cntr;   /* Allocate counter and map ? */
1730 };
1731 
1732 struct npc_mcam_alloc_and_write_entry_rsp {
1733 	struct mbox_msghdr hdr;
1734 	uint16_t __io entry;
1735 	uint16_t __io cntr;
1736 };
1737 
1738 struct npc_get_kex_cfg_rsp {
1739 	struct mbox_msghdr hdr;
1740 	uint64_t __io rx_keyx_cfg; /* NPC_AF_INTF(0)_KEX_CFG */
1741 	uint64_t __io tx_keyx_cfg; /* NPC_AF_INTF(1)_KEX_CFG */
1742 #define NPC_MAX_INTF 2
1743 #define NPC_MAX_LID  8
1744 #define NPC_MAX_LT   16
1745 #define NPC_MAX_LD   2
1746 #define NPC_MAX_LFL  16
1747 	/* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
1748 	uint64_t __io kex_ld_flags[NPC_MAX_LD];
1749 	/* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
1750 	uint64_t __io intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT]
1751 				    [NPC_MAX_LD];
1752 	/* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
1753 	uint64_t __io intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
1754 #define MKEX_NAME_LEN 128
1755 	uint8_t __io mkex_pfl_name[MKEX_NAME_LEN];
1756 };
1757 
1758 enum header_fields {
1759 	NPC_DMAC,
1760 	NPC_SMAC,
1761 	NPC_ETYPE,
1762 	NPC_OUTER_VID,
1763 	NPC_TOS,
1764 	NPC_SIP_IPV4,
1765 	NPC_DIP_IPV4,
1766 	NPC_SIP_IPV6,
1767 	NPC_DIP_IPV6,
1768 	NPC_SPORT_TCP,
1769 	NPC_DPORT_TCP,
1770 	NPC_SPORT_UDP,
1771 	NPC_DPORT_UDP,
1772 	NPC_FDSA_VAL,
1773 	NPC_HEADER_FIELDS_MAX,
1774 };
1775 
1776 struct flow_msg {
1777 	unsigned char __io dmac[6];
1778 	unsigned char __io smac[6];
1779 	uint16_t __io etype;
1780 	uint16_t __io vlan_etype;
1781 	uint16_t __io vlan_tci;
1782 	union {
1783 		uint32_t __io ip4src;
1784 		uint32_t __io ip6src[4];
1785 	};
1786 	union {
1787 		uint32_t __io ip4dst;
1788 		uint32_t __io ip6dst[4];
1789 	};
1790 	uint8_t __io tos;
1791 	uint8_t __io ip_ver;
1792 	uint8_t __io ip_proto;
1793 	uint8_t __io tc;
1794 	uint16_t __io sport;
1795 	uint16_t __io dport;
1796 };
1797 
1798 struct npc_install_flow_req {
1799 	struct mbox_msghdr hdr;
1800 	struct flow_msg packet;
1801 	struct flow_msg mask;
1802 	uint64_t __io features;
1803 	uint16_t __io entry;
1804 	uint16_t __io channel;
1805 	uint8_t __io intf;
1806 	uint8_t __io set_cntr;
1807 	uint8_t __io default_rule;
1808 	/* Overwrite(0) or append(1) flow to default rule? */
1809 	uint8_t __io append;
1810 	uint16_t __io vf;
1811 	/* action */
1812 	uint32_t __io index;
1813 	uint16_t __io match_id;
1814 	uint8_t __io flow_key_alg;
1815 	uint8_t __io op;
1816 	/* vtag action */
1817 	uint8_t __io vtag0_type;
1818 	uint8_t __io vtag0_valid;
1819 	uint8_t __io vtag1_type;
1820 	uint8_t __io vtag1_valid;
1821 
1822 	/* vtag tx action */
1823 	uint16_t __io vtag0_def;
1824 	uint8_t __io vtag0_op;
1825 	uint16_t __io vtag1_def;
1826 	uint8_t __io vtag1_op;
1827 };
1828 
1829 struct npc_install_flow_rsp {
1830 	struct mbox_msghdr hdr;
1831 	/* Negative if no counter else counter number */
1832 	int __io counter;
1833 };
1834 
1835 struct npc_delete_flow_req {
1836 	struct mbox_msghdr hdr;
1837 	uint16_t __io entry;
1838 	uint16_t __io start; /*Disable range of entries */
1839 	uint16_t __io end;
1840 	uint8_t __io all; /* PF + VFs */
1841 };
1842 
1843 struct npc_mcam_read_entry_req {
1844 	struct mbox_msghdr hdr;
1845 	/* MCAM entry to read */
1846 	uint16_t __io entry;
1847 };
1848 
1849 struct npc_mcam_read_entry_rsp {
1850 	struct mbox_msghdr hdr;
1851 	struct mcam_entry entry_data;
1852 	uint8_t __io intf;
1853 	uint8_t __io enable;
1854 };
1855 
1856 struct npc_mcam_read_base_rule_rsp {
1857 	struct mbox_msghdr hdr;
1858 	struct mcam_entry entry_data;
1859 };
1860 
1861 struct npc_mcam_get_stats_req {
1862 	struct mbox_msghdr hdr;
1863 	uint16_t __io entry; /* mcam entry */
1864 };
1865 
1866 struct npc_mcam_get_stats_rsp {
1867 	struct mbox_msghdr hdr;
1868 	uint64_t __io stat;  /* counter stats */
1869 	uint8_t __io stat_ena; /* enabled */
1870 };
1871 
1872 /* TIM mailbox error codes
1873  * Range 801 - 900.
1874  */
1875 enum tim_af_status {
1876 	TIM_AF_NO_RINGS_LEFT = -801,
1877 	TIM_AF_INVALID_NPA_PF_FUNC = -802,
1878 	TIM_AF_INVALID_SSO_PF_FUNC = -803,
1879 	TIM_AF_RING_STILL_RUNNING = -804,
1880 	TIM_AF_LF_INVALID = -805,
1881 	TIM_AF_CSIZE_NOT_ALIGNED = -806,
1882 	TIM_AF_CSIZE_TOO_SMALL = -807,
1883 	TIM_AF_CSIZE_TOO_BIG = -808,
1884 	TIM_AF_INTERVAL_TOO_SMALL = -809,
1885 	TIM_AF_INVALID_BIG_ENDIAN_VALUE = -810,
1886 	TIM_AF_INVALID_CLOCK_SOURCE = -811,
1887 	TIM_AF_GPIO_CLK_SRC_NOT_ENABLED = -812,
1888 	TIM_AF_INVALID_BSIZE = -813,
1889 	TIM_AF_INVALID_ENABLE_PERIODIC = -814,
1890 	TIM_AF_INVALID_ENABLE_DONTFREE = -815,
1891 	TIM_AF_ENA_DONTFRE_NSET_PERIODIC = -816,
1892 	TIM_AF_RING_ALREADY_DISABLED = -817,
1893 };
1894 
1895 enum tim_clk_srcs {
1896 	TIM_CLK_SRCS_TENNS = 0,
1897 	TIM_CLK_SRCS_GPIO = 1,
1898 	TIM_CLK_SRCS_GTI = 2,
1899 	TIM_CLK_SRCS_PTP = 3,
1900 	TIM_CLK_SRSC_INVALID,
1901 };
1902 
1903 enum tim_gpio_edge {
1904 	TIM_GPIO_NO_EDGE = 0,
1905 	TIM_GPIO_LTOH_TRANS = 1,
1906 	TIM_GPIO_HTOL_TRANS = 2,
1907 	TIM_GPIO_BOTH_TRANS = 3,
1908 	TIM_GPIO_INVALID,
1909 };
1910 
1911 enum ptp_op {
1912 	PTP_OP_ADJFINE = 0,   /* adjfine(req.scaled_ppm); */
1913 	PTP_OP_GET_CLOCK = 1, /* rsp.clk = get_clock() */
1914 };
1915 
1916 struct ptp_req {
1917 	struct mbox_msghdr hdr;
1918 	uint8_t __io op;
1919 	int64_t __io scaled_ppm;
1920 	uint8_t __io is_pmu;
1921 };
1922 
1923 struct ptp_rsp {
1924 	struct mbox_msghdr hdr;
1925 	uint64_t __io clk;
1926 	uint64_t __io tsc;
1927 };
1928 
1929 struct get_hw_cap_rsp {
1930 	struct mbox_msghdr hdr;
1931 	/* Schq mapping fixed or flexible */
1932 	uint8_t __io nix_fixed_txschq_mapping;
1933 	uint8_t __io nix_shaping; /* Is shaping and coloring supported */
1934 };
1935 
1936 struct ndc_sync_op {
1937 	struct mbox_msghdr hdr;
1938 	uint8_t __io nix_lf_tx_sync;
1939 	uint8_t __io nix_lf_rx_sync;
1940 	uint8_t __io npa_lf_sync;
1941 };
1942 
1943 struct tim_lf_alloc_req {
1944 	struct mbox_msghdr hdr;
1945 	uint16_t __io ring;
1946 	uint16_t __io npa_pf_func;
1947 	uint16_t __io sso_pf_func;
1948 };
1949 
1950 struct tim_ring_req {
1951 	struct mbox_msghdr hdr;
1952 	uint16_t __io ring;
1953 };
1954 
1955 struct tim_config_req {
1956 	struct mbox_msghdr hdr;
1957 	uint16_t __io ring;
1958 	uint8_t __io bigendian;
1959 	uint8_t __io clocksource;
1960 	uint8_t __io enableperiodic;
1961 	uint8_t __io enabledontfreebuffer;
1962 	uint32_t __io bucketsize;
1963 	uint32_t __io chunksize;
1964 	uint32_t __io interval;
1965 	uint8_t __io gpioedge;
1966 	uint8_t __io rsvd[7];
1967 	uint64_t __io intervalns;
1968 	uint64_t __io clockfreq;
1969 };
1970 
1971 struct tim_lf_alloc_rsp {
1972 	struct mbox_msghdr hdr;
1973 	uint64_t __io tenns_clk;
1974 };
1975 
1976 struct tim_enable_rsp {
1977 	struct mbox_msghdr hdr;
1978 	uint64_t __io timestarted;
1979 	uint32_t __io currentbucket;
1980 };
1981 
1982 struct tim_intvl_req {
1983 	struct mbox_msghdr hdr;
1984 	uint8_t __io clocksource;
1985 	uint64_t __io clockfreq;
1986 };
1987 
1988 struct tim_intvl_rsp {
1989 	struct mbox_msghdr hdr;
1990 	uint64_t __io intvl_cyc;
1991 	uint64_t __io intvl_ns;
1992 };
1993 
1994 struct sdp_node_info {
1995 	/* Node to which this PF belons to */
1996 	uint8_t __io node_id;
1997 	uint8_t __io max_vfs;
1998 	uint8_t __io num_pf_rings;
1999 	uint8_t __io pf_srn;
2000 #define SDP_MAX_VFS	128
2001 	uint8_t __io vf_rings[SDP_MAX_VFS];
2002 };
2003 
2004 struct sdp_chan_info_msg {
2005 	struct mbox_msghdr hdr;
2006 	struct sdp_node_info info;
2007 };
2008 
2009 #endif /* __ROC_MBOX_H__ */
2010