/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef _ROC_NIX_PRIV_H_
#define _ROC_NIX_PRIV_H_

/* Constants */
#define NIX_CQ_ENTRY_SZ 128
#define NIX_CQ_ENTRY64_SZ 512
#define NIX_CQ_ALIGN ((uint16_t)512)
#define NIX_MAX_SQB ((uint16_t)512)
#define NIX_DEF_SQB ((uint16_t)16)
#define NIX_MIN_SQB ((uint16_t)8)
#define NIX_SQB_LIST_SPACE ((uint16_t)2)
#define NIX_SQB_LOWER_THRESH ((uint16_t)70)

/* Apply BP/DROP when CQ is 95% full */
#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100)
#define NIX_CQ_FULL_ERRATA_SKID (1024ull * 256)
#define NIX_RQ_AURA_THRESH(x) (((x) * 95) / 100)
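
/*
 * Worked example (assumption: the CQ drop/BP level is expressed in 1/256ths
 * of the ring size): 95% full leaves 5% headroom, hence
 * 5 * 256 / 100 = 12 (12.8 truncated by integer division). Similarly,
 * NIX_RQ_AURA_THRESH(4096) evaluates to 3891, roughly 95% of the aura count.
 */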

/* IRQ triggered when NIX_LF_CINTX_CNT[QCOUNT] crosses this value */
#define CQ_CQE_THRESH_DEFAULT 0x1ULL
#define CQ_TIMER_THRESH_DEFAULT 0xAULL /* ~1usec i.e. (0xA * 100nsec) */
#define CQ_TIMER_THRESH_MAX 255

struct nix_qint {
	struct nix *nix;
	uint8_t qintx;
};

/* Traffic Manager */
#define NIX_TM_MAX_HW_TXSCHQ 1024
#define NIX_TM_HW_ID_INVALID UINT32_MAX
#define NIX_TM_CHAN_INVALID UINT16_MAX

/* TM flags */
#define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
#define NIX_TM_TL1_NO_SP BIT_ULL(1)
#define NIX_TM_TL1_ACCESS BIT_ULL(2)
#define NIX_TM_MARK_VLAN_DEI_EN BIT_ULL(3)
#define NIX_TM_MARK_IP_DSCP_EN BIT_ULL(4)
#define NIX_TM_MARK_IP_ECN_EN BIT_ULL(5)

#define NIX_TM_MARK_EN_MASK                                                    \
	(NIX_TM_MARK_IP_DSCP_EN | NIX_TM_MARK_IP_ECN_EN |                      \
	 NIX_TM_MARK_VLAN_DEI_EN)

#define NIX_TM_MARK_VLAN_DEI_SHIFT 0 /* Leave 16b for VLAN for FP logic */
#define NIX_TM_MARK_IPV4_DSCP_SHIFT 16
#define NIX_TM_MARK_IPV6_DSCP_SHIFT 24
#define NIX_TM_MARK_IPV4_ECN_SHIFT 32
#define NIX_TM_MARK_IPV6_ECN_SHIFT 40
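
/*
 * Illustrative sketch (assumption: each mark type occupies its own lane in a
 * single 64-bit word such as tm_markfmt_en, with the low 16 bits reserved for
 * VLAN DEI to suit the fast-path logic):
 *
 *   uint64_t markfmt = 0;
 *   markfmt |= (uint64_t)vlan_dei_fmt << NIX_TM_MARK_VLAN_DEI_SHIFT;
 *   markfmt |= (uint64_t)ipv4_dscp_fmt << NIX_TM_MARK_IPV4_DSCP_SHIFT;
 *   markfmt |= (uint64_t)ipv6_ecn_fmt << NIX_TM_MARK_IPV6_ECN_SHIFT;
 *
 * The *_fmt variables above are hypothetical per-type format indices used
 * only for illustration.
 */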

struct nix_tm_tb {
	/** Token bucket rate (bytes per second) */
	uint64_t rate;

	/** Token bucket size (bytes), a.k.a. max burst size */
	uint64_t size;
};
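
/*
 * Example values (illustrative only): rate = 1250000000 (10 Gbit/s expressed
 * in bytes per second) with size = 8192 describes a bucket that sustains
 * 10 Gbit/s and tolerates bursts of up to 8 KB above that rate.
 */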

struct nix_tm_node {
	TAILQ_ENTRY(nix_tm_node) node;

	/* Input params */
	enum roc_nix_tm_tree tree;
	uint32_t id;
	uint32_t priority;
	uint32_t weight;
	uint16_t lvl;
	uint16_t rel_chan;
	uint32_t parent_id;
	uint32_t shaper_profile_id;
	void (*free_fn)(void *node);

	/* Derived params */
	uint32_t hw_id;
	uint16_t hw_lvl;
	uint32_t rr_prio;
	uint32_t rr_num;
	uint32_t max_prio;
	uint32_t parent_hw_id;
	uint32_t flags : 16;
#define NIX_TM_NODE_HWRES   BIT_ULL(0)
#define NIX_TM_NODE_ENABLED BIT_ULL(1)
	/* Shaper algorithm for RED state @NIX_REDALG_E */
	uint32_t red_algo : 2;
	uint32_t pkt_mode : 1;
	uint32_t pkt_mode_set : 1;
	uint32_t bp_capa : 1;

	bool child_realloc;
	struct nix_tm_node *parent;

	/* Non-leaf node SP (strict priority) count */
	uint32_t n_sp_priorities;

	/* Last stats */
	uint64_t last_pkts;
	uint64_t last_bytes;
};

struct nix_tm_shaper_profile {
	TAILQ_ENTRY(nix_tm_shaper_profile) shaper;
	struct nix_tm_tb commit;
	struct nix_tm_tb peak;
	int32_t pkt_len_adj;
	int32_t pkt_mode_adj;
	bool pkt_mode;
	uint32_t id;
	int8_t accuracy;
	void (*free_fn)(void *profile);

	uint32_t ref_cnt;
};

TAILQ_HEAD(nix_tm_node_list, nix_tm_node);
TAILQ_HEAD(nix_tm_shaper_profile_list, nix_tm_shaper_profile);

struct nix {
	uint16_t reta[ROC_NIX_RSS_GRPS][ROC_NIX_RSS_RETA_MAX];
	enum roc_nix_rss_reta_sz reta_sz;
	struct plt_pci_device *pci_dev;
	uint16_t bpid[NIX_MAX_CHAN];
	struct nix_qint *qints_mem;
	struct nix_qint *cints_mem;
	uint8_t configured_qints;
	uint8_t configured_cints;
	struct roc_nix_sq **sqs;
	uint16_t vwqe_interval;
	uint16_t tx_chan_base;
	uint16_t rx_chan_base;
	uint16_t nb_rx_queues;
	uint16_t nb_tx_queues;
	uint8_t lso_tsov6_idx;
	uint8_t lso_tsov4_idx;
	uint8_t lso_udp_tun_idx[ROC_NIX_LSO_TUN_MAX];
	uint8_t lso_tun_idx[ROC_NIX_LSO_TUN_MAX];
	uint8_t lf_rx_stats;
	uint8_t lf_tx_stats;
	uint8_t rx_chan_cnt;
	uint8_t rss_alg_idx;
	uint8_t tx_chan_cnt;
	uintptr_t lmt_base;
	uint8_t cgx_links;
	uint8_t lbk_links;
	uint8_t sdp_links;
	uint8_t tx_link;
	uint16_t sqb_size;
	/* Without FCS, with L2 overhead */
	uint16_t mtu;
	uint16_t chan_cnt;
	uint16_t msixoff;
	uint8_t rx_pause;
	uint8_t tx_pause;
	uint16_t cev;
	uint64_t rx_cfg;
	struct dev dev;
	uint16_t cints;
	uint16_t qints;
	uintptr_t base;
	bool sdp_link;
	bool lbk_link;
	bool ptp_en;
	bool is_nix1;

	/* Traffic manager info */

	/* Contiguous resources per lvl */
	struct plt_bitmap *schq_contig_bmp[NIX_TXSCH_LVL_CNT];
	/* Discontiguous resources per lvl */
	struct plt_bitmap *schq_bmp[NIX_TXSCH_LVL_CNT];
	void *schq_bmp_mem;

	struct nix_tm_shaper_profile_list shaper_profile_list;
	struct nix_tm_node_list trees[ROC_NIX_TM_TREE_MAX];
	enum roc_nix_tm_tree tm_tree;
	uint64_t tm_rate_min;
	uint16_t tm_root_lvl;
	uint16_t tm_flags;
	uint16_t tm_link_cfg_lvl;
	uint8_t tm_aggr_lvl_rr_prio;
	uint16_t contig_rsvd[NIX_TXSCH_LVL_CNT];
	uint16_t discontig_rsvd[NIX_TXSCH_LVL_CNT];
	uint64_t tm_markfmt_en;
	uint8_t tm_markfmt_null;
	uint8_t tm_markfmt[ROC_NIX_TM_MARK_MAX][ROC_NIX_TM_MARK_COLOR_MAX];

	/* IPsec info */
	uint16_t cpt_msixoff[MAX_RVU_BLKLF_CNT];
	bool inl_inb_ena;
	bool inl_outb_ena;
	void *inb_sa_base;
	size_t inb_sa_sz;
	uint32_t inb_spi_mask;
	void *outb_sa_base;
	size_t outb_sa_sz;
	uint16_t outb_err_sso_pffunc;
	struct roc_cpt_lf *cpt_lf_base;
	uint16_t nb_cpt_lf;
	uint16_t outb_se_ring_cnt;
	uint16_t outb_se_ring_base;
	/* Mode provided by driver */
	bool inb_inl_dev;

} __plt_cache_aligned;

enum nix_err_status {
	NIX_ERR_PARAM = -2048,
	NIX_ERR_NO_MEM,
	NIX_ERR_INVALID_RANGE,
	NIX_ERR_INTERNAL,
	NIX_ERR_OP_NOTSUP,
	NIX_ERR_HW_NOTSUP,
	NIX_ERR_QUEUE_INVALID_RANGE,
	NIX_ERR_AQ_READ_FAILED,
	NIX_ERR_AQ_WRITE_FAILED,
	NIX_ERR_TM_LEAF_NODE_GET,
	NIX_ERR_TM_INVALID_LVL,
	NIX_ERR_TM_INVALID_PRIO,
	NIX_ERR_TM_INVALID_PARENT,
	NIX_ERR_TM_NODE_EXISTS,
	NIX_ERR_TM_INVALID_NODE,
	NIX_ERR_TM_INVALID_SHAPER_PROFILE,
	NIX_ERR_TM_PKT_MODE_MISMATCH,
	NIX_ERR_TM_WEIGHT_EXCEED,
	NIX_ERR_TM_CHILD_EXISTS,
	NIX_ERR_TM_INVALID_PEAK_SZ,
	NIX_ERR_TM_INVALID_PEAK_RATE,
	NIX_ERR_TM_INVALID_COMMIT_SZ,
	NIX_ERR_TM_INVALID_COMMIT_RATE,
	NIX_ERR_TM_SHAPER_PROFILE_IN_USE,
	NIX_ERR_TM_SHAPER_PROFILE_EXISTS,
	NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST,
	NIX_ERR_TM_INVALID_TREE,
	NIX_ERR_TM_PARENT_PRIO_UPDATE,
	NIX_ERR_TM_PRIO_EXCEEDED,
	NIX_ERR_TM_PRIO_ORDER,
	NIX_ERR_TM_MULTIPLE_RR_GROUPS,
	NIX_ERR_TM_SQ_UPDATE_FAIL,
	NIX_ERR_NDC_SYNC,
};
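
/*
 * Note: only NIX_ERR_PARAM is assigned explicitly (-2048); the members that
 * follow take consecutive values, e.g. NIX_ERR_NO_MEM == -2047,
 * NIX_ERR_INVALID_RANGE == -2046, and so on.
 */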

enum nix_q_size {
	nix_q_size_16, /* 16 entries */
	nix_q_size_64, /* 64 entries */
	nix_q_size_256,
	nix_q_size_1K,
	nix_q_size_4K,
	nix_q_size_16K,
	nix_q_size_64K,
	nix_q_size_256K,
	nix_q_size_1M, /* Million entries */
	nix_q_size_max
};
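
/*
 * Each step above quadruples the ring depth, so the entry count for a given
 * enum value q is 16 << (2 * q), e.g. nix_q_size_4K -> 16 << 8 = 4096.
 */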

static inline struct nix *
roc_nix_to_nix_priv(struct roc_nix *roc_nix)
{
	return (struct nix *)&roc_nix->reserved[0];
}

static inline struct roc_nix *
nix_priv_to_roc_nix(struct nix *nix)
{
	return (struct roc_nix *)((char *)nix -
				  offsetof(struct roc_nix, reserved));
}
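
/*
 * Illustrative usage (not a new API): the private struct nix lives inside
 * roc_nix->reserved[], so the two helpers above are exact inverses of each
 * other:
 *
 *   struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 *
 * and nix_priv_to_roc_nix(nix) yields the original roc_nix pointer back.
 */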

/* IRQ */
int nix_register_irqs(struct nix *nix);
void nix_unregister_irqs(struct nix *nix);

/* TM */
#define NIX_TM_TREE_MASK_ALL                                                   \
	(BIT(ROC_NIX_TM_DEFAULT) | BIT(ROC_NIX_TM_RLIMIT) |                    \
	 BIT(ROC_NIX_TM_USER))

/* NIX_MAX_HW_FRS ==
 * NIX_TM_DFLT_RR_WT * NIX_TM_RR_QUANTUM_MAX / ROC_NIX_TM_MAX_SCHED_WT
 */
#define NIX_TM_DFLT_RR_WT 71

/* Default TL1 priority and Quantum from AF */
#define NIX_TM_TL1_DFLT_RR_QTM ((1 << 24) - 1)

struct nix_tm_shaper_data {
	uint64_t burst_exponent;
	uint64_t burst_mantissa;
	uint64_t div_exp;
	uint64_t exponent;
	uint64_t mantissa;
	uint64_t burst;
	uint64_t rate;
};

static inline uint64_t
nix_tm_weight_to_rr_quantum(uint64_t weight)
{
	uint64_t max = NIX_CN9K_TM_RR_QUANTUM_MAX;

	/* From CN10K onwards, we only configure RR weight */
	if (!roc_model_is_cn9k())
		return weight;

	weight &= (uint64_t)max;
	return (weight * max) / ROC_NIX_CN9K_TM_RR_WEIGHT_MAX;
}
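
/*
 * Behaviour sketch based on the helper above: on CN10K and newer the weight
 * is programmed as-is; on CN9K it is scaled linearly into the 24-bit quantum
 * range, so (assuming ROC_NIX_CN9K_TM_RR_WEIGHT_MAX itself fits in 24 bits)
 * the maximum weight maps to NIX_CN9K_TM_RR_QUANTUM_MAX and half the maximum
 * weight maps to roughly half of it.
 */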

static inline bool
nix_tm_have_tl1_access(struct nix *nix)
{
	return !!(nix->tm_flags & NIX_TM_TL1_ACCESS);
}

static inline bool
nix_tm_is_leaf(struct nix *nix, int lvl)
{
	if (nix_tm_have_tl1_access(nix))
		return (lvl == ROC_TM_LVL_QUEUE);
	return (lvl == ROC_TM_LVL_SCH4);
}

static inline struct nix_tm_node_list *
nix_tm_node_list(struct nix *nix, enum roc_nix_tm_tree tree)
{
	return &nix->trees[tree];
}

static inline const char *
nix_tm_hwlvl2str(uint32_t hw_lvl)
{
	switch (hw_lvl) {
	case NIX_TXSCH_LVL_MDQ:
		return "SMQ/MDQ";
	case NIX_TXSCH_LVL_TL4:
		return "TL4";
	case NIX_TXSCH_LVL_TL3:
		return "TL3";
	case NIX_TXSCH_LVL_TL2:
		return "TL2";
	case NIX_TXSCH_LVL_TL1:
		return "TL1";
	default:
		break;
	}

	return "???";
}

static inline const char *
nix_tm_tree2str(enum roc_nix_tm_tree tree)
{
	if (tree == ROC_NIX_TM_DEFAULT)
		return "Default Tree";
	else if (tree == ROC_NIX_TM_RLIMIT)
		return "Rate Limit Tree";
	else if (tree == ROC_NIX_TM_USER)
		return "User Tree";
	return "???";
}

/*
 * TM priv ops.
 */

int nix_tm_conf_init(struct roc_nix *roc_nix);
void nix_tm_conf_fini(struct roc_nix *roc_nix);
int nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
			 uint16_t *smq);
int nix_tm_sq_flush_pre(struct roc_nix_sq *sq);
int nix_tm_sq_flush_post(struct roc_nix_sq *sq);
int nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable);
int nix_tm_prepare_default_tree(struct roc_nix *roc_nix);
int nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node);
int nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
		       enum roc_nix_tm_tree tree, bool free);
int nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node);
int nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask,
			  bool hw_only);
int nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node);
void nix_tm_clear_shaper_profiles(struct nix *nix);
int nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
			     bool above_thresh);
void nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp);

int nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
			 bool rr_quantum_only);

int nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
		    bool cfg, bool ena);
int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
	       bool ena);
int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
			 bool enable);
void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
int nix_tm_mark_init(struct nix *nix);

/*
 * TM priv utils.
 */
uint16_t nix_tm_lvl2nix(struct nix *nix, uint32_t lvl);
uint16_t nix_tm_lvl2nix_tl1_root(uint32_t lvl);
uint16_t nix_tm_lvl2nix_tl2_root(uint32_t lvl);
uint16_t nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig);
int nix_tm_validate_prio(struct nix *nix, uint32_t lvl, uint32_t parent_id,
			 uint32_t priority, enum roc_nix_tm_tree tree);
struct nix_tm_node *nix_tm_node_search(struct nix *nix, uint32_t node_id,
				       enum roc_nix_tm_tree tree);
struct nix_tm_shaper_profile *nix_tm_shaper_profile_search(struct nix *nix,
							   uint32_t id);
uint8_t nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
			    volatile uint64_t *reg, volatile uint64_t *regval);
uint32_t nix_tm_check_rr(struct nix *nix, uint32_t parent_id,
			 enum roc_nix_tm_tree tree, uint32_t *rr_prio,
			 uint32_t *max_prio);
uint64_t nix_tm_shaper_profile_rate_min(struct nix *nix);
uint64_t nix_tm_shaper_rate_conv(uint64_t value, uint64_t *exponent_p,
				 uint64_t *mantissa_p, uint64_t *div_exp_p,
				 int8_t accuracy);
uint64_t nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
				  uint64_t *mantissa_p);
bool nix_tm_child_res_valid(struct nix_tm_node_list *list,
			    struct nix_tm_node *parent);
uint16_t nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig,
				  uint16_t *schq, enum roc_nix_tm_tree tree);
uint8_t nix_tm_tl1_default_prep(struct nix *nix, uint32_t schq,
				volatile uint64_t *reg,
				volatile uint64_t *regval);
uint8_t nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
				 volatile uint64_t *reg,
				 volatile uint64_t *regval,
				 volatile uint64_t *regval_mask);
uint8_t nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
			      volatile uint64_t *reg,
			      volatile uint64_t *regval);
uint8_t nix_tm_shaper_reg_prep(struct nix_tm_node *node,
			       struct nix_tm_shaper_profile *profile,
			       volatile uint64_t *reg,
			       volatile uint64_t *regval);
struct nix_tm_node *nix_tm_node_alloc(void);
void nix_tm_node_free(struct nix_tm_node *node);
struct nix_tm_shaper_profile *nix_tm_shaper_profile_alloc(void);
void nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile);

uint64_t nix_get_blkaddr(struct dev *dev);
void nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx);
int nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data);
int nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data,
			 uint8_t lf_tx_stats, uint8_t lf_rx_stats);
int nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
			uint16_t cints);
int nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid,
		  __io void **ctx_p);

/*
 * Telemetry
 */
int nix_tel_node_add(struct roc_nix *roc_nix);
void nix_tel_node_del(struct roc_nix *roc_nix);
int nix_tel_node_add_rq(struct roc_nix_rq *rq);
int nix_tel_node_add_cq(struct roc_nix_cq *cq);
int nix_tel_node_add_sq(struct roc_nix_sq *sq);

#endif /* _ROC_NIX_PRIV_H_ */