/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <math.h>

#include "roc_api.h"
#include "roc_priv.h"

static inline uint32_t
nix_qsize_to_val(enum nix_q_size qsize)
{
	return (16UL << (qsize * 2));
}

static inline enum nix_q_size
nix_qsize_clampup(uint32_t val)
{
	int i = nix_q_size_16;

	for (; i < nix_q_size_max; i++)
		if (val <= nix_qsize_to_val(i))
			break;

	if (i >= nix_q_size_max)
		i = nix_q_size_max - 1;

	return i;
}

void
nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval)
{
	uint64_t wait_ns;

	if (!roc_model_is_cn10k())
		return;
	/* Due to HW errata, writes to VWQE_FLUSH might hang, so instead
	 * wait for the max VWQE timeout interval.
	 */
	if (rq->vwqe_ena) {
		wait_ns = rq->vwqe_wait_tmo * (vwqe_interval + 1) * 100;
		plt_delay_us((wait_ns / 1E3) + 1);
	}
}

int
nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
{
	struct mbox *mbox = dev->mbox;

	/* Pkts will be dropped silently if RQ is disabled */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	}

	return mbox_process(mbox);
}

int
roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
{
	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
	int rc;

	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
	nix_rq_vwqe_flush(rq, nix->vwqe_interval);

	return rc;
}

int
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
		bool cfg, bool ena)
{
	struct mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

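	/* For INIT (cfg == false) the whole context is programmed, while
	 * WRITE (cfg == true) only applies fields whose rq_mask bits are set;
	 * that is why the cfg path below mirrors every field with a mask.
	 */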
	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->qid;
	}

	if (rq->ipsech_ena)
		aq->rq.ipsech_ena = 1;

	aq->rq.spb_ena = 0;
	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 1;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
		}
	}

	return 0;
}

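/* CN10K variant of nix_rq_cn9k_cfg(). In addition to the common fields it
 * programs vector WQE (VWQE), the small packet buffer (SPB) pool and the
 * per-pool LPB/SPB drop controls that only exist in the cn10k RQ context.
 */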
int
nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
	   bool ena)
{
	struct nix_cn10k_aq_enq_req *aq;
	struct mbox *mbox = dev->mbox;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);

		if (rq->vwqe_ena) {
			aq->rq.vwqe_ena = true;
			aq->rq.vwqe_skip = rq->vwqe_first_skip;
			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
			aq->rq.wqe_aura = rq->vwqe_aura_handle;
		}
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->qid;
	}

	if (rq->ipsech_ena) {
		aq->rq.ipsech_ena = 1;
		aq->rq.ipsecd_drop_en = 1;
	}

	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;

	if (rq->spb_ena) {
		uint32_t spb_sizem1;

		aq->rq.spb_ena = 1;
		aq->rq.spb_aura =
			roc_npa_aura_handle_to_aura(rq->spb_aura_handle);

		if (rq->spb_size & 0x7 ||
		    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
			return -EINVAL;

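		/* e.g. an SPB of 4096 bytes is 512 dwords, i.e. sizem1 = 511:
		 * the low 6 bits (0x3F) go into spb_sizem1 and the remaining
		 * bits (here 0x7) into spb_high_sizem1.
		 */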
		spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
		spb_sizem1 -= 1;	       /* Expressed in size minus one */
		aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
		aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
	} else {
		aq->rq.spb_ena = 0;
	}

	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 0;
	aq->rq.lpb_drop_ena = rq->lpb_drop_ena;
	aq->rq.spb_drop_ena = rq->spb_drop_ena;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.wqe_pool_pass = rq->red_pass;
		aq->rq.xqe_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq.wqe_pool_drop = rq->red_drop;
		aq->rq.xqe_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			if (rq->vwqe_ena) {
				aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
				aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
				aq->rq_mask.max_vsize_exp =
					~aq->rq_mask.max_vsize_exp;
				aq->rq_mask.vtime_wait =
					~aq->rq_mask.vtime_wait;
				aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
			}
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		if (rq->spb_ena) {
			aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
			aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
			aq->rq_mask.spb_high_sizem1 =
				~aq->rq_mask.spb_high_sizem1;
		}

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
		aq->rq_mask.lpb_drop_ena = ~aq->rq_mask.lpb_drop_ena;
		aq->rq_mask.spb_drop_ena = ~aq->rq_mask.spb_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
		}
	}

	return 0;
}

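/* Illustrative only: a minimal CQ-mode RQ setup a caller might do before
 * roc_nix_rq_init(), using only fields consumed above. The values and the
 * lpb_aura variable are examples, not recommendations.
 *
 *	struct roc_nix_rq rq = {0};
 *
 *	rq.qid = 0;
 *	rq.aura_handle = lpb_aura;	(LPB pool created via roc_npa)
 *	rq.first_skip = 0;
 *	rq.later_skip = 0;
 *	rq.lpb_size = 2048;		(must be a multiple of 8)
 *	rq.flow_tag_width = 32;
 *	rc = roc_nix_rq_init(roc_nix, &rq, true);
 */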
int
roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	rq->roc_nix = roc_nix;

	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);

	if (rc)
		return rc;

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	return nix_tel_node_add_rq(rq);
}

int
roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	rq->roc_nix = roc_nix;

	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);

	if (rc)
		return rc;

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	return nix_tel_node_add_rq(rq);
}

int
roc_nix_rq_fini(struct roc_nix_rq *rq)
{
	/* Disabling RQ is sufficient */
	return roc_nix_rq_ena_dis(rq, false);
}

int
roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	volatile struct nix_cq_ctx_s *cq_ctx;
	enum nix_q_size qsize;
	size_t desc_sz;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	if (cq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	qsize = nix_qsize_clampup(cq->nb_desc);
	cq->nb_desc = nix_qsize_to_val(qsize);
	cq->qmask = cq->nb_desc - 1;
	cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
	cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	cq->wdata = (uint64_t)cq->qid << 32;
	cq->roc_nix = roc_nix;

	/* CQE of W16 */
	desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
	cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
	if (cq->desc_base == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	}

	cq_ctx->ena = 1;
	cq_ctx->caching = 1;
	cq_ctx->qsize = qsize;
	cq_ctx->base = (uint64_t)cq->desc_base;
	cq_ctx->avg_level = 0xff;
	cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
	cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);

	/* Many to one reduction */
	cq_ctx->qint_idx = cq->qid % nix->qints;
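	/* e.g. with 8 QINTs, CQs 0..7 map to QINT 0..7 and CQ 8 wraps back to
	 * QINT 0, so several queues may share one queue interrupt.
	 */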
	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
	cq_ctx->cint_idx = cq->qid;

	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
		uint16_t min_rx_drop;

		min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
		cq_ctx->drop = min_rx_drop;
		cq_ctx->drop_ena = 1;
		cq->drop_thresh = min_rx_drop;
	} else {
		cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
		/* Drop processing or RED drop cannot be enabled due to
		 * packets coming for second pass from CPT.
		 */
		if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
			cq_ctx->drop = cq->drop_thresh;
			cq_ctx->drop_ena = 1;
		}
	}

	/* TX pause frames enable flow ctrl on RX side */
	if (nix->tx_pause) {
		/* Single BPID is allocated for all rx channels for now */
		cq_ctx->bpid = nix->bpid[0];
		cq_ctx->bp = cq->drop_thresh;
		cq_ctx->bp_ena = 1;
	}

	rc = mbox_process(mbox);
	if (rc)
		goto free_mem;

	return nix_tel_node_add_cq(cq);

free_mem:
	plt_free(cq->desc_base);
fail:
	return rc;
}

int
roc_nix_cq_fini(struct roc_nix_cq *cq)
{
	struct mbox *mbox;
	struct nix *nix;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(cq->roc_nix);
	mbox = (&nix->dev)->mbox;

	/* Disable CQ */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	}

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	plt_free(cq->desc_base);
	return 0;
}

static int
sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqes_per_sqb, count, nb_sqb_bufs;
	struct npa_pool_s pool;
	struct npa_aura_s aura;
	uint64_t blk_sz;
	uint64_t iova;
	int rc;

	blk_sz = nix->sqb_size;
	if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
		sqes_per_sqb = (blk_sz / 8) / 16;
	else
		sqes_per_sqb = (blk_sz / 8) / 8;

	sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
	/* Clamp up the SQB count */
	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));

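	/* e.g. with a 4 KB SQB and W16 (128-byte) SQEs, sqes_per_sqb is
	 * (4096 / 8) / 16 = 32, so a 1024-entry SQ needs 32 SQBs plus
	 * NIX_SQB_LIST_SPACE slack, clamped to [NIX_DEF_SQB, max_sqb_count].
	 */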
	sq->nb_sqb_bufs = nb_sqb_bufs;
	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
	sq->nb_sqb_bufs_adj =
		nb_sqb_bufs -
		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
	sq->nb_sqb_bufs_adj =
		(sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;

	/* Explicitly set nat_align alone as by default pool is with both
	 * nat_align and buf_offset = 1 which we don't want for SQB.
	 */
	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;

	memset(&aura, 0, sizeof(aura));
	aura.fc_ena = 1;
	if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0())
		aura.fc_stype = 0x0; /* STF */
	else
		aura.fc_stype = 0x3; /* STSTP */
	aura.fc_addr = (uint64_t)sq->fc;
	aura.fc_hyst_bits = 0; /* Store count on all updates */
	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
				 &pool);
	if (rc)
		goto fail;

	sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
	if (sq->sqe_mem == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto nomem;
	}

	/* Fill the initial buffers */
	iova = (uint64_t)sq->sqe_mem;
	for (count = 0; count < NIX_MAX_SQB; count++) {
		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
		iova += blk_sz;
	}

	if (roc_npa_aura_op_available_wait(sq->aura_handle, NIX_MAX_SQB, 0) !=
	    NIX_MAX_SQB) {
		plt_err("Failed to free all pointers to the pool");
		rc = NIX_ERR_NO_MEM;
		goto npa_fail;
	}

	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
	roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
	sq->aura_sqb_bufs = NIX_MAX_SQB;

	return rc;
npa_fail:
	plt_free(sq->sqe_mem);
nomem:
	roc_npa_pool_destroy(sq->aura_handle);
fail:
	return rc;
}

static int
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	     uint16_t smq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq = smq;
	aq->sq.smq_rr_quantum = rr_quantum;
	aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	/* QINT 0 is assigned to all SQs: an errata exists where NIXTX can
	 * send an incorrect QINT_IDX when reporting a queue interrupt (QINT),
	 * which might result in software missing the interrupt.
	 */
	aq->sq.qint_idx = 0;
	return 0;
}

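/* The teardown helpers below follow the same sequence on both silicon
 * families: read the SQ context back, disable the SQ with a masked WRITE,
 * re-read it, then return the SQBs still held by hardware to the SQB aura.
 */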
static int
sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena)
		return 0;

	/* Disable sq */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Read SQ and free sqb's */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	if (rsp->sq.smq_pend)
		plt_err("SQ has pending SQE's");

	count = rsp->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQB's that are used */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uintptr_t)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) *
						 sq->max_sqe_sz));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	return 0;
}

static int
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	uint16_t smq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq = smq;
	aq->sq.smq_rr_weight = rr_quantum;
	aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* QINT 0 is assigned to all SQs: an errata exists where NIXTX can
	 * send an incorrect QINT_IDX when reporting a queue interrupt (QINT),
	 * which might result in software missing the interrupt.
	 */
	aq->sq.qint_idx = 0;
	return 0;
}

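/* cn10k counterpart of sq_cn9k_fini(); the flow is identical, only the
 * nix_cn10k_aq_enq_* mailbox layout differs.
 */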
static int
sq_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_rsp *rsp;
	struct nix_cn10k_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena)
		return 0;

	/* Disable sq */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Read SQ and free sqb's */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	if (rsp->sq.smq_pend)
		plt_err("SQ has pending SQE's");

	count = rsp->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQB's that are used */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uintptr_t)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) *
						 sq->max_sqe_sz));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	return 0;
}

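/* Illustrative only: minimal caller-side setup for roc_nix_sq_init(), using
 * only fields consumed by the helpers above. Values are examples.
 *
 *	struct roc_nix_sq sq = {0};
 *
 *	sq.qid = 0;
 *	sq.nb_desc = 1024;
 *	sq.max_sqe_sz = roc_nix_maxsqesz_w16;
 *	sq.cq_ena = 1;
 *	sq.cqid = 0;
 *	rc = roc_nix_sq_init(roc_nix, &sq);
 */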
int
roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t qid, smq = UINT16_MAX;
	uint32_t rr_quantum = 0;
	int rc;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	qid = sq->qid;
	if (qid >= nix->nb_tx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	sq->roc_nix = roc_nix;
	/*
	 * Allocate memory for flow control updates from HW.
	 * Allocate one cache line so that it fits all FC_STYPE modes.
	 */
	sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
	if (sq->fc == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	rc = sqb_pool_populate(roc_nix, sq);
	if (rc)
		goto nomem;

	rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
	if (rc) {
		rc = NIX_ERR_TM_LEAF_NODE_GET;
		goto nomem;
	}

	/* Init SQ context */
	if (roc_model_is_cn9k())
		rc = sq_cn9k_init(nix, sq, rr_quantum, smq);
	else
		rc = sq_init(nix, sq, rr_quantum, smq);

	if (rc)
		goto nomem;

	rc = mbox_process(mbox);
	if (rc)
		goto nomem;

	nix->sqs[qid] = sq;
	sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
	/* Evenly distribute LMT slot for each sq */
	if (roc_model_is_cn9k()) {
		/* Multiple cores/SQ's can use same LMTLINE safely in CN9K */
		sq->lmt_addr = (void *)(nix->lmt_base +
					((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
	}

	rc = nix_tel_node_add_sq(sq);
	return rc;
nomem:
	plt_free(sq->fc);
fail:
	return rc;
}

int
roc_nix_sq_fini(struct roc_nix_sq *sq)
{
	struct nix *nix;
	struct mbox *mbox;
	struct ndc_sync_op *ndc_req;
	uint16_t qid;
	int rc = 0;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(sq->roc_nix);
	mbox = (&nix->dev)->mbox;

	qid = sq->qid;

	rc = nix_tm_sq_flush_pre(sq);

	/* Release SQ context */
	if (roc_model_is_cn9k())
		rc |= sq_cn9k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
	else
		rc |= sq_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);

	/* Sync NDC-NIX-TX for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL)
		return -ENOSPC;
	ndc_req->nix_lf_tx_sync = 1;
	if (mbox_process(mbox))
		rc |= NIX_ERR_NDC_SYNC;

	rc |= nix_tm_sq_flush_post(sq);

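	/* sqb_pool_populate() lowered the aura limit to nb_sqb_bufs after
	 * seeding NIX_MAX_SQB buffers into the pool, so it must be raised
	 * back before the pool is drained and destroyed.
	 */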
	/* Restore limit to max SQB count that the pool was created with,
	 * for aura drain to succeed.
	 */
	roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
	rc |= roc_npa_pool_destroy(sq->aura_handle);
	plt_free(sq->fc);
	plt_free(sq->sqe_mem);
	nix->sqs[qid] = NULL;

	return rc;
}

void
roc_nix_cq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
			 uint32_t *tail)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint64_t reg, val;
	int64_t *addr;

	if (head == NULL || tail == NULL)
		return;

	reg = (((uint64_t)qid) << 32);
	addr = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	val = roc_atomic64_add_nosync(reg, addr);
	if (val &
	    (BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) | BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR)))
		val = 0;

	*tail = (uint32_t)(val & 0xFFFFF);
	*head = (uint32_t)((val >> 20) & 0xFFFFF);
}

void
roc_nix_sq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
			 uint32_t *tail)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_nix_sq *sq = nix->sqs[qid];
	uint16_t sqes_per_sqb, sqb_cnt;
	uint64_t reg, val;
	int64_t *addr;

	if (head == NULL || tail == NULL)
		return;

	reg = (((uint64_t)qid) << 32);
	addr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
	val = roc_atomic64_add_nosync(reg, addr);
	if (val & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR)) {
		val = 0;
		return;
	}

	*tail = (uint32_t)((val >> 28) & 0x3F);
	*head = (uint32_t)((val >> 20) & 0x3F);
	sqb_cnt = (uint16_t)(val & 0xFFFF);

	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;

	/* Update tail index as per used sqb count */
	*tail += (sqes_per_sqb * (sqb_cnt - 1));
}