/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <math.h>

#include "roc_api.h"
#include "roc_priv.h"

/* Default SQB slack per SQ */
#define ROC_NIX_SQB_SLACK_DFLT 24

static inline uint32_t
nix_qsize_to_val(enum nix_q_size qsize)
{
	return (16UL << (qsize * 2));
}

static inline enum nix_q_size
nix_qsize_clampup(uint32_t val)
{
	int i = nix_q_size_16;

	for (; i < nix_q_size_max; i++)
		if (val <= nix_qsize_to_val(i))
			break;

	if (i >= nix_q_size_max)
		i = nix_q_size_max - 1;

	return i;
}

void
nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval)
{
	uint64_t wait_ns;

	if (!roc_model_is_cn10k())
		return;
	/* Due to HW errata, writes to VWQE_FLUSH might hang, so instead
	 * wait for the max VWQE timeout interval.
	 */
	if (rq->vwqe_ena) {
		wait_ns = rq->vwqe_wait_tmo * (vwqe_interval + 1) * 100;
		plt_delay_us((wait_ns / 1E3) + 1);
	}
}

int
nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
{
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	/* Pkts will be dropped silently if RQ is disabled */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

int
roc_nix_sq_ena_dis(struct roc_nix_sq *sq, bool enable)
{
	int rc = 0;

	rc = roc_nix_tm_sq_aura_fc(sq, enable);
	if (rc)
		goto done;

	sq->enable = enable;
done:
	return rc;
}

int
roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
{
	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
	int rc;

	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
	nix_rq_vwqe_flush(rq, nix->vwqe_interval);
	if (rc)
		return rc;

	/* Check for meta aura if RQ is enabled */
	if (enable && nix->need_meta_aura)
		rc = roc_nix_inl_meta_aura_check(rq->roc_nix, rq);
	return rc;
}

int
roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct dev *dev = &nix->dev;
	struct mbox *mbox = mbox_get(dev->mbox);
	bool sso_enable;
	int rc;

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_rsp *rsp;
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;
		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto exit;

		sso_enable = rsp->rq.sso_ena;
	} else {
		struct nix_cn10k_aq_enq_rsp *rsp;
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto exit;

		sso_enable = rsp->rq.sso_ena;
	}

	rc = sso_enable ? true : false;
exit:
	mbox_put(mbox);
	return rc;
}

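/* Track how the NPA auras backing this RQ are used (packet vs. IPsec packet
 * vs. VWQE buffers). On set, the aura handles are taken from the RQ struct;
 * on clear, they are read back from the RQ HW context so that auras attached
 * by an earlier configuration are also decremented.
 */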
static int
nix_rq_aura_buf_type_update(struct roc_nix_rq *rq, bool set)
{
	struct roc_nix *roc_nix = rq->roc_nix;
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	bool inl_inb_ena = roc_nix_inl_inb_is_enabled(roc_nix);
	uint64_t lpb_aura = 0, vwqe_aura = 0, spb_aura = 0;
	struct mbox *mbox = nix->dev.mbox;
	uint64_t aura_base;
	int rc, count;

	count = set ? 1 : -1;
	/* For buf type set, use info from RQ context */
	if (set) {
		lpb_aura = rq->aura_handle;
		spb_aura = rq->spb_ena ? rq->spb_aura_handle : 0;
		vwqe_aura = rq->vwqe_ena ? rq->vwqe_aura_handle : 0;
		goto skip_ctx_read;
	}

	aura_base = roc_npa_aura_handle_to_base(rq->aura_handle);
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_rsp *rsp;
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;
		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc) {
			mbox_put(mbox);
			return rc;
		}

		/* Get aura handle from aura */
		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
		if (rsp->rq.spb_ena)
			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
		mbox_put(mbox);
	} else {
		struct nix_cn10k_aq_enq_rsp *rsp;
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc) {
			mbox_put(mbox);
			return rc;
		}

		/* Get aura handle from aura */
		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
		if (rsp->rq.spb_ena)
			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
		if (rsp->rq.vwqe_ena)
			vwqe_aura = roc_npa_aura_handle_gen(rsp->rq.wqe_aura, aura_base);

		mbox_put(mbox);
	}

skip_ctx_read:
	/* Update attributes for LPB aura */
	if (inl_inb_ena)
		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
	else
		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET, count);

	/* Update attributes for SPB aura */
	if (spb_aura) {
		if (inl_inb_ena)
			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
		else
			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
	}

	/* Update attributes for VWQE aura */
	if (vwqe_aura) {
		if (inl_inb_ena)
			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE_IPSEC, count);
		else
			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE, count);
	}

	return 0;
}

static int
nix_rq_cn9k_cman_cfg(struct dev *dev, struct roc_nix_rq *rq)
{
	struct mbox *mbox = mbox_get(dev->mbox);
	struct nix_aq_enq_req *aq;
	int rc;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		rc = -ENOSPC;
		goto exit;
	}

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq_mask.lpb_pool_pass = ~(aq->rq_mask.lpb_pool_pass);
		aq->rq_mask.lpb_pool_drop = ~(aq->rq_mask.lpb_pool_drop);
	}

	if (rq->spb_red_pass && (rq->spb_red_pass >= rq->spb_red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq_mask.spb_pool_pass = ~(aq->rq_mask.spb_pool_pass);
		aq->rq_mask.spb_pool_drop = ~(aq->rq_mask.spb_pool_drop);
	}

	if (rq->xqe_red_pass && (rq->xqe_red_pass >= rq->xqe_red_drop)) {
		aq->rq.xqe_pass = rq->xqe_red_pass;
		aq->rq.xqe_drop = rq->xqe_red_drop;
		aq->rq_mask.xqe_drop = ~(aq->rq_mask.xqe_drop);
		aq->rq_mask.xqe_pass = ~(aq->rq_mask.xqe_pass);
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

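/* Fill the CN9K RQ AQ context from the generic roc_nix_rq config. With
 * cfg == false the context is built for an INIT op; with cfg == true a
 * WRITE op is prepared and the corresponding mask bits are set so that
 * only the touched fields are updated.
 */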
int
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
		bool cfg, bool ena)
{
	struct mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->cqid;
	}

	if (rq->ipsech_ena)
		aq->rq.ipsech_ena = 1;

	aq->rq.spb_ena = 0;
	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 1;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
		}
	}

	return 0;
}

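/* CN10K variant of the RQ context setup. In addition to the CN9K fields it
 * programs SPB (small packet buffer), VWQE and per-pool drop/backpressure
 * parameters.
 */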
int
nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
	   bool ena)
{
	struct nix_cn10k_aq_enq_req *aq;
	struct mbox *mbox = dev->mbox;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);

		if (rq->vwqe_ena) {
			aq->rq.vwqe_ena = true;
			aq->rq.vwqe_skip = rq->vwqe_first_skip;
			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
			aq->rq.wqe_aura = roc_npa_aura_handle_to_aura(rq->vwqe_aura_handle);
		}
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->cqid;
	}

	if (rq->ipsech_ena) {
		aq->rq.ipsech_ena = 1;
		aq->rq.ipsecd_drop_en = 1;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;
	}

	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;

	if (rq->spb_ena) {
		uint32_t spb_sizem1;

		aq->rq.spb_ena = 1;
		aq->rq.spb_aura =
			roc_npa_aura_handle_to_aura(rq->spb_aura_handle);

		if (rq->spb_size & 0x7 ||
		    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
			return -EINVAL;

		spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
		spb_sizem1 -= 1; /* Expressed in size minus one */
		aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
		aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
	} else {
		aq->rq.spb_ena = 0;
	}

	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 0;
	aq->rq.lpb_drop_ena = rq->lpb_drop_ena;
	aq->rq.spb_drop_ena = rq->spb_drop_ena;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.wqe_pool_pass = rq->red_pass;
		aq->rq.xqe_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq.wqe_pool_drop = rq->red_drop;
		aq->rq.xqe_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			if (rq->vwqe_ena) {
				aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
				aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
				aq->rq_mask.max_vsize_exp =
					~aq->rq_mask.max_vsize_exp;
				aq->rq_mask.vtime_wait =
					~aq->rq_mask.vtime_wait;
				aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
			}
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		if (rq->spb_ena) {
			aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
			aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
			aq->rq_mask.spb_high_sizem1 =
				~aq->rq_mask.spb_high_sizem1;
		}

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
		aq->rq_mask.lpb_drop_ena = ~aq->rq_mask.lpb_drop_ena;
		aq->rq_mask.spb_drop_ena = ~aq->rq_mask.spb_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
		}
	}

	return 0;
}

static int
nix_rq_cman_cfg(struct dev *dev, struct roc_nix_rq *rq)
{
	struct nix_cn10k_aq_enq_req *aq;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		rc = -ENOSPC;
		goto exit;
	}

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq_mask.lpb_pool_pass = ~(aq->rq_mask.lpb_pool_pass);
		aq->rq_mask.lpb_pool_drop = ~(aq->rq_mask.lpb_pool_drop);
	}

	if (rq->spb_red_pass && (rq->spb_red_pass >= rq->spb_red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq_mask.spb_pool_pass = ~(aq->rq_mask.spb_pool_pass);
		aq->rq_mask.spb_pool_drop = ~(aq->rq_mask.spb_pool_drop);
	}

	if (rq->xqe_red_pass && (rq->xqe_red_pass >= rq->xqe_red_drop)) {
		aq->rq.xqe_pass = rq->xqe_red_pass;
		aq->rq.xqe_drop = rq->xqe_red_drop;
		aq->rq_mask.xqe_drop = ~(aq->rq_mask.xqe_drop);
		aq->rq_mask.xqe_pass = ~(aq->rq_mask.xqe_pass);
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

int
roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	int rc;

	if (roc_nix == NULL || rq == NULL) {
		mbox_put(mbox);
		return NIX_ERR_PARAM;
	}

	if (rq->qid >= nix->nb_rx_queues) {
		mbox_put(mbox);
		return NIX_ERR_QUEUE_INVALID_RANGE;
	}

	rq->roc_nix = roc_nix;
	rq->tc = ROC_NIX_PFC_CLASS_INVALID;

	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);

	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}
	mbox_put(mbox);

	/* Update aura buf type to indicate its use */
	nix_rq_aura_buf_type_update(rq, true);

	/* Check for meta aura if RQ is enabled */
	if (ena && nix->need_meta_aura) {
		rc = roc_nix_inl_meta_aura_check(roc_nix, rq);
		if (rc)
			return rc;
	}

	nix->rqs[rq->qid] = rq;
	return nix_tel_node_add_rq(rq);
}

int
roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *m_box = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	struct mbox *mbox;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	/* Clear attributes for existing auras */
	nix_rq_aura_buf_type_update(rq, false);

	rq->roc_nix = roc_nix;
	rq->tc = ROC_NIX_PFC_CLASS_INVALID;

	mbox = mbox_get(m_box);
	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);

	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}
	mbox_put(mbox);

	/* Update aura attribute to indicate its use */
	nix_rq_aura_buf_type_update(rq, true);

	/* Check for meta aura if RQ is enabled */
	if (ena && nix->need_meta_aura) {
		rc = roc_nix_inl_meta_aura_check(roc_nix, rq);
		if (rc)
			return rc;
	}

	return nix_tel_node_add_rq(rq);
}

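/* Update only the congestion management (RED pass/drop) thresholds of an
 * already configured RQ; dispatches to the CN9K or CN10K context layout.
 */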
int
roc_nix_rq_cman_config(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
{
	bool is_cn9k = roc_model_is_cn9k();
	struct nix *nix;
	struct dev *dev;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(roc_nix);

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	dev = &nix->dev;

	if (is_cn9k)
		rc = nix_rq_cn9k_cman_cfg(dev, rq);
	else
		rc = nix_rq_cman_cfg(dev, rq);

	return rc;
}

int
roc_nix_rq_fini(struct roc_nix_rq *rq)
{
	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
	int rc;

	/* Disabling RQ is sufficient */
	rc = roc_nix_rq_ena_dis(rq, false);
	if (rc)
		return rc;

	/* Update aura attribute to indicate its use */
	nix_rq_aura_buf_type_update(rq, false);

	nix->rqs[rq->qid] = NULL;
	return 0;
}

int
roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	volatile struct nix_cq_ctx_s *cq_ctx;
	uint16_t drop_thresh = NIX_CQ_THRESH_LEVEL;
	uint16_t cpt_lbpid = nix->cpt_lbpid;
	enum nix_q_size qsize;
	size_t desc_sz;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	qsize = nix_qsize_clampup(cq->nb_desc);
	cq->nb_desc = nix_qsize_to_val(qsize);
	cq->qmask = cq->nb_desc - 1;
	cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
	cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	cq->wdata = (uint64_t)cq->qid << 32;
	cq->roc_nix = roc_nix;

	/* CQE of W16 */
	desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
	cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
	if (cq->desc_base == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	}

	cq_ctx->ena = 1;
	cq_ctx->caching = 1;
	cq_ctx->qsize = qsize;
	cq_ctx->base = (uint64_t)cq->desc_base;
	cq_ctx->avg_level = 0xff;
	cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
	cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
	if (roc_feature_nix_has_late_bp() && roc_nix_inl_inb_is_enabled(roc_nix)) {
		cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_CPT_DROP);
		cq_ctx->cpt_drop_err_en = 1;
		/* Enable Late BP only when CPT BPID is non-zero */
		if (cpt_lbpid) {
			cq_ctx->lbp_ena = 1;
			cq_ctx->lbpid_low = cpt_lbpid & 0x7;
			cq_ctx->lbpid_med = (cpt_lbpid >> 3) & 0x7;
			cq_ctx->lbpid_high = (cpt_lbpid >> 6) & 0x7;
			cq_ctx->lbp_frac = NIX_CQ_LPB_THRESH_FRAC;
		}
		drop_thresh = NIX_CQ_SEC_THRESH_LEVEL;
	}

	/* Many to one reduction */
	cq_ctx->qint_idx = cq->qid % nix->qints;
	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
	cq_ctx->cint_idx = cq->qid;

	if (roc_errata_nix_has_cq_min_size_4k()) {
		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
		uint16_t min_rx_drop;

		min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
		cq_ctx->drop = min_rx_drop;
		cq_ctx->drop_ena = 1;
		cq->drop_thresh = min_rx_drop;
	} else {
		cq->drop_thresh = drop_thresh;
		/* Drop processing or RED drop cannot be enabled due to
		 * packets coming for second pass from CPT.
		 */
		if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
			cq_ctx->drop = cq->drop_thresh;
			cq_ctx->drop_ena = 1;
		}
	}
	cq_ctx->bp = cq->drop_thresh;

	if (roc_feature_nix_has_cqe_stash()) {
		if (cq_ctx->caching) {
			cq_ctx->stashing = 1;
			cq_ctx->stash_thresh = cq->stash_thresh;
		}
	}

	rc = mbox_process(mbox);
	mbox_put(mbox);
	if (rc)
		goto free_mem;

	return nix_tel_node_add_cq(cq);

free_mem:
	plt_free(cq->desc_base);
fail:
	return rc;
}

int
roc_nix_cq_fini(struct roc_nix_cq *cq)
{
	struct mbox *mbox;
	struct nix *nix;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(cq->roc_nix);
	mbox = mbox_get((&nix->dev)->mbox);

	/* Disable CQ */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
		if (roc_feature_nix_has_late_bp() && roc_nix_inl_inb_is_enabled(cq->roc_nix)) {
			aq->cq.lbp_ena = 0;
			aq->cq_mask.lbp_ena = ~aq->cq_mask.lbp_ena;
		}
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	mbox_put(mbox);
	plt_free(cq->desc_base);
	return 0;
}

static int
sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqes_per_sqb, count, nb_sqb_bufs, thr;
	struct npa_pool_s pool;
	struct npa_aura_s aura;
	uint64_t blk_sz;
	uint64_t iova;
	int rc;

	blk_sz = nix->sqb_size;
	if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
		sqes_per_sqb = (blk_sz / 8) / 16;
	else
		sqes_per_sqb = (blk_sz / 8) / 8;

	/* Reserve one SQE in each SQB to hold a pointer to the next SQB */
	sqes_per_sqb -= 1;

	sq->nb_desc = PLT_MAX(512U, sq->nb_desc);
	nb_sqb_bufs = PLT_DIV_CEIL(sq->nb_desc, sqes_per_sqb);
	thr = PLT_DIV_CEIL((nb_sqb_bufs * ROC_NIX_SQB_THRESH), 100);
	nb_sqb_bufs += NIX_SQB_PREFETCH;
	/* Clamp up the SQB count */
	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count, (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));

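	/* Record the SQB accounting before slack is added below; the aura is
	 * created with the slack included (aura_sqb_bufs) while nb_sqb_bufs /
	 * nb_sqb_bufs_adj reflect what the SQ itself may consume.
	 */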
	sq->nb_sqb_bufs = nb_sqb_bufs;
	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
	sq->nb_sqb_bufs_adj = nb_sqb_bufs;

	if (roc_nix->sqb_slack)
		nb_sqb_bufs += roc_nix->sqb_slack;
	else
		nb_sqb_bufs += PLT_MAX((int)thr, (int)ROC_NIX_SQB_SLACK_DFLT);
	/* Explicitly set nat_align alone; by default the pool comes with both
	 * nat_align and buf_offset = 1, which we don't want for SQBs.
	 */
	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;

	memset(&aura, 0, sizeof(aura));
	aura.fc_ena = 1;
	if (roc_model_is_cn9k() || roc_errata_npa_has_no_fc_stype_ststp())
		aura.fc_stype = 0x0; /* STF */
	else
		aura.fc_stype = 0x3; /* STSTP */
	aura.fc_addr = (uint64_t)sq->fc;
	aura.fc_hyst_bits = sq->fc_hyst_bits & 0xF;
	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura, &pool, 0);
	if (rc)
		goto fail;

	roc_npa_buf_type_update(sq->aura_handle, ROC_NPA_BUF_TYPE_SQB, 1);
	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
	if (sq->sqe_mem == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto nomem;
	}

	/* Fill the initial buffers */
	iova = (uint64_t)sq->sqe_mem;
	for (count = 0; count < nb_sqb_bufs; count++) {
		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
		iova += blk_sz;
	}

	if (roc_npa_aura_op_available_wait(sq->aura_handle, nb_sqb_bufs, 0) !=
	    nb_sqb_bufs) {
		plt_err("Failed to free all pointers to the pool");
		rc = NIX_ERR_NO_MEM;
		goto npa_fail;
	}

	roc_npa_pool_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
	roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
	sq->aura_sqb_bufs = nb_sqb_bufs;

	return rc;
npa_fail:
	plt_free(sq->sqe_mem);
nomem:
	roc_npa_pool_destroy(sq->aura_handle);
fail:
	return rc;
}

static int
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	     uint16_t smq)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq = smq;
	aq->sq.smq_rr_quantum = rr_quantum;
	if (roc_nix_is_sdp(roc_nix))
		aq->sq.default_chan =
			nix->tx_chan_base + (sq->qid % nix->tx_chan_cnt);
	else
		aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	aq->sq.cq_limit = sq->cq_drop_thresh;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;

	return 0;
}

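/* Tear down a CN9K SQ: read back the context, disable the SQ, then walk the
 * SQB chain starting at head_sqb and return each SQB to the aura.
 */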
static int
sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena) {
		mbox_put(mbox);
		return 0;
	}

	/* Disable sq */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Read SQ and free SQBs */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	if (aq->sq.smq_pend)
		plt_err("SQ has pending SQEs");

	count = aq->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQBs that are used */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uint64_t *)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) * (0x2 >> sq->max_sqe_sz) * 8));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next-to-use SQB */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	mbox_put(mbox);
	return 0;
}

static int
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum, uint16_t smq)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq = smq;
	aq->sq.smq_rr_weight = rr_quantum;
	if (roc_nix_is_sdp(roc_nix))
		aq->sq.default_chan = nix->tx_chan_base + (sq->qid % nix->tx_chan_cnt);
	else
		aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	aq->sq.cq_limit = sq->cq_drop_thresh;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;
	if (roc_errata_nix_assign_incorrect_qint()) {
		/* Assign QINT 0 to all the SQs: an errata exists where NIXTX
		 * can send an incorrect QINT_IDX when reporting a queue
		 * interrupt (QINT), which might result in software missing
		 * the interrupt.
		 */
		aq->sq.qint_idx = 0;
	}
	return 0;
}

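/* CN10K counterpart of sq_cn9k_fini(); same flow using the cn10k mailbox
 * request/response layout.
 */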
static int
sq_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
	struct nix_cn10k_aq_enq_rsp *rsp;
	struct nix_cn10k_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena) {
		mbox_put(mbox);
		return 0;
	}

	/* Disable sq */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Read SQ and free SQBs */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	if (aq->sq.smq_pend)
		plt_err("SQ has pending SQEs");

	count = aq->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQBs that are used */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uint64_t *)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) * (0x2 >> sq->max_sqe_sz) * 8));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next-to-use SQB */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	mbox_put(mbox);
	return 0;
}

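/* Illustrative usage sketch (not compiled here); assumes the roc_nix device
 * and the TM hierarchy have already been set up by the caller:
 *
 *	struct roc_nix_sq sq = {
 *		.qid = 0,
 *		.nb_desc = 1024,
 *		.max_sqe_sz = roc_nix_maxsqesz_w16,
 *	};
 *
 *	rc = roc_nix_sq_init(roc_nix, &sq);
 *	...
 *	rc = roc_nix_sq_fini(&sq);
 */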
int
roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *m_box = (&nix->dev)->mbox;
	uint16_t qid, smq = UINT16_MAX;
	uint32_t rr_quantum = 0;
	struct mbox *mbox;
	int rc;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	qid = sq->qid;
	if (qid >= nix->nb_tx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	sq->roc_nix = roc_nix;
	sq->tc = ROC_NIX_PFC_CLASS_INVALID;
	/* Allocate memory for flow control updates from HW.
	 * Allocate one cache line so that it fits all FC_STYPE modes.
	 */
	sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
	if (sq->fc == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	rc = sqb_pool_populate(roc_nix, sq);
	if (rc)
		goto nomem;

	rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
	if (rc) {
		rc = NIX_ERR_TM_LEAF_NODE_GET;
		goto nomem;
	}

	mbox = mbox_get(m_box);
	/* Init SQ context */
	if (roc_model_is_cn9k())
		rc = sq_cn9k_init(nix, sq, rr_quantum, smq);
	else
		rc = sq_init(nix, sq, rr_quantum, smq);

	if (rc) {
		mbox_put(mbox);
		goto nomem;
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		goto nomem;
	}
	mbox_put(mbox);

	sq->enable = true;
	nix->sqs[qid] = sq;
	sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
	/* Evenly distribute LMT slot for each sq */
	if (roc_model_is_cn9k()) {
		/* Multiple cores/SQs can use same LMTLINE safely in CN9K */
		sq->lmt_addr = (void *)(nix->lmt_base +
					((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
	}

	rc = nix_tel_node_add_sq(sq);
	return rc;
nomem:
	plt_free(sq->fc);
fail:
	return rc;
}

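/* Release an SQ: flush it through TM, disable its context and free its SQBs,
 * sync the NDC-NIX-TX cache for the LF and destroy the backing SQB pool.
 */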
1484 */ 1485 roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB); 1486 rc |= roc_npa_pool_destroy(sq->aura_handle); 1487 plt_free(sq->fc); 1488 plt_free(sq->sqe_mem); 1489 nix->sqs[qid] = NULL; 1490 1491 return rc; 1492 } 1493 1494 void 1495 roc_nix_cq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head, 1496 uint32_t *tail) 1497 { 1498 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 1499 uint64_t reg, val; 1500 int64_t *addr; 1501 1502 if (head == NULL || tail == NULL) 1503 return; 1504 1505 reg = (((uint64_t)qid) << 32); 1506 addr = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS); 1507 val = roc_atomic64_add_nosync(reg, addr); 1508 if (val & 1509 (BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) | BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))) 1510 val = 0; 1511 1512 *tail = (uint32_t)(val & 0xFFFFF); 1513 *head = (uint32_t)((val >> 20) & 0xFFFFF); 1514 } 1515 1516 void 1517 roc_nix_sq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head, 1518 uint32_t *tail) 1519 { 1520 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 1521 struct roc_nix_sq *sq = nix->sqs[qid]; 1522 uint16_t sqes_per_sqb, sqb_cnt; 1523 uint64_t reg, val; 1524 int64_t *addr; 1525 1526 if (head == NULL || tail == NULL) 1527 return; 1528 1529 reg = (((uint64_t)qid) << 32); 1530 addr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS); 1531 val = roc_atomic64_add_nosync(reg, addr); 1532 if (val & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR)) { 1533 val = 0; 1534 return; 1535 } 1536 1537 *tail = (uint32_t)((val >> 28) & 0x3F); 1538 *head = (uint32_t)((val >> 20) & 0x3F); 1539 sqb_cnt = (uint16_t)(val & 0xFFFF); 1540 1541 sqes_per_sqb = 1 << sq->sqes_per_sqb_log2; 1542 1543 /* Update tail index as per used sqb count */ 1544 *tail += (sqes_per_sqb * (sqb_cnt - 1)); 1545 } 1546 1547 int 1548 roc_nix_q_err_cb_register(struct roc_nix *roc_nix, q_err_get_t sq_err_handle) 1549 { 1550 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 1551 struct dev *dev = &nix->dev; 1552 1553 if (sq_err_handle == NULL) 1554 return NIX_ERR_PARAM; 1555 1556 dev->ops->q_err_cb = (q_err_cb_t)sq_err_handle; 1557 return 0; 1558 } 1559 1560 void 1561 roc_nix_q_err_cb_unregister(struct roc_nix *roc_nix) 1562 { 1563 struct nix *nix = roc_nix_to_nix_priv(roc_nix); 1564 struct dev *dev = &nix->dev; 1565 1566 dev->ops->q_err_cb = NULL; 1567 } 1568