/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <math.h>

#include "roc_api.h"
#include "roc_priv.h"

/* Default SQB slack per SQ */
#define ROC_NIX_SQB_SLACK_DFLT 24

static inline uint32_t
nix_qsize_to_val(enum nix_q_size qsize)
{
	return (16UL << (qsize * 2));
}

static inline enum nix_q_size
nix_qsize_clampup(uint32_t val)
{
	int i = nix_q_size_16;

	for (; i < nix_q_size_max; i++)
		if (val <= nix_qsize_to_val(i))
			break;

	if (i >= nix_q_size_max)
		i = nix_q_size_max - 1;

	return i;
}

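/*
 * Worked example (illustrative): nix_qsize_to_val() maps the nix_q_size enum
 * to a descriptor count of 16 << (qsize * 2), i.e. 16, 64, 256, 1K, 4K, ...
 * entries. nix_qsize_clampup() picks the smallest enum whose value covers a
 * request, so e.g. a request for 600 descriptors is clamped up to 1024
 * (assuming the usual 16/64/256/1K/... progression of the enum values).
 */
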
void
nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval)
{
	uint64_t wait_ns;

	if (!roc_model_is_cn10k())
		return;
	/* Due to HW errata, writes to VWQE_FLUSH might hang, so instead
	 * wait for the max VWQE timeout interval.
	 */
	if (rq->vwqe_ena) {
		wait_ns = rq->vwqe_wait_tmo * (vwqe_interval + 1) * 100;
		plt_delay_us((wait_ns / 1E3) + 1);
	}
}

int
nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
{
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	/* Pkts will be dropped silently if RQ is disabled */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	} else if (roc_model_is_cn10k()) {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	} else {
		struct nix_cn20k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

int
roc_nix_sq_ena_dis(struct roc_nix_sq *sq, bool enable)
{
	int rc = 0;

	rc = roc_nix_tm_sq_aura_fc(sq, enable);
	if (rc)
		goto done;

	sq->enable = enable;
done:
	return rc;
}

int
roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
{
	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
	int rc;

	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
	nix_rq_vwqe_flush(rq, nix->vwqe_interval);
	if (rc)
		return rc;

	/* Check for meta aura if RQ is enabled */
	if (enable && nix->need_meta_aura)
		rc = roc_nix_inl_meta_aura_check(rq->roc_nix, rq);
	return rc;
}

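/*
 * Usage sketch (illustrative only, error handling trimmed): a caller that has
 * already set up "rq" with roc_nix_rq_init() can toggle it at runtime:
 *
 *	rc = roc_nix_rq_ena_dis(rq, true);   // enable: may also validate meta aura
 *	...
 *	rc = roc_nix_rq_ena_dis(rq, false);  // disable: waits out VWQE flush on cn10k
 */
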
int
roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct dev *dev = &nix->dev;
	struct mbox *mbox = mbox_get(dev->mbox);
	bool sso_enable;
	int rc;

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_rsp *rsp;
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;
		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto exit;

		sso_enable = rsp->rq.sso_ena;
	} else if (roc_model_is_cn10k()) {
		struct nix_cn10k_aq_enq_rsp *rsp;
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto exit;

		sso_enable = rsp->rq.sso_ena;
	} else {
		struct nix_cn20k_aq_enq_rsp *rsp;
		struct nix_cn20k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto exit;

		sso_enable = rsp->rq.sso_ena;
	}

	rc = sso_enable ? true : false;
exit:
	mbox_put(mbox);
	return rc;
}

static int
nix_rq_aura_buf_type_update(struct roc_nix_rq *rq, bool set)
{
	struct roc_nix *roc_nix = rq->roc_nix;
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	bool inl_inb_ena = roc_nix_inl_inb_is_enabled(roc_nix);
	uint64_t lpb_aura = 0, vwqe_aura = 0, spb_aura = 0;
	struct mbox *mbox = nix->dev.mbox;
	uint64_t aura_base;
	int rc, count;

	count = set ? 1 : -1;
	/* For buf type set, use info from RQ context */
	if (set) {
		lpb_aura = rq->aura_handle;
		spb_aura = rq->spb_ena ? rq->spb_aura_handle : 0;
		vwqe_aura = rq->vwqe_ena ? rq->vwqe_aura_handle : 0;
		goto skip_ctx_read;
	}

	aura_base = roc_npa_aura_handle_to_base(rq->aura_handle);
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_rsp *rsp;
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;
		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc) {
			mbox_put(mbox);
			return rc;
		}

		/* Get aura handle from aura */
		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
		if (rsp->rq.spb_ena)
			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
		mbox_put(mbox);
	} else if (roc_model_is_cn10k()) {
		struct nix_cn10k_aq_enq_rsp *rsp;
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc) {
			mbox_put(mbox);
			return rc;
		}

		/* Get aura handle from aura */
		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
		if (rsp->rq.spb_ena)
			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
		if (rsp->rq.vwqe_ena)
			vwqe_aura = roc_npa_aura_handle_gen(rsp->rq.wqe_aura, aura_base);

		mbox_put(mbox);
	} else {
		struct nix_cn20k_aq_enq_rsp *rsp;
		struct nix_cn20k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc) {
			mbox_put(mbox);
			return rc;
		}

		/* Get aura handle from aura */
		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
		if (rsp->rq.spb_ena)
			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);

		mbox_put(mbox);
	}

skip_ctx_read:
	/* Update attributes for LPB aura */
	if (inl_inb_ena)
		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
	else
		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET, count);

	/* Update attributes for SPB aura */
	if (spb_aura) {
		if (inl_inb_ena)
			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
		else
			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
	}

	/* Update attributes for VWQE aura */
	if (vwqe_aura) {
		if (inl_inb_ena)
			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE_IPSEC, count);
		else
			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE, count);
	}

	return 0;
}

static int
nix_rq_cn9k_cman_cfg(struct dev *dev, struct roc_nix_rq *rq)
{
	struct mbox *mbox = mbox_get(dev->mbox);
	struct nix_aq_enq_req *aq;
	int rc;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		rc = -ENOSPC;
		goto exit;
	}

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq_mask.lpb_pool_pass = ~(aq->rq_mask.lpb_pool_pass);
		aq->rq_mask.lpb_pool_drop = ~(aq->rq_mask.lpb_pool_drop);
	}

	if (rq->spb_red_pass && (rq->spb_red_pass >= rq->spb_red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq_mask.spb_pool_pass = ~(aq->rq_mask.spb_pool_pass);
		aq->rq_mask.spb_pool_drop = ~(aq->rq_mask.spb_pool_drop);
	}

	if (rq->xqe_red_pass && (rq->xqe_red_pass >= rq->xqe_red_drop)) {
		aq->rq.xqe_pass = rq->xqe_red_pass;
		aq->rq.xqe_drop = rq->xqe_red_drop;
		aq->rq_mask.xqe_drop = ~(aq->rq_mask.xqe_drop);
		aq->rq_mask.xqe_pass = ~(aq->rq_mask.xqe_pass);
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

static int
nix_rq_cn10k_cman_cfg(struct dev *dev, struct roc_nix_rq *rq)
{
	struct nix_cn10k_aq_enq_req *aq;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		rc = -ENOSPC;
		goto exit;
	}

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq_mask.lpb_pool_pass = ~(aq->rq_mask.lpb_pool_pass);
		aq->rq_mask.lpb_pool_drop = ~(aq->rq_mask.lpb_pool_drop);
	}

	if (rq->spb_red_pass && (rq->spb_red_pass >= rq->spb_red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq_mask.spb_pool_pass = ~(aq->rq_mask.spb_pool_pass);
		aq->rq_mask.spb_pool_drop = ~(aq->rq_mask.spb_pool_drop);
	}

	if (rq->xqe_red_pass && (rq->xqe_red_pass >= rq->xqe_red_drop)) {
		aq->rq.xqe_pass = rq->xqe_red_pass;
		aq->rq.xqe_drop = rq->xqe_red_drop;
		aq->rq_mask.xqe_drop = ~(aq->rq_mask.xqe_drop);
		aq->rq_mask.xqe_pass = ~(aq->rq_mask.xqe_pass);
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

static int
nix_rq_cman_cfg(struct dev *dev, struct roc_nix_rq *rq)
{
	struct nix_cn20k_aq_enq_req *aq;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
	if (!aq) {
		rc = -ENOSPC;
		goto exit;
	}

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq_mask.lpb_pool_pass = ~(aq->rq_mask.lpb_pool_pass);
		aq->rq_mask.lpb_pool_drop = ~(aq->rq_mask.lpb_pool_drop);
	}

	if (rq->spb_red_pass && (rq->spb_red_pass >= rq->spb_red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq_mask.spb_pool_pass = ~(aq->rq_mask.spb_pool_pass);
		aq->rq_mask.spb_pool_drop = ~(aq->rq_mask.spb_pool_drop);
	}

	if (rq->xqe_red_pass && (rq->xqe_red_pass >= rq->xqe_red_drop)) {
		aq->rq.xqe_pass = rq->xqe_red_pass;
		aq->rq.xqe_drop = rq->xqe_red_drop;
		aq->rq_mask.xqe_drop = ~(aq->rq_mask.xqe_drop);
		aq->rq_mask.xqe_pass = ~(aq->rq_mask.xqe_pass);
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

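/*
 * Illustrative example: the cman helpers above only program a level when the
 * "pass" threshold is non-zero and >= the "drop" threshold. A hypothetical
 * caller wanting RED on the LPB pool might therefore set rq->red_pass = 90 and
 * rq->red_drop = 70 before calling roc_nix_rq_cman_config(); setting red_pass
 * below red_drop would leave the RQ context untouched.
 */
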
int
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
		bool cfg, bool ena)
{
	struct mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->cqid;
	}

	if (rq->ipsech_ena)
		aq->rq.ipsech_ena = 1;

	aq->rq.spb_ena = 0;
	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 1;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
		}
	}

	return 0;
}

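/*
 * Worked example (illustrative): with rq->tag_mask = 0xAB001234 the code above
 * programs good_utag = bad_utag = 0xAB (tag_mask >> 24) and
 * ltag = 0x001234 (tag_mask & BITMASK_ULL(24, 0)), i.e. the upper 8 bits and
 * the lower 24 bits of the configured tag respectively.
 */
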
int
nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, bool ena)
{
	struct nix_cn10k_aq_enq_req *aq;
	struct mbox *mbox = dev->mbox;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);

		if (rq->vwqe_ena) {
			aq->rq.vwqe_ena = true;
			aq->rq.vwqe_skip = rq->vwqe_first_skip;
			/* Maximum vector size is (2^(MAX_VSIZE_EXP+2)) */
			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
			aq->rq.wqe_aura = roc_npa_aura_handle_to_aura(rq->vwqe_aura_handle);
		}
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->cqid;
	}

	if (rq->ipsech_ena) {
		aq->rq.ipsech_ena = 1;
		aq->rq.ipsecd_drop_en = 1;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;
	}

	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;

	if (rq->spb_ena) {
		uint32_t spb_sizem1;

		aq->rq.spb_ena = 1;
		aq->rq.spb_aura =
			roc_npa_aura_handle_to_aura(rq->spb_aura_handle);

		if (rq->spb_size & 0x7 ||
		    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
			return -EINVAL;

		spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
		spb_sizem1 -= 1; /* Expressed in size minus one */
		aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
		aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
	} else {
		aq->rq.spb_ena = 0;
	}

	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 0;
	aq->rq.lpb_drop_ena = rq->lpb_drop_ena;
	aq->rq.spb_drop_ena = rq->spb_drop_ena;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.wqe_pool_pass = rq->red_pass;
		aq->rq.xqe_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq.wqe_pool_drop = rq->red_drop;
		aq->rq.xqe_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			if (rq->vwqe_ena) {
				aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
				aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
				aq->rq_mask.max_vsize_exp =
					~aq->rq_mask.max_vsize_exp;
				aq->rq_mask.vtime_wait =
					~aq->rq_mask.vtime_wait;
				aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
			}
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		if (rq->spb_ena) {
			aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
			aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
			aq->rq_mask.spb_high_sizem1 =
				~aq->rq_mask.spb_high_sizem1;
		}

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
		aq->rq_mask.lpb_drop_ena = ~aq->rq_mask.lpb_drop_ena;
		aq->rq_mask.spb_drop_ena = ~aq->rq_mask.spb_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
		}
	}

	return 0;
}

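/*
 * Worked examples (illustrative) for the encodings used above:
 * - vwqe_max_sz_exp = 6 is programmed as max_vsize_exp = 4, since the HW
 *   vector size is 2^(MAX_VSIZE_EXP + 2) = 64 entries.
 * - spb_size = 1024 bytes gives spb_sizem1 = 1024/8 - 1 = 127, stored as
 *   spb_sizem1 = 127 & 0x3F = 63 and spb_high_sizem1 = (127 >> 6) & 0x7 = 1.
 */
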
int
nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, bool ena)
{
	struct nix_cn20k_aq_enq_req *aq;
	struct mbox *mbox = dev->mbox;

	aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->cqid;
	}

	if (rq->ipsech_ena) {
		aq->rq.ipsech_ena = 1;
		aq->rq.ipsecd_drop_en = 1;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;
	}

	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;

	if (rq->spb_ena) {
		uint32_t spb_sizem1;

		aq->rq.spb_ena = 1;
		aq->rq.spb_aura =
			roc_npa_aura_handle_to_aura(rq->spb_aura_handle);

		if (rq->spb_size & 0x7 ||
		    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
			return -EINVAL;

		spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
		spb_sizem1 -= 1; /* Expressed in size minus one */
		aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
		aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
	} else {
		aq->rq.spb_ena = 0;
	}

	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 0;
	aq->rq.lpb_drop_ena = rq->lpb_drop_ena;
	aq->rq.spb_drop_ena = rq->spb_drop_ena;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.wqe_pool_pass = rq->red_pass;
		aq->rq.xqe_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq.wqe_pool_drop = rq->red_drop;
		aq->rq.xqe_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		if (rq->spb_ena) {
			aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
			aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
			aq->rq_mask.spb_high_sizem1 =
				~aq->rq_mask.spb_high_sizem1;
		}

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
		aq->rq_mask.lpb_drop_ena = ~aq->rq_mask.lpb_drop_ena;
		aq->rq_mask.spb_drop_ena = ~aq->rq_mask.spb_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
		}
	}

	return 0;
}

int
roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	int rc;

	if (roc_nix == NULL || rq == NULL) {
		mbox_put(mbox);
		return NIX_ERR_PARAM;
	}

	if (rq->qid >= nix->nb_rx_queues) {
		mbox_put(mbox);
		return NIX_ERR_QUEUE_INVALID_RANGE;
	}

	rq->roc_nix = roc_nix;
	rq->tc = ROC_NIX_PFC_CLASS_INVALID;

	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
	else if (roc_model_is_cn10k())
		rc = nix_rq_cn10k_cfg(dev, rq, nix->qints, false, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);

	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}
	mbox_put(mbox);

	/* Update aura buf type to indicate its use */
	nix_rq_aura_buf_type_update(rq, true);

	/* Check for meta aura if RQ is enabled */
	if (ena && nix->need_meta_aura) {
		rc = roc_nix_inl_meta_aura_check(roc_nix, rq);
		if (rc)
			return rc;
	}

	nix->rqs[rq->qid] = rq;
	return nix_tel_node_add_rq(rq);
}

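/*
 * Usage sketch (illustrative, field values are assumptions and error handling
 * is trimmed): a driver setting up an RQ in CQ mode might do:
 *
 *	struct roc_nix_rq rq = {0};
 *
 *	rq.qid = 0;
 *	rq.cqid = 0;
 *	rq.aura_handle = lpb_aura_handle;   // hypothetical NPA aura handle
 *	rq.first_skip = 0;
 *	rq.later_skip = 0;
 *	rq.lpb_size = 2048;                 // must be 8-byte aligned
 *	rq.flow_tag_width = 32;
 *	rc = roc_nix_rq_init(roc_nix, &rq, true);
 */
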
int
roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *m_box = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	struct mbox *mbox;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	/* Clear attributes for existing auras */
	nix_rq_aura_buf_type_update(rq, false);

	rq->roc_nix = roc_nix;
	rq->tc = ROC_NIX_PFC_CLASS_INVALID;

	mbox = mbox_get(m_box);
	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
	else if (roc_model_is_cn10k())
		rc = nix_rq_cn10k_cfg(dev, rq, nix->qints, true, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);

	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}
	mbox_put(mbox);

	/* Update aura attribute to indicate its use */
	nix_rq_aura_buf_type_update(rq, true);

	/* Check for meta aura if RQ is enabled */
	if (ena && nix->need_meta_aura) {
		rc = roc_nix_inl_meta_aura_check(roc_nix, rq);
		if (rc)
			return rc;
	}

	return nix_tel_node_add_rq(rq);
}

int
roc_nix_rq_cman_config(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
{
	bool is_cn9k = roc_model_is_cn9k();
	struct nix *nix;
	struct dev *dev;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(roc_nix);

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	dev = &nix->dev;

	if (is_cn9k)
		rc = nix_rq_cn9k_cman_cfg(dev, rq);
	else if (roc_model_is_cn10k())
		rc = nix_rq_cn10k_cman_cfg(dev, rq);
	else
		rc = nix_rq_cman_cfg(dev, rq);

	return rc;
}

int
roc_nix_rq_fini(struct roc_nix_rq *rq)
{
	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
	int rc;

	/* Disabling RQ is sufficient */
	rc = roc_nix_rq_ena_dis(rq, false);
	if (rc)
		return rc;

	/* Update aura attribute to indicate it is no longer in use */
	nix_rq_aura_buf_type_update(rq, false);

	nix->rqs[rq->qid] = NULL;
	return 0;
}

static inline int
roc_nix_cn20k_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	volatile struct nix_cn20k_cq_ctx_s *cq_ctx;
	uint16_t drop_thresh = NIX_CQ_THRESH_LEVEL;
	uint16_t cpt_lbpid = nix->cpt_lbpid;
	struct nix_cn20k_aq_enq_req *aq;
	enum nix_q_size qsize;
	size_t desc_sz;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	qsize = nix_qsize_clampup(cq->nb_desc);
	cq->nb_desc = nix_qsize_to_val(qsize);
	cq->qmask = cq->nb_desc - 1;
	cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
	cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	cq->wdata = (uint64_t)cq->qid << 32;
	cq->roc_nix = roc_nix;

	/* CQE of W16 */
	desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
	cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
	if (cq->desc_base == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox_get(mbox));
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = cq->qid;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	cq_ctx = &aq->cq;

	cq_ctx->ena = 1;
	cq_ctx->caching = 1;
	cq_ctx->qsize = qsize;
	cq_ctx->base = (uint64_t)cq->desc_base;
	cq_ctx->avg_level = 0xff;
	cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
	cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
	if (roc_feature_nix_has_late_bp() && roc_nix_inl_inb_is_enabled(roc_nix)) {
		cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_CPT_DROP);
		cq_ctx->cpt_drop_err_en = 1;
		/* Enable Late BP only when non zero CPT BPID */
		if (cpt_lbpid) {
			cq_ctx->lbp_ena = 1;
			cq_ctx->lbpid_low = cpt_lbpid & 0x7;
			cq_ctx->lbpid_med = (cpt_lbpid >> 3) & 0x7;
			cq_ctx->lbpid_high = (cpt_lbpid >> 6) & 0x7;
			cq_ctx->lbp_frac = NIX_CQ_LPB_THRESH_FRAC;
		}
		drop_thresh = NIX_CQ_SEC_THRESH_LEVEL;
	}

	/* Many to one reduction */
	cq_ctx->qint_idx = cq->qid % nix->qints;
	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
	cq_ctx->cint_idx = cq->qid;

	if (roc_errata_nix_has_cq_min_size_4k()) {
		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
		uint16_t min_rx_drop;

		min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
		cq_ctx->drop = min_rx_drop;
		cq_ctx->drop_ena = 1;
		cq->drop_thresh = min_rx_drop;
	} else {
		cq->drop_thresh = drop_thresh;
		/* Drop processing or red drop cannot be enabled due to
		 * packets coming for second pass from CPT.
		 */
		if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
			cq_ctx->drop = cq->drop_thresh;
			cq_ctx->drop_ena = 1;
		}
	}
	cq_ctx->bp = cq->drop_thresh;

	if (roc_feature_nix_has_cqe_stash()) {
		if (cq_ctx->caching) {
			cq_ctx->stashing = 1;
			cq_ctx->stash_thresh = cq->stash_thresh;
		}
	}

	rc = mbox_process(mbox);
	mbox_put(mbox);
	if (rc)
		goto free_mem;

	return nix_tel_node_add_cq(cq);

free_mem:
	plt_free(cq->desc_base);
fail:
	return rc;
}

int
roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	volatile struct nix_cq_ctx_s *cq_ctx = NULL;
	uint16_t drop_thresh = NIX_CQ_THRESH_LEVEL;
	uint16_t cpt_lbpid = nix->cpt_lbpid;
	enum nix_q_size qsize;
	size_t desc_sz;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	if (roc_model_is_cn20k())
		return roc_nix_cn20k_cq_init(roc_nix, cq);

	qsize = nix_qsize_clampup(cq->nb_desc);
	cq->nb_desc = nix_qsize_to_val(qsize);
	cq->qmask = cq->nb_desc - 1;
	cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
	cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	cq->wdata = (uint64_t)cq->qid << 32;
	cq->roc_nix = roc_nix;

	/* CQE of W16 */
	desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
	cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
	if (cq->desc_base == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	} else if (roc_model_is_cn10k()) {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	}

	cq_ctx->ena = 1;
	cq_ctx->caching = 1;
	cq_ctx->qsize = qsize;
	cq_ctx->base = (uint64_t)cq->desc_base;
	cq_ctx->avg_level = 0xff;
	cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
	cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
	if (roc_feature_nix_has_late_bp() && roc_nix_inl_inb_is_enabled(roc_nix)) {
		cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_CPT_DROP);
		cq_ctx->cpt_drop_err_en = 1;
		/* Enable Late BP only when non zero CPT BPID */
		if (cpt_lbpid) {
			cq_ctx->lbp_ena = 1;
			cq_ctx->lbpid_low = cpt_lbpid & 0x7;
			cq_ctx->lbpid_med = (cpt_lbpid >> 3) & 0x7;
			cq_ctx->lbpid_high = (cpt_lbpid >> 6) & 0x7;
			cq_ctx->lbp_frac = NIX_CQ_LPB_THRESH_FRAC;
		}
		drop_thresh = NIX_CQ_SEC_THRESH_LEVEL;
	}

	/* Many to one reduction */
	cq_ctx->qint_idx = cq->qid % nix->qints;
	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
	cq_ctx->cint_idx = cq->qid;

	if (roc_errata_nix_has_cq_min_size_4k()) {
		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
		uint16_t min_rx_drop;

		min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
		cq_ctx->drop = min_rx_drop;
		cq_ctx->drop_ena = 1;
		cq->drop_thresh = min_rx_drop;
	} else {
		cq->drop_thresh = drop_thresh;
		/* Drop processing or red drop cannot be enabled due to
		 * packets coming for second pass from CPT.
		 */
		if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
			cq_ctx->drop = cq->drop_thresh;
			cq_ctx->drop_ena = 1;
		}
	}
	cq_ctx->bp = cq->drop_thresh;

	if (roc_feature_nix_has_cqe_stash()) {
		if (cq_ctx->caching) {
			cq_ctx->stashing = 1;
			cq_ctx->stash_thresh = cq->stash_thresh;
		}
	}

	rc = mbox_process(mbox);
	mbox_put(mbox);
	if (rc)
		goto free_mem;

	return nix_tel_node_add_cq(cq);

free_mem:
	plt_free(cq->desc_base);
fail:
	return rc;
}

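/*
 * Worked example (illustrative): if the CQ min-size errata applies and
 * cq->nb_desc was clamped to 4096, then assuming a skid of 1024 the computed
 * drop level is ceil(1024 / 4096) = 1; otherwise the fixed NIX_CQ_THRESH_LEVEL
 * (or NIX_CQ_SEC_THRESH_LEVEL when inline inbound IPsec late backpressure is
 * active) is used, and drop processing is skipped entirely when inline inbound
 * is enabled.
 */
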
int
roc_nix_cq_fini(struct roc_nix_cq *cq)
{
	struct mbox *mbox;
	struct nix *nix;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(cq->roc_nix);
	mbox = mbox_get((&nix->dev)->mbox);

	/* Disable CQ */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	} else if (roc_model_is_cn10k()) {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
		if (roc_feature_nix_has_late_bp() && roc_nix_inl_inb_is_enabled(cq->roc_nix)) {
			aq->cq.lbp_ena = 0;
			aq->cq_mask.lbp_ena = ~aq->cq_mask.lbp_ena;
		}
	} else {
		struct nix_cn20k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
		if (roc_feature_nix_has_late_bp() && roc_nix_inl_inb_is_enabled(cq->roc_nix)) {
			aq->cq.lbp_ena = 0;
			aq->cq_mask.lbp_ena = ~aq->cq_mask.lbp_ena;
		}
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	mbox_put(mbox);
	plt_free(cq->desc_base);
	return 0;
}

static int
sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqes_per_sqb, count, nb_sqb_bufs, thr;
	struct npa_pool_s pool;
	struct npa_aura_s aura;
	uint64_t blk_sz;
	uint64_t iova;
	int rc;

	blk_sz = nix->sqb_size;
	if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
		sqes_per_sqb = (blk_sz / 8) / 16;
	else
		sqes_per_sqb = (blk_sz / 8) / 8;

	/* Reserve one SQE in each SQB to hold the pointer to the next SQB */
	sqes_per_sqb -= 1;

	sq->nb_desc = PLT_MAX(512U, sq->nb_desc);
	nb_sqb_bufs = PLT_DIV_CEIL(sq->nb_desc, sqes_per_sqb);
	thr = PLT_DIV_CEIL((nb_sqb_bufs * ROC_NIX_SQB_THRESH), 100);
	nb_sqb_bufs += NIX_SQB_PREFETCH;
	/* Clamp up the SQB count */
	nb_sqb_bufs = PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs);
	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count, (uint16_t)nb_sqb_bufs);

	sq->nb_sqb_bufs = nb_sqb_bufs;
	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
	sq->nb_sqb_bufs_adj = nb_sqb_bufs;

	if (roc_nix->sqb_slack)
		nb_sqb_bufs += roc_nix->sqb_slack;
	else
		nb_sqb_bufs += PLT_MAX((int)thr, (int)ROC_NIX_SQB_SLACK_DFLT);
	/* Explicitly set nat_align alone, as by default the pool is created
	 * with both nat_align and buf_offset = 1, which we don't want for SQB.
	 */
	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;

	memset(&aura, 0, sizeof(aura));
	aura.fc_ena = 1;
	if (roc_model_is_cn9k() || roc_errata_npa_has_no_fc_stype_ststp())
		aura.fc_stype = 0x0; /* STF */
	else
		aura.fc_stype = 0x3; /* STSTP */
	aura.fc_addr = (uint64_t)sq->fc;
	aura.fc_hyst_bits = sq->fc_hyst_bits & 0xF;
	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura, &pool, 0);
	if (rc)
		goto fail;

	roc_npa_buf_type_update(sq->aura_handle, ROC_NPA_BUF_TYPE_SQB, 1);
	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
	if (sq->sqe_mem == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto nomem;
	}

	/* Fill the initial buffers */
	iova = (uint64_t)sq->sqe_mem;
	for (count = 0; count < nb_sqb_bufs; count++) {
		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
		iova += blk_sz;
	}

	if (roc_npa_aura_op_available_wait(sq->aura_handle, nb_sqb_bufs, 0) !=
	    nb_sqb_bufs) {
		plt_err("Failed to free all pointers to the pool");
		rc = NIX_ERR_NO_MEM;
		goto npa_fail;
	}

	roc_npa_pool_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
	roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
	sq->aura_sqb_bufs = nb_sqb_bufs;

	return rc;
npa_fail:
	plt_free(sq->sqe_mem);
nomem:
	roc_npa_pool_destroy(sq->aura_handle);
fail:
	return rc;
}

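/*
 * Worked example (illustrative, assuming a 4 KB SQB and W16 SQEs): each SQB
 * holds (4096 / 8) / 16 = 32 SQEs, minus 1 reserved for the next-SQB pointer,
 * i.e. 31 usable SQEs. A request for nb_desc = 1024 therefore needs
 * ceil(1024 / 31) = 34 SQBs before the prefetch, clamp and slack adjustments
 * applied above.
 */
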
static int
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	     uint16_t smq)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	aq->sq.max_sqe_size = sq->max_sqe_sz;

	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq = smq;
	aq->sq.smq_rr_quantum = rr_quantum;
	if (roc_nix_is_sdp(roc_nix))
		aq->sq.default_chan =
			nix->tx_chan_base + (sq->qid % nix->tx_chan_cnt);
	else
		aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	aq->sq.cq_limit = sq->cq_drop_thresh;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;

	return 0;
}

static int
sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena) {
		mbox_put(mbox);
		return 0;
	}

	/* Disable sq */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Read SQ and free SQBs */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	if (aq->sq.smq_pend)
		plt_err("SQ has pending SQEs");

	count = aq->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQBs that are in use */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uint64_t *)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) * (0x2 >> sq->max_sqe_sz) * 8));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	mbox_put(mbox);
	return 0;
}

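/*
 * Illustrative note on the pointer walk above: the last SQE slot of every SQB
 * holds the IOVA of the next SQB in the chain. The offset is computed in
 * 64-bit words: assuming the usual encoding (NIX_MAXSQESZ_W16 == 0,
 * NIX_MAXSQESZ_W8 == 1), a W16 SQE spans (0x2 >> 0) * 8 = 16 words and a W8
 * SQE spans 8 words, so the link lives at word (sqes_per_sqb - 1) * 16 or
 * (sqes_per_sqb - 1) * 8 respectively; the loop follows that link while
 * returning each visited SQB to the aura.
 */
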
static int
sq_cn10k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum, uint16_t smq)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	aq->sq.max_sqe_size = sq->max_sqe_sz;

	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq = smq;
	aq->sq.smq_rr_weight = rr_quantum;
	if (roc_nix_is_sdp(roc_nix))
		aq->sq.default_chan = nix->tx_chan_base + (sq->qid % nix->tx_chan_cnt);
	else
		aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	aq->sq.cq_limit = sq->cq_drop_thresh;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;
	if (roc_errata_nix_assign_incorrect_qint()) {
		/* Assign QINT 0 to all the SQs: an errata exists where NIX TX
		 * can send an incorrect QINT_IDX when reporting a queue
		 * interrupt (QINT), which might result in software missing
		 * the interrupt.
		 */
		aq->sq.qint_idx = 0;
	}
	return 0;
}

static int
sq_cn10k_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
	struct nix_cn10k_aq_enq_rsp *rsp;
	struct nix_cn10k_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena) {
		mbox_put(mbox);
		return 0;
	}

	/* Disable sq */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Read SQ and free SQBs */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	if (aq->sq.smq_pend)
		plt_err("SQ has pending SQEs");

	count = aq->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQBs that are in use */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uint64_t *)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) * (0x2 >> sq->max_sqe_sz) * 8));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	mbox_put(mbox);
	return 0;
}

static int
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum, uint16_t smq)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn20k_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	aq->sq.max_sqe_size = sq->max_sqe_sz;

	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq = smq;
	aq->sq.smq_rr_weight = rr_quantum;
	if (roc_nix_is_sdp(roc_nix))
		aq->sq.default_chan = nix->tx_chan_base + (sq->qid % nix->tx_chan_cnt);
	else
		aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	aq->sq.cq_limit = sq->cq_drop_thresh;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;
	if (roc_errata_nix_assign_incorrect_qint()) {
		/* Assign QINT 0 to all the SQs: an errata exists where NIX TX
		 * can send an incorrect QINT_IDX when reporting a queue
		 * interrupt (QINT), which might result in software missing
		 * the interrupt.
		 */
		aq->sq.qint_idx = 0;
	}
	return 0;
}

static int
sq_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
	struct nix_cn20k_aq_enq_rsp *rsp;
	struct nix_cn20k_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena) {
		mbox_put(mbox);
		return 0;
	}

	/* Disable sq */
	aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Read SQ and free SQBs */
	aq = mbox_alloc_msg_nix_cn20k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	if (aq->sq.smq_pend)
		plt_err("SQ has pending SQEs");

	count = aq->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQBs that are in use */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uint64_t *)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) * (0x2 >> sq->max_sqe_sz) * 8));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	mbox_put(mbox);
	return 0;
}

int
roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *m_box = (&nix->dev)->mbox;
	uint16_t qid, smq = UINT16_MAX;
	uint32_t rr_quantum = 0;
	struct mbox *mbox;
	int rc;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	qid = sq->qid;
	if (qid >= nix->nb_tx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	sq->roc_nix = roc_nix;
	sq->tc = ROC_NIX_PFC_CLASS_INVALID;
	/*
	 * Allocate memory for flow control updates from HW.
	 * Alloc one cache line so that it fits all FC_STYPE modes.
	 */
	sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
	if (sq->fc == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	rc = sqb_pool_populate(roc_nix, sq);
	if (rc)
		goto nomem;

	rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
	if (rc) {
		rc = NIX_ERR_TM_LEAF_NODE_GET;
		goto nomem;
	}

	mbox = mbox_get(m_box);
	/* Init SQ context */
	if (roc_model_is_cn9k())
		rc = sq_cn9k_init(nix, sq, rr_quantum, smq);
	else if (roc_model_is_cn10k())
		rc = sq_cn10k_init(nix, sq, rr_quantum, smq);
	else
		rc = sq_init(nix, sq, rr_quantum, smq);

	if (rc) {
		mbox_put(mbox);
		goto nomem;
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		goto nomem;
	}
	mbox_put(mbox);

	sq->enable = true;
	nix->sqs[qid] = sq;
	sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
	/* Evenly distribute LMT slot for each sq */
	if (roc_model_is_cn9k()) {
		/* Multiple cores/SQs can use the same LMTLINE safely in CN9K */
		sq->lmt_addr = (void *)(nix->lmt_base +
					((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
	}

	rc = nix_tel_node_add_sq(sq);
	return rc;
nomem:
	plt_free(sq->fc);
fail:
	return rc;
}

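/*
 * Usage sketch (illustrative, values are assumptions and error handling is
 * trimmed): an SQ is typically created after the TM hierarchy and the CQ it
 * completes into:
 *
 *	struct roc_nix_sq sq = {0};
 *
 *	sq.qid = 0;
 *	sq.nb_desc = 1024;
 *	sq.max_sqe_sz = roc_nix_maxsqesz_w16;
 *	sq.cq_ena = 1;
 *	sq.cqid = 0;
 *	rc = roc_nix_sq_init(roc_nix, &sq);
 *	...
 *	rc = roc_nix_sq_fini(&sq);
 */
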
int
roc_nix_sq_fini(struct roc_nix_sq *sq)
{
	struct nix *nix;
	struct mbox *mbox;
	struct ndc_sync_op *ndc_req;
	uint16_t qid;
	int rc = 0;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(sq->roc_nix);
	mbox = (&nix->dev)->mbox;

	qid = sq->qid;

	rc = nix_tm_sq_flush_pre(sq);

	/* Release SQ context */
	if (roc_model_is_cn9k())
		rc |= sq_cn9k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
	else if (roc_model_is_cn10k())
		rc |= sq_cn10k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
	else
		rc |= sq_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);

	/* Sync NDC-NIX-TX for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox_get(mbox));
	if (ndc_req == NULL) {
		mbox_put(mbox);
		return -ENOSPC;
	}
	ndc_req->nix_lf_tx_sync = 1;
	if (mbox_process(mbox))
		rc |= NIX_ERR_NDC_SYNC;
	mbox_put(mbox);

	rc |= nix_tm_sq_flush_post(sq);

	/* Restore the limit to the max SQB count that the pool was created
	 * with, so that the aura drain can succeed.
	 */
	roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
	rc |= roc_npa_pool_destroy(sq->aura_handle);
	plt_free(sq->fc);
	plt_free(sq->sqe_mem);
	nix->sqs[qid] = NULL;

	return rc;
}

void
roc_nix_cq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
			 uint32_t *tail)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint64_t reg, val;
	int64_t *addr;

	if (head == NULL || tail == NULL)
		return;

	reg = (((uint64_t)qid) << 32);
	addr = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	val = roc_atomic64_add_nosync(reg, addr);
	if (val &
	    (BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) | BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR)))
		val = 0;

	*tail = (uint32_t)(val & 0xFFFFF);
	*head = (uint32_t)((val >> 20) & 0xFFFFF);
}

void
roc_nix_sq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
			 uint32_t *tail)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_nix_sq *sq = nix->sqs[qid];
	uint16_t sqes_per_sqb, sqb_cnt;
	uint64_t reg, val;
	int64_t *addr;

	if (head == NULL || tail == NULL)
		return;

	reg = (((uint64_t)qid) << 32);
	addr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
	val = roc_atomic64_add_nosync(reg, addr);
	if (val & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR)) {
		val = 0;
		return;
	}

	*tail = (uint32_t)((val >> 28) & 0x3F);
	*head = (uint32_t)((val >> 20) & 0x3F);
	sqb_cnt = (uint16_t)(val & 0xFFFF);

	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;

	/* Update tail index as per used sqb count */
	*tail += (sqes_per_sqb * (sqb_cnt - 1));
}

int
roc_nix_q_err_cb_register(struct roc_nix *roc_nix, q_err_get_t sq_err_handle)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct dev *dev = &nix->dev;

	if (sq_err_handle == NULL)
		return NIX_ERR_PARAM;

	dev->ops->q_err_cb = (q_err_cb_t)sq_err_handle;
	return 0;
}

void
roc_nix_q_err_cb_unregister(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct dev *dev = &nix->dev;

	dev->ops->q_err_cb = NULL;
}
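
/*
 * Worked example (illustrative, values are assumptions): in
 * roc_nix_sq_head_tail_get(), if the SQ_OP_STATUS read reports head = 2,
 * tail = 5 (SQE offsets within an SQB) and sqb_cnt = 3, then with
 * sq->sqes_per_sqb_log2 = 4 (16 SQEs per SQB) the reported tail becomes
 * 5 + 16 * (3 - 1) = 37, i.e. the tail is projected across the SQBs still
 * held by the queue.
 */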