/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <math.h>

#include "roc_api.h"
#include "roc_priv.h"

/* Default SQB slack per SQ */
#define ROC_NIX_SQB_SLACK_DFLT 24

static inline uint32_t
nix_qsize_to_val(enum nix_q_size qsize)
{
	return (16UL << (qsize * 2));
}

static inline enum nix_q_size
nix_qsize_clampup(uint32_t val)
{
	int i = nix_q_size_16;

	for (; i < nix_q_size_max; i++)
		if (val <= nix_qsize_to_val(i))
			break;

	if (i >= nix_q_size_max)
		i = nix_q_size_max - 1;

	return i;
}

void
nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval)
{
	uint64_t wait_ns;

	if (!roc_model_is_cn10k())
		return;
	/* Due to HW errata writes to VWQE_FLUSH might hang, so instead
	 * wait for max vwqe timeout interval.
	 */
	if (rq->vwqe_ena) {
		wait_ns = rq->vwqe_wait_tmo * (vwqe_interval + 1) * 100;
		plt_delay_us((wait_ns / 1E3) + 1);
	}
}

int
nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
{
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	/* Pkts will be dropped silently if RQ is disabled */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

int
roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
{
	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
	int rc;

	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
	nix_rq_vwqe_flush(rq, nix->vwqe_interval);
	if (rc)
		return rc;

	/* Check for meta aura if RQ is enabled */
	if (enable && nix->need_meta_aura)
		rc = roc_nix_inl_meta_aura_check(rq->roc_nix, rq);
	return rc;
}

int
roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct dev *dev = &nix->dev;
	struct mbox *mbox = mbox_get(dev->mbox);
	bool sso_enable;
	int rc;

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_rsp *rsp;
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;
		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto exit;

		sso_enable = rsp->rq.sso_ena;
	} else {
		struct nix_cn10k_aq_enq_rsp *rsp;
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq) {
			rc = -ENOSPC;
			goto exit;
		}

		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto exit;

		sso_enable = rsp->rq.sso_ena;
	}

	rc = sso_enable ? true : false;
exit:
	mbox_put(mbox);
	return rc;
}

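/* Track how the auras backing this RQ are used (packet vs. VWQE, plain vs.
 * inline IPsec) so NPA book-keeping stays in sync. On set, the aura handles
 * come from the RQ structure; on clear, the RQ context is read back from HW
 * to recover the LPB/SPB/VWQE aura handles before dropping the counts.
 */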
static int
nix_rq_aura_buf_type_update(struct roc_nix_rq *rq, bool set)
{
	struct roc_nix *roc_nix = rq->roc_nix;
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	bool inl_inb_ena = roc_nix_inl_inb_is_enabled(roc_nix);
	uint64_t lpb_aura = 0, vwqe_aura = 0, spb_aura = 0;
	struct mbox *mbox = nix->dev.mbox;
	uint64_t aura_base;
	int rc, count;

	count = set ? 1 : -1;
	/* For buf type set, use info from RQ context */
	if (set) {
		lpb_aura = rq->aura_handle;
		spb_aura = rq->spb_ena ? rq->spb_aura_handle : 0;
		vwqe_aura = rq->vwqe_ena ? rq->vwqe_aura_handle : 0;
		goto skip_ctx_read;
	}

	aura_base = roc_npa_aura_handle_to_base(rq->aura_handle);
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_rsp *rsp;
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;
		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc) {
			mbox_put(mbox);
			return rc;
		}

		/* Get aura handle from aura */
		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
		if (rsp->rq.spb_ena)
			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
		mbox_put(mbox);
	} else {
		struct nix_cn10k_aq_enq_rsp *rsp;
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc) {
			mbox_put(mbox);
			return rc;
		}

		/* Get aura handle from aura */
		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
		if (rsp->rq.spb_ena)
			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
		if (rsp->rq.vwqe_ena)
			vwqe_aura = roc_npa_aura_handle_gen(rsp->rq.wqe_aura, aura_base);

		mbox_put(mbox);
	}

skip_ctx_read:
	/* Update attributes for LPB aura */
	if (inl_inb_ena)
		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
	else
		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET, count);

	/* Update attributes for SPB aura */
	if (spb_aura) {
		if (inl_inb_ena)
			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
		else
			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
	}

	/* Update attributes for VWQE aura */
	if (vwqe_aura) {
		if (inl_inb_ena)
			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE_IPSEC, count);
		else
			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE, count);
	}

	return 0;
}

static int
nix_rq_cn9k_cman_cfg(struct dev *dev, struct roc_nix_rq *rq)
{
	struct mbox *mbox = mbox_get(dev->mbox);
	struct nix_aq_enq_req *aq;
	int rc;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		rc = -ENOSPC;
		goto exit;
	}

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq_mask.lpb_pool_pass = ~(aq->rq_mask.lpb_pool_pass);
		aq->rq_mask.lpb_pool_drop = ~(aq->rq_mask.lpb_pool_drop);
	}

	if (rq->spb_red_pass && (rq->spb_red_pass >= rq->spb_red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq_mask.spb_pool_pass = ~(aq->rq_mask.spb_pool_pass);
		aq->rq_mask.spb_pool_drop = ~(aq->rq_mask.spb_pool_drop);
	}

	if (rq->xqe_red_pass && (rq->xqe_red_pass >= rq->xqe_red_drop)) {
		aq->rq.xqe_pass = rq->xqe_red_pass;
		aq->rq.xqe_drop = rq->xqe_red_drop;
		aq->rq_mask.xqe_drop = ~(aq->rq_mask.xqe_drop);
		aq->rq_mask.xqe_pass = ~(aq->rq_mask.xqe_pass);
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

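/* The context writers below follow the AQ mask idiom: an INIT op programs the
 * whole context, while a WRITE op (cfg == true) only applies the fields whose
 * bits are set in rq_mask. The request is expected to start out zero-filled,
 * so assigning the bitwise NOT of a mask field to itself is shorthand for
 * "update this field".
 */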
int
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
		bool cfg, bool ena)
{
	struct mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->cqid;
	}

	if (rq->ipsech_ena)
		aq->rq.ipsech_ena = 1;

	aq->rq.spb_ena = 0;
	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 1;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
		}
	}

	return 0;
}

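/* CN10K counterpart of the CN9K setup above: in addition it programs SPB
 * (small packet buffer) and VWQE (vector WQE) parameters as well as the
 * per-pool lpb/spb drop enables.
 */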
int
nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
	   bool ena)
{
	struct nix_cn10k_aq_enq_req *aq;
	struct mbox *mbox = dev->mbox;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);

		if (rq->vwqe_ena) {
			aq->rq.vwqe_ena = true;
			aq->rq.vwqe_skip = rq->vwqe_first_skip;
			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
			aq->rq.wqe_aura = roc_npa_aura_handle_to_aura(rq->vwqe_aura_handle);
		}
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->cqid;
	}

	if (rq->ipsech_ena) {
		aq->rq.ipsech_ena = 1;
		aq->rq.ipsecd_drop_en = 1;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;
	}

	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;

	if (rq->spb_ena) {
		uint32_t spb_sizem1;

		aq->rq.spb_ena = 1;
		aq->rq.spb_aura =
			roc_npa_aura_handle_to_aura(rq->spb_aura_handle);

		if (rq->spb_size & 0x7 ||
		    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
			return -EINVAL;

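		/* The size-minus-one value in dwords is wider than the 6-bit
		 * spb_sizem1 field, so the upper bits are carried in
		 * spb_high_sizem1.
		 */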
		spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
		spb_sizem1 -= 1; /* Expressed in size minus one */
		aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
		aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
	} else {
		aq->rq.spb_ena = 0;
	}

	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 0;
	aq->rq.lpb_drop_ena = rq->lpb_drop_ena;
	aq->rq.spb_drop_ena = rq->spb_drop_ena;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.wqe_pool_pass = rq->red_pass;
		aq->rq.xqe_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq.wqe_pool_drop = rq->red_drop;
		aq->rq.xqe_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			if (rq->vwqe_ena) {
				aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
				aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
				aq->rq_mask.max_vsize_exp =
					~aq->rq_mask.max_vsize_exp;
				aq->rq_mask.vtime_wait =
					~aq->rq_mask.vtime_wait;
				aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
			}
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		if (rq->spb_ena) {
			aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
			aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
			aq->rq_mask.spb_high_sizem1 =
				~aq->rq_mask.spb_high_sizem1;
		}

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
		aq->rq_mask.lpb_drop_ena = ~aq->rq_mask.lpb_drop_ena;
		aq->rq_mask.spb_drop_ena = ~aq->rq_mask.spb_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
		}
	}

	return 0;
}

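/* Runtime congestion-management update (CN10K context): RED pass/drop levels
 * are programmed for the LPB and SPB pools and for XQEs, and only when the
 * pass level is non-zero and not below the drop level.
 */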
static int
nix_rq_cman_cfg(struct dev *dev, struct roc_nix_rq *rq)
{
	struct nix_cn10k_aq_enq_req *aq;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		rc = -ENOSPC;
		goto exit;
	}

	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq_mask.lpb_pool_pass = ~(aq->rq_mask.lpb_pool_pass);
		aq->rq_mask.lpb_pool_drop = ~(aq->rq_mask.lpb_pool_drop);
	}

	if (rq->spb_red_pass && (rq->spb_red_pass >= rq->spb_red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq_mask.spb_pool_pass = ~(aq->rq_mask.spb_pool_pass);
		aq->rq_mask.spb_pool_drop = ~(aq->rq_mask.spb_pool_drop);
	}

	if (rq->xqe_red_pass && (rq->xqe_red_pass >= rq->xqe_red_drop)) {
		aq->rq.xqe_pass = rq->xqe_red_pass;
		aq->rq.xqe_drop = rq->xqe_red_drop;
		aq->rq_mask.xqe_drop = ~(aq->rq_mask.xqe_drop);
		aq->rq_mask.xqe_pass = ~(aq->rq_mask.xqe_pass);
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

int
roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	int rc;

	if (roc_nix == NULL || rq == NULL) {
		mbox_put(mbox);
		return NIX_ERR_PARAM;
	}

	if (rq->qid >= nix->nb_rx_queues) {
		mbox_put(mbox);
		return NIX_ERR_QUEUE_INVALID_RANGE;
	}

	rq->roc_nix = roc_nix;
	rq->tc = ROC_NIX_PFC_CLASS_INVALID;

	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);

	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}
	mbox_put(mbox);

	/* Update aura buf type to indicate its use */
	nix_rq_aura_buf_type_update(rq, true);

	/* Check for meta aura if RQ is enabled */
	if (ena && nix->need_meta_aura) {
		rc = roc_nix_inl_meta_aura_check(roc_nix, rq);
		if (rc)
			return rc;
	}

	nix->rqs[rq->qid] = rq;
	return nix_tel_node_add_rq(rq);
}

int
roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *m_box = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	struct mbox *mbox;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	/* Clear attributes for existing auras */
	nix_rq_aura_buf_type_update(rq, false);

	rq->roc_nix = roc_nix;
	rq->tc = ROC_NIX_PFC_CLASS_INVALID;

	mbox = mbox_get(m_box);
	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);

	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}
	mbox_put(mbox);

	/* Update aura attribute to indicate its use */
	nix_rq_aura_buf_type_update(rq, true);

	/* Check for meta aura if RQ is enabled */
	if (ena && nix->need_meta_aura) {
		rc = roc_nix_inl_meta_aura_check(roc_nix, rq);
		if (rc)
			return rc;
	}

	return nix_tel_node_add_rq(rq);
}

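/* Illustrative RQ call order from a caller's point of view (a sketch, not a
 * mandated sequence):
 *
 *   roc_nix_rq_init(roc_nix, rq, false);   - program context, keep disabled
 *   roc_nix_rq_cman_config(roc_nix, rq);   - optional RED thresholds
 *   roc_nix_rq_ena_dis(rq, true);          - start receiving
 *   ...
 *   roc_nix_rq_fini(rq);                   - disable and drop aura refs
 */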
int
roc_nix_rq_cman_config(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
{
	bool is_cn9k = roc_model_is_cn9k();
	struct nix *nix;
	struct dev *dev;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(roc_nix);

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	dev = &nix->dev;

	if (is_cn9k)
		rc = nix_rq_cn9k_cman_cfg(dev, rq);
	else
		rc = nix_rq_cman_cfg(dev, rq);

	return rc;
}

int
roc_nix_rq_fini(struct roc_nix_rq *rq)
{
	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
	int rc;

	/* Disabling RQ is sufficient */
	rc = roc_nix_rq_ena_dis(rq, false);
	if (rc)
		return rc;

	/* Update aura attribute to indicate its use */
	nix_rq_aura_buf_type_update(rq, false);

	nix->rqs[rq->qid] = NULL;
	return 0;
}

int
roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	volatile struct nix_cq_ctx_s *cq_ctx;
	uint16_t drop_thresh = NIX_CQ_THRESH_LEVEL;
	uint16_t cpt_lbpid = nix->cpt_lbpid;
	enum nix_q_size qsize;
	size_t desc_sz;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	qsize = nix_qsize_clampup(cq->nb_desc);
	cq->nb_desc = nix_qsize_to_val(qsize);
	cq->qmask = cq->nb_desc - 1;
	cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
	cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	cq->wdata = (uint64_t)cq->qid << 32;
	cq->roc_nix = roc_nix;

	/* CQE of W16 */
	desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
	cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
	if (cq->desc_base == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox_get(mbox));
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	}

	cq_ctx->ena = 1;
	cq_ctx->caching = 1;
	cq_ctx->qsize = qsize;
	cq_ctx->base = (uint64_t)cq->desc_base;
	cq_ctx->avg_level = 0xff;
	cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
	cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
	if (roc_feature_nix_has_late_bp() && roc_nix_inl_inb_is_enabled(roc_nix)) {
		cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_CPT_DROP);
		cq_ctx->cpt_drop_err_en = 1;
		/* Enable Late BP only when non zero CPT BPID */
		if (cpt_lbpid) {
			cq_ctx->lbp_ena = 1;
			cq_ctx->lbpid_low = cpt_lbpid & 0x7;
			cq_ctx->lbpid_med = (cpt_lbpid >> 3) & 0x7;
			cq_ctx->lbpid_high = (cpt_lbpid >> 6) & 0x7;
			cq_ctx->lbp_frac = NIX_CQ_LPB_THRESH_FRAC;
		}
		drop_thresh = NIX_CQ_SEC_THRESH_LEVEL;
	}

	/* Many to one reduction */
	cq_ctx->qint_idx = cq->qid % nix->qints;
	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
	cq_ctx->cint_idx = cq->qid;

	if (roc_errata_nix_has_cq_min_size_4k()) {
		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
		uint16_t min_rx_drop;

		min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
		cq_ctx->drop = min_rx_drop;
		cq_ctx->drop_ena = 1;
		cq->drop_thresh = min_rx_drop;
	} else {
		cq->drop_thresh = drop_thresh;
		/* Drop processing or red drop cannot be enabled due to
		 * packets coming for second pass from CPT.
		 */
		if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
			cq_ctx->drop = cq->drop_thresh;
			cq_ctx->drop_ena = 1;
		}
	}
	cq_ctx->bp = cq->drop_thresh;

	if (roc_feature_nix_has_cqe_stash()) {
		if (cq_ctx->caching) {
			cq_ctx->stashing = 1;
			cq_ctx->stash_thresh = cq->stash_thresh;
		}
	}

	rc = mbox_process(mbox);
	mbox_put(mbox);
	if (rc)
		goto free_mem;

	return nix_tel_node_add_cq(cq);

free_mem:
	plt_free(cq->desc_base);
fail:
	return rc;
}

int
roc_nix_cq_fini(struct roc_nix_cq *cq)
{
	struct mbox *mbox;
	struct nix *nix;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(cq->roc_nix);
	mbox = mbox_get((&nix->dev)->mbox);

	/* Disable CQ */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq) {
			mbox_put(mbox);
			return -ENOSPC;
		}

		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
		if (roc_feature_nix_has_late_bp() && roc_nix_inl_inb_is_enabled(cq->roc_nix)) {
			aq->cq.lbp_ena = 0;
			aq->cq_mask.lbp_ena = ~aq->cq_mask.lbp_ena;
		}
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	mbox_put(mbox);
	plt_free(cq->desc_base);
	return 0;
}

static int
sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqes_per_sqb, count, nb_sqb_bufs, thr;
	struct npa_pool_s pool;
	struct npa_aura_s aura;
	uint64_t blk_sz;
	uint64_t iova;
	int rc;

	blk_sz = nix->sqb_size;
	if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
		sqes_per_sqb = (blk_sz / 8) / 16;
	else
		sqes_per_sqb = (blk_sz / 8) / 8;

	/* Reserve one SQE in each SQB to hold the pointer to the next SQB */
	sqes_per_sqb -= 1;

	sq->nb_desc = PLT_MAX(512U, sq->nb_desc);
	nb_sqb_bufs = PLT_DIV_CEIL(sq->nb_desc, sqes_per_sqb);
	thr = PLT_DIV_CEIL((nb_sqb_bufs * ROC_NIX_SQB_THRESH), 100);
	nb_sqb_bufs += NIX_SQB_PREFETCH;
	/* Clamp up the SQB count */
	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count, (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));

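	/* Illustrative sizing, assuming a 4 KB SQB with W16 (128 B) SQEs:
	 * sqes_per_sqb = 4096 / 8 / 16 - 1 = 31 usable SQEs per SQB, so a
	 * 512-entry SQ needs ceil(512 / 31) = 17 SQBs before the prefetch,
	 * slack and min/max clamping applied above and below.
	 */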
	sq->nb_sqb_bufs = nb_sqb_bufs;
	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
	sq->nb_sqb_bufs_adj = nb_sqb_bufs;

	if (roc_nix->sqb_slack)
		nb_sqb_bufs += roc_nix->sqb_slack;
	else
		nb_sqb_bufs += PLT_MAX((int)thr, (int)ROC_NIX_SQB_SLACK_DFLT);
	/* Explicitly set nat_align alone, as by default the pool has both
	 * nat_align and buf_offset = 1, which we don't want for SQB.
	 */
	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;

	memset(&aura, 0, sizeof(aura));
	aura.fc_ena = 1;
	if (roc_model_is_cn9k() || roc_errata_npa_has_no_fc_stype_ststp())
		aura.fc_stype = 0x0; /* STF */
	else
		aura.fc_stype = 0x3; /* STSTP */
	aura.fc_addr = (uint64_t)sq->fc;
	aura.fc_hyst_bits = sq->fc_hyst_bits & 0xF;
	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura, &pool, 0);
	if (rc)
		goto fail;

	roc_npa_buf_type_update(sq->aura_handle, ROC_NPA_BUF_TYPE_SQB, 1);
	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
	if (sq->sqe_mem == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto nomem;
	}

	/* Fill the initial buffers */
	iova = (uint64_t)sq->sqe_mem;
	for (count = 0; count < nb_sqb_bufs; count++) {
		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
		iova += blk_sz;
	}

	if (roc_npa_aura_op_available_wait(sq->aura_handle, nb_sqb_bufs, 0) !=
	    nb_sqb_bufs) {
		plt_err("Failed to free all pointers to the pool");
		rc = NIX_ERR_NO_MEM;
		goto npa_fail;
	}

	roc_npa_pool_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
	roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
	sq->aura_sqb_bufs = nb_sqb_bufs;

	return rc;
npa_fail:
	plt_free(sq->sqe_mem);
nomem:
	roc_npa_pool_destroy(sq->aura_handle);
fail:
	return rc;
}

static int
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	     uint16_t smq)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	aq->sq.max_sqe_size = sq->max_sqe_sz;

	aq->sq.smq = smq;
	aq->sq.smq_rr_quantum = rr_quantum;
	if (roc_nix_is_sdp(roc_nix))
		aq->sq.default_chan =
			nix->tx_chan_base + (sq->qid % nix->tx_chan_cnt);
	else
		aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	aq->sq.cq_limit = sq->cq_drop_thresh;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;

	return 0;
}

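/* SQ teardown happens in three AQ steps: read the context to see whether the
 * SQ is still enabled, write ena=0 to disable it, then read it again and walk
 * the SQB chain from head_sqb, returning each buffer to the SQB aura.
 */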
static int
sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena) {
		mbox_put(mbox);
		return 0;
	}

	/* Disable sq */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Read SQ and free SQBs */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	if (aq->sq.smq_pend)
		plt_err("SQ has pending SQEs");

	count = aq->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQBs that are in use */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uint64_t *)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) * (0x2 >> sq->max_sqe_sz) * 8));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	mbox_put(mbox);
	return 0;
}

static int
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum, uint16_t smq)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq)
		return -ENOSPC;

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	aq->sq.max_sqe_size = sq->max_sqe_sz;

	aq->sq.smq = smq;
	aq->sq.smq_rr_weight = rr_quantum;
	if (roc_nix_is_sdp(roc_nix))
		aq->sq.default_chan = nix->tx_chan_base + (sq->qid % nix->tx_chan_cnt);
	else
		aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	aq->sq.cq_limit = sq->cq_drop_thresh;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;
	if (roc_errata_nix_assign_incorrect_qint()) {
		/* Assign QINT 0 to all the SQs: an errata exists where NIXTX can
		 * send an incorrect QINT_IDX when reporting a queue interrupt
		 * (QINT), which might result in software missing the interrupt.
		 */
		aq->sq.qint_idx = 0;
	}
	return 0;
}

static int
sq_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = mbox_get((&nix->dev)->mbox);
	struct nix_cn10k_aq_enq_rsp *rsp;
	struct nix_cn10k_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena) {
		mbox_put(mbox);
		return 0;
	}

	/* Disable sq */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	/* Read SQ and free SQBs */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	if (!aq) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}

	if (aq->sq.smq_pend)
		plt_err("SQ has pending SQEs");

	count = aq->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQBs that are in use */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uint64_t *)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) * (0x2 >> sq->max_sqe_sz) * 8));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	mbox_put(mbox);
	return 0;
}

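/* SQ bring-up: allocate the flow-control memory, populate the SQB aura/pool,
 * fetch the TM leaf data (SMQ and round-robin weight/quantum) for this qid
 * and only then program the SQ context through the mbox.
 */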
int
roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *m_box = (&nix->dev)->mbox;
	uint16_t qid, smq = UINT16_MAX;
	uint32_t rr_quantum = 0;
	struct mbox *mbox;
	int rc;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	qid = sq->qid;
	if (qid >= nix->nb_tx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	sq->roc_nix = roc_nix;
	sq->tc = ROC_NIX_PFC_CLASS_INVALID;
	/*
	 * Allocate memory for flow control updates from HW.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
	if (sq->fc == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	rc = sqb_pool_populate(roc_nix, sq);
	if (rc)
		goto nomem;

	rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
	if (rc) {
		rc = NIX_ERR_TM_LEAF_NODE_GET;
		goto nomem;
	}

	mbox = mbox_get(m_box);
	/* Init SQ context */
	if (roc_model_is_cn9k())
		rc = sq_cn9k_init(nix, sq, rr_quantum, smq);
	else
		rc = sq_init(nix, sq, rr_quantum, smq);

	if (rc) {
		mbox_put(mbox);
		goto nomem;
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		goto nomem;
	}
	mbox_put(mbox);

	nix->sqs[qid] = sq;
	sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
	/* Evenly distribute LMT slot for each sq */
	if (roc_model_is_cn9k()) {
		/* Multiple cores/SQs can use same LMTLINE safely in CN9K */
		sq->lmt_addr = (void *)(nix->lmt_base +
					((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
	}

	rc = nix_tel_node_add_sq(sq);
	return rc;
nomem:
	plt_free(sq->fc);
fail:
	return rc;
}

int
roc_nix_sq_fini(struct roc_nix_sq *sq)
{
	struct nix *nix;
	struct mbox *mbox;
	struct ndc_sync_op *ndc_req;
	uint16_t qid;
	int rc = 0;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(sq->roc_nix);
	mbox = (&nix->dev)->mbox;

	qid = sq->qid;

	rc = nix_tm_sq_flush_pre(sq);

	/* Release SQ context */
	if (roc_model_is_cn9k())
		rc |= sq_cn9k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
	else
		rc |= sq_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);

	/* Sync NDC-NIX-TX for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox_get(mbox));
	if (ndc_req == NULL) {
		mbox_put(mbox);
		return -ENOSPC;
	}
	ndc_req->nix_lf_tx_sync = 1;
	if (mbox_process(mbox))
		rc |= NIX_ERR_NDC_SYNC;
	mbox_put(mbox);

	rc |= nix_tm_sq_flush_post(sq);

	/* Restore the limit to the max SQB count the pool was created for,
	 * so that the aura drain can succeed.
	 */
	roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
	rc |= roc_npa_pool_destroy(sq->aura_handle);
	plt_free(sq->fc);
	plt_free(sq->sqe_mem);
	nix->sqs[qid] = NULL;

	return rc;
}

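/* Head/tail snapshot helpers: the indexes are read through the LF's
 * NIX_LF_CQ_OP_STATUS / NIX_LF_SQ_OP_STATUS atomic operations rather than
 * tracked in software; for the SQ the tail is further adjusted using the
 * in-use SQB count returned by the same read.
 */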
void
roc_nix_cq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
			 uint32_t *tail)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint64_t reg, val;
	int64_t *addr;

	if (head == NULL || tail == NULL)
		return;

	reg = (((uint64_t)qid) << 32);
	addr = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	val = roc_atomic64_add_nosync(reg, addr);
	if (val &
	    (BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) | BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR)))
		val = 0;

	*tail = (uint32_t)(val & 0xFFFFF);
	*head = (uint32_t)((val >> 20) & 0xFFFFF);
}

void
roc_nix_sq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
			 uint32_t *tail)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_nix_sq *sq = nix->sqs[qid];
	uint16_t sqes_per_sqb, sqb_cnt;
	uint64_t reg, val;
	int64_t *addr;

	if (head == NULL || tail == NULL)
		return;

	reg = (((uint64_t)qid) << 32);
	addr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
	val = roc_atomic64_add_nosync(reg, addr);
	if (val & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR)) {
		val = 0;
		return;
	}

	*tail = (uint32_t)((val >> 28) & 0x3F);
	*head = (uint32_t)((val >> 20) & 0x3F);
	sqb_cnt = (uint16_t)(val & 0xFFFF);

	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;

	/* Update tail index as per used sqb count */
	*tail += (sqes_per_sqb * (sqb_cnt - 1));
}

int
roc_nix_q_err_cb_register(struct roc_nix *roc_nix, q_err_get_t sq_err_handle)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct dev *dev = &nix->dev;

	if (sq_err_handle == NULL)
		return NIX_ERR_PARAM;

	dev->ops->q_err_cb = (q_err_cb_t)sq_err_handle;
	return 0;
}

void
roc_nix_q_err_cb_unregister(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct dev *dev = &nix->dev;

	dev->ops->q_err_cb = NULL;
}