/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <math.h>

#include "roc_api.h"
#include "roc_priv.h"

static inline uint32_t
nix_qsize_to_val(enum nix_q_size qsize)
{
	return (16UL << (qsize * 2));
}

static inline enum nix_q_size
nix_qsize_clampup(uint32_t val)
{
	int i = nix_q_size_16;

	for (; i < nix_q_size_max; i++)
		if (val <= nix_qsize_to_val(i))
			break;

	if (i >= nix_q_size_max)
		i = nix_q_size_max - 1;

	return i;
}
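
/* Sizing note: nix_qsize_to_val() maps each nix_q_size step to
 * 16 << (2 * qsize), so every enum step quadruples the entry count
 * (16, 64, 256, 1K, 4K, ...). For example, assuming nix_q_size_1K is a
 * supported step, a request for 1000 entries is clamped up by
 * nix_qsize_clampup() to 1024.
 */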

int
nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
{
	struct mbox *mbox = dev->mbox;

	/* Pkts will be dropped silently if RQ is disabled */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	}

	return mbox_process(mbox);
}

int
roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
{
	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
	int rc;

	rc = nix_rq_ena_dis(&nix->dev, rq, enable);

	if (roc_model_is_cn10k())
		plt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);
	return rc;
}
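
/* Note on the AQ WRITE convention used throughout this file: for
 * NIX_AQ_INSTOP_WRITE the ctx/ctx_mask pair is a masked update, so setting
 * a mask field to all ones (the "~aq->rq_mask.x" idiom, which relies on the
 * freshly allocated mailbox message being zero-filled) selects exactly the
 * context fields to change and leaves the rest of the context untouched.
 */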

int
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
		bool cfg, bool ena)
{
	struct mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->qid;
	}

	if (rq->ipsech_ena)
		aq->rq.ipsech_ena = 1;

	aq->rq.spb_ena = 0;
	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 1;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
		}
	}

	return 0;
}
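
/* Buffer sizing example for the dword-encoded RQ fields (illustrative
 * values): an lpb_size of 2048 bytes is programmed as
 * lpb_sizem1 = 2048 / 8 - 1 = 255, and a first_skip of 128 bytes becomes
 * 128 / 8 = 16 dwords. Sizes that are not multiples of 8 bytes are
 * rejected with -EINVAL before reaching the hardware.
 */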

int
nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
	   bool ena)
{
	struct nix_cn10k_aq_enq_req *aq;
	struct mbox *mbox = dev->mbox;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);

		if (rq->vwqe_ena) {
			aq->rq.vwqe_ena = true;
			aq->rq.vwqe_skip = rq->vwqe_first_skip;
			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
			aq->rq.wqe_aura = rq->vwqe_aura_handle;
		}
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->qid;
	}

	if (rq->ipsech_ena) {
		aq->rq.ipsech_ena = 1;
		aq->rq.ipsecd_drop_en = 1;
	}

	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;

	if (rq->spb_ena) {
		uint32_t spb_sizem1;

		aq->rq.spb_ena = 1;
		aq->rq.spb_aura =
			roc_npa_aura_handle_to_aura(rq->spb_aura_handle);

		if (rq->spb_size & 0x7 ||
		    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
			return -EINVAL;

		spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
		spb_sizem1 -= 1; /* Expressed in size minus one */
		aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
		aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
	} else {
		aq->rq.spb_ena = 0;
	}

	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 1;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.wqe_pool_pass = rq->red_pass;
		aq->rq.xqe_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq.wqe_pool_drop = rq->red_drop;
		aq->rq.xqe_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			if (rq->vwqe_ena) {
				aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
				aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
				aq->rq_mask.max_vsize_exp =
					~aq->rq_mask.max_vsize_exp;
				aq->rq_mask.vtime_wait =
					~aq->rq_mask.vtime_wait;
				aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
			}
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		if (rq->spb_ena) {
			aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
			aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
			aq->rq_mask.spb_high_sizem1 =
				~aq->rq_mask.spb_high_sizem1;
		}

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
		}
	}

	return 0;
}
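
/* SPB sizing note for the CN10K path above: spb_sizem1 is a 9-bit
 * "size minus one" value in dwords, split across two context fields (the
 * low 6 bits in spb_sizem1, the next 3 bits in spb_high_sizem1). As an
 * illustration, an spb_size of 512 bytes encodes as 512 / 8 - 1 = 63,
 * i.e. spb_sizem1 = 0x3F and spb_high_sizem1 = 0.
 */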

int
roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	rq->roc_nix = roc_nix;

	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);

	if (rc)
		return rc;

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	return nix_tel_node_add_rq(rq);
}

int
roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	rq->roc_nix = roc_nix;

	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);

	if (rc)
		return rc;

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	return nix_tel_node_add_rq(rq);
}

int
roc_nix_rq_fini(struct roc_nix_rq *rq)
{
	/* Disabling RQ is sufficient */
	return roc_nix_rq_ena_dis(rq, false);
}
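
/* CQ sizing note: roc_nix_cq_init() below rounds the requested descriptor
 * count up to the nearest supported queue size and derives qmask from it
 * for power-of-two wrap-around indexing. For example, a request for 1500
 * descriptors becomes nb_desc = 4096 and qmask = 4095, assuming
 * nix_q_size_4K is the next supported step.
 */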

int
roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	volatile struct nix_cq_ctx_s *cq_ctx;
	enum nix_q_size qsize;
	size_t desc_sz;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	if (cq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	qsize = nix_qsize_clampup(cq->nb_desc);
	cq->nb_desc = nix_qsize_to_val(qsize);
	cq->qmask = cq->nb_desc - 1;
	cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
	cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	cq->wdata = (uint64_t)cq->qid << 32;
	cq->roc_nix = roc_nix;

	/* CQE of W16 */
	desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
	cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
	if (cq->desc_base == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	}

	cq_ctx->ena = 1;
	cq_ctx->caching = 1;
	cq_ctx->qsize = qsize;
	cq_ctx->base = (uint64_t)cq->desc_base;
	cq_ctx->avg_level = 0xff;
	cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
	cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);

	/* Many to one reduction */
	cq_ctx->qint_idx = cq->qid % nix->qints;
	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
	cq_ctx->cint_idx = cq->qid;

	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
		uint16_t min_rx_drop;

		min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
		cq_ctx->drop = min_rx_drop;
		cq_ctx->drop_ena = 1;
		cq->drop_thresh = min_rx_drop;
	} else {
		cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
		/* Drop processing or red drop cannot be enabled due to
		 * packets coming for second pass from CPT.
		 */
		if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
			cq_ctx->drop = cq->drop_thresh;
			cq_ctx->drop_ena = 1;
		}
	}

	/* TX pause frames enable flow ctrl on RX side */
	if (nix->tx_pause) {
		/* Single BPID is allocated for all rx channels for now */
		cq_ctx->bpid = nix->bpid[0];
		cq_ctx->bp = cq->drop_thresh;
		cq_ctx->bp_ena = 1;
	}

	rc = mbox_process(mbox);
	if (rc)
		goto free_mem;

	return nix_tel_node_add_cq(cq);

free_mem:
	plt_free(cq->desc_base);
fail:
	return rc;
}
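
/* Teardown note: roc_nix_cq_fini() below clears ena and bp_ena through a
 * masked context write and frees the CQE ring only after the mailbox call
 * succeeds, so hardware is expected to have stopped posting completions
 * before the descriptor memory is released.
 */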

int
roc_nix_cq_fini(struct roc_nix_cq *cq)
{
	struct mbox *mbox;
	struct nix *nix;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(cq->roc_nix);
	mbox = (&nix->dev)->mbox;

	/* Disable CQ */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	}

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	plt_free(cq->desc_base);
	return 0;
}
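
/* SQB sizing example for sqb_pool_populate() below (illustrative numbers):
 * a W16 SQE occupies 16 dwords (128 bytes), so a hypothetical 4096-byte
 * SQB holds (4096 / 8) / 16 = 32 SQEs, and a 1024-descriptor SQ needs
 * 1024 / 32 = 32 SQBs plus NIX_SQB_LIST_SPACE, subject to the NIX_DEF_SQB
 * floor and the roc_nix->max_sqb_count cap applied below.
 */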

static int
sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqes_per_sqb, count, nb_sqb_bufs;
	struct npa_pool_s pool;
	struct npa_aura_s aura;
	uint64_t blk_sz;
	uint64_t iova;
	int rc;

	blk_sz = nix->sqb_size;
	if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
		sqes_per_sqb = (blk_sz / 8) / 16;
	else
		sqes_per_sqb = (blk_sz / 8) / 8;

	sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
	/* Clamp up the SQB count */
	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));

	sq->nb_sqb_bufs = nb_sqb_bufs;
	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
	sq->nb_sqb_bufs_adj =
		nb_sqb_bufs -
		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
	sq->nb_sqb_bufs_adj =
		(sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;

	/* Explicitly set nat_align alone, since by default the pool is
	 * created with both nat_align and buf_offset = 1, which we don't
	 * want for SQB.
	 */
	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;

	memset(&aura, 0, sizeof(aura));
	aura.fc_ena = 1;
	if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0())
		aura.fc_stype = 0x0; /* STF */
	else
		aura.fc_stype = 0x3; /* STSTP */
	aura.fc_addr = (uint64_t)sq->fc;
	aura.fc_hyst_bits = 0; /* Store count on all updates */
	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
				 &pool);
	if (rc)
		goto fail;

	sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
	if (sq->sqe_mem == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto nomem;
	}

	/* Fill the initial buffers */
	iova = (uint64_t)sq->sqe_mem;
	for (count = 0; count < NIX_MAX_SQB; count++) {
		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
		iova += blk_sz;
	}
	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
	roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
	sq->aura_sqb_bufs = NIX_MAX_SQB;

	return rc;
nomem:
	roc_npa_pool_destroy(sq->aura_handle);
fail:
	return rc;
}
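
/* The two SQ context initializers below differ mainly in the mailbox
 * request type and the scheduling field name (smq_rr_quantum on CN9K vs
 * smq_rr_weight on CN10K); both derive the SQE store type from
 * max_sqe_size and map SQ interrupts to a queue interrupt via
 * qid % nix->qints.
 */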

static void
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	     uint16_t smq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq = smq;
	aq->sq.smq_rr_quantum = rr_quantum;
	aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;
}

static int
sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena)
		return 0;

	/* Disable sq */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Read SQ and free sqb's */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* The READ result is returned in the response context */
	if (rsp->sq.smq_pend)
		plt_err("SQ has pending SQE's");

	count = rsp->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQB's that are used */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uintptr_t)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) *
						 sq->max_sqe_sz));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	return 0;
}

static void
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	uint16_t smq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq = smq;
	aq->sq.smq_rr_weight = rr_quantum;
	aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;
}

static int
sq_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_rsp *rsp;
	struct nix_cn10k_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena)
		return 0;

	/* Disable sq */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Read SQ and free sqb's */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* The READ result is returned in the response context */
	if (rsp->sq.smq_pend)
		plt_err("SQ has pending SQE's");

	count = rsp->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQB's that are used */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uintptr_t)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) *
						 sq->max_sqe_sz));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	return 0;
}

int
roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t qid, smq = UINT16_MAX;
	uint32_t rr_quantum = 0;
	int rc;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	qid = sq->qid;
	if (qid >= nix->nb_tx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	sq->roc_nix = roc_nix;
	/*
	 * Allocate memory for flow control updates from HW.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
	if (sq->fc == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	rc = sqb_pool_populate(roc_nix, sq);
	if (rc)
		goto nomem;

	rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
	if (rc) {
		rc = NIX_ERR_TM_LEAF_NODE_GET;
		goto nomem;
	}

	/* Init SQ context */
	if (roc_model_is_cn9k())
		sq_cn9k_init(nix, sq, rr_quantum, smq);
	else
		sq_init(nix, sq, rr_quantum, smq);

	rc = mbox_process(mbox);
	if (rc)
		goto nomem;

	nix->sqs[qid] = sq;
	sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
	/* Evenly distribute LMT slot for each sq */
	if (roc_model_is_cn9k()) {
		/* Multiple cores/SQ's can use same LMTLINE safely in CN9K */
		sq->lmt_addr = (void *)(nix->lmt_base +
					((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
	}

	rc = nix_tel_node_add_sq(sq);
	return rc;
nomem:
	plt_free(sq->fc);
fail:
	return rc;
}

int
roc_nix_sq_fini(struct roc_nix_sq *sq)
{
	struct nix *nix;
	struct mbox *mbox;
	struct ndc_sync_op *ndc_req;
	uint16_t qid;
	int rc = 0;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(sq->roc_nix);
	mbox = (&nix->dev)->mbox;

	qid = sq->qid;

	rc = nix_tm_sq_flush_pre(sq);

	/* Release SQ context */
	if (roc_model_is_cn9k())
		rc |= sq_cn9k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
	else
		rc |= sq_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);

	/* Sync NDC-NIX-TX for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL)
		return -ENOSPC;
	ndc_req->nix_lf_tx_sync = 1;
	if (mbox_process(mbox))
		rc |= NIX_ERR_NDC_SYNC;

	rc |= nix_tm_sq_flush_post(sq);

	/* Restore the limit to the max SQB count that the pool was created
	 * with, for the aura drain to succeed.
	 */
	roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
	rc |= roc_npa_pool_destroy(sq->aura_handle);
	plt_free(sq->fc);
	plt_free(sq->sqe_mem);
	nix->sqs[qid] = NULL;

	return rc;
}