/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "base/common.h"
#include "base/t4_regs.h"
#include "base/t4_msg.h"
#include "cxgbe.h"

static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
					   struct sge_eth_txq *txq);

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 64U

#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

/*
 * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
 * per mbuf buffer).  We currently only support two sizes for 1500- and
 * 9000-byte MTUs.  We could easily support more but there doesn't seem to be
 * much need for that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu,
			   s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)

/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
 * Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};
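
/*
 * For example, refill_fl_usembufs() below posts a large MTU buffer by
 * writing (bus_addr | RX_LARGE_MTU_BUF) into the Free List descriptor,
 * and get_buf_size() recovers the size index with (dma_addr & RX_BUF_SIZE).
 */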

/**
 * txq_avail - return the number of available slots in a Tx queue
 * @q: the Tx queue
 *
 * Returns the number of descriptors in a Tx queue available to write new
 * packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr)
{
	struct rte_mbuf *m = mbuf;

	for (; m; m = m->next, addr++) {
		*addr = m->buf_iova + rte_pktmbuf_headroom(m);
		if (*addr == 0)
			goto out_err;
	}
	return 0;

out_err:
	return -ENOMEM;
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct sge_txq *q, unsigned int n)
{
	struct tx_sw_desc *d;
	unsigned int cidx = 0;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->mbuf) {                       /* an SGL is present */
			rte_pktmbuf_free(d->mbuf);
			d->mbuf = NULL;
		}
		if (d->coalesce.idx) {
			int i;

			for (i = 0; i < d->coalesce.idx; i++) {
				rte_pktmbuf_free(d->coalesce.mbuf[i]);
				d->coalesce.mbuf[i] = NULL;
			}
			d->coalesce.idx = 0;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
		RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool);
	}
}

static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->mbuf) {                       /* an SGL is present */
			rte_pktmbuf_free(d->mbuf);
			d->mbuf = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 * fl_cap - return the capacity of a free-buffer list
 * @fl: the FL
 *
 * Returns the capacity of a free-buffer list.  The capacity is less than
 * the size because one descriptor needs to be left unpopulated, otherwise
 * HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}
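
/*
 * For example, a Free List created with fl->size == 1024 entries can be
 * filled with at most 1016 buffers; one 8-buffer descriptor always stays
 * unpopulated.
 */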

/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

static inline unsigned int get_buf_size(struct adapter *adapter,
					const struct rx_sw_desc *d)
{
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	unsigned int buf_size = 0;

	switch (rx_buf_size_idx) {
	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG_ON(1);
		/* NOT REACHED */
	}

	return buf_size;
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @q: the SGE free list to free buffers from
 * @n: how many buffers to free
 *
 * Release the next @n buffers on an SGE free-buffer Rx queue.  The
 * buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct sge_fl *q, int n)
{
	unsigned int cidx = q->cidx;
	struct rx_sw_desc *d;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->buf) {
			rte_pktmbuf_free(d->buf);
			d->buf = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
		q->avail--;
	}
	q->cidx = cidx;
}

/**
 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 * @q: the SGE free list
 *
 * Unmap the current buffer on an SGE free-buffer Rx queue.  The
 * buffer must be made inaccessible to HW before calling this function.
 *
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct sge_fl *q)
{
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 64) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= V_PIDX(q->pend_cred / 8);
		else
			val |= V_PIDX_T5(q->pend_cred / 8);

		/*
		 * Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/*
		 * If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(!q->bar2_addr)) {
			u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) :
						 T4VF_SGE_BASE_ADDR +
						 A_SGE_VF_KDOORBELL;

			t4_write_reg_relaxed(adap, reg,
					     val | V_QID(q->cntxt_id));
		} else {
			writel_relaxed(val | V_QID(q->bar2_qid),
				       (void *)((uintptr_t)q->bar2_addr +
				       SGE_UDB_KDOORBELL));

			/*
			 * This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
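		/* Keep the 0-7 buffers that don't yet fill an 8-buffer credit. */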
		q->pend_cred &= 7;
	}
}

static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf,
				  dma_addr_t mapping)
{
	sd->buf = buf;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers
 * allocated from the queue's mbuf pool.  The caller must assure that
 * @n does not exceed the queue's capacity.  If afterwards the queue is
 * found critically low mark it as starving in the bitmap of starving FLs.
 *
 * Returns the number of buffers allocated.
 */
static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
				       int n)
{
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl);
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	unsigned int buf_size_idx = RX_SMALL_MTU_BUF;
	struct rte_mbuf *buf_bulk[n];
	int ret, i;
	struct rte_pktmbuf_pool_private *mbp_priv;
	u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
		DEV_RX_OFFLOAD_JUMBO_FRAME;

	/* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
	mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
	if (jumbo_en &&
	    ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
		buf_size_idx = RX_LARGE_MTU_BUF;

	ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
	if (unlikely(ret != 0)) {
		dev_debug(adap, "%s: failed to allocate fl entries in bulk ..\n",
			  __func__);
		q->alloc_failed++;
		rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
		goto out;
	}

	for (i = 0; i < n; i++) {
		struct rte_mbuf *mbuf = buf_bulk[i];
		dma_addr_t mapping;

		if (!mbuf) {
			dev_debug(adap, "%s: mbuf alloc failed\n", __func__);
			q->alloc_failed++;
			rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
			goto out;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
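		/*
		 * Place the packet data at the first fl_align-aligned offset
		 * at or past the mbuf headroom; fl_align may be stricter than
		 * the natural RTE_PKTMBUF_HEADROOM alignment.
		 */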
		mbuf->data_off =
			(uint16_t)((char *)
				   RTE_PTR_ALIGN((char *)mbuf->buf_addr +
						 RTE_PKTMBUF_HEADROOM,
						 adap->sge.fl_align) -
				   (char *)mbuf->buf_addr);
		mbuf->next = NULL;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->rspq.port_id;

		mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_iova +
						mbuf->data_off,
						adap->sge.fl_align);
		mapping |= buf_size_idx;
		*d++ = cpu_to_be64(mapping);
		set_rx_sw_desc(sd, mbuf, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		/*
		 * Make sure data has been written to free list
		 */
		wmb();
		q->low++;
	}

	return cred;
}

/**
 * refill_fl - refill an SGE Rx buffer ring with mbufs
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers
 * allocated from the queue's mbuf pool.  The caller must assure that
 * @n does not exceed the queue's capacity.  Returns the number of buffers
 * allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n)
{
	return refill_fl_usembufs(adap, q, n);
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail));
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);

	hw_cidx -= q->cidx;
	if (hw_cidx < 0)
		return hw_cidx + q->size;
	return hw_cidx;
}

/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @q: the Tx queue to reclaim completed descriptors from
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed.
 */
void reclaim_completed_tx(struct sge_txq *q)
{
	unsigned int avail = reclaimable(q);

	do {
		/* reclaim as much as possible */
		reclaim_tx_desc(q, avail);
		q->in_use -= avail;
		avail = reclaimable(q);
	} while (avail);
}

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/*
	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
	 * (n-1) is odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
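
/*
 * For example, for a 3-fragment packet n becomes 2 and sgl_len() returns
 * (3 * 2) / 2 + (2 & 1) + 2 = 5 flits (40 bytes).
 */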

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	return DIV_ROUND_UP(n, 8);
}

/**
 * is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @m: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit as
 * immediate data.  Return value corresponds to the headroom required.
 */
static inline int is_eth_imm(const struct rte_mbuf *m)
{
	unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ?
			      sizeof(struct cpl_tx_pkt_lso_core) : 0;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;

	return 0;
}

/**
 * calc_tx_flits - calculate the number of flits for a packet Tx WR
 * @m: the packet
 * @adap: adapter structure pointer
 *
 * Returns the number of flits needed for a Tx WR for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct rte_mbuf *m,
					 struct adapter *adap)
{
	size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) :
					sizeof(struct fw_eth_tx_pkt_vm_wr);
	unsigned int flits;
	int hdrlen;

	/*
	 * If the mbuf is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the mbuf data in the Work Request.
	 */

	hdrlen = is_eth_imm(m);
	if (hdrlen)
		return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64));

	/*
	 * Otherwise, we're going to have to construct a Scatter gather list
	 * of the mbuf body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(m->nb_segs);
	if (m->tso_segsz)
		flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (wr_size +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}

/**
 * write_sgl - populate a scatter/gather list for a packet
 * @mbuf: the packet
 * @q: the Tx queue we are writing into
 * @sgl: starting location for writing the SGL
 * @end: points right after the end of the SGL
 * @start: start offset into mbuf main-body data to include in the SGL
 * @addr: address of mapped region
 *
 * Generates a scatter/gather list for the buffers that make up a packet.
 * The caller must provide adequate space for the SGL that will be written.
 * The SGL includes all of the packet's page fragments and the data in its
 * main body except for the first @start bytes.  @sgl must be 16-byte
 * aligned and within a Tx descriptor with available space.  @end points
 * right after the end of the SGL but does not account for any potential
 * wrap around, i.e., @end > @sgl.
 */
static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	struct rte_mbuf *m = mbuf;
	unsigned int nfrags = m->nb_segs;
	struct ulptx_sge_pair buf[nfrags / 2];

	len = m->data_len - start;
	sgl->len0 = htonl(len);
	sgl->addr0 = rte_cpu_to_be_64(addr[0]);

	sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			      V_ULPTX_NSGE(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = 0; nfrags >= 2; nfrags -= 2, to++) {
		m = m->next;
		to->len[0] = rte_cpu_to_be_32(m->data_len);
		to->addr[0] = rte_cpu_to_be_64(addr[++i]);
		m = m->next;
		to->len[1] = rte_cpu_to_be_32(m->data_len);
		to->addr[1] = rte_cpu_to_be_64(addr[++i]);
	}
	if (nfrags) {
		m = m->next;
		to->len[0] = rte_cpu_to_be_32(m->data_len);
		to->len[1] = rte_cpu_to_be_32(0);
		to->addr[0] = rte_cpu_to_be_64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat,
						  (u8 *)sgl->sge);
		unsigned int part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat);
		rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1);
		end = RTE_PTR_ADD((void *)q->desc, part1);
	}
	if ((uintptr_t)end & 8)        /* 0-pad to multiple of 16 */
		*(u64 *)end = 0;
}

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)

#define PIDXDIFF(head, tail, wrap) \
	((tail) >= (head) ? (tail) - (head) : (wrap) - (head) + (tail))
#define P_IDXDIFF(q, idx) PIDXDIFF((q)->cidx, idx, (q)->size)
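
/*
 * For example, on a 1024-entry queue with pidx == 3 and dbidx == 1021,
 * Q_IDXDIFF(q, dbidx) == 6: the producer index has wrapped past the
 * doorbell marker.
 */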

/**
 * ring_tx_db - ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 * @n: number of new descriptors to give to HW
 *
 * Ring the doorbell for a Tx queue.
 */
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
	int n = Q_IDXDIFF(q, dbidx);

	/*
	 * Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	rte_wmb();

	/*
	 * If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(!q->bar2_addr)) {
		u32 val = V_PIDX(n);

		/*
		 * For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
		 */
		if (!q->db_disabled)
			t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
				     V_QID(q->cntxt_id) | val);
		else
			q->db_pidx_inc += n;
		q->db_pidx = q->pidx;
	} else {
		u32 val = V_PIDX_T5(n);

		/*
		 * T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & F_DBPRIO);

		writel(val | V_QID(q->bar2_qid),
		       (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL));

		/*
		 * This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		rte_wmb();
	}
	q->dbidx = q->pidx;
}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
{
	int csum_type;

	if (m->ol_flags & PKT_TX_IP_CKSUM) {
		switch (m->ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_TCP_CKSUM:
			csum_type = TX_CSUM_TCPIP;
			break;
		case PKT_TX_UDP_CKSUM:
			csum_type = TX_CSUM_UDPIP;
			break;
		default:
			goto nocsum;
		}
	} else {
		goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		u64 hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
		int eth_hdr_len = m->l2_len;

		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len);
		else
			hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len);
		return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len;
	}
nocsum:
	/*
	 * unknown protocol, disable HW csum
	 * and hope a bad packet is detected
	 */
	return F_TXPKT_L4CSUM_DIS;
}
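
/*
 * For example, an IPv4/TCP packet with PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM
 * set maps to TX_CSUM_TCPIP together with the Ethernet and IP header
 * lengths, letting the chip insert both checksums on transmit.
 */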

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

#define MAX_COALESCE_LEN 64000

static inline int wraps_around(struct sge_txq *q, int ndesc)
{
	return (q->pidx + ndesc) > q->size ? 1 : 0;
}

static void tx_timer_cb(void *data)
{
	struct adapter *adap = (struct adapter *)data;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
	int i;
	unsigned int coal_idx;

	/* monitor any pending tx */
	for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) {
		if (t4_os_trylock(&txq->txq_lock)) {
			coal_idx = txq->q.coalesce.idx;
			if (coal_idx) {
				if (coal_idx == txq->q.last_coal_idx &&
				    txq->q.pidx == txq->q.last_pidx) {
					ship_tx_pkt_coalesce_wr(adap, txq);
				} else {
					txq->q.last_coal_idx = coal_idx;
					txq->q.last_pidx = txq->q.pidx;
				}
			}
			t4_os_unlock(&txq->txq_lock);
		}
	}
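	/* Re-arm: pending coalesced Tx is checked every 50us. */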
	rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
}

/**
 * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR
 * @adap: adapter structure
 * @txq: tx queue
 *
 * Writes the different fields of the pkts WR and sends it.
 */
static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
					   struct sge_eth_txq *txq)
{
	struct fw_eth_tx_pkts_vm_wr *vmwr;
	const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
					sizeof(vmwr->ethmacsrc) +
					sizeof(vmwr->ethtype) +
					sizeof(vmwr->vlantci));
	struct fw_eth_tx_pkts_wr *wr;
	struct sge_txq *q = &txq->q;
	unsigned int ndesc;
	u32 wr_mid;

	/* fill the pkts WR header */
	wr = (void *)&q->desc[q->pidx];
	wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
	vmwr = (void *)&q->desc[q->pidx];

	wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
	ndesc = flits_to_desc(q->coalesce.flits);
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->plen = cpu_to_be16(q->coalesce.len);
	wr->npkt = q->coalesce.idx;
	wr->r3 = 0;
	if (is_pf4(adap)) {
		wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
		wr->type = q->coalesce.type;
	} else {
		wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
		vmwr->r4 = 0;
		memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst,
		       fw_hdr_copy_len);
	}

	/* zero out coalesce structure members */
	memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce));

	txq_advance(q, ndesc);
	txq->stats.coal_wr++;
	txq->stats.coal_pkts += wr->npkt;

	if (Q_IDXDIFF(q, equeidx) >= q->size / 2) {
		q->equeidx = q->pidx;
		wr_mid |= F_FW_WR_EQUEQ;
		wr->equiq_to_len16 = htonl(wr_mid);
	}
	ring_tx_db(adap, q);
}
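
/*
 * A coalesce WR is shipped when the queue runs out of room, when the WR
 * type or destination MAC (VF) changes, when the length or flit limits are
 * reached, or from the 50us timer above if the queue has gone idle.
 */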

/**
 * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not
 * @txq: tx queue where the mbuf is sent
 * @mbuf: mbuf to be sent
 * @nflits: return value for number of flits needed
 * @adap: adapter structure
 *
 * This function decides if a packet should be coalesced or not.
 */
static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
					    struct rte_mbuf *mbuf,
					    unsigned int *nflits,
					    struct adapter *adap)
{
	struct fw_eth_tx_pkts_vm_wr *wr;
	const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
					sizeof(wr->ethmacsrc) +
					sizeof(wr->ethtype) +
					sizeof(wr->vlantci));
	struct sge_txq *q = &txq->q;
	unsigned int flits, ndesc;
	unsigned char type = 0;
	int credits, wr_size;

	/* use coal WR type 1 when no frags are present */
	type = (mbuf->nb_segs == 1) ? 1 : 0;
	if (!is_pf4(adap)) {
		if (!type)
			return 0;

		if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst,
					      rte_pktmbuf_mtod(mbuf, void *),
					      fw_hdr_copy_len))
			ship_tx_pkt_coalesce_wr(adap, txq);
	}

	if (unlikely(type != q->coalesce.type && q->coalesce.idx))
		ship_tx_pkt_coalesce_wr(adap, txq);

	/* calculate the number of flits required for coalescing this packet
	 * without the 2 flits of the WR header.  These are added further down
	 * if we are just starting in new PKTS WR.  sgl_len doesn't account for
	 * the possible 16 bytes alignment ULP TX commands so we do it here.
	 */
	flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U;
	if (type == 0)
		flits += (sizeof(struct ulp_txpkt) +
			  sizeof(struct ulptx_idata)) / sizeof(__be64);
	flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64);
	*nflits = flits;

	/* If coalescing is on, the mbuf is added to a pkts WR */
	if (q->coalesce.idx) {
		ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
		credits = txq_avail(q) - ndesc;

		/* If we are wrapping or this is last mbuf then, send the
		 * already coalesced mbufs and let the non-coalesce pass
		 * handle the mbuf.
		 */
		if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
			ship_tx_pkt_coalesce_wr(adap, txq);
			return 0;
		}

		/* If the max coalesce len or the max WR len is reached
		 * ship the WR and keep coalescing on.
		 */
		if (unlikely((q->coalesce.len + mbuf->pkt_len >
			      MAX_COALESCE_LEN) ||
			     (q->coalesce.flits + flits >
			      q->coalesce.max))) {
			ship_tx_pkt_coalesce_wr(adap, txq);
			goto new;
		}
		return 1;
	}

new:
	/* start a new pkts WR, the WR header is not filled below */
	wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) :
				 sizeof(struct fw_eth_tx_pkts_vm_wr);
	flits += wr_size / sizeof(__be64);
	ndesc = flits_to_desc(q->coalesce.flits + flits);
	credits = txq_avail(q) - ndesc;

	if (unlikely(credits < 0 || wraps_around(q, ndesc)))
		return 0;
	q->coalesce.flits += wr_size / sizeof(__be64);
	q->coalesce.type = type;
	q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
			  q->coalesce.flits * sizeof(__be64);
	if (!is_pf4(adap))
		memcpy((void *)q->coalesce.ethmacdst,
		       rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len);
	return 1;
}
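
/*
 * Note: type 1 coalesce WRs carry only single-segment packets; multi-segment
 * packets use type 0, which prefixes each packet with a ulp_txpkt and
 * ulptx_idata header (see tx_do_packet_coalesce() below).
 */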

/**
 * tx_do_packet_coalesce - add an mbuf to a coalesce WR
 * @txq: sge_eth_txq used to send the mbuf
 * @mbuf: mbuf to be sent
 * @flits: flits needed for this mbuf
 * @adap: adapter structure
 * @pi: port_info structure
 * @addr: mapped address of the mbuf
 *
 * Adds an mbuf to be sent as part of a coalesce WR by filling a
 * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and
 * ulp_tx_sc_dsgl command.
 */
static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
					struct rte_mbuf *mbuf,
					int flits, struct adapter *adap,
					const struct port_info *pi,
					dma_addr_t *addr, uint16_t nb_pkts)
{
	u64 cntrl, *end;
	struct sge_txq *q = &txq->q;
	struct ulp_txpkt *mc;
	struct ulptx_idata *sc_imm;
	struct cpl_tx_pkt_core *cpl;
	struct tx_sw_desc *sd;
	unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;

	if (q->coalesce.type == 0) {
		mc = (struct ulp_txpkt *)q->coalesce.ptr;
		mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
				     V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) |
				     F_ULP_TXPKT_RO);
		mc->len = htonl(DIV_ROUND_UP(flits, 2));
		sc_imm = (struct ulptx_idata *)(mc + 1);
		sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) |
					 F_ULP_TX_SC_MORE);
		sc_imm->len = htonl(sizeof(*cpl));
		end = (u64 *)mc + flits;
		cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1);
	} else {
		end = (u64 *)q->coalesce.ptr + flits;
		cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr;
	}

	/* update coalesce structure for this txq */
	q->coalesce.flits += flits;
	q->coalesce.ptr += flits * sizeof(__be64);
	q->coalesce.len += mbuf->pkt_len;

	/* fill the cpl message, same as in t4_eth_xmit, this should be kept
	 * similar to t4_eth_xmit
	 */
	if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
		cntrl = hwcsum(adap->params.chip, mbuf) |
			F_TXPKT_IPCSUM_DIS;
		txq->stats.tx_cso++;
	} else {
		cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
	}

	if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
		txq->stats.vlan_ins++;
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
	}

	cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
	if (is_pf4(adap))
		cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
				    V_TXPKT_PF(adap->pf));
	else
		cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id));
	cpl->pack = htons(0);
	cpl->len = htons(len);
	cpl->ctrl1 = cpu_to_be64(cntrl);
	write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr);
	txq->stats.pkts++;
	txq->stats.tx_bytes += len;

	sd = &q->sdesc[q->pidx + (idx >> 1)];
	if (!(idx & 1)) {
		if (sd->coalesce.idx) {
			int i;

			for (i = 0; i < sd->coalesce.idx; i++) {
				rte_pktmbuf_free(sd->coalesce.mbuf[i]);
				sd->coalesce.mbuf[i] = NULL;
			}
		}
	}

	/* store pointers to the mbuf and the sgl used in free_tx_desc.
	 * each tx desc can hold two pointers corresponding to the value
	 * of ETH_COALESCE_PKT_PER_DESC
	 */
	sd->coalesce.mbuf[idx & 1] = mbuf;
	sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
	sd->coalesce.idx = (idx & 1) + 1;

	/* Send the coalesced work request, only if max reached.  However,
	 * if lower latency is preferred over throughput, then don't wait
	 * for coalescing the next Tx burst and send the packets now.
	 */
	q->coalesce.idx++;
	if (q->coalesce.idx == adap->params.max_tx_coalesce_num ||
	    (adap->devargs.tx_mode_latency && q->coalesce.idx >= nb_pkts))
		ship_tx_pkt_coalesce_wr(adap, txq);

	return 0;
}

/**
 * t4_eth_xmit - add a packet to an Ethernet Tx queue
 * @txq: the egress queue
 * @mbuf: the packet
 *
 * Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
		uint16_t nb_pkts)
{
	const struct port_info *pi;
	struct cpl_tx_pkt_lso_core *lso;
	struct adapter *adap;
	struct rte_mbuf *m = mbuf;
	struct fw_eth_tx_pkt_wr *wr;
	struct fw_eth_tx_pkt_vm_wr *vmwr;
	struct cpl_tx_pkt_core *cpl;
	struct tx_sw_desc *d;
	dma_addr_t addr[m->nb_segs];
	unsigned int flits, ndesc, cflits;
	int l3hdr_len, l4hdr_len, eth_xtra_len;
	int len, last_desc;
	int credits;
	u32 wr_mid;
	u64 cntrl, *end;
	bool v6;
	u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;

	/* Reject xmit if queue is stopped */
	if (unlikely(txq->flags & EQ_STOPPED))
		return -(EBUSY);

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) {
out_free:
		rte_pktmbuf_free(m);
		return 0;
	}

	if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
	    (unlikely(m->pkt_len > max_pkt_len)))
		goto out_free;

	pi = txq->data->dev_private;
	adap = pi->adapter;

	cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
	/* align the end of coalesce WR to a 512 byte boundary */
	txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
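	/*
	 * e.g. if pidx & 7 == 3, at most five 8-flit descriptors (40 flits)
	 * fit before the next 512-byte aligned descriptor.
	 */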

	if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
	      m->pkt_len > RTE_ETHER_MAX_LEN)) {
		if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
			if (unlikely(map_mbuf(mbuf, addr) < 0)) {
				dev_warn(adap, "%s: mapping err for coalesce\n",
					 __func__);
				txq->stats.mapping_err++;
				goto out_free;
			}
			return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
						     pi, addr, nb_pkts);
		} else {
			return -EBUSY;
		}
	}

	if (txq->q.coalesce.idx)
		ship_tx_pkt_coalesce_wr(adap, txq);

	flits = calc_tx_flits(m, adap);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		dev_debug(adap, "%s: Tx ring %u full; credits = %d\n",
			  __func__, txq->q.cntxt_id, credits);
		return -EBUSY;
	}

	if (unlikely(map_mbuf(m, addr) < 0)) {
		txq->stats.mapping_err++;
		goto out_free;
	}

	wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
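	/*
	 * Request an egress-queue credit update from the firmware roughly
	 * every 64 descriptors so completed work keeps being reclaimed.
	 */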
	if (Q_IDXDIFF(&txq->q, equeidx) >= 64) {
		txq->q.equeidx = txq->q.pidx;
		wr_mid |= F_FW_WR_EQUEQ;
	}

	wr = (void *)&txq->q.desc[txq->q.pidx];
	vmwr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	if (is_pf4(adap)) {
		wr->r3 = rte_cpu_to_be_64(0);
		end = (u64 *)wr + flits;
	} else {
		const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
						sizeof(vmwr->ethmacsrc) +
						sizeof(vmwr->ethtype) +
						sizeof(vmwr->vlantci));

		vmwr->r3[0] = rte_cpu_to_be_32(0);
		vmwr->r3[1] = rte_cpu_to_be_32(0);
		memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *),
		       fw_hdr_copy_len);
		end = (u64 *)vmwr + flits;
	}

	len = 0;
	len += sizeof(*cpl);

	/* Coalescing skipped and we send through normal path */
	if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
		wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
						  FW_ETH_TX_PKT_WR :
						  FW_ETH_TX_PKT_VM_WR) |
				       V_FW_WR_IMMDLEN(len));
		if (is_pf4(adap))
			cpl = (void *)(wr + 1);
		else
			cpl = (void *)(vmwr + 1);
		if (m->ol_flags & PKT_TX_IP_CKSUM) {
			cntrl = hwcsum(adap->params.chip, m) |
				F_TXPKT_IPCSUM_DIS;
			txq->stats.tx_cso++;
		}
	} else {
		if (is_pf4(adap))
			lso = (void *)(wr + 1);
		else
			lso = (void *)(vmwr + 1);
		v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
		l3hdr_len = m->l3_len;
		l4hdr_len = m->l4_len;
		eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
		len += sizeof(*lso);
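		/* LSO header lengths are given to the hardware in 4-byte units. */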
		wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
						  FW_ETH_TX_PKT_WR :
						  FW_ETH_TX_PKT_VM_WR) |
				       V_FW_WR_IMMDLEN(len));
		lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
				      F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
				      V_LSO_IPV6(v6) |
				      V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
				      V_LSO_IPHDR_LEN(l3hdr_len / 4) |
				      V_LSO_TCPHDR_LEN(l4hdr_len / 4));
		lso->ipid_ofst = htons(0);
		lso->mss = htons(m->tso_segsz);
		lso->seqno_offset = htonl(0);
		if (is_t4(adap->params.chip))
			lso->len = htonl(m->pkt_len);
		else
			lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
		cpl = (void *)(lso + 1);

		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			cntrl = V_TXPKT_ETHHDR_LEN(eth_xtra_len);
		else
			cntrl = V_T6_TXPKT_ETHHDR_LEN(eth_xtra_len);

		cntrl |= V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 :
					   TX_CSUM_TCPIP) |
			 V_TXPKT_IPHDR_LEN(l3hdr_len);
		txq->stats.tso++;
		txq->stats.tx_cso += m->tso_segsz;
	}

	if (m->ol_flags & PKT_TX_VLAN_PKT) {
		txq->stats.vlan_ins++;
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
	}

	cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
	if (is_pf4(adap))
		cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
				    V_TXPKT_PF(adap->pf));
	else
		cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) |
				    V_TXPKT_PF(0));

	cpl->pack = htons(0);
	cpl->len = htons(m->pkt_len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	txq->stats.pkts++;
	txq->stats.tx_bytes += m->pkt_len;
	last_desc = txq->q.pidx + ndesc - 1;
	if (last_desc >= (int)txq->q.size)
		last_desc -= txq->q.size;

	d = &txq->q.sdesc[last_desc];
	if (d->coalesce.idx) {
		int i;

		for (i = 0; i < d->coalesce.idx; i++) {
			rte_pktmbuf_free(d->coalesce.mbuf[i]);
			d->coalesce.mbuf[i] = NULL;
		}
		d->coalesce.idx = 0;
	}
	write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
		  addr);
	txq->q.sdesc[last_desc].mbuf = m;
	txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
	txq_advance(&txq->q, ndesc);
	ring_tx_db(adap, &txq->q);
	return 0;
}

/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of reclaim_completed_tx() that is used for Tx queues
 * that send only immediate data (presently just the control queues) and
 * thus do not have any mbufs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);
	int reclaim = hw_cidx - q->cidx;

	if (reclaim < 0)
		reclaim += q->size;

	q->in_use -= reclaim;
	q->cidx = hw_cidx;
}

/**
 * is_imm - check whether a packet can be sent as immediate data
 * @mbuf: the packet
 *
 * Returns true if a packet can be sent as a WR with immediate data.
 */
static inline int is_imm(const struct rte_mbuf *mbuf)
{
	return mbuf->pkt_len <= MAX_CTRL_WR_LEN;
}

/**
 * inline_tx_mbuf: inline a packet's data into TX descriptors
 * @q: the TX queue where the packet will be inlined
 * @from: pointer to data portion of packet
 * @to: pointer after cpl where data has to be inlined
 * @len: length of data to inline
 *
 * Inline a packet's contents directly to TX descriptors, starting at
 * the given position within the TX DMA ring.
 * Most of the complexity of this operation is dealing with wrap arounds
 * in the middle of the packet we want to inline.
 */
static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to,
			   int len)
{
	int left = RTE_PTR_DIFF(q->stat, *to);

	if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) {
		rte_memcpy(*to, from, len);
		*to = RTE_PTR_ADD(*to, len);
	} else {
		rte_memcpy(*to, from, left);
		from = RTE_PTR_ADD(from, left);
		left = len - left;
		rte_memcpy((void *)q->desc, from, left);
		*to = RTE_PTR_ADD((void *)q->desc, left);
	}
}

/**
 * ctrl_xmit - send a packet through an SGE control Tx queue
 * @q: the control queue
 * @mbuf: the packet
 *
 * Send a packet through an SGE control Tx queue.  Packets sent through
 * a control queue must fit entirely as immediate data.
 */
static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
{
	unsigned int ndesc;
	struct fw_wr_hdr *wr;
	caddr_t dst;

	if (unlikely(!is_imm(mbuf))) {
		WARN_ON(1);
		rte_pktmbuf_free(mbuf);
		return -1;
	}

	reclaim_completed_tx_imm(&q->q);
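	/* One Tx descriptor holds 64 bytes (8 flits), hence the rounding. */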
	ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));
	t4_os_lock(&q->ctrlq_lock);

	q->full = txq_avail(&q->q) < ndesc ? 1 : 0;
	if (unlikely(q->full)) {
		t4_os_unlock(&q->ctrlq_lock);
		return -1;
	}

	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
	dst = (void *)wr;
	inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),
		       &dst, mbuf->data_len);

	txq_advance(&q->q, ndesc);
	if (unlikely(txq_avail(&q->q) < 64))
		wr->lo |= htonl(F_FW_WR_EQUEQ);

	q->txp++;

	ring_tx_db(q->adapter, &q->q);
	t4_os_unlock(&q->ctrlq_lock);

	rte_pktmbuf_free(mbuf);
	return 0;
}

/**
 * t4_mgmt_tx - send a management message
 * @q: the control queue
 * @mbuf: the packet containing the management message
 *
 * Send a management message through control queue.
 */
int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
{
	return ctrl_xmit(q, mbuf);
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @dev: the port associated with the queue
 * @z_name: memzone's name
 * @queue_id: queue index
 * @socket_id: preferred socket id for memory allocations
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @stat_size: extra space in HW ring for status information
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the bus address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct rte_eth_dev *dev, const char *z_name,
			uint16_t queue_id, int socket_id, size_t nelem,
			size_t elem_size, size_t stat_size, size_t sw_size,
			dma_addr_t *phys, void *metadata)
{
	size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
	char z_name_sw[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *tz;
	void *s = NULL;

	snprintf(z_name_sw, sizeof(z_name_sw), "eth_p%d_q%d_%s_sw_ring",
		 dev->data->port_id, queue_id, z_name);

	dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
		  "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
		  " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
		  stat_size, queue_id, socket_id, z_name, z_name_sw);

	/*
	 * Allocate TX/RX ring hardware descriptors.  A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = rte_eth_dma_zone_reserve(dev, z_name, queue_id, len, 4096,
				      socket_id);
	if (!tz)
		return NULL;

	memset(tz->addr, 0, len);
	if (sw_size) {
		s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
				       RTE_CACHE_LINE_SIZE, socket_id);

		if (!s) {
			dev_err(adapter, "%s: failed to get sw_ring memory\n",
				__func__);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;

	*phys = (uint64_t)tz->iova;
	return tz->addr;
}
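
/* Sentinel "mbuf" argument passed to handlers for asynchronous messages. */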
#define CXGB4_MSG_AN ((void *)1)

/**
 * rspq_next - advance to the next entry in a response queue
 * @q: the queue
 *
 * Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
	q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len);
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;
		q->cur_desc = q->desc;
	}
}

static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype,
				       uint64_t ol_flags)
{
	pkt->packet_type |= ptype;
	pkt->ol_flags |= ol_flags;
}

static inline void cxgbe_fill_mbuf_info(struct adapter *adap,
					const struct cpl_rx_pkt *cpl,
					struct rte_mbuf *pkt)
{
	bool csum_ok;
	u16 err_vec;

	if (adap->params.tp.rx_pkt_encap)
		err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
	else
		err_vec = ntohs(cpl->err_vec);

	csum_ok = cpl->csum_calc && !err_vec;

	if (cpl->vlan_ex)
		cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
				    PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
	else
		cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);

	if (cpl->l2info & htonl(F_RXF_IP))
		cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
				    csum_ok ? PKT_RX_IP_CKSUM_GOOD :
					      PKT_RX_IP_CKSUM_BAD);
	else if (cpl->l2info & htonl(F_RXF_IP6))
		cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
				    csum_ok ? PKT_RX_IP_CKSUM_GOOD :
					      PKT_RX_IP_CKSUM_BAD);

	if (cpl->l2info & htonl(F_RXF_TCP))
		cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
				    csum_ok ? PKT_RX_L4_CKSUM_GOOD :
					      PKT_RX_L4_CKSUM_BAD);
	else if (cpl->l2info & htonl(F_RXF_UDP))
		cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
				    csum_ok ? PKT_RX_L4_CKSUM_GOOD :
					      PKT_RX_L4_CKSUM_BAD);
}

/**
 * process_responses - process responses from an SGE response queue
 * @q: the ingress queue to process
 * @budget: how many responses can be processed in this round
 * @rx_pkts: mbuf to put the pkts
 *
 * Process responses from an SGE response queue up to the supplied budget.
 * Responses include received packets as well as control messages from FW
 * or HW.
 *
 * Additionally choose the interrupt holdoff time for the next interrupt
 * on this queue.  If the system is under memory shortage use a fairly
 * long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget,
			     struct rte_mbuf **rx_pkts)
{
	int ret = 0, rsp_type;
	int budget_left = budget;
	const struct rsp_ctrl *rc;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	while (likely(budget_left)) {
		if (q->cidx == ntohs(q->stat->pidx))
			break;

		rc = (const struct rsp_ctrl *)
		     ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));

		/*
		 * Ensure response has been read
		 */
		rmb();
		rsp_type = G_RSPD_TYPE(rc->u.type_gen);

		if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
			struct sge *s = &q->adapter->sge;
			unsigned int stat_pidx;
			int stat_pidx_diff;

			stat_pidx = ntohs(q->stat->pidx);
			stat_pidx_diff = P_IDXDIFF(q, stat_pidx);
			while (stat_pidx_diff && budget_left) {
				const struct rx_sw_desc *rsd =
					&rxq->fl.sdesc[rxq->fl.cidx];
				const struct rss_header *rss_hdr =
					(const void *)q->cur_desc;
				const struct cpl_rx_pkt *cpl =
					(const void *)&q->cur_desc[1];
				struct rte_mbuf *pkt, *npkt;
				u32 len, bufsz;

				rc = (const struct rsp_ctrl *)
				     ((const char *)q->cur_desc +
				      (q->iqe_len - sizeof(*rc)));

				rsp_type = G_RSPD_TYPE(rc->u.type_gen);
				if (unlikely(rsp_type != X_RSPD_TYPE_FLBUF))
					break;

				len = ntohl(rc->pldbuflen_qid);
				BUG_ON(!(len & F_RSPD_NEWBUF));
				pkt = rsd->buf;
				npkt = pkt;
				len = G_RSPD_LEN(len);
				pkt->pkt_len = len;

				/* Chain mbufs into len if necessary */
				while (len) {
					struct rte_mbuf *new_pkt = rsd->buf;

					bufsz = min(get_buf_size(q->adapter,
								 rsd), len);
					new_pkt->data_len = bufsz;
					unmap_rx_buf(&rxq->fl);
					len -= bufsz;
					npkt->next = new_pkt;
					npkt = new_pkt;
					pkt->nb_segs++;
					rsd = &rxq->fl.sdesc[rxq->fl.cidx];
				}
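				/*
				 * The first pass above chained the head buffer
				 * to itself and counted it twice; terminate
				 * the chain and fix nb_segs accordingly.
				 */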
				npkt->next = NULL;
				pkt->nb_segs--;

				cxgbe_fill_mbuf_info(q->adapter, cpl, pkt);

				if (!rss_hdr->filter_tid &&
				    rss_hdr->hash_type) {
					pkt->ol_flags |= PKT_RX_RSS_HASH;
					pkt->hash.rss =
						ntohl(rss_hdr->hash_val);
				}

				if (cpl->vlan_ex)
					pkt->vlan_tci = ntohs(cpl->vlan);

				rte_pktmbuf_adj(pkt, s->pktshift);
				rxq->stats.pkts++;
				rxq->stats.rx_bytes += pkt->pkt_len;
				rx_pkts[budget - budget_left] = pkt;

				rspq_next(q);
				budget_left--;
				stat_pidx_diff--;
			}
			continue;
		} else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
			ret = q->handler(q, q->cur_desc, NULL);
		} else {
			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
		}

		if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
			q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX);
			break;
		}

		rspq_next(q);
		budget_left--;
	}

	/*
	 * If this is a Response Queue with an associated Free List and
	 * there's room for another chunk of new Free List buffer pointers,
	 * refill the Free List.
	 */
	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
		__refill_fl(q->adapter, &rxq->fl);

	return budget - budget_left;
}
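
/**
 * cxgbe_poll - poll a response queue and refill its Free List
 * @q: the response queue to poll
 * @rx_pkts: array where the received packets are returned
 * @budget: maximum number of responses to process
 * @work_done: returns the number of responses processed
 *
 * Processes up to @budget responses and rings the queue's GTS doorbell
 * to advance its consumer index.
 */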
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
	       unsigned int budget, unsigned int *work_done)
{
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
	unsigned int cidx_inc;
	unsigned int params;
	u32 val;

	if (unlikely(rxq->flags & IQ_STOPPED)) {
		*work_done = 0;
		return 0;
	}

	*work_done = process_responses(q, budget, rx_pkts);

	if (*work_done) {
		cidx_inc = R_IDXDIFF(q, gts_idx);

		if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
			__refill_fl(q->adapter, &rxq->fl);

		params = q->intr_params;
		q->next_intr_params = params;
		val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);

		if (unlikely(!q->bar2_addr)) {
			u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) :
						       T4VF_SGE_BASE_ADDR +
						       A_SGE_VF_GTS;

			t4_write_reg(q->adapter, reg,
				     val | V_INGRESSQID((u32)q->cntxt_id));
		} else {
			writel(val | V_INGRESSQID(q->bar2_qid),
			       (void *)((uintptr_t)q->bar2_addr + SGE_UDB_GTS));
			/* This Write memory Barrier will force the
			 * write to the User Doorbell area to be
			 * flushed.
			 */
			wmb();
		}
		q->gts_idx = q->cidx;
	}
	return 0;
}

/**
 * bar2_address - return the BAR2 address for an SGE Queue's Registers
 * @adapter: the adapter
 * @qid: the SGE Queue ID
 * @qtype: the SGE Queue Type (Egress or Ingress)
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 address for the SGE Queue Registers associated with
 * @qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
 * Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
 */
static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid,
				  enum t4_bar2_qtype qtype,
				  unsigned int *pbar2_qid)
{
	u64 bar2_qoffset;
	int ret;

	ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	return adapter->bar2 + bar2_qoffset;
}
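
/*
 * For example, ring_tx_db() and ring_fl_db() above write to the
 * bar2_addr + SGE_UDB_KDOORBELL address returned by this helper, falling
 * back to the legacy PF/VF doorbell registers when it returns NULL.
 */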
int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_eth_rxq *rxq)
{
	unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;

	rxq->flags &= ~IQ_STOPPED;
	return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
				rxq->rspq.cntxt_id, fl_id, 0xffff);
}

int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_eth_rxq *rxq)
{
	unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;

	rxq->flags |= IQ_STOPPED;
	return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
				rxq->rspq.cntxt_id, fl_id, 0xffff);
}

/*
 * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
 */
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct rte_eth_dev *eth_dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd, int cong,
		     struct rte_mempool *mp, int queue_id, int socket_id)
{
	int ret, flsz = 0;
	struct fw_iq_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = eth_dev->data->dev_private;
	unsigned int nb_refill;
	u8 pciechan;

	/* Size needs to be multiple of 16, including status entry. */
	iq->size = cxgbe_roundup(iq->size, 16);

	iq->desc = alloc_ring(eth_dev, fwevtq ? "fwq_ring" : "rx_ring",
			      queue_id, socket_id, iq->size, iq->iqe_len,
			      0, 0, &iq->phys_addr, NULL);
	if (!iq->desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_WRITE | F_FW_CMD_EXEC);

	if (is_pf4(adap)) {
		pciechan = pi->tx_chan;
		c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
				     V_FW_IQ_CMD_VFN(0));
		if (cong >= 0)
			c.iqns_to_fl0congen =
				htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
				      V_FW_IQ_CMD_IQTYPE(cong ?
							 FW_IQ_IQTYPE_NIC :
							 FW_IQ_IQTYPE_OFLD) |
				      F_FW_IQ_CMD_IQRO);
	} else {
		pciechan = pi->port_id;
	}

	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
				 (sizeof(c) / 16));
	c.type_to_iqandstindex =
		htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
		      V_FW_IQ_CMD_IQASYNCH(fwevtq) |
		      V_FW_IQ_CMD_VIID(pi->viid) |
		      V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
		      V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_STATUS_PAGE) |
		      V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
					       -intr_idx - 1));
	c.iqdroprss_to_iqesize =
		htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
		      F_FW_IQ_CMD_IQGTSMODE |
		      V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
		      V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);

	if (fl) {
		struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
						       fl);
		unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);

		/*
		 * Allocate the ring for the hardware free list (with space
		 * for its status page) along with the associated software
		 * descriptor ring.  The free list size needs to be a multiple
		 * of the Egress Queue Unit and at least 2 Egress Units larger
		 * than the SGE's Egress Congestion Threshold
		 * (fl_starve_thres - 1).
		 */
		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
			fl->size = s->fl_starve_thres - 1 + 2 * 8;
		fl->size = cxgbe_roundup(fl->size, 8);

		fl->desc = alloc_ring(eth_dev, "fl_ring", queue_id, socket_id,
				      fl->size, sizeof(__be64), s->stat_len,
				      sizeof(struct rx_sw_desc),
				      &fl->addr, &fl->sdesc);
		if (!fl->desc) {
			ret = -ENOMEM;
			goto err;
		}
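		/*
		 * The hardware FL size is expressed in Egress Queue Units of
		 * eight 8-byte buffer pointers, plus room for the status page.
		 */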
1872 0 : F_FW_IQ_CMD_FL0PACKEN) | 1873 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 1874 F_FW_IQ_CMD_FL0PADEN); 1875 if (is_pf4(adap) && cong >= 0) 1876 c.iqns_to_fl0congen |= 1877 htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | 1878 F_FW_IQ_CMD_FL0CONGCIF | 1879 F_FW_IQ_CMD_FL0CONGEN); 1880 1881 /* In T6, for egress queue type FL there is internal overhead 1882 * of 16B for header going into FLM module. 1883 * Hence maximum allowed burst size will be 448 bytes. 1884 */ 1885 c.fl0dcaen_to_fl0cidxfthresh = 1886 htons(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5 ? 1887 X_FETCHBURSTMIN_128B : 1888 X_FETCHBURSTMIN_64B) | 1889 V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5 ? 1890 X_FETCHBURSTMAX_512B : 1891 X_FETCHBURSTMAX_256B)); 1892 c.fl0size = htons(flsz); 1893 c.fl0addr = cpu_to_be64(fl->addr); 1894 } 1895 1896 if (is_pf4(adap)) 1897 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 1898 else 1899 ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); 1900 if (ret) 1901 goto err; 1902 1903 iq->cur_desc = iq->desc; 1904 iq->cidx = 0; 1905 iq->gts_idx = 0; 1906 iq->gen = 1; 1907 iq->next_intr_params = iq->intr_params; 1908 iq->cntxt_id = ntohs(c.iqid); 1909 iq->abs_id = ntohs(c.physiqid); 1910 iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS, 1911 &iq->bar2_qid); 1912 iq->size--; /* subtract status entry */ 1913 iq->stat = (void *)&iq->desc[iq->size * 8]; 1914 iq->eth_dev = eth_dev; 1915 iq->handler = hnd; 1916 iq->port_id = pi->pidx; 1917 iq->mb_pool = mp; 1918 1919 /* set offset to -1 to distinguish ingress queues without FL */ 1920 iq->offset = fl ? 0 : -1; 1921 1922 if (fl) { 1923 fl->cntxt_id = ntohs(c.fl0id); 1924 fl->avail = 0; 1925 fl->pend_cred = 0; 1926 fl->pidx = 0; 1927 fl->cidx = 0; 1928 fl->alloc_failed = 0; 1929 1930 /* 1931 * Note, we must initialize the BAR2 Free List User Doorbell 1932 * information before refilling the Free List! 1933 */ 1934 fl->bar2_addr = bar2_address(adap, fl->cntxt_id, 1935 T4_BAR2_QTYPE_EGRESS, 1936 &fl->bar2_qid); 1937 1938 nb_refill = refill_fl(adap, fl, fl_cap(fl)); 1939 if (nb_refill != fl_cap(fl)) { 1940 ret = -ENOMEM; 1941 dev_err(adap, "%s: mbuf alloc failed with error: %d\n", 1942 __func__, ret); 1943 goto refill_fl_err; 1944 } 1945 } 1946 1947 /* 1948 * For T5 and later we attempt to set up the Congestion Manager values 1949 * of the new RX Ethernet Queue. This should really be handled by 1950 * firmware because it's more complex than any host driver wants to 1951 * get involved with and it's different per chip and this is almost 1952 * certainly wrong. Firmware would be wrong as well, but it would be 1953 * a lot easier to fix in one place ... For now we do something very 1954 * simple (and hopefully less wrong).
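 *
 * As an illustration (cng_ch_bits_log is chip-specific, so the value
 * here is only an assumed example): with cng_ch_bits_log == 2, channel
 * bit i of @cong maps to bit (i << 2) of the channel map, so a @cong
 * of 0x5 (channels 0 and 2) expands to a ch_map of 0x101 below.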
1955 */ 1956 if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) { 1957 u8 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; 1958 u32 param, val, ch_map = 0; 1959 int i; 1960 1961 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 1962 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 1963 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id)); 1964 if (cong == 0) { 1965 val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE); 1966 } else { 1967 val = V_CONMCTXT_CNGTPMODE( 1968 X_CONMCTXT_CNGTPMODE_CHANNEL); 1969 for (i = 0; i < 4; i++) { 1970 if (cong & (1 << i)) 1971 ch_map |= 1 << (i << cng_ch_bits_log); 1972 } 1973 val |= V_CONMCTXT_CNGCHMAP(ch_map); 1974 } 1975 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, 1976 &param, &val); 1977 if (ret) 1978 dev_warn(adap, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n", 1979 iq->cntxt_id, -ret); 1980 } 1981 1982 return 0; 1983 1984 refill_fl_err: 1985 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 1986 iq->cntxt_id, fl->cntxt_id, 0xffff); 1987 err: 1988 iq->cntxt_id = 0; 1989 iq->abs_id = 0; 1990 if (iq->desc) 1991 iq->desc = NULL; 1992 1993 if (fl && fl->desc) { 1994 rte_free(fl->sdesc); 1995 fl->cntxt_id = 0; 1996 fl->sdesc = NULL; 1997 fl->desc = NULL; 1998 } 1999 return ret; 2000 } 2001 2002 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id, 2003 unsigned int abs_id) 2004 { 2005 q->cntxt_id = id; 2006 q->abs_id = abs_id; 2007 q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS, 2008 &q->bar2_qid); 2009 q->cidx = 0; 2010 q->pidx = 0; 2011 q->dbidx = 0; 2012 q->in_use = 0; 2013 q->equeidx = 0; 2014 q->coalesce.idx = 0; 2015 q->coalesce.len = 0; 2016 q->coalesce.flits = 0; 2017 q->last_coal_idx = 0; 2018 q->last_pidx = 0; 2019 q->stat = (void *)&q->desc[q->size]; 2020 } 2021 2022 int t4_sge_eth_txq_start(struct sge_eth_txq *txq) 2023 { 2024 /* 2025 * TODO: For flow-control, queue may be stopped waiting to reclaim 2026 * credits. 2027 * Ensure queue is in EQ_STOPPED state before starting it.
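 * As written, a start request on a queue that is not currently in
 * EQ_STOPPED state is treated as a caller error: the check below
 * fails it with -EBUSY instead of silently restarting the queue.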
2028 */ 2029 if (!(txq->flags & EQ_STOPPED)) 2030 return -(EBUSY); 2031 2032 txq->flags &= ~EQ_STOPPED; 2033 2034 return 0; 2035 } 2036 2037 int t4_sge_eth_txq_stop(struct sge_eth_txq *txq) 2038 { 2039 txq->flags |= EQ_STOPPED; 2040 2041 return 0; 2042 } 2043 2044 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, 2045 struct rte_eth_dev *eth_dev, uint16_t queue_id, 2046 unsigned int iqid, int socket_id) 2047 { 2048 int ret, nentries; 2049 struct fw_eq_eth_cmd c; 2050 struct sge *s = &adap->sge; 2051 struct port_info *pi = eth_dev->data->dev_private; 2052 u8 pciechan; 2053 2054 /* Add status entries */ 2055 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 2056 2057 txq->q.desc = alloc_ring(eth_dev, "tx_ring", queue_id, socket_id, 2058 txq->q.size, sizeof(struct tx_desc), 2059 s->stat_len, sizeof(struct tx_sw_desc), 2060 &txq->q.phys_addr, &txq->q.sdesc); 2061 if (!txq->q.desc) 2062 return -ENOMEM; 2063 2064 memset(&c, 0, sizeof(c)); 2065 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 2066 F_FW_CMD_WRITE | F_FW_CMD_EXEC); 2067 if (is_pf4(adap)) { 2068 pciechan = pi->tx_chan; 2069 c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) | 2070 V_FW_EQ_ETH_CMD_VFN(0)); 2071 } else { 2072 pciechan = pi->port_id; 2073 } 2074 2075 c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC | 2076 F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16)); 2077 c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE | 2078 V_FW_EQ_ETH_CMD_VIID(pi->viid)); 2079 c.fetchszm_to_iqid = 2080 htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 2081 V_FW_EQ_ETH_CMD_PCIECHN(pciechan) | 2082 F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid)); 2083 c.dcaen_to_eqsize = 2084 htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 2085 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 2086 V_FW_EQ_ETH_CMD_EQSIZE(nentries)); 2087 c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr); 2088 2089 if (is_pf4(adap)) 2090 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 2091 else 2092 ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); 2093 if (ret) { 2094 rte_free(txq->q.sdesc); 2095 txq->q.sdesc = NULL; 2096 txq->q.desc = NULL; 2097 return ret; 2098 } 2099 2100 init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)), 2101 G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd))); 2102 txq->stats.tso = 0; 2103 txq->stats.pkts = 0; 2104 txq->stats.tx_cso = 0; 2105 txq->stats.coal_wr = 0; 2106 txq->stats.vlan_ins = 0; 2107 txq->stats.tx_bytes = 0; 2108 txq->stats.coal_pkts = 0; 2109 txq->stats.mapping_err = 0; 2110 txq->flags |= EQ_STOPPED; 2111 txq->eth_dev = eth_dev; 2112 txq->data = eth_dev->data; 2113 t4_os_lock_init(&txq->txq_lock); 2114 return 0; 2115 } 2116 2117 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, 2118 struct rte_eth_dev *eth_dev, uint16_t queue_id, 2119 unsigned int iqid, int socket_id) 2120 { 2121 int ret, nentries; 2122 struct fw_eq_ctrl_cmd c; 2123 struct sge *s = &adap->sge; 2124 struct port_info *pi = eth_dev->data->dev_private; 2125 2126 /* Add status entries */ 2127 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); 2128 2129 txq->q.desc = alloc_ring(eth_dev, "ctrl_tx_ring", queue_id, 2130 socket_id, txq->q.size, sizeof(struct tx_desc), 2131 0, 0, &txq->q.phys_addr, NULL); 2132 if (!txq->q.desc) 2133 return -ENOMEM; 2134 2135 memset(&c, 0, sizeof(c)); 2136 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 2137 F_FW_CMD_WRITE | F_FW_CMD_EXEC | 2138 V_FW_EQ_CTRL_CMD_PFN(adap->pf) | 2139 V_FW_EQ_CTRL_CMD_VFN(0)); 2140 
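	/* Firmware command lengths are expressed in 16-byte units, which is
	 * why sizeof(c) / 16 is folded into the ALLOC/EQSTART word below,
	 * just as it was for the Ethernet and ingress queue commands above.
	 */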
c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC | 2141 F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16)); 2142 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0)); 2143 c.physeqid_pkd = htonl(0); 2144 c.fetchszm_to_iqid = 2145 htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 2146 V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | 2147 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid)); 2148 c.dcaen_to_eqsize = 2149 htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 2150 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 2151 V_FW_EQ_CTRL_CMD_EQSIZE(nentries)); 2152 c.eqaddr = cpu_to_be64(txq->q.phys_addr); 2153 2154 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); 2155 if (ret) { 2156 txq->q.desc = NULL; 2157 return ret; 2158 } 2159 2160 init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)), 2161 G_FW_EQ_CTRL_CMD_EQID(ntohl(c.physeqid_pkd))); 2162 txq->adapter = adap; 2163 txq->full = 0; 2164 return 0; 2165 } 2166 2167 static void free_txq(struct sge_txq *q) 2168 { 2169 q->cntxt_id = 0; 2170 q->sdesc = NULL; 2171 q->desc = NULL; 2172 } 2173 2174 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, 2175 struct sge_fl *fl) 2176 { 2177 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; 2178 2179 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 2180 rq->cntxt_id, fl_id, 0xffff); 2181 rq->cntxt_id = 0; 2182 rq->abs_id = 0; 2183 rq->desc = NULL; 2184 2185 if (fl) { 2186 free_rx_bufs(fl, fl->avail); 2187 rte_free(fl->sdesc); 2188 fl->sdesc = NULL; 2189 fl->cntxt_id = 0; 2190 fl->desc = NULL; 2191 } 2192 } 2193 2194 /* 2195 * Clear all queues of the port 2196 * 2197 * Note: This function must only be called after the Rx and Tx paths 2198 * of the port have been disabled. 2199 */ 2200 void t4_sge_eth_clear_queues(struct port_info *pi) 2201 { 2202 struct adapter *adap = pi->adapter; 2203 struct sge_eth_rxq *rxq; 2204 struct sge_eth_txq *txq; 2205 int i; 2206 2207 rxq = &adap->sge.ethrxq[pi->first_rxqset]; 2208 for (i = 0; i < pi->n_rx_qsets; i++, rxq++) { 2209 if (rxq->rspq.desc) 2210 t4_sge_eth_rxq_stop(adap, rxq); 2211 } 2212 2213 txq = &adap->sge.ethtxq[pi->first_txqset]; 2214 for (i = 0; i < pi->n_tx_qsets; i++, txq++) { 2215 if (txq->q.desc) { 2216 struct sge_txq *q = &txq->q; 2217 2218 t4_sge_eth_txq_stop(txq); 2219 reclaim_completed_tx(q); 2220 free_tx_desc(q, q->size); 2221 q->equeidx = q->pidx; 2222 } 2223 } 2224 } 2225 2226 void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq) 2227 { 2228 if (rxq->rspq.desc) { 2229 t4_sge_eth_rxq_stop(adap, rxq); 2230 free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ?
&rxq->fl : NULL); 2231 } 2232 } 2233 2234 void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq) 2235 { 2236 if (txq->q.desc) { 2237 t4_sge_eth_txq_stop(txq); 2238 reclaim_completed_tx(&txq->q); 2239 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id); 2240 free_tx_desc(&txq->q, txq->q.size); 2241 rte_free(txq->q.sdesc); 2242 free_txq(&txq->q); 2243 } 2244 } 2245 2246 void t4_sge_eth_release_queues(struct port_info *pi) 2247 { 2248 struct adapter *adap = pi->adapter; 2249 struct sge_eth_rxq *rxq; 2250 struct sge_eth_txq *txq; 2251 unsigned int i; 2252 2253 rxq = &adap->sge.ethrxq[pi->first_rxqset]; 2254 /* clean up Ethernet Tx/Rx queues */ 2255 for (i = 0; i < pi->n_rx_qsets; i++, rxq++) { 2256 /* Free only the queues allocated */ 2257 if (rxq->rspq.desc) { 2258 t4_sge_eth_rxq_release(adap, rxq); 2259 rte_eth_dma_zone_free(rxq->rspq.eth_dev, "fl_ring", i); 2260 rte_eth_dma_zone_free(rxq->rspq.eth_dev, "rx_ring", i); 2261 rxq->rspq.eth_dev = NULL; 2262 } 2263 } 2264 2265 txq = &adap->sge.ethtxq[pi->first_txqset]; 2266 for (i = 0; i < pi->n_tx_qsets; i++, txq++) { 2267 /* Free only the queues allocated */ 2268 if (txq->q.desc) { 2269 t4_sge_eth_txq_release(adap, txq); 2270 rte_eth_dma_zone_free(txq->eth_dev, "tx_ring", i); 2271 txq->eth_dev = NULL; 2272 } 2273 } 2274 } 2275 2276 void t4_sge_tx_monitor_start(struct adapter *adap) 2277 { 2278 rte_eal_alarm_set(50, tx_timer_cb, (void *)adap); 2279 } 2280 2281 void t4_sge_tx_monitor_stop(struct adapter *adap) 2282 { 2283 rte_eal_alarm_cancel(tx_timer_cb, (void *)adap); 2284 } 2285 2286 /** 2287 * t4_free_sge_resources - free SGE resources 2288 * @adap: the adapter 2289 * 2290 * Frees resources used by the SGE queue sets. 2291 */ 2292 void t4_free_sge_resources(struct adapter *adap) 2293 { 2294 unsigned int i; 2295 2296 /* clean up control Tx queues */ 2297 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { 2298 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; 2299 2300 if (cq->q.desc) { 2301 reclaim_completed_tx_imm(&cq->q); 2302 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, 2303 cq->q.cntxt_id); 2304 rte_eth_dma_zone_free(adap->eth_dev, "ctrl_tx_ring", i); 2305 rte_mempool_free(cq->mb_pool); 2306 free_txq(&cq->q); 2307 } 2308 } 2309 2310 /* clean up firmware event queue */ 2311 if (adap->sge.fw_evtq.desc) { 2312 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); 2313 rte_eth_dma_zone_free(adap->eth_dev, "fwq_ring", 0); 2314 } 2315 } 2316 2317 /** 2318 * t4_sge_init - initialize SGE 2319 * @adap: the adapter 2320 * 2321 * Performs SGE initialization needed every time after a chip reset. 2322 * We do not initialize any of the queues here; instead, the driver 2323 * top-level must request those individually. 2324 * 2325 * Called in two different modes: 2326 * 2327 * 1. Perform actual hardware initialization and record hard-coded 2328 * parameters which were used. This gets used when we're the 2329 * Master PF and the Firmware Configuration File support didn't 2330 * work for some reason. 2331 * 2332 * 2. We're not the Master PF or initialization was performed with 2333 * a Firmware Configuration File. In this case we need to grab 2334 * any of the SGE operating parameters that we need to have in 2335 * order to do our job and make sure we can live with them ...
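 *
 * Mode 2 is the read-back path: t4_sge_init_soft() below pulls the
 * Free List buffer sizes, interrupt holdoff timers and counter
 * thresholds out of the SGE registers instead of programming them.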
2336 */ 2337 static int t4_sge_init_soft(struct adapter *adap) 2338 { 2339 struct sge *s = &adap->sge; 2340 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; 2341 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; 2342 u32 ingress_rx_threshold; 2343 2344 /* 2345 * Verify that CPL messages are going to the Ingress Queue for 2346 * process_responses() and that only packet data is going to the 2347 * Free Lists. 2348 */ 2349 if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) != 2350 V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) { 2351 dev_err(adap, "bad SGE CPL MODE\n"); 2352 return -EINVAL; 2353 } 2354 2355 /* 2356 * Validate the Host Buffer Register Array indices that we want to 2357 * use ... 2358 * 2359 * XXX Note that we should really read through the Host Buffer Size 2360 * XXX register array and find the indices of the Buffer Sizes which 2361 * XXX meet our needs! 2362 */ 2363 #define READ_FL_BUF(x) \ 2364 t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32)) 2365 2366 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); 2367 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); 2368 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); 2369 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); 2370 2371 /* 2372 * We only bother using the Large Page logic if the Large Page Buffer 2373 * is larger than our Page Size Buffer. 2374 */ 2375 if (fl_large_pg <= fl_small_pg) 2376 fl_large_pg = 0; 2377 2378 #undef READ_FL_BUF 2379 2380 /* 2381 * The Page Size Buffer must be exactly equal to our Page Size and the 2382 * Large Page Size Buffer should be 0 (per above) or a power of 2. 2383 */ 2384 if (fl_small_pg != CXGBE_PAGE_SIZE || 2385 (fl_large_pg & (fl_large_pg - 1)) != 0) { 2386 dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n", 2387 fl_small_pg, fl_large_pg); 2388 return -EINVAL; 2389 } 2390 if (fl_large_pg) 2391 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; 2392 2393 if (adap->use_unpacked_mode) { 2394 int err = 0; 2395 2396 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) { 2397 dev_err(adap, "bad SGE FL small MTU %d\n", 2398 fl_small_mtu); 2399 err = -EINVAL; 2400 } 2401 if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { 2402 dev_err(adap, "bad SGE FL large MTU %d\n", 2403 fl_large_mtu); 2404 err = -EINVAL; 2405 } 2406 if (err) 2407 return err; 2408 } 2409 2410 /* 2411 * Retrieve our RX interrupt holdoff timer values and counter 2412 * threshold values from the SGE parameters. 
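 * The timer registers hold core-clock ticks; core_ticks_to_us()
 * below converts each field to microseconds before it is cached
 * in s->timer_val[].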
2413 */ 2414 timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1); 2415 timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3); 2416 timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5); 2417 s->timer_val[0] = core_ticks_to_us(adap, 2418 G_TIMERVALUE0(timer_value_0_and_1)); 2419 s->timer_val[1] = core_ticks_to_us(adap, 2420 G_TIMERVALUE1(timer_value_0_and_1)); 2421 s->timer_val[2] = core_ticks_to_us(adap, 2422 G_TIMERVALUE2(timer_value_2_and_3)); 2423 s->timer_val[3] = core_ticks_to_us(adap, 2424 G_TIMERVALUE3(timer_value_2_and_3)); 2425 s->timer_val[4] = core_ticks_to_us(adap, 2426 G_TIMERVALUE4(timer_value_4_and_5)); 2427 s->timer_val[5] = core_ticks_to_us(adap, 2428 G_TIMERVALUE5(timer_value_4_and_5)); 2429 2430 ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD); 2431 s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold); 2432 s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold); 2433 s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold); 2434 s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold); 2435 2436 return 0; 2437 } 2438 2439 int t4_sge_init(struct adapter *adap) 2440 { 2441 struct sge *s = &adap->sge; 2442 u32 sge_control, sge_conm_ctrl; 2443 int ret, egress_threshold; 2444 2445 /* 2446 * Ingress Padding Boundary and Egress Status Page Size are set up by 2447 * t4_fixup_host_params(). 2448 */ 2449 sge_control = t4_read_reg(adap, A_SGE_CONTROL); 2450 s->pktshift = G_PKTSHIFT(sge_control); 2451 s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64; 2452 s->fl_align = t4_fl_pkt_align(adap); 2453 ret = t4_sge_init_soft(adap); 2454 if (ret < 0) { 2455 dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n", 2456 __func__, -ret); 2457 return ret; 2458 } 2459 2460 /* 2461 * A FL with <= fl_starve_thres buffers is starving and a periodic 2462 * timer will attempt to refill it. This needs to be larger than the 2463 * SGE's Egress Congestion Threshold. If it isn't, then we can get 2464 * stuck waiting for new packets while the SGE is waiting for us to 2465 * give it more Free List entries. (Note that the SGE's Egress 2466 * Congestion Threshold is in units of 2 Free List pointers.) For T4, 2467 * there was only a single field to control this. For T5 there's the 2468 * original field which now only applies to Unpacked Mode Free List 2469 * buffers and a new field which only applies to Packed Mode Free List 2470 * buffers. 
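 *
 * Worked example (register value assumed purely for illustration):
 * if the selected threshold field reads 16, the SGE flags congestion
 * at 2 * 16 = 32 Free List pointers, and the computation below sets
 * fl_starve_thres to 2 * 16 + 1 = 33 buffers.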
2471 */ 2472 sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL); 2473 if (is_t4(adap->params.chip) || adap->use_unpacked_mode) 2474 egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl); 2475 else 2476 egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl); 2477 s->fl_starve_thres = 2 * egress_threshold + 1; 2478 2479 return 0; 2480 } 2481 2482 int t4vf_sge_init(struct adapter *adap) 2483 { 2484 struct sge_params *sge_params = &adap->params.sge; 2485 u32 sge_ingress_queues_per_page; 2486 u32 sge_egress_queues_per_page; 2487 u32 sge_control, sge_control2; 2488 u32 fl_small_pg, fl_large_pg; 2489 u32 sge_ingress_rx_threshold; 2490 u32 sge_timer_value_0_and_1; 2491 u32 sge_timer_value_2_and_3; 2492 u32 sge_timer_value_4_and_5; 2493 u32 sge_congestion_control; 2494 struct sge *s = &adap->sge; 2495 unsigned int s_hps, s_qpp; 2496 u32 sge_host_page_size; 2497 u32 params[7], vals[7]; 2498 int v; 2499 2500 /* query basic params from fw */ 2501 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2502 V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL)); 2503 params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2504 V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE)); 2505 params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2506 V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0)); 2507 params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2508 V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1)); 2509 params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2510 V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1)); 2511 params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2512 V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3)); 2513 params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2514 V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5)); 2515 v = t4vf_query_params(adap, 7, params, vals); 2516 if (v != FW_SUCCESS) 2517 return v; 2518 2519 sge_control = vals[0]; 2520 sge_host_page_size = vals[1]; 2521 fl_small_pg = vals[2]; 2522 fl_large_pg = vals[3]; 2523 sge_timer_value_0_and_1 = vals[4]; 2524 sge_timer_value_2_and_3 = vals[5]; 2525 sge_timer_value_4_and_5 = vals[6]; 2526 2527 /* 2528 * Start by vetting the basic SGE parameters which have been set up by 2529 * the Physical Function Driver. 2530 */ 2531 2532 /* We only bother using the Large Page logic if the Large Page Buffer 2533 * is larger than our Page Size Buffer. 2534 */ 2535 if (fl_large_pg <= fl_small_pg) 2536 fl_large_pg = 0; 2537 2538 /* The Page Size Buffer must be exactly equal to our Page Size and the 2539 * Large Page Size Buffer should be 0 (per above) or a power of 2. 
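 * The (fl_large_pg & (fl_large_pg - 1)) test below is the usual
 * power-of-two check: it evaluates to zero only when at most one
 * bit is set, which also accepts the fl_large_pg == 0 case above.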
2540 */ 2541 if (fl_small_pg != CXGBE_PAGE_SIZE || 2542 (fl_large_pg & (fl_large_pg - 1)) != 0) { 2543 dev_err(adap, "bad SGE FL buffer sizes [%d, %d]\n", 2544 fl_small_pg, fl_large_pg); 2545 return -EINVAL; 2546 } 2547 2548 if ((sge_control & F_RXPKTCPLMODE) != 2549 V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) { 2550 dev_err(adap, "bad SGE CPL MODE\n"); 2551 return -EINVAL; 2552 } 2553 2554 2555 /* Grab the ingress packing boundary from SGE_CONTROL2. */ 2556 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2557 V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2)); 2558 v = t4vf_query_params(adap, 1, params, vals); 2559 if (v != FW_SUCCESS) { 2560 dev_err(adap, "Unable to get SGE Control2; " 2561 "probably old firmware.\n"); 2562 return v; 2563 } 2564 sge_control2 = vals[0]; 2565 2566 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2567 V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD)); 2568 params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2569 V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL)); 2570 v = t4vf_query_params(adap, 2, params, vals); 2571 if (v != FW_SUCCESS) 2572 return v; 2573 sge_ingress_rx_threshold = vals[0]; 2574 sge_congestion_control = vals[1]; 2575 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2576 V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF)); 2577 params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 2578 V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF)); 2579 v = t4vf_query_params(adap, 2, params, vals); 2580 if (v != FW_SUCCESS) { 2581 dev_warn(adap, "Unable to get VF SGE Queues/Page; " 2582 "probably old firmware.\n"); 2583 return v; 2584 } 2585 sge_egress_queues_per_page = vals[0]; 2586 sge_ingress_queues_per_page = vals[1]; 2587 2588 /* 2589 * We need the Host Page Size and Queues/Page for our VF. These are 2590 * based on the PF from which we're instantiated and are indexed in 2591 * the registers we just read. 2592 */ 2593 s_hps = (S_HOSTPAGESIZEPF0 + 2594 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf); 2595 sge_params->hps = 2596 ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0); 2597 2598 s_qpp = (S_QUEUESPERPAGEPF0 + 2599 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf); 2600 sge_params->eq_qpp = 2601 ((sge_egress_queues_per_page >> s_qpp) 2602 & M_QUEUESPERPAGEPF0); 2603 sge_params->iq_qpp = 2604 ((sge_ingress_queues_per_page >> s_qpp) 2605 & M_QUEUESPERPAGEPF0); 2606 2607 /* 2608 * Now translate the queried parameters into our internal forms. 2609 */ 2610 if (fl_large_pg) 2611 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; 2612 s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE) 2613 ? 128 : 64); 2614 s->pktshift = G_PKTSHIFT(sge_control); 2615 s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2); 2616 2617 /* 2618 * A FL with <= fl_starve_thres buffers is starving and a periodic 2619 * timer will attempt to refill it. This needs to be larger than the 2620 * SGE's Egress Congestion Threshold. If it isn't, then we can get 2621 * stuck waiting for new packets while the SGE is waiting for us to 2622 * give it more Free List entries. (Note that the SGE's Egress 2623 * Congestion Threshold is in units of 2 Free List pointers.)
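 * The field that holds the threshold moved between chip generations,
 * hence the switch below; in each case the raw value is doubled and
 * incremented by one, matching the PF-side computation in
 * t4_sge_init().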
2624 */ 2625 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { 2626 case CHELSIO_T5: 2627 s->fl_starve_thres = 2628 G_EGRTHRESHOLDPACKING(sge_congestion_control); 2629 break; 2630 case CHELSIO_T6: 2631 default: 2632 s->fl_starve_thres = 2633 G_T6_EGRTHRESHOLDPACKING(sge_congestion_control); 2634 break; 2635 } 2636 s->fl_starve_thres = s->fl_starve_thres * 2 + 1; 2637 2638 /* 2639 * Save RX interrupt holdoff timer values and counter 2640 * threshold values from the SGE parameters. 2641 */ 2642 s->timer_val[0] = core_ticks_to_us(adap, 2643 G_TIMERVALUE0(sge_timer_value_0_and_1)); 2644 s->timer_val[1] = core_ticks_to_us(adap, 2645 G_TIMERVALUE1(sge_timer_value_0_and_1)); 2646 s->timer_val[2] = core_ticks_to_us(adap, 2647 G_TIMERVALUE2(sge_timer_value_2_and_3)); 2648 s->timer_val[3] = core_ticks_to_us(adap, 2649 G_TIMERVALUE3(sge_timer_value_2_and_3)); 2650 s->timer_val[4] = core_ticks_to_us(adap, 2651 G_TIMERVALUE4(sge_timer_value_4_and_5)); 2652 s->timer_val[5] = core_ticks_to_us(adap, 2653 G_TIMERVALUE5(sge_timer_value_4_and_5)); 2654 s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold); 2655 s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold); 2656 s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold); 2657 s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold); 2658 return 0; 2659 } 2660