Lines Matching refs:nm_txq

213 alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx)
225 rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map,
226 &nm_txq->ba, (void **)&nm_txq->desc);
230 nm_txq->pidx = nm_txq->cidx = 0;
231 nm_txq->sidx = na->num_tx_desc;
232 nm_txq->nid = idx;
233 nm_txq->iqidx = iqidx;
234 nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
238 nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
240 nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
241 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;
249 &nm_txq->cntxt_id, 0, "SGE context id of the queue");
251 &nm_txq->cidx, 0, "consumer index");
253 &nm_txq->pidx, 0, "producer index");
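Lines 213-253 are the software-side setup: alloc_ring() provides the DMA descriptor ring, the producer and consumer indices start at zero, sidx records the ring size taken from the netmap adapter (na->num_tx_desc), nid and iqidx tie the queue to its netmap ring and to the companion nm_rxq that will see its completions, the CPL_TX_PKT control word and the work-request opcode (one of FW_ETH_TX_PKTS2_WR or FW_ETH_TX_PKTS_WR, lines 238-240) are precomputed, and cntxt_id stays invalid until the hardware queue is created. A minimal sketch of that state, using a simplified stand-in struct (field names follow the driver; the sentinel value is an assumption, and the struct is reused by the sketches below):

    #include <stdint.h>
    #include <string.h>

    #define INVALID_NM_TXQ_CNTXT_ID 0xffffffffu     /* assumed sentinel value */

    struct nm_txq_sketch {          /* simplified stand-in for struct sge_nm_txq */
        uint16_t pidx;              /* next descriptor the driver writes */
        uint16_t cidx;              /* next descriptor hardware will consume */
        uint16_t sidx;              /* ring size in descriptors */
        uint16_t dbidx;             /* pidx as of the last doorbell (line 457) */
        uint16_t equiqidx;          /* pidx as of the last EQUIQ request */
        uint16_t equeqidx;          /* pidx as of the last EQUEQ request */
        uint32_t cntxt_id;          /* SGE egress context id, once created */
        int      nid;               /* netmap tx ring served by this queue */
        int      iqidx;             /* companion nm_rxq for tx completions */
    };

    static void
    nm_txq_sw_init(struct nm_txq_sketch *q, int num_tx_desc, int idx, int iqidx)
    {
        memset(q, 0, sizeof(*q));
        q->pidx = q->cidx = 0;                  /* ring starts empty (line 230) */
        q->sidx = num_tx_desc;                  /* line 231 */
        q->nid = idx;                           /* line 232 */
        q->iqidx = iqidx;                       /* line 233 */
        q->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;  /* no hardware queue yet (line 241) */
    }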
259 free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
266 if (nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID)
267 free_nm_txq_hwq(vi, nm_txq);
268 MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID);
270 free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba,
271 nm_txq->desc);
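The free path (lines 259-271) undoes that in reverse order: destroy the hardware queue first if one exists, assert that its context id is back to the sentinel, and only then release the DMA ring, so the chip can never DMA into freed memory. A sketch of the ordering, reusing nm_txq_sketch; free_hwq() and free_dma_ring() are hypothetical stand-ins for free_nm_txq_hwq() and free_ring():

    #include <assert.h>

    /* Stubs; the real work happens in free_nm_txq_hwq() and free_ring(). */
    static void free_hwq(struct nm_txq_sketch *q)
    { q->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; }
    static void free_dma_ring(struct nm_txq_sketch *q) { (void)q; }

    static void
    nm_txq_free(struct nm_txq_sketch *q)
    {
        if (q->cntxt_id != INVALID_NM_TXQ_CNTXT_ID)     /* line 266 */
            free_hwq(q);            /* resets cntxt_id to the sentinel */
        assert(q->cntxt_id == INVALID_NM_TXQ_CNTXT_ID); /* line 268 */
        free_dma_ring(q);           /* safe: hardware no longer owns the ring */
    }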
405 alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
414 MPASS(nm_txq->desc != NULL);
417 bzero(nm_txq->desc, len);
424 if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID)
427 c.eqid_pkd = htobe32(V_FW_EQ_ETH_CMD_EQID(nm_txq->cntxt_id));
433 V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
439 c.eqaddr = htobe64(nm_txq->ba);
448 nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
449 cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
451 panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__,
453 sc->sge.eqmap[cntxt_id] = (void *)nm_txq;
455 nm_txq->pidx = nm_txq->cidx = 0;
456 MPASS(nm_txq->sidx == na->num_tx_desc);
457 nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;
459 nm_txq->doorbells = sc->doorbells;
460 if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
461 isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
462 isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
468 udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
469 nm_txq->udb_qid = nm_txq->cntxt_id & mask;
470 if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
471 clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
473 udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
474 nm_txq->udb_qid = 0;
476 nm_txq->udb = (volatile void *)udb;
484 V_FW_PARAMS_PARAM_YZ(nm_txq->cntxt_id);
490 nm_txq->cntxt_id, rc);
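alloc_nm_txq_hwq() (lines 405-490) zeroes the ring and issues an FW_EQ_ETH command to the firmware: an alloc unless a context id from a previous incarnation is being reused (lines 424-427), with the companion nm_rxq's iq as the completion target (line 433) and the ring's bus address (line 439). The EQID the firmware returns becomes cntxt_id; offset by eq_start it indexes sc->sge.eqmap, where the queue pointer is parked for interrupt-time lookup (lines 448-453), with a panic guarding against an id beyond the map. The indices are reset and a doorbell mechanism is chosen. With BAR2 user doorbells, the address math at lines 459-476 works like this: the upper bits of cntxt_id pick a BAR2 page (s_qpp is log2 of egress queues per page), the lower bits pick a 128-byte segment inside it, and a queue whose segment lies beyond the first page loses the write-combined WCWR option. A sketch of that math; the page and segment constants are restated here as assumptions:

    #define SK_PAGE_SHIFT     12                    /* assumed 4 KB doorbell pages */
    #define SK_PAGE_SIZE      (1u << SK_PAGE_SHIFT)
    #define SK_UDBS_SEG_SHIFT 7                     /* assumed 128-byte UDB segments */
    #define SK_UDBS_SEG_SIZE  (1u << SK_UDBS_SEG_SHIFT)

    struct udb_loc {
        uint64_t offset;    /* byte offset of the doorbell from udbs_base */
        uint32_t qid;       /* qid to encode in each doorbell write */
        int      wcwr_ok;   /* may this queue use the WCWR doorbell? */
    };

    static struct udb_loc
    nm_txq_udb_loc(uint32_t cntxt_id, unsigned s_qpp)
    {
        uint32_t mask = (1u << s_qpp) - 1;
        struct udb_loc l = { .wcwr_ok = 1 };

        /* Each group of 2^s_qpp queues shares one BAR2 page (line 468). */
        l.offset = (uint64_t)(cntxt_id >> s_qpp) << SK_PAGE_SHIFT;
        l.qid = cntxt_id & mask;                        /* line 469 */
        if (l.qid >= SK_PAGE_SIZE / SK_UDBS_SEG_SIZE) {
            /* Segment lies past the page: the qid must ride in every
             * doorbell write and WCWR is off the table (lines 470-471). */
            l.wcwr_ok = 0;
        } else {
            /* Point straight at this queue's own segment and write
             * qid 0 from now on (lines 473-474). */
            l.offset += (uint64_t)l.qid << SK_UDBS_SEG_SHIFT;
            l.qid = 0;
        }
        return (l);
    }

Finally, lines 484-490 program a per-EQ firmware parameter addressed via FW_PARAMS_PARAM_YZ(cntxt_id) and log the queue's context id if that fails.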
499 free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
504 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
507 nm_txq->cntxt_id, rc);
508 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;
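free_nm_txq_hwq() (lines 499-508) is the inverse: a t4_eth_eq_free() mailbox call releases the egress queue, a failure is logged with the context id, and cntxt_id is reset to the sentinel unconditionally so the software free path will not retry. A sketch, with eq_free_cmd() as a hypothetical stand-in for the mailbox call:

    #include <stdio.h>

    static int eq_free_cmd(uint32_t cntxt_id) { (void)cntxt_id; return (0); } /* stand-in */

    static void
    nm_txq_free_hwq(struct nm_txq_sketch *q)
    {
        int rc = eq_free_cmd(q->cntxt_id);          /* line 504 */

        if (rc != 0)
            fprintf(stderr, "failed to free eq %u: %d\n", q->cntxt_id, rc);
        q->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;      /* invalidate either way (line 508) */
    }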
693 struct sge_nm_txq *nm_txq;
758 for_each_nm_txq(vi, i, nm_txq) {
759 kring = na->tx_rings[nm_txq->nid];
763 alloc_nm_txq_hwq(vi, nm_txq);
782 struct sge_nm_txq *nm_txq;
800 for_each_nm_txq(vi, i, nm_txq) {
801 kring = na->tx_rings[nm_txq->nid];
804 MPASS(nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID);
806 rc = -t4_eth_eq_stop(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
809 "failed to stop nm_txq[%d]: %d.\n", i, rc);
910 ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
913 u_int db = nm_txq->doorbells;
915 MPASS(nm_txq->pidx != nm_txq->dbidx);
917 n = NMIDXDIFF(nm_txq, dbidx);
924 *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
935 KASSERT(nm_txq->udb_qid == 0 && n == 1,
936 ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
937 __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));
939 dst = (volatile void *)((uintptr_t)nm_txq->udb +
941 src = (void *)&nm_txq->desc[nm_txq->dbidx];
942 while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
949 *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
955 V_QID(nm_txq->cntxt_id) | V_PIDX(n));
958 nm_txq->dbidx = nm_txq->pidx;
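ring_nm_txq_db() (lines 910-958) tells the chip about everything written since the last doorbell. n is the wrap-aware distance from dbidx to pidx; a mechanism is then picked from the queue's doorbell bitmap: a plain UDB write of QID|PIDX (line 924); the write-combined WCWR path, legal only when n == 1 and udb_qid == 0 (the KASSERT at lines 935-937) because it copies the single pending descriptor through the WC window (lines 939-942) rather than writing a count; a UDBWC write; or the kernel doorbell register keyed by cntxt_id (line 955). Afterwards dbidx catches up to pidx (line 958). A sketch of the count and the selection, assuming NMIDXDIFF(q, f) is the wrap-aware distance from q->f to q->pidx and that ffs() over the bitmap encodes the preference order; the bit numbering below is illustrative, not the driver's DOORBELL_* values:

    #include <strings.h>    /* ffs() */

    /* Wrap-aware distance from tail to head on a ring of `wrap` slots;
     * assumed to match the driver's IDXDIFF/NMIDXDIFF macros. */
    static inline unsigned
    idxdiff(unsigned head, unsigned tail, unsigned wrap)
    {
        return (head >= tail ? head - tail : wrap - tail + head);
    }

    enum db_kind { DB_UDB, DB_WCWR, DB_UDBWC, DB_KDB };  /* illustrative order */

    static enum db_kind
    choose_doorbell(unsigned avail_bitmap, unsigned ndesc_pending)
    {
        /* WCWR pushes one whole descriptor through the WC window, so it
         * is eligible only when exactly one descriptor is pending
         * (mirrors the clrbit at lines 915-917 and the KASSERT). */
        if (ndesc_pending > 1)
            avail_bitmap &= ~(1u << DB_WCWR);
        /* At least one mechanism (the kernel doorbell) is always set. */
        return ((enum db_kind)(ffs((int)avail_bitmap) - 1));
    }

Here ndesc_pending would be idxdiff(q->pidx, q->dbidx, q->sidx), which the driver asserts is nonzero before ringing (line 915).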
966 cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
972 struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
983 wr = (void *)&nm_txq->desc[nm_txq->pidx];
984 wr->op_pkd = nm_txq->op_pkd;
996 cpl->ctrl0 = nm_txq->cpl_ctrl0;
1017 nm_txq->pidx += npkt_to_ndesc(n);
1018 MPASS(nm_txq->pidx <= nm_txq->sidx);
1019 if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
1025 nm_txq->pidx = 0;
1031 NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
1034 nm_txq->equeqidx = nm_txq->pidx;
1035 nm_txq->equiqidx = nm_txq->pidx;
1036 } else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
1038 nm_txq->equeqidx = nm_txq->pidx;
1040 ring_nm_txq_db(sc, nm_txq);
1043 if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC) {
1044 if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
1046 nm_txq->equeqidx = nm_txq->pidx;
1048 ring_nm_txq_db(sc, nm_txq);
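cxgbe_nm_tx() (lines 966-1048) packs batches of packets into fw_eth_tx_pkts work requests at desc[pidx], stamping each WR with the precomputed op_pkd and each inner CPL with cpl_ctrl0, then advances pidx by npkt_to_ndesc(n) and wraps it to 0 on hitting sidx (lines 1017-1025). The flush policy at lines 1031-1048 then decides what to signal: on the final batch, request an egress-queue interrupt (EQUIQ plus EQUEQ) if at least half the ring has gone by since the last one, or just a hardware cidx update (EQUEQ) if 64 or more descriptors have, and always ring the doorbell; on intermediate batches, ring only once 2 * SGE_MAX_WR_NDESC descriptors are pending, piggybacking an EQUEQ on the same 64-descriptor cadence. A sketch of that policy, reusing idxdiff() and nm_txq_sketch; set_equiq()/set_equeq() are hypothetical stand-ins for OR-ing the EQUIQ/EQUEQ flags into the last WR, ring_db() for ring_nm_txq_db(), and the SGE_MAX_WR_NDESC value is an assumption:

    #define SK_SGE_MAX_WR_NDESC 8   /* assumed: max descriptors per work request */

    static void set_equiq(struct nm_txq_sketch *q) { (void)q; }  /* stand-in */
    static void set_equeq(struct nm_txq_sketch *q) { (void)q; }  /* stand-in */
    static void ring_db(struct nm_txq_sketch *q) { q->dbidx = q->pidx; }

    static void
    tx_flush_policy(struct nm_txq_sketch *q, int last_batch)
    {
        if (last_batch) {
            if (idxdiff(q->pidx, q->equiqidx, q->sidx) >= q->sidx / 2) {
                set_equiq(q);                   /* interrupt, lines 1031-1035 */
                set_equeq(q);
                q->equiqidx = q->equeqidx = q->pidx;
            } else if (idxdiff(q->pidx, q->equeqidx, q->sidx) >= 64) {
                set_equeq(q);                   /* cidx update, lines 1036-1038 */
                q->equeqidx = q->pidx;
            }
            ring_db(q);                         /* line 1040: always ring */
        } else if (idxdiff(q->pidx, q->dbidx, q->sidx) >= 2 * SK_SGE_MAX_WR_NDESC) {
            if (idxdiff(q->pidx, q->equeqidx, q->sidx) >= 64) {
                set_equeq(q);                   /* lines 1044-1046 */
                q->equeqidx = q->pidx;
            }
            ring_db(q);                         /* line 1048 */
        }
    }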
1058 contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
1061 if (nm_txq->cidx > nm_txq->pidx)
1062 return (nm_txq->cidx - nm_txq->pidx - 1);
1063 else if (nm_txq->cidx > 0)
1064 return (nm_txq->sidx - nm_txq->pidx);
1066 return (nm_txq->sidx - nm_txq->pidx - 1);
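contiguous_ndesc_available() (lines 1058-1066) reports how many descriptors can be written without wrapping, always leaving one slot unused so that pidx == cidx means empty rather than full: if the consumer is ahead of the producer the span stops one short of cidx; if the consumer is behind and not at slot 0 the producer may run to the end of the ring and wrap later; if cidx is 0, the last slot must stay free because wrapping pidx onto 0 would look like an empty ring. A standalone restatement:

    static unsigned
    contiguous_avail(unsigned pidx, unsigned cidx, unsigned sidx)
    {
        if (cidx > pidx)
            return (cidx - pidx - 1);   /* stop one short of cidx (line 1062) */
        else if (cidx > 0)
            return (sidx - pidx);       /* run to the end, wrap later (line 1064) */
        else
            return (sidx - pidx - 1);   /* cidx == 0: keep the last slot free */
    }

For example, with sidx = 1024, pidx = 1000 and cidx = 0 this returns 23: writing a 24th descriptor would wrap pidx to 0 == cidx, making a full ring indistinguishable from an empty one.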
1070 reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
1072 struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
1079 while (nm_txq->cidx != hw_cidx) {
1080 wr = (void *)&nm_txq->desc[nm_txq->cidx];
1088 nm_txq->cidx += npkt_to_ndesc(wr->npkt);
1095 MPASS(nm_txq->cidx <= nm_txq->sidx);
1096 if (__predict_false(nm_txq->cidx == nm_txq->sidx))
1097 nm_txq->cidx = 0;
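reclaim_nm_tx_desc() (lines 1070-1097) chases the hardware's consumer index, which the chip publishes in a status page parked just past the last descriptor (struct sge_qstat at desc[sidx], line 1072). From its own cidx the driver reads each work request header, credits the packets it carried, advances cidx by npkt_to_ndesc(wr->npkt), and wraps it at sidx, which a WR never straddles, then returns the number of packets reclaimed. A sketch, reusing nm_txq_sketch; read_npkt() is a hypothetical accessor for the npkt field of the WR at a descriptor index, and npkt_to_ndesc_stub() is a placeholder, not the driver's real conversion:

    static unsigned npkt_to_ndesc_stub(unsigned npkt)
    { return (1 + npkt / 2); }              /* placeholder conversion */
    static unsigned read_npkt(const struct nm_txq_sketch *q, unsigned cidx)
    { (void)q; (void)cidx; return (1); }    /* stand-in for the WR header read */

    static unsigned
    reclaim_sketch(struct nm_txq_sketch *q, unsigned hw_cidx)
    {
        unsigned reclaimed = 0;

        while (q->cidx != hw_cidx) {                /* line 1079 */
            unsigned npkt = read_npkt(q, q->cidx);  /* line 1080 */

            reclaimed += npkt;
            q->cidx += npkt_to_ndesc_stub(npkt);    /* line 1088 */
            assert(q->cidx <= q->sidx);             /* WRs don't wrap (line 1095) */
            if (q->cidx == q->sidx)                 /* lines 1096-1097 */
                q->cidx = 0;
        }
        return (reclaimed);
    }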
1110 struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
1125 reclaimed += reclaim_nm_tx_desc(nm_txq);
1126 ndesc_remaining = contiguous_ndesc_available(nm_txq);
1145 /* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
1147 cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining);
1151 MPASS(nm_txq->dbidx == nm_txq->pidx);
1157 reclaimed += reclaim_nm_tx_desc(nm_txq);
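The txsync tx path (lines 1110-1157) locates the queue from the kring's ring_id (line 1110), then loops: reclaim finished descriptors, measure the contiguous free span, send as many packets as fit, repeat until the netmap batch is exhausted. Since cxgbe_nm_tx() rings the doorbell for everything it writes, dbidx == pidx holds when the loop exits (line 1151), and a final reclaim pass (line 1157) harvests late completions. A structural sketch with hypothetical stand-ins (hw_cidx_snapshot() for the status-page read, pkts_that_fit() for the WR-packing arithmetic, send_batch() for cxgbe_nm_tx()):

    static unsigned hw_cidx_snapshot(const struct nm_txq_sketch *q)
    { return (q->cidx); }                           /* stand-in, line 1072 */
    static unsigned pkts_that_fit(unsigned ndesc, unsigned npkt)
    { return (npkt < ndesc ? npkt : ndesc); }       /* stand-in */
    static void send_batch(struct nm_txq_sketch *q, unsigned n, int last)
    { (void)q; (void)n; (void)last; }               /* stand-in for cxgbe_nm_tx() */

    static void
    txsync_tx_sketch(struct nm_txq_sketch *q, unsigned npkt_remaining)
    {
        unsigned reclaimed = 0;

        while (npkt_remaining > 0) {                /* lines 1125-1147 */
            reclaimed += reclaim_sketch(q, hw_cidx_snapshot(q));
            unsigned ndesc = contiguous_avail(q->pidx, q->cidx, q->sidx);
            unsigned n = pkts_that_fit(ndesc, npkt_remaining);

            /* cxgbe_nm_tx() writes the WRs, advances pidx, updates
             * kring->nr_hwcur, and rings the doorbell (lines 1145-1147). */
            send_batch(q, n, npkt_remaining == n);
            npkt_remaining -= n;
        }
        assert(q->dbidx == q->pidx);    /* line 1151: nothing left unrung */
        reclaimed += reclaim_sketch(q, hw_cidx_snapshot(q));    /* line 1157 */
        (void)reclaimed;                /* drives kring->nr_hwtail forward */
    }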
1317 struct sge_nm_txq *nm_txq;
1321 nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];
1323 netmap_tx_irq(ifp, nm_txq->nid);
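Completion dispatch (lines 1317-1323): an egress-update CPL arriving on the companion nm_rxq carries the EGR_QID of the tx queue; subtracting sc->sge.eq_start turns that into an index into eqmap, recovering the sge_nm_txq pointer stored at line 453, and nm_txq->nid names the netmap tx ring to wake via netmap_tx_irq(). A sketch of that lookup, reusing nm_txq_sketch; wake_tx_ring() is a hypothetical stand-in for netmap_tx_irq():

    static void wake_tx_ring(int nid) { (void)nid; }    /* stand-in */

    static void
    egress_update_sketch(struct nm_txq_sketch **eqmap, uint32_t eq_start,
        uint32_t egr_qid)
    {
        /* eqmap was filled in by alloc_nm_txq_hwq() at line 453. */
        struct nm_txq_sketch *q = eqmap[egr_qid - eq_start];

        wake_tx_ring(q->nid);       /* kick the queue's netmap tx ring */
    }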