Lines Matching defs:vi
125 alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx,
134 struct adapter *sc = vi->adapter;
135 struct netmap_adapter *na = NA(vi->ifp);
139 len = vi->qsize_rxq * IQ_ESIZE;
151 nm_rxq->vi = vi;
154 nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE;
162 ctx = &vi->ctx;
163 children = SYSCTL_CHILDREN(vi->nm_rxq_oid);
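
The fragments at 139 and 154 capture how the netmap ingress queue is sized: the descriptor ring is vi->qsize_rxq entries of IQ_ESIZE bytes, and the usable slot count (iq_sidx) excludes the status page that occupies the last spg_len bytes of the ring. A minimal standalone sketch of that arithmetic, using hypothetical values for IQ_ESIZE, spg_len, and qsize_rxq (the driver takes the real ones from vi->qsize_rxq and sc->params.sge):

    #include <stdio.h>

    /* Hypothetical values; the driver reads the real ones from vi->qsize_rxq
     * and sc->params.sge. */
    #define IQ_ESIZE 64                 /* bytes per ingress-queue descriptor */
    static const int spg_len = 64;      /* status page size in bytes */
    static const int qsize_rxq = 1024;  /* descriptors in the ring */

    int
    main(void)
    {
        size_t len = (size_t)qsize_rxq * IQ_ESIZE;      /* bytes to allocate */
        int iq_sidx = qsize_rxq - spg_len / IQ_ESIZE;   /* last usable slot */

        printf("ring bytes = %zu, usable slots = %d of %d\n",
            len, iq_sidx, qsize_rxq);
        return (0);
    }
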
193 free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
195 struct adapter *sc = vi->adapter;
197 if (!(vi->flags & VI_INIT_DONE))
201 free_nm_rxq_hwq(vi, nm_rxq);
213 alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx)
217 struct port_info *pi = vi->pi;
219 struct netmap_adapter *na = NA(vi->ifp);
222 struct sysctl_oid_list *children = SYSCTL_CHILDREN(vi->nm_txq_oid);
236 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
244 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
248 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
250 SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
252 SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
259 free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
261 struct adapter *sc = vi->adapter;
263 if (!(vi->flags & VI_INIT_DONE))
267 free_nm_txq_hwq(vi, nm_txq);
277 alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
281 struct adapter *sc = vi->adapter;
282 struct port_info *pi = vi->pi;
284 struct netmap_adapter *na = NA(vi->ifp);
293 bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
315 V_FW_IQ_CMD_VIID(vi->viid) |
321 c.iqsize = htobe16(vi->qsize_rxq);
350 MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
390 free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
392 struct adapter *sc = vi->adapter;
405 alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
409 struct adapter *sc = vi->adapter;
410 struct netmap_adapter *na = NA(vi->ifp);
429 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
432 V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
443 device_printf(vi->dev,
488 device_printf(vi->dev,
499 free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
501 struct adapter *sc = vi->adapter;
513 cxgbe_netmap_simple_rss(struct adapter *sc, struct vi_info *vi,
526 for_each_nm_rxq(vi, j, nm_rxq) {
541 rss = vi->rss;
542 defq = vi->rss[0];
544 for (i = 0; i < vi->rss_size;) {
545 for_each_nm_rxq(vi, j, nm_rxq) {
554 vi->nm_rss[i++] = nm_rxq->iq_abs_id;
555 if (i == vi->rss_size)
560 rss = vi->nm_rss;
563 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
564 vi->rss_size);
568 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0);
581 cxgbe_netmap_split_rss(struct adapter *sc, struct vi_info *vi,
592 MPASS(vi->nnmrxq > 1);
594 for_each_nm_rxq(vi, i, nm_rxq) {
595 j = i / ((vi->nnmrxq + 1) / 2);
610 return (cxgbe_netmap_simple_rss(sc, vi, ifp, na));
625 nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq];
626 while (i < vi->rss_size / 2) {
627 for (j = 0; j < (vi->nnmrxq + 1) / 2; j++) {
639 vi->nm_rss[i++] = nm_rxq[j].iq_abs_id;
640 if (i == vi->rss_size / 2)
644 while (i < vi->rss_size) {
645 for (j = (vi->nnmrxq + 1) / 2; j < vi->nnmrxq; j++) {
657 vi->nm_rss[i++] = nm_rxq[j].iq_abs_id;
658 if (i == vi->rss_size)
663 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
664 vi->nm_rss, vi->rss_size);
668 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0);
676 cxgbe_netmap_rss(struct adapter *sc, struct vi_info *vi, if_t ifp,
680 if (nm_split_rss == 0 || vi->nnmrxq == 1)
681 return (cxgbe_netmap_simple_rss(sc, vi, ifp, na));
683 return (cxgbe_netmap_split_rss(sc, vi, ifp, na));
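
The RSS helpers above differ only in how the indirection table is filled before it is written with t4_config_rss_range() and the VI hash configuration with t4_config_vi_rss(): the simple path cycles every netmap rx queue across the whole table, while the split path (taken when nm_split_rss is set and nnmrxq > 1) gives the first half of the table to the first (nnmrxq + 1) / 2 queues and the second half to the rest. A hedged userland sketch of just the table-fill logic, with plain arrays of queue ids standing in for the driver's sge_nm_rxq structures and with the driver's checks for inactive queues omitted:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Fill an RSS indirection table of rss_size slots by cycling through
     * nq queue ids: the "simple" layout.  Assumes nq > 0.
     */
    static void
    fill_rss_simple(uint16_t *rss, int rss_size, const uint16_t *qid, int nq)
    {
        int i, j;

        for (i = 0; i < rss_size;) {
            for (j = 0; j < nq; j++) {
                rss[i++] = qid[j];
                if (i == rss_size)
                    break;
            }
        }
    }

    /*
     * "Split" layout: the first half of the table uses the first
     * (nq + 1) / 2 queues, the second half uses the rest.  Assumes nq > 1,
     * as the driver asserts for this path.
     */
    static void
    fill_rss_split(uint16_t *rss, int rss_size, const uint16_t *qid, int nq)
    {
        int i = 0, j;

        while (i < rss_size / 2) {
            for (j = 0; j < (nq + 1) / 2; j++) {
                rss[i++] = qid[j];
                if (i == rss_size / 2)
                    break;
            }
        }
        while (i < rss_size) {
            for (j = (nq + 1) / 2; j < nq; j++) {
                rss[i++] = qid[j];
                if (i == rss_size)
                    break;
            }
        }
    }

    static void
    print_table(const char *tag, const uint16_t *rss, int n)
    {
        int i;

        printf("%s:", tag);
        for (i = 0; i < n; i++)
            printf(" %d", rss[i]);
        printf("\n");
    }

    int
    main(void)
    {
        uint16_t qid[] = { 10, 11, 12, 13 };    /* stand-ins for iq_abs_id values */
        uint16_t rss[16];

        fill_rss_simple(rss, 16, qid, 4);
        print_table("simple", rss, 16);
        fill_rss_split(rss, 16, qid, 4);
        print_table("split ", rss, 16);
        return (0);
    }
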
687 cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, if_t ifp,
698 MPASS(vi->nnmrxq > 0);
699 MPASS(vi->nnmtxq > 0);
701 if ((vi->flags & VI_INIT_DONE) == 0 ||
728 for_each_nm_rxq(vi, i, nm_rxq) {
733 alloc_nm_rxq_hwq(vi, nm_rxq);
758 for_each_nm_txq(vi, i, nm_txq) {
763 alloc_nm_txq_hwq(vi, nm_txq);
768 if (vi->nm_rss == NULL) {
769 vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
773 return (cxgbe_netmap_rss(sc, vi, ifp, na));
777 cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, if_t ifp,
786 MPASS(vi->nnmrxq > 0);
787 MPASS(vi->nnmtxq > 0);
792 if ((vi->flags & VI_INIT_DONE) == 0)
796 rc = cxgbe_netmap_rss(sc, vi, ifp, na);
800 for_each_nm_txq(vi, i, nm_txq) {
808 device_printf(vi->dev,
818 for_each_nm_rxq(vi, i, nm_rxq) {
831 device_printf(vi->dev,
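
cxgbe_netmap_on (687) and cxgbe_netmap_off (777) bracket the mode switch: on allocates a hardware queue for every netmap rx and tx queue, allocates vi->nm_rss on first use, and finishes through cxgbe_netmap_rss(); off also runs the RSS helper and then walks the netmap tx and rx queues to wind them down. A hedged outline of the enable path only, with hypothetical stub types and functions standing in for the driver's:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the per-VI state the fragments touch. */
    struct fake_vi {
        int nnmrxq, nnmtxq;
        int rss_size;
        uint16_t *nm_rss;
    };

    static void alloc_rx_hwq(int i) { printf("alloc rx hw queue %d\n", i); }
    static void alloc_tx_hwq(int i) { printf("alloc tx hw queue %d\n", i); }
    static void program_rss(struct fake_vi *vi) { printf("program RSS over %d slots\n", vi->rss_size); }

    /*
     * Enable-path ordering modeled on the fragments above: rx hardware
     * queues, then tx hardware queues, then the (lazily allocated) RSS
     * table is programmed.
     */
    static void
    netmap_on_sketch(struct fake_vi *vi)
    {
        int i;

        for (i = 0; i < vi->nnmrxq; i++)
            alloc_rx_hwq(i);
        for (i = 0; i < vi->nnmtxq; i++)
            alloc_tx_hwq(i);
        if (vi->nm_rss == NULL)
            vi->nm_rss = calloc(vi->rss_size, sizeof(uint16_t));
        program_rss(vi);
    }

    int
    main(void)
    {
        struct fake_vi vi = { .nnmrxq = 2, .nnmtxq = 2, .rss_size = 8, .nm_rss = NULL };

        netmap_on_sketch(&vi);
        free(vi.nm_rss);
        return (0);
    }
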
854 struct vi_info *vi = if_getsoftc(ifp);
855 struct adapter *sc = vi->adapter;
858 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
862 rc = cxgbe_netmap_on(sc, vi, ifp, na);
864 rc = cxgbe_netmap_off(sc, vi, ifp, na);
1108 struct vi_info *vi = if_getsoftc(ifp);
1109 struct adapter *sc = vi->adapter;
1110 struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
1172 struct vi_info *vi = if_getsoftc(ifp);
1173 struct adapter *sc = vi->adapter;
1174 struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id];
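
In the txsync/rxsync fragments (1110, 1174) the netmap kring's ring_id is relative to the VI, so the driver adds vi->first_nm_txq / vi->first_nm_rxq to reach the right entry in the adapter-wide queue arrays. A tiny sketch of that mapping with a hypothetical array:

    #include <stdio.h>

    #define NQ 8    /* hypothetical size of the adapter-wide queue array */

    struct fake_txq { int id; };

    int
    main(void)
    {
        struct fake_txq nm_txq[NQ];     /* shared by every VI on the adapter */
        int first_nm_txq = 4;           /* this VI's first queue in the array */
        int ring_id = 1;                /* netmap kring index, relative to the VI */
        struct fake_txq *q;
        int i;

        for (i = 0; i < NQ; i++)
            nm_txq[i].id = i;

        /* kring ring_id -> adapter-wide queue, as in the sync fragments. */
        q = &nm_txq[first_nm_txq + ring_id];
        printf("ring %d maps to adapter queue %d\n", ring_id, q->id);
        return (0);
    }
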
1256 cxgbe_nm_attach(struct vi_info *vi)
1262 MPASS(vi->nnmrxq > 0);
1263 MPASS(vi->ifp != NULL);
1265 pi = vi->pi;
1270 na.ifp = vi->ifp;
1274 na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;
1282 na.num_rx_desc = rounddown(vi->qsize_rxq, 8);
1286 na.num_tx_rings = vi->nnmtxq;
1287 na.num_rx_rings = vi->nnmrxq;
1293 cxgbe_nm_detach(struct vi_info *vi)
1296 MPASS(vi->nnmrxq > 0);
1297 MPASS(vi->ifp != NULL);
1299 netmap_detach(vi->ifp);
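
cxgbe_nm_attach (1256) sizes the netmap rings from the VI's queue sizes: the tx ring drops the slots taken by the status page (spg_len / EQ_ESIZE) and the rx ring is rounded down to a multiple of 8. A small sketch of those two computations, again with hypothetical values for the SGE parameters:

    #include <stdio.h>

    /* Hypothetical values; the driver uses vi->qsize_txq, vi->qsize_rxq and
     * sc->params.sge. */
    #define EQ_ESIZE 64                 /* bytes per egress-queue descriptor */
    static const int spg_len = 64;      /* status page size in bytes */
    static const int qsize_txq = 1024;
    static const int qsize_rxq = 1024;

    /* Same shape as FreeBSD's rounddown(): largest multiple of y <= x. */
    #define rounddown(x, y) (((x) / (y)) * (y))

    int
    main(void)
    {
        int num_tx_desc = qsize_txq - spg_len / EQ_ESIZE;   /* tx slots minus status page */
        int num_rx_desc = rounddown(qsize_rxq, 8);          /* rx slots, multiple of 8 */

        printf("netmap tx slots = %d, rx slots = %d\n", num_tx_desc, num_rx_desc);
        return (0);
    }
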
1329 struct vi_info *vi = nm_rxq->vi;
1330 struct adapter *sc = vi->adapter;
1331 if_t ifp = vi->ifp;