Lines Matching defs:rxq

266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
461 WM_Q_EVCNT_DEFINE(rxq, intr); /* Interrupts */
462 WM_Q_EVCNT_DEFINE(rxq, defer); /* Rx deferred processing */
463 WM_Q_EVCNT_DEFINE(rxq, ipsum); /* IP checksums checked */
464 WM_Q_EVCNT_DEFINE(rxq, tusum); /* TCP/UDP checksums checked */
465 WM_Q_EVCNT_DEFINE(rxq, qdrop); /* Rx packets dropped by the queue */
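
The five counters above are declared through a token-pasting macro, so the same name appears in the DEFINE, INCR, ADD, STORE, ATTACH, and DETACH sites seen later in this listing. A minimal userland sketch of that idiom follows; the field names and macro bodies are assumptions, since the listing does not show how WM_Q_EVCNT_DEFINE actually expands.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical model of the per-queue counter idiom: one field per
 * event, reached by token pasting, so call sites read
 * Q_EVCNT_INCR(q, ipsum) instead of indexing an array.  Field names
 * and macro bodies are assumptions, not the driver's definitions. */
struct rxq_counters {
        uint64_t ev_intr;       /* interrupts */
        uint64_t ev_defer;      /* deferred processing runs */
        uint64_t ev_ipsum;      /* IP checksums checked */
        uint64_t ev_tusum;      /* TCP/UDP checksums checked */
        uint64_t ev_qdrop;      /* packets dropped by the queue */
};

#define Q_EVCNT_INCR(q, name)     ((q)->ev_##name++)
#define Q_EVCNT_ADD(q, name, n)   ((q)->ev_##name += (n))
#define Q_EVCNT_STORE(q, name, n) ((q)->ev_##name = (n))

int
main(void)
{
        struct rxq_counters c = { 0 };

        Q_EVCNT_INCR(&c, intr);
        Q_EVCNT_ADD(&c, qdrop, 3);
        printf("intr=%llu qdrop=%llu\n",
            (unsigned long long)c.ev_intr, (unsigned long long)c.ev_qdrop);
        return 0;
}
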
743 #define WM_RXCHAIN_RESET(rxq) \
745 (rxq)->rxq_tailp = &(rxq)->rxq_head; \
746 *(rxq)->rxq_tailp = NULL; \
747 (rxq)->rxq_len = 0; \
750 #define WM_RXCHAIN_LINK(rxq, m) \
752 *(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \
753 (rxq)->rxq_tailp = &(m)->m_next; \
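
These two macros keep the in-progress packet as a singly linked fragment chain appended through a pointer-to-pointer tail, so linking needs no empty/non-empty branch. A self-contained sketch of the idiom, with struct frag standing in for struct mbuf and invented field names:

#include <stdio.h>
#include <stddef.h>

struct frag {
        struct frag *f_next;
        int          f_len;
};

struct chainq {
        struct frag  *q_head;   /* first fragment of the packet */
        struct frag  *q_tail;   /* last fragment linked so far */
        struct frag **q_tailp;  /* where the next f_next pointer goes */
        int           q_len;    /* running byte count */
};

/* Equivalent of WM_RXCHAIN_RESET(): point q_tailp at q_head so the
 * first link lands there without a special case. */
static void
chain_reset(struct chainq *q)
{
        q->q_tailp = &q->q_head;
        *q->q_tailp = NULL;
        q->q_len = 0;
}

/* Equivalent of WM_RXCHAIN_LINK(): O(1) append through the tail
 * pointer-to-pointer. */
static void
chain_link(struct chainq *q, struct frag *f)
{
        *q->q_tailp = q->q_tail = f;
        q->q_tailp = &f->f_next;
}

int
main(void)
{
        struct chainq q;
        struct frag a = { NULL, 64 }, b = { NULL, 1400 };

        chain_reset(&q);
        chain_link(&q, &a);
        chain_link(&q, &b);
        *q.q_tailp = NULL;      /* terminate, as wm_rxeof() does at 10346 */
        for (struct frag *f = q.q_head; f != NULL; f = f->f_next)
                printf("frag %d bytes\n", f->f_len);
        return 0;
}
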
813 #define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
820 #define WM_CDRXADDR_LO(rxq, x) (WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
821 #define WM_CDRXADDR_HI(rxq, x) \
823 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
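
WM_CDRXOFF/WM_CDRXADDR locate descriptor x at ring base + x * descsize, and the LO/HI macros split that bus address for the 32-bit register pairs written at lines 8440-8458 (RDBAL0/RDBAH0 and RDBAL/RDBAH). The middle line of WM_CDRXADDR_HI (source line 822) is elided from this listing because it does not mention rxq; from the trailing ": 0" it presumably tests sizeof(bus_addr_t) == 8 so 32-bit configurations report a zero high half. A small sketch of the arithmetic, with an invented base address:

#include <stdio.h>
#include <stdint.h>

static uint64_t
cdrx_addr(uint64_t base, uint32_t descsize, int x)
{
        return base + (uint64_t)descsize * (uint32_t)x;
}

int
main(void)
{
        uint64_t base = 0x1bffff000ULL; /* hypothetical ring base */
        uint32_t descsize = 16;         /* legacy descriptors are 16 bytes */
        uint64_t addr = cdrx_addr(base, descsize, 42);
        uint32_t lo = (uint32_t)(addr & 0xffffffffU);
        uint32_t hi = (uint32_t)(addr >> 32);

        printf("desc 42 at %#llx -> lo %#x hi %#x\n",
            (unsigned long long)addr, lo, hi);
        return 0;
}
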
1914 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1916 struct wm_softc *sc = rxq->rxq_sc;
1918 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1919 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1923 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1925 struct wm_softc *sc = rxq->rxq_sc;
1926 struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1946 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1951 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1958 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1968 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1970 CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
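
wm_init_rxdesc() above rewrites one descriptor, syncs it for the device (line 1968), and publishes the slot by writing the receive descriptor tail register (line 1970). A userland model of that handoff; the descriptor layout, the memory-barrier stand-in for bus_dmamap_sync(), and the mock register are all illustrative:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct rxdesc {
        uint64_t rd_addr;       /* DMA address of the receive buffer */
        uint32_t rd_status;     /* device writes DD/EOP bits back here */
};

#define NRXDESC 8
static struct rxdesc ring[NRXDESC];
static volatile uint32_t reg_rdt;       /* mock RDT (receive tail) CSR */

static void
init_rxdesc(int start, uint64_t bufaddr)
{
        struct rxdesc *rxd = &ring[start];

        memset(rxd, 0, sizeof(*rxd));   /* clear stale status bits */
        rxd->rd_addr = bufaddr;
        __sync_synchronize();           /* bus_dmamap_sync() stand-in */
        reg_rdt = (uint32_t)start;      /* CSR_WRITE(sc, rxq_rdt_reg, start) */
}

int
main(void)
{
        for (int i = 0; i < NRXDESC; i++)
                init_rxdesc(i, 0x10000ULL + 2048ULL * i);
        printf("tail register now %u\n", (unsigned)reg_rdt);
        return 0;
}
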
3698 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3699 mutex_enter(rxq->rxq_lock);
3700 wm_rxdrain(rxq);
3701 mutex_exit(rxq->rxq_lock);
5940 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5942 struct wm_softc *sc = rxq->rxq_sc;
5943 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5947 KASSERT(mutex_owned(rxq->rxq_lock));
5984 wm_init_rxdesc(rxq, idx);
5986 wm_init_rxdesc(rxq, idx);
5997 wm_rxdrain(struct wm_rxqueue *rxq)
5999 struct wm_softc *sc = rxq->rxq_sc;
6003 KASSERT(mutex_owned(rxq->rxq_lock));
6006 rxs = &rxq->rxq_soft[i];
6356 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6362 mutex_enter(rxq->rxq_lock);
6363 rxq->rxq_stopping = false;
6364 mutex_exit(rxq->rxq_lock);
6381 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6384 mutex_enter(rxq->rxq_lock);
6385 rxq->rxq_stopping = true;
6386 mutex_exit(rxq->rxq_lock);
6447 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6452 if (rxq->rxq_packets)
6453 avg_size = rxq->rxq_bytes / rxq->rxq_packets;
6488 rxq->rxq_packets = 0;
6489 rxq->rxq_bytes = 0;
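
Lines 6447-6489 sample the average Rx packet size once per interval and reset the running counters, presumably to adapt interrupt moderation. The listing does not show the policy, so the mapping from average size to a moderation setting below is a made-up placeholder; only the guarded division and the counter reset mirror the driver.

#include <stdio.h>
#include <stdint.h>

struct rxq_stats {
        uint64_t packets;
        uint64_t bytes;
};

static unsigned
sample_avg_size(struct rxq_stats *st)
{
        unsigned avg_size = 0;

        if (st->packets)        /* guard the division, as line 6452 does */
                avg_size = (unsigned)(st->bytes / st->packets);
        st->packets = 0;        /* rxq_packets = 0 (line 6488) */
        st->bytes = 0;          /* rxq_bytes = 0 (line 6489) */
        return avg_size;
}

int
main(void)
{
        struct rxq_stats st = { .packets = 100, .bytes = 150000 };
        unsigned avg = sample_avg_size(&st);

        printf("avg %u bytes/packet -> %s moderation\n",
            avg, avg > 1024 ? "more" : "less");     /* placeholder policy */
        return 0;
}
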
6523 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6598 NULL, 0, &rxq->rxq_ptr,
6794 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6804 WM_Q_EVCNT_ADD(rxq, qdrop, rqdpc);
6848 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6850 WM_Q_EVCNT_STORE(rxq, intr, 0);
6851 WM_Q_EVCNT_STORE(rxq, defer, 0);
6852 WM_Q_EVCNT_STORE(rxq, ipsum, 0);
6853 WM_Q_EVCNT_STORE(rxq, tusum, 0);
6855 WM_Q_EVCNT_STORE(rxq, qdrop, 0);
7588 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
7590 mutex_enter(rxq->rxq_lock);
7591 wm_init_rxdesc(rxq, i);
7592 mutex_exit(rxq->rxq_lock);
7740 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7741 mutex_enter(rxq->rxq_lock);
7742 wm_rxdrain(rxq);
7743 mutex_exit(rxq->rxq_lock);
7944 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7957 rxq->rxq_ndesc = WM_NRXDESC;
7959 rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7961 rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7963 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7964 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7967 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7968 1, &rxq->rxq_desc_rseg, 0)) != 0) {
7975 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7976 rxq->rxq_desc_rseg, rxq_descs_size,
7977 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7984 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7991 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7992 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
8002 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
8004 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
8007 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
8013 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
8016 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
8017 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
8018 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
8019 rxq->rxq_descsize * rxq->rxq_ndesc);
8020 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
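
wm_alloc_rx_descs() acquires its DMA resources in four ordered steps (bus_dmamem_alloc, bus_dmamem_map, bus_dmamap_create, bus_dmamap_load), and on failure falls through labels that release only what was already acquired, in reverse order, matching the teardown in wm_free_rx_descs() above. A userland model of that unwind pattern; step()/undo() are hypothetical stand-ins for the bus_dma(9) calls:

#include <stdio.h>
#include <errno.h>

static int
step(const char *what, int error)
{
        printf("acquire %s -> %d\n", what, error);
        return error;
}

static void
undo(const char *what)
{
        printf("release %s\n", what);
}

static int
alloc_rx_descs(int fail_load)
{
        int error;

        if ((error = step("dma memory", 0)) != 0)
                goto fail_0;
        if ((error = step("kva mapping", 0)) != 0)
                goto fail_1;
        if ((error = step("dma map", 0)) != 0)
                goto fail_2;
        if ((error = step("dma load", fail_load ? ENOMEM : 0)) != 0)
                goto fail_3;
        return 0;

 fail_3:
        undo("dma map");        /* bus_dmamap_destroy(), as at 8002 */
 fail_2:
        undo("kva mapping");    /* bus_dmamem_unmap(), as at 8004 */
 fail_1:
        undo("dma memory");     /* bus_dmamem_free(), as at 8007 */
 fail_0:
        return error;
}

int
main(void)
{
        alloc_rx_descs(0);              /* success: no unwind runs */
        return alloc_rx_descs(1);       /* forced load failure: full unwind */
}
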
8068 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8073 for (i = 0; i < rxq->rxq_ndesc; i++) {
8076 &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
8082 rxq->rxq_soft[i].rxs_mbuf = NULL;
8088 for (i = 0; i < rxq->rxq_ndesc; i++) {
8089 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
8091 rxq->rxq_soft[i].rxs_dmamap);
8097 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8101 for (i = 0; i < rxq->rxq_ndesc; i++) {
8102 if (rxq->rxq_soft[i].rxs_dmamap != NULL)
8104 rxq->rxq_soft[i].rxs_dmamap);
8199 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8200 rxq->rxq_sc = sc;
8201 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
8203 error = wm_alloc_rx_descs(sc, rxq);
8207 error = wm_alloc_rx_buffer(sc, rxq);
8209 wm_free_rx_descs(sc, rxq);
8216 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
8217 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
8218 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
8219 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
8221 WM_Q_MISC_EVCNT_ATTACH(rxq, qdrop, rxq, i, xname);
8233 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8234 wm_free_rx_buffer(sc, rxq);
8235 wm_free_rx_descs(sc, rxq);
8236 if (rxq->rxq_lock)
8237 mutex_obj_free(rxq->rxq_lock);
8265 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8268 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
8269 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
8270 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
8271 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
8273 WM_Q_EVCNT_DETACH(rxq, qdrop, rxq, i);
8276 wm_free_rx_buffer(sc, rxq);
8277 wm_free_rx_descs(sc, rxq);
8278 if (rxq->rxq_lock)
8279 mutex_obj_free(rxq->rxq_lock);
8430 struct wm_rxqueue *rxq)
8433 KASSERT(mutex_owned(rxq->rxq_lock));
8440 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
8441 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
8443 rxq->rxq_descsize * rxq->rxq_ndesc);
8457 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
8458 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
8460 rxq->rxq_descsize * rxq->rxq_ndesc);
8504 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8509 KASSERT(mutex_owned(rxq->rxq_lock));
8511 for (i = 0; i < rxq->rxq_ndesc; i++) {
8512 rxs = &rxq->rxq_soft[i];
8514 if ((error = wm_add_rxbuf(rxq, i)) != 0) {
8522 wm_rxdrain(rxq);
8532 wm_init_rxdesc(rxq, i);
8535 rxq->rxq_ptr = 0;
8536 rxq->rxq_discard = 0;
8537 WM_RXCHAIN_RESET(rxq);
8544 struct wm_rxqueue *rxq)
8547 KASSERT(mutex_owned(rxq->rxq_lock));
8554 rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
8556 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8558 wm_init_rx_regs(sc, wmq, rxq);
8559 return wm_init_rx_buffer(sc, rxq);
8577 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8596 mutex_enter(rxq->rxq_lock);
8597 error = wm_init_rx_queue(sc, wmq, rxq);
8598 mutex_exit(rxq->rxq_lock);
10002 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
10004 struct wm_softc *sc = rxq->rxq_sc;
10008 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
10011 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
10013 return rxq->rxq_descs[idx].wrx_status;
10017 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
10019 struct wm_softc *sc = rxq->rxq_sc;
10023 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
10026 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
10028 return rxq->rxq_descs[idx].wrx_errors;
10032 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
10034 struct wm_softc *sc = rxq->rxq_sc;
10037 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
10039 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
10041 return rxq->rxq_descs[idx].wrx_special;
10045 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
10047 struct wm_softc *sc = rxq->rxq_sc;
10050 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
10052 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
10054 return rxq->rxq_descs[idx].wrx_len;
10059 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
10061 struct wm_softc *sc = rxq->rxq_sc;
10064 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
10066 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
10072 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
10074 struct wm_softc *sc = rxq->rxq_sc;
10077 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
10079 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
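
The wm_rxdesc_get_*() accessors above normalize three descriptor layouts (extended, "newqueue", and legacy wiseman) behind one interface, with a per-chip test choosing which fields to read. A sketch of the pattern; the struct layouts and the type enum are invented and do not match the real ext_rxdesc_t/nq_rxdesc_t/wiseman_rxdesc_t:

#include <stdio.h>
#include <stdint.h>

enum desc_type { DESC_LEGACY, DESC_EXT, DESC_NQ };

struct legacy_desc { uint16_t len; };
struct ext_desc    { uint16_t pktlen; };
struct nq_desc     { uint16_t pktlen; };

union rxdesc {
        struct legacy_desc legacy;
        struct ext_desc    ext;
        struct nq_desc     nq;
};

/* One accessor per field, dispatching on the chip generation, so the
 * receive loop never touches a raw layout directly. */
static uint16_t
rxdesc_get_pktlen(enum desc_type t, const union rxdesc *d)
{
        switch (t) {
        case DESC_EXT:
                return d->ext.pktlen;
        case DESC_NQ:
                return d->nq.pktlen;
        default:
                return d->legacy.len;
        }
}

int
main(void)
{
        union rxdesc d = { .legacy = { .len = 1514 } };

        printf("pktlen %u\n", rxdesc_get_pktlen(DESC_LEGACY, &d));
        return 0;
}
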
10112 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
10115 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
10123 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
10125 struct wm_softc *sc = rxq->rxq_sc;
10152 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
10154 struct wm_softc *sc = rxq->rxq_sc;
10159 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
10167 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
10171 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
10180 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
10183 struct wm_softc *sc = rxq->rxq_sc;
10188 WM_Q_EVCNT_INCR(rxq, ipsum);
10201 WM_Q_EVCNT_INCR(rxq, tusum);
10218 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
10220 struct wm_softc *sc = rxq->rxq_sc;
10230 KASSERT(mutex_owned(rxq->rxq_lock));
10232 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
10233 rxs = &rxq->rxq_soft[i];
10238 wm_cdrxsync(rxq, i,
10241 status = wm_rxdesc_get_status(rxq, i);
10242 errors = wm_rxdesc_get_errors(rxq, i);
10243 len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
10244 vlantag = wm_rxdesc_get_vlantag(rxq, i);
10246 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
10247 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
10250 if (!wm_rxdesc_dd(rxq, i, status))
10262 if (__predict_false(rxq->rxq_discard)) {
10266 wm_init_rxdesc(rxq, i);
10267 if (wm_rxdesc_is_eop(rxq, status)) {
10272 rxq->rxq_discard = 0;
10287 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
10295 wm_init_rxdesc(rxq, i);
10296 if (!wm_rxdesc_is_eop(rxq, status))
10297 rxq->rxq_discard = 1;
10298 m_freem(rxq->rxq_head);
10299 WM_RXCHAIN_RESET(rxq);
10303 rxq->rxq_discard ? " (discard)" : ""));
10308 rxq->rxq_len += len;
10314 if (!wm_rxdesc_is_eop(rxq, status)) {
10315 WM_RXCHAIN_LINK(rxq, m);
10318 device_xname(sc->sc_dev), rxq->rxq_len));
10335 rxq->rxq_tail->m_len
10340 len = rxq->rxq_len - ETHER_CRC_LEN;
10342 len = rxq->rxq_len;
10344 WM_RXCHAIN_LINK(rxq, m);
10346 *rxq->rxq_tailp = NULL;
10347 m = rxq->rxq_head;
10349 WM_RXCHAIN_RESET(rxq);
10356 if (wm_rxdesc_has_errors(rxq, errors)) {
10376 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
10380 wm_rxdesc_ensure_checksum(rxq, status, errors, m);
10382 rxq->rxq_packets++;
10383 rxq->rxq_bytes += len;
10387 if (rxq->rxq_stopping)
10390 rxq->rxq_ptr = i;
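
The loop body of wm_rxeof() (10218-10390) resumes at the saved rxq_ptr, stops at the first descriptor the device has not completed, chains fragments until EOP, and stores the resume index back. A much-reduced skeleton of that control flow, with invented DD/EOP encodings and a 16-slot ring; byte counting, error handling, and the discard state are omitted:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NRX     16
#define DD      0x01            /* descriptor done */
#define EOP     0x02            /* end of packet */
#define NEXTRX(i)       (((i) + 1) % NRX)

static uint8_t ring_status[NRX];

static bool
rxeof(int *ptr, unsigned limit)
{
        int i, chain_len = 0;
        bool more = false;

        for (i = *ptr;; i = NEXTRX(i)) {
                uint8_t status = ring_status[i];

                if ((status & DD) == 0)
                        break;          /* device still owns slot i */
                if (limit-- == 0) {
                        more = true;    /* budget spent; defer the rest */
                        break;
                }
                chain_len++;            /* WM_RXCHAIN_LINK() would go here */
                ring_status[i] = 0;     /* wm_init_rxdesc(): slot returned */
                if (status & EOP) {
                        printf("packet of %d fragment(s)\n", chain_len);
                        chain_len = 0;  /* WM_RXCHAIN_RESET() */
                }
        }
        *ptr = i;                       /* rxq->rxq_ptr = i */
        return more;
}

int
main(void)
{
        int ptr = 0;

        ring_status[0] = DD;            /* first fragment */
        ring_status[1] = DD | EOP;      /* completes a two-fragment packet */
        ring_status[2] = DD | EOP;      /* single-fragment packet */
        rxeof(&ptr, NRX);
        printf("resume at slot %d\n", ptr);
        return 0;
}
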
10906 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10944 mutex_enter(rxq->rxq_lock);
10946 if (rxq->rxq_stopping) {
10947 mutex_exit(rxq->rxq_lock);
10957 WM_Q_EVCNT_INCR(rxq, intr);
10966 more = wm_rxeof(rxq, rxlimit);
10970 mutex_exit(rxq->rxq_lock);
11060 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
11091 mutex_enter(rxq->rxq_lock);
11093 if (rxq->rxq_stopping) {
11094 mutex_exit(rxq->rxq_lock);
11098 WM_Q_EVCNT_INCR(rxq, intr);
11100 rxmore = wm_rxeof(rxq, rxlimit);
11103 mutex_exit(rxq->rxq_lock);
11121 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
11137 mutex_enter(rxq->rxq_lock);
11138 if (rxq->rxq_stopping) {
11139 mutex_exit(rxq->rxq_lock);
11142 WM_Q_EVCNT_INCR(rxq, defer);
11143 rxmore = wm_rxeof(rxq, rxlimit);
11144 mutex_exit(rxq->rxq_lock);
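
The three handler fragments above (10906-, 11060-, and 11121-) share one shape: take rxq_lock, bail if the queue is stopping, count the event, run wm_rxeof() under a budget, and let a deferred handler continue when the budget runs out. A toy model of that budget/defer split; the budget values, the pending counter, and the scheduling call are illustrative stand-ins for the driver's softint machinery:

#include <stdio.h>
#include <stdbool.h>

static bool
rxeof_budgeted(int *pending, int limit)
{
        while (*pending > 0 && limit-- > 0)
                (*pending)--;           /* consume one completed descriptor */
        return *pending > 0;            /* true: budget ran out first */
}

static void
deferred_handler(int *pending)
{
        /* WM_Q_EVCNT_INCR(rxq, defer) is counted on this path (11142). */
        while (rxeof_budgeted(pending, 64))
                printf("defer again, %d still pending\n", *pending);
}

int
main(void)
{
        int pending = 100;

        /* Hard interrupt path: WM_Q_EVCNT_INCR(rxq, intr), then a small
         * budget keeps the time spent under rxq_lock short. */
        if (rxeof_budgeted(&pending, 32))
                deferred_handler(&pending);     /* softint_schedule() stand-in */
        printf("pending after processing: %d\n", pending);
        return 0;
}
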