Lines Matching defs:vsc

295 #define VIO_DMAMEM_SYNC(vsc, sc, p, size, flags)		\
296 bus_dmamap_sync((vsc)->sc_dmat, (sc)->sc_dma_map, \
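Only the lines containing vsc are listed, so the body of the sync macro is cut off after line 296. A minimal sketch of how such a macro is commonly completed, assuming the driver keeps its control structures in one shared DMA map and derives the offset of an object p from the mapped kva; the VIO_DMAMEM_OFFSET helper below is an assumption, not something visible in this listing:

/* assumed helper: byte offset of p within the shared DMA map */
#define VIO_DMAMEM_OFFSET(sc, p) \
	((caddr_t)(p) - (sc)->sc_dma_kva)
#define VIO_DMAMEM_SYNC(vsc, sc, p, size, flags) \
	bus_dmamap_sync((vsc)->sc_dmat, (sc)->sc_dma_map, \
	    VIO_DMAMEM_OFFSET((sc), (p)), (size), (flags))

/* typical use: flush a header before the device reads it */
VIO_DMAMEM_SYNC(vsc, sc, hdr, sc->sc_hdr_size, BUS_DMASYNC_PREWRITE);
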
322 void vio_get_lladdr(struct arpcom *ac, struct virtio_softc *vsc);
323 void vio_put_lladdr(struct arpcom *ac, struct virtio_softc *vsc);
391 struct virtio_softc *vsc = sc->sc_virtio;
394 if (bus_dmamap_create(vsc->sc_dmat, sc->sc_dma_size, 1,
398 if (bus_dmamem_alloc(vsc->sc_dmat, sc->sc_dma_size, 16, 0,
401 if (bus_dmamem_map(vsc->sc_dmat, &sc->sc_dma_seg, nsegs,
404 if (bus_dmamap_load(vsc->sc_dmat, sc->sc_dma_map, sc->sc_dma_kva,
410 bus_dmamem_unmap(vsc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
412 bus_dmamem_free(vsc->sc_dmat, &sc->sc_dma_seg, 1);
414 bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dma_map);
422 struct virtio_softc *vsc = sc->sc_virtio;
424 bus_dmamap_unload(vsc->sc_dmat, sc->sc_dma_map);
425 bus_dmamem_unmap(vsc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
426 bus_dmamem_free(vsc->sc_dmat, &sc->sc_dma_seg, 1);
427 bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dma_map);
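The matches at 391-414 are the shared DMA area setup and 422-427 its teardown; together they give the canonical bus_dma(9) order: create the map, allocate and map the memory, then load the map, unwinding in reverse on failure. A condensed sketch of that sequence; the function name, label names, ENOMEM return and exact flag combinations are assumptions, while the calls and their argument order follow the listing:

int
vio_alloc_dmamem(struct vio_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int nsegs;

	if (bus_dmamap_create(vsc->sc_dmat, sc->sc_dma_size, 1,
	    sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->sc_dma_map) != 0)
		goto err;
	if (bus_dmamem_alloc(vsc->sc_dmat, sc->sc_dma_size, 16, 0,
	    &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;
	if (bus_dmamem_map(vsc->sc_dmat, &sc->sc_dma_seg, nsegs,
	    sc->sc_dma_size, &sc->sc_dma_kva, BUS_DMA_NOWAIT) != 0)
		goto free;
	if (bus_dmamap_load(vsc->sc_dmat, sc->sc_dma_map, sc->sc_dma_kva,
	    sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;
	return (0);

unmap:
	bus_dmamem_unmap(vsc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
free:
	bus_dmamem_free(vsc->sc_dmat, &sc->sc_dma_seg, 1);
destroy:
	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dma_map);
err:
	return (ENOMEM);
}
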
460 struct virtio_softc *vsc = sc->sc_virtio;
477 if (virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_VQ)) {
500 if (virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_VQ)) {
520 if (virtio_has_feature(vsc, VIRTIO_NET_F_HOST_TSO4) ||
521 virtio_has_feature(vsc, VIRTIO_NET_F_HOST_TSO6))
542 r = bus_dmamap_create(vsc->sc_dmat,
552 r = bus_dmamap_create(vsc->sc_dmat, txsize,
571 bus_dmamap_destroy(vsc->sc_dmat,
577 bus_dmamap_destroy(vsc->sc_dmat,
590 vio_dmamem_enqueue(struct virtio_softc *vsc, struct vio_softc *sc,
593 VIO_DMAMEM_SYNC(vsc, sc, p, size, write ? BUS_DMASYNC_PREWRITE :
600 vio_get_lladdr(struct arpcom *ac, struct virtio_softc *vsc)
604 ac->ac_enaddr[i] = virtio_read_device_config_1(vsc,
610 vio_put_lladdr(struct arpcom *ac, struct virtio_softc *vsc)
614 virtio_write_device_config_1(vsc, VIRTIO_NET_CONFIG_MAC + i,
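The lladdr helpers at 600-614 copy the station address one byte at a time through the device config space accessors. A sketch of both functions as the fragments suggest; the ETHER_ADDR_LEN bound is an assumption, since the loop headers themselves do not match vsc and so are not shown:

void
vio_get_lladdr(struct arpcom *ac, struct virtio_softc *vsc)
{
	int i;

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		ac->ac_enaddr[i] = virtio_read_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC + i);
}

void
vio_put_lladdr(struct arpcom *ac, struct virtio_softc *vsc)
{
	int i;

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		virtio_write_device_config_1(vsc, VIRTIO_NET_CONFIG_MAC + i,
		    ac->ac_enaddr[i]);
}
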
635 struct virtio_softc *vsc = (struct virtio_softc *)parent;
640 if (vsc->sc_child != NULL) {
646 sc->sc_virtio = vsc;
648 vsc->sc_child = self;
649 vsc->sc_ipl = IPL_NET | IPL_MPSAFE;
650 vsc->sc_driver_features = VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS |
656 vsc->sc_driver_features |= VIRTIO_NET_F_MQ;
658 vsc->sc_driver_features |= VIRTIO_NET_F_HOST_TSO4;
659 vsc->sc_driver_features |= VIRTIO_NET_F_HOST_TSO6;
661 vsc->sc_driver_features |= VIRTIO_NET_F_CTRL_GUEST_OFFLOADS;
662 vsc->sc_driver_features |= VIRTIO_NET_F_GUEST_TSO4;
663 vsc->sc_driver_features |= VIRTIO_NET_F_GUEST_TSO6;
665 if (virtio_negotiate_features(vsc, virtio_net_feature_names) != 0)
668 if (virtio_has_feature(vsc, VIRTIO_NET_F_MQ)) {
669 i = virtio_read_device_config_2(vsc,
671 vsc->sc_nvqs = 2 * i + 1;
681 vsc->sc_nvqs = 2;
682 if (virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_VQ))
683 vsc->sc_nvqs++;
686 vsc->sc_vqs = mallocarray(vsc->sc_nvqs, sizeof(*vsc->sc_vqs), M_DEVBUF,
688 if (vsc->sc_vqs == NULL) {
689 vsc->sc_nvqs = 0;
698 if (virtio_has_feature(vsc, VIRTIO_NET_F_MAC)) {
699 vio_get_lladdr(&sc->sc_ac, vsc);
702 vio_put_lladdr(&sc->sc_ac, vsc);
706 if (virtio_has_feature(vsc, VIRTIO_NET_F_MRG_RXBUF) ||
707 vsc->sc_version_1) {
720 if (virtio_has_feature(vsc, VIRTIO_NET_F_CSUM))
723 if (virtio_has_feature(vsc, VIRTIO_NET_F_HOST_TSO4))
725 if (virtio_has_feature(vsc, VIRTIO_NET_F_HOST_TSO6))
729 if (virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) &&
730 (virtio_has_feature(vsc, VIRTIO_NET_F_GUEST_TSO4) ||
731 virtio_has_feature(vsc, VIRTIO_NET_F_GUEST_TSO6))) {
737 if (virtio_has_feature(vsc, VIRTIO_NET_F_MRG_RXBUF))
745 if (virtio_has_feature(vsc, VIRTIO_NET_F_HOST_TSO4) ||
746 virtio_has_feature(vsc, VIRTIO_NET_F_HOST_TSO6)) {
754 if (virtio_has_feature(vsc, VIRTIO_F_RING_INDIRECT_DESC))
763 vioq->viq_rxvq = &vsc->sc_vqs[vqidx];
767 if (virtio_alloc_vq(vsc, vioq->viq_rxvq, vqidx, 2, "rx") != 0)
770 virtio_start_vq_intr(vsc, vioq->viq_rxvq);
773 vioq->viq_txvq = &vsc->sc_vqs[vqidx];
774 if (virtio_alloc_vq(vsc, vioq->viq_txvq, vqidx,
779 if (virtio_has_feature(vsc, VIRTIO_F_RING_EVENT_IDX))
782 virtio_stop_vq_intr(vsc, vioq->viq_txvq);
801 if (virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_VQ)) {
803 if (virtio_has_feature(vsc, VIRTIO_NET_F_MQ)) {
804 i = 2 * virtio_read_device_config_2(vsc,
807 sc->sc_ctl_vq = &vsc->sc_vqs[i];
808 if (virtio_alloc_vq(vsc, sc->sc_ctl_vq, i, 1, "control") != 0)
813 virtio_start_vq_intr(vsc, sc->sc_ctl_vq);
817 r = virtio_intr_establish(vsc, va, 0, NULL, vio_config_intr,
818 vsc);
824 r = virtio_intr_establish(vsc, va, 1, NULL, vio_ctrl_intr,
834 r = virtio_intr_establish(vsc, va, i + 2, ci,
852 ifq_init_maxlen(&ifp->if_snd, vsc->sc_vqs[1].vq_num - 1);
856 vsc->sc_config_change = vio_config_change;
860 if (virtio_attach_finish(vsc, va) != 0)
863 if (virtio_has_feature(vsc, VIRTIO_NET_F_MQ)) {
884 for (i = 0; i < vsc->sc_nvqs; i++)
885 virtio_free_vq(vsc, &vsc->sc_vqs[i]);
886 free(vsc->sc_vqs, M_DEVBUF, vsc->sc_nvqs * sizeof(*vsc->sc_vqs));
888 vsc->sc_nvqs = 0;
889 vsc->sc_child = VIRTIO_CHILD_ERROR;
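Inside the attach function (635-889), the matches around 668-683 show how the virtqueue count is chosen: with VIRTIO_NET_F_MQ the device advertises a number of rx/tx pairs and one extra queue is reserved for control, otherwise there is a single pair plus an optional control queue. A sketch of that decision; the config field name below is an assumption, since only the first half of the virtio_read_device_config_2() call is visible:

	if (virtio_has_feature(vsc, VIRTIO_NET_F_MQ)) {
		i = virtio_read_device_config_2(vsc,
		    VIRTIO_NET_CONFIG_MAX_QUEUES);	/* assumed field name */
		vsc->sc_nvqs = 2 * i + 1;		/* rx+tx per pair, plus control */
	} else {
		vsc->sc_nvqs = 2;			/* one rx, one tx */
		if (virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_VQ))
			vsc->sc_nvqs++;			/* optional control queue */
	}
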
898 struct virtio_softc *vsc = sc->sc_virtio;
901 if (virtio_has_feature(vsc, VIRTIO_NET_F_STATUS)) {
902 int status = virtio_read_device_config_2(vsc,
918 struct virtio_softc *vsc = vioq->viq_sc->sc_virtio;
920 r = virtio_check_vq(vsc, vioq->viq_txvq);
921 r |= virtio_check_vq(vsc, vioq->viq_rxvq);
928 struct virtio_softc *vsc = arg;
929 return vio_config_change(vsc);
941 vio_config_change(struct virtio_softc *vsc)
943 struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
976 struct virtio_softc *vsc = sc->sc_virtio;
994 if (virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
997 if (virtio_has_feature(vsc, VIRTIO_NET_F_GUEST_CSUM))
1001 if (virtio_has_feature(vsc, VIRTIO_NET_F_GUEST_TSO4))
1003 if (virtio_has_feature(vsc, VIRTIO_NET_F_GUEST_TSO6))
1019 struct virtio_softc *vsc = sc->sc_virtio;
1026 virtio_reset(vsc);
1027 virtio_intr_barrier(vsc);
1034 if (virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_VQ))
1040 virtio_reinit_start(vsc);
1042 virtio_start_vq_intr(vsc, sc->sc_q[i].viq_rxvq);
1043 virtio_stop_vq_intr(vsc, sc->sc_q[i].viq_txvq);
1045 if (virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_VQ))
1046 virtio_start_vq_intr(vsc, sc->sc_ctl_vq);
1047 virtio_reinit_end(vsc);
1048 if (virtio_has_feature(vsc, VIRTIO_NET_F_MQ))
1050 if (virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_VQ))
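The stop/restart matches at 1019-1050 outline the reinit order: reset the device, wait for any interrupt in flight, rerun the virtio handshake, then re-arm the rx (and control) queue interrupts while keeping tx interrupts masked until packets are actually queued. A sketch of that order; the sc_nqueues loop bound is an assumption, the calls themselves mirror the listing:

	virtio_reset(vsc);
	virtio_intr_barrier(vsc);
	/* ... release any buffers still owned by the old rings ... */
	virtio_reinit_start(vsc);
	for (i = 0; i < sc->sc_nqueues; i++) {
		virtio_start_vq_intr(vsc, sc->sc_q[i].viq_rxvq);
		virtio_stop_vq_intr(vsc, sc->sc_q[i].viq_txvq);
	}
	if (virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_VQ))
		virtio_start_vq_intr(vsc, sc->sc_ctl_vq);
	virtio_reinit_end(vsc);
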
1141 struct virtio_softc *vsc = sc->sc_virtio;
1197 bus_dmamap_unload(vsc->sc_dmat,
1208 bus_dmamap_sync(vsc->sc_dmat, vioq->viq_txdmamaps[slot], 0,
1211 vio_dmamem_enqueue(vsc, sc, vq, slot, hdr, sc->sc_hdr_size, 1);
1213 virtio_enqueue_commit(vsc, vq, slot, 0);
1229 if (virtio_has_feature(vsc, VIRTIO_F_RING_EVENT_IDX))
1232 r = virtio_start_vq_intr(vsc, vq);
1242 virtio_notify(vsc, vq);
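The transmit matches ending at 1242 show the submit pattern: sync the per-slot mbuf map and the header, queue both, commit the slot with the notify deferred, and ring the device once after the whole batch. A sketch of the per-packet steps; vq, vioq, slot and hdr stand for the locals the fragments already use, and the trailing notify really belongs after the enqueue loop, as line 1242 suggests:

	/* mbuf chain was loaded into the per-slot tx map beforehand */
	bus_dmamap_sync(vsc->sc_dmat, vioq->viq_txdmamaps[slot], 0,
	    vioq->viq_txdmamaps[slot]->dm_mapsize, BUS_DMASYNC_PREWRITE);
	/* header lives in the shared DMA area; 1 = device reads it */
	vio_dmamem_enqueue(vsc, sc, vq, slot, hdr, sc->sc_hdr_size, 1);
	/* final 0 defers the doorbell so one notify covers the batch */
	virtio_enqueue_commit(vsc, vq, slot, 0);

	/* once per batch, after the loop: */
	virtio_notify(vsc, vq);
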
1390 struct virtio_softc *vsc = sc->sc_virtio;
1419 bus_dmamap_sync(vsc->sc_dmat, vioq->viq_rxdmamaps[slot], 0,
1436 virtio_enqueue_commit(vsc, vq, slot, 0);
1442 virtio_notify(vsc, vq);
1491 struct virtio_softc *vsc = sc->sc_virtio;
1500 while (virtio_dequeue(vsc, vioq->viq_rxvq, &slot, &len) == 0) {
1502 bus_dmamap_sync(vsc->sc_dmat, vioq->viq_rxdmamaps[slot], 0,
1507 bus_dmamap_unload(vsc->sc_dmat, vioq->viq_rxdmamaps[slot]);
1517 if (virtio_has_feature(vsc, VIRTIO_NET_F_MQ)) {
1535 if (virtio_has_feature(vsc, VIRTIO_NET_F_GUEST_CSUM))
1557 struct virtio_softc *vsc = vq->vq_owner;
1558 struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
1569 if (virtio_has_feature(vsc, VIRTIO_F_RING_EVENT_IDX)) {
1622 struct virtio_softc *vsc = vq->vq_owner;
1623 struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
1645 struct virtio_softc *vsc = vq->vq_owner;
1646 struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
1654 while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
1657 VIO_DMAMEM_SYNC(vsc, sc, hdr, sc->sc_hdr_size,
1659 bus_dmamap_sync(vsc->sc_dmat, vioq->viq_txdmamaps[slot], 0,
1663 bus_dmamap_unload(vsc->sc_dmat, vioq->viq_txdmamaps[slot]);
1677 struct virtio_softc *vsc = vq->vq_owner;
1678 struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
1689 virtio_stop_vq_intr(vsc, vq);
1704 struct virtio_softc *vsc = vioq->viq_sc->sc_virtio;
1708 r = bus_dmamap_load_mbuf(vsc->sc_dmat, dmap, m,
1715 bus_dmamap_load_mbuf(vsc->sc_dmat, dmap, m,
1731 struct virtio_softc *vsc = sc->sc_virtio;
1742 bus_dmamap_unload(vsc->sc_dmat,
1769 struct virtio_softc *vsc = sc->sc_virtio;
1796 vio_dmamem_enqueue(vsc, sc, vq, *slotp, sc->sc_ctrl_cmd,
1813 struct virtio_softc *vsc = sc->sc_virtio;
1817 vio_dmamem_enqueue(vsc, sc, vq, slot, sc->sc_ctrl_status,
1820 virtio_enqueue_commit(vsc, vq, slot, 1);
1838 VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_cmd,
1840 VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_status,
1868 struct virtio_softc *vsc = sc->sc_virtio;
1878 vio_dmamem_enqueue(vsc, sc, vq, slot, sc->sc_ctrl_rx,
1882 VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_rx,
1897 struct virtio_softc *vsc = sc->sc_virtio;
1909 vio_dmamem_enqueue(vsc, sc, vq, slot, sc->sc_ctrl_mq_pairs,
1914 VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_mq_pairs,
1931 struct virtio_softc *vsc = sc->sc_virtio;
1942 vio_dmamem_enqueue(vsc, sc, vq, slot, sc->sc_ctrl_guest_offloads,
1947 VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_guest_offloads,
1971 struct virtio_softc *vsc = vq->vq_owner;
1972 struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
1978 ret = virtio_dequeue(vsc, vq, &slot, NULL);
1984 if (virtio_start_vq_intr(vsc, vq))
1998 struct virtio_softc *vsc = sc->sc_virtio;
2013 vio_dmamem_enqueue(vsc, sc, vq, slot, sc->sc_ctrl_mac_tbl_uc, len_uc,
2015 vio_dmamem_enqueue(vsc, sc, vq, slot, sc->sc_ctrl_mac_tbl_mc, len_mc,
2019 VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_mac_tbl_uc, len_uc,
2021 VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_mac_tbl_mc, len_mc,
2036 struct virtio_softc *vsc = sc->sc_virtio;
2049 if (!virtio_has_feature(vsc, VIRTIO_NET_F_CTRL_RX)) {
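
All of the control-queue commands from 1769 onward share one round-trip pattern: the command header and the status byte sit in the shared DMA area, both are queued on the control virtqueue (the PREWRITE/PREREAD syncs happen inside vio_dmamem_enqueue, see 590-593), the slot is committed with an immediate notify, and the status is synced back once the device has answered. A sketch of that round trip; the sizeof() arguments are assumptions because the size parameters are cut off in the listing, and the wait step is only indicated by a comment since the sleep/wakeup primitive does not appear here:

	/* device reads the command ... */
	vio_dmamem_enqueue(vsc, sc, vq, slot, sc->sc_ctrl_cmd,
	    sizeof(*sc->sc_ctrl_cmd), 1);
	/* ... and writes the status */
	vio_dmamem_enqueue(vsc, sc, vq, slot, sc->sc_ctrl_status,
	    sizeof(*sc->sc_ctrl_status), 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);	/* 1 = notify immediately */

	/* ... sleep until the control-queue completion handler wakes us ... */

	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_cmd,
	    sizeof(*sc->sc_ctrl_cmd), BUS_DMASYNC_POSTWRITE);
	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_status,
	    sizeof(*sc->sc_ctrl_status), BUS_DMASYNC_POSTREAD);
	/* sc_ctrl_status now holds the device's answer */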