Lines Matching refs:sc — NetBSD sys/dev/pci/virtio.c

75 virtio_set_status(struct virtio_softc *sc, int status)
77 sc->sc_ops->set_status(sc, status);
85 * virtio_reset(sc); // this will stop the device activity
88 * virtio_reinit_start(sc); // dequeue prohibited
89 * newfeatures = virtio_negotiate_features(sc, requestedfeatures);
91 * virtio_reinit_end(sc); // device activated; enqueue allowed
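
The usage comment above (source lines 85-91) gives the reset/reinit protocol. Below is a minimal sketch of a child driver's resume path following that comment; struct vioX_softc, vioX_resume(), sc_saved_features and VIOX_F_REQUIRED are illustrative names, not from virtio.c, and the int return of virtio_reinit_start() is assumed from its failure path at source line 130:

    /* Hypothetical resume path for a virtio child driver. */
    static int
    vioX_resume(struct vioX_softc *xsc)
    {
            struct virtio_softc *vsc = xsc->sc_virtio;

            virtio_reset(vsc);              /* stops device activity */
            /* ... dequeue finished and revoke pending requests ... */
            if (virtio_reinit_start(vsc) != 0)      /* dequeue prohibited */
                    return EIO;
            virtio_negotiate_features(vsc, xsc->sc_saved_features);
            if ((virtio_features(vsc) & VIOX_F_REQUIRED) == 0)
                    return ENOTSUP;
            /* ... rewrite device config, other initialization ... */
            virtio_reinit_end(vsc);         /* device active; enqueue allowed */
            return 0;
    }
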
95 virtio_reset(struct virtio_softc *sc)
97 virtio_device_reset(sc);
101 virtio_reinit_start(struct virtio_softc *sc)
105 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
106 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
107 for (i = 0; i < sc->sc_nvqs; i++) {
109 struct virtqueue *vq = &sc->sc_vqs[i];
110 n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
115 device_xname(sc->sc_dev),
118 virtio_reset_vq(sc, vq);
119 sc->sc_ops->setup_queue(sc, vq->vq_index,
123 r = sc->sc_ops->setup_interrupts(sc, 1);
130 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
136 virtio_reinit_end(struct virtio_softc *sc)
138 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
145 virtio_negotiate_features(struct virtio_softc *sc, uint64_t guest_features)
147 if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
148 !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
150 sc->sc_ops->neg_features(sc, guest_features);
151 if (sc->sc_active_features & VIRTIO_F_RING_INDIRECT_DESC)
152 sc->sc_indirect = true;
154 sc->sc_indirect = false;
165 printf("%02x ", bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index+i)); \
177 virtio_read_device_config_1(struct virtio_softc *sc, int index)
179 bus_space_tag_t iot = sc->sc_devcfg_iot;
180 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
190 virtio_read_device_config_2(struct virtio_softc *sc, int index)
192 bus_space_tag_t iot = sc->sc_devcfg_iot;
193 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
197 if (BYTE_ORDER != sc->sc_bus_endian)
202 bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
204 bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
209 virtio_read_device_config_4(struct virtio_softc *sc, int index)
211 bus_space_tag_t iot = sc->sc_devcfg_iot;
212 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
216 if (BYTE_ORDER != sc->sc_bus_endian)
221 bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
223 bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
233 virtio_read_device_config_8(struct virtio_softc *sc, int index)
235 bus_space_tag_t iot = sc->sc_devcfg_iot;
236 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
245 if (sc->sc_bus_endian != sc->sc_struct_endian) {
251 if (BYTE_ORDER != sc->sc_struct_endian)
256 bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
258 bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
260 bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
262 bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4));
272 virtio_read_device_config_le_2(struct virtio_softc *sc, int index)
274 bus_space_tag_t iot = sc->sc_devcfg_iot;
275 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
285 if (sc->sc_bus_endian != LITTLE_ENDIAN)
291 bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
292 bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
297 virtio_read_device_config_le_4(struct virtio_softc *sc, int index)
299 bus_space_tag_t iot = sc->sc_devcfg_iot;
300 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
306 if (sc->sc_bus_endian != LITTLE_ENDIAN)
312 bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
313 bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
318 virtio_write_device_config_1(struct virtio_softc *sc, int index, uint8_t value)
320 bus_space_tag_t iot = sc->sc_devcfg_iot;
321 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
327 virtio_write_device_config_2(struct virtio_softc *sc, int index,
330 bus_space_tag_t iot = sc->sc_devcfg_iot;
331 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
333 if (BYTE_ORDER != sc->sc_bus_endian)
339 virtio_write_device_config_4(struct virtio_softc *sc, int index,
342 bus_space_tag_t iot = sc->sc_devcfg_iot;
343 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
345 if (BYTE_ORDER != sc->sc_bus_endian)
358 virtio_write_device_config_8(struct virtio_softc *sc, int index,
361 bus_space_tag_t iot = sc->sc_devcfg_iot;
362 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
368 if (BYTE_ORDER != sc->sc_struct_endian)
372 if (sc->sc_bus_endian != sc->sc_struct_endian) {
377 if (sc->sc_struct_endian == LITTLE_ENDIAN) {
392 virtio_write_device_config_le_2(struct virtio_softc *sc, int index,
395 bus_space_tag_t iot = sc->sc_devcfg_iot;
396 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
398 if (sc->sc_bus_endian != LITTLE_ENDIAN)
404 virtio_write_device_config_le_4(struct virtio_softc *sc, int index,
407 bus_space_tag_t iot = sc->sc_devcfg_iot;
408 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
410 if (sc->sc_bus_endian != LITTLE_ENDIAN)
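
The virtio_read_device_config_{1,2,4,8}() and virtio_write_device_config_*() accessors above absorb the bus-vs-struct endianness differences (the _le_ variants, per their names, serve fields that are little-endian regardless of the negotiated endianness), so a child driver can read its config space with plain loads. A sketch under a hypothetical config layout; the VIOX_CONFIG_* offsets are invented for illustration:

    /* Hypothetical device-specific config layout. */
    #define VIOX_CONFIG_CAPACITY    0       /* 64-bit field */
    #define VIOX_CONFIG_SEG_MAX     8       /* 32-bit field */
    #define VIOX_CONFIG_WCE         12      /* 8-bit, writable */

    static void
    vioX_read_config(struct virtio_softc *vsc)
    {
            uint64_t capacity;
            uint32_t seg_max;

            capacity = virtio_read_device_config_8(vsc,
                VIOX_CONFIG_CAPACITY);
            seg_max = virtio_read_device_config_4(vsc,
                VIOX_CONFIG_SEG_MAX);
            virtio_write_device_config_1(vsc, VIOX_CONFIG_WCE, 0);
            (void)capacity; (void)seg_max;  /* sketch only */
    }
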
420 virtio_rw16(struct virtio_softc *sc, uint16_t val)
422 KASSERT(sc);
423 return BYTE_ORDER != sc->sc_struct_endian ? bswap16(val) : val;
427 virtio_rw32(struct virtio_softc *sc, uint32_t val)
429 KASSERT(sc);
430 return BYTE_ORDER != sc->sc_struct_endian ? bswap32(val) : val;
434 virtio_rw64(struct virtio_softc *sc, uint64_t val)
436 KASSERT(sc);
437 return BYTE_ORDER != sc->sc_struct_endian ? bswap64(val) : val;
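
virtio_rw16/32/64() swap bytes only when the host byte order differs from sc_struct_endian, so ring entries and shared headers can be filled portably. A sketch with a hypothetical header structure shared with the device:

    struct vioX_hdr {                       /* hypothetical */
            uint16_t        h_flags;
            uint32_t        h_len;
    };

    static void
    vioX_fill_hdr(struct virtio_softc *vsc, struct vioX_hdr *hdr,
        uint32_t len)
    {
            /* stores are byte-swapped only if host and struct endian differ */
            hdr->h_flags = virtio_rw16(vsc, 0);
            hdr->h_len = virtio_rw32(vsc, len);
    }
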
447 struct virtio_softc *sc = arg;
449 KASSERT(sc->sc_intrhand != NULL);
451 (*sc->sc_intrhand)(sc);
464 virtio_vq_intr(struct virtio_softc *sc)
469 for (i = 0; i < sc->sc_nvqs; i++) {
470 vq = &sc->sc_vqs[i];
471 if (virtio_vq_is_enqueued(sc, vq) == 1) {
483 vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
487 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
492 vq_sync_aring_all(struct virtio_softc *sc, struct virtqueue *vq, int ops)
498 if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
500 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
505 vq_sync_aring_header(struct virtio_softc *sc, struct virtqueue *vq, int ops)
509 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
514 vq_sync_aring_payload(struct virtio_softc *sc, struct virtqueue *vq, int ops)
519 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
524 vq_sync_aring_used(struct virtio_softc *sc, struct virtqueue *vq, int ops)
530 if ((sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) == 0)
532 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
537 vq_sync_uring_all(struct virtio_softc *sc, struct virtqueue *vq, int ops)
543 if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
545 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
550 vq_sync_uring_header(struct virtio_softc *sc, struct virtqueue *vq, int ops)
554 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
559 vq_sync_uring_payload(struct virtio_softc *sc, struct virtqueue *vq, int ops)
564 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
569 vq_sync_uring_avail(struct virtio_softc *sc, struct virtqueue *vq, int ops)
575 if ((sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) == 0)
577 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
582 vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
588 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
593 virtio_vq_is_enqueued(struct virtio_softc *sc, struct virtqueue *vq)
598 vq_sync_aring_all(sc, vq, BUS_DMASYNC_POSTWRITE);
601 vq_sync_uring_header(sc, vq, BUS_DMASYNC_POSTREAD);
602 if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
604 vq_sync_uring_payload(sc, vq, BUS_DMASYNC_POSTREAD);
612 virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq,
620 *vq->vq_used_event = virtio_rw16(sc, idx);
625 (virtio_rw16(sc, vq->vq_used->idx) - vq->vq_used_idx);
636 virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq)
641 (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx) * 3 / 4;
643 return virtio_postpone_intr(sc, vq, nslots);
651 virtio_postpone_intr_far(struct virtio_softc *sc, struct virtqueue *vq)
656 (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx);
658 return virtio_postpone_intr(sc, vq, nslots);
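
virtio_postpone_intr() pushes used_event forward to delay the next interrupt when VIRTIO_F_RING_EVENT_IDX is active; the _smart variant targets 3/4 of the outstanding descriptors, _far all of them, and a nonzero return means the device already consumed past the new event index. A hedged sketch of a completion path using it (vioX names are hypothetical):

    static void
    vioX_complete(struct virtio_softc *vsc, struct virtqueue *vq)
    {
            do {
                    /* ... dequeue everything already finished ... */
            } while (virtio_postpone_intr_smart(vsc, vq));
            /* loop again whenever the device outran the new event index */
    }
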
665 virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
668 if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
675 *vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx + 0x8000);
676 vq_sync_aring_used(sc, vq, BUS_DMASYNC_PREWRITE);
679 virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
680 vq_sync_aring_header(sc, vq, BUS_DMASYNC_PREWRITE);
686 virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
689 if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
695 *vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx);
696 vq_sync_aring_used(sc, vq, BUS_DMASYNC_PREWRITE);
699 ~virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
700 vq_sync_aring_header(sc, vq, BUS_DMASYNC_PREWRITE);
704 vq_sync_uring_header(sc, vq, BUS_DMASYNC_POSTREAD);
705 if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
707 vq_sync_uring_payload(sc, vq, BUS_DMASYNC_POSTREAD);
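
virtio_stop_vq_intr()/virtio_start_vq_intr() support the usual disable/drain/re-enable pattern; per source lines 704-707, start returns nonzero when used entries arrived before the re-enable, so the caller can close the race. A sketch of an interrupt handler built on them (vioX names are hypothetical):

    static int
    vioX_vq_intr(void *arg)
    {
            struct vioX_softc *xsc = arg;
            struct virtio_softc *vsc = xsc->sc_virtio;
            struct virtqueue *vq = &xsc->sc_vq[0];

            virtio_stop_vq_intr(vsc, vq);
            for (;;) {
                    /* ... drain the used ring via virtio_dequeue() ... */
                    if (virtio_start_vq_intr(vsc, vq) == 0)
                            break;          /* re-armed, ring empty */
                    virtio_stop_vq_intr(vsc, vq);   /* entries raced in */
            }
            return 1;
    }
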
718 virtio_reset_vq(struct virtio_softc *sc, struct virtqueue *vq)
729 vds[i].next = virtio_rw16(sc, i + 1);
731 vds[i].next = virtio_rw16(sc, VRING_DESC_CHAIN_END);
742 vd[j].next = virtio_rw16(sc, j + 1);
751 vq_sync_uring_all(sc, vq, BUS_DMASYNC_PREREAD);
757 virtio_init_vq_vqdone(struct virtio_softc *sc, struct virtqueue *vq,
761 virtio_init_vq(sc, vq, index, virtio_vq_done, vq);
766 virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
772 vq->vq_owner = sc;
773 vq->vq_num = sc->sc_ops->read_queue_size(sc, index);
783 virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq,
795 aprint_error_dev(sc->sc_dev,
801 hdrlen = sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX ? 3 : 2;
808 size_indirect = (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT) ?
817 r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
820 aprint_error_dev(sc->sc_dev,
826 r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], rsegs, allocsize,
829 aprint_error_dev(sc->sc_dev,
835 r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
838 aprint_error_dev(sc->sc_dev,
844 r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
847 aprint_error_dev(sc->sc_dev,
879 mutex_init(&vq->vq_freedesc_lock, MUTEX_SPIN, sc->sc_ipl);
880 mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
881 mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
883 virtio_reset_vq(sc, vq);
885 aprint_verbose_dev(sc->sc_dev,
889 aprint_verbose_dev(sc->sc_dev,
896 sc->sc_ops->setup_queue(sc, vq->vq_index, 0);
898 bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
900 bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
902 bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
909 virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
921 while (s != virtio_rw16(sc, VRING_DESC_CHAIN_END)) {
927 device_xname(sc->sc_dev), vq->vq_index);
932 sc->sc_ops->setup_queue(sc, vq->vq_index, 0);
934 vq_sync_aring_all(sc, vq, BUS_DMASYNC_POSTWRITE);
937 bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
938 bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
939 bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
940 bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
953 vq_alloc_slot_locked(struct virtio_softc *sc, struct virtqueue *vq,
962 head = tail = virtio_rw16(sc, vq->vq_free_idx);
968 vd->flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
969 tail = virtio_rw16(sc, vd->next);
976 vd->flags = virtio_rw16(sc, 0);
982 vq_alloc_slot(struct virtio_softc *sc, struct virtqueue *vq, size_t nslots)
987 rv = vq_alloc_slot_locked(sc, vq, nslots);
994 vq_free_slot(struct virtio_softc *sc, struct virtqueue *vq, uint16_t slot)
1001 while ((vd->flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) != 0) {
1002 s = virtio_rw16(sc, vd->next);
1006 vq->vq_free_idx = virtio_rw16(sc, slot);
1019 * r = virtio_enqueue_prep(sc, vq, &slot); // allocate a slot
1024 * virtio_enqueue_abort(sc, vq, slot);
1027 * r = virtio_enqueue_reserve(sc, vq, slot,
1037 * virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
1038 * virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
1039 * virtio_enqueue_commit(sc, vq, slot, true);
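
The usage comment above (source lines 1019-1039) is the canonical submit sequence. Expanded into a sketch; VIOX_QSIZE and the per-slot dmamap_cmd[]/dmamap_payload[] arrays are the child driver's own, as the comment assumes:

    #define VIOX_QSIZE      128             /* hypothetical queue size */
    static bus_dmamap_t dmamap_cmd[VIOX_QSIZE];
    static bus_dmamap_t dmamap_payload[VIOX_QSIZE];

    static int
    vioX_submit(struct virtio_softc *sc, struct virtqueue *vq,
        void *data, size_t count, bool iswrite)
    {
            int r, slot;

            r = virtio_enqueue_prep(sc, vq, &slot); /* 0 or EAGAIN */
            if (r != 0)
                    return r;
            r = bus_dmamap_load(virtio_dmat(sc), dmamap_payload[slot],
                data, count, NULL, BUS_DMA_NOWAIT);
            if (r != 0) {
                    virtio_enqueue_abort(sc, vq, slot);
                    return r;
            }
            r = virtio_enqueue_reserve(sc, vq, slot,
                dmamap_payload[slot]->dm_nsegs + 1);    /* +1 for command */
            if (r != 0) {                   /* 0 or EAGAIN */
                    bus_dmamap_unload(virtio_dmat(sc),
                        dmamap_payload[slot]);
                    return r;               /* do not call abort() */
            }
            /* ... set up the command, sync both maps PREWRITE ... */
            virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
            virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
            return virtio_enqueue_commit(sc, vq, slot, true);
    }
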
1046 virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
1050 KASSERT(sc->sc_child_state == VIRTIO_CHILD_ATTACH_FINISHED);
1053 slot = vq_alloc_slot(sc, vq, 1);
1066 virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
1079 KASSERT((vd->flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) == 0);
1096 vd->addr = virtio_rw64(sc, addr);
1097 vd->len = virtio_rw32(sc, sizeof(struct vring_desc) * nsegs);
1098 vd->flags = virtio_rw16(sc, VRING_DESC_F_INDIRECT);
1105 vd[i].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
1107 vd[i].flags = virtio_rw16(sc, 0);
1112 s = vq_alloc_slot(sc, vq, nsegs - 1);
1114 vq_free_slot(sc, vq, slot);
1117 vd->next = virtio_rw16(sc, s);
1118 vd->flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
1132 virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
1151 vds[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr);
1152 vds[s].len = virtio_rw32(sc, dmamap->dm_segs[i].ds_len);
1154 vds[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
1156 if ((vds[s].flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) == 0) {
1159 s = virtio_rw16(sc, vds[s].next);
1169 virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
1187 vds[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start);
1188 vds[s].len = virtio_rw32(sc, len);
1190 vds[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
1192 if ((vds[s].flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) == 0) {
1195 s = virtio_rw16(sc, vds[s].next);
1207 virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
1216 vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
1218 vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
1222 virtio_rw16(sc, slot);
1229 o = virtio_rw16(sc, vq->vq_avail->idx) - 1;
1240 vq_sync_uring_all(sc, vq, BUS_DMASYNC_PREREAD);
1243 vq_sync_aring_payload(sc, vq, BUS_DMASYNC_PREWRITE);
1244 vq->vq_avail->idx = virtio_rw16(sc, vq->vq_avail_idx);
1245 vq_sync_aring_header(sc, vq, BUS_DMASYNC_PREWRITE);
1248 if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
1249 vq_sync_uring_avail(sc, vq, BUS_DMASYNC_POSTREAD);
1250 t = virtio_rw16(sc, *vq->vq_avail_event) + 1;
1252 sc->sc_ops->kick(sc, vq->vq_index);
1254 vq_sync_uring_header(sc, vq, BUS_DMASYNC_POSTREAD);
1255 flags = virtio_rw16(sc, vq->vq_used->flags);
1257 sc->sc_ops->kick(sc, vq->vq_index);
1269 virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
1277 vq_free_slot(sc, vq, slot);
1290 virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
1295 if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
1301 slot = virtio_rw32(sc, vq->vq_used->ring[usedidx].id);
1304 vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);
1309 *lenp = virtio_rw32(sc, vq->vq_used->ring[usedidx].len);
1319 virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
1327 vq_free_slot(sc, vq, slot);
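
virtio_dequeue() hands back the next finished slot and its used length; virtio_dequeue_commit() then recycles the descriptor chain. A sketch of the vq_done handler shape that virtio_init_vq_vqdone() expects (vioX names are hypothetical):

    static int
    vioX_vq_done(struct virtqueue *vq)
    {
            struct virtio_softc *vsc = vq->vq_owner;
            int slot, len;

            if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
                    return 0;               /* used ring empty */
            /* ... find the request filed under slot, unload its maps ... */
            virtio_dequeue_commit(vsc, vq, slot);
            return 1;                       /* made progress */
    }
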
1336 virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
1341 KASSERT(sc->sc_child == NULL);
1342 KASSERT(sc->sc_child_state == VIRTIO_NO_CHILD);
1344 sc->sc_child = child;
1345 sc->sc_ipl = ipl;
1347 virtio_negotiate_features(sc, req_features);
1348 snprintb(buf, sizeof(buf), feat_bits, sc->sc_active_features);
1354 virtio_child_attach_finish(struct virtio_softc *sc,
1377 sc->sc_vqs = vqs;
1378 sc->sc_nvqs = nvqs;
1379 sc->sc_config_change = config_change;
1380 sc->sc_intrhand = virtio_vq_intr;
1381 sc->sc_flags = req_flags;
1385 sc->sc_ops->setup_queue(sc, vqs[i].vq_index,
1389 r = sc->sc_ops->alloc_interrupts(sc);
1391 aprint_error_dev(sc->sc_dev,
1396 r = sc->sc_ops->setup_interrupts(sc, 0);
1398 aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
1402 KASSERT(sc->sc_soft_ih == NULL);
1403 if (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) {
1405 if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
1408 sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr,
1409 sc);
1410 if (sc->sc_soft_ih == NULL) {
1411 sc->sc_ops->free_interrupts(sc);
1412 aprint_error_dev(sc->sc_dev,
1418 sc->sc_child_state = VIRTIO_CHILD_ATTACH_FINISHED;
1419 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
1423 if (sc->sc_soft_ih) {
1424 softint_disestablish(sc->sc_soft_ih);
1425 sc->sc_soft_ih = NULL;
1428 sc->sc_ops->free_interrupts(sc);
1430 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
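
virtio_child_attach_start()/virtio_child_attach_finish() bracket a child's attach: start latches the child and negotiates features, finish installs the queues, interrupts and the optional softint, then raises DRIVER_OK. A sketch of the sequence; the vioX names, feature mask and queue geometry are hypothetical:

    static void
    vioX_attach(device_t parent, device_t self, void *aux)
    {
            struct vioX_softc *xsc = device_private(self);
            struct virtio_softc *vsc = device_private(parent);

            virtio_child_attach_start(vsc, self, IPL_BIO,
                VIOX_F_WANTED, VIOX_FLAG_BITS);

            virtio_init_vq_vqdone(vsc, &xsc->sc_vq[0], 0, vioX_vq_done);
            if (virtio_alloc_vq(vsc, &xsc->sc_vq[0], MAXPHYS,
                VIOX_MAX_SEGS, "request") != 0)
                    goto err;

            if (virtio_child_attach_finish(vsc, xsc->sc_vq, 1,
                NULL /* no config_change */, VIRTIO_F_INTR_MPSAFE) != 0) {
                    virtio_free_vq(vsc, &xsc->sc_vq[0]);
                    goto err;
            }
            return;
    err:
            virtio_child_attach_failed(vsc);
    }
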
1435 virtio_child_detach(struct virtio_softc *sc)
1439 if (sc->sc_child == NULL)
1443 virtio_device_reset(sc);
1445 sc->sc_ops->free_interrupts(sc);
1447 if (sc->sc_soft_ih) {
1448 softint_disestablish(sc->sc_soft_ih);
1449 sc->sc_soft_ih = NULL;
1452 sc->sc_vqs = NULL;
1453 sc->sc_child = NULL;
1457 virtio_child_attach_failed(struct virtio_softc *sc)
1459 virtio_child_detach(sc);
1461 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
1463 sc->sc_child_state = VIRTIO_CHILD_ATTACH_FAILED;
1467 virtio_dmat(struct virtio_softc *sc)
1469 return sc->sc_dmat;
1473 virtio_child(struct virtio_softc *sc)
1475 return sc->sc_child;
1479 virtio_intrhand(struct virtio_softc *sc)
1481 return (*sc->sc_intrhand)(sc);
1485 virtio_features(struct virtio_softc *sc)
1487 return sc->sc_active_features;
1491 virtio_attach_failed(struct virtio_softc *sc)
1493 device_t self = sc->sc_dev;
1496 if (sc->sc_childdevid == 0)
1499 if (sc->sc_child == NULL) {
1500 switch (sc->sc_child_state) {
1521 if (sc->sc_child_state != VIRTIO_CHILD_ATTACH_FINISHED) {