1 /* $OpenBSD: vnet.c,v 1.33 2014/07/12 18:44:43 tedu Exp $ */ 2 /* 3 * Copyright (c) 2009 Mark Kettenis 4 * 5 * Permission to use, copy, modify, and distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 #include "bpfilter.h" 19 20 #include <sys/param.h> 21 #include <sys/device.h> 22 #include <sys/malloc.h> 23 #include <sys/mbuf.h> 24 #include <sys/socket.h> 25 #include <sys/sockio.h> 26 #include <sys/systm.h> 27 #include <sys/timeout.h> 28 29 #include <machine/autoconf.h> 30 #include <machine/hypervisor.h> 31 #include <machine/openfirm.h> 32 33 #include <net/if.h> 34 #include <net/if_dl.h> 35 #include <net/if_media.h> 36 #include <net/if_types.h> 37 38 #include <netinet/in.h> 39 #include <netinet/if_ether.h> 40 41 #if NBPFILTER > 0 42 #include <net/bpf.h> 43 #endif 44 45 #include <uvm/uvm_extern.h> 46 47 #include <sparc64/dev/cbusvar.h> 48 #include <sparc64/dev/ldcvar.h> 49 #include <sparc64/dev/viovar.h> 50 51 #ifdef VNET_DEBUG 52 #define DPRINTF(x) printf x 53 #else 54 #define DPRINTF(x) 55 #endif 56 57 #define VNET_TX_ENTRIES 32 58 #define VNET_RX_ENTRIES 32 59 60 struct vnet_attr_info { 61 struct vio_msg_tag tag; 62 uint8_t xfer_mode; 63 uint8_t addr_type; 64 uint16_t ack_freq; 65 uint32_t _reserved1; 66 uint64_t addr; 67 uint64_t mtu; 68 uint64_t _reserved2[3]; 69 }; 70 71 /* Address types. */ 72 #define VNET_ADDR_ETHERMAC 0x01 73 74 /* Sub-Type envelopes. */ 75 #define VNET_MCAST_INFO 0x0101 76 77 #define VNET_NUM_MCAST 7 78 79 struct vnet_mcast_info { 80 struct vio_msg_tag tag; 81 uint8_t set; 82 uint8_t count; 83 uint8_t mcast_addr[VNET_NUM_MCAST][ETHER_ADDR_LEN]; 84 uint32_t _reserved; 85 }; 86 87 struct vnet_desc { 88 struct vio_dring_hdr hdr; 89 uint32_t nbytes; 90 uint32_t ncookies; 91 struct ldc_cookie cookie[2]; 92 }; 93 94 struct vnet_desc_msg { 95 struct vio_msg_tag tag; 96 uint64_t seq_no; 97 uint64_t desc_handle; 98 uint32_t nbytes; 99 uint32_t ncookies; 100 struct ldc_cookie cookie[1]; 101 }; 102 103 struct vnet_dring { 104 bus_dmamap_t vd_map; 105 bus_dma_segment_t vd_seg; 106 struct vnet_desc *vd_desc; 107 int vd_nentries; 108 }; 109 110 struct vnet_dring *vnet_dring_alloc(bus_dma_tag_t, int); 111 void vnet_dring_free(bus_dma_tag_t, struct vnet_dring *); 112 113 /* 114 * For now, we only support vNet 1.0. 115 */ 116 #define VNET_MAJOR 1 117 #define VNET_MINOR 0 118 119 /* 120 * The vNet protocol wants the IP header to be 64-bit aligned, so 121 * define out own variant of ETHER_ALIGN. 122 */ 123 #define VNET_ETHER_ALIGN 6 124 125 struct vnet_soft_desc { 126 int vsd_map_idx; 127 caddr_t vsd_buf; 128 }; 129 130 struct vnet_softc { 131 struct device sc_dv; 132 bus_space_tag_t sc_bustag; 133 bus_dma_tag_t sc_dmatag; 134 135 uint64_t sc_tx_sysino; 136 uint64_t sc_rx_sysino; 137 void *sc_tx_ih; 138 void *sc_rx_ih; 139 140 struct ldc_conn sc_lc; 141 142 uint16_t sc_vio_state; 143 #define VIO_SND_VER_INFO 0x0001 144 #define VIO_ACK_VER_INFO 0x0002 145 #define VIO_RCV_VER_INFO 0x0004 146 #define VIO_SND_ATTR_INFO 0x0008 147 #define VIO_ACK_ATTR_INFO 0x0010 148 #define VIO_RCV_ATTR_INFO 0x0020 149 #define VIO_SND_DRING_REG 0x0040 150 #define VIO_ACK_DRING_REG 0x0080 151 #define VIO_RCV_DRING_REG 0x0100 152 #define VIO_SND_RDX 0x0200 153 #define VIO_ACK_RDX 0x0400 154 #define VIO_RCV_RDX 0x0800 155 156 struct timeout sc_handshake_to; 157 158 uint8_t sc_xfer_mode; 159 160 uint32_t sc_local_sid; 161 uint64_t sc_dring_ident; 162 uint64_t sc_seq_no; 163 164 int sc_tx_cnt; 165 int sc_tx_prod; 166 int sc_tx_cons; 167 168 uint8_t sc_peer_state; 169 170 struct ldc_map *sc_lm; 171 struct vnet_dring *sc_vd; 172 struct vnet_soft_desc *sc_vsd; 173 174 size_t sc_peer_desc_size; 175 struct ldc_cookie sc_peer_dring_cookie; 176 int sc_peer_dring_nentries; 177 178 struct pool sc_pool; 179 180 struct arpcom sc_ac; 181 struct ifmedia sc_media; 182 }; 183 184 int vnet_match(struct device *, void *, void *); 185 void vnet_attach(struct device *, struct device *, void *); 186 187 struct cfattach vnet_ca = { 188 sizeof(struct vnet_softc), vnet_match, vnet_attach 189 }; 190 191 struct cfdriver vnet_cd = { 192 NULL, "vnet", DV_IFNET 193 }; 194 195 int vnet_tx_intr(void *); 196 int vnet_rx_intr(void *); 197 void vnet_handshake(void *); 198 199 void vio_rx_data(struct ldc_conn *, struct ldc_pkt *); 200 void vnet_rx_vio_ctrl(struct vnet_softc *, struct vio_msg *); 201 void vnet_rx_vio_ver_info(struct vnet_softc *, struct vio_msg_tag *); 202 void vnet_rx_vio_attr_info(struct vnet_softc *, struct vio_msg_tag *); 203 void vnet_rx_vio_dring_reg(struct vnet_softc *, struct vio_msg_tag *); 204 void vnet_rx_vio_rdx(struct vnet_softc *sc, struct vio_msg_tag *); 205 void vnet_rx_vio_data(struct vnet_softc *sc, struct vio_msg *); 206 void vnet_rx_vio_desc_data(struct vnet_softc *sc, struct vio_msg_tag *); 207 void vnet_rx_vio_dring_data(struct vnet_softc *sc, struct vio_msg_tag *); 208 209 void vnet_ldc_reset(struct ldc_conn *); 210 void vnet_ldc_start(struct ldc_conn *); 211 212 void vio_sendmsg(struct vnet_softc *, void *, size_t); 213 void vnet_send_ver_info(struct vnet_softc *, uint16_t, uint16_t); 214 void vnet_send_attr_info(struct vnet_softc *); 215 void vnet_send_dring_reg(struct vnet_softc *); 216 void vio_send_rdx(struct vnet_softc *); 217 void vnet_send_dring_data(struct vnet_softc *, uint32_t); 218 219 void vnet_start(struct ifnet *); 220 void vnet_start_desc(struct ifnet *); 221 int vnet_ioctl(struct ifnet *, u_long, caddr_t); 222 void vnet_watchdog(struct ifnet *); 223 224 int vnet_media_change(struct ifnet *); 225 void vnet_media_status(struct ifnet *, struct ifmediareq *); 226 227 void vnet_link_state(struct vnet_softc *sc); 228 229 void vnet_setmulti(struct vnet_softc *, int); 230 231 void vnet_init(struct ifnet *); 232 void vnet_stop(struct ifnet *); 233 234 int 235 vnet_match(struct device *parent, void *match, void *aux) 236 { 237 struct cbus_attach_args *ca = aux; 238 239 if (strcmp(ca->ca_name, "network") == 0) 240 return (1); 241 242 return (0); 243 } 244 245 void 246 vnet_attach(struct device *parent, struct device *self, void *aux) 247 { 248 struct vnet_softc *sc = (struct vnet_softc *)self; 249 struct cbus_attach_args *ca = aux; 250 struct ldc_conn *lc; 251 struct ifnet *ifp; 252 253 sc->sc_bustag = ca->ca_bustag; 254 sc->sc_dmatag = ca->ca_dmatag; 255 256 if (cbus_intr_map(ca->ca_node, ca->ca_tx_ino, &sc->sc_tx_sysino) || 257 cbus_intr_map(ca->ca_node, ca->ca_rx_ino, &sc->sc_rx_sysino)) { 258 printf(": can't map interrupt\n"); 259 return; 260 } 261 printf(": ivec 0x%llx, 0x%llx", sc->sc_tx_sysino, sc->sc_rx_sysino); 262 263 /* 264 * Un-configure queues before registering interrupt handlers, 265 * such that we dont get any stale LDC packets or events. 266 */ 267 hv_ldc_tx_qconf(ca->ca_id, 0, 0); 268 hv_ldc_rx_qconf(ca->ca_id, 0, 0); 269 270 sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_tx_sysino, 271 IPL_NET, 0, vnet_tx_intr, sc, sc->sc_dv.dv_xname); 272 sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_rx_sysino, 273 IPL_NET, 0, vnet_rx_intr, sc, sc->sc_dv.dv_xname); 274 if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) { 275 printf(", can't establish interrupt\n"); 276 return; 277 } 278 279 lc = &sc->sc_lc; 280 lc->lc_id = ca->ca_id; 281 lc->lc_sc = sc; 282 lc->lc_reset = vnet_ldc_reset; 283 lc->lc_start = vnet_ldc_start; 284 lc->lc_rx_data = vio_rx_data; 285 286 timeout_set(&sc->sc_handshake_to, vnet_handshake, sc); 287 288 lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VNET_TX_ENTRIES); 289 if (lc->lc_txq == NULL) { 290 printf(", can't allocate tx queue\n"); 291 return; 292 } 293 294 lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VNET_RX_ENTRIES); 295 if (lc->lc_rxq == NULL) { 296 printf(", can't allocate rx queue\n"); 297 goto free_txqueue; 298 } 299 300 if (OF_getprop(ca->ca_node, "local-mac-address", 301 sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) > 0) 302 printf(", address %s", ether_sprintf(sc->sc_ac.ac_enaddr)); 303 304 /* 305 * Each interface gets its own pool. 306 */ 307 pool_init(&sc->sc_pool, 2048, 0, 0, 0, sc->sc_dv.dv_xname, NULL); 308 309 ifp = &sc->sc_ac.ac_if; 310 ifp->if_softc = sc; 311 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 312 ifp->if_ioctl = vnet_ioctl; 313 ifp->if_start = vnet_start; 314 ifp->if_watchdog = vnet_watchdog; 315 strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ); 316 IFQ_SET_MAXLEN(&ifp->if_snd, 31); /* XXX */ 317 IFQ_SET_READY(&ifp->if_snd); 318 319 ifmedia_init(&sc->sc_media, 0, vnet_media_change, vnet_media_status); 320 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 321 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); 322 323 if_attach(ifp); 324 ether_ifattach(ifp); 325 326 printf("\n"); 327 return; 328 329 free_txqueue: 330 ldc_queue_free(sc->sc_dmatag, lc->lc_txq); 331 } 332 333 int 334 vnet_tx_intr(void *arg) 335 { 336 struct vnet_softc *sc = arg; 337 struct ldc_conn *lc = &sc->sc_lc; 338 uint64_t tx_head, tx_tail, tx_state; 339 340 hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state); 341 if (tx_state != lc->lc_tx_state) { 342 switch (tx_state) { 343 case LDC_CHANNEL_DOWN: 344 DPRINTF(("Tx link down\n")); 345 break; 346 case LDC_CHANNEL_UP: 347 DPRINTF(("Tx link up\n")); 348 break; 349 case LDC_CHANNEL_RESET: 350 DPRINTF(("Tx link reset\n")); 351 break; 352 } 353 lc->lc_tx_state = tx_state; 354 } 355 356 return (1); 357 } 358 359 int 360 vnet_rx_intr(void *arg) 361 { 362 struct vnet_softc *sc = arg; 363 struct ldc_conn *lc = &sc->sc_lc; 364 uint64_t rx_head, rx_tail, rx_state; 365 struct ldc_pkt *lp; 366 int err; 367 368 err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state); 369 if (err == H_EINVAL) 370 return (0); 371 if (err != H_EOK) { 372 printf("hv_ldc_rx_get_state %d\n", err); 373 return (0); 374 } 375 376 if (rx_state != lc->lc_rx_state) { 377 switch (rx_state) { 378 case LDC_CHANNEL_DOWN: 379 DPRINTF(("Rx link down\n")); 380 lc->lc_tx_seqid = 0; 381 lc->lc_state = 0; 382 lc->lc_reset(lc); 383 break; 384 case LDC_CHANNEL_UP: 385 DPRINTF(("Rx link up\n")); 386 timeout_add_msec(&sc->sc_handshake_to, 500); 387 break; 388 case LDC_CHANNEL_RESET: 389 DPRINTF(("Rx link reset\n")); 390 lc->lc_tx_seqid = 0; 391 lc->lc_state = 0; 392 lc->lc_reset(lc); 393 break; 394 } 395 lc->lc_rx_state = rx_state; 396 return (1); 397 } 398 399 if (rx_head == rx_tail) 400 return (0); 401 402 lp = (struct ldc_pkt *)(lc->lc_rxq->lq_va + rx_head); 403 switch (lp->type) { 404 case LDC_CTRL: 405 ldc_rx_ctrl(lc, lp); 406 break; 407 408 case LDC_DATA: 409 ldc_rx_data(lc, lp); 410 break; 411 412 default: 413 DPRINTF(("%0x02/%0x02/%0x02\n", lp->type, lp->stype, 414 lp->ctrl)); 415 ldc_reset(lc); 416 break; 417 } 418 419 if (lc->lc_state == 0) 420 return (1); 421 422 rx_head += sizeof(*lp); 423 rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1); 424 err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head); 425 if (err != H_EOK) 426 printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err); 427 428 return (1); 429 } 430 431 void 432 vnet_handshake(void *arg) 433 { 434 struct vnet_softc *sc = arg; 435 436 ldc_send_vers(&sc->sc_lc); 437 } 438 439 void 440 vio_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp) 441 { 442 struct vio_msg *vm = (struct vio_msg *)lp; 443 444 switch (vm->type) { 445 case VIO_TYPE_CTRL: 446 if ((lp->env & LDC_FRAG_START) == 0 && 447 (lp->env & LDC_FRAG_STOP) == 0) 448 return; 449 vnet_rx_vio_ctrl(lc->lc_sc, vm); 450 break; 451 452 case VIO_TYPE_DATA: 453 if((lp->env & LDC_FRAG_START) == 0) 454 return; 455 vnet_rx_vio_data(lc->lc_sc, vm); 456 break; 457 458 default: 459 DPRINTF(("Unhandled packet type 0x%02x\n", vm->type)); 460 ldc_reset(lc); 461 break; 462 } 463 } 464 465 void 466 vnet_rx_vio_ctrl(struct vnet_softc *sc, struct vio_msg *vm) 467 { 468 struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type; 469 470 switch (tag->stype_env) { 471 case VIO_VER_INFO: 472 vnet_rx_vio_ver_info(sc, tag); 473 break; 474 case VIO_ATTR_INFO: 475 vnet_rx_vio_attr_info(sc, tag); 476 break; 477 case VIO_DRING_REG: 478 vnet_rx_vio_dring_reg(sc, tag); 479 break; 480 case VIO_RDX: 481 vnet_rx_vio_rdx(sc, tag); 482 break; 483 default: 484 DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env)); 485 break; 486 } 487 } 488 489 void 490 vnet_rx_vio_ver_info(struct vnet_softc *sc, struct vio_msg_tag *tag) 491 { 492 struct vio_ver_info *vi = (struct vio_ver_info *)tag; 493 494 switch (vi->tag.stype) { 495 case VIO_SUBTYPE_INFO: 496 DPRINTF(("CTRL/INFO/VER_INFO\n")); 497 498 /* Make sure we're talking to a virtual network device. */ 499 if (vi->dev_class != VDEV_NETWORK && 500 vi->dev_class != VDEV_NETWORK_SWITCH) { 501 /* Huh, we're not talking to a network device? */ 502 printf("Not a network device\n"); 503 vi->tag.stype = VIO_SUBTYPE_NACK; 504 vio_sendmsg(sc, vi, sizeof(*vi)); 505 return; 506 } 507 508 if (vi->major != VNET_MAJOR) { 509 vi->tag.stype = VIO_SUBTYPE_NACK; 510 vi->major = VNET_MAJOR; 511 vi->minor = VNET_MINOR; 512 vio_sendmsg(sc, vi, sizeof(*vi)); 513 return; 514 } 515 516 vi->tag.stype = VIO_SUBTYPE_ACK; 517 vi->tag.sid = sc->sc_local_sid; 518 vi->minor = VNET_MINOR; 519 vio_sendmsg(sc, vi, sizeof(*vi)); 520 sc->sc_vio_state |= VIO_RCV_VER_INFO; 521 break; 522 523 case VIO_SUBTYPE_ACK: 524 DPRINTF(("CTRL/ACK/VER_INFO\n")); 525 if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) { 526 ldc_reset(&sc->sc_lc); 527 break; 528 } 529 sc->sc_vio_state |= VIO_ACK_VER_INFO; 530 break; 531 532 default: 533 DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype)); 534 break; 535 } 536 537 if (ISSET(sc->sc_vio_state, VIO_RCV_VER_INFO) && 538 ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO)) 539 vnet_send_attr_info(sc); 540 } 541 542 void 543 vnet_rx_vio_attr_info(struct vnet_softc *sc, struct vio_msg_tag *tag) 544 { 545 struct vnet_attr_info *ai = (struct vnet_attr_info *)tag; 546 547 switch (ai->tag.stype) { 548 case VIO_SUBTYPE_INFO: 549 DPRINTF(("CTRL/INFO/ATTR_INFO\n")); 550 sc->sc_xfer_mode = ai->xfer_mode; 551 552 ai->tag.stype = VIO_SUBTYPE_ACK; 553 ai->tag.sid = sc->sc_local_sid; 554 vio_sendmsg(sc, ai, sizeof(*ai)); 555 sc->sc_vio_state |= VIO_RCV_ATTR_INFO; 556 break; 557 558 case VIO_SUBTYPE_ACK: 559 DPRINTF(("CTRL/ACK/ATTR_INFO\n")); 560 if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) { 561 ldc_reset(&sc->sc_lc); 562 break; 563 } 564 sc->sc_vio_state |= VIO_ACK_ATTR_INFO; 565 break; 566 567 default: 568 DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype)); 569 break; 570 } 571 572 if (ISSET(sc->sc_vio_state, VIO_RCV_ATTR_INFO) && 573 ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO)) { 574 if (sc->sc_xfer_mode == VIO_DRING_MODE) 575 vnet_send_dring_reg(sc); 576 else 577 vio_send_rdx(sc); 578 } 579 } 580 581 void 582 vnet_rx_vio_dring_reg(struct vnet_softc *sc, struct vio_msg_tag *tag) 583 { 584 struct vio_dring_reg *dr = (struct vio_dring_reg *)tag; 585 586 switch (dr->tag.stype) { 587 case VIO_SUBTYPE_INFO: 588 DPRINTF(("CTRL/INFO/DRING_REG\n")); 589 590 sc->sc_peer_dring_nentries = dr->num_descriptors; 591 sc->sc_peer_desc_size = dr->descriptor_size; 592 sc->sc_peer_dring_cookie = dr->cookie[0]; 593 594 dr->tag.stype = VIO_SUBTYPE_ACK; 595 dr->tag.sid = sc->sc_local_sid; 596 vio_sendmsg(sc, dr, sizeof(*dr)); 597 sc->sc_vio_state |= VIO_RCV_DRING_REG; 598 break; 599 600 case VIO_SUBTYPE_ACK: 601 DPRINTF(("CTRL/ACK/DRING_REG\n")); 602 if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) { 603 ldc_reset(&sc->sc_lc); 604 break; 605 } 606 607 sc->sc_dring_ident = dr->dring_ident; 608 sc->sc_seq_no = 1; 609 610 sc->sc_vio_state |= VIO_ACK_DRING_REG; 611 break; 612 613 default: 614 DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype)); 615 break; 616 } 617 618 if (ISSET(sc->sc_vio_state, VIO_RCV_DRING_REG) && 619 ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG)) 620 vio_send_rdx(sc); 621 } 622 623 void 624 vnet_rx_vio_rdx(struct vnet_softc *sc, struct vio_msg_tag *tag) 625 { 626 struct ifnet *ifp = &sc->sc_ac.ac_if; 627 628 switch(tag->stype) { 629 case VIO_SUBTYPE_INFO: 630 DPRINTF(("CTRL/INFO/RDX\n")); 631 632 tag->stype = VIO_SUBTYPE_ACK; 633 tag->sid = sc->sc_local_sid; 634 vio_sendmsg(sc, tag, sizeof(*tag)); 635 sc->sc_vio_state |= VIO_RCV_RDX; 636 break; 637 638 case VIO_SUBTYPE_ACK: 639 DPRINTF(("CTRL/ACK/RDX\n")); 640 if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) { 641 ldc_reset(&sc->sc_lc); 642 break; 643 } 644 sc->sc_vio_state |= VIO_ACK_RDX; 645 break; 646 647 default: 648 DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype)); 649 break; 650 } 651 652 if (ISSET(sc->sc_vio_state, VIO_RCV_RDX) && 653 ISSET(sc->sc_vio_state, VIO_ACK_RDX)) { 654 /* Link is up! */ 655 vnet_link_state(sc); 656 657 /* Configure multicast now that we can. */ 658 vnet_setmulti(sc, 1); 659 vnet_start(ifp); 660 } 661 } 662 663 void 664 vnet_rx_vio_data(struct vnet_softc *sc, struct vio_msg *vm) 665 { 666 struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type; 667 668 if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) || 669 !ISSET(sc->sc_vio_state, VIO_ACK_RDX)) { 670 DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype, 671 tag->stype_env)); 672 return; 673 } 674 675 switch(tag->stype_env) { 676 case VIO_DESC_DATA: 677 vnet_rx_vio_desc_data(sc, tag); 678 break; 679 680 case VIO_DRING_DATA: 681 vnet_rx_vio_dring_data(sc, tag); 682 break; 683 684 default: 685 DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env)); 686 break; 687 } 688 } 689 690 void 691 vnet_rx_vio_desc_data(struct vnet_softc *sc, struct vio_msg_tag *tag) 692 { 693 struct vnet_desc_msg *dm = (struct vnet_desc_msg *)tag; 694 struct ldc_conn *lc = &sc->sc_lc; 695 struct ldc_map *map = sc->sc_lm; 696 struct ifnet *ifp = &sc->sc_ac.ac_if; 697 struct mbuf *m; 698 caddr_t buf; 699 paddr_t pa; 700 psize_t nbytes; 701 int err; 702 703 switch(tag->stype) { 704 case VIO_SUBTYPE_INFO: 705 buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO); 706 if (buf == NULL) { 707 ifp->if_ierrors++; 708 goto skip; 709 } 710 nbytes = roundup(dm->nbytes, 8); 711 712 pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa); 713 err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN, 714 dm->cookie[0].addr, pa, nbytes, &nbytes); 715 if (err != H_EOK) { 716 pool_put(&sc->sc_pool, buf); 717 ifp->if_ierrors++; 718 goto skip; 719 } 720 721 /* Stupid OBP doesn't align properly. */ 722 m = m_devget(buf, dm->nbytes, ETHER_ALIGN, ifp); 723 pool_put(&sc->sc_pool, buf); 724 if (m == NULL) { 725 ifp->if_ierrors++; 726 goto skip; 727 } 728 729 ifp->if_ipackets++; 730 731 #if NBPFILTER > 0 732 if (ifp->if_bpf) 733 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); 734 #endif /* NBPFILTER > 0 */ 735 736 /* Pass it on. */ 737 ether_input_mbuf(ifp, m); 738 739 skip: 740 dm->tag.stype = VIO_SUBTYPE_ACK; 741 dm->tag.sid = sc->sc_local_sid; 742 vio_sendmsg(sc, dm, sizeof(*dm)); 743 break; 744 745 case VIO_SUBTYPE_ACK: 746 DPRINTF(("DATA/ACK/DESC_DATA\n")); 747 748 if (dm->desc_handle != sc->sc_tx_cons) { 749 printf("out of order\n"); 750 return; 751 } 752 753 map->lm_slot[sc->sc_vsd[sc->sc_tx_cons].vsd_map_idx].entry = 0; 754 map->lm_count--; 755 756 pool_put(&sc->sc_pool, sc->sc_vsd[sc->sc_tx_cons].vsd_buf); 757 758 sc->sc_tx_cons++; 759 sc->sc_tx_cons &= (sc->sc_vd->vd_nentries - 1); 760 sc->sc_tx_cnt--; 761 break; 762 763 case VIO_SUBTYPE_NACK: 764 DPRINTF(("DATA/NACK/DESC_DATA\n")); 765 break; 766 767 default: 768 DPRINTF(("DATA/0x%02x/DESC_DATA\n", tag->stype)); 769 break; 770 } 771 } 772 773 void 774 vnet_rx_vio_dring_data(struct vnet_softc *sc, struct vio_msg_tag *tag) 775 { 776 struct vio_dring_msg *dm = (struct vio_dring_msg *)tag; 777 struct ldc_conn *lc = &sc->sc_lc; 778 struct ifnet *ifp = &sc->sc_ac.ac_if; 779 struct mbuf *m; 780 paddr_t pa; 781 psize_t nbytes; 782 int err; 783 784 switch(tag->stype) { 785 case VIO_SUBTYPE_INFO: 786 { 787 struct vnet_desc desc; 788 uint64_t cookie; 789 paddr_t desc_pa; 790 int idx, ack_end_idx = -1; 791 792 idx = dm->start_idx; 793 for (;;) { 794 cookie = sc->sc_peer_dring_cookie.addr; 795 cookie += idx * sc->sc_peer_desc_size; 796 nbytes = sc->sc_peer_desc_size; 797 pmap_extract(pmap_kernel(), (vaddr_t)&desc, &desc_pa); 798 err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN, cookie, 799 desc_pa, nbytes, &nbytes); 800 if (err != H_EOK) { 801 printf("hv_ldc_copy_in %d\n", err); 802 break; 803 } 804 805 if (desc.hdr.dstate != VIO_DESC_READY) 806 break; 807 808 m = MCLGETI(NULL, M_DONTWAIT, NULL, desc.nbytes); 809 if (!m) 810 break; 811 ifp->if_ipackets++; 812 m->m_pkthdr.rcvif = ifp; 813 m->m_len = m->m_pkthdr.len = desc.nbytes; 814 nbytes = roundup(desc.nbytes + VNET_ETHER_ALIGN, 8); 815 816 pmap_extract(pmap_kernel(), (vaddr_t)m->m_data, &pa); 817 err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN, 818 desc.cookie[0].addr, pa, nbytes, &nbytes); 819 if (err != H_EOK) { 820 m_freem(m); 821 goto skip; 822 } 823 m->m_data += VNET_ETHER_ALIGN; 824 825 #if NBPFILTER > 0 826 if (ifp->if_bpf) 827 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); 828 #endif /* NBPFILTER > 0 */ 829 830 /* Pass it on. */ 831 ether_input_mbuf(ifp, m); 832 833 skip: 834 desc.hdr.dstate = VIO_DESC_DONE; 835 nbytes = sc->sc_peer_desc_size; 836 err = hv_ldc_copy(lc->lc_id, LDC_COPY_OUT, cookie, 837 desc_pa, nbytes, &nbytes); 838 if (err != H_EOK) 839 printf("hv_ldc_copy_out %d\n", err); 840 841 ack_end_idx = idx; 842 if (++idx == sc->sc_peer_dring_nentries) 843 idx = 0; 844 } 845 846 if (ack_end_idx == -1) { 847 dm->tag.stype = VIO_SUBTYPE_NACK; 848 } else { 849 dm->tag.stype = VIO_SUBTYPE_ACK; 850 dm->end_idx = ack_end_idx; 851 } 852 dm->tag.sid = sc->sc_local_sid; 853 dm->proc_state = VIO_DP_STOPPED; 854 vio_sendmsg(sc, dm, sizeof(*dm)); 855 break; 856 } 857 858 case VIO_SUBTYPE_ACK: 859 { 860 struct ldc_map *map = sc->sc_lm; 861 int cons; 862 863 sc->sc_peer_state = dm->proc_state; 864 865 cons = sc->sc_tx_cons; 866 while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) { 867 map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0; 868 map->lm_count--; 869 870 pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf); 871 872 sc->sc_vd->vd_desc[cons++].hdr.dstate = VIO_DESC_FREE; 873 cons &= (sc->sc_vd->vd_nentries - 1); 874 sc->sc_tx_cnt--; 875 } 876 sc->sc_tx_cons = cons; 877 878 if (sc->sc_tx_cnt > 0 && sc->sc_peer_state != VIO_DP_ACTIVE) 879 vnet_send_dring_data(sc, sc->sc_tx_cons); 880 881 if (sc->sc_tx_cnt < sc->sc_vd->vd_nentries) 882 ifp->if_flags &= ~IFF_OACTIVE; 883 if (sc->sc_tx_cnt == 0) 884 ifp->if_timer = 0; 885 886 vnet_start(ifp); 887 break; 888 } 889 890 case VIO_SUBTYPE_NACK: 891 DPRINTF(("DATA/NACK/DRING_DATA\n")); 892 break; 893 894 default: 895 DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype)); 896 break; 897 } 898 } 899 900 void 901 vnet_ldc_reset(struct ldc_conn *lc) 902 { 903 struct vnet_softc *sc = lc->lc_sc; 904 905 timeout_del(&sc->sc_handshake_to); 906 sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0; 907 sc->sc_vio_state = 0; 908 vnet_link_state(sc); 909 } 910 911 void 912 vnet_ldc_start(struct ldc_conn *lc) 913 { 914 struct vnet_softc *sc = lc->lc_sc; 915 916 timeout_del(&sc->sc_handshake_to); 917 vnet_send_ver_info(sc, VNET_MAJOR, VNET_MINOR); 918 } 919 920 void 921 vio_sendmsg(struct vnet_softc *sc, void *msg, size_t len) 922 { 923 struct ldc_conn *lc = &sc->sc_lc; 924 struct ldc_pkt *lp; 925 uint64_t tx_head, tx_tail, tx_state; 926 int err; 927 928 err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state); 929 if (err != H_EOK) 930 return; 931 932 lp = (struct ldc_pkt *)(lc->lc_txq->lq_va + tx_tail); 933 bzero(lp, sizeof(struct ldc_pkt)); 934 lp->type = LDC_DATA; 935 lp->stype = LDC_INFO; 936 KASSERT((len & ~LDC_LEN_MASK) == 0); 937 lp->env = len | LDC_FRAG_STOP | LDC_FRAG_START; 938 lp->seqid = lc->lc_tx_seqid++; 939 bcopy(msg, &lp->major, len); 940 941 tx_tail += sizeof(*lp); 942 tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(*lp)) - 1); 943 err = hv_ldc_tx_set_qtail(lc->lc_id, tx_tail); 944 if (err != H_EOK) 945 printf("%s: hv_ldc_tx_set_qtail: %d\n", __func__, err); 946 } 947 948 void 949 vnet_send_ver_info(struct vnet_softc *sc, uint16_t major, uint16_t minor) 950 { 951 struct vio_ver_info vi; 952 953 bzero(&vi, sizeof(vi)); 954 vi.tag.type = VIO_TYPE_CTRL; 955 vi.tag.stype = VIO_SUBTYPE_INFO; 956 vi.tag.stype_env = VIO_VER_INFO; 957 vi.tag.sid = sc->sc_local_sid; 958 vi.major = major; 959 vi.minor = minor; 960 vi.dev_class = VDEV_NETWORK; 961 vio_sendmsg(sc, &vi, sizeof(vi)); 962 963 sc->sc_vio_state |= VIO_SND_VER_INFO; 964 } 965 966 void 967 vnet_send_attr_info(struct vnet_softc *sc) 968 { 969 struct vnet_attr_info ai; 970 int i; 971 972 bzero(&ai, sizeof(ai)); 973 ai.tag.type = VIO_TYPE_CTRL; 974 ai.tag.stype = VIO_SUBTYPE_INFO; 975 ai.tag.stype_env = VIO_ATTR_INFO; 976 ai.tag.sid = sc->sc_local_sid; 977 ai.xfer_mode = VIO_DRING_MODE; 978 ai.addr_type = VNET_ADDR_ETHERMAC; 979 ai.ack_freq = 0; 980 ai.addr = 0; 981 for (i = 0; i < ETHER_ADDR_LEN; i++) { 982 ai.addr <<= 8; 983 ai.addr |= sc->sc_ac.ac_enaddr[i]; 984 } 985 ai.mtu = ETHER_MAX_LEN - ETHER_CRC_LEN; 986 vio_sendmsg(sc, &ai, sizeof(ai)); 987 988 sc->sc_vio_state |= VIO_SND_ATTR_INFO; 989 } 990 991 void 992 vnet_send_dring_reg(struct vnet_softc *sc) 993 { 994 struct vio_dring_reg dr; 995 996 bzero(&dr, sizeof(dr)); 997 dr.tag.type = VIO_TYPE_CTRL; 998 dr.tag.stype = VIO_SUBTYPE_INFO; 999 dr.tag.stype_env = VIO_DRING_REG; 1000 dr.tag.sid = sc->sc_local_sid; 1001 dr.dring_ident = 0; 1002 dr.num_descriptors = sc->sc_vd->vd_nentries; 1003 dr.descriptor_size = sizeof(struct vnet_desc); 1004 dr.options = VIO_TX_RING; 1005 dr.ncookies = 1; 1006 dr.cookie[0].addr = 0; 1007 dr.cookie[0].size = PAGE_SIZE; 1008 vio_sendmsg(sc, &dr, sizeof(dr)); 1009 1010 sc->sc_vio_state |= VIO_SND_DRING_REG; 1011 }; 1012 1013 void 1014 vio_send_rdx(struct vnet_softc *sc) 1015 { 1016 struct vio_msg_tag tag; 1017 1018 tag.type = VIO_TYPE_CTRL; 1019 tag.stype = VIO_SUBTYPE_INFO; 1020 tag.stype_env = VIO_RDX; 1021 tag.sid = sc->sc_local_sid; 1022 vio_sendmsg(sc, &tag, sizeof(tag)); 1023 1024 sc->sc_vio_state |= VIO_SND_RDX; 1025 } 1026 1027 void 1028 vnet_send_dring_data(struct vnet_softc *sc, uint32_t start_idx) 1029 { 1030 struct vio_dring_msg dm; 1031 1032 bzero(&dm, sizeof(dm)); 1033 dm.tag.type = VIO_TYPE_DATA; 1034 dm.tag.stype = VIO_SUBTYPE_INFO; 1035 dm.tag.stype_env = VIO_DRING_DATA; 1036 dm.tag.sid = sc->sc_local_sid; 1037 dm.seq_no = sc->sc_seq_no++; 1038 dm.dring_ident = sc->sc_dring_ident; 1039 dm.start_idx = start_idx; 1040 dm.end_idx = -1; 1041 vio_sendmsg(sc, &dm, sizeof(dm)); 1042 1043 sc->sc_peer_state = VIO_DP_ACTIVE; 1044 } 1045 1046 void 1047 vnet_start(struct ifnet *ifp) 1048 { 1049 struct vnet_softc *sc = ifp->if_softc; 1050 struct ldc_conn *lc = &sc->sc_lc; 1051 struct ldc_map *map = sc->sc_lm; 1052 struct mbuf *m; 1053 paddr_t pa; 1054 caddr_t buf; 1055 uint64_t tx_head, tx_tail, tx_state; 1056 int err, desc; 1057 1058 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1059 return; 1060 1061 if (IFQ_IS_EMPTY(&ifp->if_snd)) 1062 return; 1063 1064 /* 1065 * We cannot transmit packets until a VIO connection has been 1066 * established. 1067 */ 1068 if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) || 1069 !ISSET(sc->sc_vio_state, VIO_ACK_RDX)) 1070 return; 1071 1072 /* 1073 * Make sure there is room in the LDC transmit queue to send a 1074 * DRING_DATA message. 1075 */ 1076 err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state); 1077 if (err != H_EOK) 1078 return; 1079 tx_tail += sizeof(struct ldc_pkt); 1080 tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(struct ldc_pkt)) - 1); 1081 if (tx_tail == tx_head) { 1082 ifp->if_flags |= IFF_OACTIVE; 1083 return; 1084 } 1085 1086 if (sc->sc_xfer_mode == VIO_DESC_MODE) { 1087 vnet_start_desc(ifp); 1088 return; 1089 } 1090 1091 desc = sc->sc_tx_prod; 1092 while (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_FREE) { 1093 IFQ_POLL(&ifp->if_snd, m); 1094 if (m == NULL) 1095 break; 1096 1097 if (sc->sc_tx_cnt >= sc->sc_vd->vd_nentries || 1098 map->lm_count >= map->lm_nentries) { 1099 ifp->if_flags |= IFF_OACTIVE; 1100 break; 1101 } 1102 1103 buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO); 1104 if (buf == NULL) { 1105 ifp->if_flags |= IFF_OACTIVE; 1106 break; 1107 } 1108 m_copydata(m, 0, m->m_pkthdr.len, buf + VNET_ETHER_ALIGN); 1109 IFQ_DEQUEUE(&ifp->if_snd, m); 1110 1111 #if NBPFILTER > 0 1112 /* 1113 * If BPF is listening on this interface, let it see the 1114 * packet before we commit it to the wire. 1115 */ 1116 if (ifp->if_bpf) 1117 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1118 #endif 1119 1120 pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa); 1121 KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK)); 1122 while (map->lm_slot[map->lm_next].entry != 0) { 1123 map->lm_next++; 1124 map->lm_next &= (map->lm_nentries - 1); 1125 } 1126 map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK); 1127 map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR; 1128 map->lm_count++; 1129 1130 sc->sc_vd->vd_desc[desc].nbytes = max(m->m_pkthdr.len, 60); 1131 sc->sc_vd->vd_desc[desc].ncookies = 1; 1132 sc->sc_vd->vd_desc[desc].cookie[0].addr = 1133 map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK); 1134 sc->sc_vd->vd_desc[desc].cookie[0].size = 2048; 1135 membar(Sync); 1136 sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY; 1137 1138 sc->sc_vsd[desc].vsd_map_idx = map->lm_next; 1139 sc->sc_vsd[desc].vsd_buf = buf; 1140 1141 desc++; 1142 desc &= (sc->sc_vd->vd_nentries - 1); 1143 sc->sc_tx_cnt++; 1144 1145 m_freem(m); 1146 } 1147 1148 if (sc->sc_tx_cnt > 0 && sc->sc_peer_state != VIO_DP_ACTIVE) { 1149 vnet_send_dring_data(sc, sc->sc_tx_prod); 1150 ifp->if_timer = 5; 1151 } 1152 1153 sc->sc_tx_prod = desc; 1154 } 1155 1156 void 1157 vnet_start_desc(struct ifnet *ifp) 1158 { 1159 struct vnet_softc *sc = ifp->if_softc; 1160 struct ldc_map *map = sc->sc_lm; 1161 struct vnet_desc_msg dm; 1162 struct mbuf *m; 1163 paddr_t pa; 1164 caddr_t buf; 1165 1166 for (;;) { 1167 IFQ_POLL(&ifp->if_snd, m); 1168 if (m == NULL) 1169 break; 1170 1171 if (sc->sc_tx_cnt >= sc->sc_vd->vd_nentries || 1172 map->lm_count >= map->lm_nentries) { 1173 ifp->if_flags |= IFF_OACTIVE; 1174 return; 1175 } 1176 1177 buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO); 1178 if (buf == NULL) { 1179 ifp->if_flags |= IFF_OACTIVE; 1180 return; 1181 } 1182 m_copydata(m, 0, m->m_pkthdr.len, buf); 1183 IFQ_DEQUEUE(&ifp->if_snd, m); 1184 1185 #if NBPFILTER > 0 1186 /* 1187 * If BPF is listening on this interface, let it see the 1188 * packet before we commit it to the wire. 1189 */ 1190 if (ifp->if_bpf) 1191 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1192 #endif 1193 1194 pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa); 1195 KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK)); 1196 while (map->lm_slot[map->lm_next].entry != 0) { 1197 map->lm_next++; 1198 map->lm_next &= (map->lm_nentries - 1); 1199 } 1200 map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK); 1201 map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR; 1202 map->lm_count++; 1203 1204 sc->sc_vsd[sc->sc_tx_prod].vsd_map_idx = map->lm_next; 1205 sc->sc_vsd[sc->sc_tx_prod].vsd_buf = buf; 1206 1207 bzero(&dm, sizeof(dm)); 1208 dm.tag.type = VIO_TYPE_DATA; 1209 dm.tag.stype = VIO_SUBTYPE_INFO; 1210 dm.tag.stype_env = VIO_DESC_DATA; 1211 dm.tag.sid = sc->sc_local_sid; 1212 dm.seq_no = sc->sc_seq_no++; 1213 dm.desc_handle = sc->sc_tx_prod; 1214 dm.nbytes = max(m->m_pkthdr.len, 60); 1215 dm.ncookies = 1; 1216 dm.cookie[0].addr = 1217 map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK); 1218 dm.cookie[0].size = 2048; 1219 vio_sendmsg(sc, &dm, sizeof(dm)); 1220 1221 sc->sc_tx_prod++; 1222 sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1); 1223 sc->sc_tx_cnt++; 1224 1225 m_freem(m); 1226 } 1227 } 1228 1229 int 1230 vnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1231 { 1232 struct vnet_softc *sc = ifp->if_softc; 1233 struct ifaddr *ifa = (struct ifaddr *)data; 1234 struct ifreq *ifr = (struct ifreq *)data; 1235 int s, error = 0; 1236 1237 s = splnet(); 1238 1239 switch (cmd) { 1240 case SIOCSIFADDR: 1241 ifp->if_flags |= IFF_UP; 1242 #ifdef INET 1243 if (ifa->ifa_addr->sa_family == AF_INET) 1244 arp_ifinit(&sc->sc_ac, ifa); 1245 #endif 1246 /* FALLTHROUGH */ 1247 case SIOCSIFFLAGS: 1248 if (ifp->if_flags & IFF_UP) { 1249 if ((ifp->if_flags & IFF_RUNNING) == 0) 1250 vnet_init(ifp); 1251 } else { 1252 if (ifp->if_flags & IFF_RUNNING) 1253 vnet_stop(ifp); 1254 } 1255 break; 1256 1257 case SIOCGIFMEDIA: 1258 case SIOCSIFMEDIA: 1259 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 1260 break; 1261 1262 case SIOCADDMULTI: 1263 case SIOCDELMULTI: 1264 /* 1265 * XXX Removing all multicast addresses and adding 1266 * most of them back, is somewhat retarded. 1267 */ 1268 vnet_setmulti(sc, 0); 1269 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data); 1270 vnet_setmulti(sc, 1); 1271 if (error == ENETRESET) 1272 error = 0; 1273 break; 1274 1275 default: 1276 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data); 1277 } 1278 1279 splx(s); 1280 return (error); 1281 } 1282 1283 void 1284 vnet_watchdog(struct ifnet *ifp) 1285 { 1286 struct vnet_softc *sc = ifp->if_softc; 1287 1288 printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname); 1289 } 1290 1291 int 1292 vnet_media_change(struct ifnet *ifp) 1293 { 1294 return (0); 1295 } 1296 1297 void 1298 vnet_media_status(struct ifnet *ifp, struct ifmediareq *imr) 1299 { 1300 imr->ifm_active = IFM_ETHER | IFM_AUTO; 1301 imr->ifm_status = IFM_AVALID; 1302 1303 if (LINK_STATE_IS_UP(ifp->if_link_state) && 1304 ifp->if_flags & IFF_UP) 1305 imr->ifm_status |= IFM_ACTIVE; 1306 } 1307 1308 void 1309 vnet_link_state(struct vnet_softc *sc) 1310 { 1311 struct ifnet *ifp = &sc->sc_ac.ac_if; 1312 int link_state = LINK_STATE_DOWN; 1313 1314 if (ISSET(sc->sc_vio_state, VIO_RCV_RDX) && 1315 ISSET(sc->sc_vio_state, VIO_ACK_RDX)) 1316 link_state = LINK_STATE_FULL_DUPLEX; 1317 if (ifp->if_link_state != link_state) { 1318 ifp->if_link_state = link_state; 1319 if_link_state_change(ifp); 1320 } 1321 } 1322 1323 void 1324 vnet_setmulti(struct vnet_softc *sc, int set) 1325 { 1326 struct arpcom *ac = &sc->sc_ac; 1327 struct ether_multi *enm; 1328 struct ether_multistep step; 1329 struct vnet_mcast_info mi; 1330 int count = 0; 1331 1332 if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) || 1333 !ISSET(sc->sc_vio_state, VIO_ACK_RDX)) 1334 return; 1335 1336 bzero(&mi, sizeof(mi)); 1337 mi.tag.type = VIO_TYPE_CTRL; 1338 mi.tag.stype = VIO_SUBTYPE_INFO; 1339 mi.tag.stype_env = VNET_MCAST_INFO; 1340 mi.tag.sid = sc->sc_local_sid; 1341 mi.set = set ? 1 : 0; 1342 ETHER_FIRST_MULTI(step, ac, enm); 1343 while (enm != NULL) { 1344 /* XXX What about multicast ranges? */ 1345 bcopy(enm->enm_addrlo, mi.mcast_addr[count], ETHER_ADDR_LEN); 1346 ETHER_NEXT_MULTI(step, enm); 1347 1348 count++; 1349 if (count < VNET_NUM_MCAST) 1350 continue; 1351 1352 mi.count = VNET_NUM_MCAST; 1353 vio_sendmsg(sc, &mi, sizeof(mi)); 1354 count = 0; 1355 } 1356 1357 if (count > 0) { 1358 mi.count = count; 1359 vio_sendmsg(sc, &mi, sizeof(mi)); 1360 } 1361 } 1362 1363 void 1364 vnet_init(struct ifnet *ifp) 1365 { 1366 struct vnet_softc *sc = ifp->if_softc; 1367 struct ldc_conn *lc = &sc->sc_lc; 1368 int err; 1369 1370 sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048); 1371 if (sc->sc_lm == NULL) 1372 return; 1373 1374 err = hv_ldc_set_map_table(lc->lc_id, 1375 sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries); 1376 if (err != H_EOK) { 1377 printf("hv_ldc_set_map_table %d\n", err); 1378 return; 1379 } 1380 1381 sc->sc_vd = vnet_dring_alloc(sc->sc_dmatag, 128); 1382 if (sc->sc_vd == NULL) 1383 return; 1384 sc->sc_vsd = malloc(128 * sizeof(*sc->sc_vsd), M_DEVBUF, M_NOWAIT); 1385 if (sc->sc_vsd == NULL) 1386 return; 1387 1388 sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr; 1389 sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK; 1390 sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW; 1391 sc->sc_lm->lm_next = 1; 1392 sc->sc_lm->lm_count = 1; 1393 1394 err = hv_ldc_tx_qconf(lc->lc_id, 1395 lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries); 1396 if (err != H_EOK) 1397 printf("hv_ldc_tx_qconf %d\n", err); 1398 1399 err = hv_ldc_rx_qconf(lc->lc_id, 1400 lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries); 1401 if (err != H_EOK) 1402 printf("hv_ldc_rx_qconf %d\n", err); 1403 1404 cbus_intr_setenabled(sc->sc_tx_sysino, INTR_ENABLED); 1405 cbus_intr_setenabled(sc->sc_rx_sysino, INTR_ENABLED); 1406 1407 ldc_send_vers(lc); 1408 1409 ifp->if_flags |= IFF_RUNNING; 1410 ifp->if_flags &= ~IFF_OACTIVE; 1411 } 1412 1413 void 1414 vnet_stop(struct ifnet *ifp) 1415 { 1416 struct vnet_softc *sc = ifp->if_softc; 1417 struct ldc_conn *lc = &sc->sc_lc; 1418 1419 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1420 ifp->if_timer = 0; 1421 1422 cbus_intr_setenabled(sc->sc_tx_sysino, INTR_DISABLED); 1423 cbus_intr_setenabled(sc->sc_rx_sysino, INTR_DISABLED); 1424 1425 hv_ldc_tx_qconf(lc->lc_id, 0, 0); 1426 hv_ldc_rx_qconf(lc->lc_id, 0, 0); 1427 lc->lc_tx_state = lc->lc_rx_state = LDC_CHANNEL_DOWN; 1428 1429 vnet_dring_free(sc->sc_dmatag, sc->sc_vd); 1430 1431 hv_ldc_set_map_table(lc->lc_id, 0, 0); 1432 ldc_map_free(sc->sc_dmatag, sc->sc_lm); 1433 } 1434 1435 struct vnet_dring * 1436 vnet_dring_alloc(bus_dma_tag_t t, int nentries) 1437 { 1438 struct vnet_dring *vd; 1439 bus_size_t size; 1440 caddr_t va; 1441 int nsegs; 1442 int i; 1443 1444 vd = malloc(sizeof(struct vnet_dring), M_DEVBUF, M_NOWAIT); 1445 if (vd == NULL) 1446 return NULL; 1447 1448 size = roundup(nentries * sizeof(struct vnet_desc), PAGE_SIZE); 1449 1450 if (bus_dmamap_create(t, size, 1, size, 0, 1451 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0) 1452 return (NULL); 1453 1454 if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1, 1455 &nsegs, BUS_DMA_NOWAIT) != 0) 1456 goto destroy; 1457 1458 if (bus_dmamem_map(t, &vd->vd_seg, 1, size, &va, 1459 BUS_DMA_NOWAIT) != 0) 1460 goto free; 1461 1462 if (bus_dmamap_load(t, vd->vd_map, va, size, NULL, 1463 BUS_DMA_NOWAIT) != 0) 1464 goto unmap; 1465 1466 vd->vd_desc = (struct vnet_desc *)va; 1467 vd->vd_nentries = nentries; 1468 bzero(vd->vd_desc, nentries * sizeof(struct vnet_desc)); 1469 for (i = 0; i < vd->vd_nentries; i++) 1470 vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE; 1471 return (vd); 1472 1473 unmap: 1474 bus_dmamem_unmap(t, va, size); 1475 free: 1476 bus_dmamem_free(t, &vd->vd_seg, 1); 1477 destroy: 1478 bus_dmamap_destroy(t, vd->vd_map); 1479 1480 return (NULL); 1481 } 1482 1483 void 1484 vnet_dring_free(bus_dma_tag_t t, struct vnet_dring *vd) 1485 { 1486 bus_size_t size; 1487 1488 size = vd->vd_nentries * sizeof(struct vnet_desc); 1489 size = roundup(size, PAGE_SIZE); 1490 1491 bus_dmamap_unload(t, vd->vd_map); 1492 bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size); 1493 bus_dmamem_free(t, &vd->vd_seg, 1); 1494 bus_dmamap_destroy(t, vd->vd_map); 1495 free(vd, M_DEVBUF, 0); 1496 } 1497