Lines Matching defs:priv (symbol search hits from the FreeBSD gve(4) driver; the number at the start of each hit is its line number in the source file)
54 gve_verify_driver_compatibility(struct gve_priv *priv)
60 err = gve_dma_alloc_coherent(priv, sizeof(struct gve_driver_info),
90 err = gve_adminq_verify_driver_compatibility(priv,
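Note: the three gve_verify_driver_compatibility hits above outline a common adminq pattern: allocate a DMA-coherent gve_driver_info, hand it to the device, then free it. A minimal sketch of that flow, assuming the signatures shown in the matches and a gve_dma_handle exposing cpu_addr/bus_addr; the driver-info fields and the exact adminq argument list fall in the gaps and are assumptions:

/*
 * Sketch only: reconstructed from the matched lines; gve_driver_info
 * field setup and the adminq argument list are assumptions.
 */
static int
gve_verify_driver_compatibility(struct gve_priv *priv)
{
    struct gve_dma_handle driver_info_mem;
    struct gve_driver_info *driver_info;
    int err;

    err = gve_dma_alloc_coherent(priv, sizeof(struct gve_driver_info),
        PAGE_SIZE, &driver_info_mem);
    if (err != 0)
        return (err);

    driver_info = driver_info_mem.cpu_addr;
    /* ... populate driver_info (OS type, version, ...) -- not in the matches ... */

    err = gve_adminq_verify_driver_compatibility(priv,
        sizeof(struct gve_driver_info), driver_info_mem.bus_addr); /* args assumed */

    gve_dma_free_coherent(&driver_info_mem);
    return (err);
}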
103 gve_up(struct gve_priv *priv)
105 if_t ifp = priv->ifp;
108 GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
110 if (device_is_attached(priv->dev) == 0) {
111 device_printf(priv->dev, "Cannot bring the iface up when detached\n");
115 if (gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
128 if (gve_is_qpl(priv)) {
129 err = gve_register_qpls(priv);
134 err = gve_create_rx_rings(priv);
138 err = gve_create_tx_rings(priv);
144 if (!gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
146 gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
149 gve_unmask_all_queue_irqs(priv);
150 gve_set_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
151 priv->interface_up_cnt++;
155 gve_schedule_reset(priv);
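Note: read in order, the gve_up hits give the bring-up sequence: sanity checks, QPL registration when the queue-page-list format is in use, RX then TX ring creation, link-state and IRQ unmasking, with any failure funneled to gve_schedule_reset. A condensed skeleton assembled from those matched lines; everything in the gaps (return values, the LINK_STATE_UP call, drv-flag updates) is an assumption:

static int
gve_up(struct gve_priv *priv)
{
    if_t ifp = priv->ifp;
    int err;

    GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);

    if (device_is_attached(priv->dev) == 0) {
        device_printf(priv->dev, "Cannot bring the iface up when detached\n");
        return (ENXIO);                 /* error value assumed */
    }
    if (gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
        return (0);

    if (gve_is_qpl(priv)) {
        err = gve_register_qpls(priv);
        if (err != 0)
            goto reset;
    }
    err = gve_create_rx_rings(priv);
    if (err != 0)
        goto reset;
    err = gve_create_tx_rings(priv);
    if (err != 0)
        goto reset;

    if (!gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
        if_link_state_change(ifp, LINK_STATE_UP);   /* falls in a gap; assumed */
        gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
    }

    gve_unmask_all_queue_irqs(priv);
    gve_set_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
    priv->interface_up_cnt++;
    return (0);

reset:
    gve_schedule_reset(priv);
    return (err);
}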
160 gve_down(struct gve_priv *priv)
162 GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
164 if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
167 if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
168 if_link_state_change(priv->ifp, LINK_STATE_DOWN);
169 gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
172 if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
174 if (gve_destroy_rx_rings(priv) != 0)
177 if (gve_destroy_tx_rings(priv) != 0)
180 if (gve_is_qpl(priv)) {
181 if (gve_unregister_qpls(priv) != 0)
185 if (gve_is_gqi(priv))
186 gve_mask_all_queue_irqs(priv);
187 gve_clear_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
188 priv->interface_down_cnt++;
192 gve_schedule_reset(priv);
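Note: gve_down mirrors that sequence in reverse, again falling back to gve_schedule_reset when a destroy step fails. A skeleton stitched together from the matched lines only (the return type and error labels are assumptions):

static void
gve_down(struct gve_priv *priv)
{
    GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);

    if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
        return;

    if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
        if_link_state_change(priv->ifp, LINK_STATE_DOWN);
        gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
    }

    if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

    if (gve_destroy_rx_rings(priv) != 0)
        goto reset;
    if (gve_destroy_tx_rings(priv) != 0)
        goto reset;
    if (gve_is_qpl(priv) && gve_unregister_qpls(priv) != 0)
        goto reset;

    if (gve_is_gqi(priv))
        gve_mask_all_queue_irqs(priv);
    gve_clear_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
    priv->interface_down_cnt++;
    return;

reset:
    gve_schedule_reset(priv);
}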
198 struct gve_priv *priv = if_getsoftc(ifp);
201 if ((new_mtu > priv->max_mtu) || (new_mtu < ETHERMIN)) {
202 device_printf(priv->dev, "Invalid new MTU setting. new mtu: %d max mtu: %d min mtu: %d\n",
203 new_mtu, priv->max_mtu, ETHERMIN);
207 err = gve_adminq_set_mtu(priv, new_mtu);
210 device_printf(priv->dev, "MTU set to %d\n", new_mtu);
213 device_printf(priv->dev, "Failed to set MTU to %d\n", new_mtu);
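Note: the MTU hits show a bounds check against max_mtu/ETHERMIN followed by an admin-queue request; only on success does the stack-visible MTU change. A hedged sketch (the function name, prototype, error codes, and the if_setmtu placement are assumptions; only the checks and printfs appear in the matches):

static int
gve_change_mtu(if_t ifp, int new_mtu)   /* hypothetical name/prototype */
{
    struct gve_priv *priv = if_getsoftc(ifp);
    int err;

    if ((new_mtu > priv->max_mtu) || (new_mtu < ETHERMIN)) {
        device_printf(priv->dev,
            "Invalid new MTU setting. new mtu: %d max mtu: %d min mtu: %d\n",
            new_mtu, priv->max_mtu, ETHERMIN);
        return (EINVAL);                /* error code assumed */
    }

    err = gve_adminq_set_mtu(priv, new_mtu);
    if (err == 0) {
        device_printf(priv->dev, "MTU set to %d\n", new_mtu);
        if_setmtu(ifp, new_mtu);        /* assumed to follow the success printf */
    } else {
        device_printf(priv->dev, "Failed to set MTU to %d\n", new_mtu);
    }
    return (err);
}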
222 struct gve_priv *priv = (struct gve_priv *)arg;
224 if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP)) {
225 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
226 gve_up(priv);
227 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
234 struct gve_priv *priv;
238 priv = if_getsoftc(ifp);
245 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
246 gve_down(priv);
248 rc = gve_up(priv);
249 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
255 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
256 rc = gve_up(priv);
257 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
261 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
262 gve_down(priv);
263 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
271 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
272 gve_down(priv);
274 rc = gve_up(priv);
275 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
281 rc = ifmedia_ioctl(ifp, ifr, &priv->media, command);
295 struct gve_priv *priv = if_getsoftc(ifp);
297 device_printf(priv->dev, "Media change not supported\n");
304 struct gve_priv *priv = if_getsoftc(ifp);
306 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
311 if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
318 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
324 struct gve_priv *priv;
332 priv = if_getsoftc(ifp);
334 gve_accum_stats(priv, &rpackets, &rbytes, &rx_dropped_pkt, &tpackets,
362 gve_setup_ifnet(device_t dev, struct gve_priv *priv)
367 ifp = priv->ifp = if_alloc(IFT_ETHER);
369 if_setsoftc(ifp, priv);
382 if (!gve_is_gqi(priv) && !gve_is_qpl(priv)) {
394 ifmedia_init(&priv->media, IFM_IMASK, gve_media_change, gve_media_status);
403 if ((priv->supported_features & GVE_SUP_JUMBO_FRAMES_MASK) != 0)
410 device_printf(priv->dev, "Setting initial MTU to %d\n", priv->max_mtu);
411 if_setmtu(ifp, priv->max_mtu);
413 ether_ifattach(ifp, priv->mac);
415 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
416 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
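Note: the gve_setup_ifnet hits cover the skeleton of ifnet creation: allocate, bind the softc, initialize media, pick the initial MTU, and attach. A condensed sketch; the gaps (interface naming, ioctl/transmit/capability registration, the jumbo-frame branch body, the non-GQI/non-QPL branch) are omitted or marked as assumptions:

static void
gve_setup_ifnet(device_t dev, struct gve_priv *priv)
{
    if_t ifp;

    ifp = priv->ifp = if_alloc(IFT_ETHER);
    if_setsoftc(ifp, priv);
    if_initname(ifp, device_get_name(dev), device_get_unit(dev)); /* assumed */
    /* ioctl/transmit/capability setup falls in the gaps and is omitted here */

    ifmedia_init(&priv->media, IFM_IMASK, gve_media_change, gve_media_status);

    if ((priv->supported_features & GVE_SUP_JUMBO_FRAMES_MASK) != 0) {
        /* body not in the matches; presumably advertises jumbo-MTU support */
    }

    device_printf(priv->dev, "Setting initial MTU to %d\n", priv->max_mtu);
    if_setmtu(ifp, priv->max_mtu);

    ether_ifattach(ifp, priv->mac);

    ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
}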
420 gve_alloc_counter_array(struct gve_priv *priv)
424 err = gve_dma_alloc_coherent(priv, sizeof(uint32_t) * priv->num_event_counters,
425 PAGE_SIZE, &priv->counter_array_mem);
429 priv->counters = priv->counter_array_mem.cpu_addr;
434 gve_free_counter_array(struct gve_priv *priv)
436 if (priv->counters != NULL)
437 gve_dma_free_coherent(&priv->counter_array_mem);
438 priv->counter_array_mem = (struct gve_dma_handle){};
442 gve_alloc_irq_db_array(struct gve_priv *priv)
446 err = gve_dma_alloc_coherent(priv,
447 sizeof(struct gve_irq_db) * (priv->num_queues), PAGE_SIZE,
448 &priv->irqs_db_mem);
452 priv->irq_db_indices = priv->irqs_db_mem.cpu_addr;
457 gve_free_irq_db_array(struct gve_priv *priv)
459 if (priv->irq_db_indices != NULL)
460 gve_dma_free_coherent(&priv->irqs_db_mem);
461 priv->irqs_db_mem = (struct gve_dma_handle){};
465 gve_free_rings(struct gve_priv *priv)
467 gve_free_irqs(priv);
468 gve_free_tx_rings(priv);
469 gve_free_rx_rings(priv);
470 if (gve_is_qpl(priv))
471 gve_free_qpls(priv);
475 gve_alloc_rings(struct gve_priv *priv)
479 if (gve_is_qpl(priv)) {
480 err = gve_alloc_qpls(priv);
485 err = gve_alloc_rx_rings(priv);
489 err = gve_alloc_tx_rings(priv);
493 err = gve_alloc_irqs(priv);
500 gve_free_rings(priv);
505 gve_deconfigure_resources(struct gve_priv *priv)
509 if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK)) {
510 err = gve_adminq_deconfigure_device_resources(priv);
512 device_printf(priv->dev, "Failed to deconfigure device resources: err=%d\n",
517 device_printf(priv->dev, "Deconfigured device resources\n");
518 gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
521 gve_free_irq_db_array(priv);
522 gve_free_counter_array(priv);
524 if (priv->ptype_lut_dqo) {
525 free(priv->ptype_lut_dqo, M_GVE);
526 priv->ptype_lut_dqo = NULL;
531 gve_configure_resources(struct gve_priv *priv)
535 if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK))
538 err = gve_alloc_counter_array(priv);
542 err = gve_alloc_irq_db_array(priv);
546 err = gve_adminq_configure_device_resources(priv);
548 device_printf(priv->dev, "Failed to configure device resources: err=%d\n",
554 if (!gve_is_gqi(priv)) {
555 priv->ptype_lut_dqo = malloc(sizeof(*priv->ptype_lut_dqo), M_GVE,
558 err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
560 device_printf(priv->dev, "Failed to configure ptype lut: err=%d\n",
566 gve_set_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
568 device_printf(priv->dev, "Configured device resources\n");
572 gve_deconfigure_resources(priv);
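Note: the configure/deconfigure hits lay out both directions of resource setup: gve_configure_resources allocates the event-counter and IRQ-doorbell arrays, asks the device to configure them, and (for the non-GQI, i.e. DQO, formats) fetches the packet-type lookup table, while gve_deconfigure_resources undoes each step behind NULL/flag guards. A skeleton of the configure side assembled from the matched lines; the malloc flags and error labels are assumptions:

static int
gve_configure_resources(struct gve_priv *priv)
{
    int err;

    if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK))
        return (0);

    err = gve_alloc_counter_array(priv);
    if (err != 0)
        goto abort;

    err = gve_alloc_irq_db_array(priv);
    if (err != 0)
        goto abort;

    err = gve_adminq_configure_device_resources(priv);
    if (err != 0) {
        device_printf(priv->dev,
            "Failed to configure device resources: err=%d\n", err);
        goto abort;
    }

    if (!gve_is_gqi(priv)) {
        /* DQO queue formats need the packet-type lookup table */
        priv->ptype_lut_dqo = malloc(sizeof(*priv->ptype_lut_dqo), M_GVE,
            M_WAITOK | M_ZERO);                 /* flags assumed */
        err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
        if (err != 0) {
            device_printf(priv->dev,
                "Failed to configure ptype lut: err=%d\n", err);
            goto abort;
        }
    }

    gve_set_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
    device_printf(priv->dev, "Configured device resources\n");
    return (0);

abort:
    gve_deconfigure_resources(priv);
    return (err);
}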
577 gve_set_queue_cnts(struct gve_priv *priv)
579 priv->tx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_TX_QUEUES);
580 priv->rx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_RX_QUEUES);
581 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
582 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
584 if (priv->default_num_queues > 0) {
585 priv->tx_cfg.num_queues = MIN(priv->default_num_queues,
586 priv->tx_cfg.num_queues);
587 priv->rx_cfg.num_queues = MIN(priv->default_num_queues,
588 priv->rx_cfg.num_queues);
591 priv->num_queues = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues;
592 priv->mgmt_msix_idx = priv->num_queues;
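Note: the gve_set_queue_cnts hits make the sizing arithmetic explicit: the maxima come from the MAX_TX_QUEUES/MAX_RX_QUEUES registers, an optional default_num_queues tunable clamps both counts, and the management MSI-X vector index is placed right after the per-queue vectors. A worked illustration with hypothetical values:

/* Hypothetical values, for illustration only:
 *   gve_reg_bar_read_4(priv, MAX_TX_QUEUES) -> 16
 *   gve_reg_bar_read_4(priv, MAX_RX_QUEUES) -> 16
 *   priv->default_num_queues               -> 8   (0 would mean "no clamp")
 */
priv->tx_cfg.num_queues = MIN(priv->default_num_queues,
    priv->tx_cfg.max_queues);               /* MIN(8, 16) == 8  */
priv->rx_cfg.num_queues = MIN(priv->default_num_queues,
    priv->rx_cfg.max_queues);               /* MIN(8, 16) == 8  */
priv->num_queues = priv->tx_cfg.num_queues +
    priv->rx_cfg.num_queues;                /* 8 + 8 == 16      */
priv->mgmt_msix_idx = priv->num_queues;     /* the management vector sits
                                               after the 16 queue vectors */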
596 gve_alloc_adminq_and_describe_device(struct gve_priv *priv)
600 if ((err = gve_adminq_alloc(priv)) != 0)
603 if ((err = gve_verify_driver_compatibility(priv)) != 0) {
604 device_printf(priv->dev,
609 if ((err = gve_adminq_describe_device(priv)) != 0)
612 gve_set_queue_cnts(priv);
614 priv->num_registered_pages = 0;
618 gve_release_adminq(priv);
623 gve_schedule_reset(struct gve_priv *priv)
625 if (gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET))
628 device_printf(priv->dev, "Scheduling reset task!\n");
629 gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
630 taskqueue_enqueue(priv->service_tq, &priv->service_task);
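Note: the gve_schedule_reset hits show the deferral idiom: a reset is only flagged (GVE_STATE_FLAG_DO_RESET) and handed to the service taskqueue, and it is suppressed entirely while a reset is already in flight. A sketch built directly from those three lines:

static void
gve_schedule_reset(struct gve_priv *priv)
{
    if (gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET))
        return;         /* a reset is already being handled */

    device_printf(priv->dev, "Scheduling reset task!\n");
    gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
    taskqueue_enqueue(priv->service_tq, &priv->service_task);
}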
634 gve_destroy(struct gve_priv *priv)
636 gve_down(priv);
637 gve_deconfigure_resources(priv);
638 gve_release_adminq(priv);
642 gve_restore(struct gve_priv *priv)
646 err = gve_adminq_alloc(priv);
650 err = gve_configure_resources(priv);
654 err = gve_up(priv);
661 device_printf(priv->dev, "Restore failed!\n");
666 gve_handle_reset(struct gve_priv *priv)
668 if (!gve_get_state_flag(priv, GVE_STATE_FLAG_DO_RESET))
671 gve_clear_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
672 gve_set_state_flag(priv, GVE_STATE_FLAG_IN_RESET);
674 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
676 if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
677 if_link_state_change(priv->ifp, LINK_STATE_DOWN);
678 gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
689 gve_release_adminq(priv);
690 gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
691 gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
692 gve_clear_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK);
693 gve_clear_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK);
695 gve_down(priv);
696 gve_restore(priv);
698 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
700 priv->reset_cnt++;
701 gve_clear_state_flag(priv, GVE_STATE_FLAG_IN_RESET);
705 gve_handle_link_status(struct gve_priv *priv)
707 uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
710 if (link_up == gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP))
715 device_printf(priv->dev, "Device link is up.\n");
716 if_link_state_change(priv->ifp, LINK_STATE_UP);
717 gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
719 device_printf(priv->dev, "Device link is down.\n");
720 if_link_state_change(priv->ifp, LINK_STATE_DOWN);
721 gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
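Note: gve_handle_link_status polls DEVICE_STATUS and only propagates a change when the cached GVE_STATE_FLAG_LINK_UP disagrees with the register. A sketch; the status bit-mask name falls in a gap and is an assumption:

static void
gve_handle_link_status(struct gve_priv *priv)
{
    uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
    bool link_up = (status & GVE_DEVICE_STATUS_LINK_MASK) != 0; /* mask name assumed */

    if (link_up == gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP))
        return;         /* no change; nothing to report */

    if (link_up) {
        device_printf(priv->dev, "Device link is up.\n");
        if_link_state_change(priv->ifp, LINK_STATE_UP);
        gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
    } else {
        device_printf(priv->dev, "Device link is down.\n");
        if_link_state_change(priv->ifp, LINK_STATE_DOWN);
        gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
    }
}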
728 struct gve_priv *priv = (struct gve_priv *)arg;
729 uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
732 !gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET)) {
733 device_printf(priv->dev, "Device requested reset\n");
734 gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
737 gve_handle_reset(priv);
738 gve_handle_link_status(priv);
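Note: the service-task hits tie it together: each run re-reads DEVICE_STATUS, converts a device-requested reset into the DO_RESET flag (unless a reset is already in progress), then handles any pending reset and refreshes link state. A sketch; the reset-request mask name is an assumption, and the handler signature is the standard taskqueue(9) callback form:

static void
gve_service_task(void *arg, int pending)
{
    struct gve_priv *priv = (struct gve_priv *)arg;
    uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);

    if (((status & GVE_DEVICE_STATUS_RESET_MASK) != 0) &&  /* mask name assumed */
        !gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET)) {
        device_printf(priv->dev, "Device requested reset\n");
        gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
    }

    gve_handle_reset(priv);
    gve_handle_link_status(priv);
}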
761 gve_free_sys_res_mem(struct gve_priv *priv)
763 if (priv->msix_table != NULL)
764 bus_release_resource(priv->dev, SYS_RES_MEMORY,
765 rman_get_rid(priv->msix_table), priv->msix_table);
767 if (priv->db_bar != NULL)
768 bus_release_resource(priv->dev, SYS_RES_MEMORY,
769 rman_get_rid(priv->db_bar), priv->db_bar);
771 if (priv->reg_bar != NULL)
772 bus_release_resource(priv->dev, SYS_RES_MEMORY,
773 rman_get_rid(priv->reg_bar), priv->reg_bar);
779 struct gve_priv *priv;
786 priv = device_get_softc(dev);
787 priv->dev = dev;
788 GVE_IFACE_LOCK_INIT(priv->gve_iface_lock);
793 priv->reg_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
795 if (priv->reg_bar == NULL) {
802 priv->db_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
804 if (priv->db_bar == NULL) {
810 rid = pci_msix_table_bar(priv->dev);
811 priv->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
813 if (priv->msix_table == NULL) {
819 err = gve_alloc_adminq_and_describe_device(priv);
823 err = gve_configure_resources(priv);
827 err = gve_alloc_rings(priv);
831 gve_setup_ifnet(dev, priv);
833 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
835 bus_write_multi_1(priv->reg_bar, DRIVER_VERSION, GVE_DRIVER_VERSION,
838 TASK_INIT(&priv->service_task, 0, gve_service_task, priv);
839 priv->service_tq = taskqueue_create("gve service", M_WAITOK | M_ZERO,
840 taskqueue_thread_enqueue, &priv->service_tq);
841 taskqueue_start_threads(&priv->service_tq, 1, PI_NET, "%s service tq",
842 device_get_nameunit(priv->dev));
844 gve_setup_sysctl(priv);
847 device_printf(priv->dev, "Successfully attached %s", GVE_DRIVER_VERSION);
851 gve_free_rings(priv);
852 gve_deconfigure_resources(priv);
853 gve_release_adminq(priv);
854 gve_free_sys_res_mem(priv);
855 GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
862 struct gve_priv *priv = device_get_softc(dev);
863 if_t ifp = priv->ifp;
872 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
873 gve_destroy(priv);
874 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
876 gve_free_rings(priv);
877 gve_free_sys_res_mem(priv);
878 GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
880 while (taskqueue_cancel(priv->service_tq, &priv->service_task, NULL))
881 taskqueue_drain(priv->service_tq, &priv->service_task);
882 taskqueue_free(priv->service_tq);
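Note: the last three detach hits are the standard teardown idiom for a taskqueue whose task may be re-enqueued or still running. Annotated below, under the assumption (per taskqueue(9)) that taskqueue_cancel() returns non-zero (EBUSY) only while the task is currently executing:

/* Keep cancelling until the task is neither queued nor running. */
while (taskqueue_cancel(priv->service_tq, &priv->service_task, NULL) != 0)
    taskqueue_drain(priv->service_tq, &priv->service_task); /* wait out the running instance */
taskqueue_free(priv->service_tq);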