Lines Matching defs:apc
67 static int mana_up(struct mana_port_context *apc);
68 static int mana_down(struct mana_port_context *apc);
95 struct mana_port_context *apc = if_getsoftc(ifp);
97 if (!apc) {
102 MANA_APC_LOCK_LOCK(apc);
107 if (!apc->port_is_up) {
108 MANA_APC_LOCK_UNLOCK(apc);
109 mana_dbg(NULL, "Port %u link is down\n", apc->port_idx);
116 MANA_APC_LOCK_UNLOCK(apc);
122 struct mana_port_context *apc = if_getsoftc(ifp);
123 struct mana_port_stats *stats = &apc->port_stats;
150 mana_restart(struct mana_port_context *apc)
154 MANA_APC_LOCK_LOCK(apc);
155 if (apc->port_is_up)
156 mana_down(apc);
158 rc = mana_up(apc);
159 MANA_APC_LOCK_UNLOCK(apc);
167 struct mana_port_context *apc = if_getsoftc(ifp);
187 MANA_APC_LOCK_LOCK(apc);
188 if (apc->port_is_up)
189 mana_down(apc);
191 apc->frame_size = new_mtu + 18;
195 rc = mana_up(apc);
196 MANA_APC_LOCK_UNLOCK(apc);
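mana_restart() (line 150) and the MTU-change path (lines 187-196) follow the same sequence: take the per-port apc lock, bring the port down if it is up, apply the change (the MTU path recomputes apc->frame_size), and bring the port back up before unlocking. A minimal userspace sketch of that pattern, with a pthread mutex standing in for MANA_APC_LOCK and stubbed up/down handlers; all model_* names are placeholders, not driver APIs:

/*
 * Simplified model of the lock-protected down/up sequence; the real
 * driver operates on hardware queues under MANA_APC_LOCK.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct port_model {
	pthread_mutex_t lock;		/* MANA_APC_LOCK stand-in */
	bool port_is_up;
	unsigned int frame_size;
};

static int
model_up(struct port_model *apc)
{
	apc->port_is_up = true;
	return (0);
}

static int
model_down(struct port_model *apc)
{
	apc->port_is_up = false;
	return (0);
}

static int
model_restart(struct port_model *apc)
{
	int rc;

	pthread_mutex_lock(&apc->lock);
	if (apc->port_is_up)
		model_down(apc);
	rc = model_up(apc);
	pthread_mutex_unlock(&apc->lock);
	return (rc);
}

static int
model_change_mtu(struct port_model *apc, unsigned int new_mtu)
{
	int rc;

	pthread_mutex_lock(&apc->lock);
	if (apc->port_is_up)
		model_down(apc);
	/* +18 mirrors the driver's allowance for the Ethernet header (14)
	 * plus an 802.1Q tag (4) on top of the MTU. */
	apc->frame_size = new_mtu + 18;
	rc = model_up(apc);
	pthread_mutex_unlock(&apc->lock);
	return (rc);
}

int
main(void)
{
	struct port_model apc = { .port_is_up = true, .frame_size = 1518 };

	pthread_mutex_init(&apc.lock, NULL);
	model_restart(&apc);
	model_change_mtu(&apc, 9000);
	printf("up=%d frame_size=%u\n", apc.port_is_up, apc.frame_size);
	return (0);
}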
202 MANA_APC_LOCK_LOCK(apc);
203 if (!apc->port_is_up)
204 rc = mana_up(apc);
205 MANA_APC_LOCK_UNLOCK(apc);
209 MANA_APC_LOCK_LOCK(apc);
210 if (apc->port_is_up)
211 mana_down(apc);
212 MANA_APC_LOCK_UNLOCK(apc);
218 MANA_APC_LOCK_LOCK(apc);
267 MANA_APC_LOCK_UNLOCK(apc);
279 MANA_APC_LOCK_UNLOCK(apc);
288 MANA_APC_LOCK_UNLOCK(apc);
296 rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
303 memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
349 mana_tx_map_mbuf(struct mana_port_context *apc,
354 struct gdma_dev *gd = apc->ac->gdma_dev;
359 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
376 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
389 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
397 mana_tx_unmap_mbuf(struct mana_port_context *apc,
400 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
402 bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
410 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
440 err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
450 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
457 rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
467 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
470 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
472 bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);
494 struct mana_port_context *apc = if_getsoftc(ndev);
495 struct mana_port_stats *port_stats = &apc->port_stats;
496 struct gdma_dev *gd = apc->ac->gdma_dev;
507 cq = &apc->tx_qp[txq->idx].tx_cq;
515 if (!apc->port_is_up ||
523 if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
532 % apc->num_queues;
547 err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
631 mana_tx_unmap_mbuf(apc, tx_info);
670 struct mana_port_context *apc = if_getsoftc(ndev);
672 while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
804 struct mana_port_context *apc = if_getsoftc(ifp);
810 if (unlikely((!apc->port_is_up) ||
818 counter_u64_add_protected(apc->port_stats.tx_drops, 1);
826 counter_u64_add_protected(apc->port_stats.tx_drops, 1);
834 txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
835 apc->num_queues;
837 txq_id = m->m_pkthdr.flowid % apc->num_queues;
840 if (apc->enable_tx_altq)
841 txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;
843 txq = &apc->tx_qp[txq_id].txq;
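The transmit path around lines 804-843 picks a queue by masking the mbuf's flow hash into the RSS indirection table and reducing the entry modulo the number of active queues; mana_rss_table_init() (line 2531) fills that table round-robin. A standalone sketch of the selection math follows; the 64-entry table size is an assumption standing in for MANA_INDIRECT_TABLE_SIZE/MANA_INDIRECT_TABLE_MASK:

#include <stdint.h>
#include <stdio.h>

#define MODEL_INDIR_SIZE	64	/* assumed table size */
#define MODEL_INDIR_MASK	(MODEL_INDIR_SIZE - 1)

/* Round-robin fill, as mana_rss_table_init() does at line 2531. */
static void
model_rss_table_init(uint32_t *table, uint32_t num_queues)
{
	uint32_t i;

	for (i = 0; i < MODEL_INDIR_SIZE; i++)
		table[i] = i % num_queues;
}

/* Mask the flow hash into the table, then clamp to the live queue count. */
static uint32_t
model_pick_txq(const uint32_t *table, uint32_t num_queues, uint32_t hash)
{
	return (table[hash & MODEL_INDIR_MASK] % num_queues);
}

int
main(void)
{
	uint32_t table[MODEL_INDIR_SIZE];
	uint32_t num_queues = 8;

	model_rss_table_init(table, num_queues);
	printf("hash 0x12345 -> txq %u\n",
	    model_pick_txq(table, num_queues, 0x12345));
	return (0);
}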
865 mana_cleanup_port_context(struct mana_port_context *apc)
867 bus_dma_tag_destroy(apc->tx_buf_tag);
868 bus_dma_tag_destroy(apc->rx_buf_tag);
869 apc->rx_buf_tag = NULL;
871 free(apc->rxqs, M_DEVBUF);
872 apc->rxqs = NULL;
874 mana_free_counters((counter_u64_t *)&apc->port_stats,
879 mana_init_port_context(struct mana_port_context *apc)
881 device_t dev = apc->ac->gdma_dev->gdma_context->dev;
898 &apc->tx_buf_tag);
915 &apc->rx_buf_tag);
921 apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
1022 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
1034 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1051 apc->port_handle = resp.vport;
1052 memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);
1058 mana_uncfg_vport(struct mana_port_context *apc)
1060 apc->vport_use_count--;
1061 if (apc->vport_use_count < 0) {
1064 apc->vport_use_count);
1069 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
1094 if (apc->vport_use_count > 0) {
1097 apc->vport_use_count++;
1101 req.vport = apc->port_handle;
1105 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1108 if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
1115 if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1123 apc->tx_shortform_allowed = resp.short_form_allowed;
1124 apc->tx_vp_offset = resp.tx_vport_offset;
1126 if_printf(apc->ndev, "Configured vPort %ju PD %u DB %u\n",
1127 apc->port_handle, protection_dom_id, doorbell_pg_id);
1131 mana_uncfg_vport(apc);
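mana_cfg_vport() and mana_uncfg_vport() (lines 1058-1131) guard the vPort with a plain use count: configuration is refused while the count is already non-zero, and a negative count on release signals an unbalanced uncfg call. A compressed model of that accounting; the EBUSY return value here is a placeholder, not necessarily the driver's error code:

#include <errno.h>
#include <stdio.h>

struct vport_model {
	int vport_use_count;
};

static int
model_cfg_vport(struct vport_model *apc)
{
	/* Only one consumer may hold the vPort at a time. */
	if (apc->vport_use_count > 0)
		return (EBUSY);
	apc->vport_use_count++;
	return (0);
}

static void
model_uncfg_vport(struct vport_model *apc)
{
	apc->vport_use_count--;
	if (apc->vport_use_count < 0)
		fprintf(stderr, "unbalanced uncfg, use_count %d\n",
		    apc->vport_use_count);
}

int
main(void)
{
	struct vport_model apc = { 0 };

	printf("first cfg: %d\n", model_cfg_vport(&apc));	/* succeeds */
	printf("second cfg: %d\n", model_cfg_vport(&apc));	/* refused */
	model_uncfg_vport(&apc);
	return (0);
}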
1137 mana_cfg_vport_steering(struct mana_port_context *apc,
1145 if_t ndev = apc->ndev;
1156 req->vport = apc->port_handle;
1160 req->rss_enable = apc->rss_state;
1164 req->default_rxobj = apc->default_rxobj;
1167 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1171 memcpy(req_indir_tab, apc->rxobj_table,
1175 err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1196 apc->port_handle, num_entries);
1204 mana_create_wq_obj(struct mana_port_context *apc,
1212 if_t ndev = apc->ndev;
1226 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1259 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
1264 if_t ndev = apc->ndev;
1272 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1340 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1352 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1355 if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
1362 if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1371 if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
1380 mana_fence_rqs(struct mana_port_context *apc)
1386 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1387 rxq = apc->rxqs[rxq_idx];
1388 err = mana_fence_rq(apc, rxq);
1425 struct mana_port_context *apc;
1438 apc = if_getsoftc(ndev);
1520 mana_tx_unmap_mbuf(apc, tx_info);
1550 /* Ensure checking txq_full before apc->port_is_up. */
1553 if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1559 apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1561 if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
1623 struct mana_port_context *apc;
1627 apc = if_getsoftc(ndev);
1713 counter_u64_add_protected(apc->port_stats.rx_packets, 1);
1715 counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
1726 struct mana_port_context *apc;
1736 apc = if_getsoftc(ndev);
1737 counter_u64_add(apc->port_stats.rx_drops, 1);
1776 apc = if_getsoftc(ndev);
1781 mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
1784 err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
1796 mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
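In the receive completion path (lines 1776-1796) the old mbuf is unmapped, a fresh one is loaded in its place, and if that allocation fails the original mbuf is reloaded so the ring slot is never left empty (the received packet is dropped instead). A simplified model of that refill-or-recycle step, using malloc in place of mbuf allocation and ignoring DMA mapping entirely:

#include <stdio.h>
#include <stdlib.h>

#define MODEL_BUF_SIZE	2048	/* placeholder for rxq->datasize */

struct rx_slot_model {
	void *buf;		/* stands in for the mbuf + DMA map */
};

/*
 * Deliver the buffer currently in the slot and refill the slot, keeping
 * the old buffer if a replacement cannot be allocated: the packet is
 * dropped in that case, but the ring slot stays populated.
 */
static void *
model_rx_refill(struct rx_slot_model *slot)
{
	void *old = slot->buf;
	void *fresh = malloc(MODEL_BUF_SIZE);

	if (fresh == NULL)
		return (NULL);		/* drop: reuse the old buffer in place */
	slot->buf = fresh;
	return (old);			/* hand the old buffer up the stack */
}

int
main(void)
{
	struct rx_slot_model slot = { malloc(MODEL_BUF_SIZE) };
	void *pkt = model_rx_refill(&slot);

	printf("delivered %p, slot refilled with %p\n", pkt, slot.buf);
	free(pkt);
	free(slot.buf);
	return (0);
}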
1913 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1915 struct gdma_dev *gd = apc->ac->gdma_dev;
1935 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1937 struct gdma_dev *gd = apc->ac->gdma_dev;
1961 drbr_flush(apc->ndev, txq->txq_br);
1982 mana_tx_unmap_mbuf(apc, txbuf_info);
1998 mana_destroy_txq(struct mana_port_context *apc)
2002 if (!apc->tx_qp)
2005 for (i = 0; i < apc->num_queues; i++) {
2006 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
2008 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
2010 mana_deinit_txq(apc, &apc->tx_qp[i].txq);
2013 free(apc->tx_qp, M_DEVBUF);
2014 apc->tx_qp = NULL;
2018 mana_create_txq(struct mana_port_context *apc, if_t net)
2020 struct mana_context *ac = apc->ac;
2033 apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
2050 for (i = 0; i < apc->num_queues; i++) {
2051 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
2054 txq = &apc->tx_qp[i].txq;
2057 txq->vp_offset = apc->tx_vp_offset;
2070 cq = &apc->tx_qp[i].tx_cq;
2097 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2098 &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
2150 "mana txq p%u-tx%d", apc->port_idx, i);
2164 if (apc->last_tx_cq_bind_cpu < 0)
2165 apc->last_tx_cq_bind_cpu = CPU_FIRST();
2166 cq->cpu = apc->last_tx_cq_bind_cpu;
2167 apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu);
2169 if (apc->bind_cleanup_thread_cpu) {
2175 apc->port_idx, txq->idx, cq->cpu);
2179 apc->port_idx, txq->idx);
2187 mana_destroy_txq(apc);
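mana_create_txq() (lines 2164-2167) binds each TX completion queue's cleanup work to a CPU chosen round-robin, starting from CPU_FIRST() and advancing with CPU_NEXT(); mana_create_rxq() (lines 2434-2437) repeats the same pattern for RX. A small model of that assignment, with modulo arithmetic standing in for the cpuset iterators and a placeholder CPU count:

#include <stdio.h>

#define MODEL_NCPUS	8	/* placeholder for the system CPU set */

struct bind_state_model {
	int last_bind_cpu;	/* -1 means "no CPU assigned yet" */
};

/* Round-robin CPU pick; stands in for CPU_FIRST()/CPU_NEXT(). */
static int
model_next_cpu(struct bind_state_model *st)
{
	int cpu;

	if (st->last_bind_cpu < 0)
		st->last_bind_cpu = 0;
	cpu = st->last_bind_cpu;
	st->last_bind_cpu = (st->last_bind_cpu + 1) % MODEL_NCPUS;
	return (cpu);
}

int
main(void)
{
	struct bind_state_model st = { -1 };
	int q;

	for (q = 0; q < 10; q++)
		printf("queue %d -> cpu %d\n", q, model_next_cpu(&st));
	return (0);
}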
2192 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
2195 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2209 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2211 mana_deinit_cq(apc, &rxq->rx_cq);
2223 mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
2225 bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2238 mana_alloc_rx_wqe(struct mana_port_context *apc,
2257 err = bus_dmamap_create(apc->rx_buf_tag, 0,
2266 err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
2271 bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2310 mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
2313 struct gdma_dev *gd = apc->ac->gdma_dev;
2335 rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
2344 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2400 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2434 if (apc->last_rx_cq_bind_cpu < 0)
2435 apc->last_rx_cq_bind_cpu = CPU_FIRST();
2436 cq->cpu = apc->last_rx_cq_bind_cpu;
2437 apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu);
2439 if (apc->bind_cleanup_thread_cpu) {
2445 apc->port_idx, rxq->rxq_idx, cq->cpu);
2449 apc->port_idx, rxq->rxq_idx);
2459 mana_destroy_rxq(apc, rxq, false);
2462 mana_deinit_cq(apc, cq);
2468 mana_add_rx_queues(struct mana_port_context *apc, if_t ndev)
2470 struct mana_context *ac = apc->ac;
2475 for (i = 0; i < apc->num_queues; i++) {
2476 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2482 apc->rxqs[i] = rxq;
2485 apc->default_rxobj = apc->rxqs[0]->rxobj;
2491 mana_destroy_vport(struct mana_port_context *apc)
2496 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2497 rxq = apc->rxqs[rxq_idx];
2501 mana_destroy_rxq(apc, rxq, true);
2502 apc->rxqs[rxq_idx] = NULL;
2505 mana_destroy_txq(apc);
2507 mana_uncfg_vport(apc);
2511 mana_create_vport(struct mana_port_context *apc, if_t net)
2513 struct gdma_dev *gd = apc->ac->gdma_dev;
2516 apc->default_rxobj = INVALID_MANA_HANDLE;
2518 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2522 return mana_create_txq(apc, net);
2526 static void mana_rss_table_init(struct mana_port_context *apc)
2531 apc->indir_table[i] = i % apc->num_queues;
2534 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2543 queue_idx = apc->indir_table[i];
2544 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2548 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2552 mana_fence_rqs(apc);
2560 struct mana_port_context *apc = if_getsoftc(ndev);
2562 int port_idx = apc->port_idx;
2566 err = mana_init_port_context(apc);
2570 err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2579 if (apc->max_queues > max_queues)
2580 apc->max_queues = max_queues;
2582 if (apc->num_queues > apc->max_queues)
2583 apc->num_queues = apc->max_queues;
2588 bus_dma_tag_destroy(apc->rx_buf_tag);
2589 apc->rx_buf_tag = NULL;
2590 free(apc->rxqs, M_DEVBUF);
2591 apc->rxqs = NULL;
2598 struct mana_port_context *apc = if_getsoftc(ndev);
2601 err = mana_create_vport(apc, ndev);
2605 err = mana_add_rx_queues(apc, ndev);
2609 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2611 mana_rss_table_init(apc);
2613 err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2620 mana_destroy_vport(apc);
2625 mana_up(struct mana_port_context *apc)
2631 err = mana_alloc_queues(apc->ndev);
2638 mana_sysctl_add_queues(apc);
2640 apc->port_is_up = true;
2645 if_link_state_change(apc->ndev, LINK_STATE_UP);
2646 if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2655 struct mana_port_context *apc = (struct mana_port_context *)arg;
2657 MANA_APC_LOCK_LOCK(apc);
2658 if (!apc->port_is_up) {
2659 mana_up(apc);
2661 MANA_APC_LOCK_UNLOCK(apc);
2667 struct mana_port_context *apc = if_getsoftc(ndev);
2671 if (apc->port_is_up)
2674 /* No packet can be transmitted now since apc->port_is_up is false.
2676 * a txq because it may not timely see apc->port_is_up being cleared
2678 * new packets due to apc->port_is_up being false.
2682 for (i = 0; i < apc->num_queues; i++) {
2683 txq = &apc->tx_qp[i].txq;
2685 struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
2686 struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq);
2702 apc->rss_state = TRI_STATE_FALSE;
2703 err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2709 mana_destroy_vport(apc);
2715 mana_down(struct mana_port_context *apc)
2719 apc->port_st_save = apc->port_is_up;
2720 apc->port_is_up = false;
2725 if (apc->port_st_save) {
2726 if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
2728 if_link_state_change(apc->ndev, LINK_STATE_DOWN);
2730 mana_sysctl_free_queues(apc);
2732 err = mana_dealloc_queues(apc->ndev);
2734 if_printf(apc->ndev,
2745 struct mana_port_context *apc = if_getsoftc(ndev);
2750 if (!apc)
2753 MANA_APC_LOCK_LOCK(apc);
2754 err = mana_down(apc);
2755 MANA_APC_LOCK_UNLOCK(apc);
2757 mana_cleanup_port_context(apc);
2759 MANA_APC_LOCK_DESTROY(apc);
2761 free(apc, M_DEVBUF);
2771 struct mana_port_context *apc;
2779 apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
2780 apc->ac = ac;
2781 apc->ndev = ndev;
2782 apc->max_queues = gc->max_num_queues;
2783 apc->num_queues = min_t(unsigned int,
2785 apc->port_handle = INVALID_MANA_HANDLE;
2786 apc->port_idx = port_idx;
2787 apc->frame_size = DEFAULT_FRAME_SIZE;
2788 apc->last_tx_cq_bind_cpu = -1;
2789 apc->last_rx_cq_bind_cpu = -1;
2790 apc->vport_use_count = 0;
2792 MANA_APC_LOCK_INIT(apc);
2796 if_setsoftc(ndev, apc);
2808 mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2839 ifmedia_init(&apc->media, IFM_IMASK,
2841 ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2842 ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);
2844 ether_ifattach(ndev, apc->mac_addr);
2847 mana_alloc_counters((counter_u64_t *)&apc->port_stats,
2849 mana_sysctl_add_port(apc);
2857 free(apc, M_DEVBUF);
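The port probe path (lines 2771 onward) allocates the zeroed per-port context, fills in defaults (-1 CPU-binding sentinels, a zero vport use count, a randomized RSS hash key), clamps num_queues, and frees the context again on attach failure. A compressed userspace model of that initialization; the key size, queue cap, and rand()-based key fill are placeholders (the second argument to the driver's min_t() at line 2783 is truncated in the listing above):

#include <stdio.h>
#include <stdlib.h>

#define MODEL_HASH_KEY_SIZE	40	/* placeholder for MANA_HASH_KEY_SIZE */
#define MODEL_QUEUE_CAP		16	/* placeholder for the driver's queue cap */

struct port_probe_model {
	unsigned int max_queues;
	unsigned int num_queues;
	unsigned int port_idx;
	int last_tx_cq_bind_cpu;
	int last_rx_cq_bind_cpu;
	int vport_use_count;
	unsigned char hashkey[MODEL_HASH_KEY_SIZE];
};

static struct port_probe_model *
model_probe_port(unsigned int hw_max_queues, unsigned int port_idx)
{
	struct port_probe_model *apc;
	unsigned int i;

	apc = calloc(1, sizeof(*apc));		/* malloc(..., M_ZERO) stand-in */
	if (apc == NULL)
		return (NULL);
	apc->max_queues = hw_max_queues;
	apc->num_queues = hw_max_queues < MODEL_QUEUE_CAP ?
	    hw_max_queues : MODEL_QUEUE_CAP;	/* min_t() clamp */
	apc->port_idx = port_idx;
	apc->last_tx_cq_bind_cpu = -1;		/* "no CPU bound yet" sentinels */
	apc->last_rx_cq_bind_cpu = -1;
	for (i = 0; i < MODEL_HASH_KEY_SIZE; i++)
		apc->hashkey[i] = (unsigned char)rand();	/* toy RSS key fill */
	return (apc);
}

int
main(void)
{
	struct port_probe_model *apc = model_probe_port(32, 0);

	if (apc != NULL) {
		printf("port %u: %u of %u queues\n", apc->port_idx,
		    apc->num_queues, apc->max_queues);
		free(apc);
	}
	return (0);
}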