/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 *  and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t nxge_msi_enable = 2;
#else
uint32_t nxge_msi_enable = 1;
#endif

uint32_t nxge_cksum_enable = 0;

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Debugging flags:
 *     nxge_no_tx_lb     : transmit load balancing
 *     nxge_tx_lb_policy : 0 - TCP port (default)
 *                         3 - DEST MAC
 */
uint32_t nxge_no_tx_lb = 0;
uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Add tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_bcopy_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if !defined(__i386)
uint32_t nxge_use_kmem_alloc = 1;
#else
uint32_t nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;
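/*
 * The tunables above can be overridden at boot time from /etc/system
 * using the usual "set <module>:<variable> = <value>" syntax, for
 * example (illustrative value only):
 *
 *	set nxge:nxge_jumbo_enable = 1
 */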
#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
    HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
    NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef	NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
    struct ddi_dma_attr *,
    size_t, ddi_device_acc_attr_t *, uint_t,
    p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *,
    size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
    char *, caddr_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
    mac_addr_slot_t slot);
void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
    boolean_t factory);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
    const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
    void *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
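/*
 * Driver-private properties exported through the GLDv3 setprop/getprop
 * callbacks declared above.  Each entry pairs a property name with the
 * permission (read-only or read-write) reported to the MAC layer.
 */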
mac_priv_prop_t nxge_priv_props[] = {
    {"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
    {"_adv_pause_cap", MAC_PROP_PERM_RW},
    {"_function_number", MAC_PROP_PERM_READ},
    {"_fw_version", MAC_PROP_PERM_READ},
    {"_port_mode", MAC_PROP_PERM_READ},
    {"_hot_swap_phy", MAC_PROP_PERM_READ},
    {"_accept_jumbo", MAC_PROP_PERM_RW},
    {"_rxdma_intr_time", MAC_PROP_PERM_RW},
    {"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
    {"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
    {"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
    {"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
    {"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
    {"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
    {"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
    {"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
    {"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
    {"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ 256

mac_callbacks_t nxge_m_callbacks = {
    NXGE_M_CALLBACK_FLAGS,
    nxge_m_stat,
    nxge_m_start,
    nxge_m_stop,
    nxge_m_promisc,
    nxge_m_multicst,
    nxge_m_unicst,
    nxge_m_tx,
    nxge_m_resources,
    nxge_m_ioctl,
    nxge_m_getcapab,
    NULL,
    NULL,
    nxge_m_setprop,
    nxge_m_getprop
};

void
nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t,
    ddi_device_acc_attr_t *,
    ddi_device_acc_attr_t *,
    ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;
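/*
 * The DDI access and DMA attribute templates below are handed to the
 * register mapping and DMA allocation routines: little-endian, strictly
 * ordered access for PIO and descriptors, big-endian access for data
 * buffers, and per-use alignment and burst-size constraints for the
 * descriptor, transmit and receive DMA allocations.
 */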
/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_BE_ACC,
    DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
    DMA_ATTR_V0,		/* version number. */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
#ifndef NIU_PA_WORKAROUND
    0x100000,			/* alignment */
#else
    0x2000,
#endif
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    1,				/* scatter/gather list length */
    (unsigned int) 1,		/* granularity */
    0				/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
    DMA_ATTR_V0,		/* version number. */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
#if defined(_BIG_ENDIAN)
    0x2000,			/* alignment */
#else
    0x1000,			/* alignment */
#endif
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    5,				/* scatter/gather list length */
    (unsigned int) 1,		/* granularity */
    0				/* attribute flags */
};
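/*
 * Receive buffer DMA attributes.  Unlike the descriptor and transmit
 * attributes above, this template allows relaxed ordering
 * (DDI_DMA_RELAXED_ORDERING) and requires a single cookie with 8K
 * alignment.
 */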
ddi_dma_attr_t nxge_rx_dma_attr = {
    DMA_ATTR_V0,		/* version number. */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
    0x2000,			/* alignment */
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    1,				/* scatter/gather list length */
    (unsigned int) 1,		/* granularity */
    DDI_DMA_RELAXED_ORDERING	/* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
    (uint_t)0,			/* dlim_addr_lo */
    (uint_t)0xffffffff,		/* dlim_addr_hi */
    (uint_t)0xffffffff,		/* dlim_cntr_max */
    (uint_t)0xfc00fc,		/* dlim_burstsizes for 32 and 64 bit xfers */
    0x1,			/* dlim_minxfer */
    1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000,
    0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);
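/*
 * nxge_attach: DDI attach(9E) entry point.
 *
 * Handles DDI_ATTACH as well as DDI_RESUME and DDI_PM_RESUME.  For a
 * fresh attach it maps the device registers, registers the N2/NIU
 * hypervisor services on sun4v, sets up kstats, ndd parameters,
 * mutexes and interrupts, and finally registers the instance with the
 * GLDv3 MAC layer.  A Hybrid I/O guest domain takes a shorter path and
 * only adds its virtualization region (VR).
 */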
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    p_nxge_t	nxgep = NULL;
    int		instance;
    int		status = DDI_SUCCESS;
    uint8_t	portn;
    nxge_mmac_t	*mmac_info;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

    /*
     * Get the device instance since we'll need to setup
     * or retrieve a soft state for this instance.
     */
    instance = ddi_get_instance(dip);

    switch (cmd) {
    case DDI_ATTACH:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
        break;

    case DDI_RESUME:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
        nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
        if (nxgep == NULL) {
            status = DDI_FAILURE;
            break;
        }
        if (nxgep->dip != dip) {
            status = DDI_FAILURE;
            break;
        }
        if (nxgep->suspended == DDI_PM_SUSPEND) {
            status = ddi_dev_is_needed(nxgep->dip, 0, 1);
        } else {
            status = nxge_resume(nxgep);
        }
        goto nxge_attach_exit;

    case DDI_PM_RESUME:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
        nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
        if (nxgep == NULL) {
            status = DDI_FAILURE;
            break;
        }
        if (nxgep->dip != dip) {
            status = DDI_FAILURE;
            break;
        }
        status = nxge_resume(nxgep);
        goto nxge_attach_exit;

    default:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
        status = DDI_FAILURE;
        goto nxge_attach_exit;
    }

    if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
        status = DDI_FAILURE;
        goto nxge_attach_exit;
    }

    nxgep = ddi_get_soft_state(nxge_list, instance);
    if (nxgep == NULL) {
        status = NXGE_ERROR;
        goto nxge_attach_fail2;
    }

    nxgep->nxge_magic = NXGE_MAGIC;

    nxgep->drv_state = 0;
    nxgep->dip = dip;
    nxgep->instance = instance;
    nxgep->p_dip = ddi_get_parent(dip);
    nxgep->nxge_debug_level = nxge_debug_level;
    npi_debug_level = nxge_debug_level;

    /* Are we a guest running in a Hybrid I/O environment? */
    nxge_get_environs(nxgep);

    status = nxge_map_regs(nxgep);

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
        goto nxge_attach_fail3;
    }

    nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
        &nxge_dev_desc_dma_acc_attr,
        &nxge_rx_dma_attr);

    /* Create & initialize the per-Neptune data structure */
    /* (even if we're a guest). */
    status = nxge_init_common_dev(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_init_common_dev failed"));
        goto nxge_attach_fail4;
    }

#if defined(sun4v)
    /* This is required by nxge_hio_init(), which follows. */
    if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
        goto nxge_attach_fail;
#endif

    if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_hio_init failed"));
        goto nxge_attach_fail4;
    }

    if (nxgep->niu_type == NEPTUNE_2_10GF) {
        if (nxgep->function_num > 1) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
                " function %d. Only functions 0 and 1 are "
                "supported for this card.", nxgep->function_num));
            status = NXGE_ERROR;
            goto nxge_attach_fail4;
        }
    }

    if (isLDOMguest(nxgep)) {
        /*
         * Use the function number here.
         */
        nxgep->mac.portnum = nxgep->function_num;
        nxgep->mac.porttype = PORT_TYPE_LOGICAL;

        /* XXX We'll set the MAC address counts to 1 for now. */
        mmac_info = &nxgep->nxge_mmac_info;
        mmac_info->num_mmac = 1;
        mmac_info->naddrfree = 1;
    } else {
        portn = NXGE_GET_PORT_NUM(nxgep->function_num);
        nxgep->mac.portnum = portn;
        if ((portn == 0) || (portn == 1))
            nxgep->mac.porttype = PORT_TYPE_XMAC;
        else
            nxgep->mac.porttype = PORT_TYPE_BMAC;
        /*
         * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC)
         * internally, the other 2 ports use BMAC (1G "Big" MAC).
         * The two types of MACs have different characterizations.
         */
        mmac_info = &nxgep->nxge_mmac_info;
        if (nxgep->function_num < 2) {
            mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
            mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
        } else {
            mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
            mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
        }
    }
    /*
     * Setup the Ndd parameters for this instance.
     */
    nxge_init_param(nxgep);

    /*
     * Setup Register Tracing Buffer.
     */
    npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

    /* init stats ptr */
    nxge_init_statsp(nxgep);

    /*
     * Copy the vpd info from eeprom to a local data
     * structure, and then check its validity.
     */
    if (!isLDOMguest(nxgep)) {
        int *regp;
        uint_t reglen;
        int rv;

        nxge_vpd_info_get(nxgep);

        /* Find the NIU config handle. */
        rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
            ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
            "reg", &regp, &reglen);

        if (rv != DDI_PROP_SUCCESS) {
            goto nxge_attach_fail5;
        }
        /*
         * The address_hi, that is the first int, in the reg
         * property consists of config handle, but need to remove
         * the bits 28-31 which are OBP specific info.
         */
        nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
        ddi_prop_free(regp);
    }

    if (isLDOMguest(nxgep)) {
        uchar_t *prop_val;
        uint_t prop_len;

        extern void nxge_get_logical_props(p_nxge_t);

        nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
        nxgep->mac.portmode = PORT_LOGICAL;
        (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
            "phy-type", "virtual transceiver");

        nxgep->nports = 1;
        nxgep->board_ver = 0;	/* XXX What? */

        /*
         * local-mac-address property gives us info on which
         * specific MAC address the Hybrid resource is associated
         * with.
         */
        if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
            "local-mac-address", &prop_val,
            &prop_len) != DDI_PROP_SUCCESS) {
            goto nxge_attach_fail5;
        }
        if (prop_len != ETHERADDRL) {
            ddi_prop_free(prop_val);
            goto nxge_attach_fail5;
        }
        ether_copy(prop_val, nxgep->hio_mac_addr);
        ddi_prop_free(prop_val);
        nxge_get_logical_props(nxgep);

    } else {
        status = nxge_xcvr_find(nxgep);

        if (status != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
                " Couldn't determine card type"
                " .... exit "));
            goto nxge_attach_fail5;
        }

        status = nxge_get_config_properties(nxgep);

        if (status != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "get_hw create failed"));
            goto nxge_attach_fail;
        }
    }

    /*
     * Setup the Kstats for the driver.
     */
    nxge_setup_kstats(nxgep);

    if (!isLDOMguest(nxgep))
        nxge_setup_param(nxgep);

    status = nxge_setup_system_dma_pages(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
        goto nxge_attach_fail;
    }

    nxge_hw_id_init(nxgep);

    if (!isLDOMguest(nxgep))
        nxge_hw_init_niu_common(nxgep);

    status = nxge_setup_mutexes(nxgep);
    if (status != NXGE_OK) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
        goto nxge_attach_fail;
    }

#if defined(sun4v)
    if (isLDOMguest(nxgep)) {
        /* Find our VR & channel sets. */
        status = nxge_hio_vr_add(nxgep);
        goto nxge_attach_exit;
    }
#endif

    status = nxge_setup_dev(nxgep);
    if (status != DDI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
        goto nxge_attach_fail;
    }

    status = nxge_add_intrs(nxgep);
    if (status != DDI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
        goto nxge_attach_fail;
    }
    status = nxge_add_soft_intrs(nxgep);
    if (status != DDI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "add_soft_intr failed"));
        goto nxge_attach_fail;
    }

    /*
     * Enable interrupts.
     */
    nxge_intrs_enable(nxgep);

    /* If a guest, register with vio_net instead. */
    if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "unable to register to mac layer (%d)", status));
        goto nxge_attach_fail;
    }

    mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "registered to mac (instance %d)", instance));

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

    goto nxge_attach_exit;

nxge_attach_fail:
    nxge_unattach(nxgep);
    goto nxge_attach_fail1;

nxge_attach_fail5:
    /*
     * Tear down the ndd parameters setup.
     */
    nxge_destroy_param(nxgep);

    /*
     * Tear down the kstat setup.
     */
    nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
    if (nxgep->nxge_hw_p) {
        nxge_uninit_common_dev(nxgep);
        nxgep->nxge_hw_p = NULL;
    }

nxge_attach_fail3:
    /*
     * Unmap the register setup.
     */
    nxge_unmap_regs(nxgep);

    nxge_fm_fini(nxgep);

nxge_attach_fail2:
    ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
    if (status != NXGE_OK)
        status = (NXGE_ERROR | NXGE_DDI_FAILED);
    nxgep = NULL;

nxge_attach_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
        status));

    return (status);
}
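/*
 * nxge_detach: DDI detach(9E) entry point.
 *
 * DDI_SUSPEND and DDI_PM_SUSPEND only quiesce the instance via
 * nxge_suspend().  DDI_DETACH stops link monitoring, unregisters from
 * the MAC layer (or from Hybrid I/O when running as a guest) and then
 * tears everything down through nxge_unattach().
 */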
static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    int		status = DDI_SUCCESS;
    int		instance;
    p_nxge_t	nxgep = NULL;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
    instance = ddi_get_instance(dip);
    nxgep = ddi_get_soft_state(nxge_list, instance);
    if (nxgep == NULL) {
        status = DDI_FAILURE;
        goto nxge_detach_exit;
    }

    switch (cmd) {
    case DDI_DETACH:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
        break;

    case DDI_PM_SUSPEND:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
        nxgep->suspended = DDI_PM_SUSPEND;
        nxge_suspend(nxgep);
        break;

    case DDI_SUSPEND:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
        if (nxgep->suspended != DDI_PM_SUSPEND) {
            nxgep->suspended = DDI_SUSPEND;
            nxge_suspend(nxgep);
        }
        break;

    default:
        status = DDI_FAILURE;
    }

    if (cmd != DDI_DETACH)
        goto nxge_detach_exit;

    /*
     * Stop the xcvr polling.
     */
    nxgep->suspended = cmd;

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

    if (isLDOMguest(nxgep)) {
        nxge_hio_unregister(nxgep);
    } else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "<== nxge_detach status = 0x%08X", status));
        return (DDI_FAILURE);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_detach (mac_unregister) status = 0x%08X", status));

    nxge_unattach(nxgep);
    nxgep = NULL;

nxge_detach_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
        status));

    return (status);
}
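/*
 * nxge_unattach: release everything acquired during attach, in roughly
 * the reverse order: timers, Hybrid I/O resources, the hypervisor
 * service registration, interrupts, device state, ndd parameters,
 * kstats, mutexes, properties and register mappings.
 */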
static void
nxge_unattach(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

    if (nxgep == NULL || nxgep->dev_regs == NULL) {
        return;
    }

    nxgep->nxge_magic = 0;

    if (nxgep->nxge_timerid) {
        nxge_stop_timer(nxgep, nxgep->nxge_timerid);
        nxgep->nxge_timerid = 0;
    }

#if defined(sun4v)
    if (isLDOMguest(nxgep)) {
        (void) nxge_hio_vr_release(nxgep);
    }
#endif

    if (nxgep->nxge_hw_p) {
        nxge_uninit_common_dev(nxgep);
        nxgep->nxge_hw_p = NULL;
    }

#if defined(sun4v)
    if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
        (void) hsvc_unregister(&nxgep->niu_hsvc);
        nxgep->niu_hsvc_available = B_FALSE;
    }
#endif
    /*
     * Stop any further interrupts.
     */
    nxge_remove_intrs(nxgep);

    /* remove soft interrupts */
    nxge_remove_soft_intrs(nxgep);

    /*
     * Stop the device and free resources.
     */
    if (!isLDOMguest(nxgep)) {
        nxge_destroy_dev(nxgep);
    }

    /*
     * Tear down the ndd parameters setup.
     */
    nxge_destroy_param(nxgep);

    /*
     * Tear down the kstat setup.
     */
    nxge_destroy_kstats(nxgep);

    /*
     * Destroy all mutexes.
     */
    nxge_destroy_mutexes(nxgep);

    /*
     * Remove the list of ndd parameters which
     * were setup during attach.
     */
    if (nxgep->dip) {
        NXGE_DEBUG_MSG((nxgep, OBP_CTL,
            " nxge_unattach: remove all properties"));

        (void) ddi_prop_remove_all(nxgep->dip);
    }

#if NXGE_PROPERTY
    nxge_remove_hard_properties(nxgep);
#endif

    /*
     * Unmap the register setup.
     */
    nxge_unmap_regs(nxgep);

    nxge_fm_fini(nxgep);

    ddi_soft_state_free(nxge_list, nxgep->instance);

    NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}
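/*
 * nxge_hsvc_register: negotiate the N2/NIU hypervisor service group
 * (sun4v only).  On anything other than an N2 NIU this is a no-op; on
 * failure the negotiation error is logged and DDI_FAILURE is returned
 * so that attach can bail out.
 */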
#if defined(sun4v)
int
nxge_hsvc_register(
    nxge_t *nxgep)
{
    nxge_status_t status;

    if (nxgep->niu_type == N2_NIU) {
        nxgep->niu_hsvc_available = B_FALSE;
        bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
        if ((status = hsvc_register(&nxgep->niu_hsvc,
            &nxgep->niu_min_ver)) != 0) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_attach: %s: cannot negotiate "
                "hypervisor services revision %d group: 0x%lx "
                "major: 0x%lx minor: 0x%lx errno: %d",
                niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
                niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
                niu_hsvc.hsvc_minor, status));
            return (DDI_FAILURE);
        }
        nxgep->niu_hsvc_available = B_TRUE;
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "NIU Hypervisor service enabled"));
    }

    return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";
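/*
 * nxge_map_regs: map the device register sets.
 *
 * For Neptune (PCI-E) devices this maps the PCI config space, the PIO
 * registers, the MSI-X region and the virtualization region, and
 * applies a PCI-E device-control workaround on little-endian hosts.
 * For the N2/NIU the register set starts at index 1 and two
 * virtualization regions are mapped instead.  Hybrid I/O guests are
 * handled entirely by nxge_guest_regs_map().
 */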
static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
    int			ddi_status = DDI_SUCCESS;
    p_dev_regs_t	dev_regs;
    char		buf[MAXPATHLEN + 1];
    char		*devname;
#ifdef	NXGE_DEBUG
    char		*sysname;
#endif
    off_t		regsize;
    nxge_status_t	status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
    off_t		pci_offset;
    uint16_t		pcie_devctl;
#endif

    if (isLDOMguest(nxgep)) {
        return (nxge_guest_regs_map(nxgep));
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
    nxgep->dev_regs = NULL;
    dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
    dev_regs->nxge_regh = NULL;
    dev_regs->nxge_pciregh = NULL;
    dev_regs->nxge_msix_regh = NULL;
    dev_regs->nxge_vir_regh = NULL;
    dev_regs->nxge_vir2_regh = NULL;
    nxgep->niu_type = NIU_TYPE_NONE;

    devname = ddi_pathname(nxgep->dip, buf);
    ASSERT(strlen(devname) > 0);
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "nxge_map_regs: pathname devname %s", devname));

    if (strstr(devname, n2_siu_name)) {
        /* N2/NIU */
        nxgep->niu_type = N2_NIU;
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: N2/NIU devname %s", devname));
        /* get function number */
        nxgep->function_num =
            (devname[strlen(devname) -1] == '1' ? 1 : 0);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: N2/NIU function number %d",
            nxgep->function_num));
    } else {
        int *prop_val;
        uint_t prop_len;
        uint8_t func_num;

        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
            0, "reg",
            &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
            NXGE_DEBUG_MSG((nxgep, VPD_CTL,
                "Reg property not found"));
            ddi_status = DDI_FAILURE;
            goto nxge_map_regs_fail0;

        } else {
            func_num = (prop_val[0] >> 8) & 0x7;
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "Reg property found: fun # %d",
                func_num));
            nxgep->function_num = func_num;
            if (isLDOMguest(nxgep)) {
                nxgep->function_num /= 2;
                return (NXGE_OK);
            }
            ddi_prop_free(prop_val);
        }
    }

    switch (nxgep->niu_type) {
    default:
        (void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: pci config size 0x%x", regsize));

        ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
            (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs, nxge bus config regs failed"));
            goto nxge_map_regs_fail0;
        }
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_reg: PCI config addr 0x%0llx "
            " handle 0x%0llx", dev_regs->nxge_pciregp,
            dev_regs->nxge_pciregh));
        /*
         * IMP IMP
         * workaround for bit swapping bug in HW
         * which ends up in no-snoop = yes
         * resulting, in DMA not synched properly
         */
#if !defined(_BIG_ENDIAN)
        /* workarounds for x86 systems */
        pci_offset = 0x80 + PCIE_DEVCTL;
        pcie_devctl = 0x0;
        pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
        pcie_devctl |= PCIE_DEVCTL_RO_EN;
        pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
            pcie_devctl);
#endif

        (void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: pio size 0x%x", regsize));
        /* set up the device mapped register */
        ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
            (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for Neptune global reg failed"));
            goto nxge_map_regs_fail1;
        }

        /* set up the msi/msi-x mapped register */
        (void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: msix size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
            (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for msi reg failed"));
            goto nxge_map_regs_fail2;
        }

        /* set up the vio region mapped register */
        (void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: vio size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
            (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for nxge vio reg failed"));
            goto nxge_map_regs_fail3;
        }
        nxgep->dev_regs = dev_regs;

        NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
        NPI_PCI_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_pciregp);
        NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
        NPI_MSI_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

        NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_REG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
        NPI_VREG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

        break;

    case N2_NIU:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
        /*
         * Set up the device mapped register (FWARC 2006/556)
         * (changed back to 1: reg starts at 1!)
         */
        (void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: dev size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
            (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for N2/NIU, global reg failed "));
            goto nxge_map_regs_fail1;
        }

        /* set up the first vio region mapped register */
        (void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: vio (1) size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
            (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for nxge vio reg failed"));
            goto nxge_map_regs_fail2;
        }
        /* set up the second vio region mapped register */
        (void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: vio (3) size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
            (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for nxge vio2 reg failed"));
            goto nxge_map_regs_fail3;
        }
        nxgep->dev_regs = dev_regs;

        NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_REG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
        NPI_VREG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

        NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
        NPI_V2REG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

        break;
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
        " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

    goto nxge_map_regs_exit;
nxge_map_regs_fail3:
    if (dev_regs->nxge_msix_regh) {
        ddi_regs_map_free(&dev_regs->nxge_msix_regh);
    }
    if (dev_regs->nxge_vir_regh) {
        ddi_regs_map_free(&dev_regs->nxge_regh);
    }
nxge_map_regs_fail2:
    if (dev_regs->nxge_regh) {
        ddi_regs_map_free(&dev_regs->nxge_regh);
    }
nxge_map_regs_fail1:
    if (dev_regs->nxge_pciregh) {
        ddi_regs_map_free(&dev_regs->nxge_pciregh);
    }
nxge_map_regs_fail0:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
    kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
    if (ddi_status != DDI_SUCCESS)
        status |= (NXGE_ERROR | NXGE_DDI_FAILED);
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
    return (status);
}
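/*
 * nxge_unmap_regs: undo nxge_map_regs(), freeing every register mapping
 * that was set up and releasing the dev_regs structure.  Guests
 * delegate to nxge_guest_regs_map_free().
 */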
static void
nxge_unmap_regs(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

    if (isLDOMguest(nxgep)) {
        nxge_guest_regs_map_free(nxgep);
        return;
    }

    if (nxgep->dev_regs) {
        if (nxgep->dev_regs->nxge_pciregh) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "==> nxge_unmap_regs: bus"));
            ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
            nxgep->dev_regs->nxge_pciregh = NULL;
        }
        if (nxgep->dev_regs->nxge_regh) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "==> nxge_unmap_regs: device registers"));
            ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
            nxgep->dev_regs->nxge_regh = NULL;
        }
        if (nxgep->dev_regs->nxge_msix_regh) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "==> nxge_unmap_regs: device interrupts"));
            ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
            nxgep->dev_regs->nxge_msix_regh = NULL;
        }
        if (nxgep->dev_regs->nxge_vir_regh) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "==> nxge_unmap_regs: vio region"));
            ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
            nxgep->dev_regs->nxge_vir_regh = NULL;
        }
        if (nxgep->dev_regs->nxge_vir2_regh) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "==> nxge_unmap_regs: vio2 region"));
            ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
            nxgep->dev_regs->nxge_vir2_regh = NULL;
        }

        kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
        nxgep->dev_regs = NULL;
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}
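/*
 * nxge_setup_mutexes: create the per-instance locks and condition
 * variable.  The interrupt block cookie is fetched first so that the
 * mutexes can be initialized for use from interrupt context; guests
 * have no such cookie and use 0.  The FFLP classifier locks are only
 * created on Neptune-class hardware.
 */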
static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
    int ddi_status = DDI_SUCCESS;
    nxge_status_t status = NXGE_OK;
    nxge_classify_t *classify_ptr;
    int partition;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

    /*
     * Get the interrupt cookie so the mutexes can be
     * initialized.
     */
    if (isLDOMguest(nxgep)) {
        nxgep->interrupt_cookie = 0;
    } else {
        ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
            &nxgep->interrupt_cookie);

        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "<== nxge_setup_mutexes: failed 0x%x",
                ddi_status));
            goto nxge_setup_mutexes_exit;
        }
    }

    cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
    MUTEX_INIT(&nxgep->poll_lock, NULL,
        MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

    /*
     * Initialize mutexes for this device.
     */
    MUTEX_INIT(nxgep->genlock, NULL,
        MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
        MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    MUTEX_INIT(&nxgep->mif_lock, NULL,
        MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    MUTEX_INIT(&nxgep->group_lock, NULL,
        MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    RW_INIT(&nxgep->filter_lock, NULL,
        RW_DRIVER, (void *)nxgep->interrupt_cookie);

    classify_ptr = &nxgep->classifier;
    /*
     * FFLP Mutexes are never used in interrupt context
     * as fflp operation can take very long time to
     * complete and hence not suitable to invoke from interrupt
     * handlers.
     */
    MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
        NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
        MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
            NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
        for (partition = 0; partition < MAX_PARTITION; partition++) {
            MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
                NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
        }
    }

nxge_setup_mutexes_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_setup_mutexes status = %x", status));

    if (ddi_status != DDI_SUCCESS)
        status |= (NXGE_ERROR | NXGE_DDI_FAILED);

    return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
    int partition;
    nxge_classify_t *classify_ptr;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
    RW_DESTROY(&nxgep->filter_lock);
    MUTEX_DESTROY(&nxgep->group_lock);
    MUTEX_DESTROY(&nxgep->mif_lock);
    MUTEX_DESTROY(&nxgep->ouraddr_lock);
    MUTEX_DESTROY(nxgep->genlock);

    classify_ptr = &nxgep->classifier;
    MUTEX_DESTROY(&classify_ptr->tcam_lock);

    /* Destroy all polling resources. */
    MUTEX_DESTROY(&nxgep->poll_lock);
    cv_destroy(&nxgep->poll_cv);

    /* free data structures, based on HW type */
    if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
        MUTEX_DESTROY(&classify_ptr->fcram_lock);
        for (partition = 0; partition < MAX_PARTITION; partition++) {
            MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
        }
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}
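/*
 * nxge_init: bring the hardware to an initialized state.
 *
 * The ordering matters: buffer and descriptor pools are allocated
 * first, then the TXC block, TXDMA and RXDMA channels, and (service
 * domain only) the classifier, ZCP, IPP and MAC blocks, after which
 * interrupts are enabled.  A guest domain stops after the DMA channels
 * are up.
 */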
*/ 14064693Stm144005 MUTEX_DESTROY(&nxgep->poll_lock); 14074693Stm144005 cv_destroy(&nxgep->poll_cv); 14084693Stm144005 14094693Stm144005 /* free data structures, based on HW type */ 14104977Sraghus if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 14113859Sml29623 MUTEX_DESTROY(&classify_ptr->fcram_lock); 14123859Sml29623 for (partition = 0; partition < MAX_PARTITION; partition++) { 14133859Sml29623 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]); 14143859Sml29623 } 14153859Sml29623 } 14163859Sml29623 14173859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes")); 14183859Sml29623 } 14193859Sml29623 14203859Sml29623 nxge_status_t 14213859Sml29623 nxge_init(p_nxge_t nxgep) 14223859Sml29623 { 14236495Sspeer nxge_status_t status = NXGE_OK; 14243859Sml29623 14253859Sml29623 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init")); 14263859Sml29623 14273859Sml29623 if (nxgep->drv_state & STATE_HW_INITIALIZED) { 14283859Sml29623 return (status); 14293859Sml29623 } 14303859Sml29623 14313859Sml29623 /* 14323859Sml29623 * Allocate system memory for the receive/transmit buffer blocks 14333859Sml29623 * and receive/transmit descriptor rings. 14343859Sml29623 */ 14353859Sml29623 status = nxge_alloc_mem_pool(nxgep); 14363859Sml29623 if (status != NXGE_OK) { 14373859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n")); 14383859Sml29623 goto nxge_init_fail1; 14393859Sml29623 } 14403859Sml29623 14416495Sspeer if (!isLDOMguest(nxgep)) { 14426495Sspeer /* 14436495Sspeer * Initialize and enable the TXC registers. 14446495Sspeer * (Globally enable the Tx controller, 14456495Sspeer * enable the port, configure the dma channel bitmap, 14466495Sspeer * configure the max burst size). 14476495Sspeer */ 14486495Sspeer status = nxge_txc_init(nxgep); 14496495Sspeer if (status != NXGE_OK) { 14506495Sspeer NXGE_ERROR_MSG((nxgep, 14516495Sspeer NXGE_ERR_CTL, "init txc failed\n")); 14526495Sspeer goto nxge_init_fail2; 14536495Sspeer } 14543859Sml29623 } 14553859Sml29623 14563859Sml29623 /* 14573859Sml29623 * Initialize and enable TXDMA channels. 14583859Sml29623 */ 14593859Sml29623 status = nxge_init_txdma_channels(nxgep); 14603859Sml29623 if (status != NXGE_OK) { 14613859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n")); 14623859Sml29623 goto nxge_init_fail3; 14633859Sml29623 } 14643859Sml29623 14653859Sml29623 /* 14663859Sml29623 * Initialize and enable RXDMA channels. 14673859Sml29623 */ 14683859Sml29623 status = nxge_init_rxdma_channels(nxgep); 14693859Sml29623 if (status != NXGE_OK) { 14703859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n")); 14713859Sml29623 goto nxge_init_fail4; 14723859Sml29623 } 14733859Sml29623 14743859Sml29623 /* 14756495Sspeer * The guest domain is now done. 14766495Sspeer */ 14776495Sspeer if (isLDOMguest(nxgep)) { 14786495Sspeer nxgep->drv_state |= STATE_HW_INITIALIZED; 14796495Sspeer goto nxge_init_exit; 14806495Sspeer } 14816495Sspeer 14826495Sspeer /* 14833859Sml29623 * Initialize TCAM and FCRAM (Neptune). 
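	 * A guest domain has already returned above, so the classifier,
	 * ZCP, IPP and MAC initialization that follows runs only in the
	 * service domain or on a non-virtualized Neptune.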
14843859Sml29623 */ 14853859Sml29623 status = nxge_classify_init(nxgep); 14863859Sml29623 if (status != NXGE_OK) { 14873859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n")); 14883859Sml29623 goto nxge_init_fail5; 14893859Sml29623 } 14903859Sml29623 14913859Sml29623 /* 14923859Sml29623 * Initialize ZCP 14933859Sml29623 */ 14943859Sml29623 status = nxge_zcp_init(nxgep); 14953859Sml29623 if (status != NXGE_OK) { 14963859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n")); 14973859Sml29623 goto nxge_init_fail5; 14983859Sml29623 } 14993859Sml29623 15003859Sml29623 /* 15013859Sml29623 * Initialize IPP. 15023859Sml29623 */ 15033859Sml29623 status = nxge_ipp_init(nxgep); 15043859Sml29623 if (status != NXGE_OK) { 15053859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n")); 15063859Sml29623 goto nxge_init_fail5; 15073859Sml29623 } 15083859Sml29623 15093859Sml29623 /* 15103859Sml29623 * Initialize the MAC block. 15113859Sml29623 */ 15123859Sml29623 status = nxge_mac_init(nxgep); 15133859Sml29623 if (status != NXGE_OK) { 15143859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n")); 15153859Sml29623 goto nxge_init_fail5; 15163859Sml29623 } 15173859Sml29623 15186495Sspeer nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */ 15193859Sml29623 15203859Sml29623 /* 15213859Sml29623 * Enable hardware interrupts. 15223859Sml29623 */ 15233859Sml29623 nxge_intr_hw_enable(nxgep); 15243859Sml29623 nxgep->drv_state |= STATE_HW_INITIALIZED; 15253859Sml29623 15263859Sml29623 goto nxge_init_exit; 15273859Sml29623 15283859Sml29623 nxge_init_fail5: 15293859Sml29623 nxge_uninit_rxdma_channels(nxgep); 15303859Sml29623 nxge_init_fail4: 15313859Sml29623 nxge_uninit_txdma_channels(nxgep); 15323859Sml29623 nxge_init_fail3: 15336495Sspeer if (!isLDOMguest(nxgep)) { 15346495Sspeer (void) nxge_txc_uninit(nxgep); 15356495Sspeer } 15363859Sml29623 nxge_init_fail2: 15373859Sml29623 nxge_free_mem_pool(nxgep); 15383859Sml29623 nxge_init_fail1: 15393859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1540*6512Ssowmini "<== nxge_init status (failed) = 0x%08x", status)); 15413859Sml29623 return (status); 15423859Sml29623 15433859Sml29623 nxge_init_exit: 15443859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x", 1545*6512Ssowmini status)); 15463859Sml29623 return (status); 15473859Sml29623 } 15483859Sml29623 15493859Sml29623 15503859Sml29623 timeout_id_t 15513859Sml29623 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec) 15523859Sml29623 { 1553*6512Ssowmini if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) { 15543859Sml29623 return (timeout(func, (caddr_t)nxgep, 1555*6512Ssowmini drv_usectohz(1000 * msec))); 15563859Sml29623 } 15573859Sml29623 return (NULL); 15583859Sml29623 } 15593859Sml29623 15603859Sml29623 /*ARGSUSED*/ 15613859Sml29623 void 15623859Sml29623 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid) 15633859Sml29623 { 15643859Sml29623 if (timerid) { 15653859Sml29623 (void) untimeout(timerid); 15663859Sml29623 } 15673859Sml29623 } 15683859Sml29623 15693859Sml29623 void 15703859Sml29623 nxge_uninit(p_nxge_t nxgep) 15713859Sml29623 { 15723859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit")); 15733859Sml29623 15743859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 15753859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1576*6512Ssowmini "==> nxge_uninit: not initialized")); 15773859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1578*6512Ssowmini "<== nxge_uninit")); 15793859Sml29623 
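		/*
		 * The hardware was never brought up, so there is nothing
		 * to quiesce or free here.
		 */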
return; 15803859Sml29623 } 15813859Sml29623 15823859Sml29623 /* stop timer */ 15833859Sml29623 if (nxgep->nxge_timerid) { 15843859Sml29623 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 15853859Sml29623 nxgep->nxge_timerid = 0; 15863859Sml29623 } 15873859Sml29623 15883859Sml29623 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 15893859Sml29623 (void) nxge_intr_hw_disable(nxgep); 15903859Sml29623 15913859Sml29623 /* 15923859Sml29623 * Reset the receive MAC side. 15933859Sml29623 */ 15943859Sml29623 (void) nxge_rx_mac_disable(nxgep); 15953859Sml29623 15963859Sml29623 /* Disable and soft reset the IPP */ 15976495Sspeer if (!isLDOMguest(nxgep)) 15986495Sspeer (void) nxge_ipp_disable(nxgep); 15993859Sml29623 16003859Sml29623 /* Free classification resources */ 16013859Sml29623 (void) nxge_classify_uninit(nxgep); 16023859Sml29623 16033859Sml29623 /* 16043859Sml29623 * Reset the transmit/receive DMA side. 16053859Sml29623 */ 16063859Sml29623 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 16073859Sml29623 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 16083859Sml29623 16093859Sml29623 nxge_uninit_txdma_channels(nxgep); 16103859Sml29623 nxge_uninit_rxdma_channels(nxgep); 16113859Sml29623 16123859Sml29623 /* 16133859Sml29623 * Reset the transmit MAC side. 16143859Sml29623 */ 16153859Sml29623 (void) nxge_tx_mac_disable(nxgep); 16163859Sml29623 16173859Sml29623 nxge_free_mem_pool(nxgep); 16183859Sml29623 16193859Sml29623 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 16203859Sml29623 16213859Sml29623 nxgep->drv_state &= ~STATE_HW_INITIALIZED; 16223859Sml29623 16233859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " 1624*6512Ssowmini "nxge_mblks_pending %d", nxge_mblks_pending)); 16253859Sml29623 } 16263859Sml29623 16273859Sml29623 void 16283859Sml29623 nxge_get64(p_nxge_t nxgep, p_mblk_t mp) 16293859Sml29623 { 16305125Sjoycey #if defined(__i386) 16315125Sjoycey size_t reg; 16325125Sjoycey #else 16333859Sml29623 uint64_t reg; 16345125Sjoycey #endif 16353859Sml29623 uint64_t regdata; 16363859Sml29623 int i, retry; 16373859Sml29623 16383859Sml29623 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 16393859Sml29623 regdata = 0; 16403859Sml29623 retry = 1; 16413859Sml29623 16423859Sml29623 for (i = 0; i < retry; i++) { 16433859Sml29623 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); 16443859Sml29623 } 16453859Sml29623 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 16463859Sml29623 } 16473859Sml29623 16483859Sml29623 void 16493859Sml29623 nxge_put64(p_nxge_t nxgep, p_mblk_t mp) 16503859Sml29623 { 16515125Sjoycey #if defined(__i386) 16525125Sjoycey size_t reg; 16535125Sjoycey #else 16543859Sml29623 uint64_t reg; 16555125Sjoycey #endif 16563859Sml29623 uint64_t buf[2]; 16573859Sml29623 16583859Sml29623 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 16595133Sjoycey #if defined(__i386) 16605133Sjoycey reg = (size_t)buf[0]; 16615133Sjoycey #else 16623859Sml29623 reg = buf[0]; 16635133Sjoycey #endif 16643859Sml29623 16653859Sml29623 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); 16663859Sml29623 } 16673859Sml29623 16683859Sml29623 16693859Sml29623 nxge_os_mutex_t nxgedebuglock; 16703859Sml29623 int nxge_debug_init = 0; 16713859Sml29623 16723859Sml29623 /*ARGSUSED*/ 16733859Sml29623 /*VARARGS*/ 16743859Sml29623 void 16753859Sml29623 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) 
16763859Sml29623 { 16773859Sml29623 char msg_buffer[1048]; 16783859Sml29623 char prefix_buffer[32]; 16793859Sml29623 int instance; 16803859Sml29623 uint64_t debug_level; 16813859Sml29623 int cmn_level = CE_CONT; 16823859Sml29623 va_list ap; 16833859Sml29623 16846495Sspeer if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) { 16856495Sspeer /* In case a developer has changed nxge_debug_level. */ 16866495Sspeer if (nxgep->nxge_debug_level != nxge_debug_level) 16876495Sspeer nxgep->nxge_debug_level = nxge_debug_level; 16886495Sspeer } 16896495Sspeer 16903859Sml29623 debug_level = (nxgep == NULL) ? nxge_debug_level : 1691*6512Ssowmini nxgep->nxge_debug_level; 16923859Sml29623 16933859Sml29623 if ((level & debug_level) || 1694*6512Ssowmini (level == NXGE_NOTE) || 1695*6512Ssowmini (level == NXGE_ERR_CTL)) { 16963859Sml29623 /* do the msg processing */ 16973859Sml29623 if (nxge_debug_init == 0) { 16983859Sml29623 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 16993859Sml29623 nxge_debug_init = 1; 17003859Sml29623 } 17013859Sml29623 17023859Sml29623 MUTEX_ENTER(&nxgedebuglock); 17033859Sml29623 17043859Sml29623 if ((level & NXGE_NOTE)) { 17053859Sml29623 cmn_level = CE_NOTE; 17063859Sml29623 } 17073859Sml29623 17083859Sml29623 if (level & NXGE_ERR_CTL) { 17093859Sml29623 cmn_level = CE_WARN; 17103859Sml29623 } 17113859Sml29623 17123859Sml29623 va_start(ap, fmt); 17133859Sml29623 (void) vsprintf(msg_buffer, fmt, ap); 17143859Sml29623 va_end(ap); 17153859Sml29623 if (nxgep == NULL) { 17163859Sml29623 instance = -1; 17173859Sml29623 (void) sprintf(prefix_buffer, "%s :", "nxge"); 17183859Sml29623 } else { 17193859Sml29623 instance = nxgep->instance; 17203859Sml29623 (void) sprintf(prefix_buffer, 1721*6512Ssowmini "%s%d :", "nxge", instance); 17223859Sml29623 } 17233859Sml29623 17243859Sml29623 MUTEX_EXIT(&nxgedebuglock); 17253859Sml29623 cmn_err(cmn_level, "!%s %s\n", 1726*6512Ssowmini prefix_buffer, msg_buffer); 17273859Sml29623 17283859Sml29623 } 17293859Sml29623 } 17303859Sml29623 17313859Sml29623 char * 17323859Sml29623 nxge_dump_packet(char *addr, int size) 17333859Sml29623 { 17343859Sml29623 uchar_t *ap = (uchar_t *)addr; 17353859Sml29623 int i; 17363859Sml29623 static char etherbuf[1024]; 17373859Sml29623 char *cp = etherbuf; 17383859Sml29623 char digits[] = "0123456789abcdef"; 17393859Sml29623 17403859Sml29623 if (!size) 17413859Sml29623 size = 60; 17423859Sml29623 17433859Sml29623 if (size > MAX_DUMP_SZ) { 17443859Sml29623 /* Dump the leading bytes */ 17453859Sml29623 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 17463859Sml29623 if (*ap > 0x0f) 17473859Sml29623 *cp++ = digits[*ap >> 4]; 17483859Sml29623 *cp++ = digits[*ap++ & 0xf]; 17493859Sml29623 *cp++ = ':'; 17503859Sml29623 } 17513859Sml29623 for (i = 0; i < 20; i++) 17523859Sml29623 *cp++ = '.'; 17533859Sml29623 /* Dump the last MAX_DUMP_SZ/2 bytes */ 17543859Sml29623 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2)); 17553859Sml29623 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 17563859Sml29623 if (*ap > 0x0f) 17573859Sml29623 *cp++ = digits[*ap >> 4]; 17583859Sml29623 *cp++ = digits[*ap++ & 0xf]; 17593859Sml29623 *cp++ = ':'; 17603859Sml29623 } 17613859Sml29623 } else { 17623859Sml29623 for (i = 0; i < size; i++) { 17633859Sml29623 if (*ap > 0x0f) 17643859Sml29623 *cp++ = digits[*ap >> 4]; 17653859Sml29623 *cp++ = digits[*ap++ & 0xf]; 17663859Sml29623 *cp++ = ':'; 17673859Sml29623 } 17683859Sml29623 } 17693859Sml29623 *--cp = 0; 17703859Sml29623 return (etherbuf); 17713859Sml29623 } 17723859Sml29623 17733859Sml29623 #ifdef NXGE_DEBUG 
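/*
 * nxge_test_map_regs() is compiled into DEBUG builds only.  For
 * Neptune-class devices it reads back the PCI configuration header
 * (vendor/device IDs and the BARs) and the first few 64-bit device
 * registers through the handles set up by nxge_map_regs(), purely as
 * a sanity check of the register mappings.
 */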
17743859Sml29623 static void 17753859Sml29623 nxge_test_map_regs(p_nxge_t nxgep) 17763859Sml29623 { 17773859Sml29623 ddi_acc_handle_t cfg_handle; 17783859Sml29623 p_pci_cfg_t cfg_ptr; 17793859Sml29623 ddi_acc_handle_t dev_handle; 17803859Sml29623 char *dev_ptr; 17813859Sml29623 ddi_acc_handle_t pci_config_handle; 17823859Sml29623 uint32_t regval; 17833859Sml29623 int i; 17843859Sml29623 17853859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs")); 17863859Sml29623 17873859Sml29623 dev_handle = nxgep->dev_regs->nxge_regh; 17883859Sml29623 dev_ptr = (char *)nxgep->dev_regs->nxge_regp; 17893859Sml29623 17904977Sraghus if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 17913859Sml29623 cfg_handle = nxgep->dev_regs->nxge_pciregh; 17923859Sml29623 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 17933859Sml29623 17943859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 17954732Sdavemq "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr)); 17963859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 17974732Sdavemq "Neptune PCI cfg_ptr vendor id ptr 0x%llx", 17984732Sdavemq &cfg_ptr->vendorid)); 17993859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 18004732Sdavemq "\tvendorid 0x%x devid 0x%x", 18014732Sdavemq NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0), 18024732Sdavemq NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0))); 18033859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 18044732Sdavemq "PCI BAR: base 0x%x base14 0x%x base 18 0x%x " 18054732Sdavemq "bar1c 0x%x", 18064732Sdavemq NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0), 18074732Sdavemq NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0), 18084732Sdavemq NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0), 18094732Sdavemq NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0))); 18103859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 18114732Sdavemq "\nNeptune PCI BAR: base20 0x%x base24 0x%x " 18124732Sdavemq "base 28 0x%x bar2c 0x%x\n", 18134732Sdavemq NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0), 18144732Sdavemq NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0), 18154732Sdavemq NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0), 18164732Sdavemq NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 18173859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 18184732Sdavemq "\nNeptune PCI BAR: base30 0x%x\n", 18194732Sdavemq NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 18203859Sml29623 18213859Sml29623 cfg_handle = nxgep->dev_regs->nxge_pciregh; 18223859Sml29623 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 18233859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 18244732Sdavemq "first 0x%llx second 0x%llx third 0x%llx " 18254732Sdavemq "last 0x%llx ", 18264732Sdavemq NXGE_PIO_READ64(dev_handle, 18274732Sdavemq (uint64_t *)(dev_ptr + 0), 0), 18284732Sdavemq NXGE_PIO_READ64(dev_handle, 18294732Sdavemq (uint64_t *)(dev_ptr + 8), 0), 18304732Sdavemq NXGE_PIO_READ64(dev_handle, 18314732Sdavemq (uint64_t *)(dev_ptr + 16), 0), 18324732Sdavemq NXGE_PIO_READ64(cfg_handle, 18334732Sdavemq (uint64_t *)(dev_ptr + 24), 0))); 18343859Sml29623 } 18353859Sml29623 } 18363859Sml29623 18373859Sml29623 #endif 18383859Sml29623 18393859Sml29623 static void 18403859Sml29623 nxge_suspend(p_nxge_t nxgep) 18413859Sml29623 { 18423859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 18433859Sml29623 18443859Sml29623 nxge_intrs_disable(nxgep); 18453859Sml29623 nxge_destroy_dev(nxgep); 18463859Sml29623 18473859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 18483859Sml29623 } 18493859Sml29623 18503859Sml29623 static nxge_status_t 18513859Sml29623 nxge_resume(p_nxge_t nxgep) 
18523859Sml29623 { 18533859Sml29623 nxge_status_t status = NXGE_OK; 18543859Sml29623 18553859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 18564587Sjoycey 18573859Sml29623 nxgep->suspended = DDI_RESUME; 18584587Sjoycey (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 18594587Sjoycey (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 18604587Sjoycey (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 18614587Sjoycey (void) nxge_rx_mac_enable(nxgep); 18624587Sjoycey (void) nxge_tx_mac_enable(nxgep); 18634587Sjoycey nxge_intrs_enable(nxgep); 18643859Sml29623 nxgep->suspended = 0; 18653859Sml29623 18663859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1867*6512Ssowmini "<== nxge_resume status = 0x%x", status)); 18683859Sml29623 return (status); 18693859Sml29623 } 18703859Sml29623 18713859Sml29623 static nxge_status_t 18723859Sml29623 nxge_setup_dev(p_nxge_t nxgep) 18733859Sml29623 { 18743859Sml29623 nxge_status_t status = NXGE_OK; 18753859Sml29623 18763859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 18774732Sdavemq nxgep->mac.portnum)); 18783859Sml29623 18793859Sml29623 status = nxge_link_init(nxgep); 18803859Sml29623 18813859Sml29623 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 18823859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1883*6512Ssowmini "port%d Bad register acc handle", nxgep->mac.portnum)); 18843859Sml29623 status = NXGE_ERROR; 18853859Sml29623 } 18863859Sml29623 18873859Sml29623 if (status != NXGE_OK) { 18883859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1889*6512Ssowmini " nxge_setup_dev status " 1890*6512Ssowmini "(xcvr init 0x%08x)", status)); 18913859Sml29623 goto nxge_setup_dev_exit; 18923859Sml29623 } 18933859Sml29623 18943859Sml29623 nxge_setup_dev_exit: 18953859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1896*6512Ssowmini "<== nxge_setup_dev port %d status = 0x%08x", 1897*6512Ssowmini nxgep->mac.portnum, status)); 18983859Sml29623 18993859Sml29623 return (status); 19003859Sml29623 } 19013859Sml29623 19023859Sml29623 static void 19033859Sml29623 nxge_destroy_dev(p_nxge_t nxgep) 19043859Sml29623 { 19053859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 19063859Sml29623 19073859Sml29623 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 19083859Sml29623 19093859Sml29623 (void) nxge_hw_stop(nxgep); 19103859Sml29623 19113859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 19123859Sml29623 } 19133859Sml29623 19143859Sml29623 static nxge_status_t 19153859Sml29623 nxge_setup_system_dma_pages(p_nxge_t nxgep) 19163859Sml29623 { 19173859Sml29623 int ddi_status = DDI_SUCCESS; 19183859Sml29623 uint_t count; 19193859Sml29623 ddi_dma_cookie_t cookie; 19203859Sml29623 uint_t iommu_pagesize; 19213859Sml29623 nxge_status_t status = NXGE_OK; 19223859Sml29623 19236495Sspeer NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 19243859Sml29623 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 19253859Sml29623 if (nxgep->niu_type != N2_NIU) { 19263859Sml29623 iommu_pagesize = dvma_pagesize(nxgep->dip); 19273859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1928*6512Ssowmini " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1929*6512Ssowmini " default_block_size %d iommu_pagesize %d", 1930*6512Ssowmini nxgep->sys_page_sz, 1931*6512Ssowmini ddi_ptob(nxgep->dip, (ulong_t)1), 1932*6512Ssowmini nxgep->rx_default_block_size, 1933*6512Ssowmini iommu_pagesize)); 19343859Sml29623 19353859Sml29623 if (iommu_pagesize != 0) { 19363859Sml29623 if (nxgep->sys_page_sz == iommu_pagesize) { 
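			/*
			 * The kernel and IOMMU page sizes agree: cap the
			 * value used for buffer alignment at 16K (0x4000).
			 * If they differ, the else clause below falls back
			 * to the smaller of the two sizes.
			 */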
19373859Sml29623 if (iommu_pagesize > 0x4000) 19383859Sml29623 nxgep->sys_page_sz = 0x4000; 19393859Sml29623 } else { 19403859Sml29623 if (nxgep->sys_page_sz > iommu_pagesize) 19413859Sml29623 nxgep->sys_page_sz = iommu_pagesize; 19423859Sml29623 } 19433859Sml29623 } 19443859Sml29623 } 19453859Sml29623 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 19463859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1947*6512Ssowmini "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1948*6512Ssowmini "default_block_size %d page mask %d", 1949*6512Ssowmini nxgep->sys_page_sz, 1950*6512Ssowmini ddi_ptob(nxgep->dip, (ulong_t)1), 1951*6512Ssowmini nxgep->rx_default_block_size, 1952*6512Ssowmini nxgep->sys_page_mask)); 19533859Sml29623 19543859Sml29623 19553859Sml29623 switch (nxgep->sys_page_sz) { 19563859Sml29623 default: 19573859Sml29623 nxgep->sys_page_sz = 0x1000; 19583859Sml29623 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 19593859Sml29623 nxgep->rx_default_block_size = 0x1000; 19603859Sml29623 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 19613859Sml29623 break; 19623859Sml29623 case 0x1000: 19633859Sml29623 nxgep->rx_default_block_size = 0x1000; 19643859Sml29623 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 19653859Sml29623 break; 19663859Sml29623 case 0x2000: 19673859Sml29623 nxgep->rx_default_block_size = 0x2000; 19683859Sml29623 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 19693859Sml29623 break; 19703859Sml29623 case 0x4000: 19713859Sml29623 nxgep->rx_default_block_size = 0x4000; 19723859Sml29623 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 19733859Sml29623 break; 19743859Sml29623 case 0x8000: 19753859Sml29623 nxgep->rx_default_block_size = 0x8000; 19763859Sml29623 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 19773859Sml29623 break; 19783859Sml29623 } 19793859Sml29623 19803859Sml29623 #ifndef USE_RX_BIG_BUF 19813859Sml29623 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 19823859Sml29623 #else 19833859Sml29623 nxgep->rx_default_block_size = 0x2000; 19843859Sml29623 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 19853859Sml29623 #endif 19863859Sml29623 /* 19873859Sml29623 * Get the system DMA burst size. 
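	 * This is done by allocating a throw-away "spare" DMA handle,
	 * binding it to an arbitrary kernel address, and querying
	 * ddi_dma_burstsizes() on the bound handle; the handle is then
	 * unbound and freed again immediately.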
19883859Sml29623 */ 19893859Sml29623 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 1990*6512Ssowmini DDI_DMA_DONTWAIT, 0, 1991*6512Ssowmini &nxgep->dmasparehandle); 19923859Sml29623 if (ddi_status != DDI_SUCCESS) { 19933859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1994*6512Ssowmini "ddi_dma_alloc_handle: failed " 1995*6512Ssowmini " status 0x%x", ddi_status)); 19963859Sml29623 goto nxge_get_soft_properties_exit; 19973859Sml29623 } 19983859Sml29623 19993859Sml29623 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2000*6512Ssowmini (caddr_t)nxgep->dmasparehandle, 2001*6512Ssowmini sizeof (nxgep->dmasparehandle), 2002*6512Ssowmini DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2003*6512Ssowmini DDI_DMA_DONTWAIT, 0, 2004*6512Ssowmini &cookie, &count); 20053859Sml29623 if (ddi_status != DDI_DMA_MAPPED) { 20063859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2007*6512Ssowmini "Binding spare handle to find system" 2008*6512Ssowmini " burstsize failed.")); 20093859Sml29623 ddi_status = DDI_FAILURE; 20103859Sml29623 goto nxge_get_soft_properties_fail1; 20113859Sml29623 } 20123859Sml29623 20133859Sml29623 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 20143859Sml29623 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 20153859Sml29623 20163859Sml29623 nxge_get_soft_properties_fail1: 20173859Sml29623 ddi_dma_free_handle(&nxgep->dmasparehandle); 20183859Sml29623 20193859Sml29623 nxge_get_soft_properties_exit: 20203859Sml29623 20213859Sml29623 if (ddi_status != DDI_SUCCESS) 20223859Sml29623 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 20233859Sml29623 20243859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2025*6512Ssowmini "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 20263859Sml29623 return (status); 20273859Sml29623 } 20283859Sml29623 20293859Sml29623 static nxge_status_t 20303859Sml29623 nxge_alloc_mem_pool(p_nxge_t nxgep) 20313859Sml29623 { 20323859Sml29623 nxge_status_t status = NXGE_OK; 20333859Sml29623 20343859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 20353859Sml29623 20363859Sml29623 status = nxge_alloc_rx_mem_pool(nxgep); 20373859Sml29623 if (status != NXGE_OK) { 20383859Sml29623 return (NXGE_ERROR); 20393859Sml29623 } 20403859Sml29623 20413859Sml29623 status = nxge_alloc_tx_mem_pool(nxgep); 20423859Sml29623 if (status != NXGE_OK) { 20433859Sml29623 nxge_free_rx_mem_pool(nxgep); 20443859Sml29623 return (NXGE_ERROR); 20453859Sml29623 } 20463859Sml29623 20473859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 20483859Sml29623 return (NXGE_OK); 20493859Sml29623 } 20503859Sml29623 20513859Sml29623 static void 20523859Sml29623 nxge_free_mem_pool(p_nxge_t nxgep) 20533859Sml29623 { 20543859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 20553859Sml29623 20563859Sml29623 nxge_free_rx_mem_pool(nxgep); 20573859Sml29623 nxge_free_tx_mem_pool(nxgep); 20583859Sml29623 20593859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 20603859Sml29623 } 20613859Sml29623 20626495Sspeer nxge_status_t 20633859Sml29623 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 20643859Sml29623 { 20656495Sspeer uint32_t rdc_max; 20663859Sml29623 p_nxge_dma_pt_cfg_t p_all_cfgp; 20673859Sml29623 p_nxge_hw_pt_cfg_t p_cfgp; 20683859Sml29623 p_nxge_dma_pool_t dma_poolp; 20693859Sml29623 p_nxge_dma_common_t *dma_buf_p; 20703859Sml29623 p_nxge_dma_pool_t dma_cntl_poolp; 20713859Sml29623 p_nxge_dma_common_t *dma_cntl_p; 20723859Sml29623 uint32_t *num_chunks; /* per dma */ 20733859Sml29623 nxge_status_t 
status = NXGE_OK; 20743859Sml29623 20753859Sml29623 uint32_t nxge_port_rbr_size; 20763859Sml29623 uint32_t nxge_port_rbr_spare_size; 20773859Sml29623 uint32_t nxge_port_rcr_size; 20786495Sspeer uint32_t rx_cntl_alloc_size; 20793859Sml29623 20803859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 20813859Sml29623 20823859Sml29623 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 20833859Sml29623 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 20846495Sspeer rdc_max = NXGE_MAX_RDCS; 20853859Sml29623 20863859Sml29623 /* 20876495Sspeer * Allocate memory for the common DMA data structures. 20883859Sml29623 */ 20893859Sml29623 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2090*6512Ssowmini KM_SLEEP); 20913859Sml29623 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2092*6512Ssowmini sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 20933859Sml29623 20943859Sml29623 dma_cntl_poolp = (p_nxge_dma_pool_t) 2095*6512Ssowmini KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 20963859Sml29623 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2097*6512Ssowmini sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 20983859Sml29623 20993859Sml29623 num_chunks = (uint32_t *)KMEM_ZALLOC( 2100*6512Ssowmini sizeof (uint32_t) * rdc_max, KM_SLEEP); 21013859Sml29623 21023859Sml29623 /* 21036495Sspeer * Assume that each DMA channel will be configured with 21046495Sspeer * the default block size. 21056495Sspeer * rbr block counts are modulo the batch count (16). 21063859Sml29623 */ 21073859Sml29623 nxge_port_rbr_size = p_all_cfgp->rbr_size; 21083859Sml29623 nxge_port_rcr_size = p_all_cfgp->rcr_size; 21093859Sml29623 21103859Sml29623 if (!nxge_port_rbr_size) { 21113859Sml29623 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 21123859Sml29623 } 21133859Sml29623 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 21143859Sml29623 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2115*6512Ssowmini (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 21163859Sml29623 } 21173859Sml29623 21183859Sml29623 p_all_cfgp->rbr_size = nxge_port_rbr_size; 21193859Sml29623 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 21203859Sml29623 21213859Sml29623 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 21223859Sml29623 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2123*6512Ssowmini (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 21243859Sml29623 } 21255770Sml29623 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 21265770Sml29623 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 21275770Sml29623 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 21285770Sml29623 "set to default %d", 21295770Sml29623 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 21305770Sml29623 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 21315770Sml29623 } 21325770Sml29623 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 21335770Sml29623 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 21345770Sml29623 "nxge_alloc_rx_mem_pool: RCR too high %d, " 21355770Sml29623 "set to default %d", 21365770Sml29623 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 21375770Sml29623 nxge_port_rcr_size = RCR_DEFAULT_MAX; 21385770Sml29623 } 21393859Sml29623 21403859Sml29623 /* 21413859Sml29623 * N2/NIU has limitation on the descriptor sizes (contiguous 21423859Sml29623 * memory allocation on data buffers to 4M (contig_mem_alloc) 21433859Sml29623 * and little endian for control buffers (must use the ddi/dki mem alloc 21443859Sml29623 * function). 
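	 * As a rough illustration: with the default 4 KB receive block
	 * size, one contiguous data-buffer allocation can back at most
	 * 4 MB / 4 KB = 1024 blocks, which is why the code below clamps
	 * nxge_port_rbr_size and nxge_port_rcr_size to
	 * NXGE_NIU_CONTIG_RBR_MAX / NXGE_NIU_CONTIG_RCR_MAX whenever they
	 * are too large or not a power of two.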
21453859Sml29623 */ 21463859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 21473859Sml29623 if (nxgep->niu_type == N2_NIU) { 21483859Sml29623 nxge_port_rbr_spare_size = 0; 21493859Sml29623 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2150*6512Ssowmini (!ISP2(nxge_port_rbr_size))) { 21513859Sml29623 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 21523859Sml29623 } 21533859Sml29623 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2154*6512Ssowmini (!ISP2(nxge_port_rcr_size))) { 21553859Sml29623 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 21563859Sml29623 } 21573859Sml29623 } 21583859Sml29623 #endif 21593859Sml29623 21603859Sml29623 /* 21613859Sml29623 * Addresses of receive block ring, receive completion ring and the 21623859Sml29623 * mailbox must be all cache-aligned (64 bytes). 21633859Sml29623 */ 21643859Sml29623 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 21653859Sml29623 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 21663859Sml29623 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 21673859Sml29623 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 21683859Sml29623 21693859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2170*6512Ssowmini "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2171*6512Ssowmini "nxge_port_rcr_size = %d " 2172*6512Ssowmini "rx_cntl_alloc_size = %d", 2173*6512Ssowmini nxge_port_rbr_size, nxge_port_rbr_spare_size, 2174*6512Ssowmini nxge_port_rcr_size, 2175*6512Ssowmini rx_cntl_alloc_size)); 21763859Sml29623 21773859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 21783859Sml29623 if (nxgep->niu_type == N2_NIU) { 21796495Sspeer uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 21806495Sspeer (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 21816495Sspeer 21823859Sml29623 if (!ISP2(rx_buf_alloc_size)) { 21833859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2184*6512Ssowmini "==> nxge_alloc_rx_mem_pool: " 2185*6512Ssowmini " must be power of 2")); 21863859Sml29623 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 21873859Sml29623 goto nxge_alloc_rx_mem_pool_exit; 21883859Sml29623 } 21893859Sml29623 21903859Sml29623 if (rx_buf_alloc_size > (1 << 22)) { 21913859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2192*6512Ssowmini "==> nxge_alloc_rx_mem_pool: " 2193*6512Ssowmini " limit size to 4M")); 21943859Sml29623 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 21953859Sml29623 goto nxge_alloc_rx_mem_pool_exit; 21963859Sml29623 } 21973859Sml29623 21983859Sml29623 if (rx_cntl_alloc_size < 0x2000) { 21993859Sml29623 rx_cntl_alloc_size = 0x2000; 22003859Sml29623 } 22013859Sml29623 } 22023859Sml29623 #endif 22033859Sml29623 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 22043859Sml29623 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 22056495Sspeer nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 22066495Sspeer nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 22076495Sspeer 22086495Sspeer dma_poolp->ndmas = p_cfgp->max_rdcs; 22093859Sml29623 dma_poolp->num_chunks = num_chunks; 22103859Sml29623 dma_poolp->buf_allocated = B_TRUE; 22113859Sml29623 nxgep->rx_buf_pool_p = dma_poolp; 22123859Sml29623 dma_poolp->dma_buf_pool_p = dma_buf_p; 22133859Sml29623 22146495Sspeer dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 22153859Sml29623 dma_cntl_poolp->buf_allocated = B_TRUE; 22163859Sml29623 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 22173859Sml29623 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 22183859Sml29623 22196495Sspeer /* Allocate the receive rings, too. 
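	 * The RBR/RCR ring arrays and the mailbox-pointer array are sized
	 * for the hardware maximum (rdc_max == NXGE_MAX_RDCS) so a channel
	 * number can be used directly as an index, while the ndmas fields
	 * record only the RDCs this port actually owns (p_cfgp->max_rdcs).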
*/ 22206495Sspeer nxgep->rx_rbr_rings = 2221*6512Ssowmini KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 22226495Sspeer nxgep->rx_rbr_rings->rbr_rings = 2223*6512Ssowmini KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 22246495Sspeer nxgep->rx_rcr_rings = 2225*6512Ssowmini KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 22266495Sspeer nxgep->rx_rcr_rings->rcr_rings = 2227*6512Ssowmini KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 22286495Sspeer nxgep->rx_mbox_areas_p = 2229*6512Ssowmini KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 22306495Sspeer nxgep->rx_mbox_areas_p->rxmbox_areas = 2231*6512Ssowmini KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 22326495Sspeer 22336495Sspeer nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 22346495Sspeer p_cfgp->max_rdcs; 22356495Sspeer 22363859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2237*6512Ssowmini "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 22383859Sml29623 22393859Sml29623 nxge_alloc_rx_mem_pool_exit: 22406495Sspeer return (status); 22416495Sspeer } 22426495Sspeer 22436495Sspeer /* 22446495Sspeer * nxge_alloc_rxb 22456495Sspeer * 22466495Sspeer * Allocate buffers for an RDC. 22476495Sspeer * 22486495Sspeer * Arguments: 22496495Sspeer * nxgep 22506495Sspeer * channel The channel to map into our kernel space. 22516495Sspeer * 22526495Sspeer * Notes: 22536495Sspeer * 22546495Sspeer * NPI function calls: 22556495Sspeer * 22566495Sspeer * NXGE function calls: 22576495Sspeer * 22586495Sspeer * Registers accessed: 22596495Sspeer * 22606495Sspeer * Context: 22616495Sspeer * 22626495Sspeer * Taking apart: 22636495Sspeer * 22646495Sspeer * Open questions: 22656495Sspeer * 22666495Sspeer */ 22676495Sspeer nxge_status_t 22686495Sspeer nxge_alloc_rxb( 22696495Sspeer p_nxge_t nxgep, 22706495Sspeer int channel) 22716495Sspeer { 22726495Sspeer size_t rx_buf_alloc_size; 22736495Sspeer nxge_status_t status = NXGE_OK; 22746495Sspeer 22756495Sspeer nxge_dma_common_t **data; 22766495Sspeer nxge_dma_common_t **control; 22776495Sspeer uint32_t *num_chunks; 22786495Sspeer 22796495Sspeer NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 22806495Sspeer 22816495Sspeer /* 22826495Sspeer * Allocate memory for the receive buffers and descriptor rings. 22836495Sspeer * Replace these allocation functions with the interface functions 22846495Sspeer * provided by the partition manager if/when they are available. 22856495Sspeer */ 22866495Sspeer 22876495Sspeer /* 22886495Sspeer * Allocate memory for the receive buffer blocks. 22896495Sspeer */ 22906495Sspeer rx_buf_alloc_size = (nxgep->rx_default_block_size * 2291*6512Ssowmini (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 22926495Sspeer 22936495Sspeer data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 22946495Sspeer num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 22956495Sspeer 22966495Sspeer if ((status = nxge_alloc_rx_buf_dma( 22976495Sspeer nxgep, channel, data, rx_buf_alloc_size, 22986495Sspeer nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 22996495Sspeer return (status); 23006495Sspeer } 23016495Sspeer 23026495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 23036495Sspeer "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 23046495Sspeer 23056495Sspeer /* 23066495Sspeer * Allocate memory for descriptor rings and mailbox. 
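	 * The size used here, nxge_port_rx_cntl_alloc_size, was computed
	 * in nxge_alloc_rx_mem_pool() as RBR descriptors + RCR entries +
	 * mailbox.  On failure, the data buffers allocated above are
	 * marked BUF_ALLOCATED_WAIT_FREE and released before returning.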
23076495Sspeer */ 23086495Sspeer control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 23096495Sspeer 23106495Sspeer if ((status = nxge_alloc_rx_cntl_dma( 23116495Sspeer nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 23126495Sspeer != NXGE_OK) { 23136495Sspeer nxge_free_rx_cntl_dma(nxgep, *control); 23146495Sspeer (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 23156495Sspeer nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 23166495Sspeer return (status); 23176495Sspeer } 23186495Sspeer 23193859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 23206495Sspeer "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 23213859Sml29623 23223859Sml29623 return (status); 23233859Sml29623 } 23243859Sml29623 23256495Sspeer void 23266495Sspeer nxge_free_rxb( 23276495Sspeer p_nxge_t nxgep, 23286495Sspeer int channel) 23296495Sspeer { 23306495Sspeer nxge_dma_common_t *data; 23316495Sspeer nxge_dma_common_t *control; 23326495Sspeer uint32_t num_chunks; 23336495Sspeer 23346495Sspeer NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 23356495Sspeer 23366495Sspeer data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 23376495Sspeer num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 23386495Sspeer nxge_free_rx_buf_dma(nxgep, data, num_chunks); 23396495Sspeer 23406495Sspeer nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 23416495Sspeer nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 23426495Sspeer 23436495Sspeer control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 23446495Sspeer nxge_free_rx_cntl_dma(nxgep, control); 23456495Sspeer 23466495Sspeer nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 23476495Sspeer 23486495Sspeer KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 23496495Sspeer KMEM_FREE(control, sizeof (nxge_dma_common_t)); 23506495Sspeer 23516495Sspeer NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb")); 23526495Sspeer } 23536495Sspeer 23543859Sml29623 static void 23553859Sml29623 nxge_free_rx_mem_pool(p_nxge_t nxgep) 23563859Sml29623 { 23576495Sspeer int rdc_max = NXGE_MAX_RDCS; 23583859Sml29623 23593859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 23603859Sml29623 23616495Sspeer if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 23623859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2363*6512Ssowmini "<== nxge_free_rx_mem_pool " 2364*6512Ssowmini "(null rx buf pool or buf not allocated")); 23653859Sml29623 return; 23663859Sml29623 } 23676495Sspeer if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 23683859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2369*6512Ssowmini "<== nxge_free_rx_mem_pool " 2370*6512Ssowmini "(null rx cntl buf pool or cntl buf not allocated")); 23713859Sml29623 return; 23723859Sml29623 } 23733859Sml29623 23746495Sspeer KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 23756495Sspeer sizeof (p_nxge_dma_common_t) * rdc_max); 23766495Sspeer KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 23776495Sspeer 23786495Sspeer KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 23796495Sspeer sizeof (uint32_t) * rdc_max); 23806495Sspeer KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 23816495Sspeer sizeof (p_nxge_dma_common_t) * rdc_max); 23826495Sspeer KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 23836495Sspeer 23846495Sspeer nxgep->rx_buf_pool_p = 0; 23856495Sspeer nxgep->rx_cntl_pool_p = 0; 23866495Sspeer 23876495Sspeer KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 23886495Sspeer sizeof (p_rx_rbr_ring_t) * rdc_max); 23896495Sspeer KMEM_FREE(nxgep->rx_rbr_rings, 
sizeof (rx_rbr_rings_t)); 23906495Sspeer KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 23916495Sspeer sizeof (p_rx_rcr_ring_t) * rdc_max); 23926495Sspeer KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 23936495Sspeer KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 23946495Sspeer sizeof (p_rx_mbox_t) * rdc_max); 23956495Sspeer KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 23966495Sspeer 23976495Sspeer nxgep->rx_rbr_rings = 0; 23986495Sspeer nxgep->rx_rcr_rings = 0; 23996495Sspeer nxgep->rx_mbox_areas_p = 0; 24003859Sml29623 24013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 24023859Sml29623 } 24033859Sml29623 24043859Sml29623 24053859Sml29623 static nxge_status_t 24063859Sml29623 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 24073859Sml29623 p_nxge_dma_common_t *dmap, 24083859Sml29623 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 24093859Sml29623 { 24103859Sml29623 p_nxge_dma_common_t rx_dmap; 24113859Sml29623 nxge_status_t status = NXGE_OK; 24123859Sml29623 size_t total_alloc_size; 24133859Sml29623 size_t allocated = 0; 24143859Sml29623 int i, size_index, array_size; 24156495Sspeer boolean_t use_kmem_alloc = B_FALSE; 24163859Sml29623 24173859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 24183859Sml29623 24193859Sml29623 rx_dmap = (p_nxge_dma_common_t) 2420*6512Ssowmini KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2421*6512Ssowmini KM_SLEEP); 24223859Sml29623 24233859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2424*6512Ssowmini " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2425*6512Ssowmini dma_channel, alloc_size, block_size, dmap)); 24263859Sml29623 24273859Sml29623 total_alloc_size = alloc_size; 24283859Sml29623 24293859Sml29623 #if defined(RX_USE_RECLAIM_POST) 24303859Sml29623 total_alloc_size = alloc_size + alloc_size/4; 24313859Sml29623 #endif 24323859Sml29623 24333859Sml29623 i = 0; 24343859Sml29623 size_index = 0; 24353859Sml29623 array_size = sizeof (alloc_sizes)/sizeof (size_t); 24363859Sml29623 while ((alloc_sizes[size_index] < alloc_size) && 2437*6512Ssowmini (size_index < array_size)) 2438*6512Ssowmini size_index++; 24393859Sml29623 if (size_index >= array_size) { 24403859Sml29623 size_index = array_size - 1; 24413859Sml29623 } 24423859Sml29623 24436495Sspeer /* For Neptune, use kmem_alloc if the kmem flag is set. 
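	 * nxge_use_kmem_alloc is a global tunable; on x86 builds the chunk
	 * size index is also forced to the smallest entry, presumably to
	 * keep the individual kmem_alloc() requests small.  As an
	 * illustrative example, the feature could be turned off from
	 * /etc/system with:
	 *
	 *	set nxge:nxge_use_kmem_alloc = 0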
*/ 24446495Sspeer if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 24456495Sspeer use_kmem_alloc = B_TRUE; 24466495Sspeer #if defined(__i386) || defined(__amd64) 24476495Sspeer size_index = 0; 24486495Sspeer #endif 24496495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 24506495Sspeer "==> nxge_alloc_rx_buf_dma: " 24516495Sspeer "Neptune use kmem_alloc() - size_index %d", 24526495Sspeer size_index)); 24536495Sspeer } 24546495Sspeer 24553859Sml29623 while ((allocated < total_alloc_size) && 2456*6512Ssowmini (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 24573859Sml29623 rx_dmap[i].dma_chunk_index = i; 24583859Sml29623 rx_dmap[i].block_size = block_size; 24593859Sml29623 rx_dmap[i].alength = alloc_sizes[size_index]; 24603859Sml29623 rx_dmap[i].orig_alength = rx_dmap[i].alength; 24613859Sml29623 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 24623859Sml29623 rx_dmap[i].dma_channel = dma_channel; 24633859Sml29623 rx_dmap[i].contig_alloc_type = B_FALSE; 24646495Sspeer rx_dmap[i].kmem_alloc_type = B_FALSE; 24656495Sspeer rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 24663859Sml29623 24673859Sml29623 /* 24683859Sml29623 * N2/NIU: data buffers must be contiguous as the driver 24693859Sml29623 * needs to call Hypervisor api to set up 24703859Sml29623 * logical pages. 24713859Sml29623 */ 24723859Sml29623 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 24733859Sml29623 rx_dmap[i].contig_alloc_type = B_TRUE; 24746495Sspeer rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 24756495Sspeer } else if (use_kmem_alloc) { 24766495Sspeer /* For Neptune, use kmem_alloc */ 24776495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 24786495Sspeer "==> nxge_alloc_rx_buf_dma: " 24796495Sspeer "Neptune use kmem_alloc()")); 24806495Sspeer rx_dmap[i].kmem_alloc_type = B_TRUE; 24816495Sspeer rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 24823859Sml29623 } 24833859Sml29623 24843859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2485*6512Ssowmini "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2486*6512Ssowmini "i %d nblocks %d alength %d", 2487*6512Ssowmini dma_channel, i, &rx_dmap[i], block_size, 2488*6512Ssowmini i, rx_dmap[i].nblocks, 2489*6512Ssowmini rx_dmap[i].alength)); 24903859Sml29623 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2491*6512Ssowmini &nxge_rx_dma_attr, 2492*6512Ssowmini rx_dmap[i].alength, 2493*6512Ssowmini &nxge_dev_buf_dma_acc_attr, 2494*6512Ssowmini DDI_DMA_READ | DDI_DMA_STREAMING, 2495*6512Ssowmini (p_nxge_dma_common_t)(&rx_dmap[i])); 24963859Sml29623 if (status != NXGE_OK) { 24973859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 24986495Sspeer "nxge_alloc_rx_buf_dma: Alloc Failed: " 24996495Sspeer "dma %d size_index %d size requested %d", 25006495Sspeer dma_channel, 25016495Sspeer size_index, 25026495Sspeer rx_dmap[i].alength)); 25033859Sml29623 size_index--; 25043859Sml29623 } else { 25056495Sspeer rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 25066495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 25076495Sspeer " nxge_alloc_rx_buf_dma DONE alloc mem: " 25086495Sspeer "dma %d dma_buf_p $%p kaddrp $%p alength %d " 25096495Sspeer "buf_alloc_state %d alloc_type %d", 25106495Sspeer dma_channel, 25116495Sspeer &rx_dmap[i], 25126495Sspeer rx_dmap[i].kaddrp, 25136495Sspeer rx_dmap[i].alength, 25146495Sspeer rx_dmap[i].buf_alloc_state, 25156495Sspeer rx_dmap[i].buf_alloc_type)); 25166495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 25176495Sspeer " alloc_rx_buf_dma allocated rdc %d " 25186495Sspeer "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 25196495Sspeer dma_channel, i, rx_dmap[i].alength, 25206495Sspeer 
rx_dmap[i].ioaddr_pp, &rx_dmap[i], 25216495Sspeer rx_dmap[i].kaddrp)); 25223859Sml29623 i++; 25233859Sml29623 allocated += alloc_sizes[size_index]; 25243859Sml29623 } 25253859Sml29623 } 25263859Sml29623 25273859Sml29623 if (allocated < total_alloc_size) { 25285770Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 25296495Sspeer "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 25305770Sml29623 "allocated 0x%x requested 0x%x", 25315770Sml29623 dma_channel, 25325770Sml29623 allocated, total_alloc_size)); 25335770Sml29623 status = NXGE_ERROR; 25343859Sml29623 goto nxge_alloc_rx_mem_fail1; 25353859Sml29623 } 25363859Sml29623 25375770Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 25386495Sspeer "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 25395770Sml29623 "allocated 0x%x requested 0x%x", 25405770Sml29623 dma_channel, 25415770Sml29623 allocated, total_alloc_size)); 25425770Sml29623 25433859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2544*6512Ssowmini " alloc_rx_buf_dma rdc %d allocated %d chunks", 2545*6512Ssowmini dma_channel, i)); 25463859Sml29623 *num_chunks = i; 25473859Sml29623 *dmap = rx_dmap; 25483859Sml29623 25493859Sml29623 goto nxge_alloc_rx_mem_exit; 25503859Sml29623 25513859Sml29623 nxge_alloc_rx_mem_fail1: 25523859Sml29623 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 25533859Sml29623 25543859Sml29623 nxge_alloc_rx_mem_exit: 25553859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2556*6512Ssowmini "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 25573859Sml29623 25583859Sml29623 return (status); 25593859Sml29623 } 25603859Sml29623 25613859Sml29623 /*ARGSUSED*/ 25623859Sml29623 static void 25633859Sml29623 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 25643859Sml29623 uint32_t num_chunks) 25653859Sml29623 { 25663859Sml29623 int i; 25673859Sml29623 25683859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2569*6512Ssowmini "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 25703859Sml29623 25716495Sspeer if (dmap == 0) 25726495Sspeer return; 25736495Sspeer 25743859Sml29623 for (i = 0; i < num_chunks; i++) { 25753859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2576*6512Ssowmini "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2577*6512Ssowmini i, dmap)); 25786495Sspeer nxge_dma_free_rx_data_buf(dmap++); 25793859Sml29623 } 25803859Sml29623 25813859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 25823859Sml29623 } 25833859Sml29623 25843859Sml29623 /*ARGSUSED*/ 25853859Sml29623 static nxge_status_t 25863859Sml29623 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 25873859Sml29623 p_nxge_dma_common_t *dmap, size_t size) 25883859Sml29623 { 25893859Sml29623 p_nxge_dma_common_t rx_dmap; 25903859Sml29623 nxge_status_t status = NXGE_OK; 25913859Sml29623 25923859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 25933859Sml29623 25943859Sml29623 rx_dmap = (p_nxge_dma_common_t) 2595*6512Ssowmini KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 25963859Sml29623 25973859Sml29623 rx_dmap->contig_alloc_type = B_FALSE; 25986495Sspeer rx_dmap->kmem_alloc_type = B_FALSE; 25993859Sml29623 26003859Sml29623 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2601*6512Ssowmini &nxge_desc_dma_attr, 2602*6512Ssowmini size, 2603*6512Ssowmini &nxge_dev_desc_dma_acc_attr, 2604*6512Ssowmini DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2605*6512Ssowmini rx_dmap); 26063859Sml29623 if (status != NXGE_OK) { 26073859Sml29623 goto nxge_alloc_rx_cntl_dma_fail1; 26083859Sml29623 } 26093859Sml29623 26103859Sml29623 *dmap = rx_dmap; 
26113859Sml29623 goto nxge_alloc_rx_cntl_dma_exit; 26123859Sml29623 26133859Sml29623 nxge_alloc_rx_cntl_dma_fail1: 26143859Sml29623 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 26153859Sml29623 26163859Sml29623 nxge_alloc_rx_cntl_dma_exit: 26173859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2618*6512Ssowmini "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 26193859Sml29623 26203859Sml29623 return (status); 26213859Sml29623 } 26223859Sml29623 26233859Sml29623 /*ARGSUSED*/ 26243859Sml29623 static void 26253859Sml29623 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 26263859Sml29623 { 26273859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 26283859Sml29623 26296495Sspeer if (dmap == 0) 26306495Sspeer return; 26316495Sspeer 26323859Sml29623 nxge_dma_mem_free(dmap); 26333859Sml29623 26343859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 26353859Sml29623 } 26363859Sml29623 26376495Sspeer typedef struct { 26386495Sspeer size_t tx_size; 26396495Sspeer size_t cr_size; 26406495Sspeer size_t threshhold; 26416495Sspeer } nxge_tdc_sizes_t; 26426495Sspeer 26436495Sspeer static 26446495Sspeer nxge_status_t 26456495Sspeer nxge_tdc_sizes( 26466495Sspeer nxge_t *nxgep, 26476495Sspeer nxge_tdc_sizes_t *sizes) 26486495Sspeer { 26496495Sspeer uint32_t threshhold; /* The bcopy() threshhold */ 26506495Sspeer size_t tx_size; /* Transmit buffer size */ 26516495Sspeer size_t cr_size; /* Completion ring size */ 26526495Sspeer 26536495Sspeer /* 26546495Sspeer * Assume that each DMA channel will be configured with the 26556495Sspeer * default transmit buffer size for copying transmit data. 26566495Sspeer * (If a packet is bigger than this, it will not be copied.) 26576495Sspeer */ 26586495Sspeer if (nxgep->niu_type == N2_NIU) { 26596495Sspeer threshhold = TX_BCOPY_SIZE; 26606495Sspeer } else { 26616495Sspeer threshhold = nxge_bcopy_thresh; 26626495Sspeer } 26636495Sspeer tx_size = nxge_tx_ring_size * threshhold; 26646495Sspeer 26656495Sspeer cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 26666495Sspeer cr_size += sizeof (txdma_mailbox_t); 26676495Sspeer 26686495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 26696495Sspeer if (nxgep->niu_type == N2_NIU) { 26706495Sspeer if (!ISP2(tx_size)) { 26716495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2672*6512Ssowmini "==> nxge_tdc_sizes: Tx size" 2673*6512Ssowmini " must be power of 2")); 26746495Sspeer return (NXGE_ERROR); 26756495Sspeer } 26766495Sspeer 26776495Sspeer if (tx_size > (1 << 22)) { 26786495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2679*6512Ssowmini "==> nxge_tdc_sizes: Tx size" 2680*6512Ssowmini " limited to 4M")); 26816495Sspeer return (NXGE_ERROR); 26826495Sspeer } 26836495Sspeer 26846495Sspeer if (cr_size < 0x2000) 26856495Sspeer cr_size = 0x2000; 26866495Sspeer } 26876495Sspeer #endif 26886495Sspeer 26896495Sspeer sizes->threshhold = threshhold; 26906495Sspeer sizes->tx_size = tx_size; 26916495Sspeer sizes->cr_size = cr_size; 26926495Sspeer 26936495Sspeer return (NXGE_OK); 26946495Sspeer } 26956495Sspeer /* 26966495Sspeer * nxge_alloc_txb 26976495Sspeer * 26986495Sspeer * Allocate buffers for an TDC. 26996495Sspeer * 27006495Sspeer * Arguments: 27016495Sspeer * nxgep 27026495Sspeer * channel The channel to map into our kernel space. 
27036495Sspeer * 27046495Sspeer * Notes: 27056495Sspeer * 27066495Sspeer * NPI function calls: 27076495Sspeer * 27086495Sspeer * NXGE function calls: 27096495Sspeer * 27106495Sspeer * Registers accessed: 27116495Sspeer * 27126495Sspeer * Context: 27136495Sspeer * 27146495Sspeer * Taking apart: 27156495Sspeer * 27166495Sspeer * Open questions: 27176495Sspeer * 27186495Sspeer */ 27196495Sspeer nxge_status_t 27206495Sspeer nxge_alloc_txb( 27216495Sspeer p_nxge_t nxgep, 27226495Sspeer int channel) 27236495Sspeer { 27246495Sspeer nxge_dma_common_t **dma_buf_p; 27256495Sspeer nxge_dma_common_t **dma_cntl_p; 27266495Sspeer uint32_t *num_chunks; 27276495Sspeer nxge_status_t status = NXGE_OK; 27286495Sspeer 27296495Sspeer nxge_tdc_sizes_t sizes; 27306495Sspeer 27316495Sspeer NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb")); 27326495Sspeer 27336495Sspeer if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 27346495Sspeer return (NXGE_ERROR); 27356495Sspeer 27366495Sspeer /* 27376495Sspeer * Allocate memory for transmit buffers and descriptor rings. 27386495Sspeer * Replace these allocation functions with the interface functions 27396495Sspeer * provided by the partition manager Real Soon Now. 27406495Sspeer */ 27416495Sspeer dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 27426495Sspeer num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 27436495Sspeer 27446495Sspeer dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 27456495Sspeer 27466495Sspeer /* 27476495Sspeer * Allocate memory for transmit buffers and descriptor rings. 27486495Sspeer * Replace allocation functions with interface functions provided 27496495Sspeer * by the partition manager when it is available. 27506495Sspeer * 27516495Sspeer * Allocate memory for the transmit buffer pool. 27526495Sspeer */ 27536495Sspeer NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2754*6512Ssowmini "sizes: tx: %ld, cr:%ld, th:%ld", 2755*6512Ssowmini sizes.tx_size, sizes.cr_size, sizes.threshhold)); 27566495Sspeer 27576495Sspeer *num_chunks = 0; 27586495Sspeer status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 27596495Sspeer sizes.tx_size, sizes.threshhold, num_chunks); 27606495Sspeer if (status != NXGE_OK) { 27616495Sspeer cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 27626495Sspeer return (status); 27636495Sspeer } 27646495Sspeer 27656495Sspeer /* 27666495Sspeer * Allocate memory for descriptor rings and mailbox. 
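	 * sizes.cr_size, computed by nxge_tdc_sizes() above, covers the
	 * transmit descriptor ring (nxge_tx_ring_size entries of
	 * tx_desc_t) plus the TDC mailbox.  If this allocation fails, the
	 * transmit data buffers allocated above are freed again.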
27676495Sspeer */ 27686495Sspeer status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 27696495Sspeer sizes.cr_size); 27706495Sspeer if (status != NXGE_OK) { 27716495Sspeer nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 27726495Sspeer cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 27736495Sspeer return (status); 27746495Sspeer } 27756495Sspeer 27766495Sspeer return (NXGE_OK); 27776495Sspeer } 27786495Sspeer 27796495Sspeer void 27806495Sspeer nxge_free_txb( 27816495Sspeer p_nxge_t nxgep, 27826495Sspeer int channel) 27836495Sspeer { 27846495Sspeer nxge_dma_common_t *data; 27856495Sspeer nxge_dma_common_t *control; 27866495Sspeer uint32_t num_chunks; 27876495Sspeer 27886495Sspeer NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 27896495Sspeer 27906495Sspeer data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 27916495Sspeer num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 27926495Sspeer nxge_free_tx_buf_dma(nxgep, data, num_chunks); 27936495Sspeer 27946495Sspeer nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 27956495Sspeer nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 27966495Sspeer 27976495Sspeer control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 27986495Sspeer nxge_free_tx_cntl_dma(nxgep, control); 27996495Sspeer 28006495Sspeer nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 28016495Sspeer 28026495Sspeer KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 28036495Sspeer KMEM_FREE(control, sizeof (nxge_dma_common_t)); 28046495Sspeer 28056495Sspeer NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 28066495Sspeer } 28076495Sspeer 28086495Sspeer /* 28096495Sspeer * nxge_alloc_tx_mem_pool 28106495Sspeer * 28116495Sspeer * This function allocates all of the per-port TDC control data structures. 28126495Sspeer * The per-channel (TDC) data structures are allocated when needed. 28136495Sspeer * 28146495Sspeer * Arguments: 28156495Sspeer * nxgep 28166495Sspeer * 28176495Sspeer * Notes: 28186495Sspeer * 28196495Sspeer * Context: 28206495Sspeer * Any domain 28216495Sspeer */ 28226495Sspeer nxge_status_t 28233859Sml29623 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 28243859Sml29623 { 28256495Sspeer nxge_hw_pt_cfg_t *p_cfgp; 28266495Sspeer nxge_dma_pool_t *dma_poolp; 28276495Sspeer nxge_dma_common_t **dma_buf_p; 28286495Sspeer nxge_dma_pool_t *dma_cntl_poolp; 28296495Sspeer nxge_dma_common_t **dma_cntl_p; 28303859Sml29623 uint32_t *num_chunks; /* per dma */ 28316495Sspeer int tdc_max; 28323859Sml29623 28333859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 28343859Sml29623 28356495Sspeer p_cfgp = &nxgep->pt_config.hw_config; 28366495Sspeer tdc_max = NXGE_MAX_TDCS; 28376495Sspeer 28383859Sml29623 /* 28393859Sml29623 * Allocate memory for each transmit DMA channel. 
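	 * As on the receive side, the pool and ring-pointer arrays are
	 * sized for the hardware maximum (tdc_max == NXGE_MAX_TDCS), while
	 * the ndmas fields record only the TDCs this port owns
	 * (p_cfgp->tdc.owned).  The per-channel buffers themselves are
	 * allocated later, by nxge_alloc_txb(), when a channel is used.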
28403859Sml29623 */ 28413859Sml29623 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2842*6512Ssowmini KM_SLEEP); 28433859Sml29623 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2844*6512Ssowmini sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 28453859Sml29623 28463859Sml29623 dma_cntl_poolp = (p_nxge_dma_pool_t) 2847*6512Ssowmini KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 28483859Sml29623 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2849*6512Ssowmini sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 28503859Sml29623 28515770Sml29623 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 28525770Sml29623 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 28535770Sml29623 "nxge_alloc_tx_mem_pool: TDC too high %d, " 28545770Sml29623 "set to default %d", 28555770Sml29623 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 28565770Sml29623 nxge_tx_ring_size = TDC_DEFAULT_MAX; 28575770Sml29623 } 28585770Sml29623 28593859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 28603859Sml29623 /* 28613859Sml29623 * N2/NIU has limitation on the descriptor sizes (contiguous 28623859Sml29623 * memory allocation on data buffers to 4M (contig_mem_alloc) 28633859Sml29623 * and little endian for control buffers (must use the ddi/dki mem alloc 28643859Sml29623 * function). The transmit ring is limited to 8K (includes the 28653859Sml29623 * mailbox). 28663859Sml29623 */ 28673859Sml29623 if (nxgep->niu_type == N2_NIU) { 28683859Sml29623 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2869*6512Ssowmini (!ISP2(nxge_tx_ring_size))) { 28703859Sml29623 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 28713859Sml29623 } 28723859Sml29623 } 28733859Sml29623 #endif 28743859Sml29623 28753859Sml29623 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 28763859Sml29623 28773859Sml29623 num_chunks = (uint32_t *)KMEM_ZALLOC( 2878*6512Ssowmini sizeof (uint32_t) * tdc_max, KM_SLEEP); 28796495Sspeer 28806495Sspeer dma_poolp->ndmas = p_cfgp->tdc.owned; 28813859Sml29623 dma_poolp->num_chunks = num_chunks; 28823859Sml29623 dma_poolp->dma_buf_pool_p = dma_buf_p; 28833859Sml29623 nxgep->tx_buf_pool_p = dma_poolp; 28843859Sml29623 28856495Sspeer dma_poolp->buf_allocated = B_TRUE; 28866495Sspeer 28876495Sspeer dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 28883859Sml29623 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 28893859Sml29623 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 28903859Sml29623 28916495Sspeer dma_cntl_poolp->buf_allocated = B_TRUE; 28926495Sspeer 28936495Sspeer nxgep->tx_rings = 28946495Sspeer KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 28956495Sspeer nxgep->tx_rings->rings = 28966495Sspeer KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 28976495Sspeer nxgep->tx_mbox_areas_p = 28986495Sspeer KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 28996495Sspeer nxgep->tx_mbox_areas_p->txmbox_areas_p = 29006495Sspeer KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 29016495Sspeer 29026495Sspeer nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 29036495Sspeer 29043859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2905*6512Ssowmini "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 2906*6512Ssowmini tdc_max, dma_poolp->ndmas)); 29076495Sspeer 29086495Sspeer return (NXGE_OK); 29093859Sml29623 } 29103859Sml29623 29116495Sspeer nxge_status_t 29123859Sml29623 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 29133859Sml29623 p_nxge_dma_common_t *dmap, size_t alloc_size, 29143859Sml29623 size_t block_size, uint32_t *num_chunks) 29153859Sml29623 { 29163859Sml29623 p_nxge_dma_common_t tx_dmap; 29173859Sml29623 nxge_status_t status 
= NXGE_OK; 29183859Sml29623 size_t total_alloc_size; 29193859Sml29623 size_t allocated = 0; 29203859Sml29623 int i, size_index, array_size; 29213859Sml29623 29223859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 29233859Sml29623 29243859Sml29623 tx_dmap = (p_nxge_dma_common_t) 2925*6512Ssowmini KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2926*6512Ssowmini KM_SLEEP); 29273859Sml29623 29283859Sml29623 total_alloc_size = alloc_size; 29293859Sml29623 i = 0; 29303859Sml29623 size_index = 0; 29313859Sml29623 array_size = sizeof (alloc_sizes) / sizeof (size_t); 29323859Sml29623 while ((alloc_sizes[size_index] < alloc_size) && 2933*6512Ssowmini (size_index < array_size)) 29343859Sml29623 size_index++; 29353859Sml29623 if (size_index >= array_size) { 29363859Sml29623 size_index = array_size - 1; 29373859Sml29623 } 29383859Sml29623 29393859Sml29623 while ((allocated < total_alloc_size) && 2940*6512Ssowmini (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 29413859Sml29623 29423859Sml29623 tx_dmap[i].dma_chunk_index = i; 29433859Sml29623 tx_dmap[i].block_size = block_size; 29443859Sml29623 tx_dmap[i].alength = alloc_sizes[size_index]; 29453859Sml29623 tx_dmap[i].orig_alength = tx_dmap[i].alength; 29463859Sml29623 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 29473859Sml29623 tx_dmap[i].dma_channel = dma_channel; 29483859Sml29623 tx_dmap[i].contig_alloc_type = B_FALSE; 29496495Sspeer tx_dmap[i].kmem_alloc_type = B_FALSE; 29503859Sml29623 29513859Sml29623 /* 29523859Sml29623 * N2/NIU: data buffers must be contiguous as the driver 29533859Sml29623 * needs to call Hypervisor api to set up 29543859Sml29623 * logical pages. 29553859Sml29623 */ 29563859Sml29623 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 29573859Sml29623 tx_dmap[i].contig_alloc_type = B_TRUE; 29583859Sml29623 } 29593859Sml29623 29603859Sml29623 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2961*6512Ssowmini &nxge_tx_dma_attr, 2962*6512Ssowmini tx_dmap[i].alength, 2963*6512Ssowmini &nxge_dev_buf_dma_acc_attr, 2964*6512Ssowmini DDI_DMA_WRITE | DDI_DMA_STREAMING, 2965*6512Ssowmini (p_nxge_dma_common_t)(&tx_dmap[i])); 29663859Sml29623 if (status != NXGE_OK) { 29673859Sml29623 size_index--; 29683859Sml29623 } else { 29693859Sml29623 i++; 29703859Sml29623 allocated += alloc_sizes[size_index]; 29713859Sml29623 } 29723859Sml29623 } 29733859Sml29623 29743859Sml29623 if (allocated < total_alloc_size) { 29755770Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 29765770Sml29623 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 29775770Sml29623 "allocated 0x%x requested 0x%x", 29785770Sml29623 dma_channel, 29795770Sml29623 allocated, total_alloc_size)); 29805770Sml29623 status = NXGE_ERROR; 29813859Sml29623 goto nxge_alloc_tx_mem_fail1; 29823859Sml29623 } 29833859Sml29623 29845770Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 29855770Sml29623 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 29865770Sml29623 "allocated 0x%x requested 0x%x", 29875770Sml29623 dma_channel, 29885770Sml29623 allocated, total_alloc_size)); 29895770Sml29623 29903859Sml29623 *num_chunks = i; 29913859Sml29623 *dmap = tx_dmap; 29923859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2993*6512Ssowmini "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2994*6512Ssowmini *dmap, i)); 29953859Sml29623 goto nxge_alloc_tx_mem_exit; 29963859Sml29623 29973859Sml29623 nxge_alloc_tx_mem_fail1: 29983859Sml29623 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 29993859Sml29623 30003859Sml29623 
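/*
 * Editor's note (illustrative, not part of the original source): the loop
 * above implements a best-effort, descending-size chunk allocator.  It
 * starts at the largest alloc_sizes[] entry that does not exceed the
 * request and steps down one entry whenever an allocation fails, until
 * the request is satisfied or the table (or the NXGE_DMA_BLOCK slots) is
 * exhausted.  A self-contained sketch of the same idea, using malloc() as
 * a stand-in for nxge_dma_mem_alloc() and hypothetical names:
 *
 *	#include <stdlib.h>
 *
 *	static const size_t chunk_sizes[] = { 0x10000, 0x40000, 0x100000 };
 *	#define	NSIZES		(sizeof (chunk_sizes) / sizeof (chunk_sizes[0]))
 *	#define	MAX_CHUNKS	8
 *
 *	int
 *	alloc_in_chunks(size_t request, void *chunks[MAX_CHUNKS])
 *	{
 *		size_t done = 0;
 *		int n = 0;
 *		int idx = NSIZES - 1;		// largest chunk size first
 *
 *		while (done < request && idx >= 0 && n < MAX_CHUNKS) {
 *			void *p = malloc(chunk_sizes[idx]);
 *			if (p == NULL) {
 *				idx--;		// back off to a smaller chunk
 *				continue;
 *			}
 *			chunks[n++] = p;
 *			done += chunk_sizes[idx];
 *		}
 *		return (done >= request ? n : -1);
 *	}
 *
 * In the driver, the chunk count ends up in *num_chunks and the partially
 * built array is freed on failure, as in the label just below.
 */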
nxge_alloc_tx_mem_exit: 30013859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3002*6512Ssowmini "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 30033859Sml29623 30043859Sml29623 return (status); 30053859Sml29623 } 30063859Sml29623 30073859Sml29623 /*ARGSUSED*/ 30083859Sml29623 static void 30093859Sml29623 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 30103859Sml29623 uint32_t num_chunks) 30113859Sml29623 { 30123859Sml29623 int i; 30133859Sml29623 30143859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 30153859Sml29623 30166495Sspeer if (dmap == 0) 30176495Sspeer return; 30186495Sspeer 30193859Sml29623 for (i = 0; i < num_chunks; i++) { 30203859Sml29623 nxge_dma_mem_free(dmap++); 30213859Sml29623 } 30223859Sml29623 30233859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 30243859Sml29623 } 30253859Sml29623 30263859Sml29623 /*ARGSUSED*/ 30276495Sspeer nxge_status_t 30283859Sml29623 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 30293859Sml29623 p_nxge_dma_common_t *dmap, size_t size) 30303859Sml29623 { 30313859Sml29623 p_nxge_dma_common_t tx_dmap; 30323859Sml29623 nxge_status_t status = NXGE_OK; 30333859Sml29623 30343859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 30353859Sml29623 tx_dmap = (p_nxge_dma_common_t) 3036*6512Ssowmini KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 30373859Sml29623 30383859Sml29623 tx_dmap->contig_alloc_type = B_FALSE; 30396495Sspeer tx_dmap->kmem_alloc_type = B_FALSE; 30403859Sml29623 30413859Sml29623 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3042*6512Ssowmini &nxge_desc_dma_attr, 3043*6512Ssowmini size, 3044*6512Ssowmini &nxge_dev_desc_dma_acc_attr, 3045*6512Ssowmini DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3046*6512Ssowmini tx_dmap); 30473859Sml29623 if (status != NXGE_OK) { 30483859Sml29623 goto nxge_alloc_tx_cntl_dma_fail1; 30493859Sml29623 } 30503859Sml29623 30513859Sml29623 *dmap = tx_dmap; 30523859Sml29623 goto nxge_alloc_tx_cntl_dma_exit; 30533859Sml29623 30543859Sml29623 nxge_alloc_tx_cntl_dma_fail1: 30553859Sml29623 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 30563859Sml29623 30573859Sml29623 nxge_alloc_tx_cntl_dma_exit: 30583859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3059*6512Ssowmini "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 30603859Sml29623 30613859Sml29623 return (status); 30623859Sml29623 } 30633859Sml29623 30643859Sml29623 /*ARGSUSED*/ 30653859Sml29623 static void 30663859Sml29623 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 30673859Sml29623 { 30683859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 30693859Sml29623 30706495Sspeer if (dmap == 0) 30716495Sspeer return; 30726495Sspeer 30733859Sml29623 nxge_dma_mem_free(dmap); 30743859Sml29623 30753859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 30763859Sml29623 } 30773859Sml29623 30786495Sspeer /* 30796495Sspeer * nxge_free_tx_mem_pool 30806495Sspeer * 30816495Sspeer * This function frees all of the per-port TDC control data structures. 30826495Sspeer * The per-channel (TDC) data structures are freed when the channel 30836495Sspeer * is stopped. 
30846495Sspeer * 30856495Sspeer * Arguments: 30866495Sspeer * nxgep 30876495Sspeer * 30886495Sspeer * Notes: 30896495Sspeer * 30906495Sspeer * Context: 30916495Sspeer * Any domain 30926495Sspeer */ 30933859Sml29623 static void 30943859Sml29623 nxge_free_tx_mem_pool(p_nxge_t nxgep) 30953859Sml29623 { 30966495Sspeer int tdc_max = NXGE_MAX_TDCS; 30976495Sspeer 30986495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 30996495Sspeer 31006495Sspeer if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 31016495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3102*6512Ssowmini "<== nxge_free_tx_mem_pool " 3103*6512Ssowmini "(null tx buf pool or buf not allocated")); 31043859Sml29623 return; 31053859Sml29623 } 31066495Sspeer if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 31076495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3108*6512Ssowmini "<== nxge_free_tx_mem_pool " 3109*6512Ssowmini "(null tx cntl buf pool or cntl buf not allocated")); 31103859Sml29623 return; 31113859Sml29623 } 31123859Sml29623 31136495Sspeer /* 1. Free the mailboxes. */ 31146495Sspeer KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 31156495Sspeer sizeof (p_tx_mbox_t) * tdc_max); 31166495Sspeer KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 31176495Sspeer 31186495Sspeer nxgep->tx_mbox_areas_p = 0; 31196495Sspeer 31206495Sspeer /* 2. Free the transmit ring arrays. */ 31216495Sspeer KMEM_FREE(nxgep->tx_rings->rings, 31226495Sspeer sizeof (p_tx_ring_t) * tdc_max); 31236495Sspeer KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 31246495Sspeer 31256495Sspeer nxgep->tx_rings = 0; 31266495Sspeer 31276495Sspeer /* 3. Free the completion ring data structures. */ 31286495Sspeer KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 31296495Sspeer sizeof (p_nxge_dma_common_t) * tdc_max); 31306495Sspeer KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 31316495Sspeer 31326495Sspeer nxgep->tx_cntl_pool_p = 0; 31336495Sspeer 31346495Sspeer /* 4. Free the data ring data structures. */ 31356495Sspeer KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 31366495Sspeer sizeof (uint32_t) * tdc_max); 31376495Sspeer KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 31386495Sspeer sizeof (p_nxge_dma_common_t) * tdc_max); 31396495Sspeer KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 31406495Sspeer 31416495Sspeer nxgep->tx_buf_pool_p = 0; 31426495Sspeer 31436495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 31443859Sml29623 } 31453859Sml29623 31463859Sml29623 /*ARGSUSED*/ 31473859Sml29623 static nxge_status_t 31483859Sml29623 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 31493859Sml29623 struct ddi_dma_attr *dma_attrp, 31503859Sml29623 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 31513859Sml29623 p_nxge_dma_common_t dma_p) 31523859Sml29623 { 31533859Sml29623 caddr_t kaddrp; 31543859Sml29623 int ddi_status = DDI_SUCCESS; 31553859Sml29623 boolean_t contig_alloc_type; 31566495Sspeer boolean_t kmem_alloc_type; 31573859Sml29623 31583859Sml29623 contig_alloc_type = dma_p->contig_alloc_type; 31593859Sml29623 31603859Sml29623 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 31613859Sml29623 /* 31623859Sml29623 * contig_alloc_type for contiguous memory only allowed 31633859Sml29623 * for N2/NIU. 
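 *
 * (Editor's aside, illustrative only: taken together, contig_alloc_type
 *  and kmem_alloc_type select one of three allocation paths farther down
 *  in this function.  A compact summary, written as a sketch rather than
 *  quoted from the source:
 *
 *	if (contig_alloc_type)		// N2/NIU only
 *		backing = contig_mem_alloc(length);
 *	else if (kmem_alloc_type)	// plain kernel heap, then bound for DMA
 *		backing = KMEM_ALLOC(length, KM_NOSLEEP);
 *	else				// default DDI path
 *		backing = result of ddi_dma_mem_alloc();
 *
 *  All three paths finish with ddi_dma_addr_bind_handle() and require the
 *  binding to produce exactly one DMA cookie.)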
31643859Sml29623 */ 31653859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3166*6512Ssowmini "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3167*6512Ssowmini dma_p->contig_alloc_type)); 31683859Sml29623 return (NXGE_ERROR | NXGE_DDI_FAILED); 31693859Sml29623 } 31703859Sml29623 31713859Sml29623 dma_p->dma_handle = NULL; 31723859Sml29623 dma_p->acc_handle = NULL; 31733859Sml29623 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 31743859Sml29623 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 31753859Sml29623 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3176*6512Ssowmini DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 31773859Sml29623 if (ddi_status != DDI_SUCCESS) { 31783859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3179*6512Ssowmini "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 31803859Sml29623 return (NXGE_ERROR | NXGE_DDI_FAILED); 31813859Sml29623 } 31823859Sml29623 31836495Sspeer kmem_alloc_type = dma_p->kmem_alloc_type; 31846495Sspeer 31853859Sml29623 switch (contig_alloc_type) { 31863859Sml29623 case B_FALSE: 31876495Sspeer switch (kmem_alloc_type) { 31886495Sspeer case B_FALSE: 31896495Sspeer ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3190*6512Ssowmini length, 3191*6512Ssowmini acc_attr_p, 3192*6512Ssowmini xfer_flags, 3193*6512Ssowmini DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3194*6512Ssowmini &dma_p->acc_handle); 31956495Sspeer if (ddi_status != DDI_SUCCESS) { 31966495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31976495Sspeer "nxge_dma_mem_alloc: " 31986495Sspeer "ddi_dma_mem_alloc failed")); 31996495Sspeer ddi_dma_free_handle(&dma_p->dma_handle); 32006495Sspeer dma_p->dma_handle = NULL; 32016495Sspeer return (NXGE_ERROR | NXGE_DDI_FAILED); 32026495Sspeer } 32036495Sspeer if (dma_p->alength < length) { 32046495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32056495Sspeer "nxge_dma_mem_alloc:di_dma_mem_alloc " 32066495Sspeer "< length.")); 32076495Sspeer ddi_dma_mem_free(&dma_p->acc_handle); 32086495Sspeer ddi_dma_free_handle(&dma_p->dma_handle); 32096495Sspeer dma_p->acc_handle = NULL; 32106495Sspeer dma_p->dma_handle = NULL; 32116495Sspeer return (NXGE_ERROR); 32126495Sspeer } 32136495Sspeer 32146495Sspeer ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 32156495Sspeer NULL, 32166495Sspeer kaddrp, dma_p->alength, xfer_flags, 32176495Sspeer DDI_DMA_DONTWAIT, 32186495Sspeer 0, &dma_p->dma_cookie, &dma_p->ncookies); 32196495Sspeer if (ddi_status != DDI_DMA_MAPPED) { 32206495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32216495Sspeer "nxge_dma_mem_alloc: ddi_dma_addr_bind " 32226495Sspeer "failed " 32236495Sspeer "(staus 0x%x ncookies %d.)", ddi_status, 32246495Sspeer dma_p->ncookies)); 32256495Sspeer if (dma_p->acc_handle) { 32266495Sspeer ddi_dma_mem_free(&dma_p->acc_handle); 32276495Sspeer dma_p->acc_handle = NULL; 32286495Sspeer } 32296495Sspeer ddi_dma_free_handle(&dma_p->dma_handle); 32306495Sspeer dma_p->dma_handle = NULL; 32316495Sspeer return (NXGE_ERROR | NXGE_DDI_FAILED); 32326495Sspeer } 32336495Sspeer 32346495Sspeer if (dma_p->ncookies != 1) { 32356495Sspeer NXGE_DEBUG_MSG((nxgep, DMA_CTL, 32366495Sspeer "nxge_dma_mem_alloc:ddi_dma_addr_bind " 32376495Sspeer "> 1 cookie" 32386495Sspeer "(staus 0x%x ncookies %d.)", ddi_status, 32396495Sspeer dma_p->ncookies)); 32406495Sspeer if (dma_p->acc_handle) { 32416495Sspeer ddi_dma_mem_free(&dma_p->acc_handle); 32426495Sspeer dma_p->acc_handle = NULL; 32436495Sspeer } 32446495Sspeer (void) ddi_dma_unbind_handle(dma_p->dma_handle); 32456495Sspeer ddi_dma_free_handle(&dma_p->dma_handle); 
32466495Sspeer dma_p->dma_handle = NULL; 32476495Sspeer return (NXGE_ERROR); 32486495Sspeer } 32496495Sspeer break; 32506495Sspeer 32516495Sspeer case B_TRUE: 32526495Sspeer kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 32536495Sspeer if (kaddrp == NULL) { 32546495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32556495Sspeer "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 32566495Sspeer "kmem alloc failed")); 32576495Sspeer return (NXGE_ERROR); 32586495Sspeer } 32596495Sspeer 32606495Sspeer dma_p->alength = length; 32616495Sspeer ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 32626495Sspeer NULL, kaddrp, dma_p->alength, xfer_flags, 32636495Sspeer DDI_DMA_DONTWAIT, 0, 32646495Sspeer &dma_p->dma_cookie, &dma_p->ncookies); 32656495Sspeer if (ddi_status != DDI_DMA_MAPPED) { 32666495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32676495Sspeer "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 32686495Sspeer "(kmem_alloc) failed kaddrp $%p length %d " 32696495Sspeer "(staus 0x%x (%d) ncookies %d.)", 32706495Sspeer kaddrp, length, 32716495Sspeer ddi_status, ddi_status, dma_p->ncookies)); 32726495Sspeer KMEM_FREE(kaddrp, length); 32736495Sspeer dma_p->acc_handle = NULL; 32746495Sspeer ddi_dma_free_handle(&dma_p->dma_handle); 32756495Sspeer dma_p->dma_handle = NULL; 32766495Sspeer dma_p->kaddrp = NULL; 32776495Sspeer return (NXGE_ERROR | NXGE_DDI_FAILED); 32786495Sspeer } 32796495Sspeer 32806495Sspeer if (dma_p->ncookies != 1) { 32816495Sspeer NXGE_DEBUG_MSG((nxgep, DMA_CTL, 32826495Sspeer "nxge_dma_mem_alloc:ddi_dma_addr_bind " 32836495Sspeer "(kmem_alloc) > 1 cookie" 32846495Sspeer "(staus 0x%x ncookies %d.)", ddi_status, 3285*6512Ssowmini dma_p->ncookies)); 32866495Sspeer KMEM_FREE(kaddrp, length); 32873859Sml29623 dma_p->acc_handle = NULL; 32886495Sspeer (void) ddi_dma_unbind_handle(dma_p->dma_handle); 32896495Sspeer ddi_dma_free_handle(&dma_p->dma_handle); 32906495Sspeer dma_p->dma_handle = NULL; 32916495Sspeer dma_p->kaddrp = NULL; 32926495Sspeer return (NXGE_ERROR); 32933859Sml29623 } 32946495Sspeer 32956495Sspeer dma_p->kaddrp = kaddrp; 32966495Sspeer 32976495Sspeer NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3298*6512Ssowmini "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3299*6512Ssowmini "kaddr $%p alength %d", 3300*6512Ssowmini dma_p, 3301*6512Ssowmini kaddrp, 3302*6512Ssowmini dma_p->alength)); 33036495Sspeer break; 33043859Sml29623 } 33053859Sml29623 break; 33063859Sml29623 33073859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 33083859Sml29623 case B_TRUE: 33093859Sml29623 kaddrp = (caddr_t)contig_mem_alloc(length); 33103859Sml29623 if (kaddrp == NULL) { 33113859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3312*6512Ssowmini "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 33133859Sml29623 ddi_dma_free_handle(&dma_p->dma_handle); 33143859Sml29623 return (NXGE_ERROR | NXGE_DDI_FAILED); 33153859Sml29623 } 33163859Sml29623 33173859Sml29623 dma_p->alength = length; 33183859Sml29623 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3319*6512Ssowmini kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3320*6512Ssowmini &dma_p->dma_cookie, &dma_p->ncookies); 33213859Sml29623 if (ddi_status != DDI_DMA_MAPPED) { 33223859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3323*6512Ssowmini "nxge_dma_mem_alloc:di_dma_addr_bind failed " 3324*6512Ssowmini "(status 0x%x ncookies %d.)", ddi_status, 3325*6512Ssowmini dma_p->ncookies)); 33263859Sml29623 33273859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3328*6512Ssowmini "==> nxge_dma_mem_alloc: (not mapped)" 3329*6512Ssowmini "length %lu (0x%x) " 
3330*6512Ssowmini "free contig kaddrp $%p " 3331*6512Ssowmini "va_to_pa $%p", 3332*6512Ssowmini length, length, 3333*6512Ssowmini kaddrp, 3334*6512Ssowmini va_to_pa(kaddrp))); 33353859Sml29623 33363859Sml29623 33373859Sml29623 contig_mem_free((void *)kaddrp, length); 33383859Sml29623 ddi_dma_free_handle(&dma_p->dma_handle); 33393859Sml29623 33403859Sml29623 dma_p->dma_handle = NULL; 33413859Sml29623 dma_p->acc_handle = NULL; 33423859Sml29623 dma_p->alength = NULL; 33433859Sml29623 dma_p->kaddrp = NULL; 33443859Sml29623 33453859Sml29623 return (NXGE_ERROR | NXGE_DDI_FAILED); 33463859Sml29623 } 33473859Sml29623 33483859Sml29623 if (dma_p->ncookies != 1 || 3349*6512Ssowmini (dma_p->dma_cookie.dmac_laddress == NULL)) { 33503859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3351*6512Ssowmini "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 3352*6512Ssowmini "cookie or " 3353*6512Ssowmini "dmac_laddress is NULL $%p size %d " 3354*6512Ssowmini " (status 0x%x ncookies %d.)", 3355*6512Ssowmini ddi_status, 3356*6512Ssowmini dma_p->dma_cookie.dmac_laddress, 3357*6512Ssowmini dma_p->dma_cookie.dmac_size, 3358*6512Ssowmini dma_p->ncookies)); 33593859Sml29623 33603859Sml29623 contig_mem_free((void *)kaddrp, length); 33614185Sspeer (void) ddi_dma_unbind_handle(dma_p->dma_handle); 33623859Sml29623 ddi_dma_free_handle(&dma_p->dma_handle); 33633859Sml29623 33643859Sml29623 dma_p->alength = 0; 33653859Sml29623 dma_p->dma_handle = NULL; 33663859Sml29623 dma_p->acc_handle = NULL; 33673859Sml29623 dma_p->kaddrp = NULL; 33683859Sml29623 33693859Sml29623 return (NXGE_ERROR | NXGE_DDI_FAILED); 33703859Sml29623 } 33713859Sml29623 break; 33723859Sml29623 33733859Sml29623 #else 33743859Sml29623 case B_TRUE: 33753859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3376*6512Ssowmini "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 33773859Sml29623 return (NXGE_ERROR | NXGE_DDI_FAILED); 33783859Sml29623 #endif 33793859Sml29623 } 33803859Sml29623 33813859Sml29623 dma_p->kaddrp = kaddrp; 33823859Sml29623 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3383*6512Ssowmini dma_p->alength - RXBUF_64B_ALIGNED; 33845125Sjoycey #if defined(__i386) 33855125Sjoycey dma_p->ioaddr_pp = 3386*6512Ssowmini (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 33875125Sjoycey #else 33883859Sml29623 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 33895125Sjoycey #endif 33903859Sml29623 dma_p->last_ioaddr_pp = 33915125Sjoycey #if defined(__i386) 3392*6512Ssowmini (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 33935125Sjoycey #else 3394*6512Ssowmini (unsigned char *)dma_p->dma_cookie.dmac_laddress + 33955125Sjoycey #endif 3396*6512Ssowmini dma_p->alength - RXBUF_64B_ALIGNED; 33973859Sml29623 33983859Sml29623 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 33993859Sml29623 34003859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 34013859Sml29623 dma_p->orig_ioaddr_pp = 3402*6512Ssowmini (unsigned char *)dma_p->dma_cookie.dmac_laddress; 34033859Sml29623 dma_p->orig_alength = length; 34043859Sml29623 dma_p->orig_kaddrp = kaddrp; 34053859Sml29623 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 34063859Sml29623 #endif 34073859Sml29623 34083859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3409*6512Ssowmini "dma buffer allocated: dma_p $%p " 3410*6512Ssowmini "return dmac_ladress from cookie $%p cookie dmac_size %d " 3411*6512Ssowmini "dma_p->ioaddr_p $%p " 3412*6512Ssowmini "dma_p->orig_ioaddr_p $%p " 3413*6512Ssowmini "orig_vatopa $%p " 3414*6512Ssowmini "alength %d (0x%x) " 
3415*6512Ssowmini "kaddrp $%p " 3416*6512Ssowmini "length %d (0x%x)", 3417*6512Ssowmini dma_p, 3418*6512Ssowmini dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3419*6512Ssowmini dma_p->ioaddr_pp, 3420*6512Ssowmini dma_p->orig_ioaddr_pp, 3421*6512Ssowmini dma_p->orig_vatopa, 3422*6512Ssowmini dma_p->alength, dma_p->alength, 3423*6512Ssowmini kaddrp, 3424*6512Ssowmini length, length)); 34253859Sml29623 34263859Sml29623 return (NXGE_OK); 34273859Sml29623 } 34283859Sml29623 34293859Sml29623 static void 34303859Sml29623 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 34313859Sml29623 { 34323859Sml29623 if (dma_p->dma_handle != NULL) { 34333859Sml29623 if (dma_p->ncookies) { 34343859Sml29623 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 34353859Sml29623 dma_p->ncookies = 0; 34363859Sml29623 } 34373859Sml29623 ddi_dma_free_handle(&dma_p->dma_handle); 34383859Sml29623 dma_p->dma_handle = NULL; 34393859Sml29623 } 34403859Sml29623 34413859Sml29623 if (dma_p->acc_handle != NULL) { 34423859Sml29623 ddi_dma_mem_free(&dma_p->acc_handle); 34433859Sml29623 dma_p->acc_handle = NULL; 34443859Sml29623 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 34453859Sml29623 } 34463859Sml29623 34473859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 34483859Sml29623 if (dma_p->contig_alloc_type && 3449*6512Ssowmini dma_p->orig_kaddrp && dma_p->orig_alength) { 34503859Sml29623 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3451*6512Ssowmini "kaddrp $%p (orig_kaddrp $%p)" 3452*6512Ssowmini "mem type %d ", 3453*6512Ssowmini "orig_alength %d " 3454*6512Ssowmini "alength 0x%x (%d)", 3455*6512Ssowmini dma_p->kaddrp, 3456*6512Ssowmini dma_p->orig_kaddrp, 3457*6512Ssowmini dma_p->contig_alloc_type, 3458*6512Ssowmini dma_p->orig_alength, 3459*6512Ssowmini dma_p->alength, dma_p->alength)); 34603859Sml29623 34613859Sml29623 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 34623859Sml29623 dma_p->orig_alength = NULL; 34633859Sml29623 dma_p->orig_kaddrp = NULL; 34643859Sml29623 dma_p->contig_alloc_type = B_FALSE; 34653859Sml29623 } 34663859Sml29623 #endif 34673859Sml29623 dma_p->kaddrp = NULL; 34683859Sml29623 dma_p->alength = NULL; 34693859Sml29623 } 34703859Sml29623 34716495Sspeer static void 34726495Sspeer nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 34736495Sspeer { 34746495Sspeer uint64_t kaddr; 34756495Sspeer uint32_t buf_size; 34766495Sspeer 34776495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 34786495Sspeer 34796495Sspeer if (dma_p->dma_handle != NULL) { 34806495Sspeer if (dma_p->ncookies) { 34816495Sspeer (void) ddi_dma_unbind_handle(dma_p->dma_handle); 34826495Sspeer dma_p->ncookies = 0; 34836495Sspeer } 34846495Sspeer ddi_dma_free_handle(&dma_p->dma_handle); 34856495Sspeer dma_p->dma_handle = NULL; 34866495Sspeer } 34876495Sspeer 34886495Sspeer if (dma_p->acc_handle != NULL) { 34896495Sspeer ddi_dma_mem_free(&dma_p->acc_handle); 34906495Sspeer dma_p->acc_handle = NULL; 34916495Sspeer NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 34926495Sspeer } 34936495Sspeer 34946495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 34956495Sspeer "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 34966495Sspeer dma_p, 34976495Sspeer dma_p->buf_alloc_state)); 34986495Sspeer 34996495Sspeer if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 35006495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 35016495Sspeer "<== nxge_dma_free_rx_data_buf: " 35026495Sspeer "outstanding data buffers")); 35036495Sspeer return; 35046495Sspeer } 35056495Sspeer 35066495Sspeer #if defined(sun4v) && 
defined(NIU_LP_WORKAROUND) 35076495Sspeer if (dma_p->contig_alloc_type && 3508*6512Ssowmini dma_p->orig_kaddrp && dma_p->orig_alength) { 35096495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 35106495Sspeer "kaddrp $%p (orig_kaddrp $%p)" 35116495Sspeer "mem type %d ", 35126495Sspeer "orig_alength %d " 35136495Sspeer "alength 0x%x (%d)", 35146495Sspeer dma_p->kaddrp, 35156495Sspeer dma_p->orig_kaddrp, 35166495Sspeer dma_p->contig_alloc_type, 35176495Sspeer dma_p->orig_alength, 35186495Sspeer dma_p->alength, dma_p->alength)); 35196495Sspeer 35206495Sspeer kaddr = (uint64_t)dma_p->orig_kaddrp; 35216495Sspeer buf_size = dma_p->orig_alength; 35226495Sspeer nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 35236495Sspeer dma_p->orig_alength = NULL; 35246495Sspeer dma_p->orig_kaddrp = NULL; 35256495Sspeer dma_p->contig_alloc_type = B_FALSE; 35266495Sspeer dma_p->kaddrp = NULL; 35276495Sspeer dma_p->alength = NULL; 35286495Sspeer return; 35296495Sspeer } 35306495Sspeer #endif 35316495Sspeer 35326495Sspeer if (dma_p->kmem_alloc_type) { 35336495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 35346495Sspeer "nxge_dma_free_rx_data_buf: free kmem " 3535*6512Ssowmini "kaddrp $%p (orig_kaddrp $%p)" 3536*6512Ssowmini "alloc type %d " 3537*6512Ssowmini "orig_alength %d " 3538*6512Ssowmini "alength 0x%x (%d)", 3539*6512Ssowmini dma_p->kaddrp, 3540*6512Ssowmini dma_p->orig_kaddrp, 3541*6512Ssowmini dma_p->kmem_alloc_type, 3542*6512Ssowmini dma_p->orig_alength, 3543*6512Ssowmini dma_p->alength, dma_p->alength)); 35446495Sspeer #if defined(__i386) 35456495Sspeer kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 35466495Sspeer #else 35476495Sspeer kaddr = (uint64_t)dma_p->kaddrp; 35486495Sspeer #endif 35496495Sspeer buf_size = dma_p->orig_alength; 35506495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 35516495Sspeer "nxge_dma_free_rx_data_buf: free dmap $%p " 35526495Sspeer "kaddr $%p buf_size %d", 35536495Sspeer dma_p, 35546495Sspeer kaddr, buf_size)); 35556495Sspeer nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 35566495Sspeer dma_p->alength = 0; 35576495Sspeer dma_p->orig_alength = 0; 35586495Sspeer dma_p->kaddrp = NULL; 35596495Sspeer dma_p->kmem_alloc_type = B_FALSE; 35606495Sspeer } 35616495Sspeer 35626495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 35636495Sspeer } 35646495Sspeer 35653859Sml29623 /* 35663859Sml29623 * nxge_m_start() -- start transmitting and receiving. 35673859Sml29623 * 35683859Sml29623 * This function is called by the MAC layer when the first 35693859Sml29623 * stream is open to prepare the hardware ready for sending 35703859Sml29623 * and transmitting packets. 
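 *
 * (Editor's aside, not from the original source: together with
 *  nxge_m_stop() below, this implements a small state machine on
 *  nxgep->nxge_mac_state, serialized by genlock.  Sketch of the
 *  transitions, using the state names from the code:
 *
 *	nxge_m_start:  any state  -> NXGE_MAC_STARTED   (after nxge_init())
 *	nxge_m_stop:   STARTED    -> NXGE_MAC_STOPPING  (before nxge_uninit())
 *	                          -> NXGE_MAC_STOPPED   (after nxge_uninit())
 *
 *  The periodic hardware-check timer is armed here (nxge_start_timer() in
 *  the non-guest case) and cancelled again at the top of nxge_m_stop().)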
35713859Sml29623 */ 35723859Sml29623 static int 35733859Sml29623 nxge_m_start(void *arg) 35743859Sml29623 { 35753859Sml29623 p_nxge_t nxgep = (p_nxge_t)arg; 35763859Sml29623 35773859Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 35783859Sml29623 35793859Sml29623 MUTEX_ENTER(nxgep->genlock); 35803859Sml29623 if (nxge_init(nxgep) != NXGE_OK) { 35813859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3582*6512Ssowmini "<== nxge_m_start: initialization failed")); 35833859Sml29623 MUTEX_EXIT(nxgep->genlock); 35843859Sml29623 return (EIO); 35853859Sml29623 } 35863859Sml29623 35873859Sml29623 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 35883859Sml29623 goto nxge_m_start_exit; 35893859Sml29623 /* 35903859Sml29623 * Start timer to check the system error and tx hangs 35913859Sml29623 */ 35926495Sspeer if (!isLDOMguest(nxgep)) 35936495Sspeer nxgep->nxge_timerid = nxge_start_timer(nxgep, 35946495Sspeer nxge_check_hw_state, NXGE_CHECK_TIMER); 35956495Sspeer #if defined(sun4v) 35966495Sspeer else 35976495Sspeer nxge_hio_start_timer(nxgep); 35986495Sspeer #endif 35993859Sml29623 36003859Sml29623 nxgep->link_notify = B_TRUE; 36013859Sml29623 36023859Sml29623 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 36033859Sml29623 36043859Sml29623 nxge_m_start_exit: 36053859Sml29623 MUTEX_EXIT(nxgep->genlock); 36063859Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 36073859Sml29623 return (0); 36083859Sml29623 } 36093859Sml29623 36103859Sml29623 /* 36113859Sml29623 * nxge_m_stop(): stop transmitting and receiving. 36123859Sml29623 */ 36133859Sml29623 static void 36143859Sml29623 nxge_m_stop(void *arg) 36153859Sml29623 { 36163859Sml29623 p_nxge_t nxgep = (p_nxge_t)arg; 36173859Sml29623 36183859Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 36193859Sml29623 36203859Sml29623 if (nxgep->nxge_timerid) { 36213859Sml29623 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 36223859Sml29623 nxgep->nxge_timerid = 0; 36233859Sml29623 } 36243859Sml29623 36253859Sml29623 MUTEX_ENTER(nxgep->genlock); 36266495Sspeer nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 36273859Sml29623 nxge_uninit(nxgep); 36283859Sml29623 36293859Sml29623 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 36303859Sml29623 36313859Sml29623 MUTEX_EXIT(nxgep->genlock); 36323859Sml29623 36333859Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 36343859Sml29623 } 36353859Sml29623 36363859Sml29623 static int 36373859Sml29623 nxge_m_unicst(void *arg, const uint8_t *macaddr) 36383859Sml29623 { 36393859Sml29623 p_nxge_t nxgep = (p_nxge_t)arg; 36403859Sml29623 struct ether_addr addrp; 36413859Sml29623 36423859Sml29623 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst")); 36433859Sml29623 36443859Sml29623 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 36453859Sml29623 if (nxge_set_mac_addr(nxgep, &addrp)) { 36463859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3647*6512Ssowmini "<== nxge_m_unicst: set unitcast failed")); 36483859Sml29623 return (EINVAL); 36493859Sml29623 } 36503859Sml29623 36513859Sml29623 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst")); 36523859Sml29623 36533859Sml29623 return (0); 36543859Sml29623 } 36553859Sml29623 36563859Sml29623 static int 36573859Sml29623 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 36583859Sml29623 { 36593859Sml29623 p_nxge_t nxgep = (p_nxge_t)arg; 36603859Sml29623 struct ether_addr addrp; 36613859Sml29623 36623859Sml29623 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3663*6512Ssowmini "==> nxge_m_multicst: add %d", add)); 36643859Sml29623 36653859Sml29623 bcopy(mca, (uint8_t 
*)&addrp, ETHERADDRL); 36663859Sml29623 if (add) { 36673859Sml29623 if (nxge_add_mcast_addr(nxgep, &addrp)) { 36683859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3669*6512Ssowmini "<== nxge_m_multicst: add multicast failed")); 36703859Sml29623 return (EINVAL); 36713859Sml29623 } 36723859Sml29623 } else { 36733859Sml29623 if (nxge_del_mcast_addr(nxgep, &addrp)) { 36743859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3675*6512Ssowmini "<== nxge_m_multicst: del multicast failed")); 36763859Sml29623 return (EINVAL); 36773859Sml29623 } 36783859Sml29623 } 36793859Sml29623 36803859Sml29623 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 36813859Sml29623 36823859Sml29623 return (0); 36833859Sml29623 } 36843859Sml29623 36853859Sml29623 static int 36863859Sml29623 nxge_m_promisc(void *arg, boolean_t on) 36873859Sml29623 { 36883859Sml29623 p_nxge_t nxgep = (p_nxge_t)arg; 36893859Sml29623 36903859Sml29623 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3691*6512Ssowmini "==> nxge_m_promisc: on %d", on)); 36923859Sml29623 36933859Sml29623 if (nxge_set_promisc(nxgep, on)) { 36943859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3695*6512Ssowmini "<== nxge_m_promisc: set promisc failed")); 36963859Sml29623 return (EINVAL); 36973859Sml29623 } 36983859Sml29623 36993859Sml29623 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3700*6512Ssowmini "<== nxge_m_promisc: on %d", on)); 37013859Sml29623 37023859Sml29623 return (0); 37033859Sml29623 } 37043859Sml29623 37053859Sml29623 static void 37063859Sml29623 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 37073859Sml29623 { 37083859Sml29623 p_nxge_t nxgep = (p_nxge_t)arg; 37094185Sspeer struct iocblk *iocp; 37103859Sml29623 boolean_t need_privilege; 37113859Sml29623 int err; 37123859Sml29623 int cmd; 37133859Sml29623 37143859Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 37153859Sml29623 37163859Sml29623 iocp = (struct iocblk *)mp->b_rptr; 37173859Sml29623 iocp->ioc_error = 0; 37183859Sml29623 need_privilege = B_TRUE; 37193859Sml29623 cmd = iocp->ioc_cmd; 37203859Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 37213859Sml29623 switch (cmd) { 37223859Sml29623 default: 37233859Sml29623 miocnak(wq, mp, 0, EINVAL); 37243859Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 37253859Sml29623 return; 37263859Sml29623 37273859Sml29623 case LB_GET_INFO_SIZE: 37283859Sml29623 case LB_GET_INFO: 37293859Sml29623 case LB_GET_MODE: 37303859Sml29623 need_privilege = B_FALSE; 37313859Sml29623 break; 37323859Sml29623 case LB_SET_MODE: 37333859Sml29623 break; 37343859Sml29623 37353859Sml29623 37363859Sml29623 case NXGE_GET_MII: 37373859Sml29623 case NXGE_PUT_MII: 37383859Sml29623 case NXGE_GET64: 37393859Sml29623 case NXGE_PUT64: 37403859Sml29623 case NXGE_GET_TX_RING_SZ: 37413859Sml29623 case NXGE_GET_TX_DESC: 37423859Sml29623 case NXGE_TX_SIDE_RESET: 37433859Sml29623 case NXGE_RX_SIDE_RESET: 37443859Sml29623 case NXGE_GLOBAL_RESET: 37453859Sml29623 case NXGE_RESET_MAC: 37463859Sml29623 case NXGE_TX_REGS_DUMP: 37473859Sml29623 case NXGE_RX_REGS_DUMP: 37483859Sml29623 case NXGE_INT_REGS_DUMP: 37493859Sml29623 case NXGE_VIR_INT_REGS_DUMP: 37503859Sml29623 case NXGE_PUT_TCAM: 37513859Sml29623 case NXGE_GET_TCAM: 37523859Sml29623 case NXGE_RTRACE: 37533859Sml29623 case NXGE_RDUMP: 37543859Sml29623 37553859Sml29623 need_privilege = B_FALSE; 37563859Sml29623 break; 37573859Sml29623 case NXGE_INJECT_ERR: 37583859Sml29623 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 37593859Sml29623 nxge_err_inject(nxgep, wq, mp); 37603859Sml29623 
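	/*
	 * Editor's note (illustrative only): the NXGE_* commands above are
	 * private ioctls consumed by diagnostic tools.  A minimal user-space
	 * sketch of issuing one; the device path and header set are
	 * assumptions for illustration, not taken from the source:
	 *
	 *	#include <fcntl.h>
	 *	#include <unistd.h>
	 *
	 *	int
	 *	dump_tx_regs(void)
	 *	{
	 *		int fd = open("/dev/nxge0", O_RDWR);	// illustrative path
	 *		int rc = -1;
	 *
	 *		if (fd >= 0) {
	 *			rc = ioctl(fd, NXGE_TX_REGS_DUMP, 0);
	 *			(void) close(fd);
	 *		}
	 *		return (rc);
	 *	}
	 *
	 * The LB_* loopback commands are routed to nxge_loopback_ioctl() and
	 * the NXGE_* commands to nxge_hw_ioctl(), both after the privilege
	 * check that follows this switch.
	 */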
break; 37613859Sml29623 } 37623859Sml29623 37633859Sml29623 if (need_privilege) { 37644185Sspeer err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 37653859Sml29623 if (err != 0) { 37663859Sml29623 miocnak(wq, mp, 0, err); 37673859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3768*6512Ssowmini "<== nxge_m_ioctl: no priv")); 37693859Sml29623 return; 37703859Sml29623 } 37713859Sml29623 } 37723859Sml29623 37733859Sml29623 switch (cmd) { 37743859Sml29623 37753859Sml29623 case LB_GET_MODE: 37763859Sml29623 case LB_SET_MODE: 37773859Sml29623 case LB_GET_INFO_SIZE: 37783859Sml29623 case LB_GET_INFO: 37793859Sml29623 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 37803859Sml29623 break; 37813859Sml29623 37823859Sml29623 case NXGE_GET_MII: 37833859Sml29623 case NXGE_PUT_MII: 37843859Sml29623 case NXGE_PUT_TCAM: 37853859Sml29623 case NXGE_GET_TCAM: 37863859Sml29623 case NXGE_GET64: 37873859Sml29623 case NXGE_PUT64: 37883859Sml29623 case NXGE_GET_TX_RING_SZ: 37893859Sml29623 case NXGE_GET_TX_DESC: 37903859Sml29623 case NXGE_TX_SIDE_RESET: 37913859Sml29623 case NXGE_RX_SIDE_RESET: 37923859Sml29623 case NXGE_GLOBAL_RESET: 37933859Sml29623 case NXGE_RESET_MAC: 37943859Sml29623 case NXGE_TX_REGS_DUMP: 37953859Sml29623 case NXGE_RX_REGS_DUMP: 37963859Sml29623 case NXGE_INT_REGS_DUMP: 37973859Sml29623 case NXGE_VIR_INT_REGS_DUMP: 37983859Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3799*6512Ssowmini "==> nxge_m_ioctl: cmd 0x%x", cmd)); 38003859Sml29623 nxge_hw_ioctl(nxgep, wq, mp, iocp); 38013859Sml29623 break; 38023859Sml29623 } 38033859Sml29623 38043859Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 38053859Sml29623 } 38063859Sml29623 38073859Sml29623 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 38083859Sml29623 38093859Sml29623 static void 38103859Sml29623 nxge_m_resources(void *arg) 38113859Sml29623 { 38123859Sml29623 p_nxge_t nxgep = arg; 38133859Sml29623 mac_rx_fifo_t mrf; 38146495Sspeer 38156495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 38166495Sspeer uint8_t rdc; 38176495Sspeer 38186495Sspeer rx_rcr_ring_t *ring; 38193859Sml29623 38203859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 38213859Sml29623 38223859Sml29623 MUTEX_ENTER(nxgep->genlock); 38233859Sml29623 38246495Sspeer if (set->owned.map == 0) { 38256495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 38266495Sspeer "nxge_m_resources: no receive resources")); 38276495Sspeer goto nxge_m_resources_exit; 38286495Sspeer } 38296495Sspeer 38303859Sml29623 /* 38313859Sml29623 * CR 6492541 Check to see if the drv_state has been initialized, 38323859Sml29623 * if not * call nxge_init(). 38333859Sml29623 */ 38343859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 38356495Sspeer if (nxge_init(nxgep) != NXGE_OK) 38363859Sml29623 goto nxge_m_resources_exit; 38373859Sml29623 } 38383859Sml29623 38393859Sml29623 mrf.mrf_type = MAC_RX_FIFO; 38403859Sml29623 mrf.mrf_blank = nxge_rx_hw_blank; 38413859Sml29623 mrf.mrf_arg = (void *)nxgep; 38423859Sml29623 38433859Sml29623 mrf.mrf_normal_blank_time = 128; 38443859Sml29623 mrf.mrf_normal_pkt_count = 8; 38453859Sml29623 38463859Sml29623 /* 38473859Sml29623 * Export our receive resources to the MAC layer. 38483859Sml29623 */ 38496495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 38506495Sspeer if ((1 << rdc) & set->owned.map) { 38516495Sspeer ring = nxgep->rx_rcr_rings->rcr_rings[rdc]; 38526495Sspeer if (ring == 0) { 38536495Sspeer /* 38546495Sspeer * This is a big deal only if we are 38556495Sspeer * *not* in an LDOMs environment. 
38566495Sspeer */ 38576495Sspeer if (nxgep->environs == SOLARIS_DOMAIN) { 38586495Sspeer cmn_err(CE_NOTE, 38596495Sspeer "==> nxge_m_resources: " 38606495Sspeer "ring %d == 0", rdc); 38616495Sspeer } 38626495Sspeer continue; 38636495Sspeer } 38646495Sspeer ring->rcr_mac_handle = mac_resource_add 38656495Sspeer (nxgep->mach, (mac_resource_t *)&mrf); 38666495Sspeer 38676495Sspeer NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 38686495Sspeer "==> nxge_m_resources: RDC %d RCR %p MAC handle %p", 38696495Sspeer rdc, ring, ring->rcr_mac_handle)); 38706495Sspeer } 38713859Sml29623 } 38723859Sml29623 38733859Sml29623 nxge_m_resources_exit: 38743859Sml29623 MUTEX_EXIT(nxgep->genlock); 38753859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 38763859Sml29623 } 38773859Sml29623 38786495Sspeer void 38793859Sml29623 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 38803859Sml29623 { 38813859Sml29623 p_nxge_mmac_stats_t mmac_stats; 38823859Sml29623 int i; 38833859Sml29623 nxge_mmac_t *mmac_info; 38843859Sml29623 38853859Sml29623 mmac_info = &nxgep->nxge_mmac_info; 38863859Sml29623 38873859Sml29623 mmac_stats = &nxgep->statsp->mmac_stats; 38883859Sml29623 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 38893859Sml29623 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 38903859Sml29623 38913859Sml29623 for (i = 0; i < ETHERADDRL; i++) { 38923859Sml29623 if (factory) { 38933859Sml29623 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3894*6512Ssowmini = mmac_info->factory_mac_pool[slot][ 3895*6512Ssowmini (ETHERADDRL-1) - i]; 38963859Sml29623 } else { 38973859Sml29623 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3898*6512Ssowmini = mmac_info->mac_pool[slot].addr[ 3899*6512Ssowmini (ETHERADDRL - 1) - i]; 39003859Sml29623 } 39013859Sml29623 } 39023859Sml29623 } 39033859Sml29623 39043859Sml29623 /* 39053859Sml29623 * nxge_altmac_set() -- Set an alternate MAC address 39063859Sml29623 */ 39073859Sml29623 static int 39083859Sml29623 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 39093859Sml29623 { 39103859Sml29623 uint8_t addrn; 39113859Sml29623 uint8_t portn; 39123859Sml29623 npi_mac_addr_t altmac; 39134484Sspeer hostinfo_t mac_rdc; 39144484Sspeer p_nxge_class_pt_cfg_t clscfgp; 39153859Sml29623 39163859Sml29623 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 39173859Sml29623 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 39183859Sml29623 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 39193859Sml29623 39203859Sml29623 portn = nxgep->mac.portnum; 39213859Sml29623 addrn = (uint8_t)slot - 1; 39223859Sml29623 39233859Sml29623 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3924*6512Ssowmini addrn, &altmac) != NPI_SUCCESS) 39253859Sml29623 return (EIO); 39264484Sspeer 39274484Sspeer /* 39284484Sspeer * Set the rdc table number for the host info entry 39294484Sspeer * for this mac address slot. 
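 *
 * (Editor's aside, illustrative only: the altmac.w2/w1/w0 words filled in
 *  above split the six MAC octets into three big-endian 16-bit register
 *  words.  A self-contained sketch of the same packing with standard
 *  types and a hypothetical helper name:
 *
 *	#include <stdint.h>
 *
 *	static void
 *	pack_altmac(const uint8_t m[6], uint16_t w[3])
 *	{
 *		w[2] = (uint16_t)((m[0] << 8) | m[1]);	// most significant
 *		w[1] = (uint16_t)((m[2] << 8) | m[3]);
 *		w[0] = (uint16_t)((m[4] << 8) | m[5]);	// least significant
 *	}
 *
 *  For example, 00:14:4f:a8:12:34 packs to w2=0x0014, w1=0x4fa8,
 *  w0=0x1234.)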
39304484Sspeer */ 39314484Sspeer clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 39324484Sspeer mac_rdc.value = 0; 39334484Sspeer mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 39344484Sspeer mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 39354484Sspeer 39364484Sspeer if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 39374484Sspeer nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 39384484Sspeer return (EIO); 39394484Sspeer } 39404484Sspeer 39413859Sml29623 /* 39423859Sml29623 * Enable comparison with the alternate MAC address. 39433859Sml29623 * While the first alternate addr is enabled by bit 1 of register 39443859Sml29623 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 39453859Sml29623 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 39463859Sml29623 * accordingly before calling npi_mac_altaddr_entry. 39473859Sml29623 */ 39483859Sml29623 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 39493859Sml29623 addrn = (uint8_t)slot - 1; 39503859Sml29623 else 39513859Sml29623 addrn = (uint8_t)slot; 39523859Sml29623 39533859Sml29623 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 3954*6512Ssowmini != NPI_SUCCESS) 39553859Sml29623 return (EIO); 39563859Sml29623 39573859Sml29623 return (0); 39583859Sml29623 } 39593859Sml29623 39603859Sml29623 /* 39613859Sml29623 * nxeg_m_mmac_add() - find an unused address slot, set the address 39623859Sml29623 * value to the one specified, enable the port to start filtering on 39633859Sml29623 * the new MAC address. Returns 0 on success. 39643859Sml29623 */ 39656495Sspeer int 39663859Sml29623 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 39673859Sml29623 { 39683859Sml29623 p_nxge_t nxgep = arg; 39693859Sml29623 mac_addr_slot_t slot; 39703859Sml29623 nxge_mmac_t *mmac_info; 39713859Sml29623 int err; 39723859Sml29623 nxge_status_t status; 39733859Sml29623 39743859Sml29623 mutex_enter(nxgep->genlock); 39753859Sml29623 39763859Sml29623 /* 39773859Sml29623 * Make sure that nxge is initialized, if _start() has 39783859Sml29623 * not been called. 39793859Sml29623 */ 39803859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 39813859Sml29623 status = nxge_init(nxgep); 39823859Sml29623 if (status != NXGE_OK) { 39833859Sml29623 mutex_exit(nxgep->genlock); 39843859Sml29623 return (ENXIO); 39853859Sml29623 } 39863859Sml29623 } 39873859Sml29623 39883859Sml29623 mmac_info = &nxgep->nxge_mmac_info; 39893859Sml29623 if (mmac_info->naddrfree == 0) { 39903859Sml29623 mutex_exit(nxgep->genlock); 39913859Sml29623 return (ENOSPC); 39923859Sml29623 } 39933859Sml29623 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3994*6512Ssowmini maddr->mma_addrlen)) { 39953859Sml29623 mutex_exit(nxgep->genlock); 39963859Sml29623 return (EINVAL); 39973859Sml29623 } 39983859Sml29623 /* 39993859Sml29623 * Search for the first available slot. Because naddrfree 40003859Sml29623 * is not zero, we are guaranteed to find one. 40013859Sml29623 * Slot 0 is for unique (primary) MAC. The first alternate 40023859Sml29623 * MAC slot is slot 1. 40033859Sml29623 * Each of the first two ports of Neptune has 16 alternate 40046495Sspeer * MAC slots but only the first 7 (of 15) slots have assigned factory 40053859Sml29623 * MAC addresses. We first search among the slots without bundled 40063859Sml29623 * factory MACs. If we fail to find one in that range, then we 40073859Sml29623 * search the slots with bundled factory MACs. 
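 *
 * (Editor's aside, illustrative only: the search below reduces to "try the
 *  non-factory range first, then fall back to the factory range".  A
 *  compact sketch with hypothetical names, where used[] mirrors the
 *  MMAC_SLOT_USED flag and slots are numbered 1..num_mmac:
 *
 *	int
 *	find_free_slot(const int used[], int num_factory, int num_mmac)
 *	{
 *		int s;
 *
 *		for (s = num_factory + 1; s <= num_mmac; s++)
 *			if (!used[s])
 *				return (s);
 *		for (s = 1; s <= num_factory; s++)
 *			if (!used[s])
 *				return (s);
 *		return (-1);	// cannot happen when naddrfree > 0
 *	}
 *
 *  The driver can ignore the -1 case because naddrfree was already
 *  checked to be non-zero before the search starts.)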
A factory MAC 40083859Sml29623 * will be wasted while the slot is used with a user MAC address. 40093859Sml29623 * But the slot could be used by factory MAC again after calling 40103859Sml29623 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 40113859Sml29623 */ 40123859Sml29623 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 40133859Sml29623 for (slot = mmac_info->num_factory_mmac + 1; 4014*6512Ssowmini slot <= mmac_info->num_mmac; slot++) { 40153859Sml29623 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 40163859Sml29623 break; 40173859Sml29623 } 40183859Sml29623 if (slot > mmac_info->num_mmac) { 40193859Sml29623 for (slot = 1; slot <= mmac_info->num_factory_mmac; 4020*6512Ssowmini slot++) { 40213859Sml29623 if (!(mmac_info->mac_pool[slot].flags 4022*6512Ssowmini & MMAC_SLOT_USED)) 40233859Sml29623 break; 40243859Sml29623 } 40253859Sml29623 } 40263859Sml29623 } else { 40273859Sml29623 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 40283859Sml29623 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 40293859Sml29623 break; 40303859Sml29623 } 40313859Sml29623 } 40323859Sml29623 ASSERT(slot <= mmac_info->num_mmac); 40333859Sml29623 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 40343859Sml29623 mutex_exit(nxgep->genlock); 40353859Sml29623 return (err); 40363859Sml29623 } 40373859Sml29623 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 40383859Sml29623 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 40393859Sml29623 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 40403859Sml29623 mmac_info->naddrfree--; 40413859Sml29623 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 40423859Sml29623 40433859Sml29623 maddr->mma_slot = slot; 40443859Sml29623 40453859Sml29623 mutex_exit(nxgep->genlock); 40463859Sml29623 return (0); 40473859Sml29623 } 40483859Sml29623 40493859Sml29623 /* 40503859Sml29623 * This function reserves an unused slot and programs the slot and the HW 40513859Sml29623 * with a factory mac address. 40523859Sml29623 */ 40533859Sml29623 static int 40543859Sml29623 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 40553859Sml29623 { 40563859Sml29623 p_nxge_t nxgep = arg; 40573859Sml29623 mac_addr_slot_t slot; 40583859Sml29623 nxge_mmac_t *mmac_info; 40593859Sml29623 int err; 40603859Sml29623 nxge_status_t status; 40613859Sml29623 40623859Sml29623 mutex_enter(nxgep->genlock); 40633859Sml29623 40643859Sml29623 /* 40653859Sml29623 * Make sure that nxge is initialized, if _start() has 40663859Sml29623 * not been called. 
40673859Sml29623 */ 40683859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 40693859Sml29623 status = nxge_init(nxgep); 40703859Sml29623 if (status != NXGE_OK) { 40713859Sml29623 mutex_exit(nxgep->genlock); 40723859Sml29623 return (ENXIO); 40733859Sml29623 } 40743859Sml29623 } 40753859Sml29623 40763859Sml29623 mmac_info = &nxgep->nxge_mmac_info; 40773859Sml29623 if (mmac_info->naddrfree == 0) { 40783859Sml29623 mutex_exit(nxgep->genlock); 40793859Sml29623 return (ENOSPC); 40803859Sml29623 } 40813859Sml29623 40823859Sml29623 slot = maddr->mma_slot; 40833859Sml29623 if (slot == -1) { /* -1: Take the first available slot */ 40843859Sml29623 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 40853859Sml29623 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 40863859Sml29623 break; 40873859Sml29623 } 40883859Sml29623 if (slot > mmac_info->num_factory_mmac) { 40893859Sml29623 mutex_exit(nxgep->genlock); 40903859Sml29623 return (ENOSPC); 40913859Sml29623 } 40923859Sml29623 } 40933859Sml29623 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 40943859Sml29623 /* 40953859Sml29623 * Do not support factory MAC at a slot greater than 40963859Sml29623 * num_factory_mmac even when there are available factory 40973859Sml29623 * MAC addresses because the alternate MACs are bundled with 40983859Sml29623 * slot[1] through slot[num_factory_mmac] 40993859Sml29623 */ 41003859Sml29623 mutex_exit(nxgep->genlock); 41013859Sml29623 return (EINVAL); 41023859Sml29623 } 41033859Sml29623 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 41043859Sml29623 mutex_exit(nxgep->genlock); 41053859Sml29623 return (EBUSY); 41063859Sml29623 } 41073859Sml29623 /* Verify the address to be reserved */ 41083859Sml29623 if (!mac_unicst_verify(nxgep->mach, 4109*6512Ssowmini mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 41103859Sml29623 mutex_exit(nxgep->genlock); 41113859Sml29623 return (EINVAL); 41123859Sml29623 } 41133859Sml29623 if (err = nxge_altmac_set(nxgep, 4114*6512Ssowmini mmac_info->factory_mac_pool[slot], slot)) { 41153859Sml29623 mutex_exit(nxgep->genlock); 41163859Sml29623 return (err); 41173859Sml29623 } 41183859Sml29623 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 41193859Sml29623 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 41203859Sml29623 mmac_info->naddrfree--; 41213859Sml29623 41223859Sml29623 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 41233859Sml29623 mutex_exit(nxgep->genlock); 41243859Sml29623 41253859Sml29623 /* Pass info back to the caller */ 41263859Sml29623 maddr->mma_slot = slot; 41273859Sml29623 maddr->mma_addrlen = ETHERADDRL; 41283859Sml29623 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 41293859Sml29623 41303859Sml29623 return (0); 41313859Sml29623 } 41323859Sml29623 41333859Sml29623 /* 41343859Sml29623 * Remove the specified mac address and update the HW not to filter 41353859Sml29623 * the mac address anymore. 41363859Sml29623 */ 41376495Sspeer int 41383859Sml29623 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 41393859Sml29623 { 41403859Sml29623 p_nxge_t nxgep = arg; 41413859Sml29623 nxge_mmac_t *mmac_info; 41423859Sml29623 uint8_t addrn; 41433859Sml29623 uint8_t portn; 41443859Sml29623 int err = 0; 41453859Sml29623 nxge_status_t status; 41463859Sml29623 41473859Sml29623 mutex_enter(nxgep->genlock); 41483859Sml29623 41493859Sml29623 /* 41503859Sml29623 * Make sure that nxge is initialized, if _start() has 41513859Sml29623 * not been called. 
41523859Sml29623 */ 41533859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 41543859Sml29623 status = nxge_init(nxgep); 41553859Sml29623 if (status != NXGE_OK) { 41563859Sml29623 mutex_exit(nxgep->genlock); 41573859Sml29623 return (ENXIO); 41583859Sml29623 } 41593859Sml29623 } 41603859Sml29623 41613859Sml29623 mmac_info = &nxgep->nxge_mmac_info; 41623859Sml29623 if (slot < 1 || slot > mmac_info->num_mmac) { 41633859Sml29623 mutex_exit(nxgep->genlock); 41643859Sml29623 return (EINVAL); 41653859Sml29623 } 41663859Sml29623 41673859Sml29623 portn = nxgep->mac.portnum; 41683859Sml29623 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 41693859Sml29623 addrn = (uint8_t)slot - 1; 41703859Sml29623 else 41713859Sml29623 addrn = (uint8_t)slot; 41723859Sml29623 41733859Sml29623 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 41743859Sml29623 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4175*6512Ssowmini == NPI_SUCCESS) { 41763859Sml29623 mmac_info->naddrfree++; 41773859Sml29623 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 41783859Sml29623 /* 41793859Sml29623 * Regardless if the MAC we just stopped filtering 41803859Sml29623 * is a user addr or a facory addr, we must set 41813859Sml29623 * the MMAC_VENDOR_ADDR flag if this slot has an 41823859Sml29623 * associated factory MAC to indicate that a factory 41833859Sml29623 * MAC is available. 41843859Sml29623 */ 41853859Sml29623 if (slot <= mmac_info->num_factory_mmac) { 41863859Sml29623 mmac_info->mac_pool[slot].flags 4187*6512Ssowmini |= MMAC_VENDOR_ADDR; 41883859Sml29623 } 41893859Sml29623 /* 41903859Sml29623 * Clear mac_pool[slot].addr so that kstat shows 0 41913859Sml29623 * alternate MAC address if the slot is not used. 41923859Sml29623 * (But nxge_m_mmac_get returns the factory MAC even 41933859Sml29623 * when the slot is not used!) 41943859Sml29623 */ 41953859Sml29623 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 41963859Sml29623 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 41973859Sml29623 } else { 41983859Sml29623 err = EIO; 41993859Sml29623 } 42003859Sml29623 } else { 42013859Sml29623 err = EINVAL; 42023859Sml29623 } 42033859Sml29623 42043859Sml29623 mutex_exit(nxgep->genlock); 42053859Sml29623 return (err); 42063859Sml29623 } 42073859Sml29623 42083859Sml29623 /* 42093859Sml29623 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve(). 42103859Sml29623 */ 42113859Sml29623 static int 42123859Sml29623 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 42133859Sml29623 { 42143859Sml29623 p_nxge_t nxgep = arg; 42153859Sml29623 mac_addr_slot_t slot; 42163859Sml29623 nxge_mmac_t *mmac_info; 42173859Sml29623 int err = 0; 42183859Sml29623 nxge_status_t status; 42193859Sml29623 42203859Sml29623 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 4221*6512Ssowmini maddr->mma_addrlen)) 42223859Sml29623 return (EINVAL); 42233859Sml29623 42243859Sml29623 slot = maddr->mma_slot; 42253859Sml29623 42263859Sml29623 mutex_enter(nxgep->genlock); 42273859Sml29623 42283859Sml29623 /* 42293859Sml29623 * Make sure that nxge is initialized, if _start() has 42303859Sml29623 * not been called. 
42313859Sml29623 */ 42323859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 42333859Sml29623 status = nxge_init(nxgep); 42343859Sml29623 if (status != NXGE_OK) { 42353859Sml29623 mutex_exit(nxgep->genlock); 42363859Sml29623 return (ENXIO); 42373859Sml29623 } 42383859Sml29623 } 42393859Sml29623 42403859Sml29623 mmac_info = &nxgep->nxge_mmac_info; 42413859Sml29623 if (slot < 1 || slot > mmac_info->num_mmac) { 42423859Sml29623 mutex_exit(nxgep->genlock); 42433859Sml29623 return (EINVAL); 42443859Sml29623 } 42453859Sml29623 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 42463859Sml29623 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 4247*6512Ssowmini != 0) { 42483859Sml29623 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 4249*6512Ssowmini ETHERADDRL); 42503859Sml29623 /* 42513859Sml29623 * Assume that the MAC passed down from the caller 42523859Sml29623 * is not a factory MAC address (The user should 42533859Sml29623 * call mmac_remove followed by mmac_reserve if 42543859Sml29623 * he wants to use the factory MAC for this slot). 42553859Sml29623 */ 42563859Sml29623 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 42573859Sml29623 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 42583859Sml29623 } 42593859Sml29623 } else { 42603859Sml29623 err = EINVAL; 42613859Sml29623 } 42623859Sml29623 mutex_exit(nxgep->genlock); 42633859Sml29623 return (err); 42643859Sml29623 } 42653859Sml29623 42663859Sml29623 /* 42673859Sml29623 * nxge_m_mmac_get() - Get the MAC address and other information 42683859Sml29623 * related to the slot. mma_flags should be set to 0 in the call. 42693859Sml29623 * Note: although kstat shows MAC address as zero when a slot is 42703859Sml29623 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 42713859Sml29623 * to the caller as long as the slot is not using a user MAC address. 42723859Sml29623 * The following table shows the rules, 42733859Sml29623 * 42743859Sml29623 * USED VENDOR mma_addr 42753859Sml29623 * ------------------------------------------------------------ 42763859Sml29623 * (1) Slot uses a user MAC: yes no user MAC 42773859Sml29623 * (2) Slot uses a factory MAC: yes yes factory MAC 42783859Sml29623 * (3) Slot is not used but is 42793859Sml29623 * factory MAC capable: no yes factory MAC 42803859Sml29623 * (4) Slot is not used and is 42813859Sml29623 * not factory MAC capable: no no 0 42823859Sml29623 * ------------------------------------------------------------ 42833859Sml29623 */ 42843859Sml29623 static int 42853859Sml29623 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 42863859Sml29623 { 42873859Sml29623 nxge_t *nxgep = arg; 42883859Sml29623 mac_addr_slot_t slot; 42893859Sml29623 nxge_mmac_t *mmac_info; 42903859Sml29623 nxge_status_t status; 42913859Sml29623 42923859Sml29623 slot = maddr->mma_slot; 42933859Sml29623 42943859Sml29623 mutex_enter(nxgep->genlock); 42953859Sml29623 42963859Sml29623 /* 42973859Sml29623 * Make sure that nxge is initialized, if _start() has 42983859Sml29623 * not been called. 
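 *
 * (Editor's aside, illustrative only: the USED/VENDOR table above maps
 *  directly onto two flag tests.  A self-contained sketch of the same
 *  decision, with hypothetical names and standard types:
 *
 *	#include <stdint.h>
 *
 *	// returns a pointer to the address the caller should see
 *	const uint8_t *
 *	mmac_addr_to_report(int used, int vendor,
 *	    const uint8_t *user_mac, const uint8_t *factory_mac,
 *	    const uint8_t *zero_mac)
 *	{
 *		if (vendor)
 *			return (factory_mac);	// cases (2) and (3)
 *		if (used)
 *			return (user_mac);	// case (1)
 *		return (zero_mac);		// case (4)
 *	}
 *
 *  which matches the order of the flag checks in the function body below.)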
42993859Sml29623 */
43003859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
43013859Sml29623 status = nxge_init(nxgep);
43023859Sml29623 if (status != NXGE_OK) {
43033859Sml29623 mutex_exit(nxgep->genlock);
43043859Sml29623 return (ENXIO);
43053859Sml29623 }
43063859Sml29623 }
43073859Sml29623
43083859Sml29623 mmac_info = &nxgep->nxge_mmac_info;
43093859Sml29623
43103859Sml29623 if (slot < 1 || slot > mmac_info->num_mmac) {
43113859Sml29623 mutex_exit(nxgep->genlock);
43123859Sml29623 return (EINVAL);
43133859Sml29623 }
43143859Sml29623 maddr->mma_flags = 0;
43153859Sml29623 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
43163859Sml29623 maddr->mma_flags |= MMAC_SLOT_USED;
43173859Sml29623
43183859Sml29623 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
43193859Sml29623 maddr->mma_flags |= MMAC_VENDOR_ADDR;
43203859Sml29623 bcopy(mmac_info->factory_mac_pool[slot],
4321*6512Ssowmini maddr->mma_addr, ETHERADDRL);
43223859Sml29623 maddr->mma_addrlen = ETHERADDRL;
43233859Sml29623 } else {
43243859Sml29623 if (maddr->mma_flags & MMAC_SLOT_USED) {
43253859Sml29623 bcopy(mmac_info->mac_pool[slot].addr,
4326*6512Ssowmini maddr->mma_addr, ETHERADDRL);
43273859Sml29623 maddr->mma_addrlen = ETHERADDRL;
43283859Sml29623 } else {
43293859Sml29623 bzero(maddr->mma_addr, ETHERADDRL);
43303859Sml29623 maddr->mma_addrlen = 0;
43313859Sml29623 }
43323859Sml29623 }
43333859Sml29623 mutex_exit(nxgep->genlock);
43343859Sml29623 return (0);
43353859Sml29623 }
43363859Sml29623
43373859Sml29623 static boolean_t
43383859Sml29623 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
43393859Sml29623 {
43403859Sml29623 nxge_t *nxgep = arg;
43413859Sml29623 uint32_t *txflags = cap_data;
43423859Sml29623 multiaddress_capab_t *mmacp = cap_data;
43433859Sml29623
43443859Sml29623 switch (cap) {
43453859Sml29623 case MAC_CAPAB_HCKSUM:
43466495Sspeer NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
43476495Sspeer "==> nxge_m_getcapab: checksum %d", nxge_cksum_enable));
43486495Sspeer if (nxge_cksum_enable) {
43496495Sspeer *txflags = HCKSUM_INET_PARTIAL;
43506495Sspeer }
43513859Sml29623 break;
43526495Sspeer
43533859Sml29623 case MAC_CAPAB_POLL:
43543859Sml29623 /*
43553859Sml29623 * There's nothing for us to fill in; simply returning
43563859Sml29623 * B_TRUE to state that we support polling is sufficient.
43573859Sml29623 */
43583859Sml29623 break;
43593859Sml29623
43603859Sml29623 case MAC_CAPAB_MULTIADDRESS:
43616495Sspeer mmacp = (multiaddress_capab_t *)cap_data;
43623859Sml29623 mutex_enter(nxgep->genlock);
43633859Sml29623
43643859Sml29623 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
43653859Sml29623 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
43663859Sml29623 mmacp->maddr_flag = 0; /* 0 is required by PSARC/2006/265 */
43673859Sml29623 /*
43683859Sml29623 * maddr_handle is the driver's private data, passed back to
43693859Sml29623 * entry point functions as arg.
43703859Sml29623 */ 43713859Sml29623 mmacp->maddr_handle = nxgep; 43723859Sml29623 mmacp->maddr_add = nxge_m_mmac_add; 43733859Sml29623 mmacp->maddr_remove = nxge_m_mmac_remove; 43743859Sml29623 mmacp->maddr_modify = nxge_m_mmac_modify; 43753859Sml29623 mmacp->maddr_get = nxge_m_mmac_get; 43763859Sml29623 mmacp->maddr_reserve = nxge_m_mmac_reserve; 43773859Sml29623 43783859Sml29623 mutex_exit(nxgep->genlock); 43793859Sml29623 break; 43806495Sspeer 43815770Sml29623 case MAC_CAPAB_LSO: { 43825770Sml29623 mac_capab_lso_t *cap_lso = cap_data; 43835770Sml29623 43846003Sml29623 if (nxgep->soft_lso_enable) { 43855770Sml29623 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 43865770Sml29623 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 43875770Sml29623 nxge_lso_max = NXGE_LSO_MAXLEN; 43885770Sml29623 } 43895770Sml29623 cap_lso->lso_basic_tcp_ipv4.lso_max = nxge_lso_max; 43905770Sml29623 break; 43915770Sml29623 } else { 43925770Sml29623 return (B_FALSE); 43935770Sml29623 } 43945770Sml29623 } 43955770Sml29623 43966495Sspeer #if defined(sun4v) 43976495Sspeer case MAC_CAPAB_RINGS: { 43986495Sspeer mac_capab_rings_t *mrings = (mac_capab_rings_t *)cap_data; 43996495Sspeer 44006495Sspeer /* 44016495Sspeer * Only the service domain driver responds to 44026495Sspeer * this capability request. 44036495Sspeer */ 44046495Sspeer if (isLDOMservice(nxgep)) { 44056495Sspeer mrings->mr_handle = (void *)nxgep; 44066495Sspeer 44076495Sspeer /* 44086495Sspeer * No dynamic allocation of groups and 44096495Sspeer * rings at this time. Shares dictate the 44106495Sspeer * configurartion. 44116495Sspeer */ 44126495Sspeer mrings->mr_gadd_ring = NULL; 44136495Sspeer mrings->mr_grem_ring = NULL; 44146495Sspeer mrings->mr_rget = NULL; 44156495Sspeer mrings->mr_gget = nxge_hio_group_get; 44166495Sspeer 44176495Sspeer if (mrings->mr_type == MAC_RING_TYPE_RX) { 44186495Sspeer mrings->mr_rnum = 8; /* XXX */ 44196495Sspeer mrings->mr_gnum = 6; /* XXX */ 44206495Sspeer } else { 44216495Sspeer mrings->mr_rnum = 8; /* XXX */ 44226495Sspeer mrings->mr_gnum = 0; /* XXX */ 44236495Sspeer } 44246495Sspeer } else 44256495Sspeer return (B_FALSE); 44266495Sspeer break; 44276495Sspeer } 44286495Sspeer 44296495Sspeer case MAC_CAPAB_SHARES: { 44306495Sspeer mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 44316495Sspeer 44326495Sspeer /* 44336495Sspeer * Only the service domain driver responds to 44346495Sspeer * this capability request. 44356495Sspeer */ 44366495Sspeer if (isLDOMservice(nxgep)) { 44376495Sspeer mshares->ms_snum = 3; 44386495Sspeer mshares->ms_handle = (void *)nxgep; 44396495Sspeer mshares->ms_salloc = nxge_hio_share_alloc; 44406495Sspeer mshares->ms_sfree = nxge_hio_share_free; 44416495Sspeer mshares->ms_sadd = NULL; 44426495Sspeer mshares->ms_sremove = NULL; 44436495Sspeer mshares->ms_squery = nxge_hio_share_query; 44446495Sspeer } else 44456495Sspeer return (B_FALSE); 44466495Sspeer break; 44476495Sspeer } 44486495Sspeer #endif 44493859Sml29623 default: 44503859Sml29623 return (B_FALSE); 44513859Sml29623 } 44523859Sml29623 return (B_TRUE); 44533859Sml29623 } 44543859Sml29623 44556439Sml29623 static boolean_t 44566439Sml29623 nxge_param_locked(mac_prop_id_t pr_num) 44576439Sml29623 { 44586439Sml29623 /* 44596439Sml29623 * All adv_* parameters are locked (read-only) while 44606439Sml29623 * the device is in any sort of loopback mode ... 
44616439Sml29623 */ 44626439Sml29623 switch (pr_num) { 44636439Sml29623 case DLD_PROP_ADV_1000FDX_CAP: 44646439Sml29623 case DLD_PROP_EN_1000FDX_CAP: 44656439Sml29623 case DLD_PROP_ADV_1000HDX_CAP: 44666439Sml29623 case DLD_PROP_EN_1000HDX_CAP: 44676439Sml29623 case DLD_PROP_ADV_100FDX_CAP: 44686439Sml29623 case DLD_PROP_EN_100FDX_CAP: 44696439Sml29623 case DLD_PROP_ADV_100HDX_CAP: 44706439Sml29623 case DLD_PROP_EN_100HDX_CAP: 44716439Sml29623 case DLD_PROP_ADV_10FDX_CAP: 44726439Sml29623 case DLD_PROP_EN_10FDX_CAP: 44736439Sml29623 case DLD_PROP_ADV_10HDX_CAP: 44746439Sml29623 case DLD_PROP_EN_10HDX_CAP: 44756439Sml29623 case DLD_PROP_AUTONEG: 44766439Sml29623 case DLD_PROP_FLOWCTRL: 44776439Sml29623 return (B_TRUE); 44786439Sml29623 } 44796439Sml29623 return (B_FALSE); 44806439Sml29623 } 44816439Sml29623 44826439Sml29623 /* 44836439Sml29623 * callback functions for set/get of properties 44846439Sml29623 */ 44856439Sml29623 static int 44866439Sml29623 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 44876439Sml29623 uint_t pr_valsize, const void *pr_val) 44886439Sml29623 { 44896439Sml29623 nxge_t *nxgep = barg; 44906439Sml29623 p_nxge_param_t param_arr; 44916439Sml29623 p_nxge_stats_t statsp; 44926439Sml29623 int err = 0; 44936439Sml29623 uint8_t val; 44946439Sml29623 uint32_t cur_mtu, new_mtu, old_framesize; 44956439Sml29623 link_flowctrl_t fl; 44966439Sml29623 44976439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 44986439Sml29623 param_arr = nxgep->param_arr; 44996439Sml29623 statsp = nxgep->statsp; 45006439Sml29623 mutex_enter(nxgep->genlock); 45016439Sml29623 if (statsp->port_stats.lb_mode != nxge_lb_normal && 45026439Sml29623 nxge_param_locked(pr_num)) { 45036439Sml29623 /* 45046439Sml29623 * All adv_* parameters are locked (read-only) 45056439Sml29623 * while the device is in any sort of loopback mode. 
45066439Sml29623 */ 45076439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 45086439Sml29623 "==> nxge_m_setprop: loopback mode: read only")); 45096439Sml29623 mutex_exit(nxgep->genlock); 45106439Sml29623 return (EBUSY); 45116439Sml29623 } 45126439Sml29623 45136439Sml29623 val = *(uint8_t *)pr_val; 45146439Sml29623 switch (pr_num) { 45156439Sml29623 case DLD_PROP_EN_1000FDX_CAP: 45166439Sml29623 nxgep->param_en_1000fdx = val; 45176439Sml29623 param_arr[param_anar_1000fdx].value = val; 45186439Sml29623 45196439Sml29623 goto reprogram; 45206439Sml29623 45216439Sml29623 case DLD_PROP_EN_100FDX_CAP: 45226439Sml29623 nxgep->param_en_100fdx = val; 45236439Sml29623 param_arr[param_anar_100fdx].value = val; 45246439Sml29623 45256439Sml29623 goto reprogram; 45266439Sml29623 45276439Sml29623 case DLD_PROP_EN_10FDX_CAP: 45286439Sml29623 nxgep->param_en_10fdx = val; 45296439Sml29623 param_arr[param_anar_10fdx].value = val; 45306439Sml29623 45316439Sml29623 goto reprogram; 45326439Sml29623 45336439Sml29623 case DLD_PROP_EN_1000HDX_CAP: 45346439Sml29623 case DLD_PROP_EN_100HDX_CAP: 45356439Sml29623 case DLD_PROP_EN_10HDX_CAP: 45366439Sml29623 case DLD_PROP_ADV_1000FDX_CAP: 45376439Sml29623 case DLD_PROP_ADV_1000HDX_CAP: 45386439Sml29623 case DLD_PROP_ADV_100FDX_CAP: 45396439Sml29623 case DLD_PROP_ADV_100HDX_CAP: 45406439Sml29623 case DLD_PROP_ADV_10FDX_CAP: 45416439Sml29623 case DLD_PROP_ADV_10HDX_CAP: 45426439Sml29623 case DLD_PROP_STATUS: 45436439Sml29623 case DLD_PROP_SPEED: 45446439Sml29623 case DLD_PROP_DUPLEX: 45456439Sml29623 err = EINVAL; /* cannot set read-only properties */ 45466439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 45476439Sml29623 "==> nxge_m_setprop: read only property %d", 45486439Sml29623 pr_num)); 45496439Sml29623 break; 45506439Sml29623 45516439Sml29623 case DLD_PROP_AUTONEG: 45526439Sml29623 param_arr[param_autoneg].value = val; 45536439Sml29623 45546439Sml29623 goto reprogram; 45556439Sml29623 4556*6512Ssowmini case DLD_PROP_MTU: 45576439Sml29623 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 45586439Sml29623 err = EBUSY; 45596439Sml29623 break; 45606439Sml29623 } 45616439Sml29623 45626439Sml29623 cur_mtu = nxgep->mac.default_mtu; 45636439Sml29623 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 45646439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 45656439Sml29623 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 45666439Sml29623 new_mtu, nxgep->mac.is_jumbo)); 45676439Sml29623 45686439Sml29623 if (new_mtu == cur_mtu) { 45696439Sml29623 err = 0; 45706439Sml29623 break; 45716439Sml29623 } 45726439Sml29623 if (new_mtu < NXGE_DEFAULT_MTU || 45736439Sml29623 new_mtu > NXGE_MAXIMUM_MTU) { 45746439Sml29623 err = EINVAL; 45756439Sml29623 break; 45766439Sml29623 } 45776439Sml29623 45786439Sml29623 if ((new_mtu > NXGE_DEFAULT_MTU) && 45796439Sml29623 !nxgep->mac.is_jumbo) { 45806439Sml29623 err = EINVAL; 45816439Sml29623 break; 45826439Sml29623 } 45836439Sml29623 45846439Sml29623 old_framesize = (uint32_t)nxgep->mac.maxframesize; 45856439Sml29623 nxgep->mac.maxframesize = (uint16_t) 45866439Sml29623 (new_mtu + NXGE_EHEADER_VLAN_CRC); 45876439Sml29623 if (nxge_mac_set_framesize(nxgep)) { 45886444Sml29623 nxgep->mac.maxframesize = 45896444Sml29623 (uint16_t)old_framesize; 45906439Sml29623 err = EINVAL; 45916439Sml29623 break; 45926439Sml29623 } 45936439Sml29623 45946439Sml29623 err = mac_maxsdu_update(nxgep->mach, new_mtu); 45956439Sml29623 if (err) { 45966444Sml29623 nxgep->mac.maxframesize = 45976444Sml29623 (uint16_t)old_framesize; 45986439Sml29623 err = EINVAL; 45996439Sml29623 break; 
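/*
 * Note that both failure paths above restore the saved old_framesize
 * before returning EINVAL, so a rejected MTU change leaves the MAC
 * programmed exactly as it was.  The frame size actually programmed
 * is new_mtu + NXGE_EHEADER_VLAN_CRC, i.e. the MTU plus what the
 * macro name suggests is the Ethernet header, VLAN tag and CRC
 * overhead.
 */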
46006439Sml29623 } 46016439Sml29623 46026439Sml29623 nxgep->mac.default_mtu = new_mtu; 46036439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 46046439Sml29623 "==> nxge_m_setprop: set MTU: %d maxframe %d", 46056439Sml29623 new_mtu, nxgep->mac.maxframesize)); 46066439Sml29623 break; 46076439Sml29623 46086439Sml29623 case DLD_PROP_FLOWCTRL: 46096439Sml29623 bcopy(pr_val, &fl, sizeof (fl)); 46106439Sml29623 switch (fl) { 46116439Sml29623 default: 46126439Sml29623 err = EINVAL; 46136439Sml29623 break; 46146439Sml29623 46156439Sml29623 case LINK_FLOWCTRL_NONE: 46166439Sml29623 param_arr[param_anar_pause].value = 0; 46176439Sml29623 break; 46186439Sml29623 46196439Sml29623 case LINK_FLOWCTRL_RX: 46206439Sml29623 param_arr[param_anar_pause].value = 1; 46216439Sml29623 break; 46226439Sml29623 46236439Sml29623 case LINK_FLOWCTRL_TX: 46246439Sml29623 case LINK_FLOWCTRL_BI: 46256439Sml29623 err = EINVAL; 46266439Sml29623 break; 46276439Sml29623 } 46286439Sml29623 46296439Sml29623 reprogram: 46306439Sml29623 if (err == 0) { 46316439Sml29623 if (!nxge_param_link_update(nxgep)) { 46326439Sml29623 err = EINVAL; 46336439Sml29623 } 46346439Sml29623 } 46356439Sml29623 break; 4636*6512Ssowmini case DLD_PROP_PRIVATE: 46376439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 46386439Sml29623 "==> nxge_m_setprop: private property")); 46396439Sml29623 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 46406439Sml29623 pr_val); 46416439Sml29623 break; 4642*6512Ssowmini 4643*6512Ssowmini default: 4644*6512Ssowmini err = ENOTSUP; 4645*6512Ssowmini break; 46466439Sml29623 } 46476439Sml29623 46486439Sml29623 mutex_exit(nxgep->genlock); 46496439Sml29623 46506439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 46516439Sml29623 "<== nxge_m_setprop (return %d)", err)); 46526439Sml29623 return (err); 46536439Sml29623 } 46546439Sml29623 46556439Sml29623 static int 46566439Sml29623 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4657*6512Ssowmini uint_t pr_flags, uint_t pr_valsize, void *pr_val) 46586439Sml29623 { 46596439Sml29623 nxge_t *nxgep = barg; 46606439Sml29623 p_nxge_param_t param_arr = nxgep->param_arr; 46616439Sml29623 p_nxge_stats_t statsp = nxgep->statsp; 46626439Sml29623 int err = 0; 46636439Sml29623 link_flowctrl_t fl; 46646439Sml29623 uint64_t tmp = 0; 4665*6512Ssowmini link_state_t ls; 4666*6512Ssowmini boolean_t is_default = (pr_flags & DLD_DEFAULT); 46676439Sml29623 46686439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 46696439Sml29623 "==> nxge_m_getprop: pr_num %d", pr_num)); 4670*6512Ssowmini 4671*6512Ssowmini if (pr_valsize == 0) 4672*6512Ssowmini return (EINVAL); 4673*6512Ssowmini 4674*6512Ssowmini if ((is_default) && (pr_num != DLD_PROP_PRIVATE)) { 4675*6512Ssowmini err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4676*6512Ssowmini return (err); 4677*6512Ssowmini } 4678*6512Ssowmini 46796439Sml29623 bzero(pr_val, pr_valsize); 46806439Sml29623 switch (pr_num) { 46816439Sml29623 case DLD_PROP_DUPLEX: 46826439Sml29623 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 46836439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 46846439Sml29623 "==> nxge_m_getprop: duplex mode %d", 46856439Sml29623 *(uint8_t *)pr_val)); 46866439Sml29623 break; 46876439Sml29623 46886439Sml29623 case DLD_PROP_SPEED: 46896439Sml29623 if (pr_valsize < sizeof (uint64_t)) 46906439Sml29623 return (EINVAL); 46916439Sml29623 tmp = statsp->mac_stats.link_speed * 1000000ull; 46926439Sml29623 bcopy(&tmp, pr_val, sizeof (tmp)); 46936439Sml29623 break; 46946439Sml29623 46956439Sml29623 case DLD_PROP_STATUS: 4696*6512Ssowmini if 
(pr_valsize < sizeof (link_state_t)) 46976439Sml29623 return (EINVAL); 4698*6512Ssowmini if (!statsp->mac_stats.link_up) 4699*6512Ssowmini ls = LINK_STATE_DOWN; 4700*6512Ssowmini else 4701*6512Ssowmini ls = LINK_STATE_UP; 4702*6512Ssowmini bcopy(&ls, pr_val, sizeof (ls)); 47036439Sml29623 break; 47046439Sml29623 47056439Sml29623 case DLD_PROP_AUTONEG: 47066439Sml29623 *(uint8_t *)pr_val = 47076439Sml29623 param_arr[param_autoneg].value; 47086439Sml29623 break; 47096439Sml29623 47106439Sml29623 case DLD_PROP_FLOWCTRL: 47116439Sml29623 if (pr_valsize < sizeof (link_flowctrl_t)) 47126439Sml29623 return (EINVAL); 47136439Sml29623 47146439Sml29623 fl = LINK_FLOWCTRL_NONE; 47156439Sml29623 if (param_arr[param_anar_pause].value) { 47166439Sml29623 fl = LINK_FLOWCTRL_RX; 47176439Sml29623 } 47186439Sml29623 bcopy(&fl, pr_val, sizeof (fl)); 47196439Sml29623 break; 47206439Sml29623 47216439Sml29623 case DLD_PROP_ADV_1000FDX_CAP: 47226439Sml29623 *(uint8_t *)pr_val = 47236439Sml29623 param_arr[param_anar_1000fdx].value; 47246439Sml29623 break; 47256439Sml29623 47266439Sml29623 case DLD_PROP_EN_1000FDX_CAP: 47276439Sml29623 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 47286439Sml29623 break; 47296439Sml29623 47306439Sml29623 case DLD_PROP_ADV_100FDX_CAP: 47316439Sml29623 *(uint8_t *)pr_val = 47326439Sml29623 param_arr[param_anar_100fdx].value; 47336439Sml29623 break; 47346439Sml29623 47356439Sml29623 case DLD_PROP_EN_100FDX_CAP: 47366439Sml29623 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 47376439Sml29623 break; 47386439Sml29623 47396439Sml29623 case DLD_PROP_ADV_10FDX_CAP: 47406439Sml29623 *(uint8_t *)pr_val = 47416439Sml29623 param_arr[param_anar_10fdx].value; 47426439Sml29623 break; 47436439Sml29623 47446439Sml29623 case DLD_PROP_EN_10FDX_CAP: 47456439Sml29623 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 47466439Sml29623 break; 47476439Sml29623 47486439Sml29623 case DLD_PROP_EN_1000HDX_CAP: 47496439Sml29623 case DLD_PROP_EN_100HDX_CAP: 47506439Sml29623 case DLD_PROP_EN_10HDX_CAP: 47516439Sml29623 case DLD_PROP_ADV_1000HDX_CAP: 47526439Sml29623 case DLD_PROP_ADV_100HDX_CAP: 47536439Sml29623 case DLD_PROP_ADV_10HDX_CAP: 4754*6512Ssowmini err = ENOTSUP; 4755*6512Ssowmini break; 4756*6512Ssowmini 4757*6512Ssowmini case DLD_PROP_PRIVATE: 4758*6512Ssowmini err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4759*6512Ssowmini pr_valsize, pr_val); 4760*6512Ssowmini break; 4761*6512Ssowmini default: 47626439Sml29623 err = EINVAL; 47636439Sml29623 break; 47646439Sml29623 } 47656439Sml29623 47666439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 47676439Sml29623 47686439Sml29623 return (err); 47696439Sml29623 } 47706439Sml29623 47716439Sml29623 /* ARGSUSED */ 47726439Sml29623 static int 47736439Sml29623 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 47746439Sml29623 const void *pr_val) 47756439Sml29623 { 47766439Sml29623 p_nxge_param_t param_arr = nxgep->param_arr; 47776439Sml29623 int err = 0; 47786439Sml29623 long result; 47796439Sml29623 47806439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 47816439Sml29623 "==> nxge_set_priv_prop: name %s", pr_name)); 47826439Sml29623 47836439Sml29623 if (strcmp(pr_name, "_accept_jumbo") == 0) { 47846439Sml29623 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 47856439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 47866439Sml29623 "<== nxge_set_priv_prop: name %s " 47876439Sml29623 "pr_val %s result %d " 47886439Sml29623 "param %d is_jumbo %d", 47896439Sml29623 pr_name, pr_val, result, 47906439Sml29623 
param_arr[param_accept_jumbo].value, 47916439Sml29623 nxgep->mac.is_jumbo)); 47926439Sml29623 47936439Sml29623 if (result > 1 || result < 0) { 47946439Sml29623 err = EINVAL; 47956439Sml29623 } else { 47966439Sml29623 if (nxgep->mac.is_jumbo == 47976439Sml29623 (uint32_t)result) { 47986439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 47996439Sml29623 "no change (%d %d)", 48006439Sml29623 nxgep->mac.is_jumbo, 48016439Sml29623 result)); 48026439Sml29623 return (0); 48036439Sml29623 } 48046439Sml29623 } 48056439Sml29623 48066439Sml29623 param_arr[param_accept_jumbo].value = result; 48076439Sml29623 nxgep->mac.is_jumbo = B_FALSE; 48086439Sml29623 if (result) { 48096439Sml29623 nxgep->mac.is_jumbo = B_TRUE; 48106439Sml29623 } 48116439Sml29623 48126439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 48136439Sml29623 "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d", 48146439Sml29623 pr_name, result, nxgep->mac.is_jumbo)); 48156439Sml29623 48166439Sml29623 return (err); 48176439Sml29623 } 48186439Sml29623 48196439Sml29623 /* Blanking */ 48206439Sml29623 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 48216439Sml29623 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 48226439Sml29623 (char *)pr_val, 48236439Sml29623 (caddr_t)¶m_arr[param_rxdma_intr_time]); 48246439Sml29623 if (err) { 48256439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 48266439Sml29623 "<== nxge_set_priv_prop: " 48276439Sml29623 "unable to set (%s)", pr_name)); 48286439Sml29623 err = EINVAL; 48296439Sml29623 } else { 48306439Sml29623 err = 0; 48316439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 48326439Sml29623 "<== nxge_set_priv_prop: " 48336439Sml29623 "set (%s)", pr_name)); 48346439Sml29623 } 48356439Sml29623 48366439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 48376439Sml29623 "<== nxge_set_priv_prop: name %s (value %d)", 48386439Sml29623 pr_name, result)); 48396439Sml29623 48406439Sml29623 return (err); 48416439Sml29623 } 48426439Sml29623 48436439Sml29623 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 48446439Sml29623 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 48456439Sml29623 (char *)pr_val, 48466439Sml29623 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 48476439Sml29623 if (err) { 48486439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 48496439Sml29623 "<== nxge_set_priv_prop: " 48506439Sml29623 "unable to set (%s)", pr_name)); 48516439Sml29623 err = EINVAL; 48526439Sml29623 } else { 48536439Sml29623 err = 0; 48546439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 48556439Sml29623 "<== nxge_set_priv_prop: " 48566439Sml29623 "set (%s)", pr_name)); 48576439Sml29623 } 48586439Sml29623 48596439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 48606439Sml29623 "<== nxge_set_priv_prop: name %s (value %d)", 48616439Sml29623 pr_name, result)); 48626439Sml29623 48636439Sml29623 return (err); 48646439Sml29623 } 48656439Sml29623 48666439Sml29623 /* Classification */ 48676439Sml29623 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 48686439Sml29623 if (pr_val == NULL) { 48696439Sml29623 err = EINVAL; 48706439Sml29623 return (err); 48716439Sml29623 } 48726439Sml29623 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 48736439Sml29623 48746439Sml29623 err = nxge_param_set_ip_opt(nxgep, NULL, 48756439Sml29623 NULL, (char *)pr_val, 48766439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 48776439Sml29623 48786439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 48796439Sml29623 "<== nxge_set_priv_prop: name %s (value 0x%x)", 48806439Sml29623 pr_name, result)); 48816439Sml29623 48826439Sml29623 return (err); 48836439Sml29623 } 48846439Sml29623 48856439Sml29623 if 
(strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 48866439Sml29623 if (pr_val == NULL) { 48876439Sml29623 err = EINVAL; 48886439Sml29623 return (err); 48896439Sml29623 } 48906439Sml29623 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 48916439Sml29623 48926439Sml29623 err = nxge_param_set_ip_opt(nxgep, NULL, 48936439Sml29623 NULL, (char *)pr_val, 48946439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 48956439Sml29623 48966439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 48976439Sml29623 "<== nxge_set_priv_prop: name %s (value 0x%x)", 48986439Sml29623 pr_name, result)); 48996439Sml29623 49006439Sml29623 return (err); 49016439Sml29623 } 49026439Sml29623 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 49036439Sml29623 if (pr_val == NULL) { 49046439Sml29623 err = EINVAL; 49056439Sml29623 return (err); 49066439Sml29623 } 49076439Sml29623 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 49086439Sml29623 49096439Sml29623 err = nxge_param_set_ip_opt(nxgep, NULL, 49106439Sml29623 NULL, (char *)pr_val, 49116439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 49126439Sml29623 49136439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 49146439Sml29623 "<== nxge_set_priv_prop: name %s (value 0x%x)", 49156439Sml29623 pr_name, result)); 49166439Sml29623 49176439Sml29623 return (err); 49186439Sml29623 } 49196439Sml29623 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 49206439Sml29623 if (pr_val == NULL) { 49216439Sml29623 err = EINVAL; 49226439Sml29623 return (err); 49236439Sml29623 } 49246439Sml29623 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 49256439Sml29623 49266439Sml29623 err = nxge_param_set_ip_opt(nxgep, NULL, 49276439Sml29623 NULL, (char *)pr_val, 49286439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 49296439Sml29623 49306439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 49316439Sml29623 "<== nxge_set_priv_prop: name %s (value 0x%x)", 49326439Sml29623 pr_name, result)); 49336439Sml29623 49346439Sml29623 return (err); 49356439Sml29623 } 49366439Sml29623 49376439Sml29623 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 49386439Sml29623 if (pr_val == NULL) { 49396439Sml29623 err = EINVAL; 49406439Sml29623 return (err); 49416439Sml29623 } 49426439Sml29623 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 49436439Sml29623 49446439Sml29623 err = nxge_param_set_ip_opt(nxgep, NULL, 49456439Sml29623 NULL, (char *)pr_val, 49466439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 49476439Sml29623 49486439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 49496439Sml29623 "<== nxge_set_priv_prop: name %s (value 0x%x)", 49506439Sml29623 pr_name, result)); 49516439Sml29623 49526439Sml29623 return (err); 49536439Sml29623 } 49546439Sml29623 49556439Sml29623 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 49566439Sml29623 if (pr_val == NULL) { 49576439Sml29623 err = EINVAL; 49586439Sml29623 return (err); 49596439Sml29623 } 49606439Sml29623 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 49616439Sml29623 49626439Sml29623 err = nxge_param_set_ip_opt(nxgep, NULL, 49636439Sml29623 NULL, (char *)pr_val, 49646439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 49656439Sml29623 49666439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 49676439Sml29623 "<== nxge_set_priv_prop: name %s (value 0x%x)", 49686439Sml29623 pr_name, result)); 49696439Sml29623 49706439Sml29623 return (err); 49716439Sml29623 } 49726439Sml29623 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 49736439Sml29623 if (pr_val == NULL) { 49746439Sml29623 err = EINVAL; 49756439Sml29623 return (err); 49766439Sml29623 } 
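/*
 * As in the other _class_opt_* branches, the ddi_strtol() result
 * below is only used for the debug message; the raw pr_val string
 * is what nxge_param_set_ip_opt() is handed to program the class
 * option.
 */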
49776439Sml29623 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 49786439Sml29623 49796439Sml29623 err = nxge_param_set_ip_opt(nxgep, NULL, 49806439Sml29623 NULL, (char *)pr_val, 49816439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 49826439Sml29623 49836439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 49846439Sml29623 "<== nxge_set_priv_prop: name %s (value 0x%x)", 49856439Sml29623 pr_name, result)); 49866439Sml29623 49876439Sml29623 return (err); 49886439Sml29623 } 49896439Sml29623 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 49906439Sml29623 if (pr_val == NULL) { 49916439Sml29623 err = EINVAL; 49926439Sml29623 return (err); 49936439Sml29623 } 49946439Sml29623 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 49956439Sml29623 49966439Sml29623 err = nxge_param_set_ip_opt(nxgep, NULL, 49976439Sml29623 NULL, (char *)pr_val, 49986439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 49996439Sml29623 50006439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 50016439Sml29623 "<== nxge_set_priv_prop: name %s (value 0x%x)", 50026439Sml29623 pr_name, result)); 50036439Sml29623 50046439Sml29623 return (err); 50056439Sml29623 } 50066439Sml29623 50076439Sml29623 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 50086439Sml29623 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 50096439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 50106439Sml29623 "==> nxge_set_priv_prop: name %s (busy)", pr_name)); 50116439Sml29623 err = EBUSY; 50126439Sml29623 return (err); 50136439Sml29623 } 50146439Sml29623 if (pr_val == NULL) { 50156439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 50166439Sml29623 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 50176439Sml29623 err = EINVAL; 50186439Sml29623 return (err); 50196439Sml29623 } 50206439Sml29623 50216439Sml29623 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 50226439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 50236439Sml29623 "<== nxge_set_priv_prop: name %s " 50246439Sml29623 "(lso %d pr_val %s value %d)", 50256439Sml29623 pr_name, nxgep->soft_lso_enable, pr_val, result)); 50266439Sml29623 50276439Sml29623 if (result > 1 || result < 0) { 50286439Sml29623 err = EINVAL; 50296439Sml29623 } else { 50306439Sml29623 if (nxgep->soft_lso_enable == (uint32_t)result) { 50316439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 50326439Sml29623 "no change (%d %d)", 50336439Sml29623 nxgep->soft_lso_enable, result)); 50346439Sml29623 return (0); 50356439Sml29623 } 50366439Sml29623 } 50376439Sml29623 50386439Sml29623 nxgep->soft_lso_enable = (int)result; 50396439Sml29623 50406439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 50416439Sml29623 "<== nxge_set_priv_prop: name %s (value %d)", 50426439Sml29623 pr_name, result)); 50436439Sml29623 50446439Sml29623 return (err); 50456439Sml29623 } 5046*6512Ssowmini if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5047*6512Ssowmini err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5048*6512Ssowmini (caddr_t)¶m_arr[param_anar_10gfdx]); 5049*6512Ssowmini return (err); 5050*6512Ssowmini } 5051*6512Ssowmini if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5052*6512Ssowmini err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5053*6512Ssowmini (caddr_t)¶m_arr[param_anar_pause]); 5054*6512Ssowmini return (err); 5055*6512Ssowmini } 50566439Sml29623 50576439Sml29623 return (EINVAL); 50586439Sml29623 } 50596439Sml29623 50606439Sml29623 static int 5061*6512Ssowmini nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 5062*6512Ssowmini uint_t pr_valsize, void *pr_val) 50636439Sml29623 { 50646439Sml29623 p_nxge_param_t 
param_arr = nxgep->param_arr; 50656439Sml29623 char valstr[MAXNAMELEN]; 50666439Sml29623 int err = EINVAL; 50676439Sml29623 uint_t strsize; 5068*6512Ssowmini boolean_t is_default = (pr_flags & DLD_DEFAULT); 50696439Sml29623 50706439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 50716439Sml29623 "==> nxge_get_priv_prop: property %s", pr_name)); 50726439Sml29623 50736439Sml29623 /* function number */ 50746439Sml29623 if (strcmp(pr_name, "_function_number") == 0) { 5075*6512Ssowmini if (is_default) 5076*6512Ssowmini return (ENOTSUP); 5077*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%d", 5078*6512Ssowmini nxgep->function_num); 50796439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 50806439Sml29623 "==> nxge_get_priv_prop: name %s " 50816439Sml29623 "(value %d valstr %s)", 50826439Sml29623 pr_name, nxgep->function_num, valstr)); 50836439Sml29623 50846439Sml29623 err = 0; 50856439Sml29623 goto done; 50866439Sml29623 } 50876439Sml29623 50886439Sml29623 /* Neptune firmware version */ 50896439Sml29623 if (strcmp(pr_name, "_fw_version") == 0) { 5090*6512Ssowmini if (is_default) 5091*6512Ssowmini return (ENOTSUP); 5092*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%s", 5093*6512Ssowmini nxgep->vpd_info.ver); 50946439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 50956439Sml29623 "==> nxge_get_priv_prop: name %s " 50966439Sml29623 "(value %d valstr %s)", 50976439Sml29623 pr_name, nxgep->vpd_info.ver, valstr)); 50986439Sml29623 50996439Sml29623 err = 0; 51006439Sml29623 goto done; 51016439Sml29623 } 51026439Sml29623 51036439Sml29623 /* port PHY mode */ 51046439Sml29623 if (strcmp(pr_name, "_port_mode") == 0) { 5105*6512Ssowmini if (is_default) 5106*6512Ssowmini return (ENOTSUP); 51076439Sml29623 switch (nxgep->mac.portmode) { 51086439Sml29623 case PORT_1G_COPPER: 5109*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 51106439Sml29623 nxgep->hot_swappable_phy ? 51116439Sml29623 "[Hot Swappable]" : ""); 51126439Sml29623 break; 51136439Sml29623 case PORT_1G_FIBER: 5114*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 51156439Sml29623 nxgep->hot_swappable_phy ? 51166439Sml29623 "[hot swappable]" : ""); 51176439Sml29623 break; 51186439Sml29623 case PORT_10G_COPPER: 5119*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), 5120*6512Ssowmini "10G copper %s", 51216439Sml29623 nxgep->hot_swappable_phy ? 51226439Sml29623 "[hot swappable]" : ""); 51236439Sml29623 break; 51246439Sml29623 case PORT_10G_FIBER: 5125*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 51266439Sml29623 nxgep->hot_swappable_phy ? 51276439Sml29623 "[hot swappable]" : ""); 51286439Sml29623 break; 51296439Sml29623 case PORT_10G_SERDES: 5130*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), 5131*6512Ssowmini "10G serdes %s", nxgep->hot_swappable_phy ? 51326439Sml29623 "[hot swappable]" : ""); 51336439Sml29623 break; 51346439Sml29623 case PORT_1G_SERDES: 5135*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 51366439Sml29623 nxgep->hot_swappable_phy ? 51376439Sml29623 "[hot swappable]" : ""); 51386439Sml29623 break; 51396439Sml29623 case PORT_1G_RGMII_FIBER: 5140*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), 5141*6512Ssowmini "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 
51426439Sml29623 "[hot swappable]" : ""); 51436439Sml29623 break; 51446439Sml29623 case PORT_HSP_MODE: 5145*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), 51466444Sml29623 "phy not present[hot swappable]"); 51476439Sml29623 break; 51486439Sml29623 default: 5149*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "unknown %s", 51506439Sml29623 nxgep->hot_swappable_phy ? 51516439Sml29623 "[hot swappable]" : ""); 51526439Sml29623 break; 51536439Sml29623 } 51546439Sml29623 51556439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 51566439Sml29623 "==> nxge_get_priv_prop: name %s (value %s)", 51576439Sml29623 pr_name, valstr)); 51586439Sml29623 51596439Sml29623 err = 0; 51606439Sml29623 goto done; 51616439Sml29623 } 51626439Sml29623 51636439Sml29623 /* Hot swappable PHY */ 51646439Sml29623 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5165*6512Ssowmini if (is_default) 5166*6512Ssowmini return (ENOTSUP); 5167*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%s", 51686439Sml29623 nxgep->hot_swappable_phy ? 51696439Sml29623 "yes" : "no"); 51706439Sml29623 51716439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 51726439Sml29623 "==> nxge_get_priv_prop: name %s " 51736439Sml29623 "(value %d valstr %s)", 51746439Sml29623 pr_name, nxgep->hot_swappable_phy, valstr)); 51756439Sml29623 51766439Sml29623 err = 0; 51776439Sml29623 goto done; 51786439Sml29623 } 51796439Sml29623 51806439Sml29623 51816439Sml29623 /* accept jumbo */ 51826439Sml29623 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5183*6512Ssowmini if (is_default) 5184*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5185*6512Ssowmini else 5186*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), 5187*6512Ssowmini "%d", nxgep->mac.is_jumbo); 51886439Sml29623 err = 0; 51896439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 51906439Sml29623 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 51916439Sml29623 pr_name, 51926439Sml29623 (uint32_t)param_arr[param_accept_jumbo].value, 51936439Sml29623 nxgep->mac.is_jumbo, 51946439Sml29623 nxge_jumbo_enable)); 51956439Sml29623 51966439Sml29623 goto done; 51976439Sml29623 } 51986439Sml29623 51996439Sml29623 /* Receive Interrupt Blanking Parameters */ 52006439Sml29623 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5201*6512Ssowmini err = 0; 5202*6512Ssowmini if (is_default) { 5203*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), 5204*6512Ssowmini "%d", RXDMA_RCR_TO_DEFAULT); 5205*6512Ssowmini goto done; 5206*6512Ssowmini } 5207*6512Ssowmini 5208*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%d", 5209*6512Ssowmini nxgep->intr_timeout); 52106439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 52116439Sml29623 "==> nxge_get_priv_prop: name %s (value %d)", 52126439Sml29623 pr_name, 52136439Sml29623 (uint32_t)nxgep->intr_timeout)); 52146439Sml29623 goto done; 52156439Sml29623 } 52166439Sml29623 52176439Sml29623 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5218*6512Ssowmini err = 0; 5219*6512Ssowmini if (is_default) { 5220*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), 5221*6512Ssowmini "%d", RXDMA_RCR_PTHRES_DEFAULT); 5222*6512Ssowmini goto done; 5223*6512Ssowmini } 5224*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%d", 5225*6512Ssowmini nxgep->intr_threshold); 52266439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 52276439Sml29623 "==> nxge_get_priv_prop: name %s (value %d)", 52286439Sml29623 pr_name, (uint32_t)nxgep->intr_threshold)); 52296439Sml29623 52306439Sml29623 goto done; 52316439Sml29623 } 52326439Sml29623 52336439Sml29623 /* Classification and Load Distribution 
Configuration */ 52346439Sml29623 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5235*6512Ssowmini if (is_default) { 5236*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 5237*6512Ssowmini NXGE_CLASS_FLOW_GEN_SERVER); 5238*6512Ssowmini err = 0; 5239*6512Ssowmini goto done; 5240*6512Ssowmini } 52416439Sml29623 err = nxge_dld_get_ip_opt(nxgep, 52426439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 52436439Sml29623 5244*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 52456439Sml29623 (int)param_arr[param_class_opt_ipv4_tcp].value); 52466439Sml29623 52476439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 52486439Sml29623 "==> nxge_get_priv_prop: %s", valstr)); 52496439Sml29623 goto done; 52506439Sml29623 } 52516439Sml29623 52526439Sml29623 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5253*6512Ssowmini if (is_default) { 5254*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 5255*6512Ssowmini NXGE_CLASS_FLOW_GEN_SERVER); 5256*6512Ssowmini err = 0; 5257*6512Ssowmini goto done; 5258*6512Ssowmini } 52596439Sml29623 err = nxge_dld_get_ip_opt(nxgep, 52606439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 52616439Sml29623 5262*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 52636439Sml29623 (int)param_arr[param_class_opt_ipv4_udp].value); 52646439Sml29623 52656439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 52666439Sml29623 "==> nxge_get_priv_prop: %s", valstr)); 52676439Sml29623 goto done; 52686439Sml29623 } 52696439Sml29623 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5270*6512Ssowmini if (is_default) { 5271*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 5272*6512Ssowmini NXGE_CLASS_FLOW_GEN_SERVER); 5273*6512Ssowmini err = 0; 5274*6512Ssowmini goto done; 5275*6512Ssowmini } 52766439Sml29623 err = nxge_dld_get_ip_opt(nxgep, 52776439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 52786439Sml29623 5279*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 52806439Sml29623 (int)param_arr[param_class_opt_ipv4_ah].value); 52816439Sml29623 52826439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 52836439Sml29623 "==> nxge_get_priv_prop: %s", valstr)); 52846439Sml29623 goto done; 52856439Sml29623 } 52866439Sml29623 52876439Sml29623 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5288*6512Ssowmini if (is_default) { 5289*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 5290*6512Ssowmini NXGE_CLASS_FLOW_GEN_SERVER); 5291*6512Ssowmini err = 0; 5292*6512Ssowmini goto done; 5293*6512Ssowmini } 52946439Sml29623 err = nxge_dld_get_ip_opt(nxgep, 52956439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 52966439Sml29623 5297*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 52986439Sml29623 (int)param_arr[param_class_opt_ipv4_sctp].value); 52996439Sml29623 53006439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 53016439Sml29623 "==> nxge_get_priv_prop: %s", valstr)); 53026439Sml29623 goto done; 53036439Sml29623 } 53046439Sml29623 53056439Sml29623 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5306*6512Ssowmini if (is_default) { 5307*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 5308*6512Ssowmini NXGE_CLASS_FLOW_GEN_SERVER); 5309*6512Ssowmini err = 0; 5310*6512Ssowmini goto done; 5311*6512Ssowmini } 53126439Sml29623 err = nxge_dld_get_ip_opt(nxgep, 53136439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 53146439Sml29623 5315*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 53166439Sml29623 (int)param_arr[param_class_opt_ipv6_tcp].value); 53176439Sml29623 53186439Sml29623 
NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 53196439Sml29623 "==> nxge_get_priv_prop: %s", valstr)); 53206439Sml29623 goto done; 53216439Sml29623 } 53226439Sml29623 53236439Sml29623 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5324*6512Ssowmini if (is_default) { 5325*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 5326*6512Ssowmini NXGE_CLASS_FLOW_GEN_SERVER); 5327*6512Ssowmini err = 0; 5328*6512Ssowmini goto done; 5329*6512Ssowmini } 53306439Sml29623 err = nxge_dld_get_ip_opt(nxgep, 53316439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 53326439Sml29623 5333*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 53346439Sml29623 (int)param_arr[param_class_opt_ipv6_udp].value); 53356439Sml29623 53366439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 53376439Sml29623 "==> nxge_get_priv_prop: %s", valstr)); 53386439Sml29623 goto done; 53396439Sml29623 } 53406439Sml29623 53416439Sml29623 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5342*6512Ssowmini if (is_default) { 5343*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 5344*6512Ssowmini NXGE_CLASS_FLOW_GEN_SERVER); 5345*6512Ssowmini err = 0; 5346*6512Ssowmini goto done; 5347*6512Ssowmini } 53486439Sml29623 err = nxge_dld_get_ip_opt(nxgep, 53496439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 53506439Sml29623 5351*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 53526439Sml29623 (int)param_arr[param_class_opt_ipv6_ah].value); 53536439Sml29623 53546439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 53556439Sml29623 "==> nxge_get_priv_prop: %s", valstr)); 53566439Sml29623 goto done; 53576439Sml29623 } 53586439Sml29623 53596439Sml29623 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5360*6512Ssowmini if (is_default) { 5361*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 5362*6512Ssowmini NXGE_CLASS_FLOW_GEN_SERVER); 5363*6512Ssowmini err = 0; 5364*6512Ssowmini goto done; 5365*6512Ssowmini } 53666439Sml29623 err = nxge_dld_get_ip_opt(nxgep, 53676439Sml29623 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 53686439Sml29623 5369*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%x", 53706439Sml29623 (int)param_arr[param_class_opt_ipv6_sctp].value); 53716439Sml29623 53726439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 53736439Sml29623 "==> nxge_get_priv_prop: %s", valstr)); 53746439Sml29623 goto done; 53756439Sml29623 } 53766439Sml29623 53776439Sml29623 /* Software LSO */ 53786439Sml29623 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5379*6512Ssowmini if (is_default) { 5380*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5381*6512Ssowmini err = 0; 5382*6512Ssowmini goto done; 5383*6512Ssowmini } 5384*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), 5385*6512Ssowmini "%d", nxgep->soft_lso_enable); 53866439Sml29623 err = 0; 53876439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 53886439Sml29623 "==> nxge_get_priv_prop: name %s (value %d)", 53896439Sml29623 pr_name, nxgep->soft_lso_enable)); 53906439Sml29623 53916439Sml29623 goto done; 53926439Sml29623 } 5393*6512Ssowmini if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5394*6512Ssowmini err = 0; 5395*6512Ssowmini if (is_default || 5396*6512Ssowmini nxgep->param_arr[param_anar_10gfdx].value != 0) { 5397*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5398*6512Ssowmini goto done; 5399*6512Ssowmini } else { 5400*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5401*6512Ssowmini goto done; 5402*6512Ssowmini } 5403*6512Ssowmini } 5404*6512Ssowmini if (strcmp(pr_name, "_adv_pause_cap") == 0) { 
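/*
 * Mirrors the _adv_10gfdx_cap case above: report "1" when the
 * default value is requested or the corresponding ANAR pause
 * parameter is non-zero, and "0" otherwise.
 */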
5405*6512Ssowmini err = 0; 5406*6512Ssowmini if (is_default || 5407*6512Ssowmini nxgep->param_arr[param_anar_pause].value != 0) { 5408*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5409*6512Ssowmini goto done; 5410*6512Ssowmini } else { 5411*6512Ssowmini (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5412*6512Ssowmini goto done; 5413*6512Ssowmini } 5414*6512Ssowmini } 54156439Sml29623 54166439Sml29623 done: 54176439Sml29623 if (err == 0) { 54186439Sml29623 strsize = (uint_t)strlen(valstr); 54196439Sml29623 if (pr_valsize < strsize) { 54206439Sml29623 err = ENOBUFS; 54216439Sml29623 } else { 54226439Sml29623 (void) strlcpy(pr_val, valstr, pr_valsize); 54236439Sml29623 } 54246439Sml29623 } 54256439Sml29623 54266439Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 54276439Sml29623 "<== nxge_get_priv_prop: return %d", err)); 54286439Sml29623 return (err); 54296439Sml29623 } 54306439Sml29623 54313859Sml29623 /* 54323859Sml29623 * Module loading and removing entry points. 54333859Sml29623 */ 54343859Sml29623 54353859Sml29623 static struct cb_ops nxge_cb_ops = { 54363859Sml29623 nodev, /* cb_open */ 54373859Sml29623 nodev, /* cb_close */ 54383859Sml29623 nodev, /* cb_strategy */ 54393859Sml29623 nodev, /* cb_print */ 54403859Sml29623 nodev, /* cb_dump */ 54413859Sml29623 nodev, /* cb_read */ 54423859Sml29623 nodev, /* cb_write */ 54433859Sml29623 nodev, /* cb_ioctl */ 54443859Sml29623 nodev, /* cb_devmap */ 54453859Sml29623 nodev, /* cb_mmap */ 54463859Sml29623 nodev, /* cb_segmap */ 54473859Sml29623 nochpoll, /* cb_chpoll */ 54483859Sml29623 ddi_prop_op, /* cb_prop_op */ 54493859Sml29623 NULL, 54503859Sml29623 D_MP, /* cb_flag */ 54513859Sml29623 CB_REV, /* rev */ 54523859Sml29623 nodev, /* int (*cb_aread)() */ 54533859Sml29623 nodev /* int (*cb_awrite)() */ 54543859Sml29623 }; 54553859Sml29623 54563859Sml29623 static struct dev_ops nxge_dev_ops = { 54573859Sml29623 DEVO_REV, /* devo_rev */ 54583859Sml29623 0, /* devo_refcnt */ 54593859Sml29623 nulldev, 54603859Sml29623 nulldev, /* devo_identify */ 54613859Sml29623 nulldev, /* devo_probe */ 54623859Sml29623 nxge_attach, /* devo_attach */ 54633859Sml29623 nxge_detach, /* devo_detach */ 54643859Sml29623 nodev, /* devo_reset */ 54653859Sml29623 &nxge_cb_ops, /* devo_cb_ops */ 54663859Sml29623 (struct bus_ops *)NULL, /* devo_bus_ops */ 54673859Sml29623 ddi_power /* devo_power */ 54683859Sml29623 }; 54693859Sml29623 54703859Sml29623 extern struct mod_ops mod_driverops; 54713859Sml29623 54724977Sraghus #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 54733859Sml29623 54743859Sml29623 /* 54753859Sml29623 * Module linkage information for the kernel. 
54763859Sml29623 */ 54773859Sml29623 static struct modldrv nxge_modldrv = { 54783859Sml29623 &mod_driverops, 54793859Sml29623 NXGE_DESC_VER, 54803859Sml29623 &nxge_dev_ops 54813859Sml29623 }; 54823859Sml29623 54833859Sml29623 static struct modlinkage modlinkage = { 54843859Sml29623 MODREV_1, (void *) &nxge_modldrv, NULL 54853859Sml29623 }; 54863859Sml29623 54873859Sml29623 int 54883859Sml29623 _init(void) 54893859Sml29623 { 54903859Sml29623 int status; 54913859Sml29623 54923859Sml29623 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 54933859Sml29623 mac_init_ops(&nxge_dev_ops, "nxge"); 54943859Sml29623 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 54953859Sml29623 if (status != 0) { 54963859Sml29623 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5497*6512Ssowmini "failed to init device soft state")); 54983859Sml29623 goto _init_exit; 54993859Sml29623 } 55003859Sml29623 status = mod_install(&modlinkage); 55013859Sml29623 if (status != 0) { 55023859Sml29623 ddi_soft_state_fini(&nxge_list); 55033859Sml29623 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 55043859Sml29623 goto _init_exit; 55053859Sml29623 } 55063859Sml29623 55073859Sml29623 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 55083859Sml29623 55093859Sml29623 _init_exit: 55103859Sml29623 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 55113859Sml29623 55123859Sml29623 return (status); 55133859Sml29623 } 55143859Sml29623 55153859Sml29623 int 55163859Sml29623 _fini(void) 55173859Sml29623 { 55183859Sml29623 int status; 55193859Sml29623 55203859Sml29623 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 55213859Sml29623 55223859Sml29623 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 55233859Sml29623 55243859Sml29623 if (nxge_mblks_pending) 55253859Sml29623 return (EBUSY); 55263859Sml29623 55273859Sml29623 status = mod_remove(&modlinkage); 55283859Sml29623 if (status != DDI_SUCCESS) { 55293859Sml29623 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5530*6512Ssowmini "Module removal failed 0x%08x", 5531*6512Ssowmini status)); 55323859Sml29623 goto _fini_exit; 55333859Sml29623 } 55343859Sml29623 55353859Sml29623 mac_fini_ops(&nxge_dev_ops); 55363859Sml29623 55373859Sml29623 ddi_soft_state_fini(&nxge_list); 55383859Sml29623 55393859Sml29623 MUTEX_DESTROY(&nxge_common_lock); 55403859Sml29623 _fini_exit: 55413859Sml29623 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 55423859Sml29623 55433859Sml29623 return (status); 55443859Sml29623 } 55453859Sml29623 55463859Sml29623 int 55473859Sml29623 _info(struct modinfo *modinfop) 55483859Sml29623 { 55493859Sml29623 int status; 55503859Sml29623 55513859Sml29623 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 55523859Sml29623 status = mod_info(&modlinkage, modinfop); 55533859Sml29623 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 55543859Sml29623 55553859Sml29623 return (status); 55563859Sml29623 } 55573859Sml29623 55583859Sml29623 /*ARGSUSED*/ 55593859Sml29623 static nxge_status_t 55603859Sml29623 nxge_add_intrs(p_nxge_t nxgep) 55613859Sml29623 { 55623859Sml29623 55633859Sml29623 int intr_types; 55643859Sml29623 int type = 0; 55653859Sml29623 int ddi_status = DDI_SUCCESS; 55663859Sml29623 nxge_status_t status = NXGE_OK; 55673859Sml29623 55683859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 55693859Sml29623 55703859Sml29623 nxgep->nxge_intr_type.intr_registered = B_FALSE; 55713859Sml29623 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 55723859Sml29623 nxgep->nxge_intr_type.msi_intx_cnt = 0; 55733859Sml29623 
nxgep->nxge_intr_type.intr_added = 0;
55743859Sml29623 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
55753859Sml29623 nxgep->nxge_intr_type.intr_type = 0;
55763859Sml29623
55773859Sml29623 if (nxgep->niu_type == N2_NIU) {
55783859Sml29623 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
55793859Sml29623 } else if (nxge_msi_enable) {
55803859Sml29623 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
55813859Sml29623 }
55823859Sml29623
55833859Sml29623 /* Get the supported interrupt types */
55843859Sml29623 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
5585*6512Ssowmini != DDI_SUCCESS) {
55863859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
5587*6512Ssowmini "ddi_intr_get_supported_types failed: status 0x%08x",
5588*6512Ssowmini ddi_status));
55893859Sml29623 return (NXGE_ERROR | NXGE_DDI_FAILED);
55903859Sml29623 }
55913859Sml29623 nxgep->nxge_intr_type.intr_types = intr_types;
55923859Sml29623
55933859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5594*6512Ssowmini "ddi_intr_get_supported_types: 0x%08x", intr_types));
55953859Sml29623
55963859Sml29623 /*
55973859Sml29623 * Solaris MSI-X is not supported yet. Use MSI for now.
55983859Sml29623 * nxge_msi_enable (1):
55993859Sml29623 * 1 - MSI, 2 - MSI-X, others - FIXED
56003859Sml29623 */
56013859Sml29623 switch (nxge_msi_enable) {
56023859Sml29623 default:
56033859Sml29623 type = DDI_INTR_TYPE_FIXED;
56043859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5605*6512Ssowmini "use fixed (intx emulation) type %08x",
5606*6512Ssowmini type));
56073859Sml29623 break;
56083859Sml29623
56093859Sml29623 case 2:
56103859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5611*6512Ssowmini "ddi_intr_get_supported_types: 0x%08x", intr_types));
56123859Sml29623 if (intr_types & DDI_INTR_TYPE_MSIX) {
56133859Sml29623 type = DDI_INTR_TYPE_MSIX;
56143859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5615*6512Ssowmini "ddi_intr_get_supported_types: MSIX 0x%08x",
5616*6512Ssowmini type));
56173859Sml29623 } else if (intr_types & DDI_INTR_TYPE_MSI) {
56183859Sml29623 type = DDI_INTR_TYPE_MSI;
56193859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5620*6512Ssowmini "ddi_intr_get_supported_types: MSI 0x%08x",
5621*6512Ssowmini type));
56223859Sml29623 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
56233859Sml29623 type = DDI_INTR_TYPE_FIXED;
56243859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5625*6512Ssowmini "ddi_intr_get_supported_types: FIXED 0x%08x",
5626*6512Ssowmini type));
56273859Sml29623 }
56283859Sml29623 break;
56293859Sml29623
56303859Sml29623 case 1:
56313859Sml29623 if (intr_types & DDI_INTR_TYPE_MSI) {
56323859Sml29623 type = DDI_INTR_TYPE_MSI;
56333859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5634*6512Ssowmini "ddi_intr_get_supported_types: MSI 0x%08x",
5635*6512Ssowmini type));
56363859Sml29623 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
56373859Sml29623 type = DDI_INTR_TYPE_MSIX;
56383859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5639*6512Ssowmini "ddi_intr_get_supported_types: MSIX 0x%08x",
5640*6512Ssowmini type));
56413859Sml29623 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
56423859Sml29623 type = DDI_INTR_TYPE_FIXED;
56433859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5644*6512Ssowmini "ddi_intr_get_supported_types: FIXED 0x%08x",
5645*6512Ssowmini type));
56463859Sml29623 }
56473859Sml29623 }
56483859Sml29623
56493859Sml29623
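/*
 * To summarize the selection above: nxge_msi_enable == 2 prefers
 * MSI-X, then MSI, then FIXED; nxge_msi_enable == 1 prefers MSI,
 * then MSI-X, then FIXED; any other value forces FIXED (INTx
 * emulation).
 */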
nxgep->nxge_intr_type.intr_type = type; 56503859Sml29623 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5651*6512Ssowmini type == DDI_INTR_TYPE_FIXED) && 5652*6512Ssowmini nxgep->nxge_intr_type.niu_msi_enable) { 56533859Sml29623 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 56543859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5655*6512Ssowmini " nxge_add_intrs: " 5656*6512Ssowmini " nxge_add_intrs_adv failed: status 0x%08x", 5657*6512Ssowmini status)); 56583859Sml29623 return (status); 56593859Sml29623 } else { 56603859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5661*6512Ssowmini "interrupts registered : type %d", type)); 56623859Sml29623 nxgep->nxge_intr_type.intr_registered = B_TRUE; 56633859Sml29623 56643859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5665*6512Ssowmini "\nAdded advanced nxge add_intr_adv " 5666*6512Ssowmini "intr type 0x%x\n", type)); 56673859Sml29623 56683859Sml29623 return (status); 56693859Sml29623 } 56703859Sml29623 } 56713859Sml29623 56723859Sml29623 if (!nxgep->nxge_intr_type.intr_registered) { 56733859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5674*6512Ssowmini "failed to register interrupts")); 56753859Sml29623 return (NXGE_ERROR | NXGE_DDI_FAILED); 56763859Sml29623 } 56773859Sml29623 56783859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 56793859Sml29623 return (status); 56803859Sml29623 } 56813859Sml29623 56823859Sml29623 /*ARGSUSED*/ 56833859Sml29623 static nxge_status_t 56843859Sml29623 nxge_add_soft_intrs(p_nxge_t nxgep) 56853859Sml29623 { 56863859Sml29623 56873859Sml29623 int ddi_status = DDI_SUCCESS; 56883859Sml29623 nxge_status_t status = NXGE_OK; 56893859Sml29623 56903859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs")); 56913859Sml29623 56923859Sml29623 nxgep->resched_id = NULL; 56933859Sml29623 nxgep->resched_running = B_FALSE; 56943859Sml29623 ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW, 5695*6512Ssowmini &nxgep->resched_id, 5696*6512Ssowmini NULL, NULL, nxge_reschedule, (caddr_t)nxgep); 56973859Sml29623 if (ddi_status != DDI_SUCCESS) { 56983859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: " 5699*6512Ssowmini "ddi_add_softintrs failed: status 0x%08x", 5700*6512Ssowmini ddi_status)); 57013859Sml29623 return (NXGE_ERROR | NXGE_DDI_FAILED); 57023859Sml29623 } 57033859Sml29623 57043859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_ddi_add_soft_intrs")); 57053859Sml29623 57063859Sml29623 return (status); 57073859Sml29623 } 57083859Sml29623 57093859Sml29623 static nxge_status_t 57103859Sml29623 nxge_add_intrs_adv(p_nxge_t nxgep) 57113859Sml29623 { 57123859Sml29623 int intr_type; 57133859Sml29623 p_nxge_intr_t intrp; 57143859Sml29623 57153859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 57163859Sml29623 57173859Sml29623 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 57183859Sml29623 intr_type = intrp->intr_type; 57193859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5720*6512Ssowmini intr_type)); 57213859Sml29623 57223859Sml29623 switch (intr_type) { 57233859Sml29623 case DDI_INTR_TYPE_MSI: /* 0x2 */ 57243859Sml29623 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 57253859Sml29623 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 57263859Sml29623 57273859Sml29623 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 57283859Sml29623 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 57293859Sml29623 57303859Sml29623 default: 57313859Sml29623 return (NXGE_ERROR); 
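/*
 * Any interrupt type other than MSI, MSI-X or FIXED is rejected
 * here, which the caller (nxge_add_intrs) reports as a failure to
 * register interrupts.
 */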
57323859Sml29623 } 57333859Sml29623 } 57343859Sml29623 57353859Sml29623 57363859Sml29623 /*ARGSUSED*/ 57373859Sml29623 static nxge_status_t 57383859Sml29623 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 57393859Sml29623 { 57403859Sml29623 dev_info_t *dip = nxgep->dip; 57413859Sml29623 p_nxge_ldg_t ldgp; 57423859Sml29623 p_nxge_intr_t intrp; 57433859Sml29623 uint_t *inthandler; 57443859Sml29623 void *arg1, *arg2; 57453859Sml29623 int behavior; 57465013Sml29623 int nintrs, navail, nrequest; 57473859Sml29623 int nactual, nrequired; 57483859Sml29623 int inum = 0; 57493859Sml29623 int x, y; 57503859Sml29623 int ddi_status = DDI_SUCCESS; 57513859Sml29623 nxge_status_t status = NXGE_OK; 57523859Sml29623 57533859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 57543859Sml29623 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 57553859Sml29623 intrp->start_inum = 0; 57563859Sml29623 57573859Sml29623 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 57583859Sml29623 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 57593859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5760*6512Ssowmini "ddi_intr_get_nintrs() failed, status: 0x%x%, " 5761*6512Ssowmini "nintrs: %d", ddi_status, nintrs)); 57623859Sml29623 return (NXGE_ERROR | NXGE_DDI_FAILED); 57633859Sml29623 } 57643859Sml29623 57653859Sml29623 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 57663859Sml29623 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 57673859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5768*6512Ssowmini "ddi_intr_get_navail() failed, status: 0x%x%, " 5769*6512Ssowmini "nintrs: %d", ddi_status, navail)); 57703859Sml29623 return (NXGE_ERROR | NXGE_DDI_FAILED); 57713859Sml29623 } 57723859Sml29623 57733859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5774*6512Ssowmini "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5775*6512Ssowmini nintrs, navail)); 57763859Sml29623 57775013Sml29623 /* PSARC/2007/453 MSI-X interrupt limit override */ 57785013Sml29623 if (int_type == DDI_INTR_TYPE_MSIX) { 57795013Sml29623 nrequest = nxge_create_msi_property(nxgep); 57805013Sml29623 if (nrequest < navail) { 57815013Sml29623 navail = nrequest; 57825013Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, 57835013Sml29623 "nxge_add_intrs_adv_type: nintrs %d " 57845013Sml29623 "navail %d (nrequest %d)", 57855013Sml29623 nintrs, navail, nrequest)); 57865013Sml29623 } 57875013Sml29623 } 57885013Sml29623 57893859Sml29623 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 57903859Sml29623 /* MSI must be power of 2 */ 57913859Sml29623 if ((navail & 16) == 16) { 57923859Sml29623 navail = 16; 57933859Sml29623 } else if ((navail & 8) == 8) { 57943859Sml29623 navail = 8; 57953859Sml29623 } else if ((navail & 4) == 4) { 57963859Sml29623 navail = 4; 57973859Sml29623 } else if ((navail & 2) == 2) { 57983859Sml29623 navail = 2; 57993859Sml29623 } else { 58003859Sml29623 navail = 1; 58013859Sml29623 } 58023859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5803*6512Ssowmini "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5804*6512Ssowmini "navail %d", nintrs, navail)); 58053859Sml29623 } 58063859Sml29623 58073859Sml29623 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? 
	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type:nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 $%p arg2 $%p: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 $%p arg2 $%p: "
			    "nldevs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));
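
		/*
		 * Register the handler for this vector.  On failure,
		 * unwind completely: remove the handlers added so far,
		 * free every allocated handle and the handle table, and
		 * tear down the LDG/LDV state before returning.
		 */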
		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}
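
/*
 * nxge_add_intrs_adv_type_fix() is the legacy (fixed/INTx) counterpart
 * of nxge_add_intrs_adv_type(): same allocate/register sequence, but
 * the allocation is made with DDI_INTR_ALLOC_STRICT and, on N2/NIU,
 * the group's intdata is not programmed via SID_DATA().
 */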
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "shared ldv %d int handler(%d) ldv %d ldg %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}
		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = nxge_intr_ldgv_init(nxgep);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

	return (status);
}

static void
nxge_remove_intrs(p_nxge_t nxgep)
{
	int		i, inum;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_remove_intrs: interrupts not registered"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum,
			    intrp->msi_intx_cnt,
			    intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) nxge_ldgv_uninit(nxgep);

	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
	    "#msix-request");

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}
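
/*
 * Remove the soft interrupt (resched_id) that nxge_add_soft_intrs()
 * registered for the nxge_reschedule() handler.
 */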
/*ARGSUSED*/
static void
nxge_remove_soft_intrs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
	if (nxgep->resched_id) {
		ddi_remove_softintr(nxgep->resched_id);
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_remove_soft_intrs: removed"));
		nxgep->resched_id = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
}

/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}
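
/*
 * Register the instance with the GLDv3 MAC layer: fill in a
 * mac_register_t (callbacks, source MAC address, SDU derived from
 * maxframesize less the Ethernet/VLAN/CRC overhead, VLAN margin and
 * the driver-private property list) and call mac_register().
 */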
static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	status = mac_register(macp, &nxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}

void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev:func # %d",
	    nxgep->function_num));
	/*
	 * Loop through existing per neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
		}

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}
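
/*
 * Detach this instance from the shared per-Neptune hardware list:
 * clear its slot in nxge_p[], decrement ndevs, and when the last
 * function goes away destroy the shared locks, run nxge_hio_uninit()
 * and unlink/free the list element.
 */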
static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
dip $%p " 6496*6512Ssowmini "ndevs %d (middle)", 6497*6512Ssowmini nxgep->function_num, 6498*6512Ssowmini hw_p, 6499*6512Ssowmini p_dip, 6500*6512Ssowmini hw_p->ndevs)); 65013859Sml29623 h_hw_p->next = hw_p->next; 65023859Sml29623 } 65033859Sml29623 65046495Sspeer nxgep->nxge_hw_p = NULL; 65053859Sml29623 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 65063859Sml29623 } 65073859Sml29623 break; 65083859Sml29623 } else { 65093859Sml29623 h_hw_p = hw_p; 65103859Sml29623 } 65113859Sml29623 } 65123859Sml29623 65133859Sml29623 MUTEX_EXIT(&nxge_common_lock); 65143859Sml29623 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6515*6512Ssowmini "==> nxge_uninit_common_device (nxge_hw_list) $%p", 6516*6512Ssowmini nxge_hw_list)); 65173859Sml29623 65183859Sml29623 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device")); 65193859Sml29623 } 65204732Sdavemq 65214732Sdavemq /* 65224977Sraghus * Determines the number of ports from the niu_type or the platform type. 65234732Sdavemq * Returns the number of ports, or returns zero on failure. 65244732Sdavemq */ 65254732Sdavemq 65264732Sdavemq int 65274977Sraghus nxge_get_nports(p_nxge_t nxgep) 65284732Sdavemq { 65294732Sdavemq int nports = 0; 65304732Sdavemq 65314977Sraghus switch (nxgep->niu_type) { 65324732Sdavemq case N2_NIU: 65334732Sdavemq case NEPTUNE_2_10GF: 65344732Sdavemq nports = 2; 65354732Sdavemq break; 65364732Sdavemq case NEPTUNE_4_1GC: 65374732Sdavemq case NEPTUNE_2_10GF_2_1GC: 65384732Sdavemq case NEPTUNE_1_10GF_3_1GC: 65394732Sdavemq case NEPTUNE_1_1GC_1_10GF_2_1GC: 65406261Sjoycey case NEPTUNE_2_10GF_2_1GRF: 65414732Sdavemq nports = 4; 65424732Sdavemq break; 65434732Sdavemq default: 65444977Sraghus switch (nxgep->platform_type) { 65454977Sraghus case P_NEPTUNE_NIU: 65464977Sraghus case P_NEPTUNE_ATLAS_2PORT: 65474977Sraghus nports = 2; 65484977Sraghus break; 65494977Sraghus case P_NEPTUNE_ATLAS_4PORT: 65504977Sraghus case P_NEPTUNE_MARAMBA_P0: 65514977Sraghus case P_NEPTUNE_MARAMBA_P1: 65525196Ssbehera case P_NEPTUNE_ALONSO: 65534977Sraghus nports = 4; 65544977Sraghus break; 65554977Sraghus default: 65564977Sraghus break; 65574977Sraghus } 65584732Sdavemq break; 65594732Sdavemq } 65604732Sdavemq 65614732Sdavemq return (nports); 65624732Sdavemq } 65635013Sml29623 65645013Sml29623 /* 65655013Sml29623 * The following two functions are to support 65665013Sml29623 * PSARC/2007/453 MSI-X interrupt limit override. 65675013Sml29623 */ 65685013Sml29623 static int 65695013Sml29623 nxge_create_msi_property(p_nxge_t nxgep) 65705013Sml29623 { 65715013Sml29623 int nmsi; 65725013Sml29623 extern int ncpus; 65735013Sml29623 65745013Sml29623 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property")); 65755013Sml29623 65765013Sml29623 switch (nxgep->mac.portmode) { 65775013Sml29623 case PORT_10G_COPPER: 65785013Sml29623 case PORT_10G_FIBER: 65795013Sml29623 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 65805013Sml29623 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 65815013Sml29623 /* 65825013Sml29623 * The maximum MSI-X requested will be 8. 65835013Sml29623 * If the # of CPUs is less than 8, we will reqeust 65845013Sml29623 * # MSI-X based on the # of CPUs. 
		 */
		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
			nmsi = NXGE_MSIX_REQUEST_10G;
		} else {
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		nmsi = NXGE_MSIX_REQUEST_1G;
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}

/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
	int		err = 0;
	link_flowctrl_t	fl;

	switch (pr_num) {
	case DLD_PROP_AUTONEG:
		*(uint8_t *)pr_val = 1;
		break;
	case DLD_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);
		fl = LINK_FLOWCTRL_RX;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case DLD_PROP_ADV_1000FDX_CAP:
	case DLD_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	case DLD_PROP_ADV_100FDX_CAP:
	case DLD_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	default:
		err = ENOTSUP;
		break;
	}
	return (err);
}