/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * Copyright (c) 2002-2005 Neterion, Inc.
 * All right Reserved.
 *
 * FileName : xgell.c
 *
 * Description:  Xge Link Layer data path implementation
 *
 */

#include "xgell.h"

#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
    sizeof (struct ether_vlan_header))

#define	HEADROOM		2	/* for DIX-only packets */

void header_free_func(void *arg) { }
frtn_t header_frtn = {header_free_func, NULL};

/* DMA attributes used for Tx side */
static struct ddi_dma_attr tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0ULL,				/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
	0xFFFFFFFFULL,			/* dma_attr_count_max */
	0x1ULL,				/* dma_attr_align */
	0xFFF,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
	18,				/* dma_attr_sgllen */
	1,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

/* Aligned DMA attributes used for Tx side */
struct ddi_dma_attr tx_dma_attr_align = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0ULL,				/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
	0xFFFFFFFFULL,			/* dma_attr_count_max */
	4096,				/* dma_attr_align */
	0xFFF,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
	4,				/* dma_attr_sgllen */
	1,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

/*
 * DMA attributes used when using ddi_dma_mem_alloc to
 * allocate HAL descriptors and Rx buffers during replenish
 */
static struct ddi_dma_attr hal_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0ULL,				/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
	0xFFFFFFFFULL,			/* dma_attr_count_max */
	0x1ULL,				/* dma_attr_align */
	0xFFF,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
	1,				/* dma_attr_sgllen */
	1,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

/*
 * Aligned DMA attributes used when using ddi_dma_mem_alloc to
 * allocate HAL descriptors and Rx buffers during replenish
 */
struct ddi_dma_attr hal_dma_attr_aligned = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0ULL,				/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
	0xFFFFFFFFULL,			/* dma_attr_count_max */
	4096,				/* dma_attr_align */
	0xFFF,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
	1,				/* dma_attr_sgllen */
	1,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
struct ddi_dma_attr *p_hal_dma_attr_aligned = &hal_dma_attr_aligned;

static int xgell_m_stat(void *, uint_t, uint64_t *);
static int xgell_m_start(void *);
static void xgell_m_stop(void *);
static int xgell_m_promisc(void *, boolean_t);
static int xgell_m_multicst(void *, boolean_t, const uint8_t *);
static int xgell_m_unicst(void *, const uint8_t *);
static void xgell_m_ioctl(void *, queue_t *, mblk_t *);
static mblk_t *xgell_m_tx(void *, mblk_t *);
static boolean_t xgell_m_getcapab(void *, mac_capab_t, void *);

#define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t xgell_m_callbacks = {
	XGELL_M_CALLBACK_FLAGS,
	xgell_m_stat,
	xgell_m_start,
	xgell_m_stop,
	xgell_m_promisc,
	xgell_m_multicst,
	xgell_m_unicst,
	xgell_m_tx,
	NULL,
	xgell_m_ioctl,
	xgell_m_getcapab
};

/*
 * xge_device_poll
 *
 * Cyclic should call me every 1s. xge_callback_event_queued should call me
 * when HAL hope event was rescheduled.
 */
/*ARGSUSED*/
void
xge_device_poll(void *data)
{
	xgelldev_t *lldev = xge_hal_device_private(data);

	mutex_enter(&lldev->genlock);
	if (lldev->is_initialized) {
		xge_hal_device_poll(data);
		lldev->timeout_id = timeout(xge_device_poll, data,
		    XGE_DEV_POLL_TICKS);
	} else if (lldev->in_reset == 1) {
		lldev->timeout_id = timeout(xge_device_poll, data,
		    XGE_DEV_POLL_TICKS);
	} else {
		lldev->timeout_id = 0;
	}
	mutex_exit(&lldev->genlock);
}

/*
 * xge_device_poll_now
 *
 * Will call xge_device_poll() immediately
 */
void
xge_device_poll_now(void *data)
{
	xgelldev_t *lldev = xge_hal_device_private(data);

	mutex_enter(&lldev->genlock);
	if (lldev->is_initialized) {
		xge_hal_device_poll(data);
	}
	mutex_exit(&lldev->genlock);
}

/*
 * xgell_callback_link_up
 *
 * This function is called by HAL to notify HW link up state change.
 */
void
xgell_callback_link_up(void *userdata)
{
	xgelldev_t *lldev = (xgelldev_t *)userdata;

	mac_link_update(lldev->mh, LINK_STATE_UP);
	/* Link states should be reported to user whenever it changes */
	cmn_err(CE_NOTE, "!%s%d: Link is up [10 Gbps Full Duplex]",
	    XGELL_IFNAME, lldev->instance);
}

/*
 * xgell_callback_link_down
 *
 * This function is called by HAL to notify HW link down state change.
 */
void
xgell_callback_link_down(void *userdata)
{
	xgelldev_t *lldev = (xgelldev_t *)userdata;

	mac_link_update(lldev->mh, LINK_STATE_DOWN);
	/* Link states should be reported to user whenever it changes */
	cmn_err(CE_NOTE, "!%s%d: Link is down", XGELL_IFNAME,
	    lldev->instance);
}

/*
 * xgell_rx_buffer_replenish_all
 *
 * To replenish all freed dtr(s) with buffers in free pool. It's called by
 * xgell_rx_buffer_recycle() or xgell_rx_1b_compl().
 * Must be called with pool_lock held.
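 *
 * Each loop iteration pops one buffer from the free list, reserves a ring
 * descriptor from the HAL, points the descriptor at the buffer's DMA
 * address and posts it back to the ring. The loop stops as soon as either
 * the free pool or the ring's descriptors are exhausted.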
 */
static void
xgell_rx_buffer_replenish_all(xgelldev_t *lldev)
{
	xge_hal_dtr_h dtr;
	xgell_rx_buffer_t *rx_buffer;
	xgell_rxd_priv_t *rxd_priv;

	xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));

	while ((lldev->bf_pool.free > 0) &&
	    (xge_hal_ring_dtr_reserve(lldev->ring_main.channelh, &dtr) ==
	    XGE_HAL_OK)) {
		rx_buffer = lldev->bf_pool.head;
		lldev->bf_pool.head = rx_buffer->next;
		lldev->bf_pool.free--;

		xge_assert(rx_buffer);
		xge_assert(rx_buffer->dma_addr);

		rxd_priv = (xgell_rxd_priv_t *)
		    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
		    lldev->bf_pool.size);

		rxd_priv->rx_buffer = rx_buffer;
		xge_hal_ring_dtr_post(lldev->ring_main.channelh, dtr);
	}
}

/*
 * xgell_rx_buffer_release
 *
 * The only thing done here is to put the buffer back to the pool.
 * Callers must hold the bf_pool.pool_lock mutex.
 */
static void
xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
{
	xgelldev_t *lldev = rx_buffer->lldev;

	xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));

	/* Put the buffer back to pool */
	rx_buffer->next = lldev->bf_pool.head;
	lldev->bf_pool.head = rx_buffer;

	lldev->bf_pool.free++;
}

/*
 * xgell_rx_buffer_recycle
 *
 * Called by desballoc() to "free" the resource.
 * We will try to replenish all descriptors.
 */
static void
xgell_rx_buffer_recycle(char *arg)
{
	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
	xgelldev_t *lldev = rx_buffer->lldev;

	mutex_enter(&lldev->bf_pool.pool_lock);

	xgell_rx_buffer_release(rx_buffer);
	lldev->bf_pool.post--;

	/*
	 * Before finding a good way to set this hiwat, just always call to
	 * replenish_all. *TODO*
	 */
	if (lldev->is_initialized != 0) {
		xgell_rx_buffer_replenish_all(lldev);
	}

	mutex_exit(&lldev->bf_pool.pool_lock);
}

/*
 * xgell_rx_buffer_alloc
 *
 * Allocate one rx buffer and return with the pointer to the buffer.
 * Return NULL if failed.
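 *
 * The single DMA allocation is laid out as
 *	[HEADROOM][receive data area][xgell_rx_buffer_t]
 * i.e. the xgell_rx_buffer_t bookkeeping structure lives in the tail of the
 * same buffer it describes, so no separate allocation is needed for it.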
 */
static xgell_rx_buffer_t *
xgell_rx_buffer_alloc(xgelldev_t *lldev)
{
	xge_hal_device_t *hldev;
	void *vaddr;
	ddi_dma_handle_t dma_handle;
	ddi_acc_handle_t dma_acch;
	dma_addr_t dma_addr;
	uint_t ncookies;
	ddi_dma_cookie_t dma_cookie;
	size_t real_size;
	extern ddi_device_acc_attr_t *p_xge_dev_attr;
	xgell_rx_buffer_t *rx_buffer;

	hldev = (xge_hal_device_t *)lldev->devh;

	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
	    0, &dma_handle) != DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
		    XGELL_IFNAME, lldev->instance);
		goto handle_failed;
	}

	/* reserve some space at the end of the buffer for recycling */
	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + lldev->bf_pool.size +
	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
	    DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
		    XGELL_IFNAME, lldev->instance);
		goto mem_failed;
	}

	if (HEADROOM + lldev->bf_pool.size + sizeof (xgell_rx_buffer_t) >
	    real_size) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
		    XGELL_IFNAME, lldev->instance);
		goto bind_failed;
	}

	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
	    lldev->bf_pool.size, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
		    XGELL_IFNAME, lldev->instance);
		goto bind_failed;
	}

	if (ncookies != 1 || dma_cookie.dmac_size < lldev->bf_pool.size) {
		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
		    XGELL_IFNAME, lldev->instance);
		goto check_failed;
	}

	dma_addr = dma_cookie.dmac_laddress;

	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
	    sizeof (xgell_rx_buffer_t));
	rx_buffer->next = NULL;
	rx_buffer->vaddr = vaddr;
	rx_buffer->dma_addr = dma_addr;
	rx_buffer->dma_handle = dma_handle;
	rx_buffer->dma_acch = dma_acch;
	rx_buffer->lldev = lldev;
	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
	rx_buffer->frtn.free_arg = (void *)rx_buffer;

	return (rx_buffer);

check_failed:
	(void) ddi_dma_unbind_handle(dma_handle);
bind_failed:
	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
	ddi_dma_mem_free(&dma_acch);
mem_failed:
	ddi_dma_free_handle(&dma_handle);
handle_failed:

	return (NULL);
}

/*
 * xgell_rx_destroy_buffer_pool
 *
 * Destroy the buffer pool. If any buffer is still held by the upper layer,
 * as recorded by bf_pool.post, return DDI_FAILURE so the driver is not
 * unloaded.
 */
static int
xgell_rx_destroy_buffer_pool(xgelldev_t *lldev)
{
	xgell_rx_buffer_t *rx_buffer;
	ddi_dma_handle_t dma_handle;
	ddi_acc_handle_t dma_acch;
	int i;

	/*
	 * If there is any posted buffer, the driver should refuse to be
	 * detached. The upper layer needs to release those buffers first.
	 */
	if (lldev->bf_pool.post != 0) {
		xge_debug_ll(XGE_ERR,
		    "%s%d has some buffers not be recycled, try later!",
		    XGELL_IFNAME, lldev->instance);
		return (DDI_FAILURE);
	}

	/*
	 * Release buffers one by one.
	 */
	for (i = lldev->bf_pool.total; i > 0; i--) {
		rx_buffer = lldev->bf_pool.head;
		xge_assert(rx_buffer != NULL);

		lldev->bf_pool.head = rx_buffer->next;

		dma_handle = rx_buffer->dma_handle;
		dma_acch = rx_buffer->dma_acch;

		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
			lldev->bf_pool.head = rx_buffer;
			return (DDI_FAILURE);
		}
		ddi_dma_mem_free(&dma_acch);
		ddi_dma_free_handle(&dma_handle);

		lldev->bf_pool.total--;
		lldev->bf_pool.free--;
	}

	mutex_destroy(&lldev->bf_pool.pool_lock);
	return (DDI_SUCCESS);
}

/*
 * xgell_rx_create_buffer_pool
 *
 * Initialize RX buffer pool for all RX rings. Refer to rx_buffer_pool_t.
 */
static int
xgell_rx_create_buffer_pool(xgelldev_t *lldev)
{
	xge_hal_device_t *hldev;
	xgell_rx_buffer_t *rx_buffer;
	int i;

	hldev = (xge_hal_device_t *)lldev->devh;

	lldev->bf_pool.total = 0;
	lldev->bf_pool.size = XGELL_MAX_FRAME_SIZE(hldev);
	lldev->bf_pool.head = NULL;
	lldev->bf_pool.free = 0;
	lldev->bf_pool.post = 0;
	lldev->bf_pool.post_hiwat = lldev->config.rx_buffer_post_hiwat;

	mutex_init(&lldev->bf_pool.pool_lock, NULL, MUTEX_DRIVER,
	    hldev->irqh);

	/*
	 * Allocate buffers one by one. If any allocation fails, destroy the
	 * whole pool by calling xgell_rx_destroy_buffer_pool().
	 */
	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
		if ((rx_buffer = xgell_rx_buffer_alloc(lldev)) == NULL) {
			(void) xgell_rx_destroy_buffer_pool(lldev);
			return (DDI_FAILURE);
		}

		rx_buffer->next = lldev->bf_pool.head;
		lldev->bf_pool.head = rx_buffer;

		lldev->bf_pool.total++;
		lldev->bf_pool.free++;
	}

	return (DDI_SUCCESS);
}

/*
 * xgell_rx_dtr_replenish
 *
 * Replenish the descriptor with an rx_buffer from the RX buffer pool.
 * The dtr should be posted right away.
 */
xge_hal_status_e
xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
    void *userdata, xge_hal_channel_reopen_e reopen)
{
	xgell_ring_t *ring = userdata;
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_t *rx_buffer;
	xgell_rxd_priv_t *rxd_priv;

	if (lldev->bf_pool.head == NULL) {
		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
		return (XGE_HAL_FAIL);
	}
	rx_buffer = lldev->bf_pool.head;
	lldev->bf_pool.head = rx_buffer->next;
	lldev->bf_pool.free--;

	xge_assert(rx_buffer);
	xge_assert(rx_buffer->dma_addr);

	rxd_priv = (xgell_rxd_priv_t *)
	    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, lldev->bf_pool.size);

	rxd_priv->rx_buffer = rx_buffer;

	return (XGE_HAL_OK);
}

/*
 * xgell_get_ip_offset
 *
 * Calculate the offset to IP header.
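 *
 * The offset depends on the L2 framing reported by the hardware: DIX,
 * LLC, SNAP and IPX framing each contribute their own header sizes, and
 * a further XGE_HAL_HEADER_VLAN_SIZE is added when the IP frame carries
 * a VLAN tag.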
 */
static inline int
xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
{
	int ip_off;

	/* get IP-header offset */
	switch (ext_info->frame) {
	case XGE_HAL_FRAME_TYPE_DIX:
		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
		break;
	case XGE_HAL_FRAME_TYPE_IPX:
		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
		    XGE_HAL_HEADER_802_2_SIZE +
		    XGE_HAL_HEADER_SNAP_SIZE);
		break;
	case XGE_HAL_FRAME_TYPE_LLC:
		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
		    XGE_HAL_HEADER_802_2_SIZE);
		break;
	case XGE_HAL_FRAME_TYPE_SNAP:
		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
		    XGE_HAL_HEADER_SNAP_SIZE);
		break;
	default:
		ip_off = 0;
		break;
	}

	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
	}

	return (ip_off);
}

/*
 * xgell_rx_hcksum_assoc
 *
 * Inspect the packet type and then call hcksum_assoc() to associate
 * h/w checksum information.
 */
static inline void
xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
    xge_hal_dtr_info_t *ext_info)
{
	int cksum_flags = 0;

	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
				cksum_flags |= HCK_IPV4_HDRCKSUM;
			}
			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
				cksum_flags |= HCK_FULLCKSUM_OK;
			}
			if (cksum_flags) {
				cksum_flags |= HCK_FULLCKSUM;
				(void) hcksum_assoc(mp, NULL, NULL, 0,
				    0, 0, 0, cksum_flags, 0);
			}
		}
	} else if (ext_info->proto &
	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
		/*
		 * Just pass the partial cksum up to IP.
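		 *
		 * 'start' is computed from the frame start: the IP offset
		 * plus the IP header length (a fixed 40 bytes is assumed
		 * for IPv6), while 'end' is the packet length minus the IP
		 * offset. For example, an untagged DIX IPv4 frame with a
		 * 20-byte IP header gives start = 14 + 20 = 34 and
		 * end = pkt_length - 14.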
		 */
		int ip_off = xgell_get_ip_offset(ext_info);
		int start, end = pkt_length - ip_off;

		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
			struct ip *ip =
			    (struct ip *)(vaddr + ip_off);
			start = ip->ip_hl * 4 + ip_off;
		} else {
			start = ip_off + 40;
		}
		cksum_flags |= HCK_PARTIALCKSUM;
		(void) hcksum_assoc(mp, NULL, NULL, start, 0,
		    end, ntohs(ext_info->l4_cksum), cksum_flags,
		    0);
	}
}

/*
 * xgell_rx_1b_msg_alloc
 *
 * Allocate a message header for the data buffer, and decide whether to copy
 * the packet into a new data buffer so the large rx_buffer can be released
 * to save memory.
 *
 * If pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate a
 * new message and copy the payload in.
 */
static mblk_t *
xgell_rx_1b_msg_alloc(xgelldev_t *lldev, xgell_rx_buffer_t *rx_buffer,
    int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
{
	mblk_t *mp;
	char *vaddr;

	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
	/*
	 * Copy the packet into a newly allocated message buffer if pkt_length
	 * is less than XGELL_RX_DMA_LOWAT
	 */
	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
		if ((mp = allocb(pkt_length, 0)) == NULL) {
			return (NULL);
		}
		bcopy(vaddr, mp->b_rptr, pkt_length);
		mp->b_wptr = mp->b_rptr + pkt_length;
		*copyit = B_TRUE;
		return (mp);
	}

	/*
	 * Just allocate an mblk for the current data buffer
	 */
	if ((mp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
	    &rx_buffer->frtn)) == NULL) {
		/* Drop it */
		return (NULL);
	}
	/*
	 * Adjust the b_rptr/b_wptr in the mblk_t structure.
	 */
	mp->b_wptr += pkt_length;

	return (mp);
}

/*
 * xgell_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
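 *
 * Completed descriptors are drained in a loop; received mblks are chained
 * together and handed to mac_rx() in bursts of at most rx_pkt_burst packets,
 * so the buffer pool lock is not held across the whole completion run.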
 */
static xge_hal_status_e
xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
    void *userdata)
{
	xgelldev_t *lldev = ((xgell_ring_t *)userdata)->lldev;
	xgell_rx_buffer_t *rx_buffer;
	mblk_t *mp_head = NULL;
	mblk_t *mp_end = NULL;
	int pkt_burst = 0;

	mutex_enter(&lldev->bf_pool.pool_lock);

	do {
		int pkt_length;
		dma_addr_t dma_data;
		mblk_t *mp;
		boolean_t copyit = B_FALSE;

		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
		    xge_hal_ring_dtr_private(channelh, dtr));
		xge_hal_dtr_info_t ext_info;

		rx_buffer = rxd_priv->rx_buffer;

		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);

		xge_assert(dma_data == rx_buffer->dma_addr);

		if (t_code != 0) {
			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
			    " completed due to error t_code %01x", XGELL_IFNAME,
			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);

			(void) xge_hal_device_handle_tcode(channelh, dtr,
			    t_code);
			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
			xgell_rx_buffer_release(rx_buffer);
			continue;
		}

		/*
		 * Sync the DMA memory
		 */
		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
			    XGELL_IFNAME, lldev->instance);
			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
			xgell_rx_buffer_release(rx_buffer);
			continue;
		}

		/*
		 * Allocate message for the packet.
		 */
		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
			copyit = B_TRUE;
		} else {
			copyit = B_FALSE;
		}

		mp = xgell_rx_1b_msg_alloc(lldev, rx_buffer, pkt_length,
		    &ext_info, &copyit);

		xge_hal_ring_dtr_free(channelh, dtr);

		/*
		 * Release the buffer and recycle it later
		 */
		if ((mp == NULL) || copyit) {
			xgell_rx_buffer_release(rx_buffer);
		} else {
			/*
			 * Count it since the buffer should be loaned up.
			 */
			lldev->bf_pool.post++;
		}
		if (mp == NULL) {
			xge_debug_ll(XGE_ERR,
			    "%s%d: rx: can not allocate mp mblk",
			    XGELL_IFNAME, lldev->instance);
			continue;
		}

		/*
		 * Associate cksum_flags per packet type and h/w
		 * cksum flags.
		 */
		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr +
		    HEADROOM, pkt_length, &ext_info);

		if (mp_head == NULL) {
			mp_head = mp;
			mp_end = mp;
		} else {
			mp_end->b_next = mp;
			mp_end = mp;
		}

		if (++pkt_burst < lldev->config.rx_pkt_burst)
			continue;

		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
			/* Replenish rx buffers */
			xgell_rx_buffer_replenish_all(lldev);
		}
		mutex_exit(&lldev->bf_pool.pool_lock);
		if (mp_head != NULL) {
			mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle,
			    mp_head);
		}
		mp_head = mp_end = NULL;
		pkt_burst = 0;
		mutex_enter(&lldev->bf_pool.pool_lock);

	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
	    XGE_HAL_OK);

	/*
	 * Always call replenish_all to recycle rx_buffers.
	 */
	xgell_rx_buffer_replenish_all(lldev);
	mutex_exit(&lldev->bf_pool.pool_lock);

	if (mp_head != NULL) {
		mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle, mp_head);
	}

	return (XGE_HAL_OK);
}

/*
 * xgell_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all mblks whose data has already been DMA'ed into the
 * NIC's internal memory.
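 *
 * Completion also advances resched_avail; if a previous xgell_send() had to
 * back off for lack of descriptors (resched_retry), an
 * XGELL_EVENT_RESCHED_NEEDED event is produced so that transmission can be
 * resumed.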
 */
static xge_hal_status_e
xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
    void *userdata)
{
	xgelldev_t *lldev = userdata;

	do {
		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
		    xge_hal_fifo_dtr_private(dtr));
		mblk_t *mp = txd_priv->mblk;
		int i;

		if (t_code) {
			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
			    " completed due to error t_code %01x", XGELL_IFNAME,
			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);

			(void) xge_hal_device_handle_tcode(channelh, dtr,
			    t_code);
		}

		for (i = 0; i < txd_priv->handle_cnt; i++) {
			xge_assert(txd_priv->dma_handles[i]);
			(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
			ddi_dma_free_handle(&txd_priv->dma_handles[i]);
			txd_priv->dma_handles[i] = 0;
		}

		xge_hal_fifo_dtr_free(channelh, dtr);

		freemsg(mp);
		lldev->resched_avail++;

	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
	    XGE_HAL_OK);

	if (lldev->resched_retry &&
	    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
	    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
		xge_debug_ll(XGE_TRACE, "%s%d: IRQ produced event for queue %d",
		    XGELL_IFNAME, lldev->instance,
		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid);
		lldev->resched_send = lldev->resched_avail;
		lldev->resched_retry = 0;
	}

	return (XGE_HAL_OK);
}

/*
 * xgell_send
 * @lldev: pointer to xgelldev_t structure
 * @mblk: pointer to network buffer, i.e. mblk_t structure
 *
 * Called by xgell_m_tx to transmit the packet to the XFRAME firmware.
 * A pointer to an M_DATA message that contains the packet is passed to
 * this routine.
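 *
 * Each fragment of the message is either copied inline into the Tx
 * descriptor (when it is shorter than tx_dma_lowat and the running copy
 * total stays under tx_copied_max) or DMA-bound with its own handle; if the
 * fragment count approaches the fifo's max_frags limit, the remainder of
 * the chain is pulled up into a single mblk.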
 */
static boolean_t
xgell_send(xgelldev_t *lldev, mblk_t *mp)
{
	mblk_t *bp;
	boolean_t retry;
	xge_hal_device_t *hldev = lldev->devh;
	xge_hal_status_e status;
	xge_hal_dtr_h dtr;
	xgell_txd_priv_t *txd_priv;
	uint32_t hckflags;
	uint32_t mss;
	int handle_cnt, frag_cnt, ret, i, copied;
	boolean_t used_copy;

_begin:
	retry = B_FALSE;
	handle_cnt = frag_cnt = 0;

	if (!lldev->is_initialized || lldev->in_reset)
		return (B_FALSE);

	/*
	 * If the free Tx dtrs count reaches the lower threshold,
	 * inform the gld to stop sending more packets till the free
	 * dtrs count exceeds higher threshold. Driver informs the
	 * gld through gld_sched call, when the free dtrs count exceeds
	 * the higher threshold.
	 */
	if (xge_hal_channel_dtr_count(lldev->fifo_channel)
	    <= XGELL_TX_LEVEL_LOW) {
		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
		    "free descriptors count at low threshold %d",
		    XGELL_IFNAME, lldev->instance,
		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid,
		    XGELL_TX_LEVEL_LOW);
		retry = B_TRUE;
		goto _exit;
	}

	status = xge_hal_fifo_dtr_reserve(lldev->fifo_channel, &dtr);
	if (status != XGE_HAL_OK) {
		switch (status) {
		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
			xge_debug_ll(XGE_ERR,
			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
			    lldev->instance,
			    ((xge_hal_channel_t *)
			    lldev->fifo_channel)->post_qid);
			retry = B_TRUE;
			goto _exit;
		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
			    " out of descriptors.", XGELL_IFNAME,
			    lldev->instance,
			    ((xge_hal_channel_t *)
			    lldev->fifo_channel)->post_qid);
			retry = B_TRUE;
			goto _exit;
		default:
			return (B_FALSE);
		}
	}

	txd_priv = xge_hal_fifo_dtr_private(dtr);
	txd_priv->mblk = mp;

	/*
	 * The VLAN tag should be passed down along with the MAC header, so
	 * h/w needn't do insertion.
	 *
	 * For a NIC driver that has to strip and re-insert the VLAN tag, the
	 * example is the other implementation for xge. The driver can simply
	 * bcopy() ether_vlan_header to overwrite the VLAN tag and let h/w
	 * insert the tag automatically, since it's impossible that GLD sends
	 * down mp(s) with a split ether_vlan_header.
	 *
	 * struct ether_vlan_header *evhp;
	 * uint16_t tci;
	 *
	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
	 *	tci = ntohs(evhp->ether_tci);
	 *	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
	 *	    2 * ETHERADDRL);
	 *	mp->b_rptr += VLAN_TAGSZ;
	 *
	 *	xge_hal_fifo_dtr_vlan_set(dtr, tci);
	 * }
	 */

	copied = 0;
	used_copy = B_FALSE;
	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		int mblen;
		uint_t ncookies;
		ddi_dma_cookie_t dma_cookie;
		ddi_dma_handle_t dma_handle;

		/* skip zero-length message blocks */
		mblen = MBLKL(bp);
		if (mblen == 0) {
			continue;
		}

		/*
		 * Check the message length to decide to DMA or bcopy() data
		 * to tx descriptor(s).
		 */
		if (mblen < lldev->config.tx_dma_lowat &&
		    (copied + mblen) < lldev->tx_copied_max) {
			xge_hal_status_e rc;
			rc = xge_hal_fifo_dtr_buffer_append(lldev->fifo_channel,
			    dtr, bp->b_rptr, mblen);
			if (rc == XGE_HAL_OK) {
				used_copy = B_TRUE;
				copied += mblen;
				continue;
			} else if (used_copy) {
				xge_hal_fifo_dtr_buffer_finalize(
				    lldev->fifo_channel, dtr, frag_cnt++);
				used_copy = B_FALSE;
			}
		} else if (used_copy) {
			xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel,
			    dtr, frag_cnt++);
			used_copy = B_FALSE;
		}

		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
		    DDI_DMA_DONTWAIT, 0, &dma_handle);
		if (ret != DDI_SUCCESS) {
			xge_debug_ll(XGE_ERR,
			    "%s%d: can not allocate dma handle", XGELL_IFNAME,
			    lldev->instance);
			goto _exit_cleanup;
		}

		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
		    (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
		    &dma_cookie, &ncookies);

		switch (ret) {
		case DDI_DMA_MAPPED:
			/* everything's fine */
			break;

		case DDI_DMA_NORESOURCES:
			xge_debug_ll(XGE_ERR,
			    "%s%d: can not bind dma address",
			    XGELL_IFNAME, lldev->instance);
			ddi_dma_free_handle(&dma_handle);
			goto _exit_cleanup;

		case DDI_DMA_NOMAPPING:
		case DDI_DMA_INUSE:
		case DDI_DMA_TOOBIG:
		default:
			/* drop packet, don't retry */
			xge_debug_ll(XGE_ERR,
			    "%s%d: can not map message buffer",
			    XGELL_IFNAME, lldev->instance);
			ddi_dma_free_handle(&dma_handle);
			goto _exit_cleanup;
		}

		if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
			    "requested c:%d+f:%d", XGELL_IFNAME,
			    lldev->instance, ncookies, frag_cnt);
			(void) ddi_dma_unbind_handle(dma_handle);
			ddi_dma_free_handle(&dma_handle);
			goto _exit_cleanup;
		}

		/* setup the descriptors for this data buffer */
		while (ncookies) {
			xge_hal_fifo_dtr_buffer_set(lldev->fifo_channel, dtr,
			    frag_cnt++, dma_cookie.dmac_laddress,
			    dma_cookie.dmac_size);
			if (--ncookies) {
				ddi_dma_nextcookie(dma_handle, &dma_cookie);
			}

		}

		txd_priv->dma_handles[handle_cnt++] = dma_handle;

		if (bp->b_cont &&
		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
		    hldev->config.fifo.max_frags)) {
			mblk_t *nmp;

			xge_debug_ll(XGE_TRACE,
			    "too many FRAGs [%d], pull up them", frag_cnt);

			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
				/* Drop packet, don't retry */
				xge_debug_ll(XGE_ERR,
				    "%s%d: can not pullup message buffer",
				    XGELL_IFNAME, lldev->instance);
				goto _exit_cleanup;
			}
			freemsg(bp->b_cont);
			bp->b_cont = nmp;
		}
	}

	/* finalize unfinished copies */
	if (used_copy) {
		xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel, dtr,
		    frag_cnt++);
	}

	txd_priv->handle_cnt = handle_cnt;

	/*
	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
	 * do all necessary work.
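	 *
	 * The flags retrieved below via hcksum_retrieve() drive the rest of
	 * the offload setup: HW_LSO with a non-zero MSS enables LSO,
	 * HCK_IPV4_HDRCKSUM sets XGE_HAL_TXD_TX_CKO_IPV4_EN, and
	 * HCK_FULLCKSUM sets XGE_HAL_TXD_TX_CKO_TCP_EN/UDP_EN in the
	 * descriptor.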
	 */
	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, &mss, &hckflags);
	if ((hckflags & HW_LSO) && (mss != 0)) {
		xge_hal_fifo_dtr_mss_set(dtr, mss);
	}

	if (hckflags & HCK_IPV4_HDRCKSUM) {
		xge_hal_fifo_dtr_cksum_set_bits(dtr,
		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
	}
	if (hckflags & HCK_FULLCKSUM) {
		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
		    XGE_HAL_TXD_TX_CKO_UDP_EN);
	}

	xge_hal_fifo_dtr_post(lldev->fifo_channel, dtr);

	return (B_TRUE);

_exit_cleanup:

	for (i = 0; i < handle_cnt; i++) {
		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
		txd_priv->dma_handles[i] = 0;
	}

	xge_hal_fifo_dtr_free(lldev->fifo_channel, dtr);

_exit:
	if (retry) {
		if (lldev->resched_avail != lldev->resched_send &&
		    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
		    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
			lldev->resched_send = lldev->resched_avail;
			return (B_FALSE);
		} else {
			lldev->resched_retry = 1;
		}
	}

	freemsg(mp);
	return (B_TRUE);
}

/*
 * xgell_m_tx
 * @arg: pointer to the xgelldev_t structure
 * @mp: pointer to the message buffer
 *
 * Called by MAC Layer to send a chain of packets
 */
static mblk_t *
xgell_m_tx(void *arg, mblk_t *mp)
{
	xgelldev_t *lldev = arg;
	mblk_t *next;

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!xgell_send(lldev, mp)) {
			mp->b_next = next;
			break;
		}
		mp = next;
	}

	return (mp);
}

/*
 * xgell_rx_dtr_term
 *
 * Function will be called by HAL to terminate all DTRs for
 * Ring(s) type of channels.
 */
static void
xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
{
	xgell_rxd_priv_t *rxd_priv =
	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;

	if (state == XGE_HAL_DTR_STATE_POSTED) {
		xgelldev_t *lldev = rx_buffer->lldev;

		mutex_enter(&lldev->bf_pool.pool_lock);
		xge_hal_ring_dtr_free(channelh, dtrh);
		xgell_rx_buffer_release(rx_buffer);
		mutex_exit(&lldev->bf_pool.pool_lock);
	}
}

/*
 * xgell_tx_term
 *
 * Function will be called by HAL to terminate all DTRs for
 * Fifo(s) type of channels.
 */
static void
xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
{
	xgell_txd_priv_t *txd_priv =
	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
	mblk_t *mp = txd_priv->mblk;
	int i;

	/*
	 * for Tx we must clean up the DTR *only* if it has been
	 * posted!
	 */
	if (state != XGE_HAL_DTR_STATE_POSTED) {
		return;
	}

	for (i = 0; i < txd_priv->handle_cnt; i++) {
		xge_assert(txd_priv->dma_handles[i]);
		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
		txd_priv->dma_handles[i] = 0;
	}

	xge_hal_fifo_dtr_free(channelh, dtrh);

	freemsg(mp);
}

/*
 * xgell_tx_open
 * @lldev: the link layer object
 *
 * Initialize and open all Tx channels;
 */
static boolean_t
xgell_tx_open(xgelldev_t *lldev)
{
	xge_hal_status_e status;
	u64 adapter_status;
	xge_hal_channel_attr_t attr;

	attr.post_qid = 0;
	attr.compl_qid = 0;
	attr.callback = xgell_xmit_compl;
	attr.per_dtr_space = sizeof (xgell_txd_priv_t);
	attr.flags = 0;
	attr.type = XGE_HAL_CHANNEL_TYPE_FIFO;
	attr.userdata = lldev;
	attr.dtr_init = NULL;
	attr.dtr_term = xgell_tx_term;

	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
		xge_debug_ll(XGE_ERR, "%s%d: device is not ready "
		    "adaper status reads 0x%"PRIx64, XGELL_IFNAME,
		    lldev->instance, (uint64_t)adapter_status);
		return (B_FALSE);
	}

	status = xge_hal_channel_open(lldev->devh, &attr,
	    &lldev->fifo_channel, XGE_HAL_CHANNEL_OC_NORMAL);
	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel "
		    "got status code %d", XGELL_IFNAME,
		    lldev->instance, status);
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * xgell_rx_open
 * @lldev: the link layer object
 *
 * Initialize and open all Rx channels;
 */
static boolean_t
xgell_rx_open(xgelldev_t *lldev)
{
	xge_hal_status_e status;
	u64 adapter_status;
	xge_hal_channel_attr_t attr;

	attr.post_qid = XGELL_RING_MAIN_QID;
	attr.compl_qid = 0;
	attr.callback = xgell_rx_1b_compl;
	attr.per_dtr_space = sizeof (xgell_rxd_priv_t);
	attr.flags = 0;
	attr.type = XGE_HAL_CHANNEL_TYPE_RING;
	attr.dtr_init = xgell_rx_dtr_replenish;
	attr.dtr_term = xgell_rx_dtr_term;

	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
		xge_debug_ll(XGE_ERR,
		    "%s%d: device is not ready adaper status reads 0x%"PRIx64,
		    XGELL_IFNAME, lldev->instance,
		    (uint64_t)adapter_status);
		return (B_FALSE);
	}

	lldev->ring_main.lldev = lldev;
	attr.userdata = &lldev->ring_main;

	status = xge_hal_channel_open(lldev->devh, &attr,
	    &lldev->ring_main.channelh, XGE_HAL_CHANNEL_OC_NORMAL);
	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel got status "
		    " code %d", XGELL_IFNAME, lldev->instance, status);
		return (B_FALSE);
	}

	return (B_TRUE);
}

static int
xgell_initiate_start(xgelldev_t *lldev)
{
	xge_hal_status_e status;
	xge_hal_device_t *hldev = lldev->devh;
	int maxpkt = hldev->config.mtu;

	/* check initial mtu before enabling the device */
	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
		    XGELL_IFNAME, lldev->instance, maxpkt);
		return (EINVAL);
	}

	/* set initial mtu before enabling the device */
	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
		    XGELL_IFNAME, lldev->instance, maxpkt);
		return (EIO);
	}

	/* tune jumbo/normal frame UFC counters */
	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_b = \
	    maxpkt > XGE_HAL_DEFAULT_MTU ?
	    XGE_HAL_DEFAULT_RX_UFC_B_J :
	    XGE_HAL_DEFAULT_RX_UFC_B_N;

	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_c = \
	    maxpkt > XGE_HAL_DEFAULT_MTU ?
	    XGE_HAL_DEFAULT_RX_UFC_C_J :
	    XGE_HAL_DEFAULT_RX_UFC_C_N;

	/* now, enable the device */
	status = xge_hal_device_enable(lldev->devh);
	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
		    XGELL_IFNAME, lldev->instance);
		return (EIO);
	}

	if (!xgell_rx_open(lldev)) {
		status = xge_hal_device_disable(lldev->devh);
		if (status != XGE_HAL_OK) {
			u64 adapter_status;
			(void) xge_hal_device_status(lldev->devh,
			    &adapter_status);
			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
			    "the device. adaper status 0x%"PRIx64
			    " returned status %d",
			    XGELL_IFNAME, lldev->instance,
			    (uint64_t)adapter_status, status);
		}
		xge_os_mdelay(1500);
		return (ENOMEM);
	}

	if (!xgell_tx_open(lldev)) {
		status = xge_hal_device_disable(lldev->devh);
		if (status != XGE_HAL_OK) {
			u64 adapter_status;
			(void) xge_hal_device_status(lldev->devh,
			    &adapter_status);
			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
			    "the device. adaper status 0x%"PRIx64
			    " returned status %d",
			    XGELL_IFNAME, lldev->instance,
			    (uint64_t)adapter_status, status);
		}
		xge_os_mdelay(1500);
		xge_hal_channel_close(lldev->ring_main.channelh,
		    XGE_HAL_CHANNEL_OC_NORMAL);
		return (ENOMEM);
	}

	/* time to enable interrupts */
	xge_hal_device_intr_enable(lldev->devh);

	lldev->is_initialized = 1;

	return (0);
}

static void
xgell_initiate_stop(xgelldev_t *lldev)
{
	xge_hal_status_e status;

	lldev->is_initialized = 0;

	status = xge_hal_device_disable(lldev->devh);
	if (status != XGE_HAL_OK) {
		u64 adapter_status;
		(void) xge_hal_device_status(lldev->devh, &adapter_status);
		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
		    "the device. adaper status 0x%"PRIx64" returned status %d",
adaper status 0x%"PRIx64" returned status %d", 14051256Syl150051 XGELL_IFNAME, lldev->instance, 14061256Syl150051 (uint64_t)adapter_status, status); 14071256Syl150051 } 14081256Syl150051 xge_hal_device_intr_disable(lldev->devh); 14091256Syl150051 14101256Syl150051 xge_debug_ll(XGE_TRACE, "%s", 14111256Syl150051 "waiting for device irq to become quiescent..."); 14121256Syl150051 xge_os_mdelay(1500); 14131256Syl150051 14141256Syl150051 xge_queue_flush(xge_hal_device_queue(lldev->devh)); 14151256Syl150051 14161256Syl150051 xge_hal_channel_close(lldev->ring_main.channelh, 14171256Syl150051 XGE_HAL_CHANNEL_OC_NORMAL); 14181256Syl150051 14191256Syl150051 xge_hal_channel_close(lldev->fifo_channel, 14201256Syl150051 XGE_HAL_CHANNEL_OC_NORMAL); 14211256Syl150051 } 14221256Syl150051 14231256Syl150051 /* 14241256Syl150051 * xgell_m_start 14251256Syl150051 * @arg: pointer to device private strucutre(hldev) 14261256Syl150051 * 14271256Syl150051 * This function is called by MAC Layer to enable the XFRAME 14281256Syl150051 * firmware to generate interrupts and also prepare the 14291256Syl150051 * driver to call mac_rx for delivering receive packets 14301256Syl150051 * to MAC Layer. 14311256Syl150051 */ 14321256Syl150051 static int 14331256Syl150051 xgell_m_start(void *arg) 14341256Syl150051 { 14353115Syl150051 xgelldev_t *lldev = arg; 14363115Syl150051 xge_hal_device_t *hldev = lldev->devh; 14371256Syl150051 int ret; 14381256Syl150051 14391256Syl150051 xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME, 14401256Syl150051 lldev->instance); 14411256Syl150051 14421256Syl150051 mutex_enter(&lldev->genlock); 14431256Syl150051 14441256Syl150051 if (lldev->is_initialized) { 14451256Syl150051 xge_debug_ll(XGE_ERR, "%s%d: device is already initialized", 14461256Syl150051 XGELL_IFNAME, lldev->instance); 14471256Syl150051 mutex_exit(&lldev->genlock); 14481256Syl150051 return (EINVAL); 14491256Syl150051 } 14501256Syl150051 14511256Syl150051 hldev->terminating = 0; 14521256Syl150051 if (ret = xgell_initiate_start(lldev)) { 14531256Syl150051 mutex_exit(&lldev->genlock); 14541256Syl150051 return (ret); 14551256Syl150051 } 14561256Syl150051 14571256Syl150051 lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS); 14581256Syl150051 14591256Syl150051 mutex_exit(&lldev->genlock); 14601256Syl150051 14611256Syl150051 return (0); 14621256Syl150051 } 14631256Syl150051 14641256Syl150051 /* 14651256Syl150051 * xgell_m_stop 14661256Syl150051 * @arg: pointer to device private data (hldev) 14671256Syl150051 * 14681256Syl150051 * This function is called by the MAC Layer to disable 14691256Syl150051 * the XFRAME firmware for generating any interrupts and 14701256Syl150051 * also stop the driver from calling mac_rx() for 14711256Syl150051 * delivering data packets to the MAC Layer. 
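 *
 * Stop sequence, as implemented below: mark the HAL device as
 * terminating, call xgell_initiate_stop() to disable the device and
 * its interrupts and to close the ring and fifo channels, reset the
 * hardware, and finally cancel the xge_device_poll() timeout that
 * xgell_m_start() had scheduled.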
14721256Syl150051 */ 14731256Syl150051 static void 14741256Syl150051 xgell_m_stop(void *arg) 14751256Syl150051 { 14763115Syl150051 xgelldev_t *lldev = arg; 14773115Syl150051 xge_hal_device_t *hldev = lldev->devh; 14781256Syl150051 14791256Syl150051 xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP"); 14801256Syl150051 14811256Syl150051 mutex_enter(&lldev->genlock); 14821256Syl150051 if (!lldev->is_initialized) { 14831256Syl150051 xge_debug_ll(XGE_ERR, "%s", "device is not initialized..."); 14841256Syl150051 mutex_exit(&lldev->genlock); 14851256Syl150051 return; 14861256Syl150051 } 14871256Syl150051 14881256Syl150051 xge_hal_device_terminating(hldev); 14891256Syl150051 xgell_initiate_stop(lldev); 14901256Syl150051 14911256Syl150051 /* reset device */ 14921256Syl150051 (void) xge_hal_device_reset(lldev->devh); 14931256Syl150051 14941256Syl150051 mutex_exit(&lldev->genlock); 14951256Syl150051 14963115Syl150051 if (lldev->timeout_id != 0) { 14973115Syl150051 (void) untimeout(lldev->timeout_id); 14983115Syl150051 } 14991256Syl150051 15001256Syl150051 xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer..."); 15011256Syl150051 } 15021256Syl150051 15031256Syl150051 /* 15041256Syl150051 * xgell_onerr_reset 15051256Syl150051 * @lldev: pointer to xgelldev_t structure 15061256Syl150051 * 15071256Syl150051 * This function is called by HAL Event framework to reset the HW 15081256Syl150051 * This function is must be called with genlock taken. 15091256Syl150051 */ 15101256Syl150051 int 15111256Syl150051 xgell_onerr_reset(xgelldev_t *lldev) 15121256Syl150051 { 15131256Syl150051 int rc = 0; 15141256Syl150051 15151256Syl150051 if (!lldev->is_initialized) { 15161256Syl150051 xge_debug_ll(XGE_ERR, "%s%d: can not reset", 15171256Syl150051 XGELL_IFNAME, lldev->instance); 15181256Syl150051 return (rc); 15191256Syl150051 } 15201256Syl150051 15211256Syl150051 lldev->in_reset = 1; 15221256Syl150051 xgell_initiate_stop(lldev); 15231256Syl150051 15241256Syl150051 /* reset device */ 15251256Syl150051 (void) xge_hal_device_reset(lldev->devh); 15261256Syl150051 15271256Syl150051 rc = xgell_initiate_start(lldev); 15281256Syl150051 lldev->in_reset = 0; 15291256Syl150051 15301256Syl150051 return (rc); 15311256Syl150051 } 15321256Syl150051 15331256Syl150051 15341256Syl150051 /* 15351256Syl150051 * xgell_m_unicst 15361256Syl150051 * @arg: pointer to device private strucutre(hldev) 15371256Syl150051 * @mac_addr: 15381256Syl150051 * 15391256Syl150051 * This function is called by MAC Layer to set the physical address 15401256Syl150051 * of the XFRAME firmware. 
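 *
 * The new address is programmed into unicast slot 0 through
 * xge_hal_device_macaddr_set() while holding genlock; EIO is returned
 * if the HAL rejects the address.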
15411256Syl150051 */ 15421256Syl150051 static int 15431256Syl150051 xgell_m_unicst(void *arg, const uint8_t *macaddr) 15441256Syl150051 { 15451256Syl150051 xge_hal_status_e status; 15463115Syl150051 xgelldev_t *lldev = (xgelldev_t *)arg; 15473115Syl150051 xge_hal_device_t *hldev = lldev->devh; 15481256Syl150051 xge_debug_ll(XGE_TRACE, "%s", "MAC_UNICST"); 15491256Syl150051 15501256Syl150051 xge_debug_ll(XGE_TRACE, "%s", "M_UNICAST"); 15511256Syl150051 15521256Syl150051 mutex_enter(&lldev->genlock); 15531256Syl150051 15541256Syl150051 xge_debug_ll(XGE_TRACE, 15551256Syl150051 "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x", 15561256Syl150051 macaddr[0], macaddr[1], macaddr[2], 15571256Syl150051 macaddr[3], macaddr[4], macaddr[5]); 15581256Syl150051 15591256Syl150051 status = xge_hal_device_macaddr_set(hldev, 0, (uchar_t *)macaddr); 15601256Syl150051 if (status != XGE_HAL_OK) { 15611256Syl150051 xge_debug_ll(XGE_ERR, "%s%d: can not set mac address", 15621256Syl150051 XGELL_IFNAME, lldev->instance); 15631256Syl150051 mutex_exit(&lldev->genlock); 15641256Syl150051 return (EIO); 15651256Syl150051 } 15661256Syl150051 15671256Syl150051 mutex_exit(&lldev->genlock); 15681256Syl150051 15691256Syl150051 return (0); 15701256Syl150051 } 15711256Syl150051 15721256Syl150051 15731256Syl150051 /* 15741256Syl150051 * xgell_m_multicst 15751256Syl150051 * @arg: pointer to device private strucutre(hldev) 15761256Syl150051 * @add: 15771256Syl150051 * @mc_addr: 15781256Syl150051 * 15791256Syl150051 * This function is called by MAC Layer to enable or 15801256Syl150051 * disable device-level reception of specific multicast addresses. 15811256Syl150051 */ 15821256Syl150051 static int 15831256Syl150051 xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr) 15841256Syl150051 { 15851256Syl150051 xge_hal_status_e status; 15863115Syl150051 xgelldev_t *lldev = (xgelldev_t *)arg; 15873115Syl150051 xge_hal_device_t *hldev = lldev->devh; 15881256Syl150051 15891256Syl150051 xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add); 15901256Syl150051 15911256Syl150051 mutex_enter(&lldev->genlock); 15921256Syl150051 15931256Syl150051 if (!lldev->is_initialized) { 15941256Syl150051 xge_debug_ll(XGE_ERR, "%s%d: can not set multicast", 15951256Syl150051 XGELL_IFNAME, lldev->instance); 15961256Syl150051 mutex_exit(&lldev->genlock); 15971256Syl150051 return (EIO); 15981256Syl150051 } 15991256Syl150051 16001256Syl150051 /* FIXME: missing HAL functionality: enable_one() */ 16011256Syl150051 16021256Syl150051 status = (add) ? 16031256Syl150051 xge_hal_device_mcast_enable(hldev) : 16041256Syl150051 xge_hal_device_mcast_disable(hldev); 16051256Syl150051 16061256Syl150051 if (status != XGE_HAL_OK) { 16071256Syl150051 xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d", 16081256Syl150051 add ? 
"enable" : "disable", status); 16091256Syl150051 mutex_exit(&lldev->genlock); 16101256Syl150051 return (EIO); 16111256Syl150051 } 16121256Syl150051 16131256Syl150051 mutex_exit(&lldev->genlock); 16141256Syl150051 16151256Syl150051 return (0); 16161256Syl150051 } 16171256Syl150051 16181256Syl150051 16191256Syl150051 /* 16201256Syl150051 * xgell_m_promisc 16211256Syl150051 * @arg: pointer to device private strucutre(hldev) 16221256Syl150051 * @on: 16231256Syl150051 * 16241256Syl150051 * This function is called by MAC Layer to enable or 16251256Syl150051 * disable the reception of all the packets on the medium 16261256Syl150051 */ 16271256Syl150051 static int 16281256Syl150051 xgell_m_promisc(void *arg, boolean_t on) 16291256Syl150051 { 16303115Syl150051 xgelldev_t *lldev = (xgelldev_t *)arg; 16313115Syl150051 xge_hal_device_t *hldev = lldev->devh; 16321256Syl150051 16331256Syl150051 mutex_enter(&lldev->genlock); 16341256Syl150051 16351256Syl150051 xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET"); 16361256Syl150051 16371256Syl150051 if (!lldev->is_initialized) { 16381256Syl150051 xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous", 16391256Syl150051 XGELL_IFNAME, lldev->instance); 16401256Syl150051 mutex_exit(&lldev->genlock); 16411256Syl150051 return (EIO); 16421256Syl150051 } 16431256Syl150051 16441256Syl150051 if (on) { 16451256Syl150051 xge_hal_device_promisc_enable(hldev); 16461256Syl150051 } else { 16471256Syl150051 xge_hal_device_promisc_disable(hldev); 16481256Syl150051 } 16491256Syl150051 16501256Syl150051 mutex_exit(&lldev->genlock); 16511256Syl150051 16521256Syl150051 return (0); 16531256Syl150051 } 16541256Syl150051 16551256Syl150051 /* 16562311Sseb * xgell_m_stat 16571256Syl150051 * @arg: pointer to device private strucutre(hldev) 16581256Syl150051 * 16592311Sseb * This function is called by MAC Layer to get network statistics 16601256Syl150051 * from the driver. 
16611256Syl150051 */ 16622311Sseb static int 16632311Sseb xgell_m_stat(void *arg, uint_t stat, uint64_t *val) 16641256Syl150051 { 16651256Syl150051 xge_hal_stats_hw_info_t *hw_info; 16663115Syl150051 xgelldev_t *lldev = (xgelldev_t *)arg; 16673115Syl150051 xge_hal_device_t *hldev = lldev->devh; 16681256Syl150051 16691256Syl150051 xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET"); 16701256Syl150051 16711256Syl150051 if (!mutex_tryenter(&lldev->genlock)) 16722311Sseb return (EAGAIN); 16731256Syl150051 16741256Syl150051 if (!lldev->is_initialized) { 16751256Syl150051 mutex_exit(&lldev->genlock); 16762311Sseb return (EAGAIN); 16771256Syl150051 } 16781256Syl150051 16791256Syl150051 if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) { 16801256Syl150051 mutex_exit(&lldev->genlock); 16812311Sseb return (EAGAIN); 16821256Syl150051 } 16831256Syl150051 16841256Syl150051 switch (stat) { 16851256Syl150051 case MAC_STAT_IFSPEED: 16862311Sseb *val = 10000000000ull; /* 10G */ 16871256Syl150051 break; 16881256Syl150051 16891256Syl150051 case MAC_STAT_MULTIRCV: 16903115Syl150051 *val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) | 16913115Syl150051 hw_info->rmac_vld_mcst_frms; 16921256Syl150051 break; 16931256Syl150051 16941256Syl150051 case MAC_STAT_BRDCSTRCV: 16953115Syl150051 *val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) | 16963115Syl150051 hw_info->rmac_vld_bcst_frms; 16971256Syl150051 break; 16981256Syl150051 16991256Syl150051 case MAC_STAT_MULTIXMT: 17003115Syl150051 *val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) | 17013115Syl150051 hw_info->tmac_mcst_frms; 17021256Syl150051 break; 17031256Syl150051 17041256Syl150051 case MAC_STAT_BRDCSTXMT: 17053115Syl150051 *val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) | 17063115Syl150051 hw_info->tmac_bcst_frms; 17071256Syl150051 break; 17081256Syl150051 17091256Syl150051 case MAC_STAT_RBYTES: 17103115Syl150051 *val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) | 17113115Syl150051 hw_info->rmac_ttl_octets; 17121256Syl150051 break; 17131256Syl150051 17141256Syl150051 case MAC_STAT_NORCVBUF: 17152311Sseb *val = hw_info->rmac_drop_frms; 17161256Syl150051 break; 17171256Syl150051 17181256Syl150051 case MAC_STAT_IERRORS: 17193115Syl150051 *val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) | 17203115Syl150051 hw_info->rmac_discarded_frms; 17211256Syl150051 break; 17221256Syl150051 17231256Syl150051 case MAC_STAT_OBYTES: 17243115Syl150051 *val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) | 17253115Syl150051 hw_info->tmac_ttl_octets; 17261256Syl150051 break; 17271256Syl150051 17281256Syl150051 case MAC_STAT_NOXMTBUF: 17292311Sseb *val = hw_info->tmac_drop_frms; 17301256Syl150051 break; 17311256Syl150051 17321256Syl150051 case MAC_STAT_OERRORS: 17333115Syl150051 *val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) | 17343115Syl150051 hw_info->tmac_any_err_frms; 17351256Syl150051 break; 17361256Syl150051 17371256Syl150051 case MAC_STAT_IPACKETS: 17383115Syl150051 *val = ((u64) hw_info->rmac_vld_frms_oflow << 32) | 17393115Syl150051 hw_info->rmac_vld_frms; 17401256Syl150051 break; 17411256Syl150051 17421256Syl150051 case MAC_STAT_OPACKETS: 17433115Syl150051 *val = ((u64) hw_info->tmac_frms_oflow << 32) | 17443115Syl150051 hw_info->tmac_frms; 17452311Sseb break; 17462311Sseb 17472311Sseb case ETHER_STAT_FCS_ERRORS: 17482311Sseb *val = hw_info->rmac_fcs_err_frms; 17491256Syl150051 break; 17501256Syl150051 17512311Sseb case ETHER_STAT_TOOLONG_ERRORS: 17522311Sseb *val = hw_info->rmac_long_frms; 17531256Syl150051 break; 17541256Syl150051 17552311Sseb 
case ETHER_STAT_LINK_DUPLEX: 17562311Sseb *val = LINK_DUPLEX_FULL; 17571256Syl150051 break; 17581256Syl150051 17591256Syl150051 default: 17602311Sseb mutex_exit(&lldev->genlock); 17612311Sseb return (ENOTSUP); 17621256Syl150051 } 17631256Syl150051 17641256Syl150051 mutex_exit(&lldev->genlock); 17651256Syl150051 17662311Sseb return (0); 17671256Syl150051 } 17681256Syl150051 17691256Syl150051 /* 17701256Syl150051 * xgell_device_alloc - Allocate new LL device 17711256Syl150051 */ 17721256Syl150051 int 17731256Syl150051 xgell_device_alloc(xge_hal_device_h devh, 17741256Syl150051 dev_info_t *dev_info, xgelldev_t **lldev_out) 17751256Syl150051 { 17761256Syl150051 xgelldev_t *lldev; 17771256Syl150051 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 17781256Syl150051 int instance = ddi_get_instance(dev_info); 17791256Syl150051 17801256Syl150051 *lldev_out = NULL; 17811256Syl150051 17821256Syl150051 xge_debug_ll(XGE_TRACE, "trying to register etherenet device %s%d...", 17831256Syl150051 XGELL_IFNAME, instance); 17841256Syl150051 17851256Syl150051 lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP); 17861256Syl150051 17871256Syl150051 lldev->devh = hldev; 17881256Syl150051 lldev->instance = instance; 17891256Syl150051 lldev->dev_info = dev_info; 17901256Syl150051 17911256Syl150051 *lldev_out = lldev; 17921256Syl150051 17931256Syl150051 ddi_set_driver_private(dev_info, (caddr_t)hldev); 17941256Syl150051 17951256Syl150051 return (DDI_SUCCESS); 17961256Syl150051 } 17971256Syl150051 17981256Syl150051 /* 17991256Syl150051 * xgell_device_free 18001256Syl150051 */ 18011256Syl150051 void 18021256Syl150051 xgell_device_free(xgelldev_t *lldev) 18031256Syl150051 { 18041256Syl150051 xge_debug_ll(XGE_TRACE, "freeing device %s%d", 18051256Syl150051 XGELL_IFNAME, lldev->instance); 18061256Syl150051 18071256Syl150051 kmem_free(lldev, sizeof (xgelldev_t)); 18081256Syl150051 } 18091256Syl150051 18101256Syl150051 /* 18111256Syl150051 * xgell_ioctl 18121256Syl150051 */ 18131256Syl150051 static void 18141256Syl150051 xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 18151256Syl150051 { 18163115Syl150051 xgelldev_t *lldev = arg; 18171256Syl150051 struct iocblk *iocp; 18181256Syl150051 int err = 0; 18191256Syl150051 int cmd; 18201256Syl150051 int need_privilege = 1; 18211256Syl150051 int ret = 0; 18221256Syl150051 18231256Syl150051 18241256Syl150051 iocp = (struct iocblk *)mp->b_rptr; 18251256Syl150051 iocp->ioc_error = 0; 18261256Syl150051 cmd = iocp->ioc_cmd; 18271256Syl150051 xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd); 18281256Syl150051 switch (cmd) { 18291256Syl150051 case ND_GET: 18301256Syl150051 need_privilege = 0; 18311256Syl150051 /* FALLTHRU */ 18321256Syl150051 case ND_SET: 18331256Syl150051 break; 18341256Syl150051 default: 18351256Syl150051 xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd); 18361256Syl150051 miocnak(wq, mp, 0, EINVAL); 18371256Syl150051 return; 18381256Syl150051 } 18391256Syl150051 18401256Syl150051 if (need_privilege) { 18411256Syl150051 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 18421256Syl150051 if (err != 0) { 18431256Syl150051 xge_debug_ll(XGE_ERR, 18441256Syl150051 "drv_priv(): rejected cmd 0x%x, err %d", 18451256Syl150051 cmd, err); 18461256Syl150051 miocnak(wq, mp, 0, err); 18471256Syl150051 return; 18481256Syl150051 } 18491256Syl150051 } 18501256Syl150051 18511256Syl150051 switch (cmd) { 18521256Syl150051 case ND_GET: 18531256Syl150051 /* 18541256Syl150051 * If nd_getset() returns B_FALSE, the command was 18551256Syl150051 * not valid (e.g. 
unknown name), so we just tell the 18561256Syl150051 * top-level ioctl code to send a NAK (with code EINVAL). 18571256Syl150051 * 18581256Syl150051 * Otherwise, nd_getset() will have built the reply to 18591256Syl150051 * be sent (but not actually sent it), so we tell the 18601256Syl150051 * caller to send the prepared reply. 18611256Syl150051 */ 18621256Syl150051 ret = nd_getset(wq, lldev->ndp, mp); 18631256Syl150051 xge_debug_ll(XGE_TRACE, "got ndd get ioctl"); 18641256Syl150051 break; 18651256Syl150051 18661256Syl150051 case ND_SET: 18671256Syl150051 ret = nd_getset(wq, lldev->ndp, mp); 18681256Syl150051 xge_debug_ll(XGE_TRACE, "got ndd set ioctl"); 18691256Syl150051 break; 18701256Syl150051 18711256Syl150051 default: 18721256Syl150051 break; 18731256Syl150051 } 18741256Syl150051 18751256Syl150051 if (ret == B_FALSE) { 18761256Syl150051 xge_debug_ll(XGE_ERR, 18771256Syl150051 "nd_getset(): rejected cmd 0x%x, err %d", 18781256Syl150051 cmd, err); 18791256Syl150051 miocnak(wq, mp, 0, EINVAL); 18801256Syl150051 } else { 18811256Syl150051 mp->b_datap->db_type = iocp->ioc_error == 0 ? 18821256Syl150051 M_IOCACK : M_IOCNAK; 18831256Syl150051 qreply(wq, mp); 18841256Syl150051 } 18851256Syl150051 } 18861256Syl150051 18872311Sseb /* ARGSUSED */ 18882311Sseb static boolean_t 18892311Sseb xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 18901256Syl150051 { 18913115Syl150051 xgelldev_t *lldev = arg; 18923115Syl150051 18932311Sseb switch (cap) { 18942311Sseb case MAC_CAPAB_HCKSUM: { 18952311Sseb uint32_t *hcksum_txflags = cap_data; 18962311Sseb *hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 | 18972311Sseb HCKSUM_IPHDRCKSUM; 18982311Sseb break; 18992311Sseb } 19003115Syl150051 case MAC_CAPAB_LSO: { 19013115Syl150051 mac_capab_lso_t *cap_lso = cap_data; 19023115Syl150051 19033115Syl150051 if (lldev->config.lso_enable) { 19043115Syl150051 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 19053115Syl150051 cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN; 19063115Syl150051 break; 19073115Syl150051 } else { 19083115Syl150051 return (B_FALSE); 19093115Syl150051 } 19103115Syl150051 } 19112311Sseb default: 19122311Sseb return (B_FALSE); 19132311Sseb } 19142311Sseb return (B_TRUE); 19151256Syl150051 } 19161256Syl150051 19171256Syl150051 static int 19181256Syl150051 xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp) 19191256Syl150051 { 19201256Syl150051 xgelldev_t *lldev = (xgelldev_t *)cp; 19211256Syl150051 xge_hal_status_e status; 19221256Syl150051 int count = 0, retsize; 19231256Syl150051 char *buf; 19241256Syl150051 19251256Syl150051 buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP); 19261256Syl150051 if (buf == NULL) { 19271256Syl150051 return (ENOSPC); 19281256Syl150051 } 19291256Syl150051 19301256Syl150051 status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE, 19311256Syl150051 buf, &retsize); 19321256Syl150051 if (status != XGE_HAL_OK) { 19331256Syl150051 kmem_free(buf, XGELL_STATS_BUFSIZE); 19341256Syl150051 xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status); 19351256Syl150051 return (EINVAL); 19361256Syl150051 } 19371256Syl150051 count += retsize; 19381256Syl150051 19391256Syl150051 status = xge_hal_aux_stats_rmac_read(lldev->devh, 19401256Syl150051 XGELL_STATS_BUFSIZE - count, 19411256Syl150051 buf+count, &retsize); 19421256Syl150051 if (status != XGE_HAL_OK) { 19431256Syl150051 kmem_free(buf, XGELL_STATS_BUFSIZE); 19441256Syl150051 xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status); 19451256Syl150051 return (EINVAL); 
	}
	count += retsize;

	status = xge_hal_aux_stats_pci_read(lldev->devh,
	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_STATS_BUFSIZE);
		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
		return (EINVAL);
	}
	count += retsize;

	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_STATS_BUFSIZE);
		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
		return (EINVAL);
	}
	count += retsize;

	status = xge_hal_aux_stats_hal_read(lldev->devh,
	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_STATS_BUFSIZE);
		xge_debug_ll(XGE_ERR, "hal_read(): status %d", status);
		return (EINVAL);
	}
	count += retsize;

	*(buf + count - 1) = '\0'; /* remove last '\n' */
	(void) mi_mpprintf(mp, "%s", buf);
	kmem_free(buf, XGELL_STATS_BUFSIZE);

	return (0);
}

static int
xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
{
	xgelldev_t *lldev = (xgelldev_t *)cp;
	xge_hal_status_e status;
	int retsize;
	char *buf;

	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
	if (buf == NULL) {
		return (ENOSPC);
	}
	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
	    buf, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
		return (EINVAL);
	}
	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
	(void) mi_mpprintf(mp, "%s", buf);
	kmem_free(buf, XGELL_PCICONF_BUFSIZE);

	return (0);
}

static int
xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
{
	xgelldev_t *lldev = (xgelldev_t *)cp;
	xge_hal_status_e status;
	int retsize;
	char *buf;

	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
	if (buf == NULL) {
		return (ENOSPC);
	}
	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
	    buf, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
		return (EINVAL);
	}
*(buf + retsize - 1) = '\0'; /* remove last '\n' */ 20291256Syl150051 (void) mi_mpprintf(mp, "%s", buf); 20301256Syl150051 kmem_free(buf, XGELL_ABOUT_BUFSIZE); 20311256Syl150051 20321256Syl150051 return (0); 20331256Syl150051 } 20341256Syl150051 20351256Syl150051 static unsigned long bar0_offset = 0x110; /* adapter_control */ 20361256Syl150051 20371256Syl150051 static int 20381256Syl150051 xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp) 20391256Syl150051 { 20401256Syl150051 xgelldev_t *lldev = (xgelldev_t *)cp; 20411256Syl150051 xge_hal_status_e status; 20421256Syl150051 int retsize; 20431256Syl150051 char *buf; 20441256Syl150051 20451256Syl150051 buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP); 20461256Syl150051 if (buf == NULL) { 20471256Syl150051 return (ENOSPC); 20481256Syl150051 } 20491256Syl150051 status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset, 20501256Syl150051 XGELL_IOCTL_BUFSIZE, buf, &retsize); 20511256Syl150051 if (status != XGE_HAL_OK) { 20521256Syl150051 kmem_free(buf, XGELL_IOCTL_BUFSIZE); 20531256Syl150051 xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status); 20541256Syl150051 return (EINVAL); 20551256Syl150051 } 20561256Syl150051 *(buf + retsize - 1) = '\0'; /* remove last '\n' */ 20571256Syl150051 (void) mi_mpprintf(mp, "%s", buf); 20581256Syl150051 kmem_free(buf, XGELL_IOCTL_BUFSIZE); 20591256Syl150051 20601256Syl150051 return (0); 20611256Syl150051 } 20621256Syl150051 20631256Syl150051 static int 20641256Syl150051 xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp) 20651256Syl150051 { 20661256Syl150051 unsigned long old_offset = bar0_offset; 20671256Syl150051 char *end; 20681256Syl150051 20691256Syl150051 if (value && *value == '0' && 20701256Syl150051 (*(value + 1) == 'x' || *(value + 1) == 'X')) { 20711256Syl150051 value += 2; 20721256Syl150051 } 20731256Syl150051 20741256Syl150051 bar0_offset = mi_strtol(value, &end, 16); 20751256Syl150051 if (end == value) { 20761256Syl150051 bar0_offset = old_offset; 20771256Syl150051 return (EINVAL); 20781256Syl150051 } 20791256Syl150051 20801256Syl150051 xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset); 20811256Syl150051 20821256Syl150051 return (0); 20831256Syl150051 } 20841256Syl150051 20851256Syl150051 static int 20861256Syl150051 xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp) 20871256Syl150051 { 20881256Syl150051 char *buf; 20891256Syl150051 20901256Syl150051 buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP); 20911256Syl150051 if (buf == NULL) { 20921256Syl150051 return (ENOSPC); 20931256Syl150051 } 20941256Syl150051 (void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level()); 20951256Syl150051 kmem_free(buf, XGELL_IOCTL_BUFSIZE); 20961256Syl150051 20971256Syl150051 return (0); 20981256Syl150051 } 20991256Syl150051 21001256Syl150051 static int 21011256Syl150051 xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 21021256Syl150051 cred_t *credp) 21031256Syl150051 { 21041256Syl150051 int level; 21051256Syl150051 char *end; 21061256Syl150051 21071256Syl150051 level = mi_strtol(value, &end, 10); 21081256Syl150051 if (level < XGE_NONE || level > XGE_ERR || end == value) { 21091256Syl150051 return (EINVAL); 21101256Syl150051 } 21111256Syl150051 21121256Syl150051 xge_hal_driver_debug_level_set(level); 21131256Syl150051 21141256Syl150051 return (0); 21151256Syl150051 } 21161256Syl150051 21171256Syl150051 static int 21181256Syl150051 xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t 
*credp) 21191256Syl150051 { 21201256Syl150051 char *buf; 21211256Syl150051 21221256Syl150051 buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP); 21231256Syl150051 if (buf == NULL) { 21241256Syl150051 return (ENOSPC); 21251256Syl150051 } 21261256Syl150051 (void) mi_mpprintf(mp, "debug_module_mask 0x%08x", 21271256Syl150051 xge_hal_driver_debug_module_mask()); 21281256Syl150051 kmem_free(buf, XGELL_IOCTL_BUFSIZE); 21291256Syl150051 21301256Syl150051 return (0); 21311256Syl150051 } 21321256Syl150051 21331256Syl150051 static int 21341256Syl150051 xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 21351256Syl150051 cred_t *credp) 21361256Syl150051 { 21371256Syl150051 u32 mask; 21381256Syl150051 char *end; 21391256Syl150051 21401256Syl150051 if (value && *value == '0' && 21411256Syl150051 (*(value + 1) == 'x' || *(value + 1) == 'X')) { 21421256Syl150051 value += 2; 21431256Syl150051 } 21441256Syl150051 21451256Syl150051 mask = mi_strtol(value, &end, 16); 21461256Syl150051 if (end == value) { 21471256Syl150051 return (EINVAL); 21481256Syl150051 } 21491256Syl150051 21501256Syl150051 xge_hal_driver_debug_module_mask_set(mask); 21511256Syl150051 21521256Syl150051 return (0); 21531256Syl150051 } 21541256Syl150051 21551256Syl150051 static int 21561256Syl150051 xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp) 21571256Syl150051 { 21581256Syl150051 xgelldev_t *lldev = (xgelldev_t *)(void *)cp; 21591256Syl150051 xge_hal_status_e status; 21601256Syl150051 int retsize; 21611256Syl150051 char *buf; 21621256Syl150051 21631256Syl150051 buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP); 21641256Syl150051 if (buf == NULL) { 21651256Syl150051 return (ENOSPC); 21661256Syl150051 } 21671256Syl150051 status = xge_hal_aux_device_config_read(lldev->devh, 21681256Syl150051 XGELL_DEVCONF_BUFSIZE, 21691256Syl150051 buf, &retsize); 21701256Syl150051 if (status != XGE_HAL_OK) { 21711256Syl150051 kmem_free(buf, XGELL_DEVCONF_BUFSIZE); 21721256Syl150051 xge_debug_ll(XGE_ERR, "device_config_read(): status %d", 21731256Syl150051 status); 21741256Syl150051 return (EINVAL); 21751256Syl150051 } 21761256Syl150051 *(buf + retsize - 1) = '\0'; /* remove last '\n' */ 21771256Syl150051 (void) mi_mpprintf(mp, "%s", buf); 21781256Syl150051 kmem_free(buf, XGELL_DEVCONF_BUFSIZE); 21791256Syl150051 21801256Syl150051 return (0); 21811256Syl150051 } 21821256Syl150051 21831256Syl150051 /* 21841256Syl150051 * xgell_device_register 21851256Syl150051 * @devh: pointer on HAL device 21861256Syl150051 * @config: pointer on this network device configuration 21871256Syl150051 * @ll_out: output pointer. Will be assigned to valid LL device. 
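 *
 * Besides registering with the MAC layer, the routine loads the
 * driver's ndd parameters (pciconf, about, stats, bar0, debug_level,
 * debug_module_mask, devconfig), which can then be read with ndd(1M),
 * for example (illustrative only; the device node name depends on the
 * instance):
 *
 *	# ndd /dev/xge0 stats
 *	# ndd /dev/xge0 devconfig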
21881256Syl150051 * 21891256Syl150051 * This function will allocate and register network device 21901256Syl150051 */ 21911256Syl150051 int 21921256Syl150051 xgell_device_register(xgelldev_t *lldev, xgell_config_t *config) 21931256Syl150051 { 21943115Syl150051 mac_register_t *macp = NULL; 21951256Syl150051 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh; 21961256Syl150051 21971256Syl150051 if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL, 21982311Sseb (caddr_t)lldev) == B_FALSE) 21992311Sseb goto xgell_ndd_fail; 22001256Syl150051 22011256Syl150051 if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL, 22022311Sseb (caddr_t)lldev) == B_FALSE) 22032311Sseb goto xgell_ndd_fail; 22041256Syl150051 22051256Syl150051 if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL, 22062311Sseb (caddr_t)lldev) == B_FALSE) 22072311Sseb goto xgell_ndd_fail; 22081256Syl150051 22091256Syl150051 if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set, 22102311Sseb (caddr_t)lldev) == B_FALSE) 22112311Sseb goto xgell_ndd_fail; 22121256Syl150051 22131256Syl150051 if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get, 22142311Sseb xgell_debug_level_set, (caddr_t)lldev) == B_FALSE) 22152311Sseb goto xgell_ndd_fail; 22161256Syl150051 22171256Syl150051 if (nd_load(&lldev->ndp, "debug_module_mask", 22181256Syl150051 xgell_debug_module_mask_get, xgell_debug_module_mask_set, 22192311Sseb (caddr_t)lldev) == B_FALSE) 22202311Sseb goto xgell_ndd_fail; 22211256Syl150051 22221256Syl150051 if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL, 22232311Sseb (caddr_t)lldev) == B_FALSE) 22242311Sseb goto xgell_ndd_fail; 22251256Syl150051 22261256Syl150051 bcopy(config, &lldev->config, sizeof (xgell_config_t)); 22271256Syl150051 22281256Syl150051 if (xgell_rx_create_buffer_pool(lldev) != DDI_SUCCESS) { 22291256Syl150051 nd_free(&lldev->ndp); 22301256Syl150051 xge_debug_ll(XGE_ERR, "unable to create RX buffer pool"); 22311256Syl150051 return (DDI_FAILURE); 22321256Syl150051 } 22331256Syl150051 22341256Syl150051 mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER, hldev->irqh); 22351256Syl150051 22362311Sseb if ((macp = mac_alloc(MAC_VERSION)) == NULL) 22372311Sseb goto xgell_register_fail; 22382311Sseb macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 22393115Syl150051 macp->m_driver = lldev; 22402311Sseb macp->m_dip = lldev->dev_info; 22412311Sseb macp->m_src_addr = hldev->macaddr[0]; 22422311Sseb macp->m_callbacks = &xgell_m_callbacks; 22432311Sseb macp->m_min_sdu = 0; 22442311Sseb macp->m_max_sdu = hldev->config.mtu; 22451256Syl150051 /* 22461256Syl150051 * Finally, we're ready to register ourselves with the Nemo 22471256Syl150051 * interface; if this succeeds, we're all ready to start() 22481256Syl150051 */ 22493115Syl150051 22503115Syl150051 if (mac_register(macp, &lldev->mh) != 0) 22512311Sseb goto xgell_register_fail; 22521256Syl150051 2253*3392Syl150051 /* Always free the macp after register */ 2254*3392Syl150051 if (macp != NULL) 2255*3392Syl150051 mac_free(macp); 2256*3392Syl150051 22573115Syl150051 /* Calculate tx_copied_max here ??? 
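 * tx_copied_max is derived from the fifo configuration as
 * max_frags * alignment_size * max_aligned_frags; it is the upper
 * bound on how much transmit data the driver will copy into
 * pre-allocated, aligned buffers rather than DMA-binding it.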
 */
	lldev->tx_copied_max = hldev->config.fifo.max_frags *
	    hldev->config.fifo.alignment_size *
	    hldev->config.fifo.max_aligned_frags;

	xge_debug_ll(XGE_TRACE, "ethernet device %s%d registered",
	    XGELL_IFNAME, lldev->instance);

	return (DDI_SUCCESS);

xgell_ndd_fail:
	nd_free(&lldev->ndp);
	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
	return (DDI_FAILURE);

xgell_register_fail:
	if (macp != NULL)
		mac_free(macp);
	nd_free(&lldev->ndp);
	mutex_destroy(&lldev->genlock);
	/* Ignore the return value: Rx has not been started yet */
	(void) xgell_rx_destroy_buffer_pool(lldev);
	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
	return (DDI_FAILURE);
}

/*
 * xgell_device_unregister
 * @lldev: pointer to valid LL device.
 *
 * This function will unregister and free the network device.
 */
int
xgell_device_unregister(xgelldev_t *lldev)
{
	/*
	 * Destroy RX buffer pool.
	 */
	if (xgell_rx_destroy_buffer_pool(lldev) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	if (mac_unregister(lldev->mh) != 0) {
		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
		    XGELL_IFNAME, lldev->instance);
		return (DDI_FAILURE);
	}

	mutex_destroy(&lldev->genlock);

	nd_free(&lldev->ndp);

	xge_debug_ll(XGE_TRACE, "ethernet device %s%d unregistered",
	    XGELL_IFNAME, lldev->instance);

	return (DDI_SUCCESS);
}