/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Multithreaded STREAMS Local Transport Provider.
 *
 * OVERVIEW
 * ========
 *
 * This driver provides TLI as well as socket semantics. It provides
 * connectionless, connection oriented, and connection oriented with orderly
 * release transports for TLI and sockets. Each transport type has its own name
 * space (i.e. it is not possible to connect from a socket to a TLI endpoint) -
 * this removes any name space conflicts when binding to socket style transport
 * addresses.
 *
 * NOTE: There is one exception: Socket ticots and ticotsord transports share
 * the same namespace. In fact, sockets always use ticotsord type transport.
 *
 * The driver mode is specified during open() by the minor number used for
 * open.
 *
 * The sockets in addition have the following semantic differences:
 *	No support for passing up credentials (TL_SET[U]CRED).
 *
 *	Options are passed through transparently on T_CONN_REQ to T_CONN_IND,
 *	from T_UNITDATA_REQ to T_UNITDATA_IND, and from T_OPTDATA_REQ to
 *	T_OPTDATA_IND.
 *
 *	The T_CONN_CON is generated when processing the T_CONN_REQ, i.e. before
 *	a T_CONN_RES is received from the acceptor. This means that a socket
 *	connect will complete before the peer has called accept.
 *
 *
 * MULTITHREADING
 * ==============
 *
 * The driver does not use STREAMS protection mechanisms.
 * Instead it uses a
 * generic "serializer" abstraction. Most of the operations are executed behind
 * the serializer and are, essentially, single-threaded. All functions executed
 * behind the same serializer are strictly serialized. So if one thread calls
 * serializer_enter(serializer, foo, mp1, arg1); and another thread calls
 * serializer_enter(serializer, bar, mp2, arg1); then (depending on which one
 * was called first) the actual sequence will be foo(mp1, arg1); bar(mp2, arg1)
 * or bar(mp2, arg1); foo(mp1, arg1), but foo() and bar() will never run at the
 * same time.
 *
 * Connectionless transports use a single serializer per transport type (one
 * for TLI and one for sockets). Connection-oriented transports use
 * finer-grained serializers.
 *
 * All COTS-type endpoints start their life with private serializers. During
 * connection request processing the endpoint serializer is switched to the
 * listener's serializer and the rest of T_CONN_REQ processing is done on the
 * listener serializer. During T_CONN_RES processing the eager serializer is
 * switched from listener to acceptor serializer and after that point all
 * processing for eager and acceptor happens on this serializer. To avoid races
 * with endpoint closes while its serializer may be changing, closes are blocked
 * while serializers are manipulated.
 *
 * References accounting
 * ---------------------
 *
 * Endpoints are reference counted and freed when the last reference is
 * dropped. Functions within the serializer may access an endpoint state even
 * after an endpoint closed. The te_closing being set on the endpoint indicates
 * that the endpoint entered its close routine.
 *
 * One reference is held for each opened endpoint instance. The reference
 * counter is incremented when the endpoint is linked to another endpoint and
 * decremented when the link disappears. It is also incremented when the
 * endpoint is found by the hash table lookup. This increment is atomic with the
 * lookup itself and happens while the hash table read lock is held.
 *
 * Close synchronization
 * ---------------------
 *
 * During close the endpoint is marked as closing using the te_closing flag. It
 * is usually enough to check the te_closing flag since all other state changes
 * happen after this flag is set and the close entered the serializer.
 * Immediately after setting the te_closing flag, tl_close() enters the
 * serializer and waits until the callback finishes. This allows all functions
 * called within the serializer to simply check te_closing without any locks.
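 *
 * As a minimal illustrative sketch only (tl_foo_ser() is not a real handler in
 * this driver and the message processing is elided), a callback running behind
 * the serializer roughly follows this shape - it relies on te_closing instead
 * of locks, and it drops the serializer and the endpoint reference when done:
 *
 *	static void
 *	tl_foo_ser(mblk_t *mp, tl_endpt_t *tep)
 *	{
 *		if (!tep->te_closing) {
 *			... process mp ...
 *		} else {
 *			freemsg(mp);
 *		}
 *		tl_serializer_exit(tep);
 *		tl_refrele(tep);
 *	}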
 *
 * Serializer management.
 * ---------------------
 *
 * For COTS transports serializers are created when the endpoint is constructed
 * and destroyed when the endpoint is destructed. CLTS transports use global
 * serializers - one for sockets and one for TLI.
 *
 * COTS serializers have separate reference counts to deal with several
 * endpoints sharing the same serializer. There is a subtle problem related to
 * the serializer destruction. The serializer should never be destroyed by any
 * function executed inside serializer. This means that close has to wait till
 * all serializer activity for this endpoint is finished before it can drop the
 * last reference on the endpoint (which may as well free the serializer). This
 * is only relevant for COTS transports which manage serializers
 * dynamically. For CLTS transports close may complete without waiting for all
 * serializer activity to finish since serializer is only destroyed at driver
 * detach time.
 *
 * COTS endpoints keep track of the number of outstanding requests on the
 * serializer for the endpoint. The code handling accept() avoids changing
 * client serializer if it has any pending messages on the serializer and
 * instead moves acceptor to listener's serializer.
 *
 *
 * Use of hash tables
 * ------------------
 *
 * The driver uses modhash hash table implementation. Each transport uses two
 * hash tables - one for finding endpoints by acceptor ID and another one for
 * finding endpoints by address. For sockets TICOTS and TICOTSORD share the same
 * pair of hash tables since sockets only use TICOTSORD.
 *
 * All hash table lookups increment a reference count for returned endpoints,
 * so we may safely check the endpoint state even when the endpoint is removed
 * from the hash by another thread immediately after it is found.
 *
 *
 * CLOSE processing
 * ================
 *
 * The driver enters serializer twice on close(). The close sequence is the
 * following:
 *
 * 1) Wait until closing is safe (te_closewait becomes zero).
 *	This step is needed to prevent close during serializer switches. In most
 *	cases (close happening after connection establishment) te_closewait is
 *	zero.
 * 2) Set te_closing.
 * 3) Call tl_close_ser() within serializer and wait for it to complete.
 *
 *	tl_close_ser() simply marks the endpoint and wakes up the waiting
 *	tl_close().
 *	It also needs to clear write-side q_next pointers - this should be done
 *	before qprocsoff().
 *
 *	This synchronous serializer entry during close is needed to ensure that
 *	the queue is valid everywhere inside the serializer.
 *
 *	Note that in many cases close will execute tl_close_ser() synchronously,
 *	so it will not wait at all.
 *
 * 4) Call qprocsoff().
 * 5) Call tl_close_finish_ser() within the serializer and wait for it to
 *    complete (for COTS transports). For CLTS transports there is no wait.
 *
 *	tl_close_finish_ser() finishes the close process and wakes up the
 *	waiting close if there is any.
 *
 *	Note that in most cases close will enter tl_close_finish_ser()
 *	synchronously and will not wait at all.
 *
 *
 * Flow Control
 * ============
 *
 * The driver implements both read and write side service routines. No one calls
 * putq() on the read queue. The read side service routine tl_rsrv() is called
 * when the read side stream is back-enabled. It enters serializer synchronously
 * (waits till serializer processing is complete). Within serializer it
 * back-enables all endpoints blocked by the queue for connection-less
 * transports and enables write side service processing for the peer for
 * connection-oriented transports.
 *
 * Read and write side service routines use special mblk_sized space in the
 * endpoint structure to enter perimeter.
 *
 * Write-side flow control
 * -----------------------
 *
 * Write side flow control is a bit tricky. The driver needs to deal with two
 * message queues - the explicit STREAMS message queue maintained by
 * putq()/getq()/putbq() and the implicit queue within the serializer. These two
 * queues should be synchronized to preserve message ordering and should
 * maintain a single order determined by the order in which messages enter
 * tl_wput(). In order to maintain the ordering between these two queues the
 * STREAMS queue is only manipulated within the serializer, so the ordering is
 * provided by the serializer.
 *
 * Functions called from the tl_wsrv() sometimes may call putbq(). To
 * immediately stop any further processing of the STREAMS message queues the
 * code calling putbq() also sets the te_nowsrv flag in the endpoint. The write
 * side service processing stops when the flag is set.
 *
 * The tl_wsrv() function enters serializer synchronously and waits for it to
 * complete. The serializer call-back tl_wsrv_ser() either drains all messages
 * on the STREAMS queue or terminates when it notices the te_nowsrv flag
 * set. Note that the maximum amount of messages processed by tl_wsrv_ser() is
 * always bounded by the amount of messages on the STREAMS queue at the time
 * tl_wsrv_ser() is entered. Any new messages may only appear on the STREAMS
 * queue from another serialized entry which can't happen in parallel. This
 * guarantees that tl_wsrv_ser() completes in bounded time (there is no risk
 * of it draining forever while a writer places new messages on the STREAMS
 * queue).
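 *
 * A hypothetical fragment (the surrounding function and the peer_tep variable
 * are made up; the TL_PUTBQ() and TL_QENABLE() macros are defined further
 * below) showing the convention: a handler that cannot make progress requeues
 * the message and raises te_nowsrv, and the peer later calls TL_QENABLE() to
 * clear te_nowsrv and re-enable the queue:
 *
 *	if (canputnext(peer_tep->te_rq)) {
 *		putnext(peer_tep->te_rq, mp);
 *	} else {
 *		TL_PUTBQ(tep, mp);
 *		return;
 *	}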
 *
 * Note that a closing endpoint never sets te_nowsrv and never calls putbq().
 *
 *
 * Unix Domain Sockets
 * ===================
 *
 * The driver knows the structure of Unix Domain sockets addresses and treats
 * them differently from generic TLI addresses. For sockets implicit binds are
 * requested by setting SOU_MAGIC_IMPLICIT in the soua_magic part of the address
 * instead of using an address length of zero. Explicit binds specify
 * SOU_MAGIC_EXPLICIT as magic.
 *
 * For implicit binds we always use the minor number as the soua_vp part of the
 * address and avoid any hash table lookups. This saves two hash table lookups
 * per anonymous bind.
 *
 * For explicit addresses we hash the vnode pointer instead of hashing the
 * full-scale address+zone+length. Hashing by pointer is more efficient than
 * hashing by the full address.
 *
 * For unix domain sockets the te_ap is always pointing to the te_uxaddr part of
 * the tep structure, so it should never be freed.
 *
 * Also for sockets the driver always uses the minor number as the acceptor id.
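 *
 * As a purely illustrative sketch of the implicit bind convention above (not a
 * quote of the actual bind code, and modulo the exact type of soua_vp), the
 * implicitly bound address effectively ends up as:
 *
 *	tep->te_magic = SOU_MAGIC_IMPLICIT;
 *	tep->te_vp = (void *)(uintptr_t)tep->te_minor;
 *
 * with no address hash insertion or lookup needed for the anonymous bind.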
 *
 * TPI VIOLATIONS
 * --------------
 *
 * This driver violates TPI in several respects for Unix Domain Sockets:
 *
 * 1) It treats O_T_BIND_REQ as T_BIND_REQ and refuses bind if an explicit bind
 *    is requested and the endpoint is already in use. There is no point in
 *    generating an unused address since this address will be rejected by
 *    sockfs anyway. For implicit binds it always generates a new address
 *    (sets soua_vp to its minor number).
 *
 * 2) It always uses the minor number as the acceptor ID and never uses the
 *    queue pointer. This is ok since sockets get the acceptor ID from the
 *    T_CAPABILITY_REQ message and they do not use the queue pointer.
 *
 * 3) For listener sockets the usual sequence is to issue bind() with zero
 *    backlog followed by listen(). The listen() should be issued with non-zero
 *    backlog, so sotpi_listen() issues an unbind request followed by a bind
 *    request to the same address but with a non-zero qlen value. Both
 *    tl_bind() and tl_unbind() require a write lock on the hash table to
 *    insert/remove the address. The driver does not remove the address from
 *    the hash for endpoints that are bound to the explicit address and have
 *    a backlog of zero. During T_BIND_REQ processing, if the address requested
 *    is equal to the address the endpoint already has, it updates the backlog
 *    without reinserting the address in the hash table. This optimization
 *    avoids two hash table updates for each listener created. It also
 *    avoids the problem of a "stolen" address when another listener may use
 *    the same address between the unbind and bind and suddenly listen() fails
 *    because the address is in use even though the bind() succeeded.
 *
 *
 * CONNECTIONLESS TRANSPORTS
 * =========================
 *
 * Connectionless transports all share the same serializer (one for TLI and one
 * for Sockets). Functions executing behind serializer can check or modify state
 * of any endpoint.
 *
 * When endpoint X talks to another endpoint Y it caches the pointer to Y in the
 * te_lastep field. The next time X talks to some address A it checks whether A
 * is the same as Y's address and if it is there is no need to lookup Y. If the
 * address is different or the state of Y is not appropriate (e.g. closed or not
 * idle) X does a lookup using tl_find_peer() and caches the new address.
 * NOTE: tl_find_peer() never returns a closing endpoint and it places a refhold
 * on the endpoint found.
 *
 * During close of endpoint Y it doesn't try to remove itself from other
 * endpoints' caches. They will detect that Y is gone and will search the peer
 * endpoint again.
 *
 * Flow Control Handling.
 * ----------------------
 *
 * Each connectionless endpoint keeps a list of endpoints which are
 * flow-controlled by its queue. It also keeps a pointer to the queue which
 * flow-controls itself. Whenever flow control releases for endpoint X it
 * enables all queues from the list. During close it also back-enables everyone
 * in the list. If X is flow-controlled when it is closing it removes itself
 * from the peer's list.
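 *
 * A minimal sketch of the send-side caching scheme described above (the
 * variable names and the exact checks are illustrative, not the literal
 * tl_unitdata() code; the real code also validates the peer's TPI state and
 * manages the reference counts):
 *
 *	peer_tep = tep->te_lastep;
 *	if (peer_tep == NULL || peer_tep->te_closing ||
 *	    !tl_eqaddr(&peer_tep->te_ap, &dst_addr))
 *		peer_tep = tl_find_peer(tep, &dst_addr);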
 *
 * DATA STRUCTURES
 * ===============
 *
 * Each endpoint is represented by the tl_endpt_t structure which keeps all the
 * endpoint state. For connection-oriented transports it keeps a list
 * of pending connections (tl_icon_t). For connectionless transports it keeps a
 * list of endpoints flow controlled by this one.
 *
 * Each transport type is represented by a per-transport data structure
 * tl_transport_state_t. It contains a pointer to an acceptor ID hash and the
 * endpoint address hash tables for each transport. It also contains a pointer
 * to the transport serializer for connectionless transports.
 *
 * Each endpoint keeps a link to its transport structure, so the code can find
 * all per-transport information quickly.
 */

#include	<sys/types.h>
#include	<sys/inttypes.h>
#include	<sys/stream.h>
#include	<sys/stropts.h>
#define	_SUN_TPI_VERSION 2
#include	<sys/tihdr.h>
#include	<sys/strlog.h>
#include	<sys/debug.h>
#include	<sys/cred.h>
#include	<sys/errno.h>
#include	<sys/kmem.h>
#include	<sys/id_space.h>
#include	<sys/modhash.h>
#include	<sys/mkdev.h>
#include	<sys/tl.h>
#include	<sys/stat.h>
#include	<sys/conf.h>
#include	<sys/modctl.h>
#include	<sys/strsun.h>
#include	<sys/socket.h>
#include	<sys/socketvar.h>
#include	<sys/sysmacros.h>
#include	<sys/xti_xtiopt.h>
#include	<sys/ddi.h>
#include	<sys/sunddi.h>
#include	<sys/zone.h>
#include	<inet/common.h>	/* typedef int (*pfi_t)() for inet/optcom.h */
#include	<inet/optcom.h>
#include	<sys/strsubr.h>
#include	<sys/ucred.h>
#include	<sys/suntpi.h>
#include	<sys/list.h>
#include	<sys/serializer.h>

/*
 * TBD List
 * 14. Eliminate state changes through table
 * 16. AF_UNIX socket options
 * 17. connect() for ticlts
 * 18. support for "netstat" to show AF_UNIX plus TLI local
 *	transport connections
 * 21. sanity check to flushing on sending M_ERROR
 */

/*
 * CONSTANT DECLARATIONS
 * --------------------
 */

/*
 * Local declarations
 */
#define	NEXTSTATE(EV, ST)	ti_statetbl[EV][ST]

#define	BADSEQNUM	(-1)	/* initial seq number used by T_DISCON_IND */
#define	TL_BUFWAIT	(10000)	/* usecs to wait for allocb buffer timeout */
#define	TL_TIDUSZ (64*1024)	/* tidu size when "strmsgsz" is unlimited (0) */
/*
 * Hash tables size.
 */
#define	TL_HASH_SIZE	311

/*
 * Definitions for module_info
 */
#define	TL_ID		(104)		/* module ID number */
#define	TL_NAME		"tl"		/* module name */
#define	TL_MINPSZ	(0)		/* min packet size */
#define	TL_MAXPSZ	INFPSZ		/* max packet size ZZZ */
#define	TL_HIWAT	(16*1024)	/* hi water mark */
#define	TL_LOWAT	(256)		/* lo water mark */
/*
 * Definition of minor numbers/modes for new transport provider modes.
 * We view the socket use as a separate mode to get a separate name space.
 */
#define	TL_TICOTS	0	/* connection oriented transport */
#define	TL_TICOTSORD	1	/* COTS w/ orderly release */
#define	TL_TICLTS	2	/* connectionless transport */
#define	TL_UNUSED	3
#define	TL_SOCKET	4	/* Socket */
#define	TL_SOCK_COTS	(TL_SOCKET|TL_TICOTS)
#define	TL_SOCK_COTSORD	(TL_SOCKET|TL_TICOTSORD)
#define	TL_SOCK_CLTS	(TL_SOCKET|TL_TICLTS)

#define	TL_MINOR_MASK	0x7
#define	TL_MINOR_START	(TL_TICLTS + 1)

/*
 * LOCAL MACROS
 */
#define	T_ALIGN(p)	P2ROUNDUP((p), sizeof (t_scalar_t))

/*
 * EXTERNAL VARIABLE DECLARATIONS
 * -----------------------------
 */
/*
 * state table defined in the OS space.c
 */
extern	char ti_statetbl[TE_NOEVENTS][TS_NOSTATES];

/*
 * STREAMS DRIVER ENTRY POINTS PROTOTYPES
 */
static int tl_open(queue_t *, dev_t *, int, int, cred_t *);
static int tl_close(queue_t *, int, cred_t *);
static void tl_wput(queue_t *, mblk_t *);
static void tl_wsrv(queue_t *);
static void tl_rsrv(queue_t *);

static int tl_attach(dev_info_t *, ddi_attach_cmd_t);
static int tl_detach(dev_info_t *, ddi_detach_cmd_t);
static int tl_info(dev_info_t *, ddi_info_cmd_t, void *, void **);


/*
 * GLOBAL DATA STRUCTURES AND VARIABLES
 * -----------------------------------
 */

/*
 * Table representing database of all options managed by T_SVR4_OPTMGMT_REQ.
 * For now, we only manage the SO_RECVUCRED option but we also have
 * harmless dummy options to make things work with some common code we access.
 */
opdes_t	tl_opt_arr[] = {
	/* The SO_TYPE is needed for the hack below */
	{
		SO_TYPE,
		SOL_SOCKET,
		OA_R,
		OA_R,
		OP_NP,
		OP_PASSNEXT,
		sizeof (t_scalar_t),
		0
	},
	{
		SO_RECVUCRED,
		SOL_SOCKET,
		OA_RW,
		OA_RW,
		OP_NP,
		OP_PASSNEXT,
		sizeof (int),
		0
	}
};

/*
 * Table of all supported levels
 * Note: Some levels (e.g. XTI_GENERIC) may be valid but may not have
 * any supported options so we need this info separately.
 *
 * This is needed only for topmost tpi providers.
 */
optlevel_t	tl_valid_levels_arr[] = {
	XTI_GENERIC,
	SOL_SOCKET,
	TL_PROT_LEVEL
};

#define	TL_VALID_LEVELS_CNT	A_CNT(tl_valid_levels_arr)
/*
 * Current upper bound on the amount of space needed to return all options.
 * Additional options with data size of sizeof(long) are handled automatically.
 * Others need to be handled by hand.
 */
#define	TL_MAX_OPT_BUF_LEN						\
		((A_CNT(tl_opt_arr) << 2) +				\
		(A_CNT(tl_opt_arr) * sizeof (struct opthdr)) +		\
		+ 64 + sizeof (struct T_optmgmt_ack))

#define	TL_OPT_ARR_CNT	A_CNT(tl_opt_arr)
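/*
 * For a rough sense of scale (an informal estimate, not something the code
 * relies on): with the two options above, and assuming sizeof (struct opthdr)
 * is 12 bytes and sizeof (struct T_optmgmt_ack) is 16 bytes, TL_MAX_OPT_BUF_LEN
 * works out to (2 << 2) + 2 * 12 + 64 + 16 = 112 bytes.
 */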
/*
 *	transport addr structure
 */
typedef struct tl_addr {
	zoneid_t	ta_zoneid;		/* Zone scope of address */
	t_scalar_t	ta_alen;		/* length of abuf */
	void		*ta_abuf;		/* the addr itself */
} tl_addr_t;

/*
 * Refcounted version of serializer.
 */
typedef struct tl_serializer {
	uint_t		ts_refcnt;
	serializer_t	*ts_serializer;
} tl_serializer_t;

/*
 * Per-transport state. Each transport type has a separate copy.
 */
typedef struct tl_transport_state {
	char		*tr_name;
	minor_t		tr_minor;
	uint32_t	tr_defaddr;
	mod_hash_t	*tr_ai_hash;
	mod_hash_t	*tr_addr_hash;
	tl_serializer_t	*tr_serializer;
} tl_transport_state_t;

#define	TL_DFADDR 0x1000

static tl_transport_state_t tl_transports[] = {
	{ "ticots", TL_TICOTS, TL_DFADDR, NULL, NULL, NULL },
	{ "ticotsord", TL_TICOTSORD, TL_DFADDR, NULL, NULL, NULL },
	{ "ticlts", TL_TICLTS, TL_DFADDR, NULL, NULL, NULL },
	{ "undefined", TL_UNUSED, TL_DFADDR, NULL, NULL, NULL },
	{ "sticots", TL_SOCK_COTS, TL_DFADDR, NULL, NULL, NULL },
	{ "sticotsord", TL_SOCK_COTSORD, TL_DFADDR, NULL, NULL, NULL },
	{ "sticlts", TL_SOCK_CLTS, TL_DFADDR, NULL, NULL, NULL }
};

#define	TL_MAXTRANSPORT A_CNT(tl_transports)

struct tl_endpt;
typedef struct tl_endpt tl_endpt_t;

typedef void (tlproc_t)(mblk_t *, tl_endpt_t *);

/*
 * Data structure used to represent pending connects.
 * Records enough information so that the connecting peer can close
 * before the connection gets accepted.
 */
typedef struct tl_icon {
	list_node_t	ti_node;
	struct tl_endpt	*ti_tep;	/* NULL if peer has already closed */
	mblk_t		*ti_mp;		/* b_next list of data + ordrel_ind */
	t_scalar_t	ti_seqno;	/* Sequence number */
} tl_icon_t;

typedef struct so_ux_addr soux_addr_t;
#define	TL_SOUX_ADDRLEN sizeof (soux_addr_t)

/*
 * Maximum number of unaccepted connection indications allowed per listener.
 */
#define	TL_MAXQLEN	4096
int tl_maxqlen = TL_MAXQLEN;

/*
 * transport endpoint structure
 */
struct tl_endpt {
	queue_t		*te_rq;		/* stream read queue */
	queue_t		*te_wq;		/* stream write queue */
	uint32_t	te_refcnt;
	int32_t		te_state;	/* TPI state of endpoint */
	minor_t		te_minor;	/* minor number */
#define	te_seqno	te_minor
	uint_t		te_flag;	/* flag field */
	boolean_t	te_nowsrv;
	tl_serializer_t	*te_ser;	/* Serializer to use */
#define	te_serializer	te_ser->ts_serializer

	soux_addr_t	te_uxaddr;	/* Socket address */
#define	te_magic	te_uxaddr.soua_magic
#define	te_vp		te_uxaddr.soua_vp
	tl_addr_t	te_ap;		/* addr bound to this endpt */
#define	te_zoneid	te_ap.ta_zoneid
#define	te_alen		te_ap.ta_alen
#define	te_abuf		te_ap.ta_abuf

	tl_transport_state_t *te_transport;
#define	te_addrhash	te_transport->tr_addr_hash
#define	te_aihash	te_transport->tr_ai_hash
#define	te_defaddr	te_transport->tr_defaddr
	cred_t		*te_credp;	/* endpoint user credentials */
	mod_hash_hndl_t	te_hash_hndl;	/* Handle for address hash */

	/*
	 * State specific for connection-oriented and connectionless transports.
	 */
	union {
		/* Connection-oriented state. */
		struct {
			t_uscalar_t _te_nicon;	/* count of conn requests */
			t_uscalar_t _te_qlen;	/* max conn requests */
			tl_endpt_t  *_te_oconp;	/* conn request pending */
			tl_endpt_t  *_te_conp;	/* connected endpt */
#ifndef _ILP32
			void	    *_te_pad;
#endif
			list_t	_te_iconp;	/* list of conn ind. pending */
		} _te_cots_state;
		/* Connection-less state. */
		struct {
			tl_endpt_t *_te_lastep;	/* last dest. endpoint */
			tl_endpt_t *_te_flowq;	/* flow controlled on whom */
			list_node_t _te_flows;	/* lists of connections */
			list_t	_te_flowlist;	/* Who flowcontrols on me */
		} _te_clts_state;
	} _te_transport_state;
#define	te_nicon	_te_transport_state._te_cots_state._te_nicon
#define	te_qlen		_te_transport_state._te_cots_state._te_qlen
#define	te_oconp	_te_transport_state._te_cots_state._te_oconp
#define	te_conp		_te_transport_state._te_cots_state._te_conp
#define	te_iconp	_te_transport_state._te_cots_state._te_iconp
#define	te_lastep	_te_transport_state._te_clts_state._te_lastep
#define	te_flowq	_te_transport_state._te_clts_state._te_flowq
#define	te_flowlist	_te_transport_state._te_clts_state._te_flowlist
#define	te_flows	_te_transport_state._te_clts_state._te_flows

	bufcall_id_t	te_bufcid;	/* outstanding bufcall id */
	timeout_id_t	te_timoutid;	/* outstanding timeout id */
	pid_t		te_cpid;	/* cached pid of endpoint */
	t_uscalar_t	te_acceptor_id;	/* acceptor id for T_CONN_RES */
	/*
	 * Pieces of the endpoint state needed for closing.
	 */
	kmutex_t	te_closelock;
	kcondvar_t	te_closecv;
	uint8_t		te_closing;	/* The endpoint started closing */
	uint8_t		te_closewait;	/* Wait in close until zero */
	mblk_t		te_closemp;	/* for entering serializer on close */
	mblk_t		te_rsrvmp;	/* for entering serializer on rsrv */
	mblk_t		te_wsrvmp;	/* for entering serializer on wsrv */
	kmutex_t	te_srv_lock;
	kcondvar_t	te_srv_cv;
	uint8_t		te_rsrv_active;	/* Running in tl_rsrv() */
	uint8_t		te_wsrv_active;	/* Running in tl_wsrv() */
	/*
	 * Pieces of the endpoint state needed for serializer transitions.
	 */
	kmutex_t	te_ser_lock;	/* Protects the count below */
	uint_t		te_ser_count;	/* Number of messages on serializer */
};

/*
 * Flag values. The lower 4 bits specify the transport used.
 * TL_LISTENER, TL_ACCEPTOR, TL_ACCEPTED and TL_EAGER are for debugging only;
 * they make it easier to identify the endpoint.
 */
#define	TL_LISTENER	0x00010	/* the listener endpoint */
#define	TL_ACCEPTOR	0x00020	/* the accepting endpoint */
#define	TL_EAGER	0x00040	/* connecting endpoint */
#define	TL_ACCEPTED	0x00080	/* accepted connection */
#define	TL_SETCRED	0x00100	/* flag to indicate sending of credentials */
#define	TL_SETUCRED	0x00200	/* flag to indicate sending of ucred */
#define	TL_SOCKUCRED	0x00400	/* flag to indicate sending of SCM_UCRED */
#define	TL_ADDRHASHED	0x01000	/* Endpoint address is stored in te_addrhash */
#define	TL_CLOSE_SER	0x10000	/* Endpoint close has entered the serializer */
/*
 * Boolean checks for the endpoint type.
 */
#define	IS_CLTS(x)	(((x)->te_flag & TL_TICLTS) != 0)
#define	IS_COTS(x)	(((x)->te_flag & TL_TICLTS) == 0)
#define	IS_COTSORD(x)	(((x)->te_flag & TL_TICOTSORD) != 0)
#define	IS_SOCKET(x)	(((x)->te_flag & TL_SOCKET) != 0)

#define	TLPID(mp, tep)	(DB_CPID(mp) == -1 ? (tep)->te_cpid : DB_CPID(mp))

/*
 * Certain operations are always used together. These macros reduce the chance
 * of missing a part of a combination.
 */
#define	TL_UNCONNECT(x) { tl_refrele(x); x = NULL; }
#define	TL_REMOVE_PEER(x) { if ((x) != NULL) TL_UNCONNECT(x) }

#define	TL_PUTBQ(x, mp) {		\
	ASSERT(!((x)->te_flag & TL_CLOSE_SER)); \
	(x)->te_nowsrv = B_TRUE;	\
	(void) putbq((x)->te_wq, mp);	\
}

#define	TL_QENABLE(x) { (x)->te_nowsrv = B_FALSE; qenable((x)->te_wq); }
#define	TL_PUTQ(x, mp) { (x)->te_nowsrv = B_FALSE; (void)putq((x)->te_wq, mp); }
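/*
 * For example (an illustrative value only, not one the driver necessarily
 * constructs in exactly this combination): a te_flag of
 * (TL_SOCK_COTSORD | TL_EAGER), i.e. 0x45, has the TL_SOCKET and TL_TICOTSORD
 * bits set and the TL_TICLTS bit clear, so IS_SOCKET(), IS_COTSORD() and
 * IS_COTS() are all true while IS_CLTS() is false.
 */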
/*
 * STREAMS driver glue data structures.
 */
static	struct	module_info	tl_minfo = {
	TL_ID,			/* mi_idnum */
	TL_NAME,		/* mi_idname */
	TL_MINPSZ,		/* mi_minpsz */
	TL_MAXPSZ,		/* mi_maxpsz */
	TL_HIWAT,		/* mi_hiwat */
	TL_LOWAT		/* mi_lowat */
};

static	struct	qinit	tl_rinit = {
	NULL,			/* qi_putp */
	(int (*)())tl_rsrv,	/* qi_srvp */
	tl_open,		/* qi_qopen */
	tl_close,		/* qi_qclose */
	NULL,			/* qi_qadmin */
	&tl_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

static	struct	qinit	tl_winit = {
	(int (*)())tl_wput,	/* qi_putp */
	(int (*)())tl_wsrv,	/* qi_srvp */
	NULL,			/* qi_qopen */
	NULL,			/* qi_qclose */
	NULL,			/* qi_qadmin */
	&tl_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

static	struct streamtab	tlinfo = {
	&tl_rinit,		/* st_rdinit */
	&tl_winit,		/* st_wrinit */
	NULL,			/* st_muxrinit */
	NULL			/* st_muxwrinit */
};

DDI_DEFINE_STREAM_OPS(tl_devops, nulldev, nulldev, tl_attach, tl_detach,
    nulldev, tl_info, D_MP, &tlinfo, ddi_quiesce_not_supported);

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module -- pseudo driver here */
	"TPI Local Transport (tl)",
	&tl_devops,		/* driver ops */
};

/*
 * Module linkage information for the kernel.
 */
static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

/*
 * Templates for response to info request
 * Check sanity of unlimited connect data etc.
 */

#define	TL_CLTS_PROVIDER_FLAG	(XPG4_1|SENDZERO)
#define	TL_COTS_PROVIDER_FLAG	(XPG4_1|SENDZERO)

static struct T_info_ack tl_cots_info_ack =
	{
		T_INFO_ACK,	/* PRIM_type - always T_INFO_ACK */
		T_INFINITE,	/* TSDU size */
		T_INFINITE,	/* ETSDU size */
		T_INFINITE,	/* CDATA_size */
		T_INFINITE,	/* DDATA_size */
		T_INFINITE,	/* ADDR_size */
		T_INFINITE,	/* OPT_size */
		0,		/* TIDU_size - fill at run time */
		T_COTS,		/* SERV_type */
		-1,		/* CURRENT_state */
		TL_COTS_PROVIDER_FLAG	/* PROVIDER_flag */
	};

static struct T_info_ack tl_clts_info_ack =
	{
		T_INFO_ACK,	/* PRIM_type - always T_INFO_ACK */
		0,		/* TSDU_size - fill at run time */
		-2,		/* ETSDU_size -2 => not supported */
		-2,		/* CDATA_size -2 => not supported */
		-2,		/* DDATA_size -2 => not supported */
		-1,		/* ADDR_size -1 => unlimited */
		-1,		/* OPT_size */
		0,		/* TIDU_size - fill at run time */
		T_CLTS,		/* SERV_type */
		-1,		/* CURRENT_state */
		TL_CLTS_PROVIDER_FLAG	/* PROVIDER_flag */
	};

/*
 * private copy of devinfo pointer used in tl_info
 */
static dev_info_t *tl_dip;

/*
 * Endpoints cache.
 */
static kmem_cache_t *tl_cache;
/*
 * Minor number space.
 */
static id_space_t *tl_minors;

/*
 * Default Data Unit size.
 */
static t_scalar_t tl_tidusz;

/*
 * Size of hash tables.
 */
static size_t tl_hash_size = TL_HASH_SIZE;

/*
 * Debug and test variable ONLY. Turn off T_CONN_IND queueing
 * for sockets.
 */
static int tl_disable_early_connect = 0;
static int tl_client_closing_when_accepting;

static int tl_serializer_noswitch;

/*
 * LOCAL FUNCTION PROTOTYPES
 * -------------------------
 */
static boolean_t tl_eqaddr(tl_addr_t *, tl_addr_t *);
static void tl_do_proto(mblk_t *, tl_endpt_t *);
static void tl_do_ioctl(mblk_t *, tl_endpt_t *);
static void tl_do_ioctl_ser(mblk_t *, tl_endpt_t *);
static void tl_error_ack(queue_t *, mblk_t *, t_scalar_t, t_scalar_t,
    t_scalar_t);
static void tl_bind(mblk_t *, tl_endpt_t *);
static void tl_bind_ser(mblk_t *, tl_endpt_t *);
static void tl_ok_ack(queue_t *, mblk_t *mp, t_scalar_t);
static void tl_unbind(mblk_t *, tl_endpt_t *);
static void tl_optmgmt(queue_t *, mblk_t *);
static void tl_conn_req(queue_t *, mblk_t *);
static void tl_conn_req_ser(mblk_t *, tl_endpt_t *);
static void tl_conn_res(mblk_t *, tl_endpt_t *);
static void tl_discon_req(mblk_t *, tl_endpt_t *);
static void tl_capability_req(mblk_t *, tl_endpt_t *);
static void tl_info_req_ser(mblk_t *, tl_endpt_t *);
static void tl_info_req(mblk_t *, tl_endpt_t *);
static void tl_addr_req(mblk_t *, tl_endpt_t *);
static void tl_connected_cots_addr_req(mblk_t *, tl_endpt_t *);
static void tl_data(mblk_t *, tl_endpt_t *);
static void tl_exdata(mblk_t *, tl_endpt_t *);
static void tl_ordrel(mblk_t *, tl_endpt_t *);
static void tl_unitdata(mblk_t *, tl_endpt_t *);
static void tl_unitdata_ser(mblk_t *, tl_endpt_t *);
static void tl_uderr(queue_t *, mblk_t *, t_scalar_t);
static tl_endpt_t *tl_find_peer(tl_endpt_t *, tl_addr_t *);
static tl_endpt_t *tl_sock_find_peer(tl_endpt_t *, struct so_ux_addr *);
static boolean_t tl_get_any_addr(tl_endpt_t *, tl_addr_t *);
static void tl_cl_backenable(tl_endpt_t *);
static void tl_co_unconnect(tl_endpt_t *);
static mblk_t *tl_resizemp(mblk_t *, ssize_t);
static void tl_discon_ind(tl_endpt_t *, uint32_t);
static mblk_t *tl_discon_ind_alloc(uint32_t, t_scalar_t);
static mblk_t *tl_ordrel_ind_alloc(void);
static tl_icon_t *tl_icon_find(tl_endpt_t *, t_scalar_t);
static void tl_icon_queuemsg(tl_endpt_t *, t_scalar_t, mblk_t *);
static boolean_t tl_icon_hasprim(tl_endpt_t *, t_scalar_t, t_scalar_t);
static void tl_icon_sendmsgs(tl_endpt_t *, mblk_t **);
static void tl_icon_freemsgs(mblk_t **);
static void tl_merror(queue_t *, mblk_t *, int);
static void tl_fill_option(uchar_t *, cred_t *, pid_t, int, cred_t *);
static int tl_default_opt(queue_t *, int, int, uchar_t *);
static int tl_get_opt(queue_t *, int, int, uchar_t *);
static int tl_set_opt(queue_t *, uint_t, int, int, uint_t, uchar_t *, uint_t *,
    uchar_t *, void *, cred_t *, mblk_t *);
static void tl_memrecover(queue_t *, mblk_t *, size_t);
static void tl_freetip(tl_endpt_t *, tl_icon_t *);
static void tl_free(tl_endpt_t *);
static int  tl_constructor(void *, void *, int);
static void tl_destructor(void *, void *);
static void tl_find_callback(mod_hash_key_t, mod_hash_val_t);
static tl_serializer_t *tl_serializer_alloc(int);
static void tl_serializer_refhold(tl_serializer_t *);
static void tl_serializer_refrele(tl_serializer_t *);
static void tl_serializer_enter(tl_endpt_t *, tlproc_t, mblk_t *);
static void tl_serializer_exit(tl_endpt_t *);
static boolean_t tl_noclose(tl_endpt_t *);
static void tl_closeok(tl_endpt_t *);
static void tl_refhold(tl_endpt_t *);
static void tl_refrele(tl_endpt_t *);
static int tl_hash_cmp_addr(mod_hash_key_t, mod_hash_key_t);
static uint_t tl_hash_by_addr(void *, mod_hash_key_t);
static void tl_close_ser(mblk_t *, tl_endpt_t *);
static void tl_close_finish_ser(mblk_t *, tl_endpt_t *);
static void tl_wput_data_ser(mblk_t *, tl_endpt_t *);
static void tl_proto_ser(mblk_t *, tl_endpt_t *);
static void tl_putq_ser(mblk_t *, tl_endpt_t *);
static void tl_wput_common_ser(mblk_t *, tl_endpt_t *);
static void tl_wput_ser(mblk_t *, tl_endpt_t *);
static void tl_wsrv_ser(mblk_t *, tl_endpt_t *);
static void tl_rsrv_ser(mblk_t *, tl_endpt_t *);
static void tl_addr_unbind(tl_endpt_t *);

/*
 * Initialize option database object for TL
 */

optdb_obj_t tl_opt_obj = {
	tl_default_opt,		/* TL default value function pointer */
	tl_get_opt,		/* TL get function pointer */
	tl_set_opt,		/* TL set function pointer */
	B_TRUE,			/* TL is tpi provider */
	TL_OPT_ARR_CNT,		/* TL option database count of entries */
	tl_opt_arr,		/* TL option database */
	TL_VALID_LEVELS_CNT,	/* TL valid level count of entries */
	tl_valid_levels_arr	/* TL valid level array */
};

/*
 * Logical operations.
 *
 * IMPLY(X, Y) means that X implies Y, i.e. when X is true, Y
 * should also be true.
 *
 * EQUIV(X, Y) is logical equivalence. Both X and Y should be true or false at
 * the same time.
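 *
 * For example, tl_detach() below asserts
 * EQUIV(i & TL_TICLTS, t->tr_serializer != NULL): a transport owns a global
 * serializer exactly when it is a connectionless transport.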
 */
#define	IMPLY(X, Y)	(!(X) || (Y))
#define	EQUIV(X, Y)	(IMPLY(X, Y) && IMPLY(Y, X))

/*
 * LOCAL FUNCTIONS AND DRIVER ENTRY POINTS
 * ---------------------------------------
 */

/*
 * Loadable module routines
 */
int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Driver Entry Points and Other routines
 */
static int
tl_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int i;
	char name[32];

	/*
	 * Resume from a checkpoint state.
	 */
	if (cmd == DDI_RESUME)
		return (DDI_SUCCESS);

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Deduce TIDU size to use. Note: "strmsgsz" being 0 has semantics that
	 * streams message sizes can be unlimited. We use a defined constant
	 * instead.
	 */
	tl_tidusz = strmsgsz != 0 ? (t_scalar_t)strmsgsz : TL_TIDUSZ;

	/*
	 * Create subdevices for each transport.
	 */
	for (i = 0; i < TL_UNUSED; i++) {
		if (ddi_create_minor_node(devi,
		    tl_transports[i].tr_name,
		    S_IFCHR, tl_transports[i].tr_minor,
		    DDI_PSEUDO, NULL) == DDI_FAILURE) {
			ddi_remove_minor_node(devi, NULL);
			return (DDI_FAILURE);
		}
	}

	tl_cache = kmem_cache_create("tl_cache", sizeof (tl_endpt_t),
	    0, tl_constructor, tl_destructor, NULL, NULL, NULL, 0);

	if (tl_cache == NULL) {
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}

	tl_minors = id_space_create("tl_minor_space",
	    TL_MINOR_START, MAXMIN32 - TL_MINOR_START + 1);

	/*
	 * Create hash tables and serializers for each transport.
	 */
	for (i = 0; i < TL_MAXTRANSPORT; i++) {
		tl_transport_state_t *t = &tl_transports[i];

		if (i == TL_UNUSED)
			continue;

		/* Socket COTSORD shares namespace with COTS */
		if (i == TL_SOCK_COTSORD) {
			t->tr_ai_hash =
			    tl_transports[TL_SOCK_COTS].tr_ai_hash;
			ASSERT(t->tr_ai_hash != NULL);
			t->tr_addr_hash =
			    tl_transports[TL_SOCK_COTS].tr_addr_hash;
			ASSERT(t->tr_addr_hash != NULL);
			continue;
		}

		/*
		 * Create hash tables.
10250Sstevel@tonic-gate */ 10260Sstevel@tonic-gate (void) snprintf(name, sizeof (name), "%s_ai_hash", 10270Sstevel@tonic-gate t->tr_name); 10280Sstevel@tonic-gate #ifdef _ILP32 10290Sstevel@tonic-gate if (i & TL_SOCKET) 10300Sstevel@tonic-gate t->tr_ai_hash = 10310Sstevel@tonic-gate mod_hash_create_idhash(name, tl_hash_size - 1, 10325240Snordmark mod_hash_null_valdtor); 10330Sstevel@tonic-gate else 10340Sstevel@tonic-gate t->tr_ai_hash = 10350Sstevel@tonic-gate mod_hash_create_ptrhash(name, tl_hash_size, 10365240Snordmark mod_hash_null_valdtor, sizeof (queue_t)); 10370Sstevel@tonic-gate #else 10380Sstevel@tonic-gate t->tr_ai_hash = 10390Sstevel@tonic-gate mod_hash_create_idhash(name, tl_hash_size - 1, 10405240Snordmark mod_hash_null_valdtor); 10410Sstevel@tonic-gate #endif /* _ILP32 */ 10420Sstevel@tonic-gate 10430Sstevel@tonic-gate if (i & TL_SOCKET) { 10440Sstevel@tonic-gate (void) snprintf(name, sizeof (name), "%s_sockaddr_hash", 10450Sstevel@tonic-gate t->tr_name); 10460Sstevel@tonic-gate t->tr_addr_hash = mod_hash_create_ptrhash(name, 10470Sstevel@tonic-gate tl_hash_size, mod_hash_null_valdtor, 10480Sstevel@tonic-gate sizeof (uintptr_t)); 10490Sstevel@tonic-gate } else { 10500Sstevel@tonic-gate (void) snprintf(name, sizeof (name), "%s_addr_hash", 10510Sstevel@tonic-gate t->tr_name); 10520Sstevel@tonic-gate t->tr_addr_hash = mod_hash_create_extended(name, 10530Sstevel@tonic-gate tl_hash_size, mod_hash_null_keydtor, 10540Sstevel@tonic-gate mod_hash_null_valdtor, 10550Sstevel@tonic-gate tl_hash_by_addr, NULL, tl_hash_cmp_addr, KM_SLEEP); 10560Sstevel@tonic-gate } 10570Sstevel@tonic-gate 10580Sstevel@tonic-gate /* Create serializer for connectionless transports. */ 10590Sstevel@tonic-gate if (i & TL_TICLTS) 10600Sstevel@tonic-gate t->tr_serializer = tl_serializer_alloc(KM_SLEEP); 10610Sstevel@tonic-gate } 10620Sstevel@tonic-gate 10630Sstevel@tonic-gate tl_dip = devi; 10640Sstevel@tonic-gate 10650Sstevel@tonic-gate return (DDI_SUCCESS); 10660Sstevel@tonic-gate } 10670Sstevel@tonic-gate 10680Sstevel@tonic-gate static int 10690Sstevel@tonic-gate tl_detach(dev_info_t *devi, ddi_detach_cmd_t cmd) 10700Sstevel@tonic-gate { 10710Sstevel@tonic-gate int i; 10720Sstevel@tonic-gate 10730Sstevel@tonic-gate if (cmd == DDI_SUSPEND) 10740Sstevel@tonic-gate return (DDI_SUCCESS); 10750Sstevel@tonic-gate 10760Sstevel@tonic-gate if (cmd != DDI_DETACH) 10770Sstevel@tonic-gate return (DDI_FAILURE); 10780Sstevel@tonic-gate 10790Sstevel@tonic-gate /* 10800Sstevel@tonic-gate * Destroy arenas and hash tables. 
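 * The teardown mirrors tl_attach(): TL_SOCK_COTSORD is skipped since it
 * only aliases the TL_SOCK_COTS tables, and a serializer is released
 * only for connectionless transports, the only ones that own one (see
 * the EQUIV() assertion below).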
10810Sstevel@tonic-gate */ 10820Sstevel@tonic-gate for (i = 0; i < TL_MAXTRANSPORT; i++) { 10830Sstevel@tonic-gate tl_transport_state_t *t = &tl_transports[i]; 10840Sstevel@tonic-gate 10850Sstevel@tonic-gate if ((i == TL_UNUSED) || (i == TL_SOCK_COTSORD)) 10860Sstevel@tonic-gate continue; 10870Sstevel@tonic-gate 10880Sstevel@tonic-gate ASSERT(EQUIV(i & TL_TICLTS, t->tr_serializer != NULL)); 10890Sstevel@tonic-gate if (t->tr_serializer != NULL) { 10900Sstevel@tonic-gate tl_serializer_refrele(t->tr_serializer); 10910Sstevel@tonic-gate t->tr_serializer = NULL; 10920Sstevel@tonic-gate } 10930Sstevel@tonic-gate 10940Sstevel@tonic-gate #ifdef _ILP32 10950Sstevel@tonic-gate if (i & TL_SOCKET) 10960Sstevel@tonic-gate mod_hash_destroy_idhash(t->tr_ai_hash); 10970Sstevel@tonic-gate else 10980Sstevel@tonic-gate mod_hash_destroy_ptrhash(t->tr_ai_hash); 10990Sstevel@tonic-gate #else 11000Sstevel@tonic-gate mod_hash_destroy_idhash(t->tr_ai_hash); 11010Sstevel@tonic-gate #endif /* _ILP32 */ 11020Sstevel@tonic-gate t->tr_ai_hash = NULL; 11030Sstevel@tonic-gate if (i & TL_SOCKET) 11040Sstevel@tonic-gate mod_hash_destroy_ptrhash(t->tr_addr_hash); 11050Sstevel@tonic-gate else 11060Sstevel@tonic-gate mod_hash_destroy_hash(t->tr_addr_hash); 11070Sstevel@tonic-gate t->tr_addr_hash = NULL; 11080Sstevel@tonic-gate } 11090Sstevel@tonic-gate 11100Sstevel@tonic-gate kmem_cache_destroy(tl_cache); 11110Sstevel@tonic-gate tl_cache = NULL; 11120Sstevel@tonic-gate id_space_destroy(tl_minors); 11130Sstevel@tonic-gate tl_minors = NULL; 11140Sstevel@tonic-gate ddi_remove_minor_node(devi, NULL); 11150Sstevel@tonic-gate return (DDI_SUCCESS); 11160Sstevel@tonic-gate } 11170Sstevel@tonic-gate 11180Sstevel@tonic-gate /* ARGSUSED */ 11190Sstevel@tonic-gate static int 11200Sstevel@tonic-gate tl_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 11210Sstevel@tonic-gate { 11220Sstevel@tonic-gate 11230Sstevel@tonic-gate int retcode = DDI_FAILURE; 11240Sstevel@tonic-gate 11250Sstevel@tonic-gate switch (infocmd) { 11260Sstevel@tonic-gate 11270Sstevel@tonic-gate case DDI_INFO_DEVT2DEVINFO: 11280Sstevel@tonic-gate if (tl_dip != NULL) { 11290Sstevel@tonic-gate *result = (void *)tl_dip; 11300Sstevel@tonic-gate retcode = DDI_SUCCESS; 11310Sstevel@tonic-gate } 11320Sstevel@tonic-gate break; 11330Sstevel@tonic-gate 11340Sstevel@tonic-gate case DDI_INFO_DEVT2INSTANCE: 11350Sstevel@tonic-gate *result = (void *)0; 11360Sstevel@tonic-gate retcode = DDI_SUCCESS; 11370Sstevel@tonic-gate break; 11380Sstevel@tonic-gate 11390Sstevel@tonic-gate default: 11400Sstevel@tonic-gate break; 11410Sstevel@tonic-gate } 11420Sstevel@tonic-gate return (retcode); 11430Sstevel@tonic-gate } 11440Sstevel@tonic-gate 11450Sstevel@tonic-gate /* 11460Sstevel@tonic-gate * Endpoint reference management. 
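 * Every tl_refhold() must be paired with a tl_refrele().  Hash lookups
 * take their hold atomically through tl_find_callback(), roughly
 * (illustrative sketch; addr_hash, key and peer are placeholders):
 *
 *	if (mod_hash_find_cb(addr_hash, key, (mod_hash_val_t *)&peer,
 *	    tl_find_callback) == 0) {
 *		... use peer ...
 *		tl_refrele(peer);
 *	}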
11470Sstevel@tonic-gate */ 11480Sstevel@tonic-gate static void 11490Sstevel@tonic-gate tl_refhold(tl_endpt_t *tep) 11500Sstevel@tonic-gate { 11510Sstevel@tonic-gate atomic_add_32(&tep->te_refcnt, 1); 11520Sstevel@tonic-gate } 11530Sstevel@tonic-gate 11540Sstevel@tonic-gate static void 11550Sstevel@tonic-gate tl_refrele(tl_endpt_t *tep) 11560Sstevel@tonic-gate { 11570Sstevel@tonic-gate ASSERT(tep->te_refcnt != 0); 11580Sstevel@tonic-gate 11590Sstevel@tonic-gate if (atomic_add_32_nv(&tep->te_refcnt, -1) == 0) 11600Sstevel@tonic-gate tl_free(tep); 11610Sstevel@tonic-gate } 11620Sstevel@tonic-gate 11630Sstevel@tonic-gate /*ARGSUSED*/ 11640Sstevel@tonic-gate static int 11650Sstevel@tonic-gate tl_constructor(void *buf, void *cdrarg, int kmflags) 11660Sstevel@tonic-gate { 11670Sstevel@tonic-gate tl_endpt_t *tep = buf; 11680Sstevel@tonic-gate 11690Sstevel@tonic-gate bzero(tep, sizeof (tl_endpt_t)); 11700Sstevel@tonic-gate mutex_init(&tep->te_closelock, NULL, MUTEX_DEFAULT, NULL); 11710Sstevel@tonic-gate cv_init(&tep->te_closecv, NULL, CV_DEFAULT, NULL); 11720Sstevel@tonic-gate mutex_init(&tep->te_srv_lock, NULL, MUTEX_DEFAULT, NULL); 11730Sstevel@tonic-gate cv_init(&tep->te_srv_cv, NULL, CV_DEFAULT, NULL); 11740Sstevel@tonic-gate mutex_init(&tep->te_ser_lock, NULL, MUTEX_DEFAULT, NULL); 11750Sstevel@tonic-gate 11760Sstevel@tonic-gate return (0); 11770Sstevel@tonic-gate } 11780Sstevel@tonic-gate 11790Sstevel@tonic-gate /*ARGSUSED*/ 11800Sstevel@tonic-gate static void 11810Sstevel@tonic-gate tl_destructor(void *buf, void *cdrarg) 11820Sstevel@tonic-gate { 11830Sstevel@tonic-gate tl_endpt_t *tep = buf; 11840Sstevel@tonic-gate 11850Sstevel@tonic-gate mutex_destroy(&tep->te_closelock); 11860Sstevel@tonic-gate cv_destroy(&tep->te_closecv); 11870Sstevel@tonic-gate mutex_destroy(&tep->te_srv_lock); 11880Sstevel@tonic-gate cv_destroy(&tep->te_srv_cv); 11890Sstevel@tonic-gate mutex_destroy(&tep->te_ser_lock); 11900Sstevel@tonic-gate } 11910Sstevel@tonic-gate 11920Sstevel@tonic-gate static void 11930Sstevel@tonic-gate tl_free(tl_endpt_t *tep) 11940Sstevel@tonic-gate { 11950Sstevel@tonic-gate ASSERT(tep->te_refcnt == 0); 11960Sstevel@tonic-gate ASSERT(tep->te_transport != NULL); 11970Sstevel@tonic-gate ASSERT(tep->te_rq == NULL); 11980Sstevel@tonic-gate ASSERT(tep->te_wq == NULL); 11990Sstevel@tonic-gate ASSERT(tep->te_ser != NULL); 12000Sstevel@tonic-gate ASSERT(tep->te_ser_count == 0); 12010Sstevel@tonic-gate ASSERT(! 
(tep->te_flag & TL_ADDRHASHED)); 12020Sstevel@tonic-gate 12030Sstevel@tonic-gate if (IS_SOCKET(tep)) { 12040Sstevel@tonic-gate ASSERT(tep->te_alen == TL_SOUX_ADDRLEN); 12050Sstevel@tonic-gate ASSERT(tep->te_abuf == &tep->te_uxaddr); 12060Sstevel@tonic-gate ASSERT(tep->te_vp == (void *)(uintptr_t)tep->te_minor); 12070Sstevel@tonic-gate ASSERT(tep->te_magic == SOU_MAGIC_IMPLICIT); 12080Sstevel@tonic-gate } else if (tep->te_abuf != NULL) { 12090Sstevel@tonic-gate kmem_free(tep->te_abuf, tep->te_alen); 12100Sstevel@tonic-gate tep->te_alen = -1; /* uninitialized */ 12110Sstevel@tonic-gate tep->te_abuf = NULL; 12120Sstevel@tonic-gate } else { 12130Sstevel@tonic-gate ASSERT(tep->te_alen == -1); 12140Sstevel@tonic-gate } 12150Sstevel@tonic-gate 12160Sstevel@tonic-gate id_free(tl_minors, tep->te_minor); 12170Sstevel@tonic-gate ASSERT(tep->te_credp == NULL); 12180Sstevel@tonic-gate 12190Sstevel@tonic-gate if (tep->te_hash_hndl != NULL) 12200Sstevel@tonic-gate mod_hash_cancel(tep->te_addrhash, &tep->te_hash_hndl); 12210Sstevel@tonic-gate 12220Sstevel@tonic-gate if (IS_COTS(tep)) { 12230Sstevel@tonic-gate TL_REMOVE_PEER(tep->te_conp); 12240Sstevel@tonic-gate TL_REMOVE_PEER(tep->te_oconp); 12250Sstevel@tonic-gate tl_serializer_refrele(tep->te_ser); 12260Sstevel@tonic-gate tep->te_ser = NULL; 12270Sstevel@tonic-gate ASSERT(tep->te_nicon == 0); 12280Sstevel@tonic-gate ASSERT(list_head(&tep->te_iconp) == NULL); 12290Sstevel@tonic-gate } else { 12300Sstevel@tonic-gate ASSERT(tep->te_lastep == NULL); 12310Sstevel@tonic-gate ASSERT(list_head(&tep->te_flowlist) == NULL); 12320Sstevel@tonic-gate ASSERT(tep->te_flowq == NULL); 12330Sstevel@tonic-gate } 12340Sstevel@tonic-gate 12350Sstevel@tonic-gate ASSERT(tep->te_bufcid == 0); 12360Sstevel@tonic-gate ASSERT(tep->te_timoutid == 0); 12370Sstevel@tonic-gate bzero(&tep->te_ap, sizeof (tep->te_ap)); 12380Sstevel@tonic-gate tep->te_acceptor_id = 0; 12390Sstevel@tonic-gate 12400Sstevel@tonic-gate ASSERT(tep->te_closewait == 0); 12410Sstevel@tonic-gate ASSERT(!tep->te_rsrv_active); 12420Sstevel@tonic-gate ASSERT(!tep->te_wsrv_active); 12430Sstevel@tonic-gate tep->te_closing = 0; 12440Sstevel@tonic-gate tep->te_nowsrv = B_FALSE; 12450Sstevel@tonic-gate tep->te_flag = 0; 12460Sstevel@tonic-gate 12470Sstevel@tonic-gate kmem_cache_free(tl_cache, tep); 12480Sstevel@tonic-gate } 12490Sstevel@tonic-gate 12500Sstevel@tonic-gate /* 12510Sstevel@tonic-gate * Allocate/free reference-counted wrappers for serializers. 
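 * The wrapper exists because one serializer may be shared by several
 * endpoints (all connectionless endpoints of a transport share
 * tr_serializer, for instance), so the underlying serializer_t can only
 * be destroyed when the last user drops its hold via
 * tl_serializer_refrele().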
12520Sstevel@tonic-gate */ 12530Sstevel@tonic-gate static tl_serializer_t * 12540Sstevel@tonic-gate tl_serializer_alloc(int flags) 12550Sstevel@tonic-gate { 12560Sstevel@tonic-gate tl_serializer_t *s = kmem_alloc(sizeof (tl_serializer_t), flags); 12570Sstevel@tonic-gate serializer_t *ser; 12580Sstevel@tonic-gate 12590Sstevel@tonic-gate if (s == NULL) 12600Sstevel@tonic-gate return (NULL); 12610Sstevel@tonic-gate 12620Sstevel@tonic-gate ser = serializer_create(flags); 12630Sstevel@tonic-gate 12640Sstevel@tonic-gate if (ser == NULL) { 12650Sstevel@tonic-gate kmem_free(s, sizeof (tl_serializer_t)); 12660Sstevel@tonic-gate return (NULL); 12670Sstevel@tonic-gate } 12680Sstevel@tonic-gate 12690Sstevel@tonic-gate s->ts_refcnt = 1; 12700Sstevel@tonic-gate s->ts_serializer = ser; 12710Sstevel@tonic-gate return (s); 12720Sstevel@tonic-gate } 12730Sstevel@tonic-gate 12740Sstevel@tonic-gate static void 12750Sstevel@tonic-gate tl_serializer_refhold(tl_serializer_t *s) 12760Sstevel@tonic-gate { 12770Sstevel@tonic-gate atomic_add_32(&s->ts_refcnt, 1); 12780Sstevel@tonic-gate } 12790Sstevel@tonic-gate 12800Sstevel@tonic-gate static void 12810Sstevel@tonic-gate tl_serializer_refrele(tl_serializer_t *s) 12820Sstevel@tonic-gate { 12830Sstevel@tonic-gate if (atomic_add_32_nv(&s->ts_refcnt, -1) == 0) { 12840Sstevel@tonic-gate serializer_destroy(s->ts_serializer); 12850Sstevel@tonic-gate kmem_free(s, sizeof (tl_serializer_t)); 12860Sstevel@tonic-gate } 12870Sstevel@tonic-gate } 12880Sstevel@tonic-gate 12890Sstevel@tonic-gate /* 12900Sstevel@tonic-gate * Post a request on the endpoint serializer. For COTS transports keep track of 12910Sstevel@tonic-gate * the number of pending requests. 12920Sstevel@tonic-gate */ 12930Sstevel@tonic-gate static void 12940Sstevel@tonic-gate tl_serializer_enter(tl_endpt_t *tep, tlproc_t tlproc, mblk_t *mp) 12950Sstevel@tonic-gate { 12960Sstevel@tonic-gate if (IS_COTS(tep)) { 12970Sstevel@tonic-gate mutex_enter(&tep->te_ser_lock); 12980Sstevel@tonic-gate tep->te_ser_count++; 12990Sstevel@tonic-gate mutex_exit(&tep->te_ser_lock); 13000Sstevel@tonic-gate } 13010Sstevel@tonic-gate serializer_enter(tep->te_serializer, (srproc_t *)tlproc, mp, tep); 13020Sstevel@tonic-gate } 13030Sstevel@tonic-gate 13040Sstevel@tonic-gate /* 13050Sstevel@tonic-gate * Complete processing the request on the serializer. Decrement the counter for 13060Sstevel@tonic-gate * pending requests for COTS transports. 13070Sstevel@tonic-gate */ 13080Sstevel@tonic-gate static void 13090Sstevel@tonic-gate tl_serializer_exit(tl_endpt_t *tep) 13100Sstevel@tonic-gate { 13110Sstevel@tonic-gate if (IS_COTS(tep)) { 13120Sstevel@tonic-gate mutex_enter(&tep->te_ser_lock); 13130Sstevel@tonic-gate ASSERT(tep->te_ser_count != 0); 13140Sstevel@tonic-gate tep->te_ser_count--; 13150Sstevel@tonic-gate mutex_exit(&tep->te_ser_lock); 13160Sstevel@tonic-gate } 13170Sstevel@tonic-gate } 13180Sstevel@tonic-gate 13190Sstevel@tonic-gate /* 13200Sstevel@tonic-gate * Hash management functions. 13210Sstevel@tonic-gate */ 13220Sstevel@tonic-gate 13230Sstevel@tonic-gate /* 13240Sstevel@tonic-gate * Return TRUE if two addresses are equal, false otherwise. 
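 * Note that equality is zone-scoped: identical byte strings bound in
 * different zones compare as different addresses.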
13250Sstevel@tonic-gate */ 13260Sstevel@tonic-gate static boolean_t 13270Sstevel@tonic-gate tl_eqaddr(tl_addr_t *ap1, tl_addr_t *ap2) 13280Sstevel@tonic-gate { 13290Sstevel@tonic-gate return ((ap1->ta_alen > 0) && 13300Sstevel@tonic-gate (ap1->ta_alen == ap2->ta_alen) && 13310Sstevel@tonic-gate (ap1->ta_zoneid == ap2->ta_zoneid) && 13320Sstevel@tonic-gate (bcmp(ap1->ta_abuf, ap2->ta_abuf, ap1->ta_alen) == 0)); 13330Sstevel@tonic-gate } 13340Sstevel@tonic-gate 13350Sstevel@tonic-gate /* 13360Sstevel@tonic-gate * This function is called whenever an endpoint is found in the hash table. 13370Sstevel@tonic-gate */ 13380Sstevel@tonic-gate /* ARGSUSED0 */ 13390Sstevel@tonic-gate static void 13400Sstevel@tonic-gate tl_find_callback(mod_hash_key_t key, mod_hash_val_t val) 13410Sstevel@tonic-gate { 13420Sstevel@tonic-gate tl_refhold((tl_endpt_t *)val); 13430Sstevel@tonic-gate } 13440Sstevel@tonic-gate 13450Sstevel@tonic-gate /* 13460Sstevel@tonic-gate * Address hash function. 13470Sstevel@tonic-gate */ 13480Sstevel@tonic-gate /* ARGSUSED */ 13490Sstevel@tonic-gate static uint_t 13500Sstevel@tonic-gate tl_hash_by_addr(void *hash_data, mod_hash_key_t key) 13510Sstevel@tonic-gate { 13520Sstevel@tonic-gate tl_addr_t *ap = (tl_addr_t *)key; 13530Sstevel@tonic-gate size_t len = ap->ta_alen; 13540Sstevel@tonic-gate uchar_t *p = ap->ta_abuf; 13550Sstevel@tonic-gate uint_t i, g; 13560Sstevel@tonic-gate 13570Sstevel@tonic-gate ASSERT((len > 0) && (p != NULL)); 13580Sstevel@tonic-gate 13590Sstevel@tonic-gate for (i = ap->ta_zoneid; len -- != 0; p++) { 13600Sstevel@tonic-gate i = (i << 4) + (*p); 13610Sstevel@tonic-gate if ((g = (i & 0xf0000000U)) != 0) { 13620Sstevel@tonic-gate i ^= (g >> 24); 13630Sstevel@tonic-gate i ^= g; 13640Sstevel@tonic-gate } 13650Sstevel@tonic-gate } 13660Sstevel@tonic-gate return (i); 13670Sstevel@tonic-gate } 13680Sstevel@tonic-gate 13690Sstevel@tonic-gate /* 13700Sstevel@tonic-gate * This function is used by hash lookups. It compares two generic addresses. 13710Sstevel@tonic-gate */ 13720Sstevel@tonic-gate static int 13730Sstevel@tonic-gate tl_hash_cmp_addr(mod_hash_key_t key1, mod_hash_key_t key2) 13740Sstevel@tonic-gate { 13750Sstevel@tonic-gate #ifdef DEBUG 13760Sstevel@tonic-gate tl_addr_t *ap1 = (tl_addr_t *)key1; 13770Sstevel@tonic-gate tl_addr_t *ap2 = (tl_addr_t *)key2; 13780Sstevel@tonic-gate 13790Sstevel@tonic-gate ASSERT(key1 != NULL); 13800Sstevel@tonic-gate ASSERT(key2 != NULL); 13810Sstevel@tonic-gate 13820Sstevel@tonic-gate ASSERT(ap1->ta_abuf != NULL); 13830Sstevel@tonic-gate ASSERT(ap2->ta_abuf != NULL); 13840Sstevel@tonic-gate ASSERT(ap1->ta_alen > 0); 13850Sstevel@tonic-gate ASSERT(ap2->ta_alen > 0); 13860Sstevel@tonic-gate #endif 13870Sstevel@tonic-gate 13880Sstevel@tonic-gate return (! tl_eqaddr((tl_addr_t *)key1, (tl_addr_t *)key2)); 13890Sstevel@tonic-gate } 13900Sstevel@tonic-gate 13910Sstevel@tonic-gate /* 13920Sstevel@tonic-gate * Prevent endpoint from closing if possible. 13930Sstevel@tonic-gate * Return B_TRUE on success, B_FALSE on failure. 13940Sstevel@tonic-gate */ 13950Sstevel@tonic-gate static boolean_t 13960Sstevel@tonic-gate tl_noclose(tl_endpt_t *tep) 13970Sstevel@tonic-gate { 13980Sstevel@tonic-gate boolean_t rc = B_FALSE; 13990Sstevel@tonic-gate 14000Sstevel@tonic-gate mutex_enter(&tep->te_closelock); 14010Sstevel@tonic-gate if (! 
tep->te_closing) { 14020Sstevel@tonic-gate ASSERT(tep->te_closewait == 0); 14030Sstevel@tonic-gate tep->te_closewait++; 14040Sstevel@tonic-gate rc = B_TRUE; 14050Sstevel@tonic-gate } 14060Sstevel@tonic-gate mutex_exit(&tep->te_closelock); 14070Sstevel@tonic-gate return (rc); 14080Sstevel@tonic-gate } 14090Sstevel@tonic-gate 14100Sstevel@tonic-gate /* 14110Sstevel@tonic-gate * Allow endpoint to close if needed. 14120Sstevel@tonic-gate */ 14130Sstevel@tonic-gate static void 14140Sstevel@tonic-gate tl_closeok(tl_endpt_t *tep) 14150Sstevel@tonic-gate { 14160Sstevel@tonic-gate ASSERT(tep->te_closewait > 0); 14170Sstevel@tonic-gate mutex_enter(&tep->te_closelock); 14180Sstevel@tonic-gate ASSERT(tep->te_closewait == 1); 14190Sstevel@tonic-gate tep->te_closewait--; 14200Sstevel@tonic-gate cv_signal(&tep->te_closecv); 14210Sstevel@tonic-gate mutex_exit(&tep->te_closelock); 14220Sstevel@tonic-gate } 14230Sstevel@tonic-gate 14240Sstevel@tonic-gate /* 14250Sstevel@tonic-gate * STREAMS open entry point. 14260Sstevel@tonic-gate */ 14270Sstevel@tonic-gate /* ARGSUSED */ 14280Sstevel@tonic-gate static int 14290Sstevel@tonic-gate tl_open(queue_t *rq, dev_t *devp, int oflag, int sflag, cred_t *credp) 14300Sstevel@tonic-gate { 14310Sstevel@tonic-gate tl_endpt_t *tep; 14320Sstevel@tonic-gate minor_t minor = getminor(*devp); 14330Sstevel@tonic-gate 14340Sstevel@tonic-gate /* 14350Sstevel@tonic-gate * Driver is called directly. Both CLONEOPEN and MODOPEN 14360Sstevel@tonic-gate * are illegal 14370Sstevel@tonic-gate */ 14380Sstevel@tonic-gate if ((sflag == CLONEOPEN) || (sflag == MODOPEN)) 14390Sstevel@tonic-gate return (ENXIO); 14400Sstevel@tonic-gate 14410Sstevel@tonic-gate if (rq->q_ptr != NULL) 14420Sstevel@tonic-gate return (0); 14430Sstevel@tonic-gate 14440Sstevel@tonic-gate /* Minor number should specify the mode used for the driver. */ 14450Sstevel@tonic-gate if ((minor >= TL_UNUSED)) 14460Sstevel@tonic-gate return (ENXIO); 14470Sstevel@tonic-gate 14480Sstevel@tonic-gate if (oflag & SO_SOCKSTR) { 14490Sstevel@tonic-gate minor |= TL_SOCKET; 14500Sstevel@tonic-gate } 14510Sstevel@tonic-gate 14520Sstevel@tonic-gate tep = kmem_cache_alloc(tl_cache, KM_SLEEP); 14530Sstevel@tonic-gate tep->te_refcnt = 1; 14540Sstevel@tonic-gate tep->te_cpid = curproc->p_pid; 14550Sstevel@tonic-gate rq->q_ptr = WR(rq)->q_ptr = tep; 14560Sstevel@tonic-gate tep->te_state = TS_UNBND; 14570Sstevel@tonic-gate tep->te_credp = credp; 14580Sstevel@tonic-gate crhold(credp); 14590Sstevel@tonic-gate tep->te_zoneid = getzoneid(); 14600Sstevel@tonic-gate 14610Sstevel@tonic-gate tep->te_flag = minor & TL_MINOR_MASK; 14620Sstevel@tonic-gate tep->te_transport = &tl_transports[minor]; 14630Sstevel@tonic-gate 14640Sstevel@tonic-gate /* Allocate a unique minor number for this instance. */ 14650Sstevel@tonic-gate tep->te_minor = (minor_t)id_alloc(tl_minors); 14660Sstevel@tonic-gate 14670Sstevel@tonic-gate /* Reserve hash handle for bind(). 
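 * Reserving the handle up front means the later insertion into
 * te_addrhash during bind processing can consume the pre-allocated
 * entry instead of allocating memory at that point; a handle that is
 * never used is returned through mod_hash_cancel() in tl_free().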
*/ 14680Sstevel@tonic-gate (void) mod_hash_reserve(tep->te_addrhash, &tep->te_hash_hndl); 14690Sstevel@tonic-gate 14700Sstevel@tonic-gate /* Transport-specific initialization */ 14710Sstevel@tonic-gate if (IS_COTS(tep)) { 14720Sstevel@tonic-gate /* Use private serializer */ 14730Sstevel@tonic-gate tep->te_ser = tl_serializer_alloc(KM_SLEEP); 14740Sstevel@tonic-gate 14750Sstevel@tonic-gate /* Create list for pending connections */ 14760Sstevel@tonic-gate list_create(&tep->te_iconp, sizeof (tl_icon_t), 14770Sstevel@tonic-gate offsetof(tl_icon_t, ti_node)); 14780Sstevel@tonic-gate tep->te_qlen = 0; 14790Sstevel@tonic-gate tep->te_nicon = 0; 14800Sstevel@tonic-gate tep->te_oconp = NULL; 14810Sstevel@tonic-gate tep->te_conp = NULL; 14820Sstevel@tonic-gate } else { 14830Sstevel@tonic-gate /* Use shared serializer */ 14840Sstevel@tonic-gate tep->te_ser = tep->te_transport->tr_serializer; 14850Sstevel@tonic-gate bzero(&tep->te_flows, sizeof (list_node_t)); 14860Sstevel@tonic-gate /* Create list for flow control */ 14870Sstevel@tonic-gate list_create(&tep->te_flowlist, sizeof (tl_endpt_t), 14880Sstevel@tonic-gate offsetof(tl_endpt_t, te_flows)); 14890Sstevel@tonic-gate tep->te_flowq = NULL; 14900Sstevel@tonic-gate tep->te_lastep = NULL; 14910Sstevel@tonic-gate 14920Sstevel@tonic-gate } 14930Sstevel@tonic-gate 14940Sstevel@tonic-gate /* Initialize endpoint address */ 14950Sstevel@tonic-gate if (IS_SOCKET(tep)) { 14960Sstevel@tonic-gate /* Socket-specific address handling. */ 14970Sstevel@tonic-gate tep->te_alen = TL_SOUX_ADDRLEN; 14980Sstevel@tonic-gate tep->te_abuf = &tep->te_uxaddr; 14990Sstevel@tonic-gate tep->te_vp = (void *)(uintptr_t)tep->te_minor; 15000Sstevel@tonic-gate tep->te_magic = SOU_MAGIC_IMPLICIT; 15010Sstevel@tonic-gate } else { 15020Sstevel@tonic-gate tep->te_alen = -1; 15030Sstevel@tonic-gate tep->te_abuf = NULL; 15040Sstevel@tonic-gate } 15050Sstevel@tonic-gate 15060Sstevel@tonic-gate /* clone the driver */ 15070Sstevel@tonic-gate *devp = makedevice(getmajor(*devp), tep->te_minor); 15080Sstevel@tonic-gate 15090Sstevel@tonic-gate tep->te_rq = rq; 15100Sstevel@tonic-gate tep->te_wq = WR(rq); 15110Sstevel@tonic-gate 15120Sstevel@tonic-gate #ifdef _ILP32 15130Sstevel@tonic-gate if (IS_SOCKET(tep)) 15140Sstevel@tonic-gate tep->te_acceptor_id = tep->te_minor; 15150Sstevel@tonic-gate else 15160Sstevel@tonic-gate tep->te_acceptor_id = (t_uscalar_t)rq; 15170Sstevel@tonic-gate #else 15180Sstevel@tonic-gate tep->te_acceptor_id = tep->te_minor; 15190Sstevel@tonic-gate #endif /* _ILP32 */ 15200Sstevel@tonic-gate 15210Sstevel@tonic-gate 15220Sstevel@tonic-gate qprocson(rq); 15230Sstevel@tonic-gate 15240Sstevel@tonic-gate /* 15250Sstevel@tonic-gate * Insert acceptor ID in the hash. The AI hash always sleeps on 15260Sstevel@tonic-gate * insertion so insertion can't fail. 
15270Sstevel@tonic-gate */ 15280Sstevel@tonic-gate (void) mod_hash_insert(tep->te_transport->tr_ai_hash, 15290Sstevel@tonic-gate (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id, 15300Sstevel@tonic-gate (mod_hash_val_t)tep); 15310Sstevel@tonic-gate 15320Sstevel@tonic-gate return (0); 15330Sstevel@tonic-gate } 15340Sstevel@tonic-gate 15350Sstevel@tonic-gate /* ARGSUSED1 */ 15360Sstevel@tonic-gate static int 15370Sstevel@tonic-gate tl_close(queue_t *rq, int flag, cred_t *credp) 15380Sstevel@tonic-gate { 15390Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr; 15400Sstevel@tonic-gate tl_endpt_t *elp = NULL; 15410Sstevel@tonic-gate queue_t *wq = tep->te_wq; 15420Sstevel@tonic-gate int rc; 15430Sstevel@tonic-gate 15440Sstevel@tonic-gate ASSERT(wq == WR(rq)); 15450Sstevel@tonic-gate 15460Sstevel@tonic-gate /* 15470Sstevel@tonic-gate * Remove the endpoint from acceptor hash. 15480Sstevel@tonic-gate */ 15490Sstevel@tonic-gate rc = mod_hash_remove(tep->te_transport->tr_ai_hash, 15500Sstevel@tonic-gate (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id, 15510Sstevel@tonic-gate (mod_hash_val_t *)&elp); 15520Sstevel@tonic-gate ASSERT(rc == 0 && tep == elp); 15530Sstevel@tonic-gate if ((rc != 0) || (tep != elp)) { 15540Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 15555240Snordmark SL_TRACE|SL_ERROR, 15565240Snordmark "tl_close:inconsistency in AI hash")); 15570Sstevel@tonic-gate } 15580Sstevel@tonic-gate 15590Sstevel@tonic-gate /* 15600Sstevel@tonic-gate * Wait till close is safe, then mark endpoint as closing. 15610Sstevel@tonic-gate */ 15620Sstevel@tonic-gate mutex_enter(&tep->te_closelock); 15630Sstevel@tonic-gate while (tep->te_closewait) 15640Sstevel@tonic-gate cv_wait(&tep->te_closecv, &tep->te_closelock); 15650Sstevel@tonic-gate tep->te_closing = B_TRUE; 15660Sstevel@tonic-gate /* 15670Sstevel@tonic-gate * Will wait for the serializer part of the close to finish, so set 15680Sstevel@tonic-gate * te_closewait now. 15690Sstevel@tonic-gate */ 15700Sstevel@tonic-gate tep->te_closewait = 1; 15710Sstevel@tonic-gate tep->te_nowsrv = B_FALSE; 15720Sstevel@tonic-gate mutex_exit(&tep->te_closelock); 15730Sstevel@tonic-gate 15740Sstevel@tonic-gate /* 15750Sstevel@tonic-gate * tl_close_ser doesn't drop reference, so no need to tl_refhold. 15760Sstevel@tonic-gate * It is safe because close will wait for tl_close_ser to finish. 15770Sstevel@tonic-gate */ 15780Sstevel@tonic-gate tl_serializer_enter(tep, tl_close_ser, &tep->te_closemp); 15790Sstevel@tonic-gate 15800Sstevel@tonic-gate /* 15810Sstevel@tonic-gate * Wait for the first phase of close to complete before qprocsoff(). 15820Sstevel@tonic-gate */ 15830Sstevel@tonic-gate mutex_enter(&tep->te_closelock); 15840Sstevel@tonic-gate while (tep->te_closewait) 15850Sstevel@tonic-gate cv_wait(&tep->te_closecv, &tep->te_closelock); 15860Sstevel@tonic-gate mutex_exit(&tep->te_closelock); 15870Sstevel@tonic-gate 15880Sstevel@tonic-gate qprocsoff(rq); 15890Sstevel@tonic-gate 15900Sstevel@tonic-gate if (tep->te_bufcid) { 15910Sstevel@tonic-gate qunbufcall(rq, tep->te_bufcid); 15920Sstevel@tonic-gate tep->te_bufcid = 0; 15930Sstevel@tonic-gate } 15940Sstevel@tonic-gate if (tep->te_timoutid) { 15950Sstevel@tonic-gate (void) quntimeout(rq, tep->te_timoutid); 15960Sstevel@tonic-gate tep->te_timoutid = 0; 15970Sstevel@tonic-gate } 15980Sstevel@tonic-gate 15990Sstevel@tonic-gate /* 16000Sstevel@tonic-gate * Finish close behind serializer. 
16010Sstevel@tonic-gate * 16020Sstevel@tonic-gate * For a CLTS endpoint increase a refcount and continue close processing 16030Sstevel@tonic-gate * with serializer protection. This processing may happen asynchronously 16040Sstevel@tonic-gate * with the completion of tl_close(). 16050Sstevel@tonic-gate * 16060Sstevel@tonic-gate * Fot a COTS endpoint wait before destroying tep since the serializer 16070Sstevel@tonic-gate * may go away together with tep and we need to destroy serializer 16080Sstevel@tonic-gate * outside of serializer context. 16090Sstevel@tonic-gate */ 16100Sstevel@tonic-gate ASSERT(tep->te_closewait == 0); 16110Sstevel@tonic-gate if (IS_COTS(tep)) 16120Sstevel@tonic-gate tep->te_closewait = 1; 16130Sstevel@tonic-gate else 16140Sstevel@tonic-gate tl_refhold(tep); 16150Sstevel@tonic-gate 16160Sstevel@tonic-gate tl_serializer_enter(tep, tl_close_finish_ser, &tep->te_closemp); 16170Sstevel@tonic-gate 16180Sstevel@tonic-gate /* 16190Sstevel@tonic-gate * For connection-oriented transports wait for all serializer activity 16200Sstevel@tonic-gate * to settle down. 16210Sstevel@tonic-gate */ 16220Sstevel@tonic-gate if (IS_COTS(tep)) { 16230Sstevel@tonic-gate mutex_enter(&tep->te_closelock); 16240Sstevel@tonic-gate while (tep->te_closewait) 16250Sstevel@tonic-gate cv_wait(&tep->te_closecv, &tep->te_closelock); 16260Sstevel@tonic-gate mutex_exit(&tep->te_closelock); 16270Sstevel@tonic-gate } 16280Sstevel@tonic-gate 16290Sstevel@tonic-gate crfree(tep->te_credp); 16300Sstevel@tonic-gate tep->te_credp = NULL; 16310Sstevel@tonic-gate tep->te_wq = NULL; 16320Sstevel@tonic-gate tl_refrele(tep); 16330Sstevel@tonic-gate /* 16340Sstevel@tonic-gate * tep is likely to be destroyed now, so can't reference it any more. 16350Sstevel@tonic-gate */ 16360Sstevel@tonic-gate 16370Sstevel@tonic-gate rq->q_ptr = wq->q_ptr = NULL; 16380Sstevel@tonic-gate return (0); 16390Sstevel@tonic-gate } 16400Sstevel@tonic-gate 16410Sstevel@tonic-gate /* 16420Sstevel@tonic-gate * First phase of close processing done behind the serializer. 16430Sstevel@tonic-gate * 16440Sstevel@tonic-gate * Do not drop the reference in the end - tl_close() wants this reference to 16450Sstevel@tonic-gate * stay. 16460Sstevel@tonic-gate */ 16470Sstevel@tonic-gate /* ARGSUSED0 */ 16480Sstevel@tonic-gate static void 16490Sstevel@tonic-gate tl_close_ser(mblk_t *mp, tl_endpt_t *tep) 16500Sstevel@tonic-gate { 16510Sstevel@tonic-gate ASSERT(tep->te_closing); 16520Sstevel@tonic-gate ASSERT(tep->te_closewait == 1); 16530Sstevel@tonic-gate ASSERT(!(tep->te_flag & TL_CLOSE_SER)); 16540Sstevel@tonic-gate 16550Sstevel@tonic-gate tep->te_flag |= TL_CLOSE_SER; 16560Sstevel@tonic-gate 16570Sstevel@tonic-gate /* 16580Sstevel@tonic-gate * Drain out all messages on queue except for TL_TICOTS where the 16590Sstevel@tonic-gate * abortive release semantics permit discarding of data on close 16600Sstevel@tonic-gate */ 16610Sstevel@tonic-gate if (tep->te_wq->q_first && (IS_CLTS(tep) || IS_COTSORD(tep))) { 16620Sstevel@tonic-gate tl_wsrv_ser(NULL, tep); 16630Sstevel@tonic-gate } 16640Sstevel@tonic-gate 16650Sstevel@tonic-gate /* Remove address from hash table. */ 16660Sstevel@tonic-gate tl_addr_unbind(tep); 16670Sstevel@tonic-gate /* 16680Sstevel@tonic-gate * qprocsoff() gets confused when q->q_next is not NULL on the write 16690Sstevel@tonic-gate * queue of the driver, so clear these before qprocsoff() is called. 16700Sstevel@tonic-gate * Also clear q_next for the peer since this queue is going away. 
16710Sstevel@tonic-gate */ 16720Sstevel@tonic-gate if (IS_COTS(tep) && !IS_SOCKET(tep)) { 16730Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_conp; 16740Sstevel@tonic-gate 16750Sstevel@tonic-gate tep->te_wq->q_next = NULL; 16760Sstevel@tonic-gate if ((peer_tep != NULL) && !peer_tep->te_closing) 16770Sstevel@tonic-gate peer_tep->te_wq->q_next = NULL; 16780Sstevel@tonic-gate } 16790Sstevel@tonic-gate 16800Sstevel@tonic-gate tep->te_rq = NULL; 16810Sstevel@tonic-gate 16820Sstevel@tonic-gate /* wake up tl_close() */ 16830Sstevel@tonic-gate tl_closeok(tep); 16840Sstevel@tonic-gate tl_serializer_exit(tep); 16850Sstevel@tonic-gate } 16860Sstevel@tonic-gate 16870Sstevel@tonic-gate /* 16880Sstevel@tonic-gate * Second phase of tl_close(). Should wakeup tl_close() for COTS mode and drop 16890Sstevel@tonic-gate * the reference for CLTS. 16900Sstevel@tonic-gate * 16910Sstevel@tonic-gate * Called from serializer. Should drop reference count for CLTS only. 16920Sstevel@tonic-gate */ 16930Sstevel@tonic-gate /* ARGSUSED0 */ 16940Sstevel@tonic-gate static void 16950Sstevel@tonic-gate tl_close_finish_ser(mblk_t *mp, tl_endpt_t *tep) 16960Sstevel@tonic-gate { 16970Sstevel@tonic-gate ASSERT(tep->te_closing); 16980Sstevel@tonic-gate ASSERT(IMPLY(IS_CLTS(tep), tep->te_closewait == 0)); 16990Sstevel@tonic-gate ASSERT(IMPLY(IS_COTS(tep), tep->te_closewait == 1)); 17000Sstevel@tonic-gate 17010Sstevel@tonic-gate tep->te_state = -1; /* Uninitialized */ 17020Sstevel@tonic-gate if (IS_COTS(tep)) { 17030Sstevel@tonic-gate tl_co_unconnect(tep); 17040Sstevel@tonic-gate } else { 17050Sstevel@tonic-gate /* Connectionless specific cleanup */ 17060Sstevel@tonic-gate TL_REMOVE_PEER(tep->te_lastep); 17070Sstevel@tonic-gate /* 17080Sstevel@tonic-gate * Backenable anybody that is flow controlled waiting for 17090Sstevel@tonic-gate * this endpoint. 17100Sstevel@tonic-gate */ 17110Sstevel@tonic-gate tl_cl_backenable(tep); 17120Sstevel@tonic-gate if (tep->te_flowq != NULL) { 17130Sstevel@tonic-gate list_remove(&(tep->te_flowq->te_flowlist), tep); 17140Sstevel@tonic-gate tep->te_flowq = NULL; 17150Sstevel@tonic-gate } 17160Sstevel@tonic-gate } 17170Sstevel@tonic-gate 17180Sstevel@tonic-gate tl_serializer_exit(tep); 17190Sstevel@tonic-gate if (IS_COTS(tep)) 17200Sstevel@tonic-gate tl_closeok(tep); 17210Sstevel@tonic-gate else 17220Sstevel@tonic-gate tl_refrele(tep); 17230Sstevel@tonic-gate } 17240Sstevel@tonic-gate 17250Sstevel@tonic-gate /* 17260Sstevel@tonic-gate * STREAMS write-side put procedure. 17270Sstevel@tonic-gate * Enter serializer for most of the processing. 17280Sstevel@tonic-gate * 17290Sstevel@tonic-gate * The T_CONN_REQ is processed outside of serializer. 
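 * (tl_conn_req() is called directly from tl_wput() below and arranges
 * any serializer entry it needs itself, presumably because connection
 * setup has to coordinate with the peer's serializer as well.)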
17300Sstevel@tonic-gate */ 17310Sstevel@tonic-gate static void 17320Sstevel@tonic-gate tl_wput(queue_t *wq, mblk_t *mp) 17330Sstevel@tonic-gate { 17340Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr; 17350Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 17360Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr; 17370Sstevel@tonic-gate tlproc_t *tl_proc = NULL; 17380Sstevel@tonic-gate 17390Sstevel@tonic-gate switch (DB_TYPE(mp)) { 17400Sstevel@tonic-gate case M_DATA: 17410Sstevel@tonic-gate /* Only valid for connection-oriented transports */ 17420Sstevel@tonic-gate if (IS_CLTS(tep)) { 17430Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 17445240Snordmark SL_TRACE|SL_ERROR, 17455240Snordmark "tl_wput:M_DATA invalid for ticlts driver")); 17460Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 1747165Sxy158873 return; 17480Sstevel@tonic-gate } 17490Sstevel@tonic-gate tl_proc = tl_wput_data_ser; 17500Sstevel@tonic-gate break; 17510Sstevel@tonic-gate 17520Sstevel@tonic-gate case M_IOCTL: 17530Sstevel@tonic-gate switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 17540Sstevel@tonic-gate case TL_IOC_CREDOPT: 17550Sstevel@tonic-gate /* FALLTHROUGH */ 17560Sstevel@tonic-gate case TL_IOC_UCREDOPT: 17570Sstevel@tonic-gate /* 17580Sstevel@tonic-gate * Serialize endpoint state change. 17590Sstevel@tonic-gate */ 17600Sstevel@tonic-gate tl_proc = tl_do_ioctl_ser; 17610Sstevel@tonic-gate break; 17620Sstevel@tonic-gate 17630Sstevel@tonic-gate default: 17640Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL); 17650Sstevel@tonic-gate return; 17660Sstevel@tonic-gate } 17670Sstevel@tonic-gate break; 17680Sstevel@tonic-gate 17690Sstevel@tonic-gate case M_FLUSH: 17700Sstevel@tonic-gate /* 17710Sstevel@tonic-gate * do canonical M_FLUSH processing 17720Sstevel@tonic-gate */ 17730Sstevel@tonic-gate if (*mp->b_rptr & FLUSHW) { 17740Sstevel@tonic-gate flushq(wq, FLUSHALL); 17750Sstevel@tonic-gate *mp->b_rptr &= ~FLUSHW; 17760Sstevel@tonic-gate } 17770Sstevel@tonic-gate if (*mp->b_rptr & FLUSHR) { 17780Sstevel@tonic-gate flushq(RD(wq), FLUSHALL); 17790Sstevel@tonic-gate qreply(wq, mp); 17800Sstevel@tonic-gate } else { 17810Sstevel@tonic-gate freemsg(mp); 17820Sstevel@tonic-gate } 17830Sstevel@tonic-gate return; 17840Sstevel@tonic-gate 17850Sstevel@tonic-gate case M_PROTO: 17860Sstevel@tonic-gate if (msz < sizeof (prim->type)) { 17870Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 17885240Snordmark SL_TRACE|SL_ERROR, 17895240Snordmark "tl_wput:M_PROTO data too short")); 17900Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 17910Sstevel@tonic-gate return; 17920Sstevel@tonic-gate } 17930Sstevel@tonic-gate switch (prim->type) { 17940Sstevel@tonic-gate case T_OPTMGMT_REQ: 17950Sstevel@tonic-gate case T_SVR4_OPTMGMT_REQ: 17960Sstevel@tonic-gate /* 17970Sstevel@tonic-gate * Process TPI option management requests immediately 17980Sstevel@tonic-gate * in put procedure regardless of in-order processing 17990Sstevel@tonic-gate * of already queued messages. 18000Sstevel@tonic-gate * (Note: This driver supports AF_UNIX socket 18010Sstevel@tonic-gate * implementation. Unless we implement this processing, 18020Sstevel@tonic-gate * setsockopt() on socket endpoint will block on flow 18030Sstevel@tonic-gate * controlled endpoints which it should not. That is 18040Sstevel@tonic-gate * required for successful execution of VSU socket tests 18050Sstevel@tonic-gate * and is consistent with BSD socket behavior). 
18060Sstevel@tonic-gate */ 18070Sstevel@tonic-gate tl_optmgmt(wq, mp); 18080Sstevel@tonic-gate return; 18090Sstevel@tonic-gate case O_T_BIND_REQ: 18100Sstevel@tonic-gate case T_BIND_REQ: 18110Sstevel@tonic-gate tl_proc = tl_bind_ser; 18120Sstevel@tonic-gate break; 18130Sstevel@tonic-gate case T_CONN_REQ: 18140Sstevel@tonic-gate if (IS_CLTS(tep)) { 18150Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 18160Sstevel@tonic-gate return; 18170Sstevel@tonic-gate } 18180Sstevel@tonic-gate tl_conn_req(wq, mp); 18190Sstevel@tonic-gate return; 18200Sstevel@tonic-gate case T_DATA_REQ: 18210Sstevel@tonic-gate case T_OPTDATA_REQ: 18220Sstevel@tonic-gate case T_EXDATA_REQ: 18230Sstevel@tonic-gate case T_ORDREL_REQ: 18240Sstevel@tonic-gate tl_proc = tl_putq_ser; 18250Sstevel@tonic-gate break; 18260Sstevel@tonic-gate case T_UNITDATA_REQ: 18270Sstevel@tonic-gate if (IS_COTS(tep) || 18280Sstevel@tonic-gate (msz < sizeof (struct T_unitdata_req))) { 18290Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 18300Sstevel@tonic-gate return; 18310Sstevel@tonic-gate } 18320Sstevel@tonic-gate if ((tep->te_state == TS_IDLE) && !wq->q_first) { 18330Sstevel@tonic-gate tl_proc = tl_unitdata_ser; 18340Sstevel@tonic-gate } else { 18350Sstevel@tonic-gate tl_proc = tl_putq_ser; 18360Sstevel@tonic-gate } 18370Sstevel@tonic-gate break; 18380Sstevel@tonic-gate default: 18390Sstevel@tonic-gate /* 18400Sstevel@tonic-gate * process in service procedure if message already 18410Sstevel@tonic-gate * queued (maintain in-order processing) 18420Sstevel@tonic-gate */ 18430Sstevel@tonic-gate if (wq->q_first != NULL) { 18440Sstevel@tonic-gate tl_proc = tl_putq_ser; 18450Sstevel@tonic-gate } else { 18460Sstevel@tonic-gate tl_proc = tl_wput_ser; 18470Sstevel@tonic-gate } 18480Sstevel@tonic-gate break; 18490Sstevel@tonic-gate } 18500Sstevel@tonic-gate break; 18510Sstevel@tonic-gate 18520Sstevel@tonic-gate case M_PCPROTO: 18530Sstevel@tonic-gate /* 18540Sstevel@tonic-gate * Check that the message has enough data to figure out TPI 18550Sstevel@tonic-gate * primitive. 18560Sstevel@tonic-gate */ 18570Sstevel@tonic-gate if (msz < sizeof (prim->type)) { 18580Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 18595240Snordmark SL_TRACE|SL_ERROR, 18605240Snordmark "tl_wput:M_PCROTO data too short")); 18610Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 18620Sstevel@tonic-gate return; 18630Sstevel@tonic-gate } 18640Sstevel@tonic-gate switch (prim->type) { 18650Sstevel@tonic-gate case T_CAPABILITY_REQ: 18660Sstevel@tonic-gate tl_capability_req(mp, tep); 18670Sstevel@tonic-gate return; 18680Sstevel@tonic-gate case T_INFO_REQ: 18690Sstevel@tonic-gate tl_proc = tl_info_req_ser; 18700Sstevel@tonic-gate break; 18710Sstevel@tonic-gate default: 18720Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 18735240Snordmark SL_TRACE|SL_ERROR, 18745240Snordmark "tl_wput:unknown TPI msg primitive")); 18750Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 18760Sstevel@tonic-gate return; 18770Sstevel@tonic-gate } 18780Sstevel@tonic-gate break; 18790Sstevel@tonic-gate default: 18800Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 18815240Snordmark "tl_wput:default:unexpected Streams message")); 18820Sstevel@tonic-gate freemsg(mp); 18830Sstevel@tonic-gate return; 18840Sstevel@tonic-gate } 18850Sstevel@tonic-gate 18860Sstevel@tonic-gate /* 18870Sstevel@tonic-gate * Continue processing via serializer. 
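 * The reference taken here is owned by the callback: every tl_proc
 * routine posted this way finishes with tl_serializer_exit() followed
 * by tl_refrele().  Skeleton only (tl_foo_ser is not a real routine):
 *
 *	static void
 *	tl_foo_ser(mblk_t *mp, tl_endpt_t *tep)
 *	{
 *		... process mp ...
 *		tl_serializer_exit(tep);
 *		tl_refrele(tep);
 *	}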
18880Sstevel@tonic-gate */ 18890Sstevel@tonic-gate ASSERT(tl_proc != NULL); 18900Sstevel@tonic-gate tl_refhold(tep); 18910Sstevel@tonic-gate tl_serializer_enter(tep, tl_proc, mp); 18920Sstevel@tonic-gate } 18930Sstevel@tonic-gate 18940Sstevel@tonic-gate /* 18950Sstevel@tonic-gate * Place message on the queue while preserving order. 18960Sstevel@tonic-gate */ 18970Sstevel@tonic-gate static void 18980Sstevel@tonic-gate tl_putq_ser(mblk_t *mp, tl_endpt_t *tep) 18990Sstevel@tonic-gate { 19000Sstevel@tonic-gate if (tep->te_closing) { 19010Sstevel@tonic-gate tl_wput_ser(mp, tep); 19020Sstevel@tonic-gate } else { 19030Sstevel@tonic-gate TL_PUTQ(tep, mp); 19040Sstevel@tonic-gate tl_serializer_exit(tep); 19050Sstevel@tonic-gate tl_refrele(tep); 19060Sstevel@tonic-gate } 19070Sstevel@tonic-gate 19080Sstevel@tonic-gate } 19090Sstevel@tonic-gate 19100Sstevel@tonic-gate static void 19110Sstevel@tonic-gate tl_wput_common_ser(mblk_t *mp, tl_endpt_t *tep) 19120Sstevel@tonic-gate { 19130Sstevel@tonic-gate ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO)); 19140Sstevel@tonic-gate 19150Sstevel@tonic-gate switch (DB_TYPE(mp)) { 19160Sstevel@tonic-gate case M_DATA: 19170Sstevel@tonic-gate tl_data(mp, tep); 19180Sstevel@tonic-gate break; 19190Sstevel@tonic-gate case M_PROTO: 19200Sstevel@tonic-gate tl_do_proto(mp, tep); 19210Sstevel@tonic-gate break; 19220Sstevel@tonic-gate default: 19230Sstevel@tonic-gate freemsg(mp); 19240Sstevel@tonic-gate break; 19250Sstevel@tonic-gate } 19260Sstevel@tonic-gate } 19270Sstevel@tonic-gate 19280Sstevel@tonic-gate /* 19290Sstevel@tonic-gate * Write side put procedure called from serializer. 19300Sstevel@tonic-gate */ 19310Sstevel@tonic-gate static void 19320Sstevel@tonic-gate tl_wput_ser(mblk_t *mp, tl_endpt_t *tep) 19330Sstevel@tonic-gate { 19340Sstevel@tonic-gate tl_wput_common_ser(mp, tep); 19350Sstevel@tonic-gate tl_serializer_exit(tep); 19360Sstevel@tonic-gate tl_refrele(tep); 19370Sstevel@tonic-gate } 19380Sstevel@tonic-gate 19390Sstevel@tonic-gate /* 19400Sstevel@tonic-gate * M_DATA processing. Called from serializer. 19410Sstevel@tonic-gate */ 19420Sstevel@tonic-gate static void 19430Sstevel@tonic-gate tl_wput_data_ser(mblk_t *mp, tl_endpt_t *tep) 19440Sstevel@tonic-gate { 19450Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_conp; 19460Sstevel@tonic-gate queue_t *peer_rq; 19470Sstevel@tonic-gate 19480Sstevel@tonic-gate ASSERT(DB_TYPE(mp) == M_DATA); 19490Sstevel@tonic-gate ASSERT(IS_COTS(tep)); 19500Sstevel@tonic-gate 19510Sstevel@tonic-gate ASSERT(IMPLY(peer_tep, tep->te_serializer == peer_tep->te_serializer)); 19520Sstevel@tonic-gate 19530Sstevel@tonic-gate /* 19540Sstevel@tonic-gate * fastpath for data. Ignore flow control if tep is closing. 
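 * The fastpath applies only when the peer exists and is not closing,
 * both endpoints are in a data-transfer state (TS_DATA_XFER or
 * TS_WREQ_ORDREL), nothing is already queued on our write queue (which
 * preserves ordering), and the peer's read queue passes canputnext();
 * flow control is ignored when this endpoint is closing.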
19550Sstevel@tonic-gate */ 19560Sstevel@tonic-gate if ((peer_tep != NULL) && 19570Sstevel@tonic-gate !peer_tep->te_closing && 19580Sstevel@tonic-gate ((tep->te_state == TS_DATA_XFER) || 19595240Snordmark (tep->te_state == TS_WREQ_ORDREL)) && 19600Sstevel@tonic-gate (tep->te_wq != NULL) && 19610Sstevel@tonic-gate (tep->te_wq->q_first == NULL) && 19620Sstevel@tonic-gate ((peer_tep->te_state == TS_DATA_XFER) || 19635240Snordmark (peer_tep->te_state == TS_WREQ_ORDREL)) && 19640Sstevel@tonic-gate ((peer_rq = peer_tep->te_rq) != NULL) && 19650Sstevel@tonic-gate (canputnext(peer_rq) || tep->te_closing)) { 19660Sstevel@tonic-gate putnext(peer_rq, mp); 19670Sstevel@tonic-gate } else if (tep->te_closing) { 19680Sstevel@tonic-gate /* 19690Sstevel@tonic-gate * It is possible that by the time we got here tep started to 19700Sstevel@tonic-gate * close. If the write queue is not empty, and the state is 19710Sstevel@tonic-gate * TS_DATA_XFER the data should be delivered in order, so we 19720Sstevel@tonic-gate * call putq() instead of freeing the data. 19730Sstevel@tonic-gate */ 19740Sstevel@tonic-gate if ((tep->te_wq != NULL) && 19750Sstevel@tonic-gate ((tep->te_state == TS_DATA_XFER) || 19765240Snordmark (tep->te_state == TS_WREQ_ORDREL))) { 19770Sstevel@tonic-gate TL_PUTQ(tep, mp); 19780Sstevel@tonic-gate } else { 19790Sstevel@tonic-gate freemsg(mp); 19800Sstevel@tonic-gate } 19810Sstevel@tonic-gate } else { 19820Sstevel@tonic-gate TL_PUTQ(tep, mp); 19830Sstevel@tonic-gate } 19840Sstevel@tonic-gate 19850Sstevel@tonic-gate tl_serializer_exit(tep); 19860Sstevel@tonic-gate tl_refrele(tep); 19870Sstevel@tonic-gate } 19880Sstevel@tonic-gate 19890Sstevel@tonic-gate /* 19900Sstevel@tonic-gate * Write side service routine. 19910Sstevel@tonic-gate * 19920Sstevel@tonic-gate * All actual processing happens within serializer which is entered 19930Sstevel@tonic-gate * synchronously. It is possible that by the time tl_wsrv() wakes up, some new 19940Sstevel@tonic-gate * messages that need processing may have arrived, so tl_wsrv repeats until 19950Sstevel@tonic-gate * queue is empty or te_nowsrv is set. 19960Sstevel@tonic-gate */ 19970Sstevel@tonic-gate static void 19980Sstevel@tonic-gate tl_wsrv(queue_t *wq) 19990Sstevel@tonic-gate { 20000Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr; 20010Sstevel@tonic-gate 20020Sstevel@tonic-gate while ((wq->q_first != NULL) && !tep->te_nowsrv) { 20030Sstevel@tonic-gate mutex_enter(&tep->te_srv_lock); 20040Sstevel@tonic-gate ASSERT(tep->te_wsrv_active == B_FALSE); 20050Sstevel@tonic-gate tep->te_wsrv_active = B_TRUE; 20060Sstevel@tonic-gate mutex_exit(&tep->te_srv_lock); 20070Sstevel@tonic-gate 20080Sstevel@tonic-gate tl_serializer_enter(tep, tl_wsrv_ser, &tep->te_wsrvmp); 20090Sstevel@tonic-gate 20100Sstevel@tonic-gate /* 20110Sstevel@tonic-gate * Wait for serializer job to complete. 20120Sstevel@tonic-gate */ 20130Sstevel@tonic-gate mutex_enter(&tep->te_srv_lock); 20140Sstevel@tonic-gate while (tep->te_wsrv_active) { 20150Sstevel@tonic-gate cv_wait(&tep->te_srv_cv, &tep->te_srv_lock); 20160Sstevel@tonic-gate } 20170Sstevel@tonic-gate cv_signal(&tep->te_srv_cv); 20180Sstevel@tonic-gate mutex_exit(&tep->te_srv_lock); 20190Sstevel@tonic-gate } 20200Sstevel@tonic-gate } 20210Sstevel@tonic-gate 20220Sstevel@tonic-gate /* 20230Sstevel@tonic-gate * Serialized write side processing of the STREAMS queue. 20240Sstevel@tonic-gate * May be called either from tl_wsrv() or from tl_close() in which case ser_mp 20250Sstevel@tonic-gate * is NULL. 
20260Sstevel@tonic-gate */ 20270Sstevel@tonic-gate static void 20280Sstevel@tonic-gate tl_wsrv_ser(mblk_t *ser_mp, tl_endpt_t *tep) 20290Sstevel@tonic-gate { 20300Sstevel@tonic-gate mblk_t *mp; 20310Sstevel@tonic-gate queue_t *wq = tep->te_wq; 20320Sstevel@tonic-gate 20330Sstevel@tonic-gate ASSERT(wq != NULL); 20340Sstevel@tonic-gate while (!tep->te_nowsrv && (mp = getq(wq)) != NULL) { 20350Sstevel@tonic-gate tl_wput_common_ser(mp, tep); 20360Sstevel@tonic-gate } 20370Sstevel@tonic-gate 20380Sstevel@tonic-gate /* 20390Sstevel@tonic-gate * Wakeup service routine unless called from close. 20400Sstevel@tonic-gate * If ser_mp is specified, the caller is tl_wsrv(). 20410Sstevel@tonic-gate * Otherwise, the caller is tl_close_ser(). Since tl_close_ser() doesn't 20420Sstevel@tonic-gate * call tl_serializer_enter() before calling tl_wsrv_ser(), there should 20430Sstevel@tonic-gate * be no matching tl_serializer_exit() in this case. 20440Sstevel@tonic-gate * Also, there is no need to wakeup anyone since tl_close_ser() is not 20450Sstevel@tonic-gate * waiting on te_srv_cv. 20460Sstevel@tonic-gate */ 20470Sstevel@tonic-gate if (ser_mp != NULL) { 20480Sstevel@tonic-gate /* 20490Sstevel@tonic-gate * We are called from tl_wsrv. 20500Sstevel@tonic-gate */ 20510Sstevel@tonic-gate mutex_enter(&tep->te_srv_lock); 20520Sstevel@tonic-gate ASSERT(tep->te_wsrv_active); 20530Sstevel@tonic-gate tep->te_wsrv_active = B_FALSE; 20540Sstevel@tonic-gate cv_signal(&tep->te_srv_cv); 20550Sstevel@tonic-gate mutex_exit(&tep->te_srv_lock); 20560Sstevel@tonic-gate tl_serializer_exit(tep); 20570Sstevel@tonic-gate } 20580Sstevel@tonic-gate } 20590Sstevel@tonic-gate 20600Sstevel@tonic-gate /* 20610Sstevel@tonic-gate * Called when the stream is backenabled. Enter serializer and qenable everyone 20620Sstevel@tonic-gate * flow controlled by tep. 20630Sstevel@tonic-gate * 20640Sstevel@tonic-gate * NOTE: The service routine should enter serializer synchronously. Otherwise it 20650Sstevel@tonic-gate * is possible that two instances of tl_rsrv will be running reusing the same 20660Sstevel@tonic-gate * rsrv mblk. 20670Sstevel@tonic-gate */ 20680Sstevel@tonic-gate static void 20690Sstevel@tonic-gate tl_rsrv(queue_t *rq) 20700Sstevel@tonic-gate { 20710Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr; 20720Sstevel@tonic-gate 20730Sstevel@tonic-gate ASSERT(rq->q_first == NULL); 20740Sstevel@tonic-gate ASSERT(tep->te_rsrv_active == 0); 20750Sstevel@tonic-gate 20760Sstevel@tonic-gate tep->te_rsrv_active = B_TRUE; 20770Sstevel@tonic-gate tl_serializer_enter(tep, tl_rsrv_ser, &tep->te_rsrvmp); 20780Sstevel@tonic-gate /* 20790Sstevel@tonic-gate * Wait for serializer job to complete. 
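 * te_rsrv_active is cleared and te_srv_cv signalled by tl_rsrv_ser()
 * once the backenable work has run, so tl_rsrv() does not return until
 * the serializer job is done.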
20800Sstevel@tonic-gate */ 20810Sstevel@tonic-gate mutex_enter(&tep->te_srv_lock); 20820Sstevel@tonic-gate while (tep->te_rsrv_active) { 20830Sstevel@tonic-gate cv_wait(&tep->te_srv_cv, &tep->te_srv_lock); 20840Sstevel@tonic-gate } 20850Sstevel@tonic-gate cv_signal(&tep->te_srv_cv); 20860Sstevel@tonic-gate mutex_exit(&tep->te_srv_lock); 20870Sstevel@tonic-gate } 20880Sstevel@tonic-gate 20890Sstevel@tonic-gate /* ARGSUSED */ 20900Sstevel@tonic-gate static void 20910Sstevel@tonic-gate tl_rsrv_ser(mblk_t *mp, tl_endpt_t *tep) 20920Sstevel@tonic-gate { 20930Sstevel@tonic-gate tl_endpt_t *peer_tep; 20940Sstevel@tonic-gate 20950Sstevel@tonic-gate if (IS_CLTS(tep) && tep->te_state == TS_IDLE) { 20960Sstevel@tonic-gate tl_cl_backenable(tep); 20970Sstevel@tonic-gate } else if ( 20985240Snordmark IS_COTS(tep) && 20995240Snordmark ((peer_tep = tep->te_conp) != NULL) && 21005240Snordmark !peer_tep->te_closing && 21015240Snordmark ((tep->te_state == TS_DATA_XFER) || 21025240Snordmark (tep->te_state == TS_WIND_ORDREL)|| 21035240Snordmark (tep->te_state == TS_WREQ_ORDREL))) { 21040Sstevel@tonic-gate TL_QENABLE(peer_tep); 21050Sstevel@tonic-gate } 21060Sstevel@tonic-gate 21070Sstevel@tonic-gate /* 21080Sstevel@tonic-gate * Wakeup read side service routine. 21090Sstevel@tonic-gate */ 21100Sstevel@tonic-gate mutex_enter(&tep->te_srv_lock); 21110Sstevel@tonic-gate ASSERT(tep->te_rsrv_active); 21120Sstevel@tonic-gate tep->te_rsrv_active = B_FALSE; 21130Sstevel@tonic-gate cv_signal(&tep->te_srv_cv); 21140Sstevel@tonic-gate mutex_exit(&tep->te_srv_lock); 21150Sstevel@tonic-gate tl_serializer_exit(tep); 21160Sstevel@tonic-gate } 21170Sstevel@tonic-gate 21180Sstevel@tonic-gate /* 21190Sstevel@tonic-gate * process M_PROTO messages. Always called from serializer. 21200Sstevel@tonic-gate */ 21210Sstevel@tonic-gate static void 21220Sstevel@tonic-gate tl_do_proto(mblk_t *mp, tl_endpt_t *tep) 21230Sstevel@tonic-gate { 21240Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 21250Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr; 21260Sstevel@tonic-gate 21270Sstevel@tonic-gate /* Message size was validated by tl_wput(). 
*/ 21280Sstevel@tonic-gate ASSERT(msz >= sizeof (prim->type)); 21290Sstevel@tonic-gate 21300Sstevel@tonic-gate switch (prim->type) { 21310Sstevel@tonic-gate case T_UNBIND_REQ: 21320Sstevel@tonic-gate tl_unbind(mp, tep); 21330Sstevel@tonic-gate break; 21340Sstevel@tonic-gate 21350Sstevel@tonic-gate case T_ADDR_REQ: 21360Sstevel@tonic-gate tl_addr_req(mp, tep); 21370Sstevel@tonic-gate break; 21380Sstevel@tonic-gate 21390Sstevel@tonic-gate case O_T_CONN_RES: 21400Sstevel@tonic-gate case T_CONN_RES: 21410Sstevel@tonic-gate if (IS_CLTS(tep)) { 21420Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21430Sstevel@tonic-gate break; 21440Sstevel@tonic-gate } 21450Sstevel@tonic-gate tl_conn_res(mp, tep); 21460Sstevel@tonic-gate break; 21470Sstevel@tonic-gate 21480Sstevel@tonic-gate case T_DISCON_REQ: 21490Sstevel@tonic-gate if (IS_CLTS(tep)) { 21500Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21510Sstevel@tonic-gate break; 21520Sstevel@tonic-gate } 21530Sstevel@tonic-gate tl_discon_req(mp, tep); 21540Sstevel@tonic-gate break; 21550Sstevel@tonic-gate 21560Sstevel@tonic-gate case T_DATA_REQ: 21570Sstevel@tonic-gate if (IS_CLTS(tep)) { 21580Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21590Sstevel@tonic-gate break; 21600Sstevel@tonic-gate } 21610Sstevel@tonic-gate tl_data(mp, tep); 21620Sstevel@tonic-gate break; 21630Sstevel@tonic-gate 21640Sstevel@tonic-gate case T_OPTDATA_REQ: 21650Sstevel@tonic-gate if (IS_CLTS(tep)) { 21660Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21670Sstevel@tonic-gate break; 21680Sstevel@tonic-gate } 21690Sstevel@tonic-gate tl_data(mp, tep); 21700Sstevel@tonic-gate break; 21710Sstevel@tonic-gate 21720Sstevel@tonic-gate case T_EXDATA_REQ: 21730Sstevel@tonic-gate if (IS_CLTS(tep)) { 21740Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21750Sstevel@tonic-gate break; 21760Sstevel@tonic-gate } 21770Sstevel@tonic-gate tl_exdata(mp, tep); 21780Sstevel@tonic-gate break; 21790Sstevel@tonic-gate 21800Sstevel@tonic-gate case T_ORDREL_REQ: 21810Sstevel@tonic-gate if (! IS_COTSORD(tep)) { 21820Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21830Sstevel@tonic-gate break; 21840Sstevel@tonic-gate } 21850Sstevel@tonic-gate tl_ordrel(mp, tep); 21860Sstevel@tonic-gate break; 21870Sstevel@tonic-gate 21880Sstevel@tonic-gate case T_UNITDATA_REQ: 21890Sstevel@tonic-gate if (IS_COTS(tep)) { 21900Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21910Sstevel@tonic-gate break; 21920Sstevel@tonic-gate } 21930Sstevel@tonic-gate tl_unitdata(mp, tep); 21940Sstevel@tonic-gate break; 21950Sstevel@tonic-gate 21960Sstevel@tonic-gate default: 21970Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21980Sstevel@tonic-gate break; 21990Sstevel@tonic-gate } 22000Sstevel@tonic-gate } 22010Sstevel@tonic-gate 22020Sstevel@tonic-gate /* 22030Sstevel@tonic-gate * Process ioctl from serializer. 22040Sstevel@tonic-gate * This is a wrapper around tl_do_ioctl(). 22050Sstevel@tonic-gate */ 22060Sstevel@tonic-gate static void 22070Sstevel@tonic-gate tl_do_ioctl_ser(mblk_t *mp, tl_endpt_t *tep) 22080Sstevel@tonic-gate { 22090Sstevel@tonic-gate if (! 
tep->te_closing) 22100Sstevel@tonic-gate tl_do_ioctl(mp, tep); 22110Sstevel@tonic-gate else 22120Sstevel@tonic-gate freemsg(mp); 22130Sstevel@tonic-gate 22140Sstevel@tonic-gate tl_serializer_exit(tep); 22150Sstevel@tonic-gate tl_refrele(tep); 22160Sstevel@tonic-gate } 22170Sstevel@tonic-gate 22180Sstevel@tonic-gate static void 22190Sstevel@tonic-gate tl_do_ioctl(mblk_t *mp, tl_endpt_t *tep) 22200Sstevel@tonic-gate { 22210Sstevel@tonic-gate struct iocblk *iocbp = (struct iocblk *)mp->b_rptr; 22220Sstevel@tonic-gate int cmd = iocbp->ioc_cmd; 22230Sstevel@tonic-gate queue_t *wq = tep->te_wq; 22240Sstevel@tonic-gate int error; 22250Sstevel@tonic-gate int thisopt, otheropt; 22260Sstevel@tonic-gate 22270Sstevel@tonic-gate ASSERT((cmd == TL_IOC_CREDOPT) || (cmd == TL_IOC_UCREDOPT)); 22280Sstevel@tonic-gate 22290Sstevel@tonic-gate switch (cmd) { 22300Sstevel@tonic-gate case TL_IOC_CREDOPT: 22310Sstevel@tonic-gate if (cmd == TL_IOC_CREDOPT) { 22320Sstevel@tonic-gate thisopt = TL_SETCRED; 22330Sstevel@tonic-gate otheropt = TL_SETUCRED; 22340Sstevel@tonic-gate } else { 22350Sstevel@tonic-gate /* FALLTHROUGH */ 22360Sstevel@tonic-gate case TL_IOC_UCREDOPT: 22370Sstevel@tonic-gate thisopt = TL_SETUCRED; 22380Sstevel@tonic-gate otheropt = TL_SETCRED; 22390Sstevel@tonic-gate } 22400Sstevel@tonic-gate /* 22410Sstevel@tonic-gate * The credentials passing does not apply to sockets. 22420Sstevel@tonic-gate * Only one of the cred options can be set at a given time. 22430Sstevel@tonic-gate */ 22440Sstevel@tonic-gate if (IS_SOCKET(tep) || (tep->te_flag & otheropt)) { 22450Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL); 22460Sstevel@tonic-gate return; 22470Sstevel@tonic-gate } 22480Sstevel@tonic-gate 22490Sstevel@tonic-gate /* 22500Sstevel@tonic-gate * Turn on generation of credential options for 22510Sstevel@tonic-gate * T_conn_req, T_conn_con, T_unidata_ind. 
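 * The payload is a single uint32_t: non-zero enables the option,
 * zero disables it.  From user level this is typically issued as an
 * I_STR ioctl, roughly (illustrative sketch; fd is an endpoint opened
 * on this driver):
 *
 *	struct strioctl ic;
 *	uint32_t on = 1;
 *
 *	ic.ic_cmd = TL_IOC_UCREDOPT;
 *	ic.ic_timout = 0;
 *	ic.ic_len = sizeof (on);
 *	ic.ic_dp = (char *)&on;
 *	(void) ioctl(fd, I_STR, &ic);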
22520Sstevel@tonic-gate */ 22530Sstevel@tonic-gate error = miocpullup(mp, sizeof (uint32_t)); 22540Sstevel@tonic-gate if (error != 0) { 22550Sstevel@tonic-gate miocnak(wq, mp, 0, error); 22560Sstevel@tonic-gate return; 22570Sstevel@tonic-gate } 22580Sstevel@tonic-gate if (!IS_P2ALIGNED(mp->b_cont->b_rptr, sizeof (uint32_t))) { 22590Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL); 22600Sstevel@tonic-gate return; 22610Sstevel@tonic-gate } 22620Sstevel@tonic-gate 22630Sstevel@tonic-gate if (*(uint32_t *)mp->b_cont->b_rptr) 22640Sstevel@tonic-gate tep->te_flag |= thisopt; 22650Sstevel@tonic-gate else 22660Sstevel@tonic-gate tep->te_flag &= ~thisopt; 22670Sstevel@tonic-gate 22680Sstevel@tonic-gate miocack(wq, mp, 0, 0); 22690Sstevel@tonic-gate break; 22700Sstevel@tonic-gate 22710Sstevel@tonic-gate default: 22720Sstevel@tonic-gate /* Should not be here */ 22730Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL); 22740Sstevel@tonic-gate break; 22750Sstevel@tonic-gate } 22760Sstevel@tonic-gate } 22770Sstevel@tonic-gate 22780Sstevel@tonic-gate 22790Sstevel@tonic-gate /* 22800Sstevel@tonic-gate * send T_ERROR_ACK 22810Sstevel@tonic-gate * Note: assumes enough memory or caller passed big enough mp 22820Sstevel@tonic-gate * - no recovery from allocb failures 22830Sstevel@tonic-gate */ 22840Sstevel@tonic-gate 22850Sstevel@tonic-gate static void 22860Sstevel@tonic-gate tl_error_ack(queue_t *wq, mblk_t *mp, t_scalar_t tli_err, 22870Sstevel@tonic-gate t_scalar_t unix_err, t_scalar_t type) 22880Sstevel@tonic-gate { 22890Sstevel@tonic-gate struct T_error_ack *err_ack; 22900Sstevel@tonic-gate mblk_t *ackmp = tpi_ack_alloc(mp, sizeof (struct T_error_ack), 22910Sstevel@tonic-gate M_PCPROTO, T_ERROR_ACK); 22920Sstevel@tonic-gate 22930Sstevel@tonic-gate if (ackmp == NULL) { 22940Sstevel@tonic-gate (void) (STRLOG(TL_ID, 0, 1, SL_TRACE|SL_ERROR, 22955240Snordmark "tl_error_ack:out of mblk memory")); 22960Sstevel@tonic-gate tl_merror(wq, NULL, ENOSR); 22970Sstevel@tonic-gate return; 22980Sstevel@tonic-gate } 22990Sstevel@tonic-gate err_ack = (struct T_error_ack *)ackmp->b_rptr; 23000Sstevel@tonic-gate err_ack->ERROR_prim = type; 23010Sstevel@tonic-gate err_ack->TLI_error = tli_err; 23020Sstevel@tonic-gate err_ack->UNIX_error = unix_err; 23030Sstevel@tonic-gate 23040Sstevel@tonic-gate /* 23050Sstevel@tonic-gate * send error ack message 23060Sstevel@tonic-gate */ 23070Sstevel@tonic-gate qreply(wq, ackmp); 23080Sstevel@tonic-gate } 23090Sstevel@tonic-gate 23100Sstevel@tonic-gate 23110Sstevel@tonic-gate 23120Sstevel@tonic-gate /* 23130Sstevel@tonic-gate * send T_OK_ACK 23140Sstevel@tonic-gate * Note: assumes enough memory or caller passed big enough mp 23150Sstevel@tonic-gate * - no recovery from allocb failures 23160Sstevel@tonic-gate */ 23170Sstevel@tonic-gate static void 23180Sstevel@tonic-gate tl_ok_ack(queue_t *wq, mblk_t *mp, t_scalar_t type) 23190Sstevel@tonic-gate { 23200Sstevel@tonic-gate struct T_ok_ack *ok_ack; 23210Sstevel@tonic-gate mblk_t *ackmp = tpi_ack_alloc(mp, sizeof (struct T_ok_ack), 23220Sstevel@tonic-gate M_PCPROTO, T_OK_ACK); 23230Sstevel@tonic-gate 23240Sstevel@tonic-gate if (ackmp == NULL) { 23250Sstevel@tonic-gate tl_merror(wq, NULL, ENOMEM); 23260Sstevel@tonic-gate return; 23270Sstevel@tonic-gate } 23280Sstevel@tonic-gate 23290Sstevel@tonic-gate ok_ack = (struct T_ok_ack *)ackmp->b_rptr; 23300Sstevel@tonic-gate ok_ack->CORRECT_prim = type; 23310Sstevel@tonic-gate 23320Sstevel@tonic-gate (void) qreply(wq, ackmp); 23330Sstevel@tonic-gate } 23340Sstevel@tonic-gate 23350Sstevel@tonic-gate /* 
23360Sstevel@tonic-gate * Process T_BIND_REQ and O_T_BIND_REQ from serializer. 23370Sstevel@tonic-gate * This is a wrapper around tl_bind(). 23380Sstevel@tonic-gate */ 23390Sstevel@tonic-gate static void 23400Sstevel@tonic-gate tl_bind_ser(mblk_t *mp, tl_endpt_t *tep) 23410Sstevel@tonic-gate { 23420Sstevel@tonic-gate if (! tep->te_closing) 23430Sstevel@tonic-gate tl_bind(mp, tep); 23440Sstevel@tonic-gate else 23450Sstevel@tonic-gate freemsg(mp); 23460Sstevel@tonic-gate 23470Sstevel@tonic-gate tl_serializer_exit(tep); 23480Sstevel@tonic-gate tl_refrele(tep); 23490Sstevel@tonic-gate } 23500Sstevel@tonic-gate 23510Sstevel@tonic-gate /* 23520Sstevel@tonic-gate * Process T_BIND_REQ and O_T_BIND_REQ TPI requests. 23530Sstevel@tonic-gate * Assumes that the endpoint is in the unbound. 23540Sstevel@tonic-gate */ 23550Sstevel@tonic-gate static void 23560Sstevel@tonic-gate tl_bind(mblk_t *mp, tl_endpt_t *tep) 23570Sstevel@tonic-gate { 23580Sstevel@tonic-gate queue_t *wq = tep->te_wq; 23590Sstevel@tonic-gate struct T_bind_ack *b_ack; 23600Sstevel@tonic-gate struct T_bind_req *bind = (struct T_bind_req *)mp->b_rptr; 23610Sstevel@tonic-gate mblk_t *ackmp, *bamp; 23620Sstevel@tonic-gate soux_addr_t ux_addr; 23630Sstevel@tonic-gate t_uscalar_t qlen = 0; 23640Sstevel@tonic-gate t_scalar_t alen, aoff; 23650Sstevel@tonic-gate tl_addr_t addr_req; 23660Sstevel@tonic-gate void *addr_startp; 23670Sstevel@tonic-gate ssize_t msz = MBLKL(mp), basize; 23680Sstevel@tonic-gate t_scalar_t tli_err = 0, unix_err = 0; 23690Sstevel@tonic-gate t_scalar_t save_prim_type = bind->PRIM_type; 23700Sstevel@tonic-gate t_scalar_t save_state = tep->te_state; 23710Sstevel@tonic-gate 23720Sstevel@tonic-gate if (tep->te_state != TS_UNBND) { 23730Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 23745240Snordmark SL_TRACE|SL_ERROR, 23755240Snordmark "tl_wput:bind_request:out of state, state=%d", 23765240Snordmark tep->te_state)); 23770Sstevel@tonic-gate tli_err = TOUTSTATE; 23780Sstevel@tonic-gate goto error; 23790Sstevel@tonic-gate } 23800Sstevel@tonic-gate 23810Sstevel@tonic-gate if (msz < sizeof (struct T_bind_req)) { 23820Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 23830Sstevel@tonic-gate goto error; 23840Sstevel@tonic-gate } 23850Sstevel@tonic-gate 23860Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_BIND_REQ, tep->te_state); 23870Sstevel@tonic-gate 23880Sstevel@tonic-gate ASSERT((bind->PRIM_type == O_T_BIND_REQ) || 23890Sstevel@tonic-gate (bind->PRIM_type == T_BIND_REQ)); 23900Sstevel@tonic-gate 23910Sstevel@tonic-gate alen = bind->ADDR_length; 23920Sstevel@tonic-gate aoff = bind->ADDR_offset; 23930Sstevel@tonic-gate 23940Sstevel@tonic-gate /* negotiate max conn req pending */ 23950Sstevel@tonic-gate if (IS_COTS(tep)) { 23960Sstevel@tonic-gate qlen = bind->CONIND_number; 23972486Sakolb if (qlen > tl_maxqlen) 23982486Sakolb qlen = tl_maxqlen; 23990Sstevel@tonic-gate } 24000Sstevel@tonic-gate 24010Sstevel@tonic-gate /* 24020Sstevel@tonic-gate * Reserve hash handle. It can only be NULL if the endpoint is unbound 24030Sstevel@tonic-gate * and bound again. 
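 *
 * Reserving up front is what makes the later address insert immune to
 * allocation failure. A rough sketch of the pattern used in this
 * function (hash and key stand for the actual arguments used below):
 *
 *	if (mod_hash_reserve_nosleep(tep->te_addrhash,
 *	    &tep->te_hash_hndl) != 0)
 *		fail the bind with TSYSERR/ENOSR;
 *	...
 *	rc = mod_hash_insert_reserve(hash, key, (mod_hash_val_t)tep,
 *	    tep->te_hash_hndl);
 *
 * After a successful reservation rc can only be MH_ERR_DUPLICATE,
 * never a memory error.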
24040Sstevel@tonic-gate */ 24050Sstevel@tonic-gate if ((tep->te_hash_hndl == NULL) && 24060Sstevel@tonic-gate ((tep->te_flag & TL_ADDRHASHED) == 0) && 24070Sstevel@tonic-gate mod_hash_reserve_nosleep(tep->te_addrhash, 24085240Snordmark &tep->te_hash_hndl) != 0) { 24090Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOSR; 24100Sstevel@tonic-gate goto error; 24110Sstevel@tonic-gate } 24120Sstevel@tonic-gate 24130Sstevel@tonic-gate /* 24140Sstevel@tonic-gate * Verify address correctness. 24150Sstevel@tonic-gate */ 24160Sstevel@tonic-gate if (IS_SOCKET(tep)) { 24170Sstevel@tonic-gate ASSERT(bind->PRIM_type == O_T_BIND_REQ); 24180Sstevel@tonic-gate 24190Sstevel@tonic-gate if ((alen != TL_SOUX_ADDRLEN) || 24200Sstevel@tonic-gate (aoff < 0) || 24210Sstevel@tonic-gate (aoff + alen > msz)) { 24220Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24235240Snordmark 1, SL_TRACE|SL_ERROR, 24245240Snordmark "tl_bind: invalid socket addr")); 24250Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24260Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 24270Sstevel@tonic-gate goto error; 24280Sstevel@tonic-gate } 24290Sstevel@tonic-gate /* Copy address from message to local buffer. */ 24300Sstevel@tonic-gate bcopy(mp->b_rptr + aoff, &ux_addr, sizeof (ux_addr)); 24310Sstevel@tonic-gate /* 24320Sstevel@tonic-gate * Check that we got correct address from sockets 24330Sstevel@tonic-gate */ 24340Sstevel@tonic-gate if ((ux_addr.soua_magic != SOU_MAGIC_EXPLICIT) && 24350Sstevel@tonic-gate (ux_addr.soua_magic != SOU_MAGIC_IMPLICIT)) { 24360Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24375240Snordmark 1, SL_TRACE|SL_ERROR, 24385240Snordmark "tl_bind: invalid socket magic")); 24390Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24400Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 24410Sstevel@tonic-gate goto error; 24420Sstevel@tonic-gate } 24430Sstevel@tonic-gate if ((ux_addr.soua_magic == SOU_MAGIC_IMPLICIT) && 24440Sstevel@tonic-gate (ux_addr.soua_vp != NULL)) { 24450Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24465240Snordmark 1, SL_TRACE|SL_ERROR, 24475240Snordmark "tl_bind: implicit addr non-empty")); 24480Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24490Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 24500Sstevel@tonic-gate goto error; 24510Sstevel@tonic-gate } 24520Sstevel@tonic-gate if ((ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) && 24530Sstevel@tonic-gate (ux_addr.soua_vp == NULL)) { 24540Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24555240Snordmark 1, SL_TRACE|SL_ERROR, 24565240Snordmark "tl_bind: explicit addr empty")); 24570Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24580Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 24590Sstevel@tonic-gate goto error; 24600Sstevel@tonic-gate } 24610Sstevel@tonic-gate } else { 24620Sstevel@tonic-gate if ((alen > 0) && ((aoff < 0) || 24635240Snordmark ((ssize_t)(aoff + alen) > msz) || 24645240Snordmark ((aoff + alen) < 0))) { 24650Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24665240Snordmark 1, SL_TRACE|SL_ERROR, 24675240Snordmark "tl_bind: invalid message")); 24680Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24690Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 24700Sstevel@tonic-gate goto error; 24710Sstevel@tonic-gate } 24720Sstevel@tonic-gate if ((alen < 0) || (alen > (msz - sizeof (struct T_bind_req)))) { 24730Sstevel@tonic-gate 
(void) (STRLOG(TL_ID, tep->te_minor, 24745240Snordmark 1, SL_TRACE|SL_ERROR, 24755240Snordmark "tl_bind: bad addr in message")); 24760Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24770Sstevel@tonic-gate tli_err = TBADADDR; 24780Sstevel@tonic-gate goto error; 24790Sstevel@tonic-gate } 24800Sstevel@tonic-gate #ifdef DEBUG 24810Sstevel@tonic-gate /* 24820Sstevel@tonic-gate * Mild form of ASSERT()ion to detect broken TPI apps. 24830Sstevel@tonic-gate * if (! assertion) 24840Sstevel@tonic-gate * log warning; 24850Sstevel@tonic-gate */ 24860Sstevel@tonic-gate if (! ((alen == 0 && aoff == 0) || 24870Sstevel@tonic-gate (aoff >= (t_scalar_t)(sizeof (struct T_bind_req))))) { 24880Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24890Sstevel@tonic-gate 3, SL_TRACE|SL_ERROR, 24900Sstevel@tonic-gate "tl_bind: addr overlaps TPI message")); 24910Sstevel@tonic-gate } 24920Sstevel@tonic-gate #endif 24930Sstevel@tonic-gate } 24940Sstevel@tonic-gate 24950Sstevel@tonic-gate /* 24960Sstevel@tonic-gate * Bind the address provided or allocate one if requested. 24970Sstevel@tonic-gate * Allow rebinds with a new qlen value. 24980Sstevel@tonic-gate */ 24990Sstevel@tonic-gate if (IS_SOCKET(tep)) { 25000Sstevel@tonic-gate /* 25010Sstevel@tonic-gate * For anonymous requests the te_ap is already set up properly 25020Sstevel@tonic-gate * so use minor number as an address. 25030Sstevel@tonic-gate * For explicit requests need to check whether the address is 25040Sstevel@tonic-gate * already in use. 25050Sstevel@tonic-gate */ 25060Sstevel@tonic-gate if (ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) { 25070Sstevel@tonic-gate int rc; 25080Sstevel@tonic-gate 25090Sstevel@tonic-gate if (tep->te_flag & TL_ADDRHASHED) { 25100Sstevel@tonic-gate ASSERT(IS_COTS(tep) && tep->te_qlen == 0); 25110Sstevel@tonic-gate if (tep->te_vp == ux_addr.soua_vp) 25120Sstevel@tonic-gate goto skip_addr_bind; 25130Sstevel@tonic-gate else /* Rebind to a new address. */ 25140Sstevel@tonic-gate tl_addr_unbind(tep); 25150Sstevel@tonic-gate } 25160Sstevel@tonic-gate /* 25170Sstevel@tonic-gate * Insert address in the hash if it is not already 25180Sstevel@tonic-gate * there. Since we use preallocated handle, the insert 25190Sstevel@tonic-gate * can fail only if the key is already present. 25200Sstevel@tonic-gate */ 25210Sstevel@tonic-gate rc = mod_hash_insert_reserve(tep->te_addrhash, 25220Sstevel@tonic-gate (mod_hash_key_t)ux_addr.soua_vp, 25230Sstevel@tonic-gate (mod_hash_val_t)tep, tep->te_hash_hndl); 25240Sstevel@tonic-gate 25250Sstevel@tonic-gate if (rc != 0) { 25260Sstevel@tonic-gate ASSERT(rc == MH_ERR_DUPLICATE); 25270Sstevel@tonic-gate /* 25280Sstevel@tonic-gate * Violate O_T_BIND_REQ semantics and fail with 25290Sstevel@tonic-gate * TADDRBUSY - sockets will not use any address 25300Sstevel@tonic-gate * other than supplied one for explicit binds. 
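 *
 * Concretely (illustrative): if two processes bind(3SOCKET) the same
 * AF_UNIX path, both explicit binds arrive here keyed by the same
 * soua_vp, the second mod_hash_insert_reserve() returns
 * MH_ERR_DUPLICATE, and that bind fails with TADDRBUSY (which the
 * socket layer normally reports as EADDRINUSE).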
25310Sstevel@tonic-gate */
25320Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
25335240Snordmark SL_TRACE|SL_ERROR,
25345240Snordmark "tl_bind:requested addr %p is busy",
25355240Snordmark ux_addr.soua_vp));
25360Sstevel@tonic-gate tli_err = TADDRBUSY; unix_err = 0;
25370Sstevel@tonic-gate goto error;
25380Sstevel@tonic-gate }
25390Sstevel@tonic-gate tep->te_uxaddr = ux_addr;
25400Sstevel@tonic-gate tep->te_flag |= TL_ADDRHASHED;
25410Sstevel@tonic-gate tep->te_hash_hndl = NULL;
25420Sstevel@tonic-gate }
25430Sstevel@tonic-gate } else if (alen == 0) {
25440Sstevel@tonic-gate /*
25450Sstevel@tonic-gate * assign any free address
25460Sstevel@tonic-gate */
25470Sstevel@tonic-gate if (! tl_get_any_addr(tep, NULL)) {
25480Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
25495240Snordmark 1, SL_TRACE|SL_ERROR,
25505240Snordmark "tl_bind:failed to get buffer for any "
25515240Snordmark "address"));
25520Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOSR;
25530Sstevel@tonic-gate goto error;
25540Sstevel@tonic-gate }
25550Sstevel@tonic-gate } else {
25560Sstevel@tonic-gate addr_req.ta_alen = alen;
25570Sstevel@tonic-gate addr_req.ta_abuf = (mp->b_rptr + aoff);
25580Sstevel@tonic-gate addr_req.ta_zoneid = tep->te_zoneid;
25590Sstevel@tonic-gate
25600Sstevel@tonic-gate tep->te_abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP);
25610Sstevel@tonic-gate if (tep->te_abuf == NULL) {
25620Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOSR;
25630Sstevel@tonic-gate goto error;
25640Sstevel@tonic-gate }
25650Sstevel@tonic-gate bcopy(addr_req.ta_abuf, tep->te_abuf, addr_req.ta_alen);
25660Sstevel@tonic-gate tep->te_alen = alen;
25670Sstevel@tonic-gate
25680Sstevel@tonic-gate if (mod_hash_insert_reserve(tep->te_addrhash,
25695240Snordmark (mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep,
25705240Snordmark tep->te_hash_hndl) != 0) {
25710Sstevel@tonic-gate if (save_prim_type == T_BIND_REQ) {
25720Sstevel@tonic-gate /*
25730Sstevel@tonic-gate * The bind semantics for this primitive
25740Sstevel@tonic-gate * require a failure if the exact address
25750Sstevel@tonic-gate * requested is busy
25760Sstevel@tonic-gate */
25770Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
25785240Snordmark SL_TRACE|SL_ERROR,
25795240Snordmark "tl_bind:requested addr is busy"));
25800Sstevel@tonic-gate tli_err = TADDRBUSY; unix_err = 0;
25810Sstevel@tonic-gate goto error;
25820Sstevel@tonic-gate }
25830Sstevel@tonic-gate
25840Sstevel@tonic-gate /*
25850Sstevel@tonic-gate * O_T_BIND_REQ semantics say that if the requested
25860Sstevel@tonic-gate * address is busy, bind to any available free address
25870Sstevel@tonic-gate */
25880Sstevel@tonic-gate if (!
tl_get_any_addr(tep, &addr_req)) { 25890Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 25905240Snordmark SL_TRACE|SL_ERROR, 25915240Snordmark "tl_bind:unable to get any addr buf")); 25920Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOMEM; 25930Sstevel@tonic-gate goto error; 25940Sstevel@tonic-gate } 25950Sstevel@tonic-gate } else { 25960Sstevel@tonic-gate tep->te_flag |= TL_ADDRHASHED; 25970Sstevel@tonic-gate tep->te_hash_hndl = NULL; 25980Sstevel@tonic-gate } 25990Sstevel@tonic-gate } 26000Sstevel@tonic-gate 26010Sstevel@tonic-gate ASSERT(tep->te_alen >= 0); 26020Sstevel@tonic-gate 26030Sstevel@tonic-gate skip_addr_bind: 26040Sstevel@tonic-gate /* 26050Sstevel@tonic-gate * prepare T_BIND_ACK TPI message 26060Sstevel@tonic-gate */ 26070Sstevel@tonic-gate basize = sizeof (struct T_bind_ack) + tep->te_alen; 26080Sstevel@tonic-gate bamp = reallocb(mp, basize, 0); 26090Sstevel@tonic-gate if (bamp == NULL) { 26100Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 26115240Snordmark "tl_wput:tl_bind: allocb failed")); 26120Sstevel@tonic-gate /* 26130Sstevel@tonic-gate * roll back state changes 26140Sstevel@tonic-gate */ 26150Sstevel@tonic-gate tl_addr_unbind(tep); 26160Sstevel@tonic-gate tep->te_state = TS_UNBND; 26170Sstevel@tonic-gate tl_memrecover(wq, mp, basize); 26180Sstevel@tonic-gate return; 26190Sstevel@tonic-gate } 26200Sstevel@tonic-gate 26210Sstevel@tonic-gate DB_TYPE(bamp) = M_PCPROTO; 26220Sstevel@tonic-gate bamp->b_wptr = bamp->b_rptr + basize; 26230Sstevel@tonic-gate b_ack = (struct T_bind_ack *)bamp->b_rptr; 26240Sstevel@tonic-gate b_ack->PRIM_type = T_BIND_ACK; 26250Sstevel@tonic-gate b_ack->CONIND_number = qlen; 26260Sstevel@tonic-gate b_ack->ADDR_length = tep->te_alen; 26270Sstevel@tonic-gate b_ack->ADDR_offset = (t_scalar_t)sizeof (struct T_bind_ack); 26280Sstevel@tonic-gate addr_startp = bamp->b_rptr + b_ack->ADDR_offset; 26290Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen); 26300Sstevel@tonic-gate 26310Sstevel@tonic-gate if (IS_COTS(tep)) { 26320Sstevel@tonic-gate tep->te_qlen = qlen; 26330Sstevel@tonic-gate if (qlen > 0) 26340Sstevel@tonic-gate tep->te_flag |= TL_LISTENER; 26350Sstevel@tonic-gate } 26360Sstevel@tonic-gate 26370Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_BIND_ACK, tep->te_state); 26380Sstevel@tonic-gate /* 26390Sstevel@tonic-gate * send T_BIND_ACK message 26400Sstevel@tonic-gate */ 26410Sstevel@tonic-gate (void) qreply(wq, bamp); 26420Sstevel@tonic-gate return; 26430Sstevel@tonic-gate 26440Sstevel@tonic-gate error: 26450Sstevel@tonic-gate ackmp = reallocb(mp, sizeof (struct T_error_ack), 0); 26460Sstevel@tonic-gate if (ackmp == NULL) { 26470Sstevel@tonic-gate /* 26480Sstevel@tonic-gate * roll back state changes 26490Sstevel@tonic-gate */ 26500Sstevel@tonic-gate tep->te_state = save_state; 26510Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 26520Sstevel@tonic-gate return; 26530Sstevel@tonic-gate } 26540Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 26550Sstevel@tonic-gate tl_error_ack(wq, ackmp, tli_err, unix_err, save_prim_type); 26560Sstevel@tonic-gate } 26570Sstevel@tonic-gate 26580Sstevel@tonic-gate /* 26590Sstevel@tonic-gate * Process T_UNBIND_REQ. 26600Sstevel@tonic-gate * Called from serializer. 
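 *
 * A sketch of the exchange handled below (provider view, arrows only
 * indicate message direction):
 *
 *	down from user			up to user
 *	T_UNBIND_REQ	->
 *			<-	M_FLUSH (FLUSHRW)
 *			<-	T_OK_ACK
 *				(T_ERROR_ACK with TOUTSTATE if the
 *				endpoint was not in TS_IDLE)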
26610Sstevel@tonic-gate */ 26620Sstevel@tonic-gate static void 26630Sstevel@tonic-gate tl_unbind(mblk_t *mp, tl_endpt_t *tep) 26640Sstevel@tonic-gate { 26650Sstevel@tonic-gate queue_t *wq; 26660Sstevel@tonic-gate mblk_t *ackmp; 26670Sstevel@tonic-gate 26680Sstevel@tonic-gate if (tep->te_closing) { 26690Sstevel@tonic-gate freemsg(mp); 26700Sstevel@tonic-gate return; 26710Sstevel@tonic-gate } 26720Sstevel@tonic-gate 26730Sstevel@tonic-gate wq = tep->te_wq; 26740Sstevel@tonic-gate 26750Sstevel@tonic-gate /* 26760Sstevel@tonic-gate * preallocate memory for max of T_OK_ACK and T_ERROR_ACK 26770Sstevel@tonic-gate * ==> allocate for T_ERROR_ACK (known max) 26780Sstevel@tonic-gate */ 26790Sstevel@tonic-gate if ((ackmp = reallocb(mp, sizeof (struct T_error_ack), 0)) == NULL) { 26800Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 26810Sstevel@tonic-gate return; 26820Sstevel@tonic-gate } 26830Sstevel@tonic-gate /* 26840Sstevel@tonic-gate * memory resources committed 26850Sstevel@tonic-gate * Note: no message validation. T_UNBIND_REQ message is 26860Sstevel@tonic-gate * same size as PRIM_type field so already verified earlier. 26870Sstevel@tonic-gate */ 26880Sstevel@tonic-gate 26890Sstevel@tonic-gate /* 26900Sstevel@tonic-gate * validate state 26910Sstevel@tonic-gate */ 26920Sstevel@tonic-gate if (tep->te_state != TS_IDLE) { 26930Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 26945240Snordmark SL_TRACE|SL_ERROR, 26955240Snordmark "tl_wput:T_UNBIND_REQ:out of state, state=%d", 26965240Snordmark tep->te_state)); 26970Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_UNBIND_REQ); 26980Sstevel@tonic-gate return; 26990Sstevel@tonic-gate } 27000Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_UNBIND_REQ, tep->te_state); 27010Sstevel@tonic-gate 27020Sstevel@tonic-gate /* 27030Sstevel@tonic-gate * TPI says on T_UNBIND_REQ: 27040Sstevel@tonic-gate * send up a M_FLUSH to flush both 27050Sstevel@tonic-gate * read and write queues 27060Sstevel@tonic-gate */ 27070Sstevel@tonic-gate (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW); 27080Sstevel@tonic-gate 27090Sstevel@tonic-gate if (! IS_SOCKET(tep) || !IS_CLTS(tep) || tep->te_qlen != 0 || 27100Sstevel@tonic-gate tep->te_magic != SOU_MAGIC_EXPLICIT) { 27110Sstevel@tonic-gate 27120Sstevel@tonic-gate /* 27130Sstevel@tonic-gate * Sockets use bind with qlen==0 followed by bind() to 27140Sstevel@tonic-gate * the same address with qlen > 0 for listeners. 27150Sstevel@tonic-gate * We allow rebind with a new qlen value. 27160Sstevel@tonic-gate */ 27170Sstevel@tonic-gate tl_addr_unbind(tep); 27180Sstevel@tonic-gate } 27190Sstevel@tonic-gate 27200Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state); 27210Sstevel@tonic-gate /* 27220Sstevel@tonic-gate * send T_OK_ACK 27230Sstevel@tonic-gate */ 27240Sstevel@tonic-gate tl_ok_ack(wq, ackmp, T_UNBIND_REQ); 27250Sstevel@tonic-gate } 27260Sstevel@tonic-gate 27270Sstevel@tonic-gate 27280Sstevel@tonic-gate /* 27290Sstevel@tonic-gate * Option management code from drv/ip is used here 27300Sstevel@tonic-gate * Note: TL_PROT_LEVEL/TL_IOC_CREDOPT option is not part of tl_opt_arr 27310Sstevel@tonic-gate * database of options. So optcom_req() will fail T_SVR4_OPTMGMT_REQ. 
27320Sstevel@tonic-gate * However, that is what we want as that option is 'unorthodox' 27330Sstevel@tonic-gate * and only valid in T_CONN_IND, T_CONN_CON and T_UNITDATA_IND 27340Sstevel@tonic-gate * and not in T_SVR4_OPTMGMT_REQ/ACK 27350Sstevel@tonic-gate * Note2: use of optcom_req means this routine is an exception to 27360Sstevel@tonic-gate * recovery from allocb() failures. 27370Sstevel@tonic-gate */ 27380Sstevel@tonic-gate 27390Sstevel@tonic-gate static void 27400Sstevel@tonic-gate tl_optmgmt(queue_t *wq, mblk_t *mp) 27410Sstevel@tonic-gate { 27420Sstevel@tonic-gate tl_endpt_t *tep; 27430Sstevel@tonic-gate mblk_t *ackmp; 27440Sstevel@tonic-gate union T_primitives *prim; 27450Sstevel@tonic-gate 27460Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr; 27470Sstevel@tonic-gate prim = (union T_primitives *)mp->b_rptr; 27480Sstevel@tonic-gate 27490Sstevel@tonic-gate /* all states OK for AF_UNIX options ? */ 27500Sstevel@tonic-gate if (!IS_SOCKET(tep) && tep->te_state != TS_IDLE && 27510Sstevel@tonic-gate prim->type == T_SVR4_OPTMGMT_REQ) { 27520Sstevel@tonic-gate /* 27530Sstevel@tonic-gate * Broken TLI semantics that options can only be managed 27540Sstevel@tonic-gate * in TS_IDLE state. Needed for Sparc ABI test suite that 27550Sstevel@tonic-gate * tests this TLI (mis)feature using this device driver. 27560Sstevel@tonic-gate */ 27570Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 27585240Snordmark SL_TRACE|SL_ERROR, 27595240Snordmark "tl_wput:T_SVR4_OPTMGMT_REQ:out of state, state=%d", 27605240Snordmark tep->te_state)); 27610Sstevel@tonic-gate /* 27620Sstevel@tonic-gate * preallocate memory for T_ERROR_ACK 27630Sstevel@tonic-gate */ 27640Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED); 27650Sstevel@tonic-gate if (! ackmp) { 27660Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 27670Sstevel@tonic-gate return; 27680Sstevel@tonic-gate } 27690Sstevel@tonic-gate 27700Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_SVR4_OPTMGMT_REQ); 27710Sstevel@tonic-gate freemsg(mp); 27720Sstevel@tonic-gate return; 27730Sstevel@tonic-gate } 27740Sstevel@tonic-gate 27750Sstevel@tonic-gate /* 27760Sstevel@tonic-gate * call common option management routine from drv/ip 27770Sstevel@tonic-gate */ 27780Sstevel@tonic-gate if (prim->type == T_SVR4_OPTMGMT_REQ) { 27795240Snordmark (void) svr4_optcom_req(wq, mp, tep->te_credp, &tl_opt_obj, 27805240Snordmark B_FALSE); 27810Sstevel@tonic-gate } else { 27820Sstevel@tonic-gate ASSERT(prim->type == T_OPTMGMT_REQ); 27835240Snordmark (void) tpi_optcom_req(wq, mp, tep->te_credp, &tl_opt_obj, 27845240Snordmark B_FALSE); 27850Sstevel@tonic-gate } 27860Sstevel@tonic-gate } 27870Sstevel@tonic-gate 27880Sstevel@tonic-gate /* 27890Sstevel@tonic-gate * Handle T_conn_req - the driver part of accept(). 27900Sstevel@tonic-gate * If TL_SET[U]CRED generate the credentials options. 27910Sstevel@tonic-gate * If this is a socket pass through options unmodified. 27920Sstevel@tonic-gate * For sockets generate the T_CONN_CON here instead of 27930Sstevel@tonic-gate * waiting for the T_CONN_RES. 
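 *
 * A rough picture of the flow implemented below for a socket connect,
 * assuming early connects are not disabled via tl_disable_early_connect:
 *
 *	connecting endpoint		listening endpoint
 *	T_CONN_REQ ->
 *	<- T_OK_ACK
 *	<- T_CONN_CON (sockets only)
 *					T_CONN_IND -> (queued on the
 *					listener until it answers with
 *					T_CONN_RES)
 *
 * If the destination does not exist or its connect queue is full, the
 * T_OK_ACK is still sent but is followed by a T_DISCON_IND.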
27940Sstevel@tonic-gate */ 27950Sstevel@tonic-gate static void 27960Sstevel@tonic-gate tl_conn_req(queue_t *wq, mblk_t *mp) 27970Sstevel@tonic-gate { 27980Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr; 27990Sstevel@tonic-gate struct T_conn_req *creq = (struct T_conn_req *)mp->b_rptr; 28000Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 28010Sstevel@tonic-gate t_scalar_t alen, aoff, olen, ooff, err = 0; 28020Sstevel@tonic-gate tl_endpt_t *peer_tep = NULL; 28030Sstevel@tonic-gate mblk_t *ackmp; 28040Sstevel@tonic-gate mblk_t *dimp; 28050Sstevel@tonic-gate struct T_discon_ind *di; 28060Sstevel@tonic-gate soux_addr_t ux_addr; 28070Sstevel@tonic-gate tl_addr_t dst; 28080Sstevel@tonic-gate 28090Sstevel@tonic-gate ASSERT(IS_COTS(tep)); 28100Sstevel@tonic-gate 28110Sstevel@tonic-gate if (tep->te_closing) { 28120Sstevel@tonic-gate freemsg(mp); 28130Sstevel@tonic-gate return; 28140Sstevel@tonic-gate } 28150Sstevel@tonic-gate 28160Sstevel@tonic-gate /* 28170Sstevel@tonic-gate * preallocate memory for: 28180Sstevel@tonic-gate * 1. max of T_ERROR_ACK and T_OK_ACK 28190Sstevel@tonic-gate * ==> known max T_ERROR_ACK 28200Sstevel@tonic-gate * 2. max of T_DISCON_IND and T_CONN_IND 28210Sstevel@tonic-gate */ 28220Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED); 28230Sstevel@tonic-gate if (! ackmp) { 28240Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 28250Sstevel@tonic-gate return; 28260Sstevel@tonic-gate } 28270Sstevel@tonic-gate /* 28280Sstevel@tonic-gate * memory committed for T_OK_ACK/T_ERROR_ACK now 28290Sstevel@tonic-gate * will be committed for T_DISCON_IND/T_CONN_IND later 28300Sstevel@tonic-gate */ 28310Sstevel@tonic-gate 28320Sstevel@tonic-gate if (tep->te_state != TS_IDLE) { 28330Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 28345240Snordmark SL_TRACE|SL_ERROR, 28355240Snordmark "tl_wput:T_CONN_REQ:out of state, state=%d", 28365240Snordmark tep->te_state)); 28370Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ); 28380Sstevel@tonic-gate freemsg(mp); 28390Sstevel@tonic-gate return; 28400Sstevel@tonic-gate } 28410Sstevel@tonic-gate 28420Sstevel@tonic-gate /* 28430Sstevel@tonic-gate * validate the message 28440Sstevel@tonic-gate * Note: dereference fields in struct inside message only 28450Sstevel@tonic-gate * after validating the message length. 
28460Sstevel@tonic-gate */ 28470Sstevel@tonic-gate if (msz < sizeof (struct T_conn_req)) { 28480Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 28495240Snordmark "tl_conn_req:invalid message length")); 28500Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ); 28510Sstevel@tonic-gate freemsg(mp); 28520Sstevel@tonic-gate return; 28530Sstevel@tonic-gate } 28540Sstevel@tonic-gate alen = creq->DEST_length; 28550Sstevel@tonic-gate aoff = creq->DEST_offset; 28560Sstevel@tonic-gate olen = creq->OPT_length; 28570Sstevel@tonic-gate ooff = creq->OPT_offset; 28580Sstevel@tonic-gate if (olen == 0) 28590Sstevel@tonic-gate ooff = 0; 28600Sstevel@tonic-gate 28610Sstevel@tonic-gate if (IS_SOCKET(tep)) { 28620Sstevel@tonic-gate if ((alen != TL_SOUX_ADDRLEN) || 28630Sstevel@tonic-gate (aoff < 0) || 28640Sstevel@tonic-gate (aoff + alen > msz) || 28650Sstevel@tonic-gate (alen > msz - sizeof (struct T_conn_req))) { 28660Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 28670Sstevel@tonic-gate 1, SL_TRACE|SL_ERROR, 28680Sstevel@tonic-gate "tl_conn_req: invalid socket addr")); 28690Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ); 28700Sstevel@tonic-gate freemsg(mp); 28710Sstevel@tonic-gate return; 28720Sstevel@tonic-gate } 28730Sstevel@tonic-gate bcopy(mp->b_rptr + aoff, &ux_addr, TL_SOUX_ADDRLEN); 28740Sstevel@tonic-gate if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) && 28750Sstevel@tonic-gate (ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) { 28760Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 28775240Snordmark 1, SL_TRACE|SL_ERROR, 28785240Snordmark "tl_conn_req: invalid socket magic")); 28790Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ); 28800Sstevel@tonic-gate freemsg(mp); 28810Sstevel@tonic-gate return; 28820Sstevel@tonic-gate } 28830Sstevel@tonic-gate } else { 28840Sstevel@tonic-gate if ((alen > 0 && ((aoff + alen) > msz || aoff + alen < 0)) || 28850Sstevel@tonic-gate (olen > 0 && ((ssize_t)(ooff + olen) > msz || 28865240Snordmark ooff + olen < 0)) || 28870Sstevel@tonic-gate olen < 0 || ooff < 0) { 28880Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 28895240Snordmark SL_TRACE|SL_ERROR, 28905240Snordmark "tl_conn_req:invalid message")); 28910Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ); 28920Sstevel@tonic-gate freemsg(mp); 28930Sstevel@tonic-gate return; 28940Sstevel@tonic-gate } 28950Sstevel@tonic-gate 28960Sstevel@tonic-gate if (alen <= 0 || aoff < 0 || 28970Sstevel@tonic-gate (ssize_t)alen > msz - sizeof (struct T_conn_req)) { 28980Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 28990Sstevel@tonic-gate SL_TRACE|SL_ERROR, 29000Sstevel@tonic-gate "tl_conn_req:bad addr in message, " 29010Sstevel@tonic-gate "alen=%d, msz=%ld", 29020Sstevel@tonic-gate alen, msz)); 29030Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADADDR, 0, T_CONN_REQ); 29040Sstevel@tonic-gate freemsg(mp); 29050Sstevel@tonic-gate return; 29060Sstevel@tonic-gate } 29070Sstevel@tonic-gate #ifdef DEBUG 29080Sstevel@tonic-gate /* 29090Sstevel@tonic-gate * Mild form of ASSERT()ion to detect broken TPI apps. 29100Sstevel@tonic-gate * if (! assertion) 29110Sstevel@tonic-gate * log warning; 29120Sstevel@tonic-gate */ 29130Sstevel@tonic-gate if (! 
(aoff >= (t_scalar_t)sizeof (struct T_conn_req))) { 29140Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 29155240Snordmark SL_TRACE|SL_ERROR, 29165240Snordmark "tl_conn_req: addr overlaps TPI message")); 29170Sstevel@tonic-gate } 29180Sstevel@tonic-gate #endif 29190Sstevel@tonic-gate if (olen) { 29200Sstevel@tonic-gate /* 29210Sstevel@tonic-gate * no opts in connect req 29220Sstevel@tonic-gate * supported in this provider except for sockets. 29230Sstevel@tonic-gate */ 29240Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 29255240Snordmark SL_TRACE|SL_ERROR, 29265240Snordmark "tl_conn_req:options not supported " 29275240Snordmark "in message")); 29280Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADOPT, 0, T_CONN_REQ); 29290Sstevel@tonic-gate freemsg(mp); 29300Sstevel@tonic-gate return; 29310Sstevel@tonic-gate } 29320Sstevel@tonic-gate } 29330Sstevel@tonic-gate 29340Sstevel@tonic-gate /* 29350Sstevel@tonic-gate * Prevent tep from closing on us. 29360Sstevel@tonic-gate */ 29370Sstevel@tonic-gate if (! tl_noclose(tep)) { 29380Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 29395240Snordmark "tl_conn_req:endpoint is closing")); 29400Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ); 29410Sstevel@tonic-gate freemsg(mp); 29420Sstevel@tonic-gate return; 29430Sstevel@tonic-gate } 29440Sstevel@tonic-gate 29450Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_CONN_REQ, tep->te_state); 29460Sstevel@tonic-gate /* 29470Sstevel@tonic-gate * get endpoint to connect to 29480Sstevel@tonic-gate * check that peer with DEST addr is bound to addr 29490Sstevel@tonic-gate * and has CONIND_number > 0 29500Sstevel@tonic-gate */ 29510Sstevel@tonic-gate dst.ta_alen = alen; 29520Sstevel@tonic-gate dst.ta_abuf = mp->b_rptr + aoff; 29530Sstevel@tonic-gate dst.ta_zoneid = tep->te_zoneid; 29540Sstevel@tonic-gate 29550Sstevel@tonic-gate /* 29560Sstevel@tonic-gate * Verify if remote addr is in use 29570Sstevel@tonic-gate */ 29580Sstevel@tonic-gate peer_tep = (IS_SOCKET(tep) ? 
29590Sstevel@tonic-gate tl_sock_find_peer(tep, &ux_addr) : 29600Sstevel@tonic-gate tl_find_peer(tep, &dst)); 29610Sstevel@tonic-gate 29620Sstevel@tonic-gate if (peer_tep == NULL) { 29630Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 29645240Snordmark "tl_conn_req:no one at connect address")); 29650Sstevel@tonic-gate err = ECONNREFUSED; 29660Sstevel@tonic-gate } else if (peer_tep->te_nicon >= peer_tep->te_qlen) { 29670Sstevel@tonic-gate /* 29680Sstevel@tonic-gate * validate that number of incoming connection is 29690Sstevel@tonic-gate * not to capacity on destination endpoint 29700Sstevel@tonic-gate */ 29710Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE, 29725240Snordmark "tl_conn_req: qlen overflow connection refused")); 29730Sstevel@tonic-gate err = ECONNREFUSED; 29740Sstevel@tonic-gate } 29750Sstevel@tonic-gate 29760Sstevel@tonic-gate /* 29773661Sakolb * Send T_DISCON_IND in case of error 29780Sstevel@tonic-gate */ 29790Sstevel@tonic-gate if (err != 0) { 29800Sstevel@tonic-gate if (peer_tep != NULL) 29810Sstevel@tonic-gate tl_refrele(peer_tep); 29820Sstevel@tonic-gate /* We are still expected to send T_OK_ACK */ 29830Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state); 29840Sstevel@tonic-gate tl_ok_ack(tep->te_wq, ackmp, T_CONN_REQ); 29850Sstevel@tonic-gate tl_closeok(tep); 29860Sstevel@tonic-gate dimp = tpi_ack_alloc(mp, sizeof (struct T_discon_ind), 29870Sstevel@tonic-gate M_PROTO, T_DISCON_IND); 29880Sstevel@tonic-gate if (dimp == NULL) { 29890Sstevel@tonic-gate tl_merror(wq, NULL, ENOSR); 29900Sstevel@tonic-gate return; 29910Sstevel@tonic-gate } 29920Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr; 29930Sstevel@tonic-gate di->DISCON_reason = err; 29940Sstevel@tonic-gate di->SEQ_number = BADSEQNUM; 29950Sstevel@tonic-gate 29960Sstevel@tonic-gate tep->te_state = TS_IDLE; 29970Sstevel@tonic-gate /* 29980Sstevel@tonic-gate * send T_DISCON_IND message 29990Sstevel@tonic-gate */ 30000Sstevel@tonic-gate putnext(tep->te_rq, dimp); 30010Sstevel@tonic-gate return; 30020Sstevel@tonic-gate } 30030Sstevel@tonic-gate 30040Sstevel@tonic-gate ASSERT(IS_COTS(peer_tep)); 30050Sstevel@tonic-gate 30060Sstevel@tonic-gate /* 30070Sstevel@tonic-gate * Found the listener. At this point processing will continue on 30080Sstevel@tonic-gate * listener serializer. Close of the endpoint should be blocked while we 30090Sstevel@tonic-gate * switch serializers. 30100Sstevel@tonic-gate */ 30110Sstevel@tonic-gate tl_serializer_refhold(peer_tep->te_ser); 30120Sstevel@tonic-gate tl_serializer_refrele(tep->te_ser); 30130Sstevel@tonic-gate tep->te_ser = peer_tep->te_ser; 30140Sstevel@tonic-gate ASSERT(tep->te_oconp == NULL); 30150Sstevel@tonic-gate tep->te_oconp = peer_tep; 30160Sstevel@tonic-gate 30170Sstevel@tonic-gate /* 30180Sstevel@tonic-gate * It is safe to close now. Close may continue on listener serializer. 30190Sstevel@tonic-gate */ 30200Sstevel@tonic-gate tl_closeok(tep); 30210Sstevel@tonic-gate 30220Sstevel@tonic-gate /* 30230Sstevel@tonic-gate * Pass ackmp to tl_conn_req_ser. Note that mp->b_cont may contain user 30240Sstevel@tonic-gate * data, so we link mp to ackmp. 
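 *
 * The chain handed to the serializer then looks roughly like:
 *
 *	ackmp (preallocated T_OK_ACK/T_ERROR_ACK buffer)
 *	    b_cont -> mp (T_CONN_REQ)
 *			b_cont -> optional user data
 *
 * tl_conn_req_ser() unlinks ackmp from mp again before using either.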
30250Sstevel@tonic-gate */ 30260Sstevel@tonic-gate ackmp->b_cont = mp; 30270Sstevel@tonic-gate mp = ackmp; 30280Sstevel@tonic-gate 30290Sstevel@tonic-gate tl_refhold(tep); 30300Sstevel@tonic-gate tl_serializer_enter(tep, tl_conn_req_ser, mp); 30310Sstevel@tonic-gate } 30320Sstevel@tonic-gate 30330Sstevel@tonic-gate /* 30340Sstevel@tonic-gate * Finish T_CONN_REQ processing on listener serializer. 30350Sstevel@tonic-gate */ 30360Sstevel@tonic-gate static void 30370Sstevel@tonic-gate tl_conn_req_ser(mblk_t *mp, tl_endpt_t *tep) 30380Sstevel@tonic-gate { 30390Sstevel@tonic-gate queue_t *wq; 30400Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_oconp; 30410Sstevel@tonic-gate mblk_t *confmp, *cimp, *indmp; 30420Sstevel@tonic-gate void *opts = NULL; 30430Sstevel@tonic-gate mblk_t *ackmp = mp; 30440Sstevel@tonic-gate struct T_conn_req *creq = (struct T_conn_req *)mp->b_cont->b_rptr; 30450Sstevel@tonic-gate struct T_conn_ind *ci; 30460Sstevel@tonic-gate tl_icon_t *tip; 30470Sstevel@tonic-gate void *addr_startp; 30480Sstevel@tonic-gate t_scalar_t olen = creq->OPT_length; 30490Sstevel@tonic-gate t_scalar_t ooff = creq->OPT_offset; 30500Sstevel@tonic-gate size_t ci_msz; 30510Sstevel@tonic-gate size_t size; 30520Sstevel@tonic-gate 30530Sstevel@tonic-gate if (tep->te_closing) { 30540Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 30550Sstevel@tonic-gate tl_serializer_exit(tep); 30560Sstevel@tonic-gate tl_refrele(tep); 30570Sstevel@tonic-gate freemsg(mp); 30580Sstevel@tonic-gate return; 30590Sstevel@tonic-gate } 30600Sstevel@tonic-gate 30610Sstevel@tonic-gate wq = tep->te_wq; 30620Sstevel@tonic-gate tep->te_flag |= TL_EAGER; 30630Sstevel@tonic-gate 30640Sstevel@tonic-gate /* 30650Sstevel@tonic-gate * Extract preallocated ackmp from mp. 30660Sstevel@tonic-gate */ 30670Sstevel@tonic-gate mp = mp->b_cont; 30680Sstevel@tonic-gate ackmp->b_cont = NULL; 30690Sstevel@tonic-gate 30700Sstevel@tonic-gate if (olen == 0) 30710Sstevel@tonic-gate ooff = 0; 30720Sstevel@tonic-gate 30730Sstevel@tonic-gate if (peer_tep->te_closing || 30740Sstevel@tonic-gate !((peer_tep->te_state == TS_IDLE) || 30755240Snordmark (peer_tep->te_state == TS_WRES_CIND))) { 30763661Sakolb (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR, 30775240Snordmark "tl_conn_req:peer in bad state (%d)", 30785240Snordmark peer_tep->te_state)); 30790Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 30800Sstevel@tonic-gate tl_error_ack(wq, mp, TSYSERR, ECONNREFUSED, T_CONN_REQ); 30810Sstevel@tonic-gate freemsg(ackmp); 30820Sstevel@tonic-gate tl_serializer_exit(tep); 30830Sstevel@tonic-gate tl_refrele(tep); 30840Sstevel@tonic-gate return; 30850Sstevel@tonic-gate } 30860Sstevel@tonic-gate 30870Sstevel@tonic-gate /* 30880Sstevel@tonic-gate * preallocate now for T_DISCON_IND or T_CONN_IND 30890Sstevel@tonic-gate */ 30900Sstevel@tonic-gate /* 30910Sstevel@tonic-gate * calculate length of T_CONN_IND message 30920Sstevel@tonic-gate */ 30930Sstevel@tonic-gate if (peer_tep->te_flag & TL_SETCRED) { 30940Sstevel@tonic-gate ooff = 0; 30950Sstevel@tonic-gate olen = (t_scalar_t) sizeof (struct opthdr) + 30960Sstevel@tonic-gate OPTLEN(sizeof (tl_credopt_t)); 30970Sstevel@tonic-gate /* 1 option only */ 30980Sstevel@tonic-gate } else if (peer_tep->te_flag & TL_SETUCRED) { 30990Sstevel@tonic-gate ooff = 0; 31000Sstevel@tonic-gate olen = (t_scalar_t)sizeof (struct opthdr) + 31010Sstevel@tonic-gate OPTLEN(ucredsize); 31020Sstevel@tonic-gate /* 1 option only */ 31030Sstevel@tonic-gate } 31040Sstevel@tonic-gate ci_msz = sizeof (struct T_conn_ind) + tep->te_alen; 
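/*
 * Illustration of the T_CONN_IND buffer being sized here; the offsets
 * are the ones computed further below when the message is built:
 *
 *	+-------------------+----------------+-----+---------+
 *	| struct T_conn_ind | source address | pad | options |
 *	+-------------------+----------------+-----+---------+
 *	SRC_offset = sizeof (struct T_conn_ind)
 *	OPT_offset = T_ALIGN(SRC_offset + SRC_length)
 */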
31050Sstevel@tonic-gate ci_msz = T_ALIGN(ci_msz) + olen; 31060Sstevel@tonic-gate size = max(ci_msz, sizeof (struct T_discon_ind)); 31070Sstevel@tonic-gate 31080Sstevel@tonic-gate /* 31090Sstevel@tonic-gate * Save options from mp - we'll need them for T_CONN_IND. 31100Sstevel@tonic-gate */ 31110Sstevel@tonic-gate if (ooff != 0) { 31120Sstevel@tonic-gate opts = kmem_alloc(olen, KM_NOSLEEP); 31130Sstevel@tonic-gate if (opts == NULL) { 31140Sstevel@tonic-gate /* 31150Sstevel@tonic-gate * roll back state changes 31160Sstevel@tonic-gate */ 31170Sstevel@tonic-gate tep->te_state = TS_IDLE; 31180Sstevel@tonic-gate tl_memrecover(wq, mp, size); 31190Sstevel@tonic-gate freemsg(ackmp); 31200Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 31210Sstevel@tonic-gate tl_serializer_exit(tep); 31220Sstevel@tonic-gate tl_refrele(tep); 31230Sstevel@tonic-gate return; 31240Sstevel@tonic-gate } 31250Sstevel@tonic-gate /* Copy options to a temp buffer */ 31260Sstevel@tonic-gate bcopy(mp->b_rptr + ooff, opts, olen); 31270Sstevel@tonic-gate } 31280Sstevel@tonic-gate 31290Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect) { 31300Sstevel@tonic-gate /* 31310Sstevel@tonic-gate * Generate a T_CONN_CON that has the identical address 31320Sstevel@tonic-gate * (and options) as the T_CONN_REQ. 31330Sstevel@tonic-gate * NOTE: assumes that the T_conn_req and T_conn_con structures 31340Sstevel@tonic-gate * are isomorphic. 31350Sstevel@tonic-gate */ 31360Sstevel@tonic-gate confmp = copyb(mp); 31370Sstevel@tonic-gate if (! confmp) { 31380Sstevel@tonic-gate /* 31390Sstevel@tonic-gate * roll back state changes 31400Sstevel@tonic-gate */ 31410Sstevel@tonic-gate tep->te_state = TS_IDLE; 31420Sstevel@tonic-gate tl_memrecover(wq, mp, mp->b_wptr - mp->b_rptr); 31430Sstevel@tonic-gate freemsg(ackmp); 31440Sstevel@tonic-gate if (opts != NULL) 31450Sstevel@tonic-gate kmem_free(opts, olen); 31460Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 31470Sstevel@tonic-gate tl_serializer_exit(tep); 31480Sstevel@tonic-gate tl_refrele(tep); 31490Sstevel@tonic-gate return; 31500Sstevel@tonic-gate } 31510Sstevel@tonic-gate ((struct T_conn_con *)(confmp->b_rptr))->PRIM_type = 31525240Snordmark T_CONN_CON; 31530Sstevel@tonic-gate } else { 31540Sstevel@tonic-gate confmp = NULL; 31550Sstevel@tonic-gate } 31560Sstevel@tonic-gate if ((indmp = reallocb(mp, size, 0)) == NULL) { 31570Sstevel@tonic-gate /* 31580Sstevel@tonic-gate * roll back state changes 31590Sstevel@tonic-gate */ 31600Sstevel@tonic-gate tep->te_state = TS_IDLE; 31610Sstevel@tonic-gate tl_memrecover(wq, mp, size); 31620Sstevel@tonic-gate freemsg(ackmp); 31630Sstevel@tonic-gate if (opts != NULL) 31640Sstevel@tonic-gate kmem_free(opts, olen); 31650Sstevel@tonic-gate freemsg(confmp); 31660Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 31670Sstevel@tonic-gate tl_serializer_exit(tep); 31680Sstevel@tonic-gate tl_refrele(tep); 31690Sstevel@tonic-gate return; 31700Sstevel@tonic-gate } 31710Sstevel@tonic-gate 31720Sstevel@tonic-gate tip = kmem_zalloc(sizeof (*tip), KM_NOSLEEP); 31730Sstevel@tonic-gate if (tip == NULL) { 31740Sstevel@tonic-gate /* 31750Sstevel@tonic-gate * roll back state changes 31760Sstevel@tonic-gate */ 31770Sstevel@tonic-gate tep->te_state = TS_IDLE; 31780Sstevel@tonic-gate tl_memrecover(wq, indmp, sizeof (*tip)); 31790Sstevel@tonic-gate freemsg(ackmp); 31800Sstevel@tonic-gate if (opts != NULL) 31810Sstevel@tonic-gate kmem_free(opts, olen); 31820Sstevel@tonic-gate freemsg(confmp); 31830Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 31840Sstevel@tonic-gate 
tl_serializer_exit(tep); 31850Sstevel@tonic-gate tl_refrele(tep); 31860Sstevel@tonic-gate return; 31870Sstevel@tonic-gate } 31880Sstevel@tonic-gate tip->ti_mp = NULL; 31890Sstevel@tonic-gate 31900Sstevel@tonic-gate /* 31910Sstevel@tonic-gate * memory is now committed for T_DISCON_IND/T_CONN_IND/T_CONN_CON 31920Sstevel@tonic-gate * and tl_icon_t cell. 31930Sstevel@tonic-gate */ 31940Sstevel@tonic-gate 31950Sstevel@tonic-gate /* 31960Sstevel@tonic-gate * ack validity of request and send the peer credential in the ACK. 31970Sstevel@tonic-gate */ 31980Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state); 31990Sstevel@tonic-gate 32000Sstevel@tonic-gate if (peer_tep != NULL && peer_tep->te_credp != NULL && 32010Sstevel@tonic-gate confmp != NULL) { 32020Sstevel@tonic-gate mblk_setcred(confmp, peer_tep->te_credp); 32030Sstevel@tonic-gate DB_CPID(confmp) = peer_tep->te_cpid; 32040Sstevel@tonic-gate } 32050Sstevel@tonic-gate 32060Sstevel@tonic-gate tl_ok_ack(wq, ackmp, T_CONN_REQ); 32070Sstevel@tonic-gate 32080Sstevel@tonic-gate /* 32090Sstevel@tonic-gate * prepare message to send T_CONN_IND 32100Sstevel@tonic-gate */ 32110Sstevel@tonic-gate /* 32120Sstevel@tonic-gate * allocate the message - original data blocks retained 32130Sstevel@tonic-gate * in the returned mblk 32140Sstevel@tonic-gate */ 32150Sstevel@tonic-gate cimp = tl_resizemp(indmp, size); 32160Sstevel@tonic-gate if (! cimp) { 32170Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 32185240Snordmark "tl_conn_req:con_ind:allocb failure")); 32190Sstevel@tonic-gate tl_merror(wq, indmp, ENOMEM); 32200Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 32210Sstevel@tonic-gate tl_serializer_exit(tep); 32220Sstevel@tonic-gate tl_refrele(tep); 32230Sstevel@tonic-gate if (opts != NULL) 32240Sstevel@tonic-gate kmem_free(opts, olen); 32250Sstevel@tonic-gate freemsg(confmp); 32260Sstevel@tonic-gate ASSERT(tip->ti_mp == NULL); 32270Sstevel@tonic-gate kmem_free(tip, sizeof (*tip)); 32280Sstevel@tonic-gate return; 32290Sstevel@tonic-gate } 32300Sstevel@tonic-gate 32310Sstevel@tonic-gate DB_TYPE(cimp) = M_PROTO; 32320Sstevel@tonic-gate ci = (struct T_conn_ind *)cimp->b_rptr; 32330Sstevel@tonic-gate ci->PRIM_type = T_CONN_IND; 32340Sstevel@tonic-gate ci->SRC_offset = (t_scalar_t)sizeof (struct T_conn_ind); 32350Sstevel@tonic-gate ci->SRC_length = tep->te_alen; 32360Sstevel@tonic-gate ci->SEQ_number = tep->te_seqno; 32370Sstevel@tonic-gate 32380Sstevel@tonic-gate addr_startp = cimp->b_rptr + ci->SRC_offset; 32390Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen); 32400Sstevel@tonic-gate if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED)) { 32410Sstevel@tonic-gate ci->OPT_offset = (t_scalar_t)T_ALIGN(ci->SRC_offset + 32425240Snordmark ci->SRC_length); 32430Sstevel@tonic-gate ci->OPT_length = olen; /* because only 1 option */ 32440Sstevel@tonic-gate tl_fill_option(cimp->b_rptr + ci->OPT_offset, 32455240Snordmark DB_CREDDEF(cimp, tep->te_credp), 32465240Snordmark TLPID(cimp, tep), 32475240Snordmark peer_tep->te_flag, peer_tep->te_credp); 32480Sstevel@tonic-gate } else if (ooff != 0) { 32490Sstevel@tonic-gate /* Copy option from T_CONN_REQ */ 32500Sstevel@tonic-gate ci->OPT_offset = (t_scalar_t)T_ALIGN(ci->SRC_offset + 32515240Snordmark ci->SRC_length); 32520Sstevel@tonic-gate ci->OPT_length = olen; 32530Sstevel@tonic-gate ASSERT(opts != NULL); 32540Sstevel@tonic-gate bcopy(opts, (void *)((uintptr_t)ci + ci->OPT_offset), olen); 32550Sstevel@tonic-gate } else { 32560Sstevel@tonic-gate ci->OPT_offset = 0; 
32570Sstevel@tonic-gate ci->OPT_length = 0; 32580Sstevel@tonic-gate } 32590Sstevel@tonic-gate if (opts != NULL) 32600Sstevel@tonic-gate kmem_free(opts, olen); 32610Sstevel@tonic-gate 32620Sstevel@tonic-gate /* 32630Sstevel@tonic-gate * register connection request with server peer 32640Sstevel@tonic-gate * append to list of incoming connections 32650Sstevel@tonic-gate * increment references for both peer_tep and tep: peer_tep is placed on 32660Sstevel@tonic-gate * te_oconp and tep is placed on listeners queue. 32670Sstevel@tonic-gate */ 32680Sstevel@tonic-gate tip->ti_tep = tep; 32690Sstevel@tonic-gate tip->ti_seqno = tep->te_seqno; 32700Sstevel@tonic-gate list_insert_tail(&peer_tep->te_iconp, tip); 32710Sstevel@tonic-gate peer_tep->te_nicon++; 32720Sstevel@tonic-gate 32730Sstevel@tonic-gate peer_tep->te_state = NEXTSTATE(TE_CONN_IND, peer_tep->te_state); 32740Sstevel@tonic-gate /* 32750Sstevel@tonic-gate * send the T_CONN_IND message 32760Sstevel@tonic-gate */ 32770Sstevel@tonic-gate putnext(peer_tep->te_rq, cimp); 32780Sstevel@tonic-gate 32790Sstevel@tonic-gate /* 32800Sstevel@tonic-gate * Send a T_CONN_CON message for sockets. 32810Sstevel@tonic-gate * Disable the queues until we have reached the correct state! 32820Sstevel@tonic-gate */ 32830Sstevel@tonic-gate if (confmp != NULL) { 32840Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_CONN_CON, tep->te_state); 32850Sstevel@tonic-gate noenable(wq); 32860Sstevel@tonic-gate putnext(tep->te_rq, confmp); 32870Sstevel@tonic-gate } 32880Sstevel@tonic-gate /* 32890Sstevel@tonic-gate * Now we need to increment tep reference because tep is referenced by 32900Sstevel@tonic-gate * server list of pending connections. We also need to decrement 32910Sstevel@tonic-gate * reference before exiting serializer. Two operations void each other 32920Sstevel@tonic-gate * so we don't modify reference at all. 32930Sstevel@tonic-gate */ 32940Sstevel@tonic-gate ASSERT(tep->te_refcnt >= 2); 32950Sstevel@tonic-gate ASSERT(peer_tep->te_refcnt >= 2); 32960Sstevel@tonic-gate tl_serializer_exit(tep); 32970Sstevel@tonic-gate } 32980Sstevel@tonic-gate 32990Sstevel@tonic-gate 33000Sstevel@tonic-gate 33010Sstevel@tonic-gate /* 33020Sstevel@tonic-gate * Handle T_conn_res on listener stream. Called on listener serializer. 33030Sstevel@tonic-gate * tl_conn_req has already generated the T_CONN_CON. 33040Sstevel@tonic-gate * tl_conn_res is called on listener serializer. 33050Sstevel@tonic-gate * No one accesses acceptor at this point, so it is safe to modify acceptor. 33060Sstevel@tonic-gate * Switch eager serializer to acceptor's. 33070Sstevel@tonic-gate * 33080Sstevel@tonic-gate * If TL_SET[U]CRED generate the credentials options. 33090Sstevel@tonic-gate * For sockets tl_conn_req has already generated the T_CONN_CON. 
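 *
 * For orientation (illustrative values only), the listener drives this
 * path by sending down a T_conn_res along the lines of:
 *
 *	cres.PRIM_type   = T_CONN_RES;		(or O_T_CONN_RES)
 *	cres.ACCEPTOR_id = id of the accepting endpoint, resolved via
 *			   the tr_ai_hash lookup below
 *	cres.SEQ_number  = SEQ_number from the corresponding T_CONN_IND,
 *			   matched against the listener's pending list
 *			   by tl_icon_find()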
33100Sstevel@tonic-gate */ 33110Sstevel@tonic-gate static void 33120Sstevel@tonic-gate tl_conn_res(mblk_t *mp, tl_endpt_t *tep) 33130Sstevel@tonic-gate { 33140Sstevel@tonic-gate queue_t *wq; 33150Sstevel@tonic-gate struct T_conn_res *cres = (struct T_conn_res *)mp->b_rptr; 33160Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 33170Sstevel@tonic-gate t_scalar_t olen, ooff, err = 0; 33180Sstevel@tonic-gate t_scalar_t prim = cres->PRIM_type; 33190Sstevel@tonic-gate uchar_t *addr_startp; 33200Sstevel@tonic-gate tl_endpt_t *acc_ep = NULL, *cl_ep = NULL; 33210Sstevel@tonic-gate tl_icon_t *tip; 33220Sstevel@tonic-gate size_t size; 33230Sstevel@tonic-gate mblk_t *ackmp, *respmp; 33240Sstevel@tonic-gate mblk_t *dimp, *ccmp = NULL; 33250Sstevel@tonic-gate struct T_discon_ind *di; 33260Sstevel@tonic-gate struct T_conn_con *cc; 33270Sstevel@tonic-gate boolean_t client_noclose_set = B_FALSE; 33280Sstevel@tonic-gate boolean_t switch_client_serializer = B_TRUE; 33290Sstevel@tonic-gate 33300Sstevel@tonic-gate ASSERT(IS_COTS(tep)); 33310Sstevel@tonic-gate 33320Sstevel@tonic-gate if (tep->te_closing) { 33330Sstevel@tonic-gate freemsg(mp); 33340Sstevel@tonic-gate return; 33350Sstevel@tonic-gate } 33360Sstevel@tonic-gate 33370Sstevel@tonic-gate wq = tep->te_wq; 33380Sstevel@tonic-gate 33390Sstevel@tonic-gate /* 33400Sstevel@tonic-gate * preallocate memory for: 33410Sstevel@tonic-gate * 1. max of T_ERROR_ACK and T_OK_ACK 33420Sstevel@tonic-gate * ==> known max T_ERROR_ACK 33430Sstevel@tonic-gate * 2. max of T_DISCON_IND and T_CONN_CON 33440Sstevel@tonic-gate */ 33450Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED); 33460Sstevel@tonic-gate if (! ackmp) { 33470Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 33480Sstevel@tonic-gate return; 33490Sstevel@tonic-gate } 33500Sstevel@tonic-gate /* 33510Sstevel@tonic-gate * memory committed for T_OK_ACK/T_ERROR_ACK now 33520Sstevel@tonic-gate * will be committed for T_DISCON_IND/T_CONN_CON later 33530Sstevel@tonic-gate */ 33540Sstevel@tonic-gate 33550Sstevel@tonic-gate 33560Sstevel@tonic-gate ASSERT(prim == T_CONN_RES || prim == O_T_CONN_RES); 33570Sstevel@tonic-gate 33580Sstevel@tonic-gate /* 33590Sstevel@tonic-gate * validate state 33600Sstevel@tonic-gate */ 33610Sstevel@tonic-gate if (tep->te_state != TS_WRES_CIND) { 33620Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 33635240Snordmark SL_TRACE|SL_ERROR, 33645240Snordmark "tl_wput:T_CONN_RES:out of state, state=%d", 33655240Snordmark tep->te_state)); 33660Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim); 33670Sstevel@tonic-gate freemsg(mp); 33680Sstevel@tonic-gate return; 33690Sstevel@tonic-gate } 33700Sstevel@tonic-gate 33710Sstevel@tonic-gate /* 33720Sstevel@tonic-gate * validate the message 33730Sstevel@tonic-gate * Note: dereference fields in struct inside message only 33740Sstevel@tonic-gate * after validating the message length. 
33750Sstevel@tonic-gate */ 33760Sstevel@tonic-gate if (msz < sizeof (struct T_conn_res)) { 33770Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 33785240Snordmark "tl_conn_res:invalid message length")); 33790Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim); 33800Sstevel@tonic-gate freemsg(mp); 33810Sstevel@tonic-gate return; 33820Sstevel@tonic-gate } 33830Sstevel@tonic-gate olen = cres->OPT_length; 33840Sstevel@tonic-gate ooff = cres->OPT_offset; 33850Sstevel@tonic-gate if (((olen > 0) && ((ooff + olen) > msz))) { 33860Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 33875240Snordmark "tl_conn_res:invalid message")); 33880Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim); 33890Sstevel@tonic-gate freemsg(mp); 33900Sstevel@tonic-gate return; 33910Sstevel@tonic-gate } 33920Sstevel@tonic-gate if (olen) { 33930Sstevel@tonic-gate /* 33940Sstevel@tonic-gate * no opts in connect res 33950Sstevel@tonic-gate * supported in this provider 33960Sstevel@tonic-gate */ 33970Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 33985240Snordmark "tl_conn_res:options not supported in message")); 33990Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADOPT, 0, prim); 34000Sstevel@tonic-gate freemsg(mp); 34010Sstevel@tonic-gate return; 34020Sstevel@tonic-gate } 34030Sstevel@tonic-gate 34040Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_CONN_RES, tep->te_state); 34050Sstevel@tonic-gate ASSERT(tep->te_state == TS_WACK_CRES); 34060Sstevel@tonic-gate 34070Sstevel@tonic-gate if (cres->SEQ_number < TL_MINOR_START && 34085240Snordmark cres->SEQ_number >= BADSEQNUM) { 34090Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR, 34105240Snordmark "tl_conn_res:remote endpoint sequence number bad")); 34110Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 34120Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADSEQ, 0, prim); 34130Sstevel@tonic-gate freemsg(mp); 34140Sstevel@tonic-gate return; 34150Sstevel@tonic-gate } 34160Sstevel@tonic-gate 34170Sstevel@tonic-gate /* 34180Sstevel@tonic-gate * find accepting endpoint. Will have extra reference if found. 34190Sstevel@tonic-gate */ 34200Sstevel@tonic-gate if (mod_hash_find_cb(tep->te_transport->tr_ai_hash, 34215240Snordmark (mod_hash_key_t)(uintptr_t)cres->ACCEPTOR_id, 34225240Snordmark (mod_hash_val_t *)&acc_ep, tl_find_callback) != 0) { 34230Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR, 34245240Snordmark "tl_conn_res:bad accepting endpoint")); 34250Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 34260Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADF, 0, prim); 34270Sstevel@tonic-gate freemsg(mp); 34280Sstevel@tonic-gate return; 34290Sstevel@tonic-gate } 34300Sstevel@tonic-gate 34310Sstevel@tonic-gate /* 34320Sstevel@tonic-gate * Prevent acceptor from closing. 34330Sstevel@tonic-gate */ 34340Sstevel@tonic-gate if (! 
tl_noclose(acc_ep)) { 34350Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR, 34365240Snordmark "tl_conn_res:bad accepting endpoint")); 34370Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 34380Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADF, 0, prim); 34390Sstevel@tonic-gate tl_refrele(acc_ep); 34400Sstevel@tonic-gate freemsg(mp); 34410Sstevel@tonic-gate return; 34420Sstevel@tonic-gate } 34430Sstevel@tonic-gate 34440Sstevel@tonic-gate acc_ep->te_flag |= TL_ACCEPTOR; 34450Sstevel@tonic-gate 34460Sstevel@tonic-gate /* 34470Sstevel@tonic-gate * validate that accepting endpoint, if different from listening 34480Sstevel@tonic-gate * has address bound => state is TS_IDLE 34490Sstevel@tonic-gate * TROUBLE in XPG4 !!? 34500Sstevel@tonic-gate */ 34510Sstevel@tonic-gate if ((tep != acc_ep) && (acc_ep->te_state != TS_IDLE)) { 34520Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR, 34535240Snordmark "tl_conn_res:accepting endpoint has no address bound," 34545240Snordmark "state=%d", acc_ep->te_state)); 34550Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 34560Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim); 34570Sstevel@tonic-gate freemsg(mp); 34580Sstevel@tonic-gate tl_closeok(acc_ep); 34590Sstevel@tonic-gate tl_refrele(acc_ep); 34600Sstevel@tonic-gate return; 34610Sstevel@tonic-gate } 34620Sstevel@tonic-gate 34630Sstevel@tonic-gate /* 34640Sstevel@tonic-gate * validate if accepting endpt same as listening, then 34650Sstevel@tonic-gate * no other incoming connection should be on the queue 34660Sstevel@tonic-gate */ 34670Sstevel@tonic-gate 34680Sstevel@tonic-gate if ((tep == acc_ep) && (tep->te_nicon > 1)) { 34690Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 34705240Snordmark "tl_conn_res: > 1 conn_ind on listener-acceptor")); 34710Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 34720Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADF, 0, prim); 34730Sstevel@tonic-gate freemsg(mp); 34740Sstevel@tonic-gate tl_closeok(acc_ep); 34750Sstevel@tonic-gate tl_refrele(acc_ep); 34760Sstevel@tonic-gate return; 34770Sstevel@tonic-gate } 34780Sstevel@tonic-gate 34790Sstevel@tonic-gate /* 34800Sstevel@tonic-gate * Mark for deletion, the entry corresponding to client 34810Sstevel@tonic-gate * on list of pending connections made by the listener 34820Sstevel@tonic-gate * search list to see if client is one of the 34830Sstevel@tonic-gate * recorded as a listener. 34840Sstevel@tonic-gate */ 34850Sstevel@tonic-gate tip = tl_icon_find(tep, cres->SEQ_number); 34860Sstevel@tonic-gate if (tip == NULL) { 34870Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR, 34885240Snordmark "tl_conn_res:no client in listener list")); 34890Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 34900Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADSEQ, 0, prim); 34910Sstevel@tonic-gate freemsg(mp); 34920Sstevel@tonic-gate tl_closeok(acc_ep); 34930Sstevel@tonic-gate tl_refrele(acc_ep); 34940Sstevel@tonic-gate return; 34950Sstevel@tonic-gate } 34960Sstevel@tonic-gate 34970Sstevel@tonic-gate /* 34980Sstevel@tonic-gate * If ti_tep is NULL the client has already closed. In this case 34990Sstevel@tonic-gate * the code below will avoid any action on the client side 35000Sstevel@tonic-gate * but complete the server and acceptor state transitions. 
35010Sstevel@tonic-gate */
35020Sstevel@tonic-gate ASSERT(tip->ti_tep == NULL ||
35035240Snordmark tip->ti_tep->te_seqno == cres->SEQ_number);
35040Sstevel@tonic-gate cl_ep = tip->ti_tep;
35050Sstevel@tonic-gate
35060Sstevel@tonic-gate /*
35070Sstevel@tonic-gate * If the client is present it is switched from listener's to acceptor's
35080Sstevel@tonic-gate * serializer. We should block client closes while serializers are
35090Sstevel@tonic-gate * being switched.
35100Sstevel@tonic-gate *
35110Sstevel@tonic-gate * It is possible that the client is present but is currently being
35120Sstevel@tonic-gate * closed. There are two possible cases:
35130Sstevel@tonic-gate *
35140Sstevel@tonic-gate * 1) The client has already entered tl_close_finish_ser() and sent
35150Sstevel@tonic-gate * T_ORDREL_IND. In this case we can just ignore the client (but we
35160Sstevel@tonic-gate * still need to send all messages from tip->ti_mp to the acceptor).
35170Sstevel@tonic-gate *
35180Sstevel@tonic-gate * 2) The client started the close but has not entered
35190Sstevel@tonic-gate * tl_close_finish_ser() yet. In this case, the client is already
35200Sstevel@tonic-gate * proceeding asynchronously on the listener's serializer, so we're
35210Sstevel@tonic-gate * forced to change the acceptor to use the listener's serializer to
35220Sstevel@tonic-gate * ensure that any operations on the acceptor are serialized with
35230Sstevel@tonic-gate * respect to the close that's in-progress.
35240Sstevel@tonic-gate */
35250Sstevel@tonic-gate if (cl_ep != NULL) {
35260Sstevel@tonic-gate if (tl_noclose(cl_ep)) {
35270Sstevel@tonic-gate client_noclose_set = B_TRUE;
35280Sstevel@tonic-gate } else {
35290Sstevel@tonic-gate /*
35300Sstevel@tonic-gate * Client is closing. If it has sent the
35310Sstevel@tonic-gate * T_ORDREL_IND, we can simply ignore it - otherwise,
35320Sstevel@tonic-gate * we have to let the client continue until it is
35330Sstevel@tonic-gate * sent.
35340Sstevel@tonic-gate *
35350Sstevel@tonic-gate * If we do continue using the client, acceptor will
35360Sstevel@tonic-gate * switch to client's serializer which is used by client
35370Sstevel@tonic-gate * for its close.
35380Sstevel@tonic-gate */ 35390Sstevel@tonic-gate tl_client_closing_when_accepting++; 35400Sstevel@tonic-gate switch_client_serializer = B_FALSE; 35410Sstevel@tonic-gate if (!IS_SOCKET(cl_ep) || tl_disable_early_connect || 35420Sstevel@tonic-gate cl_ep->te_state == -1) 35430Sstevel@tonic-gate cl_ep = NULL; 35440Sstevel@tonic-gate } 35450Sstevel@tonic-gate } 35460Sstevel@tonic-gate 35470Sstevel@tonic-gate if (cl_ep != NULL) { 35480Sstevel@tonic-gate /* 35490Sstevel@tonic-gate * validate client state to be TS_WCON_CREQ or TS_DATA_XFER 35500Sstevel@tonic-gate * (latter for sockets only) 35510Sstevel@tonic-gate */ 35520Sstevel@tonic-gate if (cl_ep->te_state != TS_WCON_CREQ && 35530Sstevel@tonic-gate (cl_ep->te_state != TS_DATA_XFER && 35540Sstevel@tonic-gate IS_SOCKET(cl_ep))) { 35550Sstevel@tonic-gate err = ECONNREFUSED; 35560Sstevel@tonic-gate /* 35570Sstevel@tonic-gate * T_DISCON_IND sent later after committing memory 35580Sstevel@tonic-gate * and acking validity of request 35590Sstevel@tonic-gate */ 35600Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE, 35615240Snordmark "tl_conn_res:peer in bad state")); 35620Sstevel@tonic-gate } 35630Sstevel@tonic-gate 35640Sstevel@tonic-gate /* 35650Sstevel@tonic-gate * preallocate now for T_DISCON_IND or T_CONN_CONN 35660Sstevel@tonic-gate * ack validity of request (T_OK_ACK) after memory committed 35670Sstevel@tonic-gate */ 35680Sstevel@tonic-gate 35690Sstevel@tonic-gate if (err) 35700Sstevel@tonic-gate size = sizeof (struct T_discon_ind); 35710Sstevel@tonic-gate else { 35720Sstevel@tonic-gate /* 35730Sstevel@tonic-gate * calculate length of T_CONN_CON message 35740Sstevel@tonic-gate */ 35750Sstevel@tonic-gate olen = 0; 35760Sstevel@tonic-gate if (cl_ep->te_flag & TL_SETCRED) { 35770Sstevel@tonic-gate olen = (t_scalar_t)sizeof (struct opthdr) + 35785240Snordmark OPTLEN(sizeof (tl_credopt_t)); 35790Sstevel@tonic-gate } else if (cl_ep->te_flag & TL_SETUCRED) { 35800Sstevel@tonic-gate olen = (t_scalar_t)sizeof (struct opthdr) + 35815240Snordmark OPTLEN(ucredsize); 35820Sstevel@tonic-gate } 35830Sstevel@tonic-gate size = T_ALIGN(sizeof (struct T_conn_con) + 35845240Snordmark acc_ep->te_alen) + olen; 35850Sstevel@tonic-gate } 35860Sstevel@tonic-gate if ((respmp = reallocb(mp, size, 0)) == NULL) { 35870Sstevel@tonic-gate /* 35880Sstevel@tonic-gate * roll back state changes 35890Sstevel@tonic-gate */ 35900Sstevel@tonic-gate tep->te_state = TS_WRES_CIND; 35910Sstevel@tonic-gate tl_memrecover(wq, mp, size); 35920Sstevel@tonic-gate freemsg(ackmp); 35930Sstevel@tonic-gate if (client_noclose_set) 35940Sstevel@tonic-gate tl_closeok(cl_ep); 35950Sstevel@tonic-gate tl_closeok(acc_ep); 35960Sstevel@tonic-gate tl_refrele(acc_ep); 35970Sstevel@tonic-gate return; 35980Sstevel@tonic-gate } 35990Sstevel@tonic-gate mp = NULL; 36000Sstevel@tonic-gate } 36010Sstevel@tonic-gate 36020Sstevel@tonic-gate /* 36030Sstevel@tonic-gate * Now ack validity of request 36040Sstevel@tonic-gate */ 36050Sstevel@tonic-gate if (tep->te_nicon == 1) { 36060Sstevel@tonic-gate if (tep == acc_ep) 36070Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK2, tep->te_state); 36080Sstevel@tonic-gate else 36090Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK3, tep->te_state); 36100Sstevel@tonic-gate } else 36110Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK4, tep->te_state); 36120Sstevel@tonic-gate 36130Sstevel@tonic-gate /* 36140Sstevel@tonic-gate * send T_DISCON_IND now if client state validation failed earlier 36150Sstevel@tonic-gate */ 36160Sstevel@tonic-gate if 
(err) { 36170Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim); 36180Sstevel@tonic-gate /* 36190Sstevel@tonic-gate * flush the queues - why always ? 36200Sstevel@tonic-gate */ 36210Sstevel@tonic-gate (void) putnextctl1(acc_ep->te_rq, M_FLUSH, FLUSHR); 36220Sstevel@tonic-gate 36230Sstevel@tonic-gate dimp = tl_resizemp(respmp, size); 36240Sstevel@tonic-gate if (! dimp) { 36250Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 36265240Snordmark SL_TRACE|SL_ERROR, 36275240Snordmark "tl_conn_res:con_ind:allocb failure")); 36280Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM); 36290Sstevel@tonic-gate tl_closeok(acc_ep); 36300Sstevel@tonic-gate if (client_noclose_set) 36310Sstevel@tonic-gate tl_closeok(cl_ep); 36320Sstevel@tonic-gate tl_refrele(acc_ep); 36330Sstevel@tonic-gate return; 36340Sstevel@tonic-gate } 36350Sstevel@tonic-gate if (dimp->b_cont) { 36360Sstevel@tonic-gate /* no user data in provider generated discon ind */ 36370Sstevel@tonic-gate freemsg(dimp->b_cont); 36380Sstevel@tonic-gate dimp->b_cont = NULL; 36390Sstevel@tonic-gate } 36400Sstevel@tonic-gate 36410Sstevel@tonic-gate DB_TYPE(dimp) = M_PROTO; 36420Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr; 36430Sstevel@tonic-gate di->PRIM_type = T_DISCON_IND; 36440Sstevel@tonic-gate di->DISCON_reason = err; 36450Sstevel@tonic-gate di->SEQ_number = BADSEQNUM; 36460Sstevel@tonic-gate 36470Sstevel@tonic-gate tep->te_state = TS_IDLE; 36480Sstevel@tonic-gate /* 36490Sstevel@tonic-gate * send T_DISCON_IND message 36500Sstevel@tonic-gate */ 36510Sstevel@tonic-gate putnext(acc_ep->te_rq, dimp); 36520Sstevel@tonic-gate if (client_noclose_set) 36530Sstevel@tonic-gate tl_closeok(cl_ep); 36540Sstevel@tonic-gate tl_closeok(acc_ep); 36550Sstevel@tonic-gate tl_refrele(acc_ep); 36560Sstevel@tonic-gate return; 36570Sstevel@tonic-gate } 36580Sstevel@tonic-gate 36590Sstevel@tonic-gate /* 36600Sstevel@tonic-gate * now start connecting the accepting endpoint 36610Sstevel@tonic-gate */ 36620Sstevel@tonic-gate if (tep != acc_ep) 36630Sstevel@tonic-gate acc_ep->te_state = NEXTSTATE(TE_PASS_CONN, acc_ep->te_state); 36640Sstevel@tonic-gate 36650Sstevel@tonic-gate if (cl_ep == NULL) { 36660Sstevel@tonic-gate /* 36670Sstevel@tonic-gate * The client has already closed. Send up any queued messages 36680Sstevel@tonic-gate * and change the state accordingly. 36690Sstevel@tonic-gate */ 36700Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim); 36710Sstevel@tonic-gate tl_icon_sendmsgs(acc_ep, &tip->ti_mp); 36720Sstevel@tonic-gate 36730Sstevel@tonic-gate /* 36740Sstevel@tonic-gate * remove endpoint from incoming connection 36750Sstevel@tonic-gate * delete client from list of incoming connections 36760Sstevel@tonic-gate */ 36770Sstevel@tonic-gate tl_freetip(tep, tip); 36780Sstevel@tonic-gate freemsg(mp); 36790Sstevel@tonic-gate tl_closeok(acc_ep); 36800Sstevel@tonic-gate tl_refrele(acc_ep); 36810Sstevel@tonic-gate return; 36820Sstevel@tonic-gate } else if (tip->ti_mp != NULL) { 36830Sstevel@tonic-gate /* 36840Sstevel@tonic-gate * The client could have queued a T_DISCON_IND which needs 36850Sstevel@tonic-gate * to be sent up. 36860Sstevel@tonic-gate * Note that t_discon_req can not operate the same as 36870Sstevel@tonic-gate * t_data_req since it is not possible for it to putbq 36880Sstevel@tonic-gate * the message and return -1 due to the use of qwriter. 
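		 *
		 * (For contrast, an illustrative sketch of what the data path
		 * can do instead:
		 *
		 *	if (!canputnext(peer_rq) && !closing) {
		 *		TL_PUTBQ(tep, mp);
		 *		return;
		 *	}
		 *
		 * and retry from the write service procedure.  A queued
		 * T_DISCON_IND has no such retry point, so it is staged on
		 * tip->ti_mp and drained here via tl_icon_sendmsgs().)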
36890Sstevel@tonic-gate */ 36900Sstevel@tonic-gate tl_icon_sendmsgs(acc_ep, &tip->ti_mp); 36910Sstevel@tonic-gate } 36920Sstevel@tonic-gate 36930Sstevel@tonic-gate /* 36940Sstevel@tonic-gate * prepare connect confirm T_CONN_CON message 36950Sstevel@tonic-gate */ 36960Sstevel@tonic-gate 36970Sstevel@tonic-gate /* 36980Sstevel@tonic-gate * allocate the message - original data blocks 36990Sstevel@tonic-gate * retained in the returned mblk 37000Sstevel@tonic-gate */ 37010Sstevel@tonic-gate if (! IS_SOCKET(cl_ep) || tl_disable_early_connect) { 37020Sstevel@tonic-gate ccmp = tl_resizemp(respmp, size); 37030Sstevel@tonic-gate if (ccmp == NULL) { 37040Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim); 37050Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 37065240Snordmark SL_TRACE|SL_ERROR, 37075240Snordmark "tl_conn_res:conn_con:allocb failure")); 37080Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM); 37090Sstevel@tonic-gate tl_closeok(acc_ep); 37100Sstevel@tonic-gate if (client_noclose_set) 37110Sstevel@tonic-gate tl_closeok(cl_ep); 37120Sstevel@tonic-gate tl_refrele(acc_ep); 37130Sstevel@tonic-gate return; 37140Sstevel@tonic-gate } 37150Sstevel@tonic-gate 37160Sstevel@tonic-gate DB_TYPE(ccmp) = M_PROTO; 37170Sstevel@tonic-gate cc = (struct T_conn_con *)ccmp->b_rptr; 37180Sstevel@tonic-gate cc->PRIM_type = T_CONN_CON; 37190Sstevel@tonic-gate cc->RES_offset = (t_scalar_t)sizeof (struct T_conn_con); 37200Sstevel@tonic-gate cc->RES_length = acc_ep->te_alen; 37210Sstevel@tonic-gate addr_startp = ccmp->b_rptr + cc->RES_offset; 37220Sstevel@tonic-gate bcopy(acc_ep->te_abuf, addr_startp, acc_ep->te_alen); 37230Sstevel@tonic-gate if (cl_ep->te_flag & (TL_SETCRED|TL_SETUCRED)) { 37240Sstevel@tonic-gate cc->OPT_offset = (t_scalar_t)T_ALIGN(cc->RES_offset + 37250Sstevel@tonic-gate cc->RES_length); 37260Sstevel@tonic-gate cc->OPT_length = olen; 37270Sstevel@tonic-gate tl_fill_option(ccmp->b_rptr + cc->OPT_offset, 37281676Sjpk acc_ep->te_credp, acc_ep->te_cpid, cl_ep->te_flag, 37291676Sjpk cl_ep->te_credp); 37300Sstevel@tonic-gate } else { 37310Sstevel@tonic-gate cc->OPT_offset = 0; 37320Sstevel@tonic-gate cc->OPT_length = 0; 37330Sstevel@tonic-gate } 37340Sstevel@tonic-gate /* 37350Sstevel@tonic-gate * Forward the credential in the packet so it can be picked up 37360Sstevel@tonic-gate * at the higher layers for more complete credential processing 37370Sstevel@tonic-gate */ 37380Sstevel@tonic-gate mblk_setcred(ccmp, acc_ep->te_credp); 37390Sstevel@tonic-gate DB_CPID(ccmp) = acc_ep->te_cpid; 37400Sstevel@tonic-gate } else { 37410Sstevel@tonic-gate freemsg(respmp); 37420Sstevel@tonic-gate respmp = NULL; 37430Sstevel@tonic-gate } 37440Sstevel@tonic-gate 37450Sstevel@tonic-gate /* 37460Sstevel@tonic-gate * make connection linking 37470Sstevel@tonic-gate * accepting and client endpoints 37480Sstevel@tonic-gate * No need to increment references: 37490Sstevel@tonic-gate * on client: it should already have one from tip->ti_tep linkage. 37500Sstevel@tonic-gate * on acceptor is should already have one from the table lookup. 37510Sstevel@tonic-gate * 37520Sstevel@tonic-gate * At this point both client and acceptor can't close. Set client 37530Sstevel@tonic-gate * serializer to acceptor's. 
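	 *
	 * The resulting linkage (sketch of the assignments done below):
	 *
	 *	cl_ep->te_conp  = acc_ep;
	 *	acc_ep->te_conp = cl_ep;
	 *	cl_ep->te_ser   = acc_ep->te_ser;	when the switch is possible
	 *
	 * If the client's serializer cannot be switched (it is busy with its
	 * own close), the acceptor adopts the client's/listener's serializer
	 * instead.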
37540Sstevel@tonic-gate */ 37550Sstevel@tonic-gate ASSERT(cl_ep->te_refcnt >= 2); 37560Sstevel@tonic-gate ASSERT(acc_ep->te_refcnt >= 2); 37570Sstevel@tonic-gate ASSERT(cl_ep->te_conp == NULL); 37580Sstevel@tonic-gate ASSERT(acc_ep->te_conp == NULL); 37590Sstevel@tonic-gate cl_ep->te_conp = acc_ep; 37600Sstevel@tonic-gate acc_ep->te_conp = cl_ep; 37610Sstevel@tonic-gate ASSERT(cl_ep->te_ser == tep->te_ser); 37620Sstevel@tonic-gate if (switch_client_serializer) { 37630Sstevel@tonic-gate mutex_enter(&cl_ep->te_ser_lock); 37640Sstevel@tonic-gate if (cl_ep->te_ser_count > 0) { 37650Sstevel@tonic-gate switch_client_serializer = B_FALSE; 37660Sstevel@tonic-gate tl_serializer_noswitch++; 37670Sstevel@tonic-gate } else { 37680Sstevel@tonic-gate /* 37690Sstevel@tonic-gate * Move client to the acceptor's serializer. 37700Sstevel@tonic-gate */ 37710Sstevel@tonic-gate tl_serializer_refhold(acc_ep->te_ser); 37720Sstevel@tonic-gate tl_serializer_refrele(cl_ep->te_ser); 37730Sstevel@tonic-gate cl_ep->te_ser = acc_ep->te_ser; 37740Sstevel@tonic-gate } 37750Sstevel@tonic-gate mutex_exit(&cl_ep->te_ser_lock); 37760Sstevel@tonic-gate } 37770Sstevel@tonic-gate if (!switch_client_serializer) { 37780Sstevel@tonic-gate /* 37790Sstevel@tonic-gate * It is not possible to switch client to use acceptor's. 37800Sstevel@tonic-gate * Move acceptor to client's serializer (which is the same as 37810Sstevel@tonic-gate * listener's). 37820Sstevel@tonic-gate */ 37830Sstevel@tonic-gate tl_serializer_refhold(cl_ep->te_ser); 37840Sstevel@tonic-gate tl_serializer_refrele(acc_ep->te_ser); 37850Sstevel@tonic-gate acc_ep->te_ser = cl_ep->te_ser; 37860Sstevel@tonic-gate } 37870Sstevel@tonic-gate 37880Sstevel@tonic-gate TL_REMOVE_PEER(cl_ep->te_oconp); 37890Sstevel@tonic-gate TL_REMOVE_PEER(acc_ep->te_oconp); 37900Sstevel@tonic-gate 37910Sstevel@tonic-gate /* 37920Sstevel@tonic-gate * remove endpoint from incoming connection 37930Sstevel@tonic-gate * delete client from list of incoming connections 37940Sstevel@tonic-gate */ 37950Sstevel@tonic-gate tip->ti_tep = NULL; 37960Sstevel@tonic-gate tl_freetip(tep, tip); 37970Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim); 37980Sstevel@tonic-gate 37990Sstevel@tonic-gate /* 38000Sstevel@tonic-gate * data blocks already linked in reallocb() 38010Sstevel@tonic-gate */ 38020Sstevel@tonic-gate 38030Sstevel@tonic-gate /* 38040Sstevel@tonic-gate * link queues so that I_SENDFD will work 38050Sstevel@tonic-gate */ 38060Sstevel@tonic-gate if (! IS_SOCKET(tep)) { 38070Sstevel@tonic-gate acc_ep->te_wq->q_next = cl_ep->te_rq; 38080Sstevel@tonic-gate cl_ep->te_wq->q_next = acc_ep->te_rq; 38090Sstevel@tonic-gate } 38100Sstevel@tonic-gate 38110Sstevel@tonic-gate /* 38120Sstevel@tonic-gate * send T_CONN_CON up on client side unless it was already 38130Sstevel@tonic-gate * done (for a socket). In cases any data or ordrel req has been 38140Sstevel@tonic-gate * queued make sure that the service procedure runs. 
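	 *
	 * Sketch of the two branches that follow:
	 *
	 *	socket (early connect):	enableok() + TL_QENABLE() the client
	 *				so queued data/ordrel is processed;
	 *				the preallocated T_CONN_CON (ccmp) is
	 *				freed since it was sent earlier.
	 *	TLI, or early connect
	 *	disabled:		client state advances on TE_CONN_CON
	 *				and the T_CONN_CON is putnext()ed
	 *				upstream.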
38150Sstevel@tonic-gate */ 38160Sstevel@tonic-gate if (IS_SOCKET(cl_ep) && !tl_disable_early_connect) { 38170Sstevel@tonic-gate enableok(cl_ep->te_wq); 38180Sstevel@tonic-gate TL_QENABLE(cl_ep); 38190Sstevel@tonic-gate if (ccmp != NULL) 38200Sstevel@tonic-gate freemsg(ccmp); 38210Sstevel@tonic-gate } else { 38220Sstevel@tonic-gate /* 38230Sstevel@tonic-gate * change client state on TE_CONN_CON event 38240Sstevel@tonic-gate */ 38250Sstevel@tonic-gate cl_ep->te_state = NEXTSTATE(TE_CONN_CON, cl_ep->te_state); 38260Sstevel@tonic-gate putnext(cl_ep->te_rq, ccmp); 38270Sstevel@tonic-gate } 38280Sstevel@tonic-gate 38290Sstevel@tonic-gate /* Mark the both endpoints as accepted */ 38300Sstevel@tonic-gate cl_ep->te_flag |= TL_ACCEPTED; 38310Sstevel@tonic-gate acc_ep->te_flag |= TL_ACCEPTED; 38320Sstevel@tonic-gate 38330Sstevel@tonic-gate /* 38340Sstevel@tonic-gate * Allow client and acceptor to close. 38350Sstevel@tonic-gate */ 38360Sstevel@tonic-gate tl_closeok(acc_ep); 38370Sstevel@tonic-gate if (client_noclose_set) 38380Sstevel@tonic-gate tl_closeok(cl_ep); 38390Sstevel@tonic-gate } 38400Sstevel@tonic-gate 38410Sstevel@tonic-gate 38420Sstevel@tonic-gate 38430Sstevel@tonic-gate 38440Sstevel@tonic-gate static void 38450Sstevel@tonic-gate tl_discon_req(mblk_t *mp, tl_endpt_t *tep) 38460Sstevel@tonic-gate { 38470Sstevel@tonic-gate queue_t *wq; 38480Sstevel@tonic-gate struct T_discon_req *dr; 38490Sstevel@tonic-gate ssize_t msz; 38500Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_conp; 38510Sstevel@tonic-gate tl_endpt_t *srv_tep = tep->te_oconp; 38520Sstevel@tonic-gate tl_icon_t *tip; 38530Sstevel@tonic-gate size_t size; 38540Sstevel@tonic-gate mblk_t *ackmp, *dimp, *respmp; 38550Sstevel@tonic-gate struct T_discon_ind *di; 38560Sstevel@tonic-gate t_scalar_t save_state, new_state; 38570Sstevel@tonic-gate 38580Sstevel@tonic-gate if (tep->te_closing) { 38590Sstevel@tonic-gate freemsg(mp); 38600Sstevel@tonic-gate return; 38610Sstevel@tonic-gate } 38620Sstevel@tonic-gate 38630Sstevel@tonic-gate if ((peer_tep != NULL) && peer_tep->te_closing) { 38640Sstevel@tonic-gate TL_UNCONNECT(tep->te_conp); 38650Sstevel@tonic-gate peer_tep = NULL; 38660Sstevel@tonic-gate } 38670Sstevel@tonic-gate if ((srv_tep != NULL) && srv_tep->te_closing) { 38680Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 38690Sstevel@tonic-gate srv_tep = NULL; 38700Sstevel@tonic-gate } 38710Sstevel@tonic-gate 38720Sstevel@tonic-gate wq = tep->te_wq; 38730Sstevel@tonic-gate 38740Sstevel@tonic-gate /* 38750Sstevel@tonic-gate * preallocate memory for: 38760Sstevel@tonic-gate * 1. max of T_ERROR_ACK and T_OK_ACK 38770Sstevel@tonic-gate * ==> known max T_ERROR_ACK 38780Sstevel@tonic-gate * 2. for T_DISCON_IND 38790Sstevel@tonic-gate */ 38800Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED); 38810Sstevel@tonic-gate if (! ackmp) { 38820Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 38830Sstevel@tonic-gate return; 38840Sstevel@tonic-gate } 38850Sstevel@tonic-gate /* 38860Sstevel@tonic-gate * memory committed for T_OK_ACK/T_ERROR_ACK now 38870Sstevel@tonic-gate * will be committed for T_DISCON_IND later 38880Sstevel@tonic-gate */ 38890Sstevel@tonic-gate 38900Sstevel@tonic-gate dr = (struct T_discon_req *)mp->b_rptr; 38910Sstevel@tonic-gate msz = MBLKL(mp); 38920Sstevel@tonic-gate 38930Sstevel@tonic-gate /* 38940Sstevel@tonic-gate * validate the state 38950Sstevel@tonic-gate */ 38960Sstevel@tonic-gate save_state = new_state = tep->te_state; 38970Sstevel@tonic-gate if (! 
(save_state >= TS_WCON_CREQ && save_state <= TS_WRES_CIND) && 38980Sstevel@tonic-gate ! (save_state >= TS_DATA_XFER && save_state <= TS_WREQ_ORDREL)) { 38990Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 39005240Snordmark SL_TRACE|SL_ERROR, 39015240Snordmark "tl_wput:T_DISCON_REQ:out of state, state=%d", 39025240Snordmark tep->te_state)); 39030Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_DISCON_REQ); 39040Sstevel@tonic-gate freemsg(mp); 39050Sstevel@tonic-gate return; 39060Sstevel@tonic-gate } 39070Sstevel@tonic-gate /* 39080Sstevel@tonic-gate * Defer committing the state change until it is determined if 39090Sstevel@tonic-gate * the message will be queued with the tl_icon or not. 39100Sstevel@tonic-gate */ 39110Sstevel@tonic-gate new_state = NEXTSTATE(TE_DISCON_REQ, tep->te_state); 39120Sstevel@tonic-gate 39130Sstevel@tonic-gate /* validate the message */ 39140Sstevel@tonic-gate if (msz < sizeof (struct T_discon_req)) { 39150Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 39165240Snordmark "tl_discon_req:invalid message")); 39170Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state); 39180Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_DISCON_REQ); 39190Sstevel@tonic-gate freemsg(mp); 39200Sstevel@tonic-gate return; 39210Sstevel@tonic-gate } 39220Sstevel@tonic-gate 39230Sstevel@tonic-gate /* 39240Sstevel@tonic-gate * if server, then validate that client exists 39250Sstevel@tonic-gate * by connection sequence number etc. 39260Sstevel@tonic-gate */ 39270Sstevel@tonic-gate if (tep->te_nicon > 0) { /* server */ 39280Sstevel@tonic-gate 39290Sstevel@tonic-gate /* 39300Sstevel@tonic-gate * search server list for disconnect client 39310Sstevel@tonic-gate */ 39320Sstevel@tonic-gate tip = tl_icon_find(tep, dr->SEQ_number); 39330Sstevel@tonic-gate if (tip == NULL) { 39340Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, 39355240Snordmark SL_TRACE|SL_ERROR, 39365240Snordmark "tl_discon_req:no disconnect endpoint")); 39370Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state); 39380Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADSEQ, 0, T_DISCON_REQ); 39390Sstevel@tonic-gate freemsg(mp); 39400Sstevel@tonic-gate return; 39410Sstevel@tonic-gate } 39420Sstevel@tonic-gate /* 39430Sstevel@tonic-gate * If ti_tep is NULL the client has already closed. In this case 39440Sstevel@tonic-gate * the code below will avoid any action on the client side. 
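		 *
		 * (peer_tep picked up from tip->ti_tep just below may
		 * therefore be NULL; every later peer-side action is guarded
		 * by a peer_tep != NULL && !peer_tep->te_closing check, and
		 * the T_DISCON_IND is simply dropped when there is nobody to
		 * deliver it to.)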
39450Sstevel@tonic-gate */ 39460Sstevel@tonic-gate 39470Sstevel@tonic-gate ASSERT(IMPLY(tip->ti_tep != NULL, 39485240Snordmark tip->ti_tep->te_seqno == dr->SEQ_number)); 39490Sstevel@tonic-gate peer_tep = tip->ti_tep; 39500Sstevel@tonic-gate } 39510Sstevel@tonic-gate 39520Sstevel@tonic-gate /* 39530Sstevel@tonic-gate * preallocate now for T_DISCON_IND 39540Sstevel@tonic-gate * ack validity of request (T_OK_ACK) after memory committed 39550Sstevel@tonic-gate */ 39560Sstevel@tonic-gate size = sizeof (struct T_discon_ind); 39570Sstevel@tonic-gate if ((respmp = reallocb(mp, size, 0)) == NULL) { 39580Sstevel@tonic-gate tl_memrecover(wq, mp, size); 39590Sstevel@tonic-gate freemsg(ackmp); 39600Sstevel@tonic-gate return; 39610Sstevel@tonic-gate } 39620Sstevel@tonic-gate 39630Sstevel@tonic-gate /* 39640Sstevel@tonic-gate * prepare message to ack validity of request 39650Sstevel@tonic-gate */ 39660Sstevel@tonic-gate if (tep->te_nicon == 0) 39670Sstevel@tonic-gate new_state = NEXTSTATE(TE_OK_ACK1, new_state); 39680Sstevel@tonic-gate else 39690Sstevel@tonic-gate if (tep->te_nicon == 1) 39700Sstevel@tonic-gate new_state = NEXTSTATE(TE_OK_ACK2, new_state); 39710Sstevel@tonic-gate else 39720Sstevel@tonic-gate new_state = NEXTSTATE(TE_OK_ACK4, new_state); 39730Sstevel@tonic-gate 39740Sstevel@tonic-gate /* 39750Sstevel@tonic-gate * Flushing queues according to TPI. Using the old state. 39760Sstevel@tonic-gate */ 39770Sstevel@tonic-gate if ((tep->te_nicon <= 1) && 39780Sstevel@tonic-gate ((save_state == TS_DATA_XFER) || 39790Sstevel@tonic-gate (save_state == TS_WIND_ORDREL) || 39800Sstevel@tonic-gate (save_state == TS_WREQ_ORDREL))) 39810Sstevel@tonic-gate (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW); 39820Sstevel@tonic-gate 39830Sstevel@tonic-gate /* send T_OK_ACK up */ 39840Sstevel@tonic-gate tl_ok_ack(wq, ackmp, T_DISCON_REQ); 39850Sstevel@tonic-gate 39860Sstevel@tonic-gate /* 39870Sstevel@tonic-gate * now do disconnect business 39880Sstevel@tonic-gate */ 39890Sstevel@tonic-gate if (tep->te_nicon > 0) { /* listener */ 39900Sstevel@tonic-gate if (peer_tep != NULL && !peer_tep->te_closing) { 39910Sstevel@tonic-gate /* 39920Sstevel@tonic-gate * disconnect incoming connect request pending to tep 39930Sstevel@tonic-gate */ 39940Sstevel@tonic-gate if ((dimp = tl_resizemp(respmp, size)) == NULL) { 39950Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, 39965240Snordmark SL_TRACE|SL_ERROR, 39975240Snordmark "tl_discon_req: reallocb failed")); 39980Sstevel@tonic-gate tep->te_state = new_state; 39990Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM); 40000Sstevel@tonic-gate return; 40010Sstevel@tonic-gate } 40020Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr; 40030Sstevel@tonic-gate di->SEQ_number = BADSEQNUM; 40040Sstevel@tonic-gate save_state = peer_tep->te_state; 40050Sstevel@tonic-gate peer_tep->te_state = TS_IDLE; 40060Sstevel@tonic-gate 40070Sstevel@tonic-gate TL_REMOVE_PEER(peer_tep->te_oconp); 40080Sstevel@tonic-gate enableok(peer_tep->te_wq); 40090Sstevel@tonic-gate TL_QENABLE(peer_tep); 40100Sstevel@tonic-gate } else { 40110Sstevel@tonic-gate freemsg(respmp); 40120Sstevel@tonic-gate dimp = NULL; 40130Sstevel@tonic-gate } 40140Sstevel@tonic-gate 40150Sstevel@tonic-gate /* 40160Sstevel@tonic-gate * remove endpoint from incoming connection list 40170Sstevel@tonic-gate * - remove disconnect client from list on server 40180Sstevel@tonic-gate */ 40190Sstevel@tonic-gate tl_freetip(tep, tip); 40200Sstevel@tonic-gate } else if ((peer_tep = tep->te_oconp) != NULL) { /* client */ 
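		/*
		 * Note: DISCON_reason here is ECONNRESET (the connect request
		 * is being torn down before the listener accepted it);
		 * contrast with the reason-0 T_DISCON_IND generated by the
		 * data paths when the peer disappears after an orderly
		 * release, which is meant to surface as EPIPE only.
		 */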
40210Sstevel@tonic-gate /* 40220Sstevel@tonic-gate * disconnect an outgoing request pending from tep 40230Sstevel@tonic-gate */ 40240Sstevel@tonic-gate 40250Sstevel@tonic-gate if ((dimp = tl_resizemp(respmp, size)) == NULL) { 40260Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, 40275240Snordmark SL_TRACE|SL_ERROR, 40285240Snordmark "tl_discon_req: reallocb failed")); 40290Sstevel@tonic-gate tep->te_state = new_state; 40300Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM); 40310Sstevel@tonic-gate return; 40320Sstevel@tonic-gate } 40330Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr; 40340Sstevel@tonic-gate DB_TYPE(dimp) = M_PROTO; 40350Sstevel@tonic-gate di->PRIM_type = T_DISCON_IND; 40360Sstevel@tonic-gate di->DISCON_reason = ECONNRESET; 40370Sstevel@tonic-gate di->SEQ_number = tep->te_seqno; 40380Sstevel@tonic-gate 40390Sstevel@tonic-gate /* 40400Sstevel@tonic-gate * If this is a socket the T_DISCON_IND is queued with 40410Sstevel@tonic-gate * the T_CONN_IND. Otherwise the T_CONN_IND is removed 40420Sstevel@tonic-gate * from the list of pending connections. 40430Sstevel@tonic-gate * Note that when te_oconp is set the peer better have 40440Sstevel@tonic-gate * a t_connind_t for the client. 40450Sstevel@tonic-gate */ 40460Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect) { 40470Sstevel@tonic-gate /* 40480Sstevel@tonic-gate * No need to check that 40490Sstevel@tonic-gate * ti_tep == NULL since the T_DISCON_IND 40500Sstevel@tonic-gate * takes precedence over other queued 40510Sstevel@tonic-gate * messages. 40520Sstevel@tonic-gate */ 40530Sstevel@tonic-gate tl_icon_queuemsg(peer_tep, tep->te_seqno, dimp); 40540Sstevel@tonic-gate peer_tep = NULL; 40550Sstevel@tonic-gate dimp = NULL; 40560Sstevel@tonic-gate /* 40570Sstevel@tonic-gate * Can't clear te_oconp since tl_co_unconnect needs 40580Sstevel@tonic-gate * it as a hint not to free the tep. 40590Sstevel@tonic-gate * Keep the state unchanged since tl_conn_res inspects 40600Sstevel@tonic-gate * it. 40610Sstevel@tonic-gate */ 40620Sstevel@tonic-gate new_state = tep->te_state; 40630Sstevel@tonic-gate } else { 40640Sstevel@tonic-gate /* Found - delete it */ 40650Sstevel@tonic-gate tip = tl_icon_find(peer_tep, tep->te_seqno); 40660Sstevel@tonic-gate if (tip != NULL) { 40670Sstevel@tonic-gate ASSERT(tep == tip->ti_tep); 40680Sstevel@tonic-gate save_state = peer_tep->te_state; 40690Sstevel@tonic-gate if (peer_tep->te_nicon == 1) 40700Sstevel@tonic-gate peer_tep->te_state = 40710Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND2, 40725240Snordmark peer_tep->te_state); 40730Sstevel@tonic-gate else 40740Sstevel@tonic-gate peer_tep->te_state = 40750Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND3, 40765240Snordmark peer_tep->te_state); 40770Sstevel@tonic-gate tl_freetip(peer_tep, tip); 40780Sstevel@tonic-gate } 40790Sstevel@tonic-gate ASSERT(tep->te_oconp != NULL); 40800Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 40810Sstevel@tonic-gate } 40820Sstevel@tonic-gate } else if ((peer_tep = tep->te_conp) != NULL) { /* connected! 
*/ 40830Sstevel@tonic-gate if ((dimp = tl_resizemp(respmp, size)) == NULL) { 40840Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, 40855240Snordmark SL_TRACE|SL_ERROR, 40865240Snordmark "tl_discon_req: reallocb failed")); 40870Sstevel@tonic-gate tep->te_state = new_state; 40880Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM); 40890Sstevel@tonic-gate return; 40900Sstevel@tonic-gate } 40910Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr; 40920Sstevel@tonic-gate di->SEQ_number = BADSEQNUM; 40930Sstevel@tonic-gate 40940Sstevel@tonic-gate save_state = peer_tep->te_state; 40950Sstevel@tonic-gate peer_tep->te_state = TS_IDLE; 40960Sstevel@tonic-gate } else { 40970Sstevel@tonic-gate /* Not connected */ 40980Sstevel@tonic-gate tep->te_state = new_state; 40990Sstevel@tonic-gate freemsg(respmp); 41000Sstevel@tonic-gate return; 41010Sstevel@tonic-gate } 41020Sstevel@tonic-gate 41030Sstevel@tonic-gate /* Commit state changes */ 41040Sstevel@tonic-gate tep->te_state = new_state; 41050Sstevel@tonic-gate 41060Sstevel@tonic-gate if (peer_tep == NULL) { 41070Sstevel@tonic-gate ASSERT(dimp == NULL); 41080Sstevel@tonic-gate goto done; 41090Sstevel@tonic-gate } 41100Sstevel@tonic-gate /* 41110Sstevel@tonic-gate * Flush queues on peer before sending up 41120Sstevel@tonic-gate * T_DISCON_IND according to TPI 41130Sstevel@tonic-gate */ 41140Sstevel@tonic-gate 41150Sstevel@tonic-gate if ((save_state == TS_DATA_XFER) || 41160Sstevel@tonic-gate (save_state == TS_WIND_ORDREL) || 41170Sstevel@tonic-gate (save_state == TS_WREQ_ORDREL)) 41180Sstevel@tonic-gate (void) putnextctl1(peer_tep->te_rq, M_FLUSH, FLUSHRW); 41190Sstevel@tonic-gate 41200Sstevel@tonic-gate DB_TYPE(dimp) = M_PROTO; 41210Sstevel@tonic-gate di->PRIM_type = T_DISCON_IND; 41220Sstevel@tonic-gate di->DISCON_reason = ECONNRESET; 41230Sstevel@tonic-gate 41240Sstevel@tonic-gate /* 41250Sstevel@tonic-gate * data blocks already linked into dimp by reallocb() 41260Sstevel@tonic-gate */ 41270Sstevel@tonic-gate /* 41280Sstevel@tonic-gate * send indication message to peer user module 41290Sstevel@tonic-gate */ 41300Sstevel@tonic-gate ASSERT(dimp != NULL); 41310Sstevel@tonic-gate putnext(peer_tep->te_rq, dimp); 41320Sstevel@tonic-gate done: 41330Sstevel@tonic-gate if (tep->te_conp) { /* disconnect pointers if connected */ 41340Sstevel@tonic-gate ASSERT(! peer_tep->te_closing); 41350Sstevel@tonic-gate 41360Sstevel@tonic-gate /* 41370Sstevel@tonic-gate * Messages may be queued on peer's write queue 41380Sstevel@tonic-gate * waiting to be processed by its write service 41390Sstevel@tonic-gate * procedure. Before the pointer to the peer transport 41400Sstevel@tonic-gate * structure is set to NULL, qenable the peer's write 41410Sstevel@tonic-gate * queue so that the queued up messages are processed. 41420Sstevel@tonic-gate */ 41430Sstevel@tonic-gate if ((save_state == TS_DATA_XFER) || 41440Sstevel@tonic-gate (save_state == TS_WIND_ORDREL) || 41450Sstevel@tonic-gate (save_state == TS_WREQ_ORDREL)) 41460Sstevel@tonic-gate TL_QENABLE(peer_tep); 41470Sstevel@tonic-gate ASSERT(peer_tep != NULL && peer_tep->te_conp != NULL); 41480Sstevel@tonic-gate TL_UNCONNECT(peer_tep->te_conp); 41490Sstevel@tonic-gate if (! 
IS_SOCKET(tep)) { 41500Sstevel@tonic-gate /* 41510Sstevel@tonic-gate * unlink the streams 41520Sstevel@tonic-gate */ 41530Sstevel@tonic-gate tep->te_wq->q_next = NULL; 41540Sstevel@tonic-gate peer_tep->te_wq->q_next = NULL; 41550Sstevel@tonic-gate } 41560Sstevel@tonic-gate TL_UNCONNECT(tep->te_conp); 41570Sstevel@tonic-gate } 41580Sstevel@tonic-gate } 41590Sstevel@tonic-gate 41600Sstevel@tonic-gate 41610Sstevel@tonic-gate static void 41620Sstevel@tonic-gate tl_addr_req(mblk_t *mp, tl_endpt_t *tep) 41630Sstevel@tonic-gate { 41640Sstevel@tonic-gate queue_t *wq; 41650Sstevel@tonic-gate size_t ack_sz; 41660Sstevel@tonic-gate mblk_t *ackmp; 41670Sstevel@tonic-gate struct T_addr_ack *taa; 41680Sstevel@tonic-gate 41690Sstevel@tonic-gate if (tep->te_closing) { 41700Sstevel@tonic-gate freemsg(mp); 41710Sstevel@tonic-gate return; 41720Sstevel@tonic-gate } 41730Sstevel@tonic-gate 41740Sstevel@tonic-gate wq = tep->te_wq; 41750Sstevel@tonic-gate 41760Sstevel@tonic-gate /* 41770Sstevel@tonic-gate * Note: T_ADDR_REQ message has only PRIM_type field 41780Sstevel@tonic-gate * so it is already validated earlier. 41790Sstevel@tonic-gate */ 41800Sstevel@tonic-gate 41810Sstevel@tonic-gate if (IS_CLTS(tep) || 41820Sstevel@tonic-gate (tep->te_state > TS_WREQ_ORDREL) || 41830Sstevel@tonic-gate (tep->te_state < TS_DATA_XFER)) { 41840Sstevel@tonic-gate /* 41850Sstevel@tonic-gate * Either connectionless or connection oriented but not 41860Sstevel@tonic-gate * in connected data transfer state or half-closed states. 41870Sstevel@tonic-gate */ 41880Sstevel@tonic-gate ack_sz = sizeof (struct T_addr_ack); 41890Sstevel@tonic-gate if (tep->te_state >= TS_IDLE) 41900Sstevel@tonic-gate /* is bound */ 41910Sstevel@tonic-gate ack_sz += tep->te_alen; 41920Sstevel@tonic-gate ackmp = reallocb(mp, ack_sz, 0); 41930Sstevel@tonic-gate if (ackmp == NULL) { 41940Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 41955240Snordmark SL_TRACE|SL_ERROR, 41965240Snordmark "tl_addr_req: reallocb failed")); 41970Sstevel@tonic-gate tl_memrecover(wq, mp, ack_sz); 41980Sstevel@tonic-gate return; 41990Sstevel@tonic-gate } 42000Sstevel@tonic-gate 42010Sstevel@tonic-gate taa = (struct T_addr_ack *)ackmp->b_rptr; 42020Sstevel@tonic-gate 42030Sstevel@tonic-gate bzero(taa, sizeof (struct T_addr_ack)); 42040Sstevel@tonic-gate 42050Sstevel@tonic-gate taa->PRIM_type = T_ADDR_ACK; 42060Sstevel@tonic-gate ackmp->b_datap->db_type = M_PCPROTO; 42070Sstevel@tonic-gate ackmp->b_wptr = (uchar_t *)&taa[1]; 42080Sstevel@tonic-gate 42090Sstevel@tonic-gate if (tep->te_state >= TS_IDLE) { 42100Sstevel@tonic-gate /* endpoint is bound */ 42110Sstevel@tonic-gate taa->LOCADDR_length = tep->te_alen; 42120Sstevel@tonic-gate taa->LOCADDR_offset = (t_scalar_t)sizeof (*taa); 42130Sstevel@tonic-gate 42140Sstevel@tonic-gate bcopy(tep->te_abuf, ackmp->b_wptr, 42155240Snordmark tep->te_alen); 42160Sstevel@tonic-gate ackmp->b_wptr += tep->te_alen; 42170Sstevel@tonic-gate ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim); 42180Sstevel@tonic-gate } 42190Sstevel@tonic-gate 42200Sstevel@tonic-gate (void) qreply(wq, ackmp); 42210Sstevel@tonic-gate } else { 42220Sstevel@tonic-gate ASSERT(tep->te_state == TS_DATA_XFER || 42235240Snordmark tep->te_state == TS_WIND_ORDREL || 42245240Snordmark tep->te_state == TS_WREQ_ORDREL); 42250Sstevel@tonic-gate /* connection oriented in data transfer */ 42260Sstevel@tonic-gate tl_connected_cots_addr_req(mp, tep); 42270Sstevel@tonic-gate } 42280Sstevel@tonic-gate } 42290Sstevel@tonic-gate 42300Sstevel@tonic-gate 42310Sstevel@tonic-gate static 
void 42320Sstevel@tonic-gate tl_connected_cots_addr_req(mblk_t *mp, tl_endpt_t *tep) 42330Sstevel@tonic-gate { 42340Sstevel@tonic-gate tl_endpt_t *peer_tep; 42350Sstevel@tonic-gate size_t ack_sz; 42360Sstevel@tonic-gate mblk_t *ackmp; 42370Sstevel@tonic-gate struct T_addr_ack *taa; 42380Sstevel@tonic-gate uchar_t *addr_startp; 42390Sstevel@tonic-gate 42400Sstevel@tonic-gate if (tep->te_closing) { 42410Sstevel@tonic-gate freemsg(mp); 42420Sstevel@tonic-gate return; 42430Sstevel@tonic-gate } 42440Sstevel@tonic-gate 42450Sstevel@tonic-gate ASSERT(tep->te_state >= TS_IDLE); 42460Sstevel@tonic-gate 42470Sstevel@tonic-gate ack_sz = sizeof (struct T_addr_ack); 42480Sstevel@tonic-gate ack_sz += T_ALIGN(tep->te_alen); 42490Sstevel@tonic-gate peer_tep = tep->te_conp; 42500Sstevel@tonic-gate ack_sz += peer_tep->te_alen; 42510Sstevel@tonic-gate 42520Sstevel@tonic-gate ackmp = tpi_ack_alloc(mp, ack_sz, M_PCPROTO, T_ADDR_ACK); 42530Sstevel@tonic-gate if (ackmp == NULL) { 42540Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 42555240Snordmark "tl_connected_cots_addr_req: reallocb failed")); 42560Sstevel@tonic-gate tl_memrecover(tep->te_wq, mp, ack_sz); 42570Sstevel@tonic-gate return; 42580Sstevel@tonic-gate } 42590Sstevel@tonic-gate 42600Sstevel@tonic-gate taa = (struct T_addr_ack *)ackmp->b_rptr; 42610Sstevel@tonic-gate 42620Sstevel@tonic-gate /* endpoint is bound */ 42630Sstevel@tonic-gate taa->LOCADDR_length = tep->te_alen; 42640Sstevel@tonic-gate taa->LOCADDR_offset = (t_scalar_t)sizeof (*taa); 42650Sstevel@tonic-gate 42660Sstevel@tonic-gate addr_startp = (uchar_t *)&taa[1]; 42670Sstevel@tonic-gate 42680Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, 42690Sstevel@tonic-gate tep->te_alen); 42700Sstevel@tonic-gate 42710Sstevel@tonic-gate taa->REMADDR_length = peer_tep->te_alen; 42720Sstevel@tonic-gate taa->REMADDR_offset = (t_scalar_t)T_ALIGN(taa->LOCADDR_offset + 42735240Snordmark taa->LOCADDR_length); 42740Sstevel@tonic-gate addr_startp = ackmp->b_rptr + taa->REMADDR_offset; 42750Sstevel@tonic-gate bcopy(peer_tep->te_abuf, addr_startp, 42760Sstevel@tonic-gate peer_tep->te_alen); 42770Sstevel@tonic-gate ackmp->b_wptr = (uchar_t *)ackmp->b_rptr + 42780Sstevel@tonic-gate taa->REMADDR_offset + peer_tep->te_alen; 42790Sstevel@tonic-gate ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim); 42800Sstevel@tonic-gate 42810Sstevel@tonic-gate putnext(tep->te_rq, ackmp); 42820Sstevel@tonic-gate } 42830Sstevel@tonic-gate 42840Sstevel@tonic-gate static void 42850Sstevel@tonic-gate tl_copy_info(struct T_info_ack *ia, tl_endpt_t *tep) 42860Sstevel@tonic-gate { 42870Sstevel@tonic-gate if (IS_CLTS(tep)) { 42880Sstevel@tonic-gate *ia = tl_clts_info_ack; 42890Sstevel@tonic-gate ia->TSDU_size = tl_tidusz; /* TSDU and TIDU size are same */ 42900Sstevel@tonic-gate } else { 42910Sstevel@tonic-gate *ia = tl_cots_info_ack; 42920Sstevel@tonic-gate if (IS_COTSORD(tep)) 42930Sstevel@tonic-gate ia->SERV_type = T_COTS_ORD; 42940Sstevel@tonic-gate } 42950Sstevel@tonic-gate ia->TIDU_size = tl_tidusz; 42960Sstevel@tonic-gate ia->CURRENT_state = tep->te_state; 42970Sstevel@tonic-gate } 42980Sstevel@tonic-gate 42990Sstevel@tonic-gate /* 43000Sstevel@tonic-gate * This routine responds to T_CAPABILITY_REQ messages. It is called by 43010Sstevel@tonic-gate * tl_wput. 
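 *
 * A typical exchange (illustrative only): the module above sends a
 * T_capability_req with CAP_bits1 = TC1_INFO | TC1_ACCEPTOR_ID and gets back
 * a T_capability_ack in which only the supported bits are set, with INFO_ack
 * filled in by tl_copy_info() and ACCEPTOR_id set to te_acceptor_id.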
43020Sstevel@tonic-gate */ 43030Sstevel@tonic-gate static void 43040Sstevel@tonic-gate tl_capability_req(mblk_t *mp, tl_endpt_t *tep) 43050Sstevel@tonic-gate { 43060Sstevel@tonic-gate mblk_t *ackmp; 43070Sstevel@tonic-gate t_uscalar_t cap_bits1; 43080Sstevel@tonic-gate struct T_capability_ack *tcap; 43090Sstevel@tonic-gate 43100Sstevel@tonic-gate if (tep->te_closing) { 43110Sstevel@tonic-gate freemsg(mp); 43120Sstevel@tonic-gate return; 43130Sstevel@tonic-gate } 43140Sstevel@tonic-gate 43150Sstevel@tonic-gate cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1; 43160Sstevel@tonic-gate 43170Sstevel@tonic-gate ackmp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack), 43180Sstevel@tonic-gate M_PCPROTO, T_CAPABILITY_ACK); 43190Sstevel@tonic-gate if (ackmp == NULL) { 43200Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 43215240Snordmark "tl_capability_req: reallocb failed")); 43220Sstevel@tonic-gate tl_memrecover(tep->te_wq, mp, 43230Sstevel@tonic-gate sizeof (struct T_capability_ack)); 43240Sstevel@tonic-gate return; 43250Sstevel@tonic-gate } 43260Sstevel@tonic-gate 43270Sstevel@tonic-gate tcap = (struct T_capability_ack *)ackmp->b_rptr; 43280Sstevel@tonic-gate tcap->CAP_bits1 = 0; 43290Sstevel@tonic-gate 43300Sstevel@tonic-gate if (cap_bits1 & TC1_INFO) { 43310Sstevel@tonic-gate tl_copy_info(&tcap->INFO_ack, tep); 43320Sstevel@tonic-gate tcap->CAP_bits1 |= TC1_INFO; 43330Sstevel@tonic-gate } 43340Sstevel@tonic-gate 43350Sstevel@tonic-gate if (cap_bits1 & TC1_ACCEPTOR_ID) { 43360Sstevel@tonic-gate tcap->ACCEPTOR_id = tep->te_acceptor_id; 43370Sstevel@tonic-gate tcap->CAP_bits1 |= TC1_ACCEPTOR_ID; 43380Sstevel@tonic-gate } 43390Sstevel@tonic-gate 43400Sstevel@tonic-gate putnext(tep->te_rq, ackmp); 43410Sstevel@tonic-gate } 43420Sstevel@tonic-gate 43430Sstevel@tonic-gate static void 43440Sstevel@tonic-gate tl_info_req_ser(mblk_t *mp, tl_endpt_t *tep) 43450Sstevel@tonic-gate { 43460Sstevel@tonic-gate if (! tep->te_closing) 43470Sstevel@tonic-gate tl_info_req(mp, tep); 43480Sstevel@tonic-gate else 43490Sstevel@tonic-gate freemsg(mp); 43500Sstevel@tonic-gate 43510Sstevel@tonic-gate tl_serializer_exit(tep); 43520Sstevel@tonic-gate tl_refrele(tep); 43530Sstevel@tonic-gate } 43540Sstevel@tonic-gate 43550Sstevel@tonic-gate static void 43560Sstevel@tonic-gate tl_info_req(mblk_t *mp, tl_endpt_t *tep) 43570Sstevel@tonic-gate { 43580Sstevel@tonic-gate mblk_t *ackmp; 43590Sstevel@tonic-gate 43600Sstevel@tonic-gate ackmp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), 43610Sstevel@tonic-gate M_PCPROTO, T_INFO_ACK); 43620Sstevel@tonic-gate if (ackmp == NULL) { 43630Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 43645240Snordmark "tl_info_req: reallocb failed")); 43650Sstevel@tonic-gate tl_memrecover(tep->te_wq, mp, sizeof (struct T_info_ack)); 43660Sstevel@tonic-gate return; 43670Sstevel@tonic-gate } 43680Sstevel@tonic-gate 43690Sstevel@tonic-gate /* 43700Sstevel@tonic-gate * fill in T_INFO_ACK contents 43710Sstevel@tonic-gate */ 43720Sstevel@tonic-gate tl_copy_info((struct T_info_ack *)ackmp->b_rptr, tep); 43730Sstevel@tonic-gate 43740Sstevel@tonic-gate /* 43750Sstevel@tonic-gate * send ack message 43760Sstevel@tonic-gate */ 43770Sstevel@tonic-gate putnext(tep->te_rq, ackmp); 43780Sstevel@tonic-gate } 43790Sstevel@tonic-gate 43800Sstevel@tonic-gate /* 43810Sstevel@tonic-gate * Handle M_DATA, T_data_req and T_optdata_req. 43820Sstevel@tonic-gate * If this is a socket pass through T_optdata_req options unmodified. 
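 *
 * On the way to the peer the request primitives are simply relabelled
 * (sketch of the message-block reuse done below):
 *
 *	T_DATA_REQ	-> T_DATA_IND
 *	T_OPTDATA_REQ	-> T_OPTDATA_IND	(options left untouched)
 *	M_DATA		-> passed through as-is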
43830Sstevel@tonic-gate */ 43840Sstevel@tonic-gate static void 43850Sstevel@tonic-gate tl_data(mblk_t *mp, tl_endpt_t *tep) 43860Sstevel@tonic-gate { 43870Sstevel@tonic-gate queue_t *wq = tep->te_wq; 43880Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr; 43890Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 43900Sstevel@tonic-gate tl_endpt_t *peer_tep; 43910Sstevel@tonic-gate queue_t *peer_rq; 43920Sstevel@tonic-gate boolean_t closing = tep->te_closing; 43930Sstevel@tonic-gate 43940Sstevel@tonic-gate if (IS_CLTS(tep)) { 43950Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, 43965240Snordmark SL_TRACE|SL_ERROR, 43975240Snordmark "tl_wput:clts:unattached M_DATA")); 43980Sstevel@tonic-gate if (!closing) { 43990Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 44000Sstevel@tonic-gate } else { 44010Sstevel@tonic-gate freemsg(mp); 44020Sstevel@tonic-gate } 44030Sstevel@tonic-gate return; 44040Sstevel@tonic-gate } 44050Sstevel@tonic-gate 44060Sstevel@tonic-gate /* 44070Sstevel@tonic-gate * If the endpoint is closing it should still forward any data to the 44080Sstevel@tonic-gate * peer (if it has one). If it is not allowed to forward it can just 44090Sstevel@tonic-gate * free the message. 44100Sstevel@tonic-gate */ 44110Sstevel@tonic-gate if (closing && 44120Sstevel@tonic-gate (tep->te_state != TS_DATA_XFER) && 44130Sstevel@tonic-gate (tep->te_state != TS_WREQ_ORDREL)) { 44140Sstevel@tonic-gate freemsg(mp); 44150Sstevel@tonic-gate return; 44160Sstevel@tonic-gate } 44170Sstevel@tonic-gate 44180Sstevel@tonic-gate if (DB_TYPE(mp) == M_PROTO) { 44190Sstevel@tonic-gate if (prim->type == T_DATA_REQ && 44200Sstevel@tonic-gate msz < sizeof (struct T_data_req)) { 44210Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 44220Sstevel@tonic-gate SL_TRACE|SL_ERROR, 44230Sstevel@tonic-gate "tl_data:T_DATA_REQ:invalid message")); 44240Sstevel@tonic-gate if (!closing) { 44250Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 44260Sstevel@tonic-gate } else { 44270Sstevel@tonic-gate freemsg(mp); 44280Sstevel@tonic-gate } 44290Sstevel@tonic-gate return; 44300Sstevel@tonic-gate } else if (prim->type == T_OPTDATA_REQ && 4431*7656SSherry.Moore@Sun.COM (msz < sizeof (struct T_optdata_req) || !IS_SOCKET(tep))) { 44320Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 44335240Snordmark SL_TRACE|SL_ERROR, 44345240Snordmark "tl_data:T_OPTDATA_REQ:invalid message")); 44350Sstevel@tonic-gate if (!closing) { 44360Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 44370Sstevel@tonic-gate } else { 44380Sstevel@tonic-gate freemsg(mp); 44390Sstevel@tonic-gate } 44400Sstevel@tonic-gate return; 44410Sstevel@tonic-gate } 44420Sstevel@tonic-gate } 44430Sstevel@tonic-gate 44440Sstevel@tonic-gate /* 44450Sstevel@tonic-gate * connection oriented provider 44460Sstevel@tonic-gate */ 44470Sstevel@tonic-gate switch (tep->te_state) { 44480Sstevel@tonic-gate case TS_IDLE: 44490Sstevel@tonic-gate /* 44500Sstevel@tonic-gate * Other end not here - do nothing. 
44510Sstevel@tonic-gate */ 44520Sstevel@tonic-gate freemsg(mp); 44530Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 44545240Snordmark "tl_data:cots with endpoint idle")); 44550Sstevel@tonic-gate return; 44560Sstevel@tonic-gate 44570Sstevel@tonic-gate case TS_DATA_XFER: 44580Sstevel@tonic-gate /* valid states */ 44590Sstevel@tonic-gate if (tep->te_conp != NULL) 44600Sstevel@tonic-gate break; 44610Sstevel@tonic-gate 44620Sstevel@tonic-gate if (tep->te_oconp == NULL) { 44630Sstevel@tonic-gate if (!closing) { 44640Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 44650Sstevel@tonic-gate } else { 44660Sstevel@tonic-gate freemsg(mp); 44670Sstevel@tonic-gate } 44680Sstevel@tonic-gate return; 44690Sstevel@tonic-gate } 44700Sstevel@tonic-gate /* 44710Sstevel@tonic-gate * For a socket the T_CONN_CON is sent early thus 44720Sstevel@tonic-gate * the peer might not yet have accepted the connection. 44730Sstevel@tonic-gate * If we are closing queue the packet with the T_CONN_IND. 44740Sstevel@tonic-gate * Otherwise defer processing the packet until the peer 44750Sstevel@tonic-gate * accepts the connection. 44760Sstevel@tonic-gate * Note that the queue is noenabled when we go into this 44770Sstevel@tonic-gate * state. 44780Sstevel@tonic-gate */ 44790Sstevel@tonic-gate if (!closing) { 44800Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 44815240Snordmark SL_TRACE|SL_ERROR, 44825240Snordmark "tl_data: ocon")); 44830Sstevel@tonic-gate TL_PUTBQ(tep, mp); 44840Sstevel@tonic-gate return; 44850Sstevel@tonic-gate } 44860Sstevel@tonic-gate if (DB_TYPE(mp) == M_PROTO) { 44870Sstevel@tonic-gate if (msz < sizeof (t_scalar_t)) { 44880Sstevel@tonic-gate freemsg(mp); 44890Sstevel@tonic-gate return; 44900Sstevel@tonic-gate } 44910Sstevel@tonic-gate /* reuse message block - just change REQ to IND */ 44920Sstevel@tonic-gate if (prim->type == T_DATA_REQ) 44930Sstevel@tonic-gate prim->type = T_DATA_IND; 44940Sstevel@tonic-gate else 44950Sstevel@tonic-gate prim->type = T_OPTDATA_IND; 44960Sstevel@tonic-gate } 44970Sstevel@tonic-gate tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp); 44980Sstevel@tonic-gate return; 44990Sstevel@tonic-gate 45000Sstevel@tonic-gate case TS_WREQ_ORDREL: 45010Sstevel@tonic-gate if (tep->te_conp == NULL) { 45020Sstevel@tonic-gate /* 45030Sstevel@tonic-gate * Other end closed - generate discon_ind 45040Sstevel@tonic-gate * with reason 0 to cause an EPIPE but no 45050Sstevel@tonic-gate * read side error on AF_UNIX sockets. 
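			 *
			 * (The zero reason is delivered by the
			 * tl_discon_ind(tep, 0) call just below; tl_exdata()
			 * uses the same convention for this state.)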
45060Sstevel@tonic-gate 			 */
45070Sstevel@tonic-gate 			freemsg(mp);
45080Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 3,
45095240Snordmark 			    SL_TRACE|SL_ERROR,
45105240Snordmark 			    "tl_data: WREQ_ORDREL and no peer"));
45110Sstevel@tonic-gate 			tl_discon_ind(tep, 0);
45120Sstevel@tonic-gate 			return;
45130Sstevel@tonic-gate 		}
45140Sstevel@tonic-gate 		break;
45150Sstevel@tonic-gate 
45160Sstevel@tonic-gate 	default:
45170Sstevel@tonic-gate 		/* invalid state for event TE_DATA_REQ */
45180Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
45195240Snordmark 		    "tl_data:cots:out of state"));
45200Sstevel@tonic-gate 		tl_merror(wq, mp, EPROTO);
45210Sstevel@tonic-gate 		return;
45220Sstevel@tonic-gate 	}
45230Sstevel@tonic-gate 	/*
45240Sstevel@tonic-gate 	 * tep->te_state = NEXTSTATE(TE_DATA_REQ, tep->te_state);
45250Sstevel@tonic-gate 	 * (State stays same on this event)
45260Sstevel@tonic-gate 	 */
45270Sstevel@tonic-gate 
45280Sstevel@tonic-gate 	/*
45290Sstevel@tonic-gate 	 * get connected endpoint
45300Sstevel@tonic-gate 	 */
45310Sstevel@tonic-gate 	if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
45320Sstevel@tonic-gate 		freemsg(mp);
45330Sstevel@tonic-gate 		/* Peer closed */
45340Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
45355240Snordmark 		    "tl_data: peer gone"));
45360Sstevel@tonic-gate 		return;
45370Sstevel@tonic-gate 	}
45380Sstevel@tonic-gate 
45390Sstevel@tonic-gate 	ASSERT(tep->te_ser == peer_tep->te_ser);
45400Sstevel@tonic-gate 	peer_rq = peer_tep->te_rq;
45410Sstevel@tonic-gate 
45420Sstevel@tonic-gate 	/*
45430Sstevel@tonic-gate 	 * Put it back if flow controlled
45440Sstevel@tonic-gate 	 * Note: the number of messages already on the queue when we are
45450Sstevel@tonic-gate 	 * closing is bounded, so we can ignore flow control.
45460Sstevel@tonic-gate */ 45470Sstevel@tonic-gate if (!canputnext(peer_rq) && !closing) { 45480Sstevel@tonic-gate TL_PUTBQ(tep, mp); 45490Sstevel@tonic-gate return; 45500Sstevel@tonic-gate } 45510Sstevel@tonic-gate 45520Sstevel@tonic-gate /* 45530Sstevel@tonic-gate * validate peer state 45540Sstevel@tonic-gate */ 45550Sstevel@tonic-gate switch (peer_tep->te_state) { 45560Sstevel@tonic-gate case TS_DATA_XFER: 45570Sstevel@tonic-gate case TS_WIND_ORDREL: 45580Sstevel@tonic-gate /* valid states */ 45590Sstevel@tonic-gate break; 45600Sstevel@tonic-gate default: 45610Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 45625240Snordmark "tl_data:rx side:invalid state")); 45630Sstevel@tonic-gate tl_merror(peer_tep->te_wq, mp, EPROTO); 45640Sstevel@tonic-gate return; 45650Sstevel@tonic-gate } 45660Sstevel@tonic-gate if (DB_TYPE(mp) == M_PROTO) { 45670Sstevel@tonic-gate /* reuse message block - just change REQ to IND */ 45680Sstevel@tonic-gate if (prim->type == T_DATA_REQ) 45690Sstevel@tonic-gate prim->type = T_DATA_IND; 45700Sstevel@tonic-gate else 45710Sstevel@tonic-gate prim->type = T_OPTDATA_IND; 45720Sstevel@tonic-gate } 45730Sstevel@tonic-gate /* 45740Sstevel@tonic-gate * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state); 45750Sstevel@tonic-gate * (peer state stays same on this event) 45760Sstevel@tonic-gate */ 45770Sstevel@tonic-gate /* 45780Sstevel@tonic-gate * send data to connected peer 45790Sstevel@tonic-gate */ 45800Sstevel@tonic-gate putnext(peer_rq, mp); 45810Sstevel@tonic-gate } 45820Sstevel@tonic-gate 45830Sstevel@tonic-gate 45840Sstevel@tonic-gate 45850Sstevel@tonic-gate static void 45860Sstevel@tonic-gate tl_exdata(mblk_t *mp, tl_endpt_t *tep) 45870Sstevel@tonic-gate { 45880Sstevel@tonic-gate queue_t *wq = tep->te_wq; 45890Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr; 45900Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 45910Sstevel@tonic-gate tl_endpt_t *peer_tep; 45920Sstevel@tonic-gate queue_t *peer_rq; 45930Sstevel@tonic-gate boolean_t closing = tep->te_closing; 45940Sstevel@tonic-gate 45950Sstevel@tonic-gate if (msz < sizeof (struct T_exdata_req)) { 45960Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 45975240Snordmark "tl_exdata:invalid message")); 45980Sstevel@tonic-gate if (!closing) { 45990Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 46000Sstevel@tonic-gate } else { 46010Sstevel@tonic-gate freemsg(mp); 46020Sstevel@tonic-gate } 46030Sstevel@tonic-gate return; 46040Sstevel@tonic-gate } 46050Sstevel@tonic-gate 46060Sstevel@tonic-gate /* 46070Sstevel@tonic-gate * If the endpoint is closing it should still forward any data to the 46080Sstevel@tonic-gate * peer (if it has one). If it is not allowed to forward it can just 46090Sstevel@tonic-gate * free the message. 46100Sstevel@tonic-gate */ 46110Sstevel@tonic-gate if (closing && 46120Sstevel@tonic-gate (tep->te_state != TS_DATA_XFER) && 46130Sstevel@tonic-gate (tep->te_state != TS_WREQ_ORDREL)) { 46140Sstevel@tonic-gate freemsg(mp); 46150Sstevel@tonic-gate return; 46160Sstevel@tonic-gate } 46170Sstevel@tonic-gate 46180Sstevel@tonic-gate /* 46190Sstevel@tonic-gate * validate state 46200Sstevel@tonic-gate */ 46210Sstevel@tonic-gate switch (tep->te_state) { 46220Sstevel@tonic-gate case TS_IDLE: 46230Sstevel@tonic-gate /* 46240Sstevel@tonic-gate * Other end not here - do nothing. 
46250Sstevel@tonic-gate */ 46260Sstevel@tonic-gate freemsg(mp); 46270Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 46285240Snordmark "tl_exdata:cots with endpoint idle")); 46290Sstevel@tonic-gate return; 46300Sstevel@tonic-gate 46310Sstevel@tonic-gate case TS_DATA_XFER: 46320Sstevel@tonic-gate /* valid states */ 46330Sstevel@tonic-gate if (tep->te_conp != NULL) 46340Sstevel@tonic-gate break; 46350Sstevel@tonic-gate 46360Sstevel@tonic-gate if (tep->te_oconp == NULL) { 46370Sstevel@tonic-gate if (!closing) { 46380Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 46390Sstevel@tonic-gate } else { 46400Sstevel@tonic-gate freemsg(mp); 46410Sstevel@tonic-gate } 46420Sstevel@tonic-gate return; 46430Sstevel@tonic-gate } 46440Sstevel@tonic-gate /* 46450Sstevel@tonic-gate * For a socket the T_CONN_CON is sent early thus 46460Sstevel@tonic-gate * the peer might not yet have accepted the connection. 46470Sstevel@tonic-gate * If we are closing queue the packet with the T_CONN_IND. 46480Sstevel@tonic-gate * Otherwise defer processing the packet until the peer 46490Sstevel@tonic-gate * accepts the connection. 46500Sstevel@tonic-gate * Note that the queue is noenabled when we go into this 46510Sstevel@tonic-gate * state. 46520Sstevel@tonic-gate */ 46530Sstevel@tonic-gate if (!closing) { 46540Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 46555240Snordmark SL_TRACE|SL_ERROR, 46565240Snordmark "tl_exdata: ocon")); 46570Sstevel@tonic-gate TL_PUTBQ(tep, mp); 46580Sstevel@tonic-gate return; 46590Sstevel@tonic-gate } 46600Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 46615240Snordmark "tl_exdata: closing socket ocon")); 46620Sstevel@tonic-gate prim->type = T_EXDATA_IND; 46630Sstevel@tonic-gate tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp); 46640Sstevel@tonic-gate return; 46650Sstevel@tonic-gate 46660Sstevel@tonic-gate case TS_WREQ_ORDREL: 46670Sstevel@tonic-gate if (tep->te_conp == NULL) { 46680Sstevel@tonic-gate /* 46690Sstevel@tonic-gate * Other end closed - generate discon_ind 46700Sstevel@tonic-gate * with reason 0 to cause an EPIPE but no 46710Sstevel@tonic-gate * read side error on AF_UNIX sockets. 
46720Sstevel@tonic-gate */ 46730Sstevel@tonic-gate freemsg(mp); 46740Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 46755240Snordmark SL_TRACE|SL_ERROR, 46765240Snordmark "tl_exdata: WREQ_ORDREL and no peer")); 46770Sstevel@tonic-gate tl_discon_ind(tep, 0); 46780Sstevel@tonic-gate return; 46790Sstevel@tonic-gate } 46800Sstevel@tonic-gate break; 46810Sstevel@tonic-gate 46820Sstevel@tonic-gate default: 46830Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 46845240Snordmark SL_TRACE|SL_ERROR, 46855240Snordmark "tl_wput:T_EXDATA_REQ:out of state, state=%d", 46865240Snordmark tep->te_state)); 46870Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 46880Sstevel@tonic-gate return; 46890Sstevel@tonic-gate } 46900Sstevel@tonic-gate /* 46910Sstevel@tonic-gate * tep->te_state = NEXTSTATE(TE_EXDATA_REQ, tep->te_state); 46920Sstevel@tonic-gate * (state stays same on this event) 46930Sstevel@tonic-gate */ 46940Sstevel@tonic-gate 46950Sstevel@tonic-gate /* 46960Sstevel@tonic-gate * get connected endpoint 46970Sstevel@tonic-gate */ 46980Sstevel@tonic-gate if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) { 46990Sstevel@tonic-gate freemsg(mp); 47000Sstevel@tonic-gate /* Peer closed */ 47010Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE, 47025240Snordmark "tl_exdata: peer gone")); 47030Sstevel@tonic-gate return; 47040Sstevel@tonic-gate } 47050Sstevel@tonic-gate 47060Sstevel@tonic-gate peer_rq = peer_tep->te_rq; 47070Sstevel@tonic-gate 47080Sstevel@tonic-gate /* 47090Sstevel@tonic-gate * Put it back if flow controlled 47100Sstevel@tonic-gate * Note: Messages already on queue when we are closing is bounded 47110Sstevel@tonic-gate * so we can ignore flow control. 47120Sstevel@tonic-gate */ 47130Sstevel@tonic-gate if (!canputnext(peer_rq) && !closing) { 47140Sstevel@tonic-gate TL_PUTBQ(tep, mp); 47150Sstevel@tonic-gate return; 47160Sstevel@tonic-gate } 47170Sstevel@tonic-gate 47180Sstevel@tonic-gate /* 47190Sstevel@tonic-gate * validate state on peer 47200Sstevel@tonic-gate */ 47210Sstevel@tonic-gate switch (peer_tep->te_state) { 47220Sstevel@tonic-gate case TS_DATA_XFER: 47230Sstevel@tonic-gate case TS_WIND_ORDREL: 47240Sstevel@tonic-gate /* valid states */ 47250Sstevel@tonic-gate break; 47260Sstevel@tonic-gate default: 47270Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 47285240Snordmark "tl_exdata:rx side:invalid state")); 47290Sstevel@tonic-gate tl_merror(peer_tep->te_wq, mp, EPROTO); 47300Sstevel@tonic-gate return; 47310Sstevel@tonic-gate } 47320Sstevel@tonic-gate /* 47330Sstevel@tonic-gate * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state); 47340Sstevel@tonic-gate * (peer state stays same on this event) 47350Sstevel@tonic-gate */ 47360Sstevel@tonic-gate /* 47370Sstevel@tonic-gate * reuse message block 47380Sstevel@tonic-gate */ 47390Sstevel@tonic-gate prim->type = T_EXDATA_IND; 47400Sstevel@tonic-gate 47410Sstevel@tonic-gate /* 47420Sstevel@tonic-gate * send data to connected peer 47430Sstevel@tonic-gate */ 47440Sstevel@tonic-gate putnext(peer_rq, mp); 47450Sstevel@tonic-gate } 47460Sstevel@tonic-gate 47470Sstevel@tonic-gate 47480Sstevel@tonic-gate 47490Sstevel@tonic-gate static void 47500Sstevel@tonic-gate tl_ordrel(mblk_t *mp, tl_endpt_t *tep) 47510Sstevel@tonic-gate { 47520Sstevel@tonic-gate queue_t *wq = tep->te_wq; 47530Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr; 47540Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 47550Sstevel@tonic-gate tl_endpt_t *peer_tep; 
47560Sstevel@tonic-gate queue_t *peer_rq; 47570Sstevel@tonic-gate boolean_t closing = tep->te_closing; 47580Sstevel@tonic-gate 47590Sstevel@tonic-gate if (msz < sizeof (struct T_ordrel_req)) { 47600Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 47615240Snordmark "tl_ordrel:invalid message")); 47620Sstevel@tonic-gate if (!closing) { 47630Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 47640Sstevel@tonic-gate } else { 47650Sstevel@tonic-gate freemsg(mp); 47660Sstevel@tonic-gate } 47670Sstevel@tonic-gate return; 47680Sstevel@tonic-gate } 47690Sstevel@tonic-gate 47700Sstevel@tonic-gate /* 47710Sstevel@tonic-gate * validate state 47720Sstevel@tonic-gate */ 47730Sstevel@tonic-gate switch (tep->te_state) { 47740Sstevel@tonic-gate case TS_DATA_XFER: 47750Sstevel@tonic-gate case TS_WREQ_ORDREL: 47760Sstevel@tonic-gate /* valid states */ 47770Sstevel@tonic-gate if (tep->te_conp != NULL) 47780Sstevel@tonic-gate break; 47790Sstevel@tonic-gate 47800Sstevel@tonic-gate if (tep->te_oconp == NULL) 47810Sstevel@tonic-gate break; 47820Sstevel@tonic-gate 47830Sstevel@tonic-gate /* 47840Sstevel@tonic-gate * For a socket the T_CONN_CON is sent early thus 47850Sstevel@tonic-gate * the peer might not yet have accepted the connection. 47860Sstevel@tonic-gate * If we are closing queue the packet with the T_CONN_IND. 47870Sstevel@tonic-gate * Otherwise defer processing the packet until the peer 47880Sstevel@tonic-gate * accepts the connection. 47890Sstevel@tonic-gate * Note that the queue is noenabled when we go into this 47900Sstevel@tonic-gate * state. 47910Sstevel@tonic-gate */ 47920Sstevel@tonic-gate if (!closing) { 47930Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 47945240Snordmark SL_TRACE|SL_ERROR, 47955240Snordmark "tl_ordlrel: ocon")); 47960Sstevel@tonic-gate TL_PUTBQ(tep, mp); 47970Sstevel@tonic-gate return; 47980Sstevel@tonic-gate } 47990Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 48005240Snordmark "tl_ordlrel: closing socket ocon")); 48010Sstevel@tonic-gate prim->type = T_ORDREL_IND; 48020Sstevel@tonic-gate (void) tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp); 48030Sstevel@tonic-gate return; 48040Sstevel@tonic-gate 48050Sstevel@tonic-gate default: 48060Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 48075240Snordmark SL_TRACE|SL_ERROR, 48085240Snordmark "tl_wput:T_ORDREL_REQ:out of state, state=%d", 48095240Snordmark tep->te_state)); 48100Sstevel@tonic-gate if (!closing) { 48110Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 48120Sstevel@tonic-gate } else { 48130Sstevel@tonic-gate freemsg(mp); 48140Sstevel@tonic-gate } 48150Sstevel@tonic-gate return; 48160Sstevel@tonic-gate } 48170Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ORDREL_REQ, tep->te_state); 48180Sstevel@tonic-gate 48190Sstevel@tonic-gate /* 48200Sstevel@tonic-gate * get connected endpoint 48210Sstevel@tonic-gate */ 48220Sstevel@tonic-gate if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) { 48230Sstevel@tonic-gate /* Peer closed */ 48240Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE, 48255240Snordmark "tl_ordrel: peer gone")); 48260Sstevel@tonic-gate freemsg(mp); 48270Sstevel@tonic-gate return; 48280Sstevel@tonic-gate } 48290Sstevel@tonic-gate 48300Sstevel@tonic-gate peer_rq = peer_tep->te_rq; 48310Sstevel@tonic-gate 48320Sstevel@tonic-gate /* 48330Sstevel@tonic-gate * Put it back if flow controlled except when we are closing. 
48340Sstevel@tonic-gate * Note: Messages already on queue when we are closing is bounded 48350Sstevel@tonic-gate * so we can ignore flow control. 48360Sstevel@tonic-gate */ 48370Sstevel@tonic-gate if (! canputnext(peer_rq) && !closing) { 48380Sstevel@tonic-gate TL_PUTBQ(tep, mp); 48390Sstevel@tonic-gate return; 48400Sstevel@tonic-gate } 48410Sstevel@tonic-gate 48420Sstevel@tonic-gate /* 48430Sstevel@tonic-gate * validate state on peer 48440Sstevel@tonic-gate */ 48450Sstevel@tonic-gate switch (peer_tep->te_state) { 48460Sstevel@tonic-gate case TS_DATA_XFER: 48470Sstevel@tonic-gate case TS_WIND_ORDREL: 48480Sstevel@tonic-gate /* valid states */ 48490Sstevel@tonic-gate break; 48500Sstevel@tonic-gate default: 48510Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 48525240Snordmark "tl_ordrel:rx side:invalid state")); 48530Sstevel@tonic-gate tl_merror(peer_tep->te_wq, mp, EPROTO); 48540Sstevel@tonic-gate return; 48550Sstevel@tonic-gate } 48560Sstevel@tonic-gate peer_tep->te_state = NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state); 48570Sstevel@tonic-gate 48580Sstevel@tonic-gate /* 48590Sstevel@tonic-gate * reuse message block 48600Sstevel@tonic-gate */ 48610Sstevel@tonic-gate prim->type = T_ORDREL_IND; 48620Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE, 48635240Snordmark "tl_ordrel: send ordrel_ind")); 48640Sstevel@tonic-gate 48650Sstevel@tonic-gate /* 48660Sstevel@tonic-gate * send data to connected peer 48670Sstevel@tonic-gate */ 48680Sstevel@tonic-gate putnext(peer_rq, mp); 48690Sstevel@tonic-gate } 48700Sstevel@tonic-gate 48710Sstevel@tonic-gate 48720Sstevel@tonic-gate /* 48730Sstevel@tonic-gate * Send T_UDERROR_IND. The error should be from the <sys/errno.h> space. 48740Sstevel@tonic-gate */ 48750Sstevel@tonic-gate static void 48760Sstevel@tonic-gate tl_uderr(queue_t *wq, mblk_t *mp, t_scalar_t err) 48770Sstevel@tonic-gate { 48780Sstevel@tonic-gate size_t err_sz; 48790Sstevel@tonic-gate tl_endpt_t *tep; 48800Sstevel@tonic-gate struct T_unitdata_req *udreq; 48810Sstevel@tonic-gate mblk_t *err_mp; 48820Sstevel@tonic-gate t_scalar_t alen; 48830Sstevel@tonic-gate t_scalar_t olen; 48840Sstevel@tonic-gate struct T_uderror_ind *uderr; 48850Sstevel@tonic-gate uchar_t *addr_startp; 48860Sstevel@tonic-gate 48870Sstevel@tonic-gate err_sz = sizeof (struct T_uderror_ind); 48880Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr; 48890Sstevel@tonic-gate udreq = (struct T_unitdata_req *)mp->b_rptr; 48900Sstevel@tonic-gate alen = udreq->DEST_length; 48910Sstevel@tonic-gate olen = udreq->OPT_length; 48920Sstevel@tonic-gate 48930Sstevel@tonic-gate if (alen > 0) 48940Sstevel@tonic-gate err_sz = T_ALIGN(err_sz + alen); 48950Sstevel@tonic-gate if (olen > 0) 48960Sstevel@tonic-gate err_sz += olen; 48970Sstevel@tonic-gate 48980Sstevel@tonic-gate err_mp = allocb(err_sz, BPRI_MED); 48990Sstevel@tonic-gate if (! 
err_mp) { 49000Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 49015240Snordmark "tl_uderr:allocb failure")); 49020Sstevel@tonic-gate /* 49030Sstevel@tonic-gate * Note: no rollback of state needed as it does 49040Sstevel@tonic-gate * not change in connectionless transport 49050Sstevel@tonic-gate */ 49060Sstevel@tonic-gate tl_memrecover(wq, mp, err_sz); 49070Sstevel@tonic-gate return; 49080Sstevel@tonic-gate } 49090Sstevel@tonic-gate 49100Sstevel@tonic-gate DB_TYPE(err_mp) = M_PROTO; 49110Sstevel@tonic-gate err_mp->b_wptr = err_mp->b_rptr + err_sz; 49120Sstevel@tonic-gate uderr = (struct T_uderror_ind *)err_mp->b_rptr; 49130Sstevel@tonic-gate uderr->PRIM_type = T_UDERROR_IND; 49140Sstevel@tonic-gate uderr->ERROR_type = err; 49150Sstevel@tonic-gate uderr->DEST_length = alen; 49160Sstevel@tonic-gate uderr->OPT_length = olen; 49170Sstevel@tonic-gate if (alen <= 0) { 49180Sstevel@tonic-gate uderr->DEST_offset = 0; 49190Sstevel@tonic-gate } else { 49200Sstevel@tonic-gate uderr->DEST_offset = 49215240Snordmark (t_scalar_t)sizeof (struct T_uderror_ind); 49220Sstevel@tonic-gate addr_startp = mp->b_rptr + udreq->DEST_offset; 49230Sstevel@tonic-gate bcopy(addr_startp, err_mp->b_rptr + uderr->DEST_offset, 49245240Snordmark (size_t)alen); 49250Sstevel@tonic-gate } 49260Sstevel@tonic-gate if (olen <= 0) { 49270Sstevel@tonic-gate uderr->OPT_offset = 0; 49280Sstevel@tonic-gate } else { 49290Sstevel@tonic-gate uderr->OPT_offset = 49305240Snordmark (t_scalar_t)T_ALIGN(sizeof (struct T_uderror_ind) + 49315240Snordmark uderr->DEST_length); 49320Sstevel@tonic-gate addr_startp = mp->b_rptr + udreq->OPT_offset; 49330Sstevel@tonic-gate bcopy(addr_startp, err_mp->b_rptr+uderr->OPT_offset, 49345240Snordmark (size_t)olen); 49350Sstevel@tonic-gate } 49360Sstevel@tonic-gate freemsg(mp); 49370Sstevel@tonic-gate 49380Sstevel@tonic-gate /* 49390Sstevel@tonic-gate * send indication message 49400Sstevel@tonic-gate */ 49410Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_UDERROR_IND, tep->te_state); 49420Sstevel@tonic-gate 49430Sstevel@tonic-gate qreply(wq, err_mp); 49440Sstevel@tonic-gate } 49450Sstevel@tonic-gate 49460Sstevel@tonic-gate static void 49470Sstevel@tonic-gate tl_unitdata_ser(mblk_t *mp, tl_endpt_t *tep) 49480Sstevel@tonic-gate { 49490Sstevel@tonic-gate queue_t *wq = tep->te_wq; 49500Sstevel@tonic-gate 49510Sstevel@tonic-gate if (!tep->te_closing && (wq->q_first != NULL)) { 49520Sstevel@tonic-gate TL_PUTQ(tep, mp); 49530Sstevel@tonic-gate } else if (tep->te_rq != NULL) 49540Sstevel@tonic-gate tl_unitdata(mp, tep); 49550Sstevel@tonic-gate else 49560Sstevel@tonic-gate freemsg(mp); 49570Sstevel@tonic-gate 49580Sstevel@tonic-gate tl_serializer_exit(tep); 49590Sstevel@tonic-gate tl_refrele(tep); 49600Sstevel@tonic-gate } 49610Sstevel@tonic-gate 49620Sstevel@tonic-gate /* 49630Sstevel@tonic-gate * Handle T_unitdata_req. 49640Sstevel@tonic-gate * If TL_SET[U]CRED or TL_SOCKUCRED generate the credentials options. 49650Sstevel@tonic-gate * If this is a socket pass through options unmodified. 
49660Sstevel@tonic-gate */ 49670Sstevel@tonic-gate static void 49680Sstevel@tonic-gate tl_unitdata(mblk_t *mp, tl_endpt_t *tep) 49690Sstevel@tonic-gate { 49700Sstevel@tonic-gate queue_t *wq = tep->te_wq; 49710Sstevel@tonic-gate soux_addr_t ux_addr; 49720Sstevel@tonic-gate tl_addr_t destaddr; 49730Sstevel@tonic-gate uchar_t *addr_startp; 49740Sstevel@tonic-gate tl_endpt_t *peer_tep; 49750Sstevel@tonic-gate struct T_unitdata_ind *udind; 49760Sstevel@tonic-gate struct T_unitdata_req *udreq; 49770Sstevel@tonic-gate ssize_t msz, ui_sz; 49780Sstevel@tonic-gate t_scalar_t alen, aoff, olen, ooff; 49790Sstevel@tonic-gate t_scalar_t oldolen = 0; 49800Sstevel@tonic-gate 49810Sstevel@tonic-gate udreq = (struct T_unitdata_req *)mp->b_rptr; 49820Sstevel@tonic-gate msz = MBLKL(mp); 49830Sstevel@tonic-gate 49840Sstevel@tonic-gate /* 49850Sstevel@tonic-gate * validate the state 49860Sstevel@tonic-gate */ 49870Sstevel@tonic-gate if (tep->te_state != TS_IDLE) { 49880Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 49895240Snordmark SL_TRACE|SL_ERROR, 49905240Snordmark "tl_wput:T_CONN_REQ:out of state")); 49910Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 49920Sstevel@tonic-gate return; 49930Sstevel@tonic-gate } 49940Sstevel@tonic-gate /* 49950Sstevel@tonic-gate * tep->te_state = NEXTSTATE(TE_UNITDATA_REQ, tep->te_state); 49960Sstevel@tonic-gate * (state does not change on this event) 49970Sstevel@tonic-gate */ 49980Sstevel@tonic-gate 49990Sstevel@tonic-gate /* 50000Sstevel@tonic-gate * validate the message 50010Sstevel@tonic-gate * Note: dereference fields in struct inside message only 50020Sstevel@tonic-gate * after validating the message length. 50030Sstevel@tonic-gate */ 50040Sstevel@tonic-gate if (msz < sizeof (struct T_unitdata_req)) { 50050Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 50065240Snordmark "tl_unitdata:invalid message length")); 50070Sstevel@tonic-gate tl_merror(wq, mp, EINVAL); 50080Sstevel@tonic-gate return; 50090Sstevel@tonic-gate } 50100Sstevel@tonic-gate alen = udreq->DEST_length; 50110Sstevel@tonic-gate aoff = udreq->DEST_offset; 50120Sstevel@tonic-gate oldolen = olen = udreq->OPT_length; 50130Sstevel@tonic-gate ooff = udreq->OPT_offset; 50140Sstevel@tonic-gate if (olen == 0) 50150Sstevel@tonic-gate ooff = 0; 50160Sstevel@tonic-gate 50170Sstevel@tonic-gate if (IS_SOCKET(tep)) { 50180Sstevel@tonic-gate if ((alen != TL_SOUX_ADDRLEN) || 50190Sstevel@tonic-gate (aoff < 0) || 50200Sstevel@tonic-gate (aoff + alen > msz) || 50210Sstevel@tonic-gate (olen < 0) || (ooff < 0) || 50220Sstevel@tonic-gate ((olen > 0) && ((ooff + olen) > msz))) { 50230Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 50245240Snordmark 1, SL_TRACE|SL_ERROR, 50255240Snordmark "tl_unitdata_req: invalid socket addr " 50265240Snordmark "(msz=%d, al=%d, ao=%d, ol=%d, oo = %d)", 50275240Snordmark (int)msz, alen, aoff, olen, ooff)); 50280Sstevel@tonic-gate tl_error_ack(wq, mp, TSYSERR, EINVAL, T_UNITDATA_REQ); 50290Sstevel@tonic-gate return; 50300Sstevel@tonic-gate } 50310Sstevel@tonic-gate bcopy(mp->b_rptr + aoff, &ux_addr, TL_SOUX_ADDRLEN); 50320Sstevel@tonic-gate 50330Sstevel@tonic-gate if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) && 50340Sstevel@tonic-gate (ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) { 50350Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 50365240Snordmark 1, SL_TRACE|SL_ERROR, 50375240Snordmark "tl_conn_req: invalid socket magic")); 50380Sstevel@tonic-gate tl_error_ack(wq, mp, TSYSERR, EINVAL, T_UNITDATA_REQ); 50390Sstevel@tonic-gate return; 
50400Sstevel@tonic-gate } 50410Sstevel@tonic-gate } else { 50420Sstevel@tonic-gate if ((alen < 0) || 50430Sstevel@tonic-gate (aoff < 0) || 50440Sstevel@tonic-gate ((alen > 0) && ((aoff + alen) > msz)) || 50450Sstevel@tonic-gate ((ssize_t)alen > (msz - sizeof (struct T_unitdata_req))) || 50460Sstevel@tonic-gate ((aoff + alen) < 0) || 50470Sstevel@tonic-gate ((olen > 0) && ((ooff + olen) > msz)) || 50480Sstevel@tonic-gate (olen < 0) || 50490Sstevel@tonic-gate (ooff < 0) || 50500Sstevel@tonic-gate ((ssize_t)olen > (msz - sizeof (struct T_unitdata_req)))) { 50510Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 50520Sstevel@tonic-gate SL_TRACE|SL_ERROR, 50530Sstevel@tonic-gate "tl_unitdata:invalid unit data message")); 50540Sstevel@tonic-gate tl_merror(wq, mp, EINVAL); 50550Sstevel@tonic-gate return; 50560Sstevel@tonic-gate } 50570Sstevel@tonic-gate } 50580Sstevel@tonic-gate 50590Sstevel@tonic-gate /* Options not supported unless it's a socket */ 50600Sstevel@tonic-gate if (alen == 0 || (olen != 0 && !IS_SOCKET(tep))) { 50610Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 50620Sstevel@tonic-gate "tl_unitdata:option use(unsupported) or zero len addr")); 50630Sstevel@tonic-gate tl_uderr(wq, mp, EPROTO); 50640Sstevel@tonic-gate return; 50650Sstevel@tonic-gate } 50660Sstevel@tonic-gate #ifdef DEBUG 50670Sstevel@tonic-gate /* 50680Sstevel@tonic-gate * Mild form of ASSERT()ion to detect broken TPI apps. 50690Sstevel@tonic-gate * if (! assertion) 50700Sstevel@tonic-gate * log warning; 50710Sstevel@tonic-gate */ 50720Sstevel@tonic-gate if (! (aoff >= (t_scalar_t)sizeof (struct T_unitdata_req))) { 50730Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 50745240Snordmark "tl_unitdata:addr overlaps TPI message")); 50750Sstevel@tonic-gate } 50760Sstevel@tonic-gate #endif 50770Sstevel@tonic-gate /* 50780Sstevel@tonic-gate * get destination endpoint 50790Sstevel@tonic-gate */ 50800Sstevel@tonic-gate destaddr.ta_alen = alen; 50810Sstevel@tonic-gate destaddr.ta_abuf = mp->b_rptr + aoff; 50820Sstevel@tonic-gate destaddr.ta_zoneid = tep->te_zoneid; 50830Sstevel@tonic-gate 50840Sstevel@tonic-gate /* 50850Sstevel@tonic-gate * Check whether the destination is the same that was used previously 50860Sstevel@tonic-gate * and the destination endpoint is in the right state. If something is 50870Sstevel@tonic-gate * wrong, find destination again and cache it. 50880Sstevel@tonic-gate */ 50890Sstevel@tonic-gate peer_tep = tep->te_lastep; 50900Sstevel@tonic-gate 50910Sstevel@tonic-gate if ((peer_tep == NULL) || peer_tep->te_closing || 50920Sstevel@tonic-gate (peer_tep->te_state != TS_IDLE) || 50930Sstevel@tonic-gate !tl_eqaddr(&destaddr, &peer_tep->te_ap)) { 50940Sstevel@tonic-gate /* 50950Sstevel@tonic-gate * Not the same as cached destination , need to find the right 50960Sstevel@tonic-gate * destination. 50970Sstevel@tonic-gate */ 50980Sstevel@tonic-gate peer_tep = (IS_SOCKET(tep) ? 50990Sstevel@tonic-gate tl_sock_find_peer(tep, &ux_addr) : 51000Sstevel@tonic-gate tl_find_peer(tep, &destaddr)); 51010Sstevel@tonic-gate 51020Sstevel@tonic-gate if (peer_tep == NULL) { 51030Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 51045240Snordmark SL_TRACE|SL_ERROR, 51055240Snordmark "tl_unitdata:no one at destination address")); 51060Sstevel@tonic-gate tl_uderr(wq, mp, ECONNRESET); 51070Sstevel@tonic-gate return; 51080Sstevel@tonic-gate } 51090Sstevel@tonic-gate 51100Sstevel@tonic-gate /* 51110Sstevel@tonic-gate * Cache the new peer. 
51120Sstevel@tonic-gate */ 51130Sstevel@tonic-gate if (tep->te_lastep != NULL) 51140Sstevel@tonic-gate tl_refrele(tep->te_lastep); 51150Sstevel@tonic-gate 51160Sstevel@tonic-gate tep->te_lastep = peer_tep; 51170Sstevel@tonic-gate } 51180Sstevel@tonic-gate 51190Sstevel@tonic-gate if (peer_tep->te_state != TS_IDLE) { 51200Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 51215240Snordmark "tl_unitdata:provider in invalid state")); 51220Sstevel@tonic-gate tl_uderr(wq, mp, EPROTO); 51230Sstevel@tonic-gate return; 51240Sstevel@tonic-gate } 51250Sstevel@tonic-gate 51260Sstevel@tonic-gate ASSERT(peer_tep->te_rq != NULL); 51270Sstevel@tonic-gate 51280Sstevel@tonic-gate /* 51290Sstevel@tonic-gate * Put it back if flow controlled except when we are closing. 51300Sstevel@tonic-gate * Note: Messages already on queue when we are closing is bounded 51310Sstevel@tonic-gate * so we can ignore flow control. 51320Sstevel@tonic-gate */ 51330Sstevel@tonic-gate if (!canputnext(peer_tep->te_rq) && !(tep->te_closing)) { 51340Sstevel@tonic-gate /* record what we are flow controlled on */ 51350Sstevel@tonic-gate if (tep->te_flowq != NULL) { 51360Sstevel@tonic-gate list_remove(&tep->te_flowq->te_flowlist, tep); 51370Sstevel@tonic-gate } 51380Sstevel@tonic-gate list_insert_head(&peer_tep->te_flowlist, tep); 51390Sstevel@tonic-gate tep->te_flowq = peer_tep; 51400Sstevel@tonic-gate TL_PUTBQ(tep, mp); 51410Sstevel@tonic-gate return; 51420Sstevel@tonic-gate } 51430Sstevel@tonic-gate /* 51440Sstevel@tonic-gate * prepare indication message 51450Sstevel@tonic-gate */ 51460Sstevel@tonic-gate 51470Sstevel@tonic-gate /* 51480Sstevel@tonic-gate * calculate length of message 51490Sstevel@tonic-gate */ 51500Sstevel@tonic-gate if (peer_tep->te_flag & TL_SETCRED) { 51510Sstevel@tonic-gate ASSERT(olen == 0); 51520Sstevel@tonic-gate olen = (t_scalar_t)sizeof (struct opthdr) + 51535240Snordmark OPTLEN(sizeof (tl_credopt_t)); 51540Sstevel@tonic-gate /* 1 option only */ 51550Sstevel@tonic-gate } else if (peer_tep->te_flag & TL_SETUCRED) { 51560Sstevel@tonic-gate ASSERT(olen == 0); 51570Sstevel@tonic-gate olen = (t_scalar_t)sizeof (struct opthdr) + OPTLEN(ucredsize); 51580Sstevel@tonic-gate /* 1 option only */ 51590Sstevel@tonic-gate } else if (peer_tep->te_flag & TL_SOCKUCRED) { 51600Sstevel@tonic-gate /* Possibly more than one option */ 51610Sstevel@tonic-gate olen += (t_scalar_t)sizeof (struct T_opthdr) + 51620Sstevel@tonic-gate OPTLEN(ucredsize); 51630Sstevel@tonic-gate } 51640Sstevel@tonic-gate 51650Sstevel@tonic-gate ui_sz = T_ALIGN(sizeof (struct T_unitdata_ind) + tep->te_alen) + 51665240Snordmark olen; 51670Sstevel@tonic-gate /* 51680Sstevel@tonic-gate * If the unitdata_ind fits and we are not adding options 51690Sstevel@tonic-gate * reuse the udreq mblk. 51700Sstevel@tonic-gate */ 51710Sstevel@tonic-gate if (msz >= ui_sz && alen >= tep->te_alen && 51720Sstevel@tonic-gate !(peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED))) { 51730Sstevel@tonic-gate /* 51740Sstevel@tonic-gate * Reuse the original mblk. Leave options in place. 
51750Sstevel@tonic-gate */ 51760Sstevel@tonic-gate udind = (struct T_unitdata_ind *)mp->b_rptr; 51770Sstevel@tonic-gate udind->PRIM_type = T_UNITDATA_IND; 51780Sstevel@tonic-gate udind->SRC_length = tep->te_alen; 51790Sstevel@tonic-gate addr_startp = mp->b_rptr + udind->SRC_offset; 51800Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen); 51810Sstevel@tonic-gate } else { 51820Sstevel@tonic-gate /* Allocate a new T_unidata_ind message */ 51830Sstevel@tonic-gate mblk_t *ui_mp; 51840Sstevel@tonic-gate 51850Sstevel@tonic-gate ui_mp = allocb(ui_sz, BPRI_MED); 51860Sstevel@tonic-gate if (! ui_mp) { 51870Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 4, SL_TRACE, 51885240Snordmark "tl_unitdata:allocb failure:message queued")); 51890Sstevel@tonic-gate tl_memrecover(wq, mp, ui_sz); 51900Sstevel@tonic-gate return; 51910Sstevel@tonic-gate } 51920Sstevel@tonic-gate 51930Sstevel@tonic-gate /* 51940Sstevel@tonic-gate * fill in T_UNITDATA_IND contents 51950Sstevel@tonic-gate */ 51960Sstevel@tonic-gate DB_TYPE(ui_mp) = M_PROTO; 51970Sstevel@tonic-gate ui_mp->b_wptr = ui_mp->b_rptr + ui_sz; 51980Sstevel@tonic-gate udind = (struct T_unitdata_ind *)ui_mp->b_rptr; 51990Sstevel@tonic-gate udind->PRIM_type = T_UNITDATA_IND; 52000Sstevel@tonic-gate udind->SRC_offset = (t_scalar_t)sizeof (struct T_unitdata_ind); 52010Sstevel@tonic-gate udind->SRC_length = tep->te_alen; 52020Sstevel@tonic-gate addr_startp = ui_mp->b_rptr + udind->SRC_offset; 52030Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen); 52040Sstevel@tonic-gate udind->OPT_offset = 52050Sstevel@tonic-gate (t_scalar_t)T_ALIGN(udind->SRC_offset + udind->SRC_length); 52060Sstevel@tonic-gate udind->OPT_length = olen; 52070Sstevel@tonic-gate if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED)) { 52080Sstevel@tonic-gate if (oldolen != 0) { 52090Sstevel@tonic-gate bcopy((void *)((uintptr_t)udreq + ooff), 52100Sstevel@tonic-gate (void *)((uintptr_t)udind + 52110Sstevel@tonic-gate udind->OPT_offset), 52120Sstevel@tonic-gate oldolen); 52130Sstevel@tonic-gate } 52140Sstevel@tonic-gate tl_fill_option(ui_mp->b_rptr + udind->OPT_offset + 52150Sstevel@tonic-gate oldolen, 52160Sstevel@tonic-gate DB_CREDDEF(mp, tep->te_credp), TLPID(mp, tep), 52171676Sjpk peer_tep->te_flag, peer_tep->te_credp); 52180Sstevel@tonic-gate } else { 52190Sstevel@tonic-gate bcopy((void *)((uintptr_t)udreq + ooff), 52205240Snordmark (void *)((uintptr_t)udind + udind->OPT_offset), 52215240Snordmark olen); 52220Sstevel@tonic-gate } 52230Sstevel@tonic-gate 52240Sstevel@tonic-gate /* 52250Sstevel@tonic-gate * relink data blocks from mp to ui_mp 52260Sstevel@tonic-gate */ 52270Sstevel@tonic-gate ui_mp->b_cont = mp->b_cont; 52280Sstevel@tonic-gate freeb(mp); 52290Sstevel@tonic-gate mp = ui_mp; 52300Sstevel@tonic-gate } 52310Sstevel@tonic-gate /* 52320Sstevel@tonic-gate * send indication message 52330Sstevel@tonic-gate */ 52340Sstevel@tonic-gate peer_tep->te_state = NEXTSTATE(TE_UNITDATA_IND, peer_tep->te_state); 52350Sstevel@tonic-gate putnext(peer_tep->te_rq, mp); 52360Sstevel@tonic-gate } 52370Sstevel@tonic-gate 52380Sstevel@tonic-gate 52390Sstevel@tonic-gate 52400Sstevel@tonic-gate /* 52410Sstevel@tonic-gate * Check if a given addr is in use. 52420Sstevel@tonic-gate * Endpoint ptr returned or NULL if not found. 52430Sstevel@tonic-gate * The name space is separate for each mode. This implies that 52440Sstevel@tonic-gate * sockets get their own name space. 
52450Sstevel@tonic-gate */
52460Sstevel@tonic-gate static tl_endpt_t *
52470Sstevel@tonic-gate tl_find_peer(tl_endpt_t *tep, tl_addr_t *ap)
52480Sstevel@tonic-gate {
52490Sstevel@tonic-gate tl_endpt_t *peer_tep = NULL;
52500Sstevel@tonic-gate int rc = mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)ap,
52510Sstevel@tonic-gate (mod_hash_val_t *)&peer_tep, tl_find_callback);
52520Sstevel@tonic-gate
52530Sstevel@tonic-gate ASSERT(! IS_SOCKET(tep));
52540Sstevel@tonic-gate
52550Sstevel@tonic-gate ASSERT(ap != NULL && ap->ta_alen > 0);
52560Sstevel@tonic-gate ASSERT(ap->ta_zoneid == tep->te_zoneid);
52570Sstevel@tonic-gate ASSERT(ap->ta_abuf != NULL);
52580Sstevel@tonic-gate ASSERT(EQUIV(rc == 0, peer_tep != NULL));
52590Sstevel@tonic-gate ASSERT(IMPLY(rc == 0,
52605240Snordmark (tep->te_zoneid == peer_tep->te_zoneid) &&
52615240Snordmark (tep->te_transport == peer_tep->te_transport)));
52620Sstevel@tonic-gate
52630Sstevel@tonic-gate if ((rc == 0) && (peer_tep->te_closing)) {
52640Sstevel@tonic-gate tl_refrele(peer_tep);
52650Sstevel@tonic-gate peer_tep = NULL;
52660Sstevel@tonic-gate }
52670Sstevel@tonic-gate
52680Sstevel@tonic-gate return (peer_tep);
52690Sstevel@tonic-gate }
52700Sstevel@tonic-gate
52710Sstevel@tonic-gate /*
52720Sstevel@tonic-gate * Find the peer for a socket based on its unix domain address.
52730Sstevel@tonic-gate * For implicit addresses our peer can be found by the minor number in the ai
52747409SRic.Aleshire@Sun.COM * hash. For explicit binds we look up the vnode address in the addr hash.
52750Sstevel@tonic-gate */
52760Sstevel@tonic-gate static tl_endpt_t *
52770Sstevel@tonic-gate tl_sock_find_peer(tl_endpt_t *tep, soux_addr_t *ux_addr)
52780Sstevel@tonic-gate {
52790Sstevel@tonic-gate tl_endpt_t *peer_tep = NULL;
52800Sstevel@tonic-gate mod_hash_t *hash = ux_addr->soua_magic == SOU_MAGIC_IMPLICIT ?
52810Sstevel@tonic-gate tep->te_aihash : tep->te_addrhash;
52820Sstevel@tonic-gate int rc = mod_hash_find_cb(hash, (mod_hash_key_t)ux_addr->soua_vp,
52830Sstevel@tonic-gate (mod_hash_val_t *)&peer_tep, tl_find_callback);
52840Sstevel@tonic-gate
52850Sstevel@tonic-gate ASSERT(IS_SOCKET(tep));
52860Sstevel@tonic-gate ASSERT(EQUIV(rc == 0, peer_tep != NULL));
52877409SRic.Aleshire@Sun.COM ASSERT(IMPLY(rc == 0, (tep->te_transport == peer_tep->te_transport)));
52887409SRic.Aleshire@Sun.COM
52897409SRic.Aleshire@Sun.COM if (peer_tep != NULL) {
52907409SRic.Aleshire@Sun.COM /* Don't attempt to use a closing peer. */
52917409SRic.Aleshire@Sun.COM if (peer_tep->te_closing)
52927409SRic.Aleshire@Sun.COM goto errout;
52937409SRic.Aleshire@Sun.COM
52947409SRic.Aleshire@Sun.COM /*
52957409SRic.Aleshire@Sun.COM * Cross-zone unix sockets are permitted, but for Trusted
52967409SRic.Aleshire@Sun.COM * Extensions only, the "server" for these must be in the
52977409SRic.Aleshire@Sun.COM * global zone.
52987409SRic.Aleshire@Sun.COM */ 52997409SRic.Aleshire@Sun.COM if ((peer_tep->te_zoneid != tep->te_zoneid) && 53007409SRic.Aleshire@Sun.COM is_system_labeled() && 53017409SRic.Aleshire@Sun.COM (peer_tep->te_zoneid != GLOBAL_ZONEID)) 53027409SRic.Aleshire@Sun.COM goto errout; 53030Sstevel@tonic-gate } 53040Sstevel@tonic-gate 53050Sstevel@tonic-gate return (peer_tep); 53067409SRic.Aleshire@Sun.COM 53077409SRic.Aleshire@Sun.COM errout: 53087409SRic.Aleshire@Sun.COM tl_refrele(peer_tep); 53097409SRic.Aleshire@Sun.COM return (NULL); 53100Sstevel@tonic-gate } 53110Sstevel@tonic-gate 53120Sstevel@tonic-gate /* 53130Sstevel@tonic-gate * Generate a free addr and return it in struct pointed by ap 53140Sstevel@tonic-gate * but allocating space for address buffer. 53150Sstevel@tonic-gate * The generated address will be at least 4 bytes long and, if req->ta_alen 53160Sstevel@tonic-gate * exceeds 4 bytes, be req->ta_alen bytes long. 53170Sstevel@tonic-gate * 53180Sstevel@tonic-gate * If address is found it will be inserted in the hash. 53190Sstevel@tonic-gate * 53200Sstevel@tonic-gate * If req->ta_alen is larger than the default alen (4 bytes) the last 53210Sstevel@tonic-gate * alen-4 bytes will always be the same as in req. 53220Sstevel@tonic-gate * 53230Sstevel@tonic-gate * Return 0 for failure. 53240Sstevel@tonic-gate * Return non-zero for success. 53250Sstevel@tonic-gate */ 53260Sstevel@tonic-gate static boolean_t 53270Sstevel@tonic-gate tl_get_any_addr(tl_endpt_t *tep, tl_addr_t *req) 53280Sstevel@tonic-gate { 53290Sstevel@tonic-gate t_scalar_t alen; 53300Sstevel@tonic-gate uint32_t loopcnt; /* Limit loop to 2^32 */ 53310Sstevel@tonic-gate 53320Sstevel@tonic-gate ASSERT(tep->te_hash_hndl != NULL); 53330Sstevel@tonic-gate ASSERT(! IS_SOCKET(tep)); 53340Sstevel@tonic-gate 53350Sstevel@tonic-gate if (tep->te_hash_hndl == NULL) 53360Sstevel@tonic-gate return (B_FALSE); 53370Sstevel@tonic-gate 53380Sstevel@tonic-gate /* 53390Sstevel@tonic-gate * check if default addr is in use 53400Sstevel@tonic-gate * if it is - bump it and try again 53410Sstevel@tonic-gate */ 53420Sstevel@tonic-gate if (req == NULL) { 53430Sstevel@tonic-gate alen = sizeof (uint32_t); 53440Sstevel@tonic-gate } else { 53450Sstevel@tonic-gate alen = max(req->ta_alen, sizeof (uint32_t)); 53460Sstevel@tonic-gate ASSERT(tep->te_zoneid == req->ta_zoneid); 53470Sstevel@tonic-gate } 53480Sstevel@tonic-gate 53490Sstevel@tonic-gate if (tep->te_alen < alen) { 53500Sstevel@tonic-gate void *abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP); 53510Sstevel@tonic-gate 53520Sstevel@tonic-gate /* 53530Sstevel@tonic-gate * Not enough space in tep->ta_ap to hold the address, 53540Sstevel@tonic-gate * allocate a bigger space. 53550Sstevel@tonic-gate */ 53560Sstevel@tonic-gate if (abuf == NULL) 53570Sstevel@tonic-gate return (B_FALSE); 53580Sstevel@tonic-gate 53590Sstevel@tonic-gate if (tep->te_alen > 0) 53600Sstevel@tonic-gate kmem_free(tep->te_abuf, tep->te_alen); 53610Sstevel@tonic-gate 53620Sstevel@tonic-gate tep->te_alen = alen; 53630Sstevel@tonic-gate tep->te_abuf = abuf; 53640Sstevel@tonic-gate } 53650Sstevel@tonic-gate 53660Sstevel@tonic-gate /* Copy in the address in req */ 53670Sstevel@tonic-gate if (req != NULL) { 53680Sstevel@tonic-gate ASSERT(alen >= req->ta_alen); 53690Sstevel@tonic-gate bcopy(req->ta_abuf, tep->te_abuf, (size_t)req->ta_alen); 53700Sstevel@tonic-gate } 53710Sstevel@tonic-gate 53720Sstevel@tonic-gate /* 53730Sstevel@tonic-gate * First try minor number then try default addresses. 
53740Sstevel@tonic-gate */ 53750Sstevel@tonic-gate bcopy(&tep->te_minor, tep->te_abuf, sizeof (uint32_t)); 53760Sstevel@tonic-gate 53770Sstevel@tonic-gate for (loopcnt = 0; loopcnt < UINT32_MAX; loopcnt++) { 53780Sstevel@tonic-gate if (mod_hash_insert_reserve(tep->te_addrhash, 53795240Snordmark (mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep, 53805240Snordmark tep->te_hash_hndl) == 0) { 53810Sstevel@tonic-gate /* 53820Sstevel@tonic-gate * found free address 53830Sstevel@tonic-gate */ 53840Sstevel@tonic-gate tep->te_flag |= TL_ADDRHASHED; 53850Sstevel@tonic-gate tep->te_hash_hndl = NULL; 53860Sstevel@tonic-gate 53870Sstevel@tonic-gate return (B_TRUE); /* successful return */ 53880Sstevel@tonic-gate } 53890Sstevel@tonic-gate /* 53900Sstevel@tonic-gate * Use default address. 53910Sstevel@tonic-gate */ 53920Sstevel@tonic-gate bcopy(&tep->te_defaddr, tep->te_abuf, sizeof (uint32_t)); 53930Sstevel@tonic-gate atomic_add_32(&tep->te_defaddr, 1); 53940Sstevel@tonic-gate } 53950Sstevel@tonic-gate 53960Sstevel@tonic-gate /* 53970Sstevel@tonic-gate * Failed to find anything. 53980Sstevel@tonic-gate */ 53990Sstevel@tonic-gate (void) (STRLOG(TL_ID, -1, 1, SL_ERROR, 54005240Snordmark "tl_get_any_addr:looped 2^32 times")); 54010Sstevel@tonic-gate return (B_FALSE); 54020Sstevel@tonic-gate } 54030Sstevel@tonic-gate 54040Sstevel@tonic-gate /* 54050Sstevel@tonic-gate * reallocb + set r/w ptrs to reflect size. 54060Sstevel@tonic-gate */ 54070Sstevel@tonic-gate static mblk_t * 54080Sstevel@tonic-gate tl_resizemp(mblk_t *mp, ssize_t new_size) 54090Sstevel@tonic-gate { 54100Sstevel@tonic-gate if ((mp = reallocb(mp, new_size, 0)) == NULL) 54110Sstevel@tonic-gate return (NULL); 54120Sstevel@tonic-gate 54130Sstevel@tonic-gate mp->b_rptr = DB_BASE(mp); 54140Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + new_size; 54150Sstevel@tonic-gate return (mp); 54160Sstevel@tonic-gate } 54170Sstevel@tonic-gate 54180Sstevel@tonic-gate static void 54190Sstevel@tonic-gate tl_cl_backenable(tl_endpt_t *tep) 54200Sstevel@tonic-gate { 54210Sstevel@tonic-gate list_t *l = &tep->te_flowlist; 54220Sstevel@tonic-gate tl_endpt_t *elp; 54230Sstevel@tonic-gate 54240Sstevel@tonic-gate ASSERT(IS_CLTS(tep)); 54250Sstevel@tonic-gate 54260Sstevel@tonic-gate for (elp = list_head(l); elp != NULL; elp = list_head(l)) { 54270Sstevel@tonic-gate ASSERT(tep->te_ser == elp->te_ser); 54280Sstevel@tonic-gate ASSERT(elp->te_flowq == tep); 54290Sstevel@tonic-gate if (! elp->te_closing) 54300Sstevel@tonic-gate TL_QENABLE(elp); 54310Sstevel@tonic-gate elp->te_flowq = NULL; 54320Sstevel@tonic-gate list_remove(l, elp); 54330Sstevel@tonic-gate } 54340Sstevel@tonic-gate } 54350Sstevel@tonic-gate 54360Sstevel@tonic-gate /* 54370Sstevel@tonic-gate * Unconnect endpoints. 54380Sstevel@tonic-gate */ 54390Sstevel@tonic-gate static void 54400Sstevel@tonic-gate tl_co_unconnect(tl_endpt_t *tep) 54410Sstevel@tonic-gate { 54420Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_conp; 54430Sstevel@tonic-gate tl_endpt_t *srv_tep = tep->te_oconp; 54440Sstevel@tonic-gate list_t *l; 54450Sstevel@tonic-gate tl_icon_t *tip; 54460Sstevel@tonic-gate tl_endpt_t *cl_tep; 54470Sstevel@tonic-gate mblk_t *d_mp; 54480Sstevel@tonic-gate 54490Sstevel@tonic-gate ASSERT(IS_COTS(tep)); 54500Sstevel@tonic-gate /* 54510Sstevel@tonic-gate * If our peer is closing, don't use it. 
54520Sstevel@tonic-gate */ 54530Sstevel@tonic-gate if ((peer_tep != NULL) && peer_tep->te_closing) { 54540Sstevel@tonic-gate TL_UNCONNECT(tep->te_conp); 54550Sstevel@tonic-gate peer_tep = NULL; 54560Sstevel@tonic-gate } 54570Sstevel@tonic-gate if ((srv_tep != NULL) && srv_tep->te_closing) { 54580Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 54590Sstevel@tonic-gate srv_tep = NULL; 54600Sstevel@tonic-gate } 54610Sstevel@tonic-gate 54620Sstevel@tonic-gate if (tep->te_nicon > 0) { 54630Sstevel@tonic-gate l = &tep->te_iconp; 54640Sstevel@tonic-gate /* 54650Sstevel@tonic-gate * If incoming requests pending, change state 54660Sstevel@tonic-gate * of clients on disconnect ind event and send 54670Sstevel@tonic-gate * discon_ind pdu to modules above them 54680Sstevel@tonic-gate * for server: all clients get disconnect 54690Sstevel@tonic-gate */ 54700Sstevel@tonic-gate 54710Sstevel@tonic-gate while (tep->te_nicon > 0) { 54720Sstevel@tonic-gate tip = list_head(l); 54730Sstevel@tonic-gate cl_tep = tip->ti_tep; 54740Sstevel@tonic-gate 54750Sstevel@tonic-gate if (cl_tep == NULL) { 54760Sstevel@tonic-gate tl_freetip(tep, tip); 54770Sstevel@tonic-gate continue; 54780Sstevel@tonic-gate } 54790Sstevel@tonic-gate 54800Sstevel@tonic-gate if (cl_tep->te_oconp != NULL) { 54810Sstevel@tonic-gate ASSERT(cl_tep != cl_tep->te_oconp); 54820Sstevel@tonic-gate TL_UNCONNECT(cl_tep->te_oconp); 54830Sstevel@tonic-gate } 54840Sstevel@tonic-gate 54850Sstevel@tonic-gate if (cl_tep->te_closing) { 54860Sstevel@tonic-gate tl_freetip(tep, tip); 54870Sstevel@tonic-gate continue; 54880Sstevel@tonic-gate } 54890Sstevel@tonic-gate 54900Sstevel@tonic-gate enableok(cl_tep->te_wq); 54910Sstevel@tonic-gate TL_QENABLE(cl_tep); 54920Sstevel@tonic-gate d_mp = tl_discon_ind_alloc(ECONNREFUSED, BADSEQNUM); 54930Sstevel@tonic-gate if (d_mp != NULL) { 54940Sstevel@tonic-gate cl_tep->te_state = TS_IDLE; 54950Sstevel@tonic-gate putnext(cl_tep->te_rq, d_mp); 54960Sstevel@tonic-gate } else { 54970Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 54985240Snordmark SL_TRACE|SL_ERROR, 54995240Snordmark "tl_co_unconnect:icmng: " 55005240Snordmark "allocb failure")); 55010Sstevel@tonic-gate } 55020Sstevel@tonic-gate tl_freetip(tep, tip); 55030Sstevel@tonic-gate } 55040Sstevel@tonic-gate } else if (srv_tep != NULL) { 55050Sstevel@tonic-gate /* 55060Sstevel@tonic-gate * If outgoing request pending, change state 55070Sstevel@tonic-gate * of server on discon ind event 55080Sstevel@tonic-gate */ 55090Sstevel@tonic-gate 55100Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect && 55110Sstevel@tonic-gate IS_COTSORD(srv_tep) && 55120Sstevel@tonic-gate !tl_icon_hasprim(srv_tep, tep->te_seqno, T_ORDREL_IND)) { 55130Sstevel@tonic-gate /* 55140Sstevel@tonic-gate * Queue ordrel_ind for server to be picked up 55150Sstevel@tonic-gate * when the connection is accepted. 
55160Sstevel@tonic-gate */ 55170Sstevel@tonic-gate d_mp = tl_ordrel_ind_alloc(); 55180Sstevel@tonic-gate } else { 55190Sstevel@tonic-gate /* 55200Sstevel@tonic-gate * send discon_ind to server 55210Sstevel@tonic-gate */ 55220Sstevel@tonic-gate d_mp = tl_discon_ind_alloc(ECONNRESET, tep->te_seqno); 55230Sstevel@tonic-gate } 55240Sstevel@tonic-gate if (d_mp == NULL) { 55250Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 55265240Snordmark SL_TRACE|SL_ERROR, 55275240Snordmark "tl_co_unconnect:outgoing:allocb failure")); 55280Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 55290Sstevel@tonic-gate goto discon_peer; 55300Sstevel@tonic-gate } 55310Sstevel@tonic-gate 55320Sstevel@tonic-gate /* 55330Sstevel@tonic-gate * If this is a socket the T_DISCON_IND is queued with 55340Sstevel@tonic-gate * the T_CONN_IND. Otherwise the T_CONN_IND is removed 55350Sstevel@tonic-gate * from the list of pending connections. 55360Sstevel@tonic-gate * Note that when te_oconp is set the peer better have 55370Sstevel@tonic-gate * a t_connind_t for the client. 55380Sstevel@tonic-gate */ 55390Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect) { 55400Sstevel@tonic-gate /* 55410Sstevel@tonic-gate * Queue the disconnection message. 55420Sstevel@tonic-gate */ 55430Sstevel@tonic-gate tl_icon_queuemsg(srv_tep, tep->te_seqno, d_mp); 55440Sstevel@tonic-gate } else { 55450Sstevel@tonic-gate tip = tl_icon_find(srv_tep, tep->te_seqno); 55460Sstevel@tonic-gate if (tip == NULL) { 55470Sstevel@tonic-gate freemsg(d_mp); 55480Sstevel@tonic-gate } else { 55490Sstevel@tonic-gate ASSERT(tep == tip->ti_tep); 55500Sstevel@tonic-gate ASSERT(tep->te_ser == srv_tep->te_ser); 55510Sstevel@tonic-gate /* 55520Sstevel@tonic-gate * Delete tip from the server list. 55530Sstevel@tonic-gate */ 55540Sstevel@tonic-gate if (srv_tep->te_nicon == 1) { 55550Sstevel@tonic-gate srv_tep->te_state = 55560Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND2, 55575240Snordmark srv_tep->te_state); 55580Sstevel@tonic-gate } else { 55590Sstevel@tonic-gate srv_tep->te_state = 55600Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND3, 55615240Snordmark srv_tep->te_state); 55620Sstevel@tonic-gate } 55630Sstevel@tonic-gate ASSERT(*(uint32_t *)(d_mp->b_rptr) == 55640Sstevel@tonic-gate T_DISCON_IND); 55650Sstevel@tonic-gate putnext(srv_tep->te_rq, d_mp); 55660Sstevel@tonic-gate tl_freetip(srv_tep, tip); 55670Sstevel@tonic-gate } 55680Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 55690Sstevel@tonic-gate srv_tep = NULL; 55700Sstevel@tonic-gate } 55710Sstevel@tonic-gate } else if (peer_tep != NULL) { 55720Sstevel@tonic-gate /* 55730Sstevel@tonic-gate * unconnect existing connection 55740Sstevel@tonic-gate * If connected, change state of peer on 55750Sstevel@tonic-gate * discon ind event and send discon ind pdu 55760Sstevel@tonic-gate * to module above it 55770Sstevel@tonic-gate */ 55780Sstevel@tonic-gate 55790Sstevel@tonic-gate ASSERT(tep->te_ser == peer_tep->te_ser); 55800Sstevel@tonic-gate if (IS_COTSORD(peer_tep) && 55810Sstevel@tonic-gate (peer_tep->te_state == TS_WIND_ORDREL || 55820Sstevel@tonic-gate peer_tep->te_state == TS_DATA_XFER)) { 55830Sstevel@tonic-gate /* 55840Sstevel@tonic-gate * send ordrel ind 55850Sstevel@tonic-gate */ 55860Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE, 55870Sstevel@tonic-gate "tl_co_unconnect:connected: ordrel_ind state %d->%d", 55885240Snordmark peer_tep->te_state, 55895240Snordmark NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state))); 55900Sstevel@tonic-gate d_mp = tl_ordrel_ind_alloc(); 
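/*
 * If this allocation fails, the code below still enables the peer's
 * queue and jumps to discon_peer, so the cross-pointers are cleaned
 * up when this endpoint is closing.
 */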
55910Sstevel@tonic-gate if (! d_mp) { 55920Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 55930Sstevel@tonic-gate SL_TRACE|SL_ERROR, 55940Sstevel@tonic-gate "tl_co_unconnect:connected:" 55950Sstevel@tonic-gate "allocb failure")); 55960Sstevel@tonic-gate /* 55970Sstevel@tonic-gate * Continue with cleaning up peer as 55980Sstevel@tonic-gate * this side may go away with the close 55990Sstevel@tonic-gate */ 56000Sstevel@tonic-gate TL_QENABLE(peer_tep); 56010Sstevel@tonic-gate goto discon_peer; 56020Sstevel@tonic-gate } 56030Sstevel@tonic-gate peer_tep->te_state = 56045240Snordmark NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state); 56050Sstevel@tonic-gate 56060Sstevel@tonic-gate putnext(peer_tep->te_rq, d_mp); 56070Sstevel@tonic-gate /* 56080Sstevel@tonic-gate * Handle flow control case. This will generate 56090Sstevel@tonic-gate * a t_discon_ind message with reason 0 if there 56100Sstevel@tonic-gate * is data queued on the write side. 56110Sstevel@tonic-gate */ 56120Sstevel@tonic-gate TL_QENABLE(peer_tep); 56130Sstevel@tonic-gate } else if (IS_COTSORD(peer_tep) && 56140Sstevel@tonic-gate peer_tep->te_state == TS_WREQ_ORDREL) { 56150Sstevel@tonic-gate /* 56160Sstevel@tonic-gate * Sent an ordrel_ind. We send a discon with 56170Sstevel@tonic-gate * with error 0 to inform that the peer is gone. 56180Sstevel@tonic-gate */ 56190Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 56205240Snordmark SL_TRACE|SL_ERROR, 56215240Snordmark "tl_co_unconnect: discon in state %d", 56225240Snordmark tep->te_state)); 56230Sstevel@tonic-gate tl_discon_ind(peer_tep, 0); 56240Sstevel@tonic-gate } else { 56250Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 56265240Snordmark SL_TRACE|SL_ERROR, 56275240Snordmark "tl_co_unconnect: state %d", tep->te_state)); 56280Sstevel@tonic-gate tl_discon_ind(peer_tep, ECONNRESET); 56290Sstevel@tonic-gate } 56300Sstevel@tonic-gate 56310Sstevel@tonic-gate discon_peer: 56320Sstevel@tonic-gate /* 56330Sstevel@tonic-gate * Disconnect cross-pointers only for close 56340Sstevel@tonic-gate */ 56350Sstevel@tonic-gate if (tep->te_closing) { 56360Sstevel@tonic-gate peer_tep = tep->te_conp; 56370Sstevel@tonic-gate TL_REMOVE_PEER(peer_tep->te_conp); 56380Sstevel@tonic-gate TL_REMOVE_PEER(tep->te_conp); 56390Sstevel@tonic-gate } 56400Sstevel@tonic-gate } 56410Sstevel@tonic-gate } 56420Sstevel@tonic-gate 56430Sstevel@tonic-gate /* 56440Sstevel@tonic-gate * Note: The following routine does not recover from allocb() 56450Sstevel@tonic-gate * failures 56460Sstevel@tonic-gate * The reason should be from the <sys/errno.h> space. 56470Sstevel@tonic-gate */ 56480Sstevel@tonic-gate static void 56490Sstevel@tonic-gate tl_discon_ind(tl_endpt_t *tep, uint32_t reason) 56500Sstevel@tonic-gate { 56510Sstevel@tonic-gate mblk_t *d_mp; 56520Sstevel@tonic-gate 56530Sstevel@tonic-gate if (tep->te_closing) 56540Sstevel@tonic-gate return; 56550Sstevel@tonic-gate 56560Sstevel@tonic-gate /* 56570Sstevel@tonic-gate * flush the queues. 56580Sstevel@tonic-gate */ 56590Sstevel@tonic-gate flushq(tep->te_rq, FLUSHDATA); 56600Sstevel@tonic-gate (void) putnextctl1(tep->te_rq, M_FLUSH, FLUSHRW); 56610Sstevel@tonic-gate 56620Sstevel@tonic-gate /* 56630Sstevel@tonic-gate * send discon ind 56640Sstevel@tonic-gate */ 56650Sstevel@tonic-gate d_mp = tl_discon_ind_alloc(reason, tep->te_seqno); 56660Sstevel@tonic-gate if (! 
d_mp) { 56670Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 56685240Snordmark "tl_discon_ind:allocb failure")); 56690Sstevel@tonic-gate return; 56700Sstevel@tonic-gate } 56710Sstevel@tonic-gate tep->te_state = TS_IDLE; 56720Sstevel@tonic-gate putnext(tep->te_rq, d_mp); 56730Sstevel@tonic-gate } 56740Sstevel@tonic-gate 56750Sstevel@tonic-gate /* 56760Sstevel@tonic-gate * Note: The following routine does not recover from allocb() 56770Sstevel@tonic-gate * failures 56780Sstevel@tonic-gate * The reason should be from the <sys/errno.h> space. 56790Sstevel@tonic-gate */ 56800Sstevel@tonic-gate static mblk_t * 56810Sstevel@tonic-gate tl_discon_ind_alloc(uint32_t reason, t_scalar_t seqnum) 56820Sstevel@tonic-gate { 56830Sstevel@tonic-gate mblk_t *mp; 56840Sstevel@tonic-gate struct T_discon_ind *tdi; 56850Sstevel@tonic-gate 56860Sstevel@tonic-gate if (mp = allocb(sizeof (struct T_discon_ind), BPRI_MED)) { 56870Sstevel@tonic-gate DB_TYPE(mp) = M_PROTO; 56880Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (struct T_discon_ind); 56890Sstevel@tonic-gate tdi = (struct T_discon_ind *)mp->b_rptr; 56900Sstevel@tonic-gate tdi->PRIM_type = T_DISCON_IND; 56910Sstevel@tonic-gate tdi->DISCON_reason = reason; 56920Sstevel@tonic-gate tdi->SEQ_number = seqnum; 56930Sstevel@tonic-gate } 56940Sstevel@tonic-gate return (mp); 56950Sstevel@tonic-gate } 56960Sstevel@tonic-gate 56970Sstevel@tonic-gate 56980Sstevel@tonic-gate /* 56990Sstevel@tonic-gate * Note: The following routine does not recover from allocb() 57000Sstevel@tonic-gate * failures 57010Sstevel@tonic-gate */ 57020Sstevel@tonic-gate static mblk_t * 57030Sstevel@tonic-gate tl_ordrel_ind_alloc(void) 57040Sstevel@tonic-gate { 57050Sstevel@tonic-gate mblk_t *mp; 57060Sstevel@tonic-gate struct T_ordrel_ind *toi; 57070Sstevel@tonic-gate 57080Sstevel@tonic-gate if (mp = allocb(sizeof (struct T_ordrel_ind), BPRI_MED)) { 57090Sstevel@tonic-gate DB_TYPE(mp) = M_PROTO; 57100Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (struct T_ordrel_ind); 57110Sstevel@tonic-gate toi = (struct T_ordrel_ind *)mp->b_rptr; 57120Sstevel@tonic-gate toi->PRIM_type = T_ORDREL_IND; 57130Sstevel@tonic-gate } 57140Sstevel@tonic-gate return (mp); 57150Sstevel@tonic-gate } 57160Sstevel@tonic-gate 57170Sstevel@tonic-gate 57180Sstevel@tonic-gate /* 57190Sstevel@tonic-gate * Lookup the seqno in the list of queued connections. 57200Sstevel@tonic-gate */ 57210Sstevel@tonic-gate static tl_icon_t * 57220Sstevel@tonic-gate tl_icon_find(tl_endpt_t *tep, t_scalar_t seqno) 57230Sstevel@tonic-gate { 57240Sstevel@tonic-gate list_t *l = &tep->te_iconp; 57250Sstevel@tonic-gate tl_icon_t *tip = list_head(l); 57260Sstevel@tonic-gate 57270Sstevel@tonic-gate ASSERT(seqno != 0); 57280Sstevel@tonic-gate 57290Sstevel@tonic-gate for (; tip != NULL && (tip->ti_seqno != seqno); tip = list_next(l, tip)) 57300Sstevel@tonic-gate ; 57310Sstevel@tonic-gate 57320Sstevel@tonic-gate return (tip); 57330Sstevel@tonic-gate } 57340Sstevel@tonic-gate 57350Sstevel@tonic-gate /* 57360Sstevel@tonic-gate * Queue data for a given T_CONN_IND while verifying that redundant 57370Sstevel@tonic-gate * messages, such as a T_ORDREL_IND after a T_DISCON_IND, are not queued. 57380Sstevel@tonic-gate * Used when the originator of the connection closes. 
57390Sstevel@tonic-gate */ 57400Sstevel@tonic-gate static void 57410Sstevel@tonic-gate tl_icon_queuemsg(tl_endpt_t *tep, t_scalar_t seqno, mblk_t *nmp) 57420Sstevel@tonic-gate { 57430Sstevel@tonic-gate tl_icon_t *tip; 57440Sstevel@tonic-gate mblk_t **mpp, *mp; 57450Sstevel@tonic-gate int prim, nprim; 57460Sstevel@tonic-gate 57470Sstevel@tonic-gate if (nmp->b_datap->db_type == M_PROTO) 57480Sstevel@tonic-gate nprim = ((union T_primitives *)nmp->b_rptr)->type; 57490Sstevel@tonic-gate else 57500Sstevel@tonic-gate nprim = -1; /* M_DATA */ 57510Sstevel@tonic-gate 57520Sstevel@tonic-gate tip = tl_icon_find(tep, seqno); 57530Sstevel@tonic-gate if (tip == NULL) { 57540Sstevel@tonic-gate freemsg(nmp); 57550Sstevel@tonic-gate return; 57560Sstevel@tonic-gate } 57570Sstevel@tonic-gate 57580Sstevel@tonic-gate ASSERT(tip->ti_seqno != 0); 57590Sstevel@tonic-gate mpp = &tip->ti_mp; 57600Sstevel@tonic-gate while (*mpp != NULL) { 57610Sstevel@tonic-gate mp = *mpp; 57620Sstevel@tonic-gate 57630Sstevel@tonic-gate if (mp->b_datap->db_type == M_PROTO) 57640Sstevel@tonic-gate prim = ((union T_primitives *)mp->b_rptr)->type; 57650Sstevel@tonic-gate else 57660Sstevel@tonic-gate prim = -1; /* M_DATA */ 57670Sstevel@tonic-gate 57680Sstevel@tonic-gate /* 57690Sstevel@tonic-gate * Allow nothing after a T_DISCON_IND 57700Sstevel@tonic-gate */ 57710Sstevel@tonic-gate if (prim == T_DISCON_IND) { 57720Sstevel@tonic-gate freemsg(nmp); 57730Sstevel@tonic-gate return; 57740Sstevel@tonic-gate } 57750Sstevel@tonic-gate /* 57760Sstevel@tonic-gate * Only allow a T_DISCON_IND after an T_ORDREL_IND 57770Sstevel@tonic-gate */ 57780Sstevel@tonic-gate if (prim == T_ORDREL_IND && nprim != T_DISCON_IND) { 57790Sstevel@tonic-gate freemsg(nmp); 57800Sstevel@tonic-gate return; 57810Sstevel@tonic-gate } 57820Sstevel@tonic-gate mpp = &(mp->b_next); 57830Sstevel@tonic-gate } 57840Sstevel@tonic-gate *mpp = nmp; 57850Sstevel@tonic-gate } 57860Sstevel@tonic-gate 57870Sstevel@tonic-gate /* 57880Sstevel@tonic-gate * Verify if a certain TPI primitive exists on the connind queue. 57890Sstevel@tonic-gate * Use prim -1 for M_DATA. 57900Sstevel@tonic-gate * Return non-zero if found. 57910Sstevel@tonic-gate */ 57920Sstevel@tonic-gate static boolean_t 57930Sstevel@tonic-gate tl_icon_hasprim(tl_endpt_t *tep, t_scalar_t seqno, t_scalar_t prim) 57940Sstevel@tonic-gate { 57950Sstevel@tonic-gate tl_icon_t *tip = tl_icon_find(tep, seqno); 57960Sstevel@tonic-gate boolean_t found = B_FALSE; 57970Sstevel@tonic-gate 57980Sstevel@tonic-gate if (tip != NULL) { 57990Sstevel@tonic-gate mblk_t *mp; 58000Sstevel@tonic-gate for (mp = tip->ti_mp; !found && mp != NULL; mp = mp->b_next) { 58010Sstevel@tonic-gate found = (DB_TYPE(mp) == M_PROTO && 58020Sstevel@tonic-gate ((union T_primitives *)mp->b_rptr)->type == prim); 58030Sstevel@tonic-gate } 58040Sstevel@tonic-gate } 58050Sstevel@tonic-gate return (found); 58060Sstevel@tonic-gate } 58070Sstevel@tonic-gate 58080Sstevel@tonic-gate /* 58090Sstevel@tonic-gate * Send the b_next mblk chain that has accumulated before the connection 58100Sstevel@tonic-gate * was accepted. Perform the necessary state transitions. 
58110Sstevel@tonic-gate */ 58120Sstevel@tonic-gate static void 58130Sstevel@tonic-gate tl_icon_sendmsgs(tl_endpt_t *tep, mblk_t **mpp) 58140Sstevel@tonic-gate { 58150Sstevel@tonic-gate mblk_t *mp; 58160Sstevel@tonic-gate union T_primitives *primp; 58170Sstevel@tonic-gate 58180Sstevel@tonic-gate if (tep->te_closing) { 58190Sstevel@tonic-gate tl_icon_freemsgs(mpp); 58200Sstevel@tonic-gate return; 58210Sstevel@tonic-gate } 58220Sstevel@tonic-gate 58230Sstevel@tonic-gate ASSERT(tep->te_state == TS_DATA_XFER); 58240Sstevel@tonic-gate ASSERT(tep->te_rq->q_first == NULL); 58250Sstevel@tonic-gate 58260Sstevel@tonic-gate while ((mp = *mpp) != NULL) { 58270Sstevel@tonic-gate *mpp = mp->b_next; 58280Sstevel@tonic-gate mp->b_next = NULL; 58290Sstevel@tonic-gate 58300Sstevel@tonic-gate ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO)); 58310Sstevel@tonic-gate switch (DB_TYPE(mp)) { 58320Sstevel@tonic-gate default: 58330Sstevel@tonic-gate freemsg(mp); 58340Sstevel@tonic-gate break; 58350Sstevel@tonic-gate case M_DATA: 58360Sstevel@tonic-gate putnext(tep->te_rq, mp); 58370Sstevel@tonic-gate break; 58380Sstevel@tonic-gate case M_PROTO: 58390Sstevel@tonic-gate primp = (union T_primitives *)mp->b_rptr; 58400Sstevel@tonic-gate switch (primp->type) { 58410Sstevel@tonic-gate case T_UNITDATA_IND: 58420Sstevel@tonic-gate case T_DATA_IND: 58430Sstevel@tonic-gate case T_OPTDATA_IND: 58440Sstevel@tonic-gate case T_EXDATA_IND: 58450Sstevel@tonic-gate putnext(tep->te_rq, mp); 58460Sstevel@tonic-gate break; 58470Sstevel@tonic-gate case T_ORDREL_IND: 58480Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ORDREL_IND, 58495240Snordmark tep->te_state); 58500Sstevel@tonic-gate putnext(tep->te_rq, mp); 58510Sstevel@tonic-gate break; 58520Sstevel@tonic-gate case T_DISCON_IND: 58530Sstevel@tonic-gate tep->te_state = TS_IDLE; 58540Sstevel@tonic-gate putnext(tep->te_rq, mp); 58550Sstevel@tonic-gate break; 58560Sstevel@tonic-gate default: 58570Sstevel@tonic-gate #ifdef DEBUG 58580Sstevel@tonic-gate cmn_err(CE_PANIC, 58595240Snordmark "tl_icon_sendmsgs: unknown primitive"); 58600Sstevel@tonic-gate #endif /* DEBUG */ 58610Sstevel@tonic-gate freemsg(mp); 58620Sstevel@tonic-gate break; 58630Sstevel@tonic-gate } 58640Sstevel@tonic-gate break; 58650Sstevel@tonic-gate } 58660Sstevel@tonic-gate } 58670Sstevel@tonic-gate } 58680Sstevel@tonic-gate 58690Sstevel@tonic-gate /* 58700Sstevel@tonic-gate * Free the b_next mblk chain that has accumulated before the connection 58710Sstevel@tonic-gate * was accepted. 58720Sstevel@tonic-gate */ 58730Sstevel@tonic-gate static void 58740Sstevel@tonic-gate tl_icon_freemsgs(mblk_t **mpp) 58750Sstevel@tonic-gate { 58760Sstevel@tonic-gate mblk_t *mp; 58770Sstevel@tonic-gate 58780Sstevel@tonic-gate while ((mp = *mpp) != NULL) { 58790Sstevel@tonic-gate *mpp = mp->b_next; 58800Sstevel@tonic-gate mp->b_next = NULL; 58810Sstevel@tonic-gate freemsg(mp); 58820Sstevel@tonic-gate } 58830Sstevel@tonic-gate } 58840Sstevel@tonic-gate 58850Sstevel@tonic-gate /* 58860Sstevel@tonic-gate * Send M_ERROR 58870Sstevel@tonic-gate * Note: assumes caller ensured enough space in mp or enough 58880Sstevel@tonic-gate * memory available. 
Does not attempt recovery from allocb() 58890Sstevel@tonic-gate * failures 58900Sstevel@tonic-gate */ 58910Sstevel@tonic-gate 58920Sstevel@tonic-gate static void 58930Sstevel@tonic-gate tl_merror(queue_t *wq, mblk_t *mp, int error) 58940Sstevel@tonic-gate { 58950Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr; 58960Sstevel@tonic-gate 58970Sstevel@tonic-gate if (tep->te_closing) { 58980Sstevel@tonic-gate freemsg(mp); 58990Sstevel@tonic-gate return; 59000Sstevel@tonic-gate } 59010Sstevel@tonic-gate 59020Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 59035240Snordmark SL_TRACE|SL_ERROR, 59047240Srh87107 "tl_merror: tep=%p, err=%d", (void *)tep, error)); 59050Sstevel@tonic-gate 59060Sstevel@tonic-gate /* 59070Sstevel@tonic-gate * flush all messages on queue. we are shutting 59080Sstevel@tonic-gate * the stream down on fatal error 59090Sstevel@tonic-gate */ 59100Sstevel@tonic-gate flushq(wq, FLUSHALL); 59110Sstevel@tonic-gate if (IS_COTS(tep)) { 59120Sstevel@tonic-gate /* connection oriented - unconnect endpoints */ 59130Sstevel@tonic-gate tl_co_unconnect(tep); 59140Sstevel@tonic-gate } 59150Sstevel@tonic-gate if (mp->b_cont) { 59160Sstevel@tonic-gate freemsg(mp->b_cont); 59170Sstevel@tonic-gate mp->b_cont = NULL; 59180Sstevel@tonic-gate } 59190Sstevel@tonic-gate 59200Sstevel@tonic-gate if ((MBLKSIZE(mp) < 1) || (DB_REF(mp) > 1)) { 59210Sstevel@tonic-gate freemsg(mp); 59220Sstevel@tonic-gate mp = allocb(1, BPRI_HI); 59230Sstevel@tonic-gate if (!mp) { 59240Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 59255240Snordmark SL_TRACE|SL_ERROR, 59265240Snordmark "tl_merror:M_PROTO: out of memory")); 59270Sstevel@tonic-gate return; 59280Sstevel@tonic-gate } 59290Sstevel@tonic-gate } 59300Sstevel@tonic-gate if (mp) { 59310Sstevel@tonic-gate DB_TYPE(mp) = M_ERROR; 59320Sstevel@tonic-gate mp->b_rptr = DB_BASE(mp); 59330Sstevel@tonic-gate *mp->b_rptr = (char)error; 59340Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (char); 59350Sstevel@tonic-gate qreply(wq, mp); 59360Sstevel@tonic-gate } else { 59370Sstevel@tonic-gate (void) putnextctl1(tep->te_rq, M_ERROR, error); 59380Sstevel@tonic-gate } 59390Sstevel@tonic-gate } 59400Sstevel@tonic-gate 59410Sstevel@tonic-gate static void 59421676Sjpk tl_fill_option(uchar_t *buf, cred_t *cr, pid_t cpid, int flag, cred_t *pcr) 59430Sstevel@tonic-gate { 59440Sstevel@tonic-gate if (flag & TL_SETCRED) { 59450Sstevel@tonic-gate struct opthdr *opt = (struct opthdr *)buf; 59460Sstevel@tonic-gate tl_credopt_t *tlcred; 59470Sstevel@tonic-gate 59480Sstevel@tonic-gate opt->level = TL_PROT_LEVEL; 59490Sstevel@tonic-gate opt->name = TL_OPT_PEER_CRED; 59500Sstevel@tonic-gate opt->len = (t_uscalar_t)OPTLEN(sizeof (tl_credopt_t)); 59510Sstevel@tonic-gate 59520Sstevel@tonic-gate tlcred = (tl_credopt_t *)(opt + 1); 59530Sstevel@tonic-gate tlcred->tc_uid = crgetuid(cr); 59540Sstevel@tonic-gate tlcred->tc_gid = crgetgid(cr); 59550Sstevel@tonic-gate tlcred->tc_ruid = crgetruid(cr); 59560Sstevel@tonic-gate tlcred->tc_rgid = crgetrgid(cr); 59570Sstevel@tonic-gate tlcred->tc_suid = crgetsuid(cr); 59580Sstevel@tonic-gate tlcred->tc_sgid = crgetsgid(cr); 59590Sstevel@tonic-gate tlcred->tc_ngroups = crgetngroups(cr); 59600Sstevel@tonic-gate } else if (flag & TL_SETUCRED) { 59610Sstevel@tonic-gate struct opthdr *opt = (struct opthdr *)buf; 59620Sstevel@tonic-gate 59630Sstevel@tonic-gate opt->level = TL_PROT_LEVEL; 59640Sstevel@tonic-gate opt->name = TL_OPT_PEER_UCRED; 59650Sstevel@tonic-gate opt->len = (t_uscalar_t)OPTLEN(ucredsize); 
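/*
 * The ucred_t payload that follows the option header is filled in by
 * the cred2ucred() call below; cr and cpid describe the sending
 * process, while pcr is the receiving endpoint's credential
 * (peer_tep->te_credp at the tl_unitdata() call site above).
 */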
59660Sstevel@tonic-gate
59671676Sjpk (void) cred2ucred(cr, cpid, (void *)(opt + 1), pcr);
59680Sstevel@tonic-gate } else {
59690Sstevel@tonic-gate struct T_opthdr *topt = (struct T_opthdr *)buf;
59700Sstevel@tonic-gate ASSERT(flag & TL_SOCKUCRED);
59710Sstevel@tonic-gate
59720Sstevel@tonic-gate topt->level = SOL_SOCKET;
59730Sstevel@tonic-gate topt->name = SCM_UCRED;
59740Sstevel@tonic-gate topt->len = ucredsize + sizeof (*topt);
59750Sstevel@tonic-gate topt->status = 0;
59761676Sjpk (void) cred2ucred(cr, cpid, (void *)(topt + 1), pcr);
59770Sstevel@tonic-gate }
59780Sstevel@tonic-gate }
59790Sstevel@tonic-gate
59800Sstevel@tonic-gate /* ARGSUSED */
59810Sstevel@tonic-gate static int
59820Sstevel@tonic-gate tl_default_opt(queue_t *wq, int level, int name, uchar_t *ptr)
59830Sstevel@tonic-gate {
59840Sstevel@tonic-gate /* no default value processed in protocol specific code currently */
59850Sstevel@tonic-gate return (-1);
59860Sstevel@tonic-gate }
59870Sstevel@tonic-gate
59880Sstevel@tonic-gate /* ARGSUSED */
59890Sstevel@tonic-gate static int
59900Sstevel@tonic-gate tl_get_opt(queue_t *wq, int level, int name, uchar_t *ptr)
59910Sstevel@tonic-gate {
59920Sstevel@tonic-gate int len;
59930Sstevel@tonic-gate tl_endpt_t *tep;
59940Sstevel@tonic-gate int *valp;
59950Sstevel@tonic-gate
59960Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr;
59970Sstevel@tonic-gate
59980Sstevel@tonic-gate len = 0;
59990Sstevel@tonic-gate
60000Sstevel@tonic-gate /*
60010Sstevel@tonic-gate * Assumes: option level and name sanity checks are done elsewhere
60020Sstevel@tonic-gate */
60030Sstevel@tonic-gate
60040Sstevel@tonic-gate switch (level) {
60050Sstevel@tonic-gate case SOL_SOCKET:
60060Sstevel@tonic-gate if (! IS_SOCKET(tep))
60070Sstevel@tonic-gate break;
60080Sstevel@tonic-gate switch (name) {
60090Sstevel@tonic-gate case SO_RECVUCRED:
60100Sstevel@tonic-gate len = sizeof (int);
60110Sstevel@tonic-gate valp = (int *)ptr;
60120Sstevel@tonic-gate *valp = (tep->te_flag & TL_SOCKUCRED) != 0;
60130Sstevel@tonic-gate break;
60140Sstevel@tonic-gate default:
60150Sstevel@tonic-gate break;
60160Sstevel@tonic-gate }
60170Sstevel@tonic-gate break;
60180Sstevel@tonic-gate case TL_PROT_LEVEL:
60190Sstevel@tonic-gate switch (name) {
60200Sstevel@tonic-gate case TL_OPT_PEER_CRED:
60210Sstevel@tonic-gate case TL_OPT_PEER_UCRED:
60220Sstevel@tonic-gate /*
60230Sstevel@tonic-gate * This option is not supposed to be retrieved directly.
60240Sstevel@tonic-gate * It is only sent in T_CONN_{IND,CON} and T_UNITDATA_IND
60250Sstevel@tonic-gate * when certain internal flags are set by other options.
60260Sstevel@tonic-gate * Direct retrieval is always designed to fail (be ignored)
60270Sstevel@tonic-gate * for this option.
60280Sstevel@tonic-gate */
60290Sstevel@tonic-gate break;
60300Sstevel@tonic-gate }
60310Sstevel@tonic-gate }
60320Sstevel@tonic-gate return (len);
60330Sstevel@tonic-gate }
60340Sstevel@tonic-gate
60350Sstevel@tonic-gate /* ARGSUSED */
60360Sstevel@tonic-gate static int
60370Sstevel@tonic-gate tl_set_opt(
60380Sstevel@tonic-gate queue_t *wq,
60390Sstevel@tonic-gate uint_t mgmt_flags,
60400Sstevel@tonic-gate int level,
60410Sstevel@tonic-gate int name,
60420Sstevel@tonic-gate uint_t inlen,
60430Sstevel@tonic-gate uchar_t *invalp,
60440Sstevel@tonic-gate uint_t *outlenp,
60450Sstevel@tonic-gate uchar_t *outvalp,
60460Sstevel@tonic-gate void *thisdg_attrs,
60470Sstevel@tonic-gate cred_t *cr,
60480Sstevel@tonic-gate mblk_t *mblk)
60490Sstevel@tonic-gate {
60500Sstevel@tonic-gate int error;
60510Sstevel@tonic-gate tl_endpt_t *tep;
60520Sstevel@tonic-gate
60530Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr;
60540Sstevel@tonic-gate
60550Sstevel@tonic-gate error = 0; /* NOERROR */
60560Sstevel@tonic-gate
60570Sstevel@tonic-gate /*
60580Sstevel@tonic-gate * Assumes: option level and name sanity checks are done elsewhere
60590Sstevel@tonic-gate */
60600Sstevel@tonic-gate
60610Sstevel@tonic-gate switch (level) {
60620Sstevel@tonic-gate case SOL_SOCKET:
60630Sstevel@tonic-gate if (! IS_SOCKET(tep)) {
60640Sstevel@tonic-gate error = EINVAL;
60650Sstevel@tonic-gate break;
60660Sstevel@tonic-gate }
60670Sstevel@tonic-gate /*
60680Sstevel@tonic-gate * TBD: fill in other AF_UNIX socket options and then stop
60690Sstevel@tonic-gate * returning error.
60700Sstevel@tonic-gate */
60710Sstevel@tonic-gate switch (name) {
60720Sstevel@tonic-gate case SO_RECVUCRED:
60730Sstevel@tonic-gate /*
60740Sstevel@tonic-gate * We only support this for datagram sockets;
60750Sstevel@tonic-gate * getpeerucred handles the connection oriented
60760Sstevel@tonic-gate * transports.
60770Sstevel@tonic-gate */
60780Sstevel@tonic-gate if (! IS_CLTS(tep)) {
60790Sstevel@tonic-gate error = EINVAL;
60800Sstevel@tonic-gate break;
60810Sstevel@tonic-gate }
60820Sstevel@tonic-gate if (*(int *)invalp == 0)
60830Sstevel@tonic-gate tep->te_flag &= ~TL_SOCKUCRED;
60840Sstevel@tonic-gate else
60850Sstevel@tonic-gate tep->te_flag |= TL_SOCKUCRED;
60860Sstevel@tonic-gate break;
60870Sstevel@tonic-gate default:
60880Sstevel@tonic-gate error = EINVAL;
60890Sstevel@tonic-gate break;
60900Sstevel@tonic-gate }
60910Sstevel@tonic-gate break;
60920Sstevel@tonic-gate case TL_PROT_LEVEL:
60930Sstevel@tonic-gate switch (name) {
60940Sstevel@tonic-gate case TL_OPT_PEER_CRED:
60950Sstevel@tonic-gate case TL_OPT_PEER_UCRED:
60960Sstevel@tonic-gate /*
60970Sstevel@tonic-gate * This option is not supposed to be set directly.
60980Sstevel@tonic-gate * Its value is initialized for each endpoint at
60990Sstevel@tonic-gate * driver open time.
61000Sstevel@tonic-gate * Direct setting is always designed to fail for this
61010Sstevel@tonic-gate * option.
			 */
			(void) (STRLOG(TL_ID, tep->te_minor, 1,
			    SL_TRACE|SL_ERROR,
			    "tl_set_opt: option is not supported"));
			error = EPROTO;
			break;
		}
	}
	return (error);
}


/*
 * qtimeout() callback: the allocation-retry delay has expired, so allow the
 * write queue to be scheduled again.
 */
static void
tl_timer(void *arg)
{
	queue_t *wq = arg;
	tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;

	ASSERT(tep);

	tep->te_timoutid = 0;

	enableok(wq);
	/*
	 * Note: we could call wsrv directly here and save a context switch.
	 * Consider this change when qtimeout (not timeout) is active.
	 */
	qenable(wq);
}

/*
 * qbufcall() callback: a buffer of the requested size can now be allocated,
 * so allow the write queue to be scheduled again.
 */
static void
tl_buffer(void *arg)
{
	queue_t *wq = arg;
	tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;

	ASSERT(tep);

	tep->te_bufcid = 0;
	tep->te_nowsrv = B_FALSE;

	enableok(wq);
	/*
	 * Note: we could call wsrv directly here and save a context switch.
	 * Consider this change when qbufcall (not bufcall) is active.
	 */
	qenable(wq);
}

/*
 * Handle an allocation failure: put "mp" back at the head of the write
 * queue, disable the queue and arrange for a qbufcall()/qtimeout() retry.
 */
static void
tl_memrecover(queue_t *wq, mblk_t *mp, size_t size)
{
	tl_endpt_t *tep;

	tep = (tl_endpt_t *)wq->q_ptr;

	if (tep->te_closing) {
		freemsg(mp);
		return;
	}
	noenable(wq);

	(void) insq(wq, wq->q_first, mp);

	if (tep->te_bufcid || tep->te_timoutid) {
		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
		    "tl_memrecover:recover %p pending", (void *)wq));
		return;
	}

	if (!(tep->te_bufcid = qbufcall(wq, size, BPRI_MED, tl_buffer, wq))) {
		tep->te_timoutid = qtimeout(wq, tl_timer, wq,
		    drv_usectohz(TL_BUFWAIT));
	}
}

/*
 * Free a pending connection indication entry (tl_icon_t) and drop the
 * reference it holds on the associated endpoint.
 */
static void
tl_freetip(tl_endpt_t *tep, tl_icon_t *tip)
{
	ASSERT(tip->ti_seqno != 0);

	if (tip->ti_mp != NULL) {
		tl_icon_freemsgs(&tip->ti_mp);
		tip->ti_mp = NULL;
	}
	if (tip->ti_tep != NULL) {
		tl_refrele(tip->ti_tep);
		tip->ti_tep = NULL;
	}
	list_remove(&tep->te_iconp, tip);
	kmem_free(tip, sizeof (tl_icon_t));
	tep->te_nicon--;
}

/*
 * Remove address from address hash.
 */
static void
tl_addr_unbind(tl_endpt_t *tep)
{
	tl_endpt_t *elp;

	if (tep->te_flag & TL_ADDRHASHED) {
		if (IS_SOCKET(tep)) {
			(void) mod_hash_remove(tep->te_addrhash,
			    (mod_hash_key_t)tep->te_vp,
			    (mod_hash_val_t *)&elp);
			tep->te_vp = (void *)(uintptr_t)tep->te_minor;
			tep->te_magic = SOU_MAGIC_IMPLICIT;
		} else {
			(void) mod_hash_remove(tep->te_addrhash,
			    (mod_hash_key_t)&tep->te_ap,
			    (mod_hash_val_t *)&elp);
			(void) kmem_free(tep->te_abuf, tep->te_alen);
			tep->te_alen = -1;
			tep->te_abuf = NULL;
		}
		tep->te_flag &= ~TL_ADDRHASHED;
	}
}
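
/*
 * Illustrative note (not driver code): the SO_RECVUCRED handling above is
 * what allows an AF_UNIX datagram consumer to receive SCM_UCRED control
 * messages carrying the sender's credentials.  The sketch below shows one
 * way a userland program might use it; the function name, buffer sizes and
 * omitted error handling are illustrative assumptions, not part of this
 * driver.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <ucred.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *
 *	static void
 *	recv_with_ucred(int fd)		// fd: bound AF_UNIX SOCK_DGRAM socket
 *	{
 *		int on = 1;
 *		char data[512], cbuf[512];
 *		struct iovec iov;
 *		struct msghdr msg;
 *		struct cmsghdr *cmsg;
 *
 *		// Ask the transport to attach the sender's credentials to
 *		// each datagram (see the SO_RECVUCRED cases above).
 *		(void) setsockopt(fd, SOL_SOCKET, SO_RECVUCRED, &on,
 *		    sizeof (on));
 *
 *		iov.iov_base = data;
 *		iov.iov_len = sizeof (data);
 *		(void) memset(&msg, 0, sizeof (msg));
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof (cbuf);
 *
 *		if (recvmsg(fd, &msg, 0) < 0)
 *			return;
 *
 *		// Walk the control messages looking for SCM_UCRED.
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *		    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == SOL_SOCKET &&
 *			    cmsg->cmsg_type == SCM_UCRED) {
 *				ucred_t *uc = (ucred_t *)CMSG_DATA(cmsg);
 *				(void) printf("sender euid %d pid %d\n",
 *				    (int)ucred_geteuid(uc),
 *				    (int)ucred_getpid(uc));
 *			}
 *		}
 *	}
 */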