/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Multithreaded STREAMS Local Transport Provider.
 *
 * OVERVIEW
 * ========
 *
 * This driver provides TLI as well as socket semantics.  It provides
 * connectionless, connection oriented, and connection oriented with orderly
 * release transports for TLI and sockets.  Each transport type has a separate
 * name space (i.e. it is not possible to connect from a socket to a TLI
 * endpoint) - this removes any name space conflicts when binding to socket
 * style transport addresses.
 *
 * NOTE: There is one exception: socket ticots and ticotsord transports share
 * the same namespace.  In fact, sockets always use the ticotsord type
 * transport.
 *
 * The driver mode is specified during open() by the minor number used for
 * the open.
 *
 * The sockets in addition have the following semantic differences:
 *	No support for passing up credentials (TL_SET[U]CRED).
 *
 *	Options are passed through transparently on T_CONN_REQ to T_CONN_IND,
 *	from T_UNITDATA_REQ to T_UNITDATA_IND, and from T_OPTDATA_REQ to
 *	T_OPTDATA_IND.
 *
 *	The T_CONN_CON is generated when processing the T_CONN_REQ, i.e. before
 *	a T_CONN_RES is received from the acceptor.  This means that a socket
 *	connect will complete before the peer has called accept.
 *
 *
 * MULTITHREADING
 * ==============
 *
 * The driver does not use STREAMS protection mechanisms.  Instead it uses a
 * generic "serializer" abstraction.  Most of the operations are executed
 * behind the serializer and are essentially single-threaded.  All functions
 * executed behind the same serializer are strictly serialized.  So if one
 * thread calls serializer_enter(serializer, foo, mp1, arg1); and another
 * thread calls serializer_enter(serializer, bar, mp2, arg1); then (depending
 * on which one was called first) the actual sequence will be foo(mp1, arg1);
 * bar(mp2, arg1); or bar(mp2, arg1); foo(mp1, arg1); but foo() and bar() will
 * never run at the same time.
 *
 * Connectionless transports use a single serializer per transport type (one
 * for TLI and one for sockets).  Connection-oriented transports use
 * finer-grained serializers.
 *
 * All COTS-type endpoints start their life with private serializers.  During
 * connection request processing the endpoint serializer is switched to the
 * listener's serializer and the rest of T_CONN_REQ processing is done on the
 * listener serializer.  During T_CONN_RES processing the eager serializer is
 * switched from the listener to the acceptor serializer and after that point
 * all processing for eager and acceptor happens on this serializer.  To avoid
 * races with endpoint closes while its serializer may be changing, closes are
 * blocked while serializers are manipulated.
 *
 * References accounting
 * ---------------------
 *
 * Endpoints are reference counted and freed when the last reference is
 * dropped.  Functions within the serializer may access the endpoint state
 * even after the endpoint has closed.  te_closing being set on the endpoint
 * indicates that the endpoint has entered its close routine.
 *
 * One reference is held for each opened endpoint instance.  The reference
 * counter is incremented when the endpoint is linked to another endpoint and
 * decremented when the link disappears.  It is also incremented when the
 * endpoint is found by a hash table lookup.  This increment is atomic with
 * the lookup itself and happens while the hash table read lock is held.
 *
 * Close synchronization
 * ---------------------
 *
 * During close the endpoint is marked as closing using the te_closing flag.
 * It is usually enough to check the te_closing flag since all other state
 * changes happen after this flag is set and the close has entered the
 * serializer.  Immediately after setting the te_closing flag tl_close()
 * enters the serializer and waits until the callback finishes.  This allows
 * all functions called within the serializer to simply check te_closing
 * without any locks.
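 *
 * As an illustration only (a minimal sketch; there is no function of this
 * name in the driver), a serialized callback typically looks like:
 *
 *	static void
 *	tl_example_ser(mblk_t *mp, tl_endpt_t *tep)
 *	{
 *		if (tep->te_closing) {
 *			freemsg(mp);
 *			return;
 *		}
 *		... effectively single-threaded processing of mp ...
 *	}
 *
 * The te_closing check needs no lock because tl_close() sets the flag before
 * entering the serializer and then waits for its serialized callback to run.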
 *
 * Serializer management.
 * ---------------------
 *
 * For COTS transports serializers are created when the endpoint is constructed
 * and destroyed when the endpoint is destructed.  CLTS transports use global
 * serializers - one for sockets and one for TLI.
 *
 * COTS serializers have separate reference counts to deal with several
 * endpoints sharing the same serializer.  There is a subtle problem related
 * to serializer destruction.  The serializer should never be destroyed by any
 * function executed inside the serializer.  This means that close has to wait
 * till all serializer activity for this endpoint is finished before it can
 * drop the last reference on the endpoint (which may as well free the
 * serializer).  This is only relevant for COTS transports, which manage
 * serializers dynamically.  For CLTS transports close may complete without
 * waiting for all serializer activity to finish since the serializer is only
 * destroyed at driver detach time.
 *
 * COTS endpoints keep track of the number of outstanding requests on the
 * serializer for the endpoint.  The code handling accept() avoids changing
 * the client serializer if it has any pending messages on the serializer and
 * instead moves the acceptor to the listener's serializer.
 *
 *
 * Use of hash tables
 * ------------------
 *
 * The driver uses the modhash hash table implementation.  Each transport uses
 * two hash tables - one for finding endpoints by acceptor ID and another one
 * for finding endpoints by address.  For sockets TICOTS and TICOTSORD share
 * the same pair of hash tables since sockets only use TICOTSORD.
 *
 * All hash table lookups increment a reference count for returned endpoints,
 * so we may safely check the endpoint state even when the endpoint is removed
 * from the hash by another thread immediately after it is found.
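 *
 * A rough sketch of the lookup-with-refhold pattern (the key construction is
 * omitted here and differs between the address and acceptor-ID hashes):
 *
 *	tl_endpt_t *peer = NULL;
 *
 *	if (mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)key,
 *	    (mod_hash_val_t *)&peer, tl_find_callback) != 0)
 *		peer = NULL;
 *
 * tl_find_callback() takes the reference while the hash read lock is still
 * held, so a non-NULL result is refheld and must later be released with
 * tl_refrele().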
 *
 *
 * CLOSE processing
 * ================
 *
 * The driver enters the serializer twice on close().  The close sequence is
 * the following:
 *
 * 1) Wait until closing is safe (te_closewait becomes zero).
 *	This step is needed to prevent close during serializer switches.  In
 *	most cases (close happening after connection establishment)
 *	te_closewait is zero.
 * 2) Set te_closing.
 * 3) Call tl_close_ser() within the serializer and wait for it to complete.
 *
 *	tl_close_ser() simply marks the endpoint and wakes up the waiting
 *	tl_close().  It also needs to clear write-side q_next pointers - this
 *	should be done before qprocsoff().
 *
 *	This synchronous serializer entry during close is needed to ensure that
 *	the queue is valid everywhere inside the serializer.
 *
 *	Note that in many cases close will execute tl_close_ser() synchronously,
 *	so it will not wait at all.
 *
 * 4) Call qprocsoff().
 * 5) Call tl_close_finish_ser() within the serializer and wait for it to
 *    complete (for COTS transports).  For CLTS transports there is no wait.
 *
 *	tl_close_finish_ser() finishes the close process and wakes up the
 *	waiting close if there is any.
 *
 *	Note that in most cases close will enter tl_close_finish_ser()
 *	synchronously and will not wait at all.
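 *
 * In outline (simplified; the real tl_close() also deals with te_closewait,
 * reference counts and the CLTS vs COTS differences), the sequence is roughly:
 *
 *	tep->te_closing = B_TRUE;
 *	tl_serializer_enter(tep, tl_close_ser, &tep->te_closemp);
 *	... wait until tl_close_ser() has run ...
 *	qprocsoff(rq);
 *	tl_serializer_enter(tep, tl_close_finish_ser, &tep->te_closemp);
 *	... for COTS, wait until tl_close_finish_ser() has run ...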
 *
 *
 * Flow Control
 * ============
 *
 * The driver implements both read and write side service routines.  No one
 * calls putq() on the read queue.  The read side service routine tl_rsrv() is
 * called when the read side stream is back-enabled.  It enters the serializer
 * synchronously (waits till serializer processing is complete).  Within the
 * serializer it back-enables all endpoints blocked by the queue for
 * connectionless transports and enables write side service processing for the
 * peer for connection-oriented transports.
 *
 * Read and write side service routines use special mblk-sized space in the
 * endpoint structure to enter the perimeter.
 *
 * Write-side flow control
 * -----------------------
 *
 * Write side flow control is a bit tricky.  The driver needs to deal with two
 * message queues - the explicit STREAMS message queue maintained by
 * putq()/getq()/putbq() and the implicit queue within the serializer.  These
 * two queues should be synchronized to preserve message ordering and should
 * maintain a single order determined by the order in which messages enter
 * tl_wput().  In order to maintain the ordering between these two queues the
 * STREAMS queue is only manipulated within the serializer, so the ordering is
 * provided by the serializer.
 *
 * Functions called from tl_wsrv() sometimes may call putbq().  To immediately
 * stop any further processing of the STREAMS message queues the code calling
 * putbq() also sets the te_nowsrv flag in the endpoint.  The write side
 * service processing stops when the flag is set.
 *
 * The tl_wsrv() function enters the serializer synchronously and waits for it
 * to complete.  The serializer call-back tl_wsrv_ser() either drains all
 * messages on the STREAMS queue or terminates when it notices the te_nowsrv
 * flag set.  Note that the maximum number of messages processed by
 * tl_wsrv_ser() is always bounded by the number of messages on the STREAMS
 * queue at the time tl_wsrv_ser() is entered.  Any new messages may only
 * appear on the STREAMS queue from another serialized entry, which can't
 * happen in parallel.  This guarantees that tl_wsrv_ser() completes in bounded
 * time (there is no risk of it draining forever while a writer places new
 * messages on the STREAMS queue).
 *
 * Note that a closing endpoint never sets te_nowsrv and never calls putbq().
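 *
 * As a sketch of the convention (see the TL_PUTBQ and TL_PUTQ macros below),
 * a serialized write-side function that finds its peer flow controlled does
 * roughly the following, where peer_rq stands for the peer's read queue:
 *
 *	if (!canputnext(peer_rq)) {
 *		TL_PUTBQ(tep, mp);
 *		return;
 *	}
 *
 * TL_PUTBQ() sets te_nowsrv and requeues the message, and tl_wsrv_ser() stops
 * draining the STREAMS queue as soon as it sees te_nowsrv set.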
 *
 *
 * Unix Domain Sockets
 * ===================
 *
 * The driver knows the structure of Unix Domain socket addresses and treats
 * them differently from generic TLI addresses.  For sockets implicit binds
 * are requested by setting SOU_MAGIC_IMPLICIT in the soua_magic part of the
 * address instead of using an address length of zero.  Explicit binds specify
 * SOU_MAGIC_EXPLICIT as magic.
 *
 * For implicit binds we always use the minor number as the soua_vp part of
 * the address and avoid any hash table lookups.  This saves two hash table
 * lookups per anonymous bind.
 *
 * For explicit addresses we hash the vnode pointer instead of hashing the
 * full-scale address+zone+length.  Hashing by pointer is more efficient than
 * hashing by the full address.
 *
 * For unix domain sockets the te_ap is always pointing to the te_uxaddr part
 * of the tep structure, so it should never be freed.
 *
 * Also for sockets the driver always uses the minor number as the acceptor id.
 *
 * TPI VIOLATIONS
 * --------------
 *
 * This driver violates TPI in several respects for Unix Domain Sockets:
 *
 * 1) It treats O_T_BIND_REQ as T_BIND_REQ and refuses the bind if an explicit
 *    bind is requested and the endpoint is already in use.  There is no point
 *    in generating an unused address since this address will be rejected by
 *    sockfs anyway.  For implicit binds it always generates a new address
 *    (sets soua_vp to its minor number).
 *
 * 2) It always uses the minor number as the acceptor ID and never uses the
 *    queue pointer.  It is ok since sockets get the acceptor ID from the
 *    T_CAPABILITY_REQ message and they do not use the queue pointer.
 *
 * 3) For listener sockets the usual sequence is to issue bind() with zero
 *    backlog followed by listen().  The listen() should be issued with a
 *    non-zero backlog, so sotpi_listen() issues an unbind request followed by
 *    a bind request to the same address but with a non-zero qlen value.  Both
 *    tl_bind() and tl_unbind() require a write lock on the hash table to
 *    insert/remove the address.  The driver does not remove the address from
 *    the hash for endpoints that are bound to the explicit address and have a
 *    backlog of zero.  During T_BIND_REQ processing, if the address requested
 *    is equal to the address the endpoint already has, it updates the backlog
 *    without reinserting the address in the hash table.  This optimization
 *    avoids two hash table updates for each listener created.  It also avoids
 *    the problem of a "stolen" address when another listener may use the same
 *    address between the unbind and bind and suddenly listen() fails because
 *    the address is in use even though the bind() succeeded.
 *
 *
 * CONNECTIONLESS TRANSPORTS
 * =========================
 *
 * Connectionless transports all share the same serializer (one for TLI and
 * one for sockets).  Functions executing behind the serializer can check or
 * modify the state of any endpoint.
 *
 * When endpoint X talks to another endpoint Y it caches the pointer to Y in
 * the te_lastep field.  The next time X talks to some address A it checks
 * whether A is the same as Y's address and, if it is, there is no need to
 * look up Y.  If the address is different or the state of Y is not appropriate
 * (e.g. closed or not idle) X does a lookup using tl_find_peer() and caches
 * the new address.
 * NOTE: tl_find_peer() never returns a closing endpoint and it places a
 * refhold on the endpoint found.
 *
 * During close of endpoint Y it doesn't try to remove itself from other
 * endpoints' caches.  They will detect that Y is gone and will search for the
 * peer endpoint again.
 *
 * Flow Control Handling.
 * ----------------------
 *
 * Each connectionless endpoint keeps a list of endpoints which are
 * flow-controlled by its queue.  It also keeps a pointer to the queue which
 * flow-controls itself.  Whenever flow control releases for endpoint X it
 * enables all queues from the list.  During close it also back-enables
 * everyone in the list.  If X is flow-controlled when it is closing, it
 * removes itself from the peer's list.
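 *
 * A condensed sketch of the peer-caching logic on the connectionless send
 * path (the checks in the real tl_unitdata() are more involved; "destaddr"
 * here stands for the destination address taken from the T_UNITDATA_REQ):
 *
 *	peer = tep->te_lastep;
 *	if (peer == NULL || peer->te_closing ||
 *	    !tl_eqaddr(&peer->te_ap, &destaddr)) {
 *		if (peer != NULL)
 *			tl_refrele(peer);
 *		peer = tl_find_peer(tep, &destaddr);
 *		tep->te_lastep = peer;
 *	}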
 *
 * DATA STRUCTURES
 * ===============
 *
 * Each endpoint is represented by the tl_endpt_t structure which keeps all
 * the endpoint state.  For connection-oriented transports it keeps a list of
 * pending connections (tl_icon_t).  For connectionless transports it keeps a
 * list of endpoints flow controlled by this one.
 *
 * Each transport type is represented by a per-transport data structure
 * tl_transport_state_t.  It contains a pointer to an acceptor ID hash and the
 * endpoint address hash tables for each transport.  It also contains a
 * pointer to the transport serializer for connectionless transports.
 *
 * Each endpoint keeps a link to its transport structure, so the code can find
 * all per-transport information quickly.
 */

#include	<sys/types.h>
#include	<sys/inttypes.h>
#include	<sys/stream.h>
#include	<sys/stropts.h>
#define	_SUN_TPI_VERSION 2
#include	<sys/tihdr.h>
#include	<sys/strlog.h>
#include	<sys/debug.h>
#include	<sys/cred.h>
#include	<sys/errno.h>
#include	<sys/kmem.h>
#include	<sys/id_space.h>
#include	<sys/modhash.h>
#include	<sys/mkdev.h>
#include	<sys/tl.h>
#include	<sys/stat.h>
#include	<sys/conf.h>
#include	<sys/modctl.h>
#include	<sys/strsun.h>
#include	<sys/socket.h>
#include	<sys/socketvar.h>
#include	<sys/sysmacros.h>
#include	<sys/xti_xtiopt.h>
#include	<sys/ddi.h>
#include	<sys/sunddi.h>
#include	<sys/zone.h>
#include	<inet/common.h>	/* typedef int (*pfi_t)() for inet/optcom.h */
#include	<inet/optcom.h>
#include	<sys/strsubr.h>
#include	<sys/ucred.h>
#include	<sys/suntpi.h>
#include	<sys/list.h>
#include	<sys/serializer.h>

/*
 * TBD List
 * 14. Eliminate state changes through table
 * 16. AF_UNIX socket options
 * 17. connect() for ticlts
 * 18. support for "netstat" to show AF_UNIX plus TLI local
 *	transport connections
 * 21. sanity check to flushing on sending M_ERROR
 */

/*
 * CONSTANT DECLARATIONS
 * --------------------
 */

/*
 * Local declarations
 */
#define	NEXTSTATE(EV, ST)	ti_statetbl[EV][ST]

#define	BADSEQNUM	(-1)	/* initial seq number used by T_DISCON_IND */
#define	TL_BUFWAIT	(10000)	/* usecs to wait for allocb buffer timeout */
#define	TL_TIDUSZ (64*1024)	/* tidu size when "strmsgsz" is unlimited (0) */
/*
 * Hash tables size.
 */
#define	TL_HASH_SIZE 311

/*
 * Definitions for module_info
 */
#define	TL_ID		(104)		/* module ID number */
#define	TL_NAME		"tl"		/* module name */
#define	TL_MINPSZ	(0)		/* min packet size */
#define	TL_MAXPSZ	INFPSZ		/* max packet size ZZZ */
#define	TL_HIWAT	(16*1024)	/* hi water mark */
#define	TL_LOWAT	(256)		/* lo water mark */
/*
 * Definition of minor numbers/modes for new transport provider modes.
 * We view the socket use as a separate mode to get a separate name space.
 */
#define	TL_TICOTS	0	/* connection oriented transport */
#define	TL_TICOTSORD	1	/* COTS w/ orderly release */
#define	TL_TICLTS	2	/* connectionless transport */
#define	TL_UNUSED	3
#define	TL_SOCKET	4	/* Socket */
#define	TL_SOCK_COTS	(TL_SOCKET|TL_TICOTS)
#define	TL_SOCK_COTSORD	(TL_SOCKET|TL_TICOTSORD)
#define	TL_SOCK_CLTS	(TL_SOCKET|TL_TICLTS)

#define	TL_MINOR_MASK	0x7
#define	TL_MINOR_START	(TL_TICLTS + 1)

/*
 * LOCAL MACROS
 */
#define	T_ALIGN(p)	P2ROUNDUP((p), sizeof (t_scalar_t))

/*
 * EXTERNAL VARIABLE DECLARATIONS
 * -----------------------------
 */
/*
 * state table defined in the OS space.c
 */
extern	char ti_statetbl[TE_NOEVENTS][TS_NOSTATES];

/*
 * STREAMS DRIVER ENTRY POINTS PROTOTYPES
 */
static int tl_open(queue_t *, dev_t *, int, int, cred_t *);
static int tl_close(queue_t *, int, cred_t *);
static void tl_wput(queue_t *, mblk_t *);
static void tl_wsrv(queue_t *);
static void tl_rsrv(queue_t *);

static int tl_attach(dev_info_t *, ddi_attach_cmd_t);
static int tl_detach(dev_info_t *, ddi_detach_cmd_t);
static int tl_info(dev_info_t *, ddi_info_cmd_t, void *, void **);


/*
 * GLOBAL DATA STRUCTURES AND VARIABLES
 * -----------------------------------
 */

/*
 * Table representing database of all options managed by T_SVR4_OPTMGMT_REQ.
 * For now, we only manage the SO_RECVUCRED option but we also have
 * harmless dummy options to make things work with some common code we access.
 */
opdes_t	tl_opt_arr[] = {
	/* The SO_TYPE is needed for the hack below */
	{
		SO_TYPE,
		SOL_SOCKET,
		OA_R,
		OA_R,
		OP_NP,
		0,
		sizeof (t_scalar_t),
		0
	},
	{
		SO_RECVUCRED,
		SOL_SOCKET,
		OA_RW,
		OA_RW,
		OP_NP,
		0,
		sizeof (int),
		0
	}
};

/*
 * Table of all supported levels
 * Note: Some levels (e.g. XTI_GENERIC) may be valid but may not have
 * any supported options so we need this info separately.
 *
 * This is needed only for topmost tpi providers.
 */
optlevel_t	tl_valid_levels_arr[] = {
	XTI_GENERIC,
	SOL_SOCKET,
	TL_PROT_LEVEL
};

#define	TL_VALID_LEVELS_CNT	A_CNT(tl_valid_levels_arr)
/*
 * Current upper bound on the amount of space needed to return all options.
 * Additional options with data size of sizeof(long) are handled automatically.
 * Others need to be handled by hand.
 */
#define	TL_MAX_OPT_BUF_LEN						\
		((A_CNT(tl_opt_arr) << 2) +				\
		(A_CNT(tl_opt_arr) * sizeof (struct opthdr)) +		\
		+ 64 + sizeof (struct T_optmgmt_ack))

#define	TL_OPT_ARR_CNT	A_CNT(tl_opt_arr)

/*
 * transport addr structure
 */
typedef struct tl_addr {
	zoneid_t	ta_zoneid;		/* Zone scope of address */
	t_scalar_t	ta_alen;		/* length of abuf */
	void		*ta_abuf;		/* the addr itself */
} tl_addr_t;

/*
 * Refcounted version of serializer.
 */
typedef struct tl_serializer {
	uint_t		ts_refcnt;
	serializer_t	*ts_serializer;
} tl_serializer_t;
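
/*
 * A rough sketch of how the wrapper's lifetime is managed (the actual
 * tl_serializer_refhold()/tl_serializer_refrele() implementations may differ
 * in detail):
 *
 *	refhold:	atomic_inc_32(&s->ts_refcnt);
 *	refrele:	if (atomic_dec_32_nv(&s->ts_refcnt) == 0) {
 *				serializer_destroy(s->ts_serializer);
 *				kmem_free(s, sizeof (tl_serializer_t));
 *			}
 *
 * Every endpoint sharing the serializer holds one reference; the last release
 * happens only after all serializer activity for the endpoint has finished
 * (see the serializer management notes above).
 */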

/*
 * Per-transport state (each transport type has a separate one).
 */
typedef struct tl_transport_state {
	char		*tr_name;
	minor_t		tr_minor;
	uint32_t	tr_defaddr;
	mod_hash_t	*tr_ai_hash;
	mod_hash_t	*tr_addr_hash;
	tl_serializer_t	*tr_serializer;
} tl_transport_state_t;

#define	TL_DFADDR 0x1000

static tl_transport_state_t tl_transports[] = {
	{ "ticots", TL_TICOTS, TL_DFADDR, NULL, NULL, NULL },
	{ "ticotsord", TL_TICOTSORD, TL_DFADDR, NULL, NULL, NULL },
	{ "ticlts", TL_TICLTS, TL_DFADDR, NULL, NULL, NULL },
	{ "undefined", TL_UNUSED, TL_DFADDR, NULL, NULL, NULL },
	{ "sticots", TL_SOCK_COTS, TL_DFADDR, NULL, NULL, NULL },
	{ "sticotsord", TL_SOCK_COTSORD, TL_DFADDR, NULL, NULL, NULL },
	{ "sticlts", TL_SOCK_CLTS, TL_DFADDR, NULL, NULL, NULL }
};

#define	TL_MAXTRANSPORT A_CNT(tl_transports)

struct tl_endpt;
typedef struct tl_endpt tl_endpt_t;

typedef void (tlproc_t)(mblk_t *, tl_endpt_t *);

/*
 * Data structure used to represent pending connects.
 * Records enough information so that the connecting peer can close
 * before the connection gets accepted.
 */
typedef struct tl_icon {
	list_node_t	ti_node;
	struct tl_endpt *ti_tep;	/* NULL if peer has already closed */
	mblk_t		*ti_mp;		/* b_next list of data + ordrel_ind */
	t_scalar_t	ti_seqno;	/* Sequence number */
} tl_icon_t;

typedef struct so_ux_addr soux_addr_t;
#define	TL_SOUX_ADDRLEN sizeof (soux_addr_t)

/*
 * Maximum number of unaccepted connection indications allowed per listener.
 */
#define	TL_MAXQLEN	4096
int tl_maxqlen = TL_MAXQLEN;
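
/*
 * tl_maxqlen is a boot-time tunable; if a larger backlog limit is needed it
 * can be overridden from /etc/system, e.g. (illustrative value only):
 *
 *	set tl:tl_maxqlen = 8192
 */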

/*
 * transport endpoint structure
 */
struct tl_endpt {
	queue_t		*te_rq;		/* stream read queue */
	queue_t		*te_wq;		/* stream write queue */
	uint32_t	te_refcnt;
	int32_t		te_state;	/* TPI state of endpoint */
	minor_t		te_minor;	/* minor number */
#define	te_seqno	te_minor
	uint_t		te_flag;	/* flag field */
	boolean_t	te_nowsrv;
	tl_serializer_t	*te_ser;	/* Serializer to use */
#define	te_serializer	te_ser->ts_serializer

	soux_addr_t	te_uxaddr;	/* Socket address */
#define	te_magic	te_uxaddr.soua_magic
#define	te_vp		te_uxaddr.soua_vp
	tl_addr_t	te_ap;		/* addr bound to this endpt */
#define	te_zoneid	te_ap.ta_zoneid
#define	te_alen		te_ap.ta_alen
#define	te_abuf		te_ap.ta_abuf

	tl_transport_state_t *te_transport;
#define	te_addrhash	te_transport->tr_addr_hash
#define	te_aihash	te_transport->tr_ai_hash
#define	te_defaddr	te_transport->tr_defaddr
	cred_t		*te_credp;	/* endpoint user credentials */
	mod_hash_hndl_t	te_hash_hndl;	/* Handle for address hash */

	/*
	 * State specific for connection-oriented and connectionless transports.
	 */
	union {
		/* Connection-oriented state. */
		struct {
			t_uscalar_t _te_nicon;	/* count of conn requests */
			t_uscalar_t _te_qlen;	/* max conn requests */
			tl_endpt_t  *_te_oconp;	/* conn request pending */
			tl_endpt_t  *_te_conp;	/* connected endpt */
#ifndef _ILP32
			void	    *_te_pad;
#endif
			list_t	_te_iconp;	/* list of conn ind. pending */
		} _te_cots_state;
		/* Connection-less state. */
		struct {
			tl_endpt_t *_te_lastep;	/* last dest. endpoint */
			tl_endpt_t *_te_flowq;	/* flow controlled on whom */
			list_node_t _te_flows;	/* lists of connections */
			list_t _te_flowlist;	/* Who flowcontrols on me */
		} _te_clts_state;
	} _te_transport_state;
#define	te_nicon	_te_transport_state._te_cots_state._te_nicon
#define	te_qlen		_te_transport_state._te_cots_state._te_qlen
#define	te_oconp	_te_transport_state._te_cots_state._te_oconp
#define	te_conp		_te_transport_state._te_cots_state._te_conp
#define	te_iconp	_te_transport_state._te_cots_state._te_iconp
#define	te_lastep	_te_transport_state._te_clts_state._te_lastep
#define	te_flowq	_te_transport_state._te_clts_state._te_flowq
#define	te_flowlist	_te_transport_state._te_clts_state._te_flowlist
#define	te_flows	_te_transport_state._te_clts_state._te_flows

	bufcall_id_t	te_bufcid;	/* outstanding bufcall id */
	timeout_id_t	te_timoutid;	/* outstanding timeout id */
	pid_t		te_cpid;	/* cached pid of endpoint */
	t_uscalar_t	te_acceptor_id;	/* acceptor id for T_CONN_RES */
	/*
	 * Pieces of the endpoint state needed for closing.
	 */
	kmutex_t	te_closelock;
	kcondvar_t	te_closecv;
	uint8_t		te_closing;	/* The endpoint started closing */
	uint8_t		te_closewait;	/* Wait in close until zero */
	mblk_t		te_closemp;	/* for entering serializer on close */
	mblk_t		te_rsrvmp;	/* for entering serializer on rsrv */
	mblk_t		te_wsrvmp;	/* for entering serializer on wsrv */
	kmutex_t	te_srv_lock;
	kcondvar_t	te_srv_cv;
	uint8_t		te_rsrv_active;	/* Running in tl_rsrv() */
	uint8_t		te_wsrv_active;	/* Running in tl_wsrv() */
	/*
	 * Pieces of the endpoint state needed for serializer transitions.
	 */
	kmutex_t	te_ser_lock;	/* Protects the count below */
	uint_t		te_ser_count;	/* Number of messages on serializer */
};

/*
 * Flag values.  Lower 4 bits specify the transport used.
 * TL_LISTENER, TL_ACCEPTOR, TL_ACCEPTED and TL_EAGER are for debugging only;
 * they make it easier to identify the endpoint.
 */
#define	TL_LISTENER	0x00010	/* the listener endpoint */
#define	TL_ACCEPTOR	0x00020	/* the accepting endpoint */
#define	TL_EAGER	0x00040	/* connecting endpoint */
#define	TL_ACCEPTED	0x00080	/* accepted connection */
#define	TL_SETCRED	0x00100	/* flag to indicate sending of credentials */
#define	TL_SETUCRED	0x00200	/* flag to indicate sending of ucred */
#define	TL_SOCKUCRED	0x00400	/* flag to indicate sending of SCM_UCRED */
#define	TL_ADDRHASHED	0x01000	/* Endpoint address is stored in te_addrhash */
#define	TL_CLOSE_SER	0x10000	/* Endpoint close has entered the serializer */
/*
 * Boolean checks for the endpoint type.
 */
#define	IS_CLTS(x)	(((x)->te_flag & TL_TICLTS) != 0)
#define	IS_COTS(x)	(((x)->te_flag & TL_TICLTS) == 0)
#define	IS_COTSORD(x)	(((x)->te_flag & TL_TICOTSORD) != 0)
#define	IS_SOCKET(x)	(((x)->te_flag & TL_SOCKET) != 0)

/*
 * Certain operations are always used together. These macros reduce the chance
 * of missing a part of a combination.
 */
#define	TL_UNCONNECT(x) { tl_refrele(x); x = NULL; }
#define	TL_REMOVE_PEER(x) { if ((x) != NULL) TL_UNCONNECT(x) }

#define	TL_PUTBQ(x, mp) {			\
	ASSERT(!((x)->te_flag & TL_CLOSE_SER));	\
	(x)->te_nowsrv = B_TRUE;		\
	(void) putbq((x)->te_wq, mp);		\
}

#define	TL_QENABLE(x) { (x)->te_nowsrv = B_FALSE; qenable((x)->te_wq); }
#define	TL_PUTQ(x, mp) { (x)->te_nowsrv = B_FALSE; (void)putq((x)->te_wq, mp); }

/*
 * STREAMS driver glue data structures.
 */
static	struct	module_info	tl_minfo = {
	TL_ID,			/* mi_idnum */
	TL_NAME,		/* mi_idname */
	TL_MINPSZ,		/* mi_minpsz */
	TL_MAXPSZ,		/* mi_maxpsz */
	TL_HIWAT,		/* mi_hiwat */
	TL_LOWAT		/* mi_lowat */
};

static	struct	qinit	tl_rinit = {
	NULL,			/* qi_putp */
	(int (*)())tl_rsrv,	/* qi_srvp */
	tl_open,		/* qi_qopen */
	tl_close,		/* qi_qclose */
	NULL,			/* qi_qadmin */
	&tl_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

static	struct	qinit	tl_winit = {
	(int (*)())tl_wput,	/* qi_putp */
	(int (*)())tl_wsrv,	/* qi_srvp */
	NULL,			/* qi_qopen */
	NULL,			/* qi_qclose */
	NULL,			/* qi_qadmin */
	&tl_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

static	struct streamtab	tlinfo = {
	&tl_rinit,		/* st_rdinit */
	&tl_winit,		/* st_wrinit */
	NULL,			/* st_muxrinit */
	NULL			/* st_muxwrinit */
};

DDI_DEFINE_STREAM_OPS(tl_devops, nulldev, nulldev, tl_attach, tl_detach,
    nulldev, tl_info, D_MP, &tlinfo, ddi_quiesce_not_supported);

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module -- pseudo driver here */
	"TPI Local Transport (tl)",
	&tl_devops,		/* driver ops */
};

/*
 * Module linkage information for the kernel.
 */
static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

/*
 * Templates for response to info request
 * Check sanity of unlimited connect data etc.
 */

#define	TL_CLTS_PROVIDER_FLAG	(XPG4_1|SENDZERO)
#define	TL_COTS_PROVIDER_FLAG	(XPG4_1|SENDZERO)

static struct T_info_ack tl_cots_info_ack =
	{
		T_INFO_ACK,	/* PRIM_type -always T_INFO_ACK */
		T_INFINITE,	/* TSDU size */
		T_INFINITE,	/* ETSDU size */
		T_INFINITE,	/* CDATA_size */
		T_INFINITE,	/* DDATA_size */
		T_INFINITE,	/* ADDR_size */
		T_INFINITE,	/* OPT_size */
		0,		/* TIDU_size - fill at run time */
		T_COTS,		/* SERV_type */
		-1,		/* CURRENT_state */
		TL_COTS_PROVIDER_FLAG	/* PROVIDER_flag */
	};

static struct T_info_ack tl_clts_info_ack =
	{
		T_INFO_ACK,	/* PRIM_type - always T_INFO_ACK */
		0,		/* TSDU_size - fill at run time */
		-2,		/* ETSDU_size -2 => not supported */
		-2,		/* CDATA_size -2 => not supported */
		-2,		/* DDATA_size -2 => not supported */
		-1,		/* ADDR_size -1 => unlimited */
		-1,		/* OPT_size */
		0,		/* TIDU_size - fill at run time */
		T_CLTS,		/* SERV_type */
		-1,		/* CURRENT_state */
		TL_CLTS_PROVIDER_FLAG	/* PROVIDER_flag */
	};

/*
 * private copy of devinfo pointer used in tl_info
 */
static dev_info_t *tl_dip;

/*
 * Endpoints cache.
 */
static kmem_cache_t *tl_cache;
/*
 * Minor number space.
 */
static id_space_t *tl_minors;

/*
 * Default Data Unit size.
 */
static t_scalar_t tl_tidusz;

/*
 * Size of hash tables.
 */
static size_t tl_hash_size = TL_HASH_SIZE;

/*
 * Debug and test variable ONLY. Turn off T_CONN_IND queueing
 * for sockets.
 */
static int tl_disable_early_connect = 0;
static int tl_client_closing_when_accepting;

static int tl_serializer_noswitch;

/*
 * LOCAL FUNCTION PROTOTYPES
 * -------------------------
 */
static boolean_t tl_eqaddr(tl_addr_t *, tl_addr_t *);
static void tl_do_proto(mblk_t *, tl_endpt_t *);
static void tl_do_ioctl(mblk_t *, tl_endpt_t *);
static void tl_do_ioctl_ser(mblk_t *, tl_endpt_t *);
static void tl_error_ack(queue_t *, mblk_t *, t_scalar_t, t_scalar_t,
    t_scalar_t);
static void tl_bind(mblk_t *, tl_endpt_t *);
static void tl_bind_ser(mblk_t *, tl_endpt_t *);
static void tl_ok_ack(queue_t *, mblk_t *mp, t_scalar_t);
static void tl_unbind(mblk_t *, tl_endpt_t *);
static void tl_optmgmt(queue_t *, mblk_t *);
static void tl_conn_req(queue_t *, mblk_t *);
static void tl_conn_req_ser(mblk_t *, tl_endpt_t *);
static void tl_conn_res(mblk_t *, tl_endpt_t *);
static void tl_discon_req(mblk_t *, tl_endpt_t *);
static void tl_capability_req(mblk_t *, tl_endpt_t *);
static void tl_info_req_ser(mblk_t *, tl_endpt_t *);
static void tl_info_req(mblk_t *, tl_endpt_t *);
static void tl_addr_req(mblk_t *, tl_endpt_t *);
static void tl_connected_cots_addr_req(mblk_t *, tl_endpt_t *);
static void tl_data(mblk_t *, tl_endpt_t *);
static void tl_exdata(mblk_t *, tl_endpt_t *);
static void tl_ordrel(mblk_t *, tl_endpt_t *);
static void tl_unitdata(mblk_t *, tl_endpt_t *);
static void tl_unitdata_ser(mblk_t *, tl_endpt_t *);
static void tl_uderr(queue_t *, mblk_t *, t_scalar_t);
static tl_endpt_t *tl_find_peer(tl_endpt_t *, tl_addr_t *);
static tl_endpt_t *tl_sock_find_peer(tl_endpt_t *, struct so_ux_addr *);
static boolean_t tl_get_any_addr(tl_endpt_t *, tl_addr_t *);
static void tl_cl_backenable(tl_endpt_t *);
static void tl_co_unconnect(tl_endpt_t *);
static mblk_t *tl_resizemp(mblk_t *, ssize_t);
static void tl_discon_ind(tl_endpt_t *, uint32_t);
static mblk_t *tl_discon_ind_alloc(uint32_t, t_scalar_t);
static mblk_t *tl_ordrel_ind_alloc(void);
static tl_icon_t *tl_icon_find(tl_endpt_t *, t_scalar_t);
static void tl_icon_queuemsg(tl_endpt_t *, t_scalar_t, mblk_t *);
static boolean_t tl_icon_hasprim(tl_endpt_t *, t_scalar_t, t_scalar_t);
static void tl_icon_sendmsgs(tl_endpt_t *, mblk_t **);
static void tl_icon_freemsgs(mblk_t **);
static void tl_merror(queue_t *, mblk_t *, int);
static void tl_fill_option(uchar_t *, cred_t *, pid_t, int, cred_t *);
static int tl_default_opt(queue_t *, int, int, uchar_t *);
static int tl_get_opt(queue_t *, int, int, uchar_t *);
static int tl_set_opt(queue_t *, uint_t, int, int, uint_t, uchar_t *, uint_t *,
    uchar_t *, void *, cred_t *);
static void tl_memrecover(queue_t *, mblk_t *, size_t);
static void tl_freetip(tl_endpt_t *, tl_icon_t *);
static void tl_free(tl_endpt_t *);
static int  tl_constructor(void *, void *, int);
static void tl_destructor(void *, void *);
static void tl_find_callback(mod_hash_key_t, mod_hash_val_t);
static tl_serializer_t *tl_serializer_alloc(int);
static void tl_serializer_refhold(tl_serializer_t *);
static void tl_serializer_refrele(tl_serializer_t *);
static void tl_serializer_enter(tl_endpt_t *, tlproc_t, mblk_t *);
static void tl_serializer_exit(tl_endpt_t *);
static boolean_t tl_noclose(tl_endpt_t *);
static void tl_closeok(tl_endpt_t *);
static void tl_refhold(tl_endpt_t *);
static void tl_refrele(tl_endpt_t *);
static int tl_hash_cmp_addr(mod_hash_key_t, mod_hash_key_t);
static uint_t tl_hash_by_addr(void *, mod_hash_key_t);
static void tl_close_ser(mblk_t *, tl_endpt_t *);
static void tl_close_finish_ser(mblk_t *, tl_endpt_t *);
static void tl_wput_data_ser(mblk_t *, tl_endpt_t *);
static void tl_proto_ser(mblk_t *, tl_endpt_t *);
static void tl_putq_ser(mblk_t *, tl_endpt_t *);
static void tl_wput_common_ser(mblk_t *, tl_endpt_t *);
static void tl_wput_ser(mblk_t *, tl_endpt_t *);
static void tl_wsrv_ser(mblk_t *, tl_endpt_t *);
static void tl_rsrv_ser(mblk_t *, tl_endpt_t *);
static void tl_addr_unbind(tl_endpt_t *);

/*
 * Initialize option database object for TL
 */

optdb_obj_t tl_opt_obj = {
	tl_default_opt,		/* TL default value function pointer */
	tl_get_opt,		/* TL get function pointer */
	tl_set_opt,		/* TL set function pointer */
	TL_OPT_ARR_CNT,		/* TL option database count of entries */
	tl_opt_arr,		/* TL option database */
	TL_VALID_LEVELS_CNT,	/* TL valid level count of entries */
	tl_valid_levels_arr	/* TL valid level array */
};

/*
 * Logical operations.
 *
 * IMPLY(X, Y) means that X implies Y, i.e. when X is true, Y
 * should also be true.
 *
 * EQUIV(X, Y) is logical equivalence.  Both X and Y should be true or false
 * at the same time.
 */
#define	IMPLY(X, Y)	(!(X) || (Y))
#define	EQUIV(X, Y)	(IMPLY(X, Y) && IMPLY(Y, X))

/*
 * LOCAL FUNCTIONS AND DRIVER ENTRY POINTS
 * ---------------------------------------
 */

/*
 * Loadable module routines
 */
int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Driver Entry Points and Other routines
 */
static int
tl_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int i;
	char name[32];

	/*
	 * Resume from a checkpoint state.
	 */
	if (cmd == DDI_RESUME)
		return (DDI_SUCCESS);

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Deduce TIDU size to use.  Note: "strmsgsz" being 0 has semantics
	 * that streams message sizes can be unlimited.  We use a defined
	 * constant instead.
	 */
	tl_tidusz = strmsgsz != 0 ? (t_scalar_t)strmsgsz : TL_TIDUSZ;

	/*
	 * Create subdevices for each transport.
9780Sstevel@tonic-gate */ 9790Sstevel@tonic-gate for (i = 0; i < TL_UNUSED; i++) { 9800Sstevel@tonic-gate if (ddi_create_minor_node(devi, 9815240Snordmark tl_transports[i].tr_name, 9825240Snordmark S_IFCHR, tl_transports[i].tr_minor, 9835240Snordmark DDI_PSEUDO, NULL) == DDI_FAILURE) { 9840Sstevel@tonic-gate ddi_remove_minor_node(devi, NULL); 9850Sstevel@tonic-gate return (DDI_FAILURE); 9860Sstevel@tonic-gate } 9870Sstevel@tonic-gate } 9880Sstevel@tonic-gate 9890Sstevel@tonic-gate tl_cache = kmem_cache_create("tl_cache", sizeof (tl_endpt_t), 9900Sstevel@tonic-gate 0, tl_constructor, tl_destructor, NULL, NULL, NULL, 0); 9910Sstevel@tonic-gate 9920Sstevel@tonic-gate if (tl_cache == NULL) { 9930Sstevel@tonic-gate ddi_remove_minor_node(devi, NULL); 9940Sstevel@tonic-gate return (DDI_FAILURE); 9950Sstevel@tonic-gate } 9960Sstevel@tonic-gate 9970Sstevel@tonic-gate tl_minors = id_space_create("tl_minor_space", 9980Sstevel@tonic-gate TL_MINOR_START, MAXMIN32 - TL_MINOR_START + 1); 9990Sstevel@tonic-gate 10000Sstevel@tonic-gate /* 10010Sstevel@tonic-gate * Create ID space for minor numbers 10020Sstevel@tonic-gate */ 10030Sstevel@tonic-gate for (i = 0; i < TL_MAXTRANSPORT; i++) { 10040Sstevel@tonic-gate tl_transport_state_t *t = &tl_transports[i]; 10050Sstevel@tonic-gate 10060Sstevel@tonic-gate if (i == TL_UNUSED) 10070Sstevel@tonic-gate continue; 10080Sstevel@tonic-gate 10090Sstevel@tonic-gate /* Socket COTSORD shares namespace with COTS */ 10100Sstevel@tonic-gate if (i == TL_SOCK_COTSORD) { 10110Sstevel@tonic-gate t->tr_ai_hash = 10120Sstevel@tonic-gate tl_transports[TL_SOCK_COTS].tr_ai_hash; 10130Sstevel@tonic-gate ASSERT(t->tr_ai_hash != NULL); 10140Sstevel@tonic-gate t->tr_addr_hash = 10150Sstevel@tonic-gate tl_transports[TL_SOCK_COTS].tr_addr_hash; 10160Sstevel@tonic-gate ASSERT(t->tr_addr_hash != NULL); 10170Sstevel@tonic-gate continue; 10180Sstevel@tonic-gate } 10190Sstevel@tonic-gate 10200Sstevel@tonic-gate /* 10210Sstevel@tonic-gate * Create hash tables. 
10220Sstevel@tonic-gate */ 10230Sstevel@tonic-gate (void) snprintf(name, sizeof (name), "%s_ai_hash", 10240Sstevel@tonic-gate t->tr_name); 10250Sstevel@tonic-gate #ifdef _ILP32 10260Sstevel@tonic-gate if (i & TL_SOCKET) 10270Sstevel@tonic-gate t->tr_ai_hash = 10280Sstevel@tonic-gate mod_hash_create_idhash(name, tl_hash_size - 1, 10295240Snordmark mod_hash_null_valdtor); 10300Sstevel@tonic-gate else 10310Sstevel@tonic-gate t->tr_ai_hash = 10320Sstevel@tonic-gate mod_hash_create_ptrhash(name, tl_hash_size, 10335240Snordmark mod_hash_null_valdtor, sizeof (queue_t)); 10340Sstevel@tonic-gate #else 10350Sstevel@tonic-gate t->tr_ai_hash = 10360Sstevel@tonic-gate mod_hash_create_idhash(name, tl_hash_size - 1, 10375240Snordmark mod_hash_null_valdtor); 10380Sstevel@tonic-gate #endif /* _ILP32 */ 10390Sstevel@tonic-gate 10400Sstevel@tonic-gate if (i & TL_SOCKET) { 10410Sstevel@tonic-gate (void) snprintf(name, sizeof (name), "%s_sockaddr_hash", 10420Sstevel@tonic-gate t->tr_name); 10430Sstevel@tonic-gate t->tr_addr_hash = mod_hash_create_ptrhash(name, 10440Sstevel@tonic-gate tl_hash_size, mod_hash_null_valdtor, 10450Sstevel@tonic-gate sizeof (uintptr_t)); 10460Sstevel@tonic-gate } else { 10470Sstevel@tonic-gate (void) snprintf(name, sizeof (name), "%s_addr_hash", 10480Sstevel@tonic-gate t->tr_name); 10490Sstevel@tonic-gate t->tr_addr_hash = mod_hash_create_extended(name, 10500Sstevel@tonic-gate tl_hash_size, mod_hash_null_keydtor, 10510Sstevel@tonic-gate mod_hash_null_valdtor, 10520Sstevel@tonic-gate tl_hash_by_addr, NULL, tl_hash_cmp_addr, KM_SLEEP); 10530Sstevel@tonic-gate } 10540Sstevel@tonic-gate 10550Sstevel@tonic-gate /* Create serializer for connectionless transports. */ 10560Sstevel@tonic-gate if (i & TL_TICLTS) 10570Sstevel@tonic-gate t->tr_serializer = tl_serializer_alloc(KM_SLEEP); 10580Sstevel@tonic-gate } 10590Sstevel@tonic-gate 10600Sstevel@tonic-gate tl_dip = devi; 10610Sstevel@tonic-gate 10620Sstevel@tonic-gate return (DDI_SUCCESS); 10630Sstevel@tonic-gate } 10640Sstevel@tonic-gate 10650Sstevel@tonic-gate static int 10660Sstevel@tonic-gate tl_detach(dev_info_t *devi, ddi_detach_cmd_t cmd) 10670Sstevel@tonic-gate { 10680Sstevel@tonic-gate int i; 10690Sstevel@tonic-gate 10700Sstevel@tonic-gate if (cmd == DDI_SUSPEND) 10710Sstevel@tonic-gate return (DDI_SUCCESS); 10720Sstevel@tonic-gate 10730Sstevel@tonic-gate if (cmd != DDI_DETACH) 10740Sstevel@tonic-gate return (DDI_FAILURE); 10750Sstevel@tonic-gate 10760Sstevel@tonic-gate /* 10770Sstevel@tonic-gate * Destroy arenas and hash tables. 
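 *
 * TL_UNUSED and TL_SOCK_COTSORD are skipped below: the former has no
 * tables, and the latter shares its hash tables with TL_SOCK_COTS (see
 * tl_attach() above), so those tables are destroyed only once.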
10780Sstevel@tonic-gate */ 10790Sstevel@tonic-gate for (i = 0; i < TL_MAXTRANSPORT; i++) { 10800Sstevel@tonic-gate tl_transport_state_t *t = &tl_transports[i]; 10810Sstevel@tonic-gate 10820Sstevel@tonic-gate if ((i == TL_UNUSED) || (i == TL_SOCK_COTSORD)) 10830Sstevel@tonic-gate continue; 10840Sstevel@tonic-gate 10850Sstevel@tonic-gate ASSERT(EQUIV(i & TL_TICLTS, t->tr_serializer != NULL)); 10860Sstevel@tonic-gate if (t->tr_serializer != NULL) { 10870Sstevel@tonic-gate tl_serializer_refrele(t->tr_serializer); 10880Sstevel@tonic-gate t->tr_serializer = NULL; 10890Sstevel@tonic-gate } 10900Sstevel@tonic-gate 10910Sstevel@tonic-gate #ifdef _ILP32 10920Sstevel@tonic-gate if (i & TL_SOCKET) 10930Sstevel@tonic-gate mod_hash_destroy_idhash(t->tr_ai_hash); 10940Sstevel@tonic-gate else 10950Sstevel@tonic-gate mod_hash_destroy_ptrhash(t->tr_ai_hash); 10960Sstevel@tonic-gate #else 10970Sstevel@tonic-gate mod_hash_destroy_idhash(t->tr_ai_hash); 10980Sstevel@tonic-gate #endif /* _ILP32 */ 10990Sstevel@tonic-gate t->tr_ai_hash = NULL; 11000Sstevel@tonic-gate if (i & TL_SOCKET) 11010Sstevel@tonic-gate mod_hash_destroy_ptrhash(t->tr_addr_hash); 11020Sstevel@tonic-gate else 11030Sstevel@tonic-gate mod_hash_destroy_hash(t->tr_addr_hash); 11040Sstevel@tonic-gate t->tr_addr_hash = NULL; 11050Sstevel@tonic-gate } 11060Sstevel@tonic-gate 11070Sstevel@tonic-gate kmem_cache_destroy(tl_cache); 11080Sstevel@tonic-gate tl_cache = NULL; 11090Sstevel@tonic-gate id_space_destroy(tl_minors); 11100Sstevel@tonic-gate tl_minors = NULL; 11110Sstevel@tonic-gate ddi_remove_minor_node(devi, NULL); 11120Sstevel@tonic-gate return (DDI_SUCCESS); 11130Sstevel@tonic-gate } 11140Sstevel@tonic-gate 11150Sstevel@tonic-gate /* ARGSUSED */ 11160Sstevel@tonic-gate static int 11170Sstevel@tonic-gate tl_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 11180Sstevel@tonic-gate { 11190Sstevel@tonic-gate 11200Sstevel@tonic-gate int retcode = DDI_FAILURE; 11210Sstevel@tonic-gate 11220Sstevel@tonic-gate switch (infocmd) { 11230Sstevel@tonic-gate 11240Sstevel@tonic-gate case DDI_INFO_DEVT2DEVINFO: 11250Sstevel@tonic-gate if (tl_dip != NULL) { 11260Sstevel@tonic-gate *result = (void *)tl_dip; 11270Sstevel@tonic-gate retcode = DDI_SUCCESS; 11280Sstevel@tonic-gate } 11290Sstevel@tonic-gate break; 11300Sstevel@tonic-gate 11310Sstevel@tonic-gate case DDI_INFO_DEVT2INSTANCE: 11320Sstevel@tonic-gate *result = (void *)0; 11330Sstevel@tonic-gate retcode = DDI_SUCCESS; 11340Sstevel@tonic-gate break; 11350Sstevel@tonic-gate 11360Sstevel@tonic-gate default: 11370Sstevel@tonic-gate break; 11380Sstevel@tonic-gate } 11390Sstevel@tonic-gate return (retcode); 11400Sstevel@tonic-gate } 11410Sstevel@tonic-gate 11420Sstevel@tonic-gate /* 11430Sstevel@tonic-gate * Endpoint reference management. 
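 *
 * Typical caller pattern, shown for illustration only (this is the pattern
 * used by tl_wput() and the *_ser functions below, not a new interface):
 *
 *	tl_refhold(tep);
 *	tl_serializer_enter(tep, tl_proc, mp);
 *	...
 *	tl_serializer_exit(tep);	(from tl_proc, behind the serializer)
 *	tl_refrele(tep);
 *
 * The final tl_refrele() drops te_refcnt to zero and calls tl_free().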
11440Sstevel@tonic-gate */ 11450Sstevel@tonic-gate static void 11460Sstevel@tonic-gate tl_refhold(tl_endpt_t *tep) 11470Sstevel@tonic-gate { 11480Sstevel@tonic-gate atomic_add_32(&tep->te_refcnt, 1); 11490Sstevel@tonic-gate } 11500Sstevel@tonic-gate 11510Sstevel@tonic-gate static void 11520Sstevel@tonic-gate tl_refrele(tl_endpt_t *tep) 11530Sstevel@tonic-gate { 11540Sstevel@tonic-gate ASSERT(tep->te_refcnt != 0); 11550Sstevel@tonic-gate 11560Sstevel@tonic-gate if (atomic_add_32_nv(&tep->te_refcnt, -1) == 0) 11570Sstevel@tonic-gate tl_free(tep); 11580Sstevel@tonic-gate } 11590Sstevel@tonic-gate 11600Sstevel@tonic-gate /*ARGSUSED*/ 11610Sstevel@tonic-gate static int 11620Sstevel@tonic-gate tl_constructor(void *buf, void *cdrarg, int kmflags) 11630Sstevel@tonic-gate { 11640Sstevel@tonic-gate tl_endpt_t *tep = buf; 11650Sstevel@tonic-gate 11660Sstevel@tonic-gate bzero(tep, sizeof (tl_endpt_t)); 11670Sstevel@tonic-gate mutex_init(&tep->te_closelock, NULL, MUTEX_DEFAULT, NULL); 11680Sstevel@tonic-gate cv_init(&tep->te_closecv, NULL, CV_DEFAULT, NULL); 11690Sstevel@tonic-gate mutex_init(&tep->te_srv_lock, NULL, MUTEX_DEFAULT, NULL); 11700Sstevel@tonic-gate cv_init(&tep->te_srv_cv, NULL, CV_DEFAULT, NULL); 11710Sstevel@tonic-gate mutex_init(&tep->te_ser_lock, NULL, MUTEX_DEFAULT, NULL); 11720Sstevel@tonic-gate 11730Sstevel@tonic-gate return (0); 11740Sstevel@tonic-gate } 11750Sstevel@tonic-gate 11760Sstevel@tonic-gate /*ARGSUSED*/ 11770Sstevel@tonic-gate static void 11780Sstevel@tonic-gate tl_destructor(void *buf, void *cdrarg) 11790Sstevel@tonic-gate { 11800Sstevel@tonic-gate tl_endpt_t *tep = buf; 11810Sstevel@tonic-gate 11820Sstevel@tonic-gate mutex_destroy(&tep->te_closelock); 11830Sstevel@tonic-gate cv_destroy(&tep->te_closecv); 11840Sstevel@tonic-gate mutex_destroy(&tep->te_srv_lock); 11850Sstevel@tonic-gate cv_destroy(&tep->te_srv_cv); 11860Sstevel@tonic-gate mutex_destroy(&tep->te_ser_lock); 11870Sstevel@tonic-gate } 11880Sstevel@tonic-gate 11890Sstevel@tonic-gate static void 11900Sstevel@tonic-gate tl_free(tl_endpt_t *tep) 11910Sstevel@tonic-gate { 11920Sstevel@tonic-gate ASSERT(tep->te_refcnt == 0); 11930Sstevel@tonic-gate ASSERT(tep->te_transport != NULL); 11940Sstevel@tonic-gate ASSERT(tep->te_rq == NULL); 11950Sstevel@tonic-gate ASSERT(tep->te_wq == NULL); 11960Sstevel@tonic-gate ASSERT(tep->te_ser != NULL); 11970Sstevel@tonic-gate ASSERT(tep->te_ser_count == 0); 11980Sstevel@tonic-gate ASSERT(! 
(tep->te_flag & TL_ADDRHASHED)); 11990Sstevel@tonic-gate 12000Sstevel@tonic-gate if (IS_SOCKET(tep)) { 12010Sstevel@tonic-gate ASSERT(tep->te_alen == TL_SOUX_ADDRLEN); 12020Sstevel@tonic-gate ASSERT(tep->te_abuf == &tep->te_uxaddr); 12030Sstevel@tonic-gate ASSERT(tep->te_vp == (void *)(uintptr_t)tep->te_minor); 12040Sstevel@tonic-gate ASSERT(tep->te_magic == SOU_MAGIC_IMPLICIT); 12050Sstevel@tonic-gate } else if (tep->te_abuf != NULL) { 12060Sstevel@tonic-gate kmem_free(tep->te_abuf, tep->te_alen); 12070Sstevel@tonic-gate tep->te_alen = -1; /* uninitialized */ 12080Sstevel@tonic-gate tep->te_abuf = NULL; 12090Sstevel@tonic-gate } else { 12100Sstevel@tonic-gate ASSERT(tep->te_alen == -1); 12110Sstevel@tonic-gate } 12120Sstevel@tonic-gate 12130Sstevel@tonic-gate id_free(tl_minors, tep->te_minor); 12140Sstevel@tonic-gate ASSERT(tep->te_credp == NULL); 12150Sstevel@tonic-gate 12160Sstevel@tonic-gate if (tep->te_hash_hndl != NULL) 12170Sstevel@tonic-gate mod_hash_cancel(tep->te_addrhash, &tep->te_hash_hndl); 12180Sstevel@tonic-gate 12190Sstevel@tonic-gate if (IS_COTS(tep)) { 12200Sstevel@tonic-gate TL_REMOVE_PEER(tep->te_conp); 12210Sstevel@tonic-gate TL_REMOVE_PEER(tep->te_oconp); 12220Sstevel@tonic-gate tl_serializer_refrele(tep->te_ser); 12230Sstevel@tonic-gate tep->te_ser = NULL; 12240Sstevel@tonic-gate ASSERT(tep->te_nicon == 0); 12250Sstevel@tonic-gate ASSERT(list_head(&tep->te_iconp) == NULL); 12260Sstevel@tonic-gate } else { 12270Sstevel@tonic-gate ASSERT(tep->te_lastep == NULL); 12280Sstevel@tonic-gate ASSERT(list_head(&tep->te_flowlist) == NULL); 12290Sstevel@tonic-gate ASSERT(tep->te_flowq == NULL); 12300Sstevel@tonic-gate } 12310Sstevel@tonic-gate 12320Sstevel@tonic-gate ASSERT(tep->te_bufcid == 0); 12330Sstevel@tonic-gate ASSERT(tep->te_timoutid == 0); 12340Sstevel@tonic-gate bzero(&tep->te_ap, sizeof (tep->te_ap)); 12350Sstevel@tonic-gate tep->te_acceptor_id = 0; 12360Sstevel@tonic-gate 12370Sstevel@tonic-gate ASSERT(tep->te_closewait == 0); 12380Sstevel@tonic-gate ASSERT(!tep->te_rsrv_active); 12390Sstevel@tonic-gate ASSERT(!tep->te_wsrv_active); 12400Sstevel@tonic-gate tep->te_closing = 0; 12410Sstevel@tonic-gate tep->te_nowsrv = B_FALSE; 12420Sstevel@tonic-gate tep->te_flag = 0; 12430Sstevel@tonic-gate 12440Sstevel@tonic-gate kmem_cache_free(tl_cache, tep); 12450Sstevel@tonic-gate } 12460Sstevel@tonic-gate 12470Sstevel@tonic-gate /* 12480Sstevel@tonic-gate * Allocate/free reference-counted wrappers for serializers. 
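 *
 * Lifetime sketch (illustrative): tl_serializer_alloc() returns a wrapper
 * with ts_refcnt == 1; tl_serializer_refhold() adds a reference for each
 * additional user; the tl_serializer_refrele() call that drops ts_refcnt
 * to zero calls serializer_destroy() and frees the wrapper.
 * Connectionless transports allocate one shared wrapper in tl_attach(),
 * while COTS endpoints allocate private wrappers in tl_open().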
12490Sstevel@tonic-gate */ 12500Sstevel@tonic-gate static tl_serializer_t * 12510Sstevel@tonic-gate tl_serializer_alloc(int flags) 12520Sstevel@tonic-gate { 12530Sstevel@tonic-gate tl_serializer_t *s = kmem_alloc(sizeof (tl_serializer_t), flags); 12540Sstevel@tonic-gate serializer_t *ser; 12550Sstevel@tonic-gate 12560Sstevel@tonic-gate if (s == NULL) 12570Sstevel@tonic-gate return (NULL); 12580Sstevel@tonic-gate 12590Sstevel@tonic-gate ser = serializer_create(flags); 12600Sstevel@tonic-gate 12610Sstevel@tonic-gate if (ser == NULL) { 12620Sstevel@tonic-gate kmem_free(s, sizeof (tl_serializer_t)); 12630Sstevel@tonic-gate return (NULL); 12640Sstevel@tonic-gate } 12650Sstevel@tonic-gate 12660Sstevel@tonic-gate s->ts_refcnt = 1; 12670Sstevel@tonic-gate s->ts_serializer = ser; 12680Sstevel@tonic-gate return (s); 12690Sstevel@tonic-gate } 12700Sstevel@tonic-gate 12710Sstevel@tonic-gate static void 12720Sstevel@tonic-gate tl_serializer_refhold(tl_serializer_t *s) 12730Sstevel@tonic-gate { 12740Sstevel@tonic-gate atomic_add_32(&s->ts_refcnt, 1); 12750Sstevel@tonic-gate } 12760Sstevel@tonic-gate 12770Sstevel@tonic-gate static void 12780Sstevel@tonic-gate tl_serializer_refrele(tl_serializer_t *s) 12790Sstevel@tonic-gate { 12800Sstevel@tonic-gate if (atomic_add_32_nv(&s->ts_refcnt, -1) == 0) { 12810Sstevel@tonic-gate serializer_destroy(s->ts_serializer); 12820Sstevel@tonic-gate kmem_free(s, sizeof (tl_serializer_t)); 12830Sstevel@tonic-gate } 12840Sstevel@tonic-gate } 12850Sstevel@tonic-gate 12860Sstevel@tonic-gate /* 12870Sstevel@tonic-gate * Post a request on the endpoint serializer. For COTS transports keep track of 12880Sstevel@tonic-gate * the number of pending requests. 12890Sstevel@tonic-gate */ 12900Sstevel@tonic-gate static void 12910Sstevel@tonic-gate tl_serializer_enter(tl_endpt_t *tep, tlproc_t tlproc, mblk_t *mp) 12920Sstevel@tonic-gate { 12930Sstevel@tonic-gate if (IS_COTS(tep)) { 12940Sstevel@tonic-gate mutex_enter(&tep->te_ser_lock); 12950Sstevel@tonic-gate tep->te_ser_count++; 12960Sstevel@tonic-gate mutex_exit(&tep->te_ser_lock); 12970Sstevel@tonic-gate } 12980Sstevel@tonic-gate serializer_enter(tep->te_serializer, (srproc_t *)tlproc, mp, tep); 12990Sstevel@tonic-gate } 13000Sstevel@tonic-gate 13010Sstevel@tonic-gate /* 13020Sstevel@tonic-gate * Complete processing the request on the serializer. Decrement the counter for 13030Sstevel@tonic-gate * pending requests for COTS transports. 13040Sstevel@tonic-gate */ 13050Sstevel@tonic-gate static void 13060Sstevel@tonic-gate tl_serializer_exit(tl_endpt_t *tep) 13070Sstevel@tonic-gate { 13080Sstevel@tonic-gate if (IS_COTS(tep)) { 13090Sstevel@tonic-gate mutex_enter(&tep->te_ser_lock); 13100Sstevel@tonic-gate ASSERT(tep->te_ser_count != 0); 13110Sstevel@tonic-gate tep->te_ser_count--; 13120Sstevel@tonic-gate mutex_exit(&tep->te_ser_lock); 13130Sstevel@tonic-gate } 13140Sstevel@tonic-gate } 13150Sstevel@tonic-gate 13160Sstevel@tonic-gate /* 13170Sstevel@tonic-gate * Hash management functions. 13180Sstevel@tonic-gate */ 13190Sstevel@tonic-gate 13200Sstevel@tonic-gate /* 13210Sstevel@tonic-gate * Return TRUE if two addresses are equal, false otherwise. 
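 *
 * Note that equality also requires matching zone IDs, so an identical
 * address bound in two different zones does not compare equal.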
13220Sstevel@tonic-gate */ 13230Sstevel@tonic-gate static boolean_t 13240Sstevel@tonic-gate tl_eqaddr(tl_addr_t *ap1, tl_addr_t *ap2) 13250Sstevel@tonic-gate { 13260Sstevel@tonic-gate return ((ap1->ta_alen > 0) && 13270Sstevel@tonic-gate (ap1->ta_alen == ap2->ta_alen) && 13280Sstevel@tonic-gate (ap1->ta_zoneid == ap2->ta_zoneid) && 13290Sstevel@tonic-gate (bcmp(ap1->ta_abuf, ap2->ta_abuf, ap1->ta_alen) == 0)); 13300Sstevel@tonic-gate } 13310Sstevel@tonic-gate 13320Sstevel@tonic-gate /* 13330Sstevel@tonic-gate * This function is called whenever an endpoint is found in the hash table. 13340Sstevel@tonic-gate */ 13350Sstevel@tonic-gate /* ARGSUSED0 */ 13360Sstevel@tonic-gate static void 13370Sstevel@tonic-gate tl_find_callback(mod_hash_key_t key, mod_hash_val_t val) 13380Sstevel@tonic-gate { 13390Sstevel@tonic-gate tl_refhold((tl_endpt_t *)val); 13400Sstevel@tonic-gate } 13410Sstevel@tonic-gate 13420Sstevel@tonic-gate /* 13430Sstevel@tonic-gate * Address hash function. 13440Sstevel@tonic-gate */ 13450Sstevel@tonic-gate /* ARGSUSED */ 13460Sstevel@tonic-gate static uint_t 13470Sstevel@tonic-gate tl_hash_by_addr(void *hash_data, mod_hash_key_t key) 13480Sstevel@tonic-gate { 13490Sstevel@tonic-gate tl_addr_t *ap = (tl_addr_t *)key; 13500Sstevel@tonic-gate size_t len = ap->ta_alen; 13510Sstevel@tonic-gate uchar_t *p = ap->ta_abuf; 13520Sstevel@tonic-gate uint_t i, g; 13530Sstevel@tonic-gate 13540Sstevel@tonic-gate ASSERT((len > 0) && (p != NULL)); 13550Sstevel@tonic-gate 13560Sstevel@tonic-gate for (i = ap->ta_zoneid; len -- != 0; p++) { 13570Sstevel@tonic-gate i = (i << 4) + (*p); 13580Sstevel@tonic-gate if ((g = (i & 0xf0000000U)) != 0) { 13590Sstevel@tonic-gate i ^= (g >> 24); 13600Sstevel@tonic-gate i ^= g; 13610Sstevel@tonic-gate } 13620Sstevel@tonic-gate } 13630Sstevel@tonic-gate return (i); 13640Sstevel@tonic-gate } 13650Sstevel@tonic-gate 13660Sstevel@tonic-gate /* 13670Sstevel@tonic-gate * This function is used by hash lookups. It compares two generic addresses. 13680Sstevel@tonic-gate */ 13690Sstevel@tonic-gate static int 13700Sstevel@tonic-gate tl_hash_cmp_addr(mod_hash_key_t key1, mod_hash_key_t key2) 13710Sstevel@tonic-gate { 13720Sstevel@tonic-gate #ifdef DEBUG 13730Sstevel@tonic-gate tl_addr_t *ap1 = (tl_addr_t *)key1; 13740Sstevel@tonic-gate tl_addr_t *ap2 = (tl_addr_t *)key2; 13750Sstevel@tonic-gate 13760Sstevel@tonic-gate ASSERT(key1 != NULL); 13770Sstevel@tonic-gate ASSERT(key2 != NULL); 13780Sstevel@tonic-gate 13790Sstevel@tonic-gate ASSERT(ap1->ta_abuf != NULL); 13800Sstevel@tonic-gate ASSERT(ap2->ta_abuf != NULL); 13810Sstevel@tonic-gate ASSERT(ap1->ta_alen > 0); 13820Sstevel@tonic-gate ASSERT(ap2->ta_alen > 0); 13830Sstevel@tonic-gate #endif 13840Sstevel@tonic-gate 13850Sstevel@tonic-gate return (! tl_eqaddr((tl_addr_t *)key1, (tl_addr_t *)key2)); 13860Sstevel@tonic-gate } 13870Sstevel@tonic-gate 13880Sstevel@tonic-gate /* 13890Sstevel@tonic-gate * Prevent endpoint from closing if possible. 13900Sstevel@tonic-gate * Return B_TRUE on success, B_FALSE on failure. 13910Sstevel@tonic-gate */ 13920Sstevel@tonic-gate static boolean_t 13930Sstevel@tonic-gate tl_noclose(tl_endpt_t *tep) 13940Sstevel@tonic-gate { 13950Sstevel@tonic-gate boolean_t rc = B_FALSE; 13960Sstevel@tonic-gate 13970Sstevel@tonic-gate mutex_enter(&tep->te_closelock); 13980Sstevel@tonic-gate if (! 
tep->te_closing) { 13990Sstevel@tonic-gate ASSERT(tep->te_closewait == 0); 14000Sstevel@tonic-gate tep->te_closewait++; 14010Sstevel@tonic-gate rc = B_TRUE; 14020Sstevel@tonic-gate } 14030Sstevel@tonic-gate mutex_exit(&tep->te_closelock); 14040Sstevel@tonic-gate return (rc); 14050Sstevel@tonic-gate } 14060Sstevel@tonic-gate 14070Sstevel@tonic-gate /* 14080Sstevel@tonic-gate * Allow endpoint to close if needed. 14090Sstevel@tonic-gate */ 14100Sstevel@tonic-gate static void 14110Sstevel@tonic-gate tl_closeok(tl_endpt_t *tep) 14120Sstevel@tonic-gate { 14130Sstevel@tonic-gate ASSERT(tep->te_closewait > 0); 14140Sstevel@tonic-gate mutex_enter(&tep->te_closelock); 14150Sstevel@tonic-gate ASSERT(tep->te_closewait == 1); 14160Sstevel@tonic-gate tep->te_closewait--; 14170Sstevel@tonic-gate cv_signal(&tep->te_closecv); 14180Sstevel@tonic-gate mutex_exit(&tep->te_closelock); 14190Sstevel@tonic-gate } 14200Sstevel@tonic-gate 14210Sstevel@tonic-gate /* 14220Sstevel@tonic-gate * STREAMS open entry point. 14230Sstevel@tonic-gate */ 14240Sstevel@tonic-gate /* ARGSUSED */ 14250Sstevel@tonic-gate static int 14260Sstevel@tonic-gate tl_open(queue_t *rq, dev_t *devp, int oflag, int sflag, cred_t *credp) 14270Sstevel@tonic-gate { 14280Sstevel@tonic-gate tl_endpt_t *tep; 14290Sstevel@tonic-gate minor_t minor = getminor(*devp); 14300Sstevel@tonic-gate 14310Sstevel@tonic-gate /* 14320Sstevel@tonic-gate * Driver is called directly. Both CLONEOPEN and MODOPEN 14330Sstevel@tonic-gate * are illegal 14340Sstevel@tonic-gate */ 14350Sstevel@tonic-gate if ((sflag == CLONEOPEN) || (sflag == MODOPEN)) 14360Sstevel@tonic-gate return (ENXIO); 14370Sstevel@tonic-gate 14380Sstevel@tonic-gate if (rq->q_ptr != NULL) 14390Sstevel@tonic-gate return (0); 14400Sstevel@tonic-gate 14410Sstevel@tonic-gate /* Minor number should specify the mode used for the driver. */ 14420Sstevel@tonic-gate if ((minor >= TL_UNUSED)) 14430Sstevel@tonic-gate return (ENXIO); 14440Sstevel@tonic-gate 14450Sstevel@tonic-gate if (oflag & SO_SOCKSTR) { 14460Sstevel@tonic-gate minor |= TL_SOCKET; 14470Sstevel@tonic-gate } 14480Sstevel@tonic-gate 14490Sstevel@tonic-gate tep = kmem_cache_alloc(tl_cache, KM_SLEEP); 14500Sstevel@tonic-gate tep->te_refcnt = 1; 14510Sstevel@tonic-gate tep->te_cpid = curproc->p_pid; 14520Sstevel@tonic-gate rq->q_ptr = WR(rq)->q_ptr = tep; 14530Sstevel@tonic-gate tep->te_state = TS_UNBND; 14540Sstevel@tonic-gate tep->te_credp = credp; 14550Sstevel@tonic-gate crhold(credp); 14560Sstevel@tonic-gate tep->te_zoneid = getzoneid(); 14570Sstevel@tonic-gate 14580Sstevel@tonic-gate tep->te_flag = minor & TL_MINOR_MASK; 14590Sstevel@tonic-gate tep->te_transport = &tl_transports[minor]; 14600Sstevel@tonic-gate 14610Sstevel@tonic-gate /* Allocate a unique minor number for this instance. */ 14620Sstevel@tonic-gate tep->te_minor = (minor_t)id_alloc(tl_minors); 14630Sstevel@tonic-gate 14640Sstevel@tonic-gate /* Reserve hash handle for bind(). 
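 * Reserving the handle here means the address-hash insertion done later
 * at bind time cannot fail for lack of memory; tl_bind() only needs to
 * re-reserve (with mod_hash_reserve_nosleep()) when an endpoint is
 * unbound and then bound again.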
*/ 14650Sstevel@tonic-gate (void) mod_hash_reserve(tep->te_addrhash, &tep->te_hash_hndl); 14660Sstevel@tonic-gate 14670Sstevel@tonic-gate /* Transport-specific initialization */ 14680Sstevel@tonic-gate if (IS_COTS(tep)) { 14690Sstevel@tonic-gate /* Use private serializer */ 14700Sstevel@tonic-gate tep->te_ser = tl_serializer_alloc(KM_SLEEP); 14710Sstevel@tonic-gate 14720Sstevel@tonic-gate /* Create list for pending connections */ 14730Sstevel@tonic-gate list_create(&tep->te_iconp, sizeof (tl_icon_t), 14740Sstevel@tonic-gate offsetof(tl_icon_t, ti_node)); 14750Sstevel@tonic-gate tep->te_qlen = 0; 14760Sstevel@tonic-gate tep->te_nicon = 0; 14770Sstevel@tonic-gate tep->te_oconp = NULL; 14780Sstevel@tonic-gate tep->te_conp = NULL; 14790Sstevel@tonic-gate } else { 14800Sstevel@tonic-gate /* Use shared serializer */ 14810Sstevel@tonic-gate tep->te_ser = tep->te_transport->tr_serializer; 14820Sstevel@tonic-gate bzero(&tep->te_flows, sizeof (list_node_t)); 14830Sstevel@tonic-gate /* Create list for flow control */ 14840Sstevel@tonic-gate list_create(&tep->te_flowlist, sizeof (tl_endpt_t), 14850Sstevel@tonic-gate offsetof(tl_endpt_t, te_flows)); 14860Sstevel@tonic-gate tep->te_flowq = NULL; 14870Sstevel@tonic-gate tep->te_lastep = NULL; 14880Sstevel@tonic-gate 14890Sstevel@tonic-gate } 14900Sstevel@tonic-gate 14910Sstevel@tonic-gate /* Initialize endpoint address */ 14920Sstevel@tonic-gate if (IS_SOCKET(tep)) { 14930Sstevel@tonic-gate /* Socket-specific address handling. */ 14940Sstevel@tonic-gate tep->te_alen = TL_SOUX_ADDRLEN; 14950Sstevel@tonic-gate tep->te_abuf = &tep->te_uxaddr; 14960Sstevel@tonic-gate tep->te_vp = (void *)(uintptr_t)tep->te_minor; 14970Sstevel@tonic-gate tep->te_magic = SOU_MAGIC_IMPLICIT; 14980Sstevel@tonic-gate } else { 14990Sstevel@tonic-gate tep->te_alen = -1; 15000Sstevel@tonic-gate tep->te_abuf = NULL; 15010Sstevel@tonic-gate } 15020Sstevel@tonic-gate 15030Sstevel@tonic-gate /* clone the driver */ 15040Sstevel@tonic-gate *devp = makedevice(getmajor(*devp), tep->te_minor); 15050Sstevel@tonic-gate 15060Sstevel@tonic-gate tep->te_rq = rq; 15070Sstevel@tonic-gate tep->te_wq = WR(rq); 15080Sstevel@tonic-gate 15090Sstevel@tonic-gate #ifdef _ILP32 15100Sstevel@tonic-gate if (IS_SOCKET(tep)) 15110Sstevel@tonic-gate tep->te_acceptor_id = tep->te_minor; 15120Sstevel@tonic-gate else 15130Sstevel@tonic-gate tep->te_acceptor_id = (t_uscalar_t)rq; 15140Sstevel@tonic-gate #else 15150Sstevel@tonic-gate tep->te_acceptor_id = tep->te_minor; 15160Sstevel@tonic-gate #endif /* _ILP32 */ 15170Sstevel@tonic-gate 15180Sstevel@tonic-gate 15190Sstevel@tonic-gate qprocson(rq); 15200Sstevel@tonic-gate 15210Sstevel@tonic-gate /* 15220Sstevel@tonic-gate * Insert acceptor ID in the hash. The AI hash always sleeps on 15230Sstevel@tonic-gate * insertion so insertion can't fail. 
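 *
 * The key is te_acceptor_id: the endpoint's minor number, except for
 * non-socket endpoints on ILP32 kernels where the read queue pointer is
 * used instead (see the _ILP32 block above).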
15240Sstevel@tonic-gate */ 15250Sstevel@tonic-gate (void) mod_hash_insert(tep->te_transport->tr_ai_hash, 15260Sstevel@tonic-gate (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id, 15270Sstevel@tonic-gate (mod_hash_val_t)tep); 15280Sstevel@tonic-gate 15290Sstevel@tonic-gate return (0); 15300Sstevel@tonic-gate } 15310Sstevel@tonic-gate 15320Sstevel@tonic-gate /* ARGSUSED1 */ 15330Sstevel@tonic-gate static int 15340Sstevel@tonic-gate tl_close(queue_t *rq, int flag, cred_t *credp) 15350Sstevel@tonic-gate { 15360Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr; 15370Sstevel@tonic-gate tl_endpt_t *elp = NULL; 15380Sstevel@tonic-gate queue_t *wq = tep->te_wq; 15390Sstevel@tonic-gate int rc; 15400Sstevel@tonic-gate 15410Sstevel@tonic-gate ASSERT(wq == WR(rq)); 15420Sstevel@tonic-gate 15430Sstevel@tonic-gate /* 15440Sstevel@tonic-gate * Remove the endpoint from acceptor hash. 15450Sstevel@tonic-gate */ 15460Sstevel@tonic-gate rc = mod_hash_remove(tep->te_transport->tr_ai_hash, 15470Sstevel@tonic-gate (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id, 15480Sstevel@tonic-gate (mod_hash_val_t *)&elp); 15490Sstevel@tonic-gate ASSERT(rc == 0 && tep == elp); 15500Sstevel@tonic-gate if ((rc != 0) || (tep != elp)) { 15510Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 15525240Snordmark SL_TRACE|SL_ERROR, 15535240Snordmark "tl_close:inconsistency in AI hash")); 15540Sstevel@tonic-gate } 15550Sstevel@tonic-gate 15560Sstevel@tonic-gate /* 15570Sstevel@tonic-gate * Wait till close is safe, then mark endpoint as closing. 15580Sstevel@tonic-gate */ 15590Sstevel@tonic-gate mutex_enter(&tep->te_closelock); 15600Sstevel@tonic-gate while (tep->te_closewait) 15610Sstevel@tonic-gate cv_wait(&tep->te_closecv, &tep->te_closelock); 15620Sstevel@tonic-gate tep->te_closing = B_TRUE; 15630Sstevel@tonic-gate /* 15640Sstevel@tonic-gate * Will wait for the serializer part of the close to finish, so set 15650Sstevel@tonic-gate * te_closewait now. 15660Sstevel@tonic-gate */ 15670Sstevel@tonic-gate tep->te_closewait = 1; 15680Sstevel@tonic-gate tep->te_nowsrv = B_FALSE; 15690Sstevel@tonic-gate mutex_exit(&tep->te_closelock); 15700Sstevel@tonic-gate 15710Sstevel@tonic-gate /* 15720Sstevel@tonic-gate * tl_close_ser doesn't drop reference, so no need to tl_refhold. 15730Sstevel@tonic-gate * It is safe because close will wait for tl_close_ser to finish. 15740Sstevel@tonic-gate */ 15750Sstevel@tonic-gate tl_serializer_enter(tep, tl_close_ser, &tep->te_closemp); 15760Sstevel@tonic-gate 15770Sstevel@tonic-gate /* 15780Sstevel@tonic-gate * Wait for the first phase of close to complete before qprocsoff(). 15790Sstevel@tonic-gate */ 15800Sstevel@tonic-gate mutex_enter(&tep->te_closelock); 15810Sstevel@tonic-gate while (tep->te_closewait) 15820Sstevel@tonic-gate cv_wait(&tep->te_closecv, &tep->te_closelock); 15830Sstevel@tonic-gate mutex_exit(&tep->te_closelock); 15840Sstevel@tonic-gate 15850Sstevel@tonic-gate qprocsoff(rq); 15860Sstevel@tonic-gate 15870Sstevel@tonic-gate if (tep->te_bufcid) { 15880Sstevel@tonic-gate qunbufcall(rq, tep->te_bufcid); 15890Sstevel@tonic-gate tep->te_bufcid = 0; 15900Sstevel@tonic-gate } 15910Sstevel@tonic-gate if (tep->te_timoutid) { 15920Sstevel@tonic-gate (void) quntimeout(rq, tep->te_timoutid); 15930Sstevel@tonic-gate tep->te_timoutid = 0; 15940Sstevel@tonic-gate } 15950Sstevel@tonic-gate 15960Sstevel@tonic-gate /* 15970Sstevel@tonic-gate * Finish close behind serializer. 
15980Sstevel@tonic-gate * 15990Sstevel@tonic-gate * For a CLTS endpoint increase a refcount and continue close processing 16000Sstevel@tonic-gate * with serializer protection. This processing may happen asynchronously 16010Sstevel@tonic-gate * with the completion of tl_close(). 16020Sstevel@tonic-gate * 16030Sstevel@tonic-gate * Fot a COTS endpoint wait before destroying tep since the serializer 16040Sstevel@tonic-gate * may go away together with tep and we need to destroy serializer 16050Sstevel@tonic-gate * outside of serializer context. 16060Sstevel@tonic-gate */ 16070Sstevel@tonic-gate ASSERT(tep->te_closewait == 0); 16080Sstevel@tonic-gate if (IS_COTS(tep)) 16090Sstevel@tonic-gate tep->te_closewait = 1; 16100Sstevel@tonic-gate else 16110Sstevel@tonic-gate tl_refhold(tep); 16120Sstevel@tonic-gate 16130Sstevel@tonic-gate tl_serializer_enter(tep, tl_close_finish_ser, &tep->te_closemp); 16140Sstevel@tonic-gate 16150Sstevel@tonic-gate /* 16160Sstevel@tonic-gate * For connection-oriented transports wait for all serializer activity 16170Sstevel@tonic-gate * to settle down. 16180Sstevel@tonic-gate */ 16190Sstevel@tonic-gate if (IS_COTS(tep)) { 16200Sstevel@tonic-gate mutex_enter(&tep->te_closelock); 16210Sstevel@tonic-gate while (tep->te_closewait) 16220Sstevel@tonic-gate cv_wait(&tep->te_closecv, &tep->te_closelock); 16230Sstevel@tonic-gate mutex_exit(&tep->te_closelock); 16240Sstevel@tonic-gate } 16250Sstevel@tonic-gate 16260Sstevel@tonic-gate crfree(tep->te_credp); 16270Sstevel@tonic-gate tep->te_credp = NULL; 16280Sstevel@tonic-gate tep->te_wq = NULL; 16290Sstevel@tonic-gate tl_refrele(tep); 16300Sstevel@tonic-gate /* 16310Sstevel@tonic-gate * tep is likely to be destroyed now, so can't reference it any more. 16320Sstevel@tonic-gate */ 16330Sstevel@tonic-gate 16340Sstevel@tonic-gate rq->q_ptr = wq->q_ptr = NULL; 16350Sstevel@tonic-gate return (0); 16360Sstevel@tonic-gate } 16370Sstevel@tonic-gate 16380Sstevel@tonic-gate /* 16390Sstevel@tonic-gate * First phase of close processing done behind the serializer. 16400Sstevel@tonic-gate * 16410Sstevel@tonic-gate * Do not drop the reference in the end - tl_close() wants this reference to 16420Sstevel@tonic-gate * stay. 16430Sstevel@tonic-gate */ 16440Sstevel@tonic-gate /* ARGSUSED0 */ 16450Sstevel@tonic-gate static void 16460Sstevel@tonic-gate tl_close_ser(mblk_t *mp, tl_endpt_t *tep) 16470Sstevel@tonic-gate { 16480Sstevel@tonic-gate ASSERT(tep->te_closing); 16490Sstevel@tonic-gate ASSERT(tep->te_closewait == 1); 16500Sstevel@tonic-gate ASSERT(!(tep->te_flag & TL_CLOSE_SER)); 16510Sstevel@tonic-gate 16520Sstevel@tonic-gate tep->te_flag |= TL_CLOSE_SER; 16530Sstevel@tonic-gate 16540Sstevel@tonic-gate /* 16550Sstevel@tonic-gate * Drain out all messages on queue except for TL_TICOTS where the 16560Sstevel@tonic-gate * abortive release semantics permit discarding of data on close 16570Sstevel@tonic-gate */ 16580Sstevel@tonic-gate if (tep->te_wq->q_first && (IS_CLTS(tep) || IS_COTSORD(tep))) { 16590Sstevel@tonic-gate tl_wsrv_ser(NULL, tep); 16600Sstevel@tonic-gate } 16610Sstevel@tonic-gate 16620Sstevel@tonic-gate /* Remove address from hash table. */ 16630Sstevel@tonic-gate tl_addr_unbind(tep); 16640Sstevel@tonic-gate /* 16650Sstevel@tonic-gate * qprocsoff() gets confused when q->q_next is not NULL on the write 16660Sstevel@tonic-gate * queue of the driver, so clear these before qprocsoff() is called. 16670Sstevel@tonic-gate * Also clear q_next for the peer since this queue is going away. 
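 * This only matters for non-socket COTS endpoints; their write queues are
 * assumed to have been pointed at the peer when the connection was
 * established elsewhere in this file.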
16680Sstevel@tonic-gate */ 16690Sstevel@tonic-gate if (IS_COTS(tep) && !IS_SOCKET(tep)) { 16700Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_conp; 16710Sstevel@tonic-gate 16720Sstevel@tonic-gate tep->te_wq->q_next = NULL; 16730Sstevel@tonic-gate if ((peer_tep != NULL) && !peer_tep->te_closing) 16740Sstevel@tonic-gate peer_tep->te_wq->q_next = NULL; 16750Sstevel@tonic-gate } 16760Sstevel@tonic-gate 16770Sstevel@tonic-gate tep->te_rq = NULL; 16780Sstevel@tonic-gate 16790Sstevel@tonic-gate /* wake up tl_close() */ 16800Sstevel@tonic-gate tl_closeok(tep); 16810Sstevel@tonic-gate tl_serializer_exit(tep); 16820Sstevel@tonic-gate } 16830Sstevel@tonic-gate 16840Sstevel@tonic-gate /* 16850Sstevel@tonic-gate * Second phase of tl_close(). Should wakeup tl_close() for COTS mode and drop 16860Sstevel@tonic-gate * the reference for CLTS. 16870Sstevel@tonic-gate * 16880Sstevel@tonic-gate * Called from serializer. Should drop reference count for CLTS only. 16890Sstevel@tonic-gate */ 16900Sstevel@tonic-gate /* ARGSUSED0 */ 16910Sstevel@tonic-gate static void 16920Sstevel@tonic-gate tl_close_finish_ser(mblk_t *mp, tl_endpt_t *tep) 16930Sstevel@tonic-gate { 16940Sstevel@tonic-gate ASSERT(tep->te_closing); 16950Sstevel@tonic-gate ASSERT(IMPLY(IS_CLTS(tep), tep->te_closewait == 0)); 16960Sstevel@tonic-gate ASSERT(IMPLY(IS_COTS(tep), tep->te_closewait == 1)); 16970Sstevel@tonic-gate 16980Sstevel@tonic-gate tep->te_state = -1; /* Uninitialized */ 16990Sstevel@tonic-gate if (IS_COTS(tep)) { 17000Sstevel@tonic-gate tl_co_unconnect(tep); 17010Sstevel@tonic-gate } else { 17020Sstevel@tonic-gate /* Connectionless specific cleanup */ 17030Sstevel@tonic-gate TL_REMOVE_PEER(tep->te_lastep); 17040Sstevel@tonic-gate /* 17050Sstevel@tonic-gate * Backenable anybody that is flow controlled waiting for 17060Sstevel@tonic-gate * this endpoint. 17070Sstevel@tonic-gate */ 17080Sstevel@tonic-gate tl_cl_backenable(tep); 17090Sstevel@tonic-gate if (tep->te_flowq != NULL) { 17100Sstevel@tonic-gate list_remove(&(tep->te_flowq->te_flowlist), tep); 17110Sstevel@tonic-gate tep->te_flowq = NULL; 17120Sstevel@tonic-gate } 17130Sstevel@tonic-gate } 17140Sstevel@tonic-gate 17150Sstevel@tonic-gate tl_serializer_exit(tep); 17160Sstevel@tonic-gate if (IS_COTS(tep)) 17170Sstevel@tonic-gate tl_closeok(tep); 17180Sstevel@tonic-gate else 17190Sstevel@tonic-gate tl_refrele(tep); 17200Sstevel@tonic-gate } 17210Sstevel@tonic-gate 17220Sstevel@tonic-gate /* 17230Sstevel@tonic-gate * STREAMS write-side put procedure. 17240Sstevel@tonic-gate * Enter serializer for most of the processing. 17250Sstevel@tonic-gate * 17260Sstevel@tonic-gate * The T_CONN_REQ is processed outside of serializer. 
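 *
 * Dispatch summary, derived from the switch below (illustrative aid only):
 *
 *	M_DATA				tl_wput_data_ser (COTS only)
 *	M_IOCTL TL_IOC_[U]CREDOPT	tl_do_ioctl_ser
 *	M_FLUSH				handled inline
 *	T_OPTMGMT_REQ			tl_optmgmt(), inline
 *	O_T_BIND_REQ / T_BIND_REQ	tl_bind_ser
 *	T_CONN_REQ			tl_conn_req(), inline
 *	T_DATA/T_OPTDATA/T_EXDATA/
 *	T_ORDREL_REQ			tl_putq_ser
 *	T_UNITDATA_REQ			tl_unitdata_ser or tl_putq_ser
 *	T_CAPABILITY_REQ		tl_capability_req(), inline
 *	T_INFO_REQ			tl_info_req_ser
 *	other M_PROTO			tl_wput_ser or tl_putq_ser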
17270Sstevel@tonic-gate */ 17280Sstevel@tonic-gate static void 17290Sstevel@tonic-gate tl_wput(queue_t *wq, mblk_t *mp) 17300Sstevel@tonic-gate { 17310Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr; 17320Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 17330Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr; 17340Sstevel@tonic-gate tlproc_t *tl_proc = NULL; 17350Sstevel@tonic-gate 17360Sstevel@tonic-gate switch (DB_TYPE(mp)) { 17370Sstevel@tonic-gate case M_DATA: 17380Sstevel@tonic-gate /* Only valid for connection-oriented transports */ 17390Sstevel@tonic-gate if (IS_CLTS(tep)) { 17400Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 17415240Snordmark SL_TRACE|SL_ERROR, 17425240Snordmark "tl_wput:M_DATA invalid for ticlts driver")); 17430Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 1744165Sxy158873 return; 17450Sstevel@tonic-gate } 17460Sstevel@tonic-gate tl_proc = tl_wput_data_ser; 17470Sstevel@tonic-gate break; 17480Sstevel@tonic-gate 17490Sstevel@tonic-gate case M_IOCTL: 17500Sstevel@tonic-gate switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 17510Sstevel@tonic-gate case TL_IOC_CREDOPT: 17520Sstevel@tonic-gate /* FALLTHROUGH */ 17530Sstevel@tonic-gate case TL_IOC_UCREDOPT: 17540Sstevel@tonic-gate /* 17550Sstevel@tonic-gate * Serialize endpoint state change. 17560Sstevel@tonic-gate */ 17570Sstevel@tonic-gate tl_proc = tl_do_ioctl_ser; 17580Sstevel@tonic-gate break; 17590Sstevel@tonic-gate 17600Sstevel@tonic-gate default: 17610Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL); 17620Sstevel@tonic-gate return; 17630Sstevel@tonic-gate } 17640Sstevel@tonic-gate break; 17650Sstevel@tonic-gate 17660Sstevel@tonic-gate case M_FLUSH: 17670Sstevel@tonic-gate /* 17680Sstevel@tonic-gate * do canonical M_FLUSH processing 17690Sstevel@tonic-gate */ 17700Sstevel@tonic-gate if (*mp->b_rptr & FLUSHW) { 17710Sstevel@tonic-gate flushq(wq, FLUSHALL); 17720Sstevel@tonic-gate *mp->b_rptr &= ~FLUSHW; 17730Sstevel@tonic-gate } 17740Sstevel@tonic-gate if (*mp->b_rptr & FLUSHR) { 17750Sstevel@tonic-gate flushq(RD(wq), FLUSHALL); 17760Sstevel@tonic-gate qreply(wq, mp); 17770Sstevel@tonic-gate } else { 17780Sstevel@tonic-gate freemsg(mp); 17790Sstevel@tonic-gate } 17800Sstevel@tonic-gate return; 17810Sstevel@tonic-gate 17820Sstevel@tonic-gate case M_PROTO: 17830Sstevel@tonic-gate if (msz < sizeof (prim->type)) { 17840Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 17855240Snordmark SL_TRACE|SL_ERROR, 17865240Snordmark "tl_wput:M_PROTO data too short")); 17870Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 17880Sstevel@tonic-gate return; 17890Sstevel@tonic-gate } 17900Sstevel@tonic-gate switch (prim->type) { 17910Sstevel@tonic-gate case T_OPTMGMT_REQ: 17920Sstevel@tonic-gate case T_SVR4_OPTMGMT_REQ: 17930Sstevel@tonic-gate /* 17940Sstevel@tonic-gate * Process TPI option management requests immediately 17950Sstevel@tonic-gate * in put procedure regardless of in-order processing 17960Sstevel@tonic-gate * of already queued messages. 17970Sstevel@tonic-gate * (Note: This driver supports AF_UNIX socket 17980Sstevel@tonic-gate * implementation. Unless we implement this processing, 17990Sstevel@tonic-gate * setsockopt() on socket endpoint will block on flow 18000Sstevel@tonic-gate * controlled endpoints which it should not. That is 18010Sstevel@tonic-gate * required for successful execution of VSU socket tests 18020Sstevel@tonic-gate * and is consistent with BSD socket behavior). 
18030Sstevel@tonic-gate */ 18040Sstevel@tonic-gate tl_optmgmt(wq, mp); 18050Sstevel@tonic-gate return; 18060Sstevel@tonic-gate case O_T_BIND_REQ: 18070Sstevel@tonic-gate case T_BIND_REQ: 18080Sstevel@tonic-gate tl_proc = tl_bind_ser; 18090Sstevel@tonic-gate break; 18100Sstevel@tonic-gate case T_CONN_REQ: 18110Sstevel@tonic-gate if (IS_CLTS(tep)) { 18120Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 18130Sstevel@tonic-gate return; 18140Sstevel@tonic-gate } 18150Sstevel@tonic-gate tl_conn_req(wq, mp); 18160Sstevel@tonic-gate return; 18170Sstevel@tonic-gate case T_DATA_REQ: 18180Sstevel@tonic-gate case T_OPTDATA_REQ: 18190Sstevel@tonic-gate case T_EXDATA_REQ: 18200Sstevel@tonic-gate case T_ORDREL_REQ: 18210Sstevel@tonic-gate tl_proc = tl_putq_ser; 18220Sstevel@tonic-gate break; 18230Sstevel@tonic-gate case T_UNITDATA_REQ: 18240Sstevel@tonic-gate if (IS_COTS(tep) || 18250Sstevel@tonic-gate (msz < sizeof (struct T_unitdata_req))) { 18260Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 18270Sstevel@tonic-gate return; 18280Sstevel@tonic-gate } 18290Sstevel@tonic-gate if ((tep->te_state == TS_IDLE) && !wq->q_first) { 18300Sstevel@tonic-gate tl_proc = tl_unitdata_ser; 18310Sstevel@tonic-gate } else { 18320Sstevel@tonic-gate tl_proc = tl_putq_ser; 18330Sstevel@tonic-gate } 18340Sstevel@tonic-gate break; 18350Sstevel@tonic-gate default: 18360Sstevel@tonic-gate /* 18370Sstevel@tonic-gate * process in service procedure if message already 18380Sstevel@tonic-gate * queued (maintain in-order processing) 18390Sstevel@tonic-gate */ 18400Sstevel@tonic-gate if (wq->q_first != NULL) { 18410Sstevel@tonic-gate tl_proc = tl_putq_ser; 18420Sstevel@tonic-gate } else { 18430Sstevel@tonic-gate tl_proc = tl_wput_ser; 18440Sstevel@tonic-gate } 18450Sstevel@tonic-gate break; 18460Sstevel@tonic-gate } 18470Sstevel@tonic-gate break; 18480Sstevel@tonic-gate 18490Sstevel@tonic-gate case M_PCPROTO: 18500Sstevel@tonic-gate /* 18510Sstevel@tonic-gate * Check that the message has enough data to figure out TPI 18520Sstevel@tonic-gate * primitive. 18530Sstevel@tonic-gate */ 18540Sstevel@tonic-gate if (msz < sizeof (prim->type)) { 18550Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 18565240Snordmark SL_TRACE|SL_ERROR, 18575240Snordmark "tl_wput:M_PCROTO data too short")); 18580Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 18590Sstevel@tonic-gate return; 18600Sstevel@tonic-gate } 18610Sstevel@tonic-gate switch (prim->type) { 18620Sstevel@tonic-gate case T_CAPABILITY_REQ: 18630Sstevel@tonic-gate tl_capability_req(mp, tep); 18640Sstevel@tonic-gate return; 18650Sstevel@tonic-gate case T_INFO_REQ: 18660Sstevel@tonic-gate tl_proc = tl_info_req_ser; 18670Sstevel@tonic-gate break; 18680Sstevel@tonic-gate default: 18690Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 18705240Snordmark SL_TRACE|SL_ERROR, 18715240Snordmark "tl_wput:unknown TPI msg primitive")); 18720Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 18730Sstevel@tonic-gate return; 18740Sstevel@tonic-gate } 18750Sstevel@tonic-gate break; 18760Sstevel@tonic-gate default: 18770Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 18785240Snordmark "tl_wput:default:unexpected Streams message")); 18790Sstevel@tonic-gate freemsg(mp); 18800Sstevel@tonic-gate return; 18810Sstevel@tonic-gate } 18820Sstevel@tonic-gate 18830Sstevel@tonic-gate /* 18840Sstevel@tonic-gate * Continue processing via serializer. 
18850Sstevel@tonic-gate */ 18860Sstevel@tonic-gate ASSERT(tl_proc != NULL); 18870Sstevel@tonic-gate tl_refhold(tep); 18880Sstevel@tonic-gate tl_serializer_enter(tep, tl_proc, mp); 18890Sstevel@tonic-gate } 18900Sstevel@tonic-gate 18910Sstevel@tonic-gate /* 18920Sstevel@tonic-gate * Place message on the queue while preserving order. 18930Sstevel@tonic-gate */ 18940Sstevel@tonic-gate static void 18950Sstevel@tonic-gate tl_putq_ser(mblk_t *mp, tl_endpt_t *tep) 18960Sstevel@tonic-gate { 18970Sstevel@tonic-gate if (tep->te_closing) { 18980Sstevel@tonic-gate tl_wput_ser(mp, tep); 18990Sstevel@tonic-gate } else { 19000Sstevel@tonic-gate TL_PUTQ(tep, mp); 19010Sstevel@tonic-gate tl_serializer_exit(tep); 19020Sstevel@tonic-gate tl_refrele(tep); 19030Sstevel@tonic-gate } 19040Sstevel@tonic-gate 19050Sstevel@tonic-gate } 19060Sstevel@tonic-gate 19070Sstevel@tonic-gate static void 19080Sstevel@tonic-gate tl_wput_common_ser(mblk_t *mp, tl_endpt_t *tep) 19090Sstevel@tonic-gate { 19100Sstevel@tonic-gate ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO)); 19110Sstevel@tonic-gate 19120Sstevel@tonic-gate switch (DB_TYPE(mp)) { 19130Sstevel@tonic-gate case M_DATA: 19140Sstevel@tonic-gate tl_data(mp, tep); 19150Sstevel@tonic-gate break; 19160Sstevel@tonic-gate case M_PROTO: 19170Sstevel@tonic-gate tl_do_proto(mp, tep); 19180Sstevel@tonic-gate break; 19190Sstevel@tonic-gate default: 19200Sstevel@tonic-gate freemsg(mp); 19210Sstevel@tonic-gate break; 19220Sstevel@tonic-gate } 19230Sstevel@tonic-gate } 19240Sstevel@tonic-gate 19250Sstevel@tonic-gate /* 19260Sstevel@tonic-gate * Write side put procedure called from serializer. 19270Sstevel@tonic-gate */ 19280Sstevel@tonic-gate static void 19290Sstevel@tonic-gate tl_wput_ser(mblk_t *mp, tl_endpt_t *tep) 19300Sstevel@tonic-gate { 19310Sstevel@tonic-gate tl_wput_common_ser(mp, tep); 19320Sstevel@tonic-gate tl_serializer_exit(tep); 19330Sstevel@tonic-gate tl_refrele(tep); 19340Sstevel@tonic-gate } 19350Sstevel@tonic-gate 19360Sstevel@tonic-gate /* 19370Sstevel@tonic-gate * M_DATA processing. Called from serializer. 19380Sstevel@tonic-gate */ 19390Sstevel@tonic-gate static void 19400Sstevel@tonic-gate tl_wput_data_ser(mblk_t *mp, tl_endpt_t *tep) 19410Sstevel@tonic-gate { 19420Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_conp; 19430Sstevel@tonic-gate queue_t *peer_rq; 19440Sstevel@tonic-gate 19450Sstevel@tonic-gate ASSERT(DB_TYPE(mp) == M_DATA); 19460Sstevel@tonic-gate ASSERT(IS_COTS(tep)); 19470Sstevel@tonic-gate 19480Sstevel@tonic-gate ASSERT(IMPLY(peer_tep, tep->te_serializer == peer_tep->te_serializer)); 19490Sstevel@tonic-gate 19500Sstevel@tonic-gate /* 19510Sstevel@tonic-gate * fastpath for data. Ignore flow control if tep is closing. 
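 * The fastpath below requires: a peer that is not closing, both endpoints
 * in TS_DATA_XFER or TS_WREQ_ORDREL, nothing already queued on the local
 * write queue, and room on the peer's read queue (canputnext(), waived
 * when tep is closing).  Anything that fails these checks is queued with
 * TL_PUTQ(), or freed if the endpoint is closing and no longer in a
 * data-transfer state.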
19520Sstevel@tonic-gate */ 19530Sstevel@tonic-gate if ((peer_tep != NULL) && 19540Sstevel@tonic-gate !peer_tep->te_closing && 19550Sstevel@tonic-gate ((tep->te_state == TS_DATA_XFER) || 19565240Snordmark (tep->te_state == TS_WREQ_ORDREL)) && 19570Sstevel@tonic-gate (tep->te_wq != NULL) && 19580Sstevel@tonic-gate (tep->te_wq->q_first == NULL) && 19590Sstevel@tonic-gate ((peer_tep->te_state == TS_DATA_XFER) || 19605240Snordmark (peer_tep->te_state == TS_WREQ_ORDREL)) && 19610Sstevel@tonic-gate ((peer_rq = peer_tep->te_rq) != NULL) && 19620Sstevel@tonic-gate (canputnext(peer_rq) || tep->te_closing)) { 19630Sstevel@tonic-gate putnext(peer_rq, mp); 19640Sstevel@tonic-gate } else if (tep->te_closing) { 19650Sstevel@tonic-gate /* 19660Sstevel@tonic-gate * It is possible that by the time we got here tep started to 19670Sstevel@tonic-gate * close. If the write queue is not empty, and the state is 19680Sstevel@tonic-gate * TS_DATA_XFER the data should be delivered in order, so we 19690Sstevel@tonic-gate * call putq() instead of freeing the data. 19700Sstevel@tonic-gate */ 19710Sstevel@tonic-gate if ((tep->te_wq != NULL) && 19720Sstevel@tonic-gate ((tep->te_state == TS_DATA_XFER) || 19735240Snordmark (tep->te_state == TS_WREQ_ORDREL))) { 19740Sstevel@tonic-gate TL_PUTQ(tep, mp); 19750Sstevel@tonic-gate } else { 19760Sstevel@tonic-gate freemsg(mp); 19770Sstevel@tonic-gate } 19780Sstevel@tonic-gate } else { 19790Sstevel@tonic-gate TL_PUTQ(tep, mp); 19800Sstevel@tonic-gate } 19810Sstevel@tonic-gate 19820Sstevel@tonic-gate tl_serializer_exit(tep); 19830Sstevel@tonic-gate tl_refrele(tep); 19840Sstevel@tonic-gate } 19850Sstevel@tonic-gate 19860Sstevel@tonic-gate /* 19870Sstevel@tonic-gate * Write side service routine. 19880Sstevel@tonic-gate * 19890Sstevel@tonic-gate * All actual processing happens within serializer which is entered 19900Sstevel@tonic-gate * synchronously. It is possible that by the time tl_wsrv() wakes up, some new 19910Sstevel@tonic-gate * messages that need processing may have arrived, so tl_wsrv repeats until 19920Sstevel@tonic-gate * queue is empty or te_nowsrv is set. 19930Sstevel@tonic-gate */ 19940Sstevel@tonic-gate static void 19950Sstevel@tonic-gate tl_wsrv(queue_t *wq) 19960Sstevel@tonic-gate { 19970Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr; 19980Sstevel@tonic-gate 19990Sstevel@tonic-gate while ((wq->q_first != NULL) && !tep->te_nowsrv) { 20000Sstevel@tonic-gate mutex_enter(&tep->te_srv_lock); 20010Sstevel@tonic-gate ASSERT(tep->te_wsrv_active == B_FALSE); 20020Sstevel@tonic-gate tep->te_wsrv_active = B_TRUE; 20030Sstevel@tonic-gate mutex_exit(&tep->te_srv_lock); 20040Sstevel@tonic-gate 20050Sstevel@tonic-gate tl_serializer_enter(tep, tl_wsrv_ser, &tep->te_wsrvmp); 20060Sstevel@tonic-gate 20070Sstevel@tonic-gate /* 20080Sstevel@tonic-gate * Wait for serializer job to complete. 20090Sstevel@tonic-gate */ 20100Sstevel@tonic-gate mutex_enter(&tep->te_srv_lock); 20110Sstevel@tonic-gate while (tep->te_wsrv_active) { 20120Sstevel@tonic-gate cv_wait(&tep->te_srv_cv, &tep->te_srv_lock); 20130Sstevel@tonic-gate } 20140Sstevel@tonic-gate cv_signal(&tep->te_srv_cv); 20150Sstevel@tonic-gate mutex_exit(&tep->te_srv_lock); 20160Sstevel@tonic-gate } 20170Sstevel@tonic-gate } 20180Sstevel@tonic-gate 20190Sstevel@tonic-gate /* 20200Sstevel@tonic-gate * Serialized write side processing of the STREAMS queue. 20210Sstevel@tonic-gate * May be called either from tl_wsrv() or from tl_close() in which case ser_mp 20220Sstevel@tonic-gate * is NULL. 
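 *
 * Handshake with tl_wsrv(), shown for illustration:
 *
 *	tl_wsrv()				tl_wsrv_ser()
 *	  te_wsrv_active = B_TRUE
 *	  tl_serializer_enter(..., te_wsrvmp)
 *	  cv_wait(te_srv_cv)			  drain queue via getq()
 *						  te_wsrv_active = B_FALSE
 *						  cv_signal(te_srv_cv)
 *	  loop while q_first != NULL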
20230Sstevel@tonic-gate */ 20240Sstevel@tonic-gate static void 20250Sstevel@tonic-gate tl_wsrv_ser(mblk_t *ser_mp, tl_endpt_t *tep) 20260Sstevel@tonic-gate { 20270Sstevel@tonic-gate mblk_t *mp; 20280Sstevel@tonic-gate queue_t *wq = tep->te_wq; 20290Sstevel@tonic-gate 20300Sstevel@tonic-gate ASSERT(wq != NULL); 20310Sstevel@tonic-gate while (!tep->te_nowsrv && (mp = getq(wq)) != NULL) { 20320Sstevel@tonic-gate tl_wput_common_ser(mp, tep); 20330Sstevel@tonic-gate } 20340Sstevel@tonic-gate 20350Sstevel@tonic-gate /* 20360Sstevel@tonic-gate * Wakeup service routine unless called from close. 20370Sstevel@tonic-gate * If ser_mp is specified, the caller is tl_wsrv(). 20380Sstevel@tonic-gate * Otherwise, the caller is tl_close_ser(). Since tl_close_ser() doesn't 20390Sstevel@tonic-gate * call tl_serializer_enter() before calling tl_wsrv_ser(), there should 20400Sstevel@tonic-gate * be no matching tl_serializer_exit() in this case. 20410Sstevel@tonic-gate * Also, there is no need to wakeup anyone since tl_close_ser() is not 20420Sstevel@tonic-gate * waiting on te_srv_cv. 20430Sstevel@tonic-gate */ 20440Sstevel@tonic-gate if (ser_mp != NULL) { 20450Sstevel@tonic-gate /* 20460Sstevel@tonic-gate * We are called from tl_wsrv. 20470Sstevel@tonic-gate */ 20480Sstevel@tonic-gate mutex_enter(&tep->te_srv_lock); 20490Sstevel@tonic-gate ASSERT(tep->te_wsrv_active); 20500Sstevel@tonic-gate tep->te_wsrv_active = B_FALSE; 20510Sstevel@tonic-gate cv_signal(&tep->te_srv_cv); 20520Sstevel@tonic-gate mutex_exit(&tep->te_srv_lock); 20530Sstevel@tonic-gate tl_serializer_exit(tep); 20540Sstevel@tonic-gate } 20550Sstevel@tonic-gate } 20560Sstevel@tonic-gate 20570Sstevel@tonic-gate /* 20580Sstevel@tonic-gate * Called when the stream is backenabled. Enter serializer and qenable everyone 20590Sstevel@tonic-gate * flow controlled by tep. 20600Sstevel@tonic-gate * 20610Sstevel@tonic-gate * NOTE: The service routine should enter serializer synchronously. Otherwise it 20620Sstevel@tonic-gate * is possible that two instances of tl_rsrv will be running reusing the same 20630Sstevel@tonic-gate * rsrv mblk. 20640Sstevel@tonic-gate */ 20650Sstevel@tonic-gate static void 20660Sstevel@tonic-gate tl_rsrv(queue_t *rq) 20670Sstevel@tonic-gate { 20680Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr; 20690Sstevel@tonic-gate 20700Sstevel@tonic-gate ASSERT(rq->q_first == NULL); 20710Sstevel@tonic-gate ASSERT(tep->te_rsrv_active == 0); 20720Sstevel@tonic-gate 20730Sstevel@tonic-gate tep->te_rsrv_active = B_TRUE; 20740Sstevel@tonic-gate tl_serializer_enter(tep, tl_rsrv_ser, &tep->te_rsrvmp); 20750Sstevel@tonic-gate /* 20760Sstevel@tonic-gate * Wait for serializer job to complete. 
20770Sstevel@tonic-gate */ 20780Sstevel@tonic-gate mutex_enter(&tep->te_srv_lock); 20790Sstevel@tonic-gate while (tep->te_rsrv_active) { 20800Sstevel@tonic-gate cv_wait(&tep->te_srv_cv, &tep->te_srv_lock); 20810Sstevel@tonic-gate } 20820Sstevel@tonic-gate cv_signal(&tep->te_srv_cv); 20830Sstevel@tonic-gate mutex_exit(&tep->te_srv_lock); 20840Sstevel@tonic-gate } 20850Sstevel@tonic-gate 20860Sstevel@tonic-gate /* ARGSUSED */ 20870Sstevel@tonic-gate static void 20880Sstevel@tonic-gate tl_rsrv_ser(mblk_t *mp, tl_endpt_t *tep) 20890Sstevel@tonic-gate { 20900Sstevel@tonic-gate tl_endpt_t *peer_tep; 20910Sstevel@tonic-gate 20920Sstevel@tonic-gate if (IS_CLTS(tep) && tep->te_state == TS_IDLE) { 20930Sstevel@tonic-gate tl_cl_backenable(tep); 20940Sstevel@tonic-gate } else if ( 20955240Snordmark IS_COTS(tep) && 20965240Snordmark ((peer_tep = tep->te_conp) != NULL) && 20975240Snordmark !peer_tep->te_closing && 20985240Snordmark ((tep->te_state == TS_DATA_XFER) || 20995240Snordmark (tep->te_state == TS_WIND_ORDREL)|| 21005240Snordmark (tep->te_state == TS_WREQ_ORDREL))) { 21010Sstevel@tonic-gate TL_QENABLE(peer_tep); 21020Sstevel@tonic-gate } 21030Sstevel@tonic-gate 21040Sstevel@tonic-gate /* 21050Sstevel@tonic-gate * Wakeup read side service routine. 21060Sstevel@tonic-gate */ 21070Sstevel@tonic-gate mutex_enter(&tep->te_srv_lock); 21080Sstevel@tonic-gate ASSERT(tep->te_rsrv_active); 21090Sstevel@tonic-gate tep->te_rsrv_active = B_FALSE; 21100Sstevel@tonic-gate cv_signal(&tep->te_srv_cv); 21110Sstevel@tonic-gate mutex_exit(&tep->te_srv_lock); 21120Sstevel@tonic-gate tl_serializer_exit(tep); 21130Sstevel@tonic-gate } 21140Sstevel@tonic-gate 21150Sstevel@tonic-gate /* 21160Sstevel@tonic-gate * process M_PROTO messages. Always called from serializer. 21170Sstevel@tonic-gate */ 21180Sstevel@tonic-gate static void 21190Sstevel@tonic-gate tl_do_proto(mblk_t *mp, tl_endpt_t *tep) 21200Sstevel@tonic-gate { 21210Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 21220Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr; 21230Sstevel@tonic-gate 21240Sstevel@tonic-gate /* Message size was validated by tl_wput(). 
*/ 21250Sstevel@tonic-gate ASSERT(msz >= sizeof (prim->type)); 21260Sstevel@tonic-gate 21270Sstevel@tonic-gate switch (prim->type) { 21280Sstevel@tonic-gate case T_UNBIND_REQ: 21290Sstevel@tonic-gate tl_unbind(mp, tep); 21300Sstevel@tonic-gate break; 21310Sstevel@tonic-gate 21320Sstevel@tonic-gate case T_ADDR_REQ: 21330Sstevel@tonic-gate tl_addr_req(mp, tep); 21340Sstevel@tonic-gate break; 21350Sstevel@tonic-gate 21360Sstevel@tonic-gate case O_T_CONN_RES: 21370Sstevel@tonic-gate case T_CONN_RES: 21380Sstevel@tonic-gate if (IS_CLTS(tep)) { 21390Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21400Sstevel@tonic-gate break; 21410Sstevel@tonic-gate } 21420Sstevel@tonic-gate tl_conn_res(mp, tep); 21430Sstevel@tonic-gate break; 21440Sstevel@tonic-gate 21450Sstevel@tonic-gate case T_DISCON_REQ: 21460Sstevel@tonic-gate if (IS_CLTS(tep)) { 21470Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21480Sstevel@tonic-gate break; 21490Sstevel@tonic-gate } 21500Sstevel@tonic-gate tl_discon_req(mp, tep); 21510Sstevel@tonic-gate break; 21520Sstevel@tonic-gate 21530Sstevel@tonic-gate case T_DATA_REQ: 21540Sstevel@tonic-gate if (IS_CLTS(tep)) { 21550Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21560Sstevel@tonic-gate break; 21570Sstevel@tonic-gate } 21580Sstevel@tonic-gate tl_data(mp, tep); 21590Sstevel@tonic-gate break; 21600Sstevel@tonic-gate 21610Sstevel@tonic-gate case T_OPTDATA_REQ: 21620Sstevel@tonic-gate if (IS_CLTS(tep)) { 21630Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21640Sstevel@tonic-gate break; 21650Sstevel@tonic-gate } 21660Sstevel@tonic-gate tl_data(mp, tep); 21670Sstevel@tonic-gate break; 21680Sstevel@tonic-gate 21690Sstevel@tonic-gate case T_EXDATA_REQ: 21700Sstevel@tonic-gate if (IS_CLTS(tep)) { 21710Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21720Sstevel@tonic-gate break; 21730Sstevel@tonic-gate } 21740Sstevel@tonic-gate tl_exdata(mp, tep); 21750Sstevel@tonic-gate break; 21760Sstevel@tonic-gate 21770Sstevel@tonic-gate case T_ORDREL_REQ: 21780Sstevel@tonic-gate if (! IS_COTSORD(tep)) { 21790Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21800Sstevel@tonic-gate break; 21810Sstevel@tonic-gate } 21820Sstevel@tonic-gate tl_ordrel(mp, tep); 21830Sstevel@tonic-gate break; 21840Sstevel@tonic-gate 21850Sstevel@tonic-gate case T_UNITDATA_REQ: 21860Sstevel@tonic-gate if (IS_COTS(tep)) { 21870Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21880Sstevel@tonic-gate break; 21890Sstevel@tonic-gate } 21900Sstevel@tonic-gate tl_unitdata(mp, tep); 21910Sstevel@tonic-gate break; 21920Sstevel@tonic-gate 21930Sstevel@tonic-gate default: 21940Sstevel@tonic-gate tl_merror(tep->te_wq, mp, EPROTO); 21950Sstevel@tonic-gate break; 21960Sstevel@tonic-gate } 21970Sstevel@tonic-gate } 21980Sstevel@tonic-gate 21990Sstevel@tonic-gate /* 22000Sstevel@tonic-gate * Process ioctl from serializer. 22010Sstevel@tonic-gate * This is a wrapper around tl_do_ioctl(). 22020Sstevel@tonic-gate */ 22030Sstevel@tonic-gate static void 22040Sstevel@tonic-gate tl_do_ioctl_ser(mblk_t *mp, tl_endpt_t *tep) 22050Sstevel@tonic-gate { 22060Sstevel@tonic-gate if (! 
tep->te_closing) 22070Sstevel@tonic-gate tl_do_ioctl(mp, tep); 22080Sstevel@tonic-gate else 22090Sstevel@tonic-gate freemsg(mp); 22100Sstevel@tonic-gate 22110Sstevel@tonic-gate tl_serializer_exit(tep); 22120Sstevel@tonic-gate tl_refrele(tep); 22130Sstevel@tonic-gate } 22140Sstevel@tonic-gate 22150Sstevel@tonic-gate static void 22160Sstevel@tonic-gate tl_do_ioctl(mblk_t *mp, tl_endpt_t *tep) 22170Sstevel@tonic-gate { 22180Sstevel@tonic-gate struct iocblk *iocbp = (struct iocblk *)mp->b_rptr; 22190Sstevel@tonic-gate int cmd = iocbp->ioc_cmd; 22200Sstevel@tonic-gate queue_t *wq = tep->te_wq; 22210Sstevel@tonic-gate int error; 22220Sstevel@tonic-gate int thisopt, otheropt; 22230Sstevel@tonic-gate 22240Sstevel@tonic-gate ASSERT((cmd == TL_IOC_CREDOPT) || (cmd == TL_IOC_UCREDOPT)); 22250Sstevel@tonic-gate 22260Sstevel@tonic-gate switch (cmd) { 22270Sstevel@tonic-gate case TL_IOC_CREDOPT: 22280Sstevel@tonic-gate if (cmd == TL_IOC_CREDOPT) { 22290Sstevel@tonic-gate thisopt = TL_SETCRED; 22300Sstevel@tonic-gate otheropt = TL_SETUCRED; 22310Sstevel@tonic-gate } else { 22320Sstevel@tonic-gate /* FALLTHROUGH */ 22330Sstevel@tonic-gate case TL_IOC_UCREDOPT: 22340Sstevel@tonic-gate thisopt = TL_SETUCRED; 22350Sstevel@tonic-gate otheropt = TL_SETCRED; 22360Sstevel@tonic-gate } 22370Sstevel@tonic-gate /* 22380Sstevel@tonic-gate * The credentials passing does not apply to sockets. 22390Sstevel@tonic-gate * Only one of the cred options can be set at a given time. 22400Sstevel@tonic-gate */ 22410Sstevel@tonic-gate if (IS_SOCKET(tep) || (tep->te_flag & otheropt)) { 22420Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL); 22430Sstevel@tonic-gate return; 22440Sstevel@tonic-gate } 22450Sstevel@tonic-gate 22460Sstevel@tonic-gate /* 22470Sstevel@tonic-gate * Turn on generation of credential options for 22480Sstevel@tonic-gate * T_conn_req, T_conn_con, T_unidata_ind. 
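 *
 * Illustrative consumer sketch (not part of this driver; the header
 * providing TL_IOC_UCREDOPT and the exact stream setup are assumed):
 *
 *	uint32_t on = 1;
 *	struct strioctl sio;
 *
 *	sio.ic_cmd = TL_IOC_UCREDOPT;
 *	sio.ic_timout = 0;
 *	sio.ic_len = sizeof (on);
 *	sio.ic_dp = (char *)&on;
 *	(void) ioctl(fd, I_STR, &sio);	turns TL_SETUCRED on for fd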
22490Sstevel@tonic-gate */ 22500Sstevel@tonic-gate error = miocpullup(mp, sizeof (uint32_t)); 22510Sstevel@tonic-gate if (error != 0) { 22520Sstevel@tonic-gate miocnak(wq, mp, 0, error); 22530Sstevel@tonic-gate return; 22540Sstevel@tonic-gate } 22550Sstevel@tonic-gate if (!IS_P2ALIGNED(mp->b_cont->b_rptr, sizeof (uint32_t))) { 22560Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL); 22570Sstevel@tonic-gate return; 22580Sstevel@tonic-gate } 22590Sstevel@tonic-gate 22600Sstevel@tonic-gate if (*(uint32_t *)mp->b_cont->b_rptr) 22610Sstevel@tonic-gate tep->te_flag |= thisopt; 22620Sstevel@tonic-gate else 22630Sstevel@tonic-gate tep->te_flag &= ~thisopt; 22640Sstevel@tonic-gate 22650Sstevel@tonic-gate miocack(wq, mp, 0, 0); 22660Sstevel@tonic-gate break; 22670Sstevel@tonic-gate 22680Sstevel@tonic-gate default: 22690Sstevel@tonic-gate /* Should not be here */ 22700Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL); 22710Sstevel@tonic-gate break; 22720Sstevel@tonic-gate } 22730Sstevel@tonic-gate } 22740Sstevel@tonic-gate 22750Sstevel@tonic-gate 22760Sstevel@tonic-gate /* 22770Sstevel@tonic-gate * send T_ERROR_ACK 22780Sstevel@tonic-gate * Note: assumes enough memory or caller passed big enough mp 22790Sstevel@tonic-gate * - no recovery from allocb failures 22800Sstevel@tonic-gate */ 22810Sstevel@tonic-gate 22820Sstevel@tonic-gate static void 22830Sstevel@tonic-gate tl_error_ack(queue_t *wq, mblk_t *mp, t_scalar_t tli_err, 22840Sstevel@tonic-gate t_scalar_t unix_err, t_scalar_t type) 22850Sstevel@tonic-gate { 22860Sstevel@tonic-gate struct T_error_ack *err_ack; 22870Sstevel@tonic-gate mblk_t *ackmp = tpi_ack_alloc(mp, sizeof (struct T_error_ack), 22880Sstevel@tonic-gate M_PCPROTO, T_ERROR_ACK); 22890Sstevel@tonic-gate 22900Sstevel@tonic-gate if (ackmp == NULL) { 22910Sstevel@tonic-gate (void) (STRLOG(TL_ID, 0, 1, SL_TRACE|SL_ERROR, 22925240Snordmark "tl_error_ack:out of mblk memory")); 22930Sstevel@tonic-gate tl_merror(wq, NULL, ENOSR); 22940Sstevel@tonic-gate return; 22950Sstevel@tonic-gate } 22960Sstevel@tonic-gate err_ack = (struct T_error_ack *)ackmp->b_rptr; 22970Sstevel@tonic-gate err_ack->ERROR_prim = type; 22980Sstevel@tonic-gate err_ack->TLI_error = tli_err; 22990Sstevel@tonic-gate err_ack->UNIX_error = unix_err; 23000Sstevel@tonic-gate 23010Sstevel@tonic-gate /* 23020Sstevel@tonic-gate * send error ack message 23030Sstevel@tonic-gate */ 23040Sstevel@tonic-gate qreply(wq, ackmp); 23050Sstevel@tonic-gate } 23060Sstevel@tonic-gate 23070Sstevel@tonic-gate 23080Sstevel@tonic-gate 23090Sstevel@tonic-gate /* 23100Sstevel@tonic-gate * send T_OK_ACK 23110Sstevel@tonic-gate * Note: assumes enough memory or caller passed big enough mp 23120Sstevel@tonic-gate * - no recovery from allocb failures 23130Sstevel@tonic-gate */ 23140Sstevel@tonic-gate static void 23150Sstevel@tonic-gate tl_ok_ack(queue_t *wq, mblk_t *mp, t_scalar_t type) 23160Sstevel@tonic-gate { 23170Sstevel@tonic-gate struct T_ok_ack *ok_ack; 23180Sstevel@tonic-gate mblk_t *ackmp = tpi_ack_alloc(mp, sizeof (struct T_ok_ack), 23190Sstevel@tonic-gate M_PCPROTO, T_OK_ACK); 23200Sstevel@tonic-gate 23210Sstevel@tonic-gate if (ackmp == NULL) { 23220Sstevel@tonic-gate tl_merror(wq, NULL, ENOMEM); 23230Sstevel@tonic-gate return; 23240Sstevel@tonic-gate } 23250Sstevel@tonic-gate 23260Sstevel@tonic-gate ok_ack = (struct T_ok_ack *)ackmp->b_rptr; 23270Sstevel@tonic-gate ok_ack->CORRECT_prim = type; 23280Sstevel@tonic-gate 23290Sstevel@tonic-gate (void) qreply(wq, ackmp); 23300Sstevel@tonic-gate } 23310Sstevel@tonic-gate 23320Sstevel@tonic-gate /* 
23330Sstevel@tonic-gate * Process T_BIND_REQ and O_T_BIND_REQ from serializer. 23340Sstevel@tonic-gate * This is a wrapper around tl_bind(). 23350Sstevel@tonic-gate */ 23360Sstevel@tonic-gate static void 23370Sstevel@tonic-gate tl_bind_ser(mblk_t *mp, tl_endpt_t *tep) 23380Sstevel@tonic-gate { 23390Sstevel@tonic-gate if (! tep->te_closing) 23400Sstevel@tonic-gate tl_bind(mp, tep); 23410Sstevel@tonic-gate else 23420Sstevel@tonic-gate freemsg(mp); 23430Sstevel@tonic-gate 23440Sstevel@tonic-gate tl_serializer_exit(tep); 23450Sstevel@tonic-gate tl_refrele(tep); 23460Sstevel@tonic-gate } 23470Sstevel@tonic-gate 23480Sstevel@tonic-gate /* 23490Sstevel@tonic-gate * Process T_BIND_REQ and O_T_BIND_REQ TPI requests. 23500Sstevel@tonic-gate * Assumes that the endpoint is in the unbound. 23510Sstevel@tonic-gate */ 23520Sstevel@tonic-gate static void 23530Sstevel@tonic-gate tl_bind(mblk_t *mp, tl_endpt_t *tep) 23540Sstevel@tonic-gate { 23550Sstevel@tonic-gate queue_t *wq = tep->te_wq; 23560Sstevel@tonic-gate struct T_bind_ack *b_ack; 23570Sstevel@tonic-gate struct T_bind_req *bind = (struct T_bind_req *)mp->b_rptr; 23580Sstevel@tonic-gate mblk_t *ackmp, *bamp; 23590Sstevel@tonic-gate soux_addr_t ux_addr; 23600Sstevel@tonic-gate t_uscalar_t qlen = 0; 23610Sstevel@tonic-gate t_scalar_t alen, aoff; 23620Sstevel@tonic-gate tl_addr_t addr_req; 23630Sstevel@tonic-gate void *addr_startp; 23640Sstevel@tonic-gate ssize_t msz = MBLKL(mp), basize; 23650Sstevel@tonic-gate t_scalar_t tli_err = 0, unix_err = 0; 23660Sstevel@tonic-gate t_scalar_t save_prim_type = bind->PRIM_type; 23670Sstevel@tonic-gate t_scalar_t save_state = tep->te_state; 23680Sstevel@tonic-gate 23690Sstevel@tonic-gate if (tep->te_state != TS_UNBND) { 23700Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 23715240Snordmark SL_TRACE|SL_ERROR, 23725240Snordmark "tl_wput:bind_request:out of state, state=%d", 23735240Snordmark tep->te_state)); 23740Sstevel@tonic-gate tli_err = TOUTSTATE; 23750Sstevel@tonic-gate goto error; 23760Sstevel@tonic-gate } 23770Sstevel@tonic-gate 23780Sstevel@tonic-gate if (msz < sizeof (struct T_bind_req)) { 23790Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 23800Sstevel@tonic-gate goto error; 23810Sstevel@tonic-gate } 23820Sstevel@tonic-gate 23830Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_BIND_REQ, tep->te_state); 23840Sstevel@tonic-gate 23850Sstevel@tonic-gate ASSERT((bind->PRIM_type == O_T_BIND_REQ) || 23860Sstevel@tonic-gate (bind->PRIM_type == T_BIND_REQ)); 23870Sstevel@tonic-gate 23880Sstevel@tonic-gate alen = bind->ADDR_length; 23890Sstevel@tonic-gate aoff = bind->ADDR_offset; 23900Sstevel@tonic-gate 23910Sstevel@tonic-gate /* negotiate max conn req pending */ 23920Sstevel@tonic-gate if (IS_COTS(tep)) { 23930Sstevel@tonic-gate qlen = bind->CONIND_number; 23942486Sakolb if (qlen > tl_maxqlen) 23952486Sakolb qlen = tl_maxqlen; 23960Sstevel@tonic-gate } 23970Sstevel@tonic-gate 23980Sstevel@tonic-gate /* 23990Sstevel@tonic-gate * Reserve hash handle. It can only be NULL if the endpoint is unbound 24000Sstevel@tonic-gate * and bound again. 
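 *
 * The handle is reserved up front, before any bind state changes, so
 * that the mod_hash_insert_reserve() calls later in this function
 * cannot fail for lack of memory; with a preallocated handle they can
 * only fail with MH_ERR_DUPLICATE when the key is already present.
 * If the reservation itself fails, the bind is rejected below with
 * TSYSERR/ENOSR.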
24010Sstevel@tonic-gate */ 24020Sstevel@tonic-gate if ((tep->te_hash_hndl == NULL) && 24030Sstevel@tonic-gate ((tep->te_flag & TL_ADDRHASHED) == 0) && 24040Sstevel@tonic-gate mod_hash_reserve_nosleep(tep->te_addrhash, 24055240Snordmark &tep->te_hash_hndl) != 0) { 24060Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOSR; 24070Sstevel@tonic-gate goto error; 24080Sstevel@tonic-gate } 24090Sstevel@tonic-gate 24100Sstevel@tonic-gate /* 24110Sstevel@tonic-gate * Verify address correctness. 24120Sstevel@tonic-gate */ 24130Sstevel@tonic-gate if (IS_SOCKET(tep)) { 24140Sstevel@tonic-gate ASSERT(bind->PRIM_type == O_T_BIND_REQ); 24150Sstevel@tonic-gate 24160Sstevel@tonic-gate if ((alen != TL_SOUX_ADDRLEN) || 24170Sstevel@tonic-gate (aoff < 0) || 24180Sstevel@tonic-gate (aoff + alen > msz)) { 24190Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24205240Snordmark 1, SL_TRACE|SL_ERROR, 24215240Snordmark "tl_bind: invalid socket addr")); 24220Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24230Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 24240Sstevel@tonic-gate goto error; 24250Sstevel@tonic-gate } 24260Sstevel@tonic-gate /* Copy address from message to local buffer. */ 24270Sstevel@tonic-gate bcopy(mp->b_rptr + aoff, &ux_addr, sizeof (ux_addr)); 24280Sstevel@tonic-gate /* 24290Sstevel@tonic-gate * Check that we got correct address from sockets 24300Sstevel@tonic-gate */ 24310Sstevel@tonic-gate if ((ux_addr.soua_magic != SOU_MAGIC_EXPLICIT) && 24320Sstevel@tonic-gate (ux_addr.soua_magic != SOU_MAGIC_IMPLICIT)) { 24330Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24345240Snordmark 1, SL_TRACE|SL_ERROR, 24355240Snordmark "tl_bind: invalid socket magic")); 24360Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24370Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 24380Sstevel@tonic-gate goto error; 24390Sstevel@tonic-gate } 24400Sstevel@tonic-gate if ((ux_addr.soua_magic == SOU_MAGIC_IMPLICIT) && 24410Sstevel@tonic-gate (ux_addr.soua_vp != NULL)) { 24420Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24435240Snordmark 1, SL_TRACE|SL_ERROR, 24445240Snordmark "tl_bind: implicit addr non-empty")); 24450Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24460Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 24470Sstevel@tonic-gate goto error; 24480Sstevel@tonic-gate } 24490Sstevel@tonic-gate if ((ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) && 24500Sstevel@tonic-gate (ux_addr.soua_vp == NULL)) { 24510Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24525240Snordmark 1, SL_TRACE|SL_ERROR, 24535240Snordmark "tl_bind: explicit addr empty")); 24540Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24550Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 24560Sstevel@tonic-gate goto error; 24570Sstevel@tonic-gate } 24580Sstevel@tonic-gate } else { 24590Sstevel@tonic-gate if ((alen > 0) && ((aoff < 0) || 24605240Snordmark ((ssize_t)(aoff + alen) > msz) || 24615240Snordmark ((aoff + alen) < 0))) { 24620Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24635240Snordmark 1, SL_TRACE|SL_ERROR, 24645240Snordmark "tl_bind: invalid message")); 24650Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24660Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL; 24670Sstevel@tonic-gate goto error; 24680Sstevel@tonic-gate } 24690Sstevel@tonic-gate if ((alen < 0) || (alen > (msz - sizeof (struct T_bind_req)))) { 24700Sstevel@tonic-gate 
(void) (STRLOG(TL_ID, tep->te_minor, 24715240Snordmark 1, SL_TRACE|SL_ERROR, 24725240Snordmark "tl_bind: bad addr in message")); 24730Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 24740Sstevel@tonic-gate tli_err = TBADADDR; 24750Sstevel@tonic-gate goto error; 24760Sstevel@tonic-gate } 24770Sstevel@tonic-gate #ifdef DEBUG 24780Sstevel@tonic-gate /* 24790Sstevel@tonic-gate * Mild form of ASSERT()ion to detect broken TPI apps. 24800Sstevel@tonic-gate * if (! assertion) 24810Sstevel@tonic-gate * log warning; 24820Sstevel@tonic-gate */ 24830Sstevel@tonic-gate if (! ((alen == 0 && aoff == 0) || 24840Sstevel@tonic-gate (aoff >= (t_scalar_t)(sizeof (struct T_bind_req))))) { 24850Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 24860Sstevel@tonic-gate 3, SL_TRACE|SL_ERROR, 24870Sstevel@tonic-gate "tl_bind: addr overlaps TPI message")); 24880Sstevel@tonic-gate } 24890Sstevel@tonic-gate #endif 24900Sstevel@tonic-gate } 24910Sstevel@tonic-gate 24920Sstevel@tonic-gate /* 24930Sstevel@tonic-gate * Bind the address provided or allocate one if requested. 24940Sstevel@tonic-gate * Allow rebinds with a new qlen value. 24950Sstevel@tonic-gate */ 24960Sstevel@tonic-gate if (IS_SOCKET(tep)) { 24970Sstevel@tonic-gate /* 24980Sstevel@tonic-gate * For anonymous requests the te_ap is already set up properly 24990Sstevel@tonic-gate * so use minor number as an address. 25000Sstevel@tonic-gate * For explicit requests need to check whether the address is 25010Sstevel@tonic-gate * already in use. 25020Sstevel@tonic-gate */ 25030Sstevel@tonic-gate if (ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) { 25040Sstevel@tonic-gate int rc; 25050Sstevel@tonic-gate 25060Sstevel@tonic-gate if (tep->te_flag & TL_ADDRHASHED) { 25070Sstevel@tonic-gate ASSERT(IS_COTS(tep) && tep->te_qlen == 0); 25080Sstevel@tonic-gate if (tep->te_vp == ux_addr.soua_vp) 25090Sstevel@tonic-gate goto skip_addr_bind; 25100Sstevel@tonic-gate else /* Rebind to a new address. */ 25110Sstevel@tonic-gate tl_addr_unbind(tep); 25120Sstevel@tonic-gate } 25130Sstevel@tonic-gate /* 25140Sstevel@tonic-gate * Insert address in the hash if it is not already 25150Sstevel@tonic-gate * there. Since we use preallocated handle, the insert 25160Sstevel@tonic-gate * can fail only if the key is already present. 25170Sstevel@tonic-gate */ 25180Sstevel@tonic-gate rc = mod_hash_insert_reserve(tep->te_addrhash, 25190Sstevel@tonic-gate (mod_hash_key_t)ux_addr.soua_vp, 25200Sstevel@tonic-gate (mod_hash_val_t)tep, tep->te_hash_hndl); 25210Sstevel@tonic-gate 25220Sstevel@tonic-gate if (rc != 0) { 25230Sstevel@tonic-gate ASSERT(rc == MH_ERR_DUPLICATE); 25240Sstevel@tonic-gate /* 25250Sstevel@tonic-gate * Violate O_T_BIND_REQ semantics and fail with 25260Sstevel@tonic-gate * TADDRBUSY - sockets will not use any address 25270Sstevel@tonic-gate * other than supplied one for explicit binds. 
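 *
 * Contrast this with the TLI path further below: there a busy address
 * only fails a T_BIND_REQ, while an O_T_BIND_REQ falls back to
 * tl_get_any_addr() and binds to some other free address instead.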
25280Sstevel@tonic-gate */ 25290Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 25305240Snordmark SL_TRACE|SL_ERROR, 25315240Snordmark "tl_bind:requested addr %p is busy", 25325240Snordmark ux_addr.soua_vp)); 25330Sstevel@tonic-gate tli_err = TADDRBUSY; unix_err = 0; 25340Sstevel@tonic-gate goto error; 25350Sstevel@tonic-gate } 25360Sstevel@tonic-gate tep->te_uxaddr = ux_addr; 25370Sstevel@tonic-gate tep->te_flag |= TL_ADDRHASHED; 25380Sstevel@tonic-gate tep->te_hash_hndl = NULL; 25390Sstevel@tonic-gate } 25400Sstevel@tonic-gate } else if (alen == 0) { 25410Sstevel@tonic-gate /* 25420Sstevel@tonic-gate * assign any free address 25430Sstevel@tonic-gate */ 25440Sstevel@tonic-gate if (! tl_get_any_addr(tep, NULL)) { 25450Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 25465240Snordmark 1, SL_TRACE|SL_ERROR, 25475240Snordmark "tl_bind:failed to get buffer for any " 25485240Snordmark "address")); 25490Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOSR; 25500Sstevel@tonic-gate goto error; 25510Sstevel@tonic-gate } 25520Sstevel@tonic-gate } else { 25530Sstevel@tonic-gate addr_req.ta_alen = alen; 25540Sstevel@tonic-gate addr_req.ta_abuf = (mp->b_rptr + aoff); 25550Sstevel@tonic-gate addr_req.ta_zoneid = tep->te_zoneid; 25560Sstevel@tonic-gate 25570Sstevel@tonic-gate tep->te_abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP); 25580Sstevel@tonic-gate if (tep->te_abuf == NULL) { 25590Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOSR; 25600Sstevel@tonic-gate goto error; 25610Sstevel@tonic-gate } 25620Sstevel@tonic-gate bcopy(addr_req.ta_abuf, tep->te_abuf, addr_req.ta_alen); 25630Sstevel@tonic-gate tep->te_alen = alen; 25640Sstevel@tonic-gate 25650Sstevel@tonic-gate if (mod_hash_insert_reserve(tep->te_addrhash, 25665240Snordmark (mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep, 25675240Snordmark tep->te_hash_hndl) != 0) { 25680Sstevel@tonic-gate if (save_prim_type == T_BIND_REQ) { 25690Sstevel@tonic-gate /* 25700Sstevel@tonic-gate * The bind semantics for this primitive 25710Sstevel@tonic-gate * require a failure if the exact address 25720Sstevel@tonic-gate * requested is busy 25730Sstevel@tonic-gate */ 25740Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 25755240Snordmark SL_TRACE|SL_ERROR, 25765240Snordmark "tl_bind:requested addr is busy")); 25770Sstevel@tonic-gate tli_err = TADDRBUSY; unix_err = 0; 25780Sstevel@tonic-gate goto error; 25790Sstevel@tonic-gate } 25800Sstevel@tonic-gate 25810Sstevel@tonic-gate /* 25820Sstevel@tonic-gate * O_T_BIND_REQ semantics say if the requested 25830Sstevel@tonic-gate * address is busy, bind to any available free address 25840Sstevel@tonic-gate */ 25850Sstevel@tonic-gate if (!
tl_get_any_addr(tep, &addr_req)) { 25860Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 25875240Snordmark SL_TRACE|SL_ERROR, 25885240Snordmark "tl_bind:unable to get any addr buf")); 25890Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOMEM; 25900Sstevel@tonic-gate goto error; 25910Sstevel@tonic-gate } 25920Sstevel@tonic-gate } else { 25930Sstevel@tonic-gate tep->te_flag |= TL_ADDRHASHED; 25940Sstevel@tonic-gate tep->te_hash_hndl = NULL; 25950Sstevel@tonic-gate } 25960Sstevel@tonic-gate } 25970Sstevel@tonic-gate 25980Sstevel@tonic-gate ASSERT(tep->te_alen >= 0); 25990Sstevel@tonic-gate 26000Sstevel@tonic-gate skip_addr_bind: 26010Sstevel@tonic-gate /* 26020Sstevel@tonic-gate * prepare T_BIND_ACK TPI message 26030Sstevel@tonic-gate */ 26040Sstevel@tonic-gate basize = sizeof (struct T_bind_ack) + tep->te_alen; 26050Sstevel@tonic-gate bamp = reallocb(mp, basize, 0); 26060Sstevel@tonic-gate if (bamp == NULL) { 26070Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 26085240Snordmark "tl_wput:tl_bind: allocb failed")); 26090Sstevel@tonic-gate /* 26100Sstevel@tonic-gate * roll back state changes 26110Sstevel@tonic-gate */ 26120Sstevel@tonic-gate tl_addr_unbind(tep); 26130Sstevel@tonic-gate tep->te_state = TS_UNBND; 26140Sstevel@tonic-gate tl_memrecover(wq, mp, basize); 26150Sstevel@tonic-gate return; 26160Sstevel@tonic-gate } 26170Sstevel@tonic-gate 26180Sstevel@tonic-gate DB_TYPE(bamp) = M_PCPROTO; 26190Sstevel@tonic-gate bamp->b_wptr = bamp->b_rptr + basize; 26200Sstevel@tonic-gate b_ack = (struct T_bind_ack *)bamp->b_rptr; 26210Sstevel@tonic-gate b_ack->PRIM_type = T_BIND_ACK; 26220Sstevel@tonic-gate b_ack->CONIND_number = qlen; 26230Sstevel@tonic-gate b_ack->ADDR_length = tep->te_alen; 26240Sstevel@tonic-gate b_ack->ADDR_offset = (t_scalar_t)sizeof (struct T_bind_ack); 26250Sstevel@tonic-gate addr_startp = bamp->b_rptr + b_ack->ADDR_offset; 26260Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen); 26270Sstevel@tonic-gate 26280Sstevel@tonic-gate if (IS_COTS(tep)) { 26290Sstevel@tonic-gate tep->te_qlen = qlen; 26300Sstevel@tonic-gate if (qlen > 0) 26310Sstevel@tonic-gate tep->te_flag |= TL_LISTENER; 26320Sstevel@tonic-gate } 26330Sstevel@tonic-gate 26340Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_BIND_ACK, tep->te_state); 26350Sstevel@tonic-gate /* 26360Sstevel@tonic-gate * send T_BIND_ACK message 26370Sstevel@tonic-gate */ 26380Sstevel@tonic-gate (void) qreply(wq, bamp); 26390Sstevel@tonic-gate return; 26400Sstevel@tonic-gate 26410Sstevel@tonic-gate error: 26420Sstevel@tonic-gate ackmp = reallocb(mp, sizeof (struct T_error_ack), 0); 26430Sstevel@tonic-gate if (ackmp == NULL) { 26440Sstevel@tonic-gate /* 26450Sstevel@tonic-gate * roll back state changes 26460Sstevel@tonic-gate */ 26470Sstevel@tonic-gate tep->te_state = save_state; 26480Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 26490Sstevel@tonic-gate return; 26500Sstevel@tonic-gate } 26510Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 26520Sstevel@tonic-gate tl_error_ack(wq, ackmp, tli_err, unix_err, save_prim_type); 26530Sstevel@tonic-gate } 26540Sstevel@tonic-gate 26550Sstevel@tonic-gate /* 26560Sstevel@tonic-gate * Process T_UNBIND_REQ. 26570Sstevel@tonic-gate * Called from serializer. 
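 *
 * Preallocates the ack message, validates that the endpoint is in
 * TS_IDLE, sends the M_FLUSH required by TPI for both queues, drops
 * the bound address (except for the explicit socket rebind case
 * handled below) and acknowledges the request with T_OK_ACK.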
26580Sstevel@tonic-gate */ 26590Sstevel@tonic-gate static void 26600Sstevel@tonic-gate tl_unbind(mblk_t *mp, tl_endpt_t *tep) 26610Sstevel@tonic-gate { 26620Sstevel@tonic-gate queue_t *wq; 26630Sstevel@tonic-gate mblk_t *ackmp; 26640Sstevel@tonic-gate 26650Sstevel@tonic-gate if (tep->te_closing) { 26660Sstevel@tonic-gate freemsg(mp); 26670Sstevel@tonic-gate return; 26680Sstevel@tonic-gate } 26690Sstevel@tonic-gate 26700Sstevel@tonic-gate wq = tep->te_wq; 26710Sstevel@tonic-gate 26720Sstevel@tonic-gate /* 26730Sstevel@tonic-gate * preallocate memory for max of T_OK_ACK and T_ERROR_ACK 26740Sstevel@tonic-gate * ==> allocate for T_ERROR_ACK (known max) 26750Sstevel@tonic-gate */ 26760Sstevel@tonic-gate if ((ackmp = reallocb(mp, sizeof (struct T_error_ack), 0)) == NULL) { 26770Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 26780Sstevel@tonic-gate return; 26790Sstevel@tonic-gate } 26800Sstevel@tonic-gate /* 26810Sstevel@tonic-gate * memory resources committed 26820Sstevel@tonic-gate * Note: no message validation. T_UNBIND_REQ message is 26830Sstevel@tonic-gate * same size as PRIM_type field so already verified earlier. 26840Sstevel@tonic-gate */ 26850Sstevel@tonic-gate 26860Sstevel@tonic-gate /* 26870Sstevel@tonic-gate * validate state 26880Sstevel@tonic-gate */ 26890Sstevel@tonic-gate if (tep->te_state != TS_IDLE) { 26900Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 26915240Snordmark SL_TRACE|SL_ERROR, 26925240Snordmark "tl_wput:T_UNBIND_REQ:out of state, state=%d", 26935240Snordmark tep->te_state)); 26940Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_UNBIND_REQ); 26950Sstevel@tonic-gate return; 26960Sstevel@tonic-gate } 26970Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_UNBIND_REQ, tep->te_state); 26980Sstevel@tonic-gate 26990Sstevel@tonic-gate /* 27000Sstevel@tonic-gate * TPI says on T_UNBIND_REQ: 27010Sstevel@tonic-gate * send up a M_FLUSH to flush both 27020Sstevel@tonic-gate * read and write queues 27030Sstevel@tonic-gate */ 27040Sstevel@tonic-gate (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW); 27050Sstevel@tonic-gate 27060Sstevel@tonic-gate if (! IS_SOCKET(tep) || !IS_CLTS(tep) || tep->te_qlen != 0 || 27070Sstevel@tonic-gate tep->te_magic != SOU_MAGIC_EXPLICIT) { 27080Sstevel@tonic-gate 27090Sstevel@tonic-gate /* 27100Sstevel@tonic-gate * Sockets use bind with qlen==0 followed by bind() to 27110Sstevel@tonic-gate * the same address with qlen > 0 for listeners. 27120Sstevel@tonic-gate * We allow rebind with a new qlen value. 27130Sstevel@tonic-gate */ 27140Sstevel@tonic-gate tl_addr_unbind(tep); 27150Sstevel@tonic-gate } 27160Sstevel@tonic-gate 27170Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state); 27180Sstevel@tonic-gate /* 27190Sstevel@tonic-gate * send T_OK_ACK 27200Sstevel@tonic-gate */ 27210Sstevel@tonic-gate tl_ok_ack(wq, ackmp, T_UNBIND_REQ); 27220Sstevel@tonic-gate } 27230Sstevel@tonic-gate 27240Sstevel@tonic-gate 27250Sstevel@tonic-gate /* 27260Sstevel@tonic-gate * Option management code from drv/ip is used here 27270Sstevel@tonic-gate * Note: TL_PROT_LEVEL/TL_IOC_CREDOPT option is not part of tl_opt_arr 27280Sstevel@tonic-gate * database of options. So optcom_req() will fail T_SVR4_OPTMGMT_REQ. 
27290Sstevel@tonic-gate * However, that is what we want as that option is 'unorthodox' 27300Sstevel@tonic-gate * and only valid in T_CONN_IND, T_CONN_CON and T_UNITDATA_IND 27310Sstevel@tonic-gate * and not in T_SVR4_OPTMGMT_REQ/ACK 27320Sstevel@tonic-gate * Note2: use of optcom_req means this routine is an exception to 27330Sstevel@tonic-gate * recovery from allocb() failures. 27340Sstevel@tonic-gate */ 27350Sstevel@tonic-gate 27360Sstevel@tonic-gate static void 27370Sstevel@tonic-gate tl_optmgmt(queue_t *wq, mblk_t *mp) 27380Sstevel@tonic-gate { 27390Sstevel@tonic-gate tl_endpt_t *tep; 27400Sstevel@tonic-gate mblk_t *ackmp; 27410Sstevel@tonic-gate union T_primitives *prim; 27428778SErik.Nordmark@Sun.COM cred_t *cr; 27430Sstevel@tonic-gate 27440Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr; 27450Sstevel@tonic-gate prim = (union T_primitives *)mp->b_rptr; 27460Sstevel@tonic-gate 27478778SErik.Nordmark@Sun.COM /* 27488778SErik.Nordmark@Sun.COM * All Solaris components should pass a db_credp 27498778SErik.Nordmark@Sun.COM * for this TPI message, hence we ASSERT. 27508778SErik.Nordmark@Sun.COM * But in case there is some other M_PROTO that looks 27518778SErik.Nordmark@Sun.COM * like a TPI message sent by some other kernel 27528778SErik.Nordmark@Sun.COM * component, we check and return an error. 27538778SErik.Nordmark@Sun.COM */ 27548778SErik.Nordmark@Sun.COM cr = msg_getcred(mp, NULL); 27558778SErik.Nordmark@Sun.COM ASSERT(cr != NULL); 27568778SErik.Nordmark@Sun.COM if (cr == NULL) { 27578778SErik.Nordmark@Sun.COM tl_error_ack(wq, mp, TSYSERR, EINVAL, prim->type); 27588778SErik.Nordmark@Sun.COM return; 27598778SErik.Nordmark@Sun.COM } 27608778SErik.Nordmark@Sun.COM 27610Sstevel@tonic-gate /* all states OK for AF_UNIX options ? */ 27620Sstevel@tonic-gate if (!IS_SOCKET(tep) && tep->te_state != TS_IDLE && 27630Sstevel@tonic-gate prim->type == T_SVR4_OPTMGMT_REQ) { 27640Sstevel@tonic-gate /* 27650Sstevel@tonic-gate * Broken TLI semantics that options can only be managed 27660Sstevel@tonic-gate * in TS_IDLE state. Needed for Sparc ABI test suite that 27670Sstevel@tonic-gate * tests this TLI (mis)feature using this device driver. 27680Sstevel@tonic-gate */ 27690Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 27705240Snordmark SL_TRACE|SL_ERROR, 27715240Snordmark "tl_wput:T_SVR4_OPTMGMT_REQ:out of state, state=%d", 27725240Snordmark tep->te_state)); 27730Sstevel@tonic-gate /* 27740Sstevel@tonic-gate * preallocate memory for T_ERROR_ACK 27750Sstevel@tonic-gate */ 27760Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED); 27770Sstevel@tonic-gate if (! 
ackmp) { 27780Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 27790Sstevel@tonic-gate return; 27800Sstevel@tonic-gate } 27810Sstevel@tonic-gate 27820Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_SVR4_OPTMGMT_REQ); 27830Sstevel@tonic-gate freemsg(mp); 27840Sstevel@tonic-gate return; 27850Sstevel@tonic-gate } 27860Sstevel@tonic-gate 27870Sstevel@tonic-gate /* 27880Sstevel@tonic-gate * call common option management routine from drv/ip 27890Sstevel@tonic-gate */ 27900Sstevel@tonic-gate if (prim->type == T_SVR4_OPTMGMT_REQ) { 279111042SErik.Nordmark@Sun.COM svr4_optcom_req(wq, mp, cr, &tl_opt_obj); 27920Sstevel@tonic-gate } else { 27930Sstevel@tonic-gate ASSERT(prim->type == T_OPTMGMT_REQ); 279411042SErik.Nordmark@Sun.COM tpi_optcom_req(wq, mp, cr, &tl_opt_obj); 27950Sstevel@tonic-gate } 27960Sstevel@tonic-gate } 27970Sstevel@tonic-gate 27980Sstevel@tonic-gate /* 27990Sstevel@tonic-gate * Handle T_conn_req - the driver part of accept(). 28000Sstevel@tonic-gate * If TL_SET[U]CRED generate the credentials options. 28010Sstevel@tonic-gate * If this is a socket pass through options unmodified. 28020Sstevel@tonic-gate * For sockets generate the T_CONN_CON here instead of 28030Sstevel@tonic-gate * waiting for the T_CONN_RES. 28040Sstevel@tonic-gate */ 28050Sstevel@tonic-gate static void 28060Sstevel@tonic-gate tl_conn_req(queue_t *wq, mblk_t *mp) 28070Sstevel@tonic-gate { 28080Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr; 28090Sstevel@tonic-gate struct T_conn_req *creq = (struct T_conn_req *)mp->b_rptr; 28100Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 28110Sstevel@tonic-gate t_scalar_t alen, aoff, olen, ooff, err = 0; 28120Sstevel@tonic-gate tl_endpt_t *peer_tep = NULL; 28130Sstevel@tonic-gate mblk_t *ackmp; 28140Sstevel@tonic-gate mblk_t *dimp; 28150Sstevel@tonic-gate struct T_discon_ind *di; 28160Sstevel@tonic-gate soux_addr_t ux_addr; 28170Sstevel@tonic-gate tl_addr_t dst; 28180Sstevel@tonic-gate 28190Sstevel@tonic-gate ASSERT(IS_COTS(tep)); 28200Sstevel@tonic-gate 28210Sstevel@tonic-gate if (tep->te_closing) { 28220Sstevel@tonic-gate freemsg(mp); 28230Sstevel@tonic-gate return; 28240Sstevel@tonic-gate } 28250Sstevel@tonic-gate 28260Sstevel@tonic-gate /* 28270Sstevel@tonic-gate * preallocate memory for: 28280Sstevel@tonic-gate * 1. max of T_ERROR_ACK and T_OK_ACK 28290Sstevel@tonic-gate * ==> known max T_ERROR_ACK 28300Sstevel@tonic-gate * 2. max of T_DISCON_IND and T_CONN_IND 28310Sstevel@tonic-gate */ 28320Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED); 28330Sstevel@tonic-gate if (! 
ackmp) { 28340Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 28350Sstevel@tonic-gate return; 28360Sstevel@tonic-gate } 28370Sstevel@tonic-gate /* 28380Sstevel@tonic-gate * memory committed for T_OK_ACK/T_ERROR_ACK now 28390Sstevel@tonic-gate * will be committed for T_DISCON_IND/T_CONN_IND later 28400Sstevel@tonic-gate */ 28410Sstevel@tonic-gate 28420Sstevel@tonic-gate if (tep->te_state != TS_IDLE) { 28430Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 28445240Snordmark SL_TRACE|SL_ERROR, 28455240Snordmark "tl_wput:T_CONN_REQ:out of state, state=%d", 28465240Snordmark tep->te_state)); 28470Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ); 28480Sstevel@tonic-gate freemsg(mp); 28490Sstevel@tonic-gate return; 28500Sstevel@tonic-gate } 28510Sstevel@tonic-gate 28520Sstevel@tonic-gate /* 28530Sstevel@tonic-gate * validate the message 28540Sstevel@tonic-gate * Note: dereference fields in struct inside message only 28550Sstevel@tonic-gate * after validating the message length. 28560Sstevel@tonic-gate */ 28570Sstevel@tonic-gate if (msz < sizeof (struct T_conn_req)) { 28580Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 28595240Snordmark "tl_conn_req:invalid message length")); 28600Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ); 28610Sstevel@tonic-gate freemsg(mp); 28620Sstevel@tonic-gate return; 28630Sstevel@tonic-gate } 28640Sstevel@tonic-gate alen = creq->DEST_length; 28650Sstevel@tonic-gate aoff = creq->DEST_offset; 28660Sstevel@tonic-gate olen = creq->OPT_length; 28670Sstevel@tonic-gate ooff = creq->OPT_offset; 28680Sstevel@tonic-gate if (olen == 0) 28690Sstevel@tonic-gate ooff = 0; 28700Sstevel@tonic-gate 28710Sstevel@tonic-gate if (IS_SOCKET(tep)) { 28720Sstevel@tonic-gate if ((alen != TL_SOUX_ADDRLEN) || 28730Sstevel@tonic-gate (aoff < 0) || 28740Sstevel@tonic-gate (aoff + alen > msz) || 28750Sstevel@tonic-gate (alen > msz - sizeof (struct T_conn_req))) { 28760Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 28770Sstevel@tonic-gate 1, SL_TRACE|SL_ERROR, 28780Sstevel@tonic-gate "tl_conn_req: invalid socket addr")); 28790Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ); 28800Sstevel@tonic-gate freemsg(mp); 28810Sstevel@tonic-gate return; 28820Sstevel@tonic-gate } 28830Sstevel@tonic-gate bcopy(mp->b_rptr + aoff, &ux_addr, TL_SOUX_ADDRLEN); 28840Sstevel@tonic-gate if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) && 28850Sstevel@tonic-gate (ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) { 28860Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 28875240Snordmark 1, SL_TRACE|SL_ERROR, 28885240Snordmark "tl_conn_req: invalid socket magic")); 28890Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ); 28900Sstevel@tonic-gate freemsg(mp); 28910Sstevel@tonic-gate return; 28920Sstevel@tonic-gate } 28930Sstevel@tonic-gate } else { 28940Sstevel@tonic-gate if ((alen > 0 && ((aoff + alen) > msz || aoff + alen < 0)) || 28950Sstevel@tonic-gate (olen > 0 && ((ssize_t)(ooff + olen) > msz || 28965240Snordmark ooff + olen < 0)) || 28970Sstevel@tonic-gate olen < 0 || ooff < 0) { 28980Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 28995240Snordmark SL_TRACE|SL_ERROR, 29005240Snordmark "tl_conn_req:invalid message")); 29010Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ); 29020Sstevel@tonic-gate freemsg(mp); 29030Sstevel@tonic-gate return; 29040Sstevel@tonic-gate } 29050Sstevel@tonic-gate 29060Sstevel@tonic-gate if (alen <= 0 || 
aoff < 0 || 29070Sstevel@tonic-gate (ssize_t)alen > msz - sizeof (struct T_conn_req)) { 29080Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 29090Sstevel@tonic-gate SL_TRACE|SL_ERROR, 29100Sstevel@tonic-gate "tl_conn_req:bad addr in message, " 29110Sstevel@tonic-gate "alen=%d, msz=%ld", 29120Sstevel@tonic-gate alen, msz)); 29130Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADADDR, 0, T_CONN_REQ); 29140Sstevel@tonic-gate freemsg(mp); 29150Sstevel@tonic-gate return; 29160Sstevel@tonic-gate } 29170Sstevel@tonic-gate #ifdef DEBUG 29180Sstevel@tonic-gate /* 29190Sstevel@tonic-gate * Mild form of ASSERT()ion to detect broken TPI apps. 29200Sstevel@tonic-gate * if (! assertion) 29210Sstevel@tonic-gate * log warning; 29220Sstevel@tonic-gate */ 29230Sstevel@tonic-gate if (! (aoff >= (t_scalar_t)sizeof (struct T_conn_req))) { 29240Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 29255240Snordmark SL_TRACE|SL_ERROR, 29265240Snordmark "tl_conn_req: addr overlaps TPI message")); 29270Sstevel@tonic-gate } 29280Sstevel@tonic-gate #endif 29290Sstevel@tonic-gate if (olen) { 29300Sstevel@tonic-gate /* 29310Sstevel@tonic-gate * no opts in connect req 29320Sstevel@tonic-gate * supported in this provider except for sockets. 29330Sstevel@tonic-gate */ 29340Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 29355240Snordmark SL_TRACE|SL_ERROR, 29365240Snordmark "tl_conn_req:options not supported " 29375240Snordmark "in message")); 29380Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADOPT, 0, T_CONN_REQ); 29390Sstevel@tonic-gate freemsg(mp); 29400Sstevel@tonic-gate return; 29410Sstevel@tonic-gate } 29420Sstevel@tonic-gate } 29430Sstevel@tonic-gate 29440Sstevel@tonic-gate /* 29450Sstevel@tonic-gate * Prevent tep from closing on us. 29460Sstevel@tonic-gate */ 29470Sstevel@tonic-gate if (! tl_noclose(tep)) { 29480Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 29495240Snordmark "tl_conn_req:endpoint is closing")); 29500Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ); 29510Sstevel@tonic-gate freemsg(mp); 29520Sstevel@tonic-gate return; 29530Sstevel@tonic-gate } 29540Sstevel@tonic-gate 29550Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_CONN_REQ, tep->te_state); 29560Sstevel@tonic-gate /* 29570Sstevel@tonic-gate * get endpoint to connect to 29580Sstevel@tonic-gate * check that peer with DEST addr is bound to addr 29590Sstevel@tonic-gate * and has CONIND_number > 0 29600Sstevel@tonic-gate */ 29610Sstevel@tonic-gate dst.ta_alen = alen; 29620Sstevel@tonic-gate dst.ta_abuf = mp->b_rptr + aoff; 29630Sstevel@tonic-gate dst.ta_zoneid = tep->te_zoneid; 29640Sstevel@tonic-gate 29650Sstevel@tonic-gate /* 29660Sstevel@tonic-gate * Verify if remote addr is in use 29670Sstevel@tonic-gate */ 29680Sstevel@tonic-gate peer_tep = (IS_SOCKET(tep) ? 
29690Sstevel@tonic-gate tl_sock_find_peer(tep, &ux_addr) : 29700Sstevel@tonic-gate tl_find_peer(tep, &dst)); 29710Sstevel@tonic-gate 29720Sstevel@tonic-gate if (peer_tep == NULL) { 29730Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 29745240Snordmark "tl_conn_req:no one at connect address")); 29750Sstevel@tonic-gate err = ECONNREFUSED; 29760Sstevel@tonic-gate } else if (peer_tep->te_nicon >= peer_tep->te_qlen) { 29770Sstevel@tonic-gate /* 29780Sstevel@tonic-gate * validate that number of incoming connection is 29790Sstevel@tonic-gate * not to capacity on destination endpoint 29800Sstevel@tonic-gate */ 29810Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE, 29825240Snordmark "tl_conn_req: qlen overflow connection refused")); 29830Sstevel@tonic-gate err = ECONNREFUSED; 29840Sstevel@tonic-gate } 29850Sstevel@tonic-gate 29860Sstevel@tonic-gate /* 29873661Sakolb * Send T_DISCON_IND in case of error 29880Sstevel@tonic-gate */ 29890Sstevel@tonic-gate if (err != 0) { 29900Sstevel@tonic-gate if (peer_tep != NULL) 29910Sstevel@tonic-gate tl_refrele(peer_tep); 29920Sstevel@tonic-gate /* We are still expected to send T_OK_ACK */ 29930Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state); 29940Sstevel@tonic-gate tl_ok_ack(tep->te_wq, ackmp, T_CONN_REQ); 29950Sstevel@tonic-gate tl_closeok(tep); 29960Sstevel@tonic-gate dimp = tpi_ack_alloc(mp, sizeof (struct T_discon_ind), 29970Sstevel@tonic-gate M_PROTO, T_DISCON_IND); 29980Sstevel@tonic-gate if (dimp == NULL) { 29990Sstevel@tonic-gate tl_merror(wq, NULL, ENOSR); 30000Sstevel@tonic-gate return; 30010Sstevel@tonic-gate } 30020Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr; 30030Sstevel@tonic-gate di->DISCON_reason = err; 30040Sstevel@tonic-gate di->SEQ_number = BADSEQNUM; 30050Sstevel@tonic-gate 30060Sstevel@tonic-gate tep->te_state = TS_IDLE; 30070Sstevel@tonic-gate /* 30080Sstevel@tonic-gate * send T_DISCON_IND message 30090Sstevel@tonic-gate */ 30100Sstevel@tonic-gate putnext(tep->te_rq, dimp); 30110Sstevel@tonic-gate return; 30120Sstevel@tonic-gate } 30130Sstevel@tonic-gate 30140Sstevel@tonic-gate ASSERT(IS_COTS(peer_tep)); 30150Sstevel@tonic-gate 30160Sstevel@tonic-gate /* 30170Sstevel@tonic-gate * Found the listener. At this point processing will continue on 30180Sstevel@tonic-gate * listener serializer. Close of the endpoint should be blocked while we 30190Sstevel@tonic-gate * switch serializers. 30200Sstevel@tonic-gate */ 30210Sstevel@tonic-gate tl_serializer_refhold(peer_tep->te_ser); 30220Sstevel@tonic-gate tl_serializer_refrele(tep->te_ser); 30230Sstevel@tonic-gate tep->te_ser = peer_tep->te_ser; 30240Sstevel@tonic-gate ASSERT(tep->te_oconp == NULL); 30250Sstevel@tonic-gate tep->te_oconp = peer_tep; 30260Sstevel@tonic-gate 30270Sstevel@tonic-gate /* 30280Sstevel@tonic-gate * It is safe to close now. Close may continue on listener serializer. 30290Sstevel@tonic-gate */ 30300Sstevel@tonic-gate tl_closeok(tep); 30310Sstevel@tonic-gate 30320Sstevel@tonic-gate /* 30330Sstevel@tonic-gate * Pass ackmp to tl_conn_req_ser. Note that mp->b_cont may contain user 30340Sstevel@tonic-gate * data, so we link mp to ackmp. 
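 *
 * tl_conn_req_ser() undoes this linkage: ackmp arrives at the head of
 * the chain and the original T_CONN_REQ is unlinked from it before
 * either message is used.  The tl_refhold() below keeps tep alive
 * until tl_conn_req_ser() runs and drops that reference with
 * tl_refrele().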
30350Sstevel@tonic-gate */ 30360Sstevel@tonic-gate ackmp->b_cont = mp; 30370Sstevel@tonic-gate mp = ackmp; 30380Sstevel@tonic-gate 30390Sstevel@tonic-gate tl_refhold(tep); 30400Sstevel@tonic-gate tl_serializer_enter(tep, tl_conn_req_ser, mp); 30410Sstevel@tonic-gate } 30420Sstevel@tonic-gate 30430Sstevel@tonic-gate /* 30440Sstevel@tonic-gate * Finish T_CONN_REQ processing on listener serializer. 30450Sstevel@tonic-gate */ 30460Sstevel@tonic-gate static void 30470Sstevel@tonic-gate tl_conn_req_ser(mblk_t *mp, tl_endpt_t *tep) 30480Sstevel@tonic-gate { 30490Sstevel@tonic-gate queue_t *wq; 30500Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_oconp; 30510Sstevel@tonic-gate mblk_t *confmp, *cimp, *indmp; 30520Sstevel@tonic-gate void *opts = NULL; 30530Sstevel@tonic-gate mblk_t *ackmp = mp; 30540Sstevel@tonic-gate struct T_conn_req *creq = (struct T_conn_req *)mp->b_cont->b_rptr; 30550Sstevel@tonic-gate struct T_conn_ind *ci; 30560Sstevel@tonic-gate tl_icon_t *tip; 30570Sstevel@tonic-gate void *addr_startp; 30580Sstevel@tonic-gate t_scalar_t olen = creq->OPT_length; 30590Sstevel@tonic-gate t_scalar_t ooff = creq->OPT_offset; 30600Sstevel@tonic-gate size_t ci_msz; 30610Sstevel@tonic-gate size_t size; 3062*11134SCasper.Dik@Sun.COM cred_t *cr = NULL; 3063*11134SCasper.Dik@Sun.COM pid_t cpid; 30640Sstevel@tonic-gate 30650Sstevel@tonic-gate if (tep->te_closing) { 30660Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 30670Sstevel@tonic-gate tl_serializer_exit(tep); 30680Sstevel@tonic-gate tl_refrele(tep); 30690Sstevel@tonic-gate freemsg(mp); 30700Sstevel@tonic-gate return; 30710Sstevel@tonic-gate } 30720Sstevel@tonic-gate 30730Sstevel@tonic-gate wq = tep->te_wq; 30740Sstevel@tonic-gate tep->te_flag |= TL_EAGER; 30750Sstevel@tonic-gate 30760Sstevel@tonic-gate /* 30770Sstevel@tonic-gate * Extract preallocated ackmp from mp. 
30780Sstevel@tonic-gate */ 30790Sstevel@tonic-gate mp = mp->b_cont; 30800Sstevel@tonic-gate ackmp->b_cont = NULL; 30810Sstevel@tonic-gate 30820Sstevel@tonic-gate if (olen == 0) 30830Sstevel@tonic-gate ooff = 0; 30840Sstevel@tonic-gate 30850Sstevel@tonic-gate if (peer_tep->te_closing || 30860Sstevel@tonic-gate !((peer_tep->te_state == TS_IDLE) || 30875240Snordmark (peer_tep->te_state == TS_WRES_CIND))) { 30883661Sakolb (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR, 30895240Snordmark "tl_conn_req:peer in bad state (%d)", 30905240Snordmark peer_tep->te_state)); 30910Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 30920Sstevel@tonic-gate tl_error_ack(wq, mp, TSYSERR, ECONNREFUSED, T_CONN_REQ); 30930Sstevel@tonic-gate freemsg(ackmp); 30940Sstevel@tonic-gate tl_serializer_exit(tep); 30950Sstevel@tonic-gate tl_refrele(tep); 30960Sstevel@tonic-gate return; 30970Sstevel@tonic-gate } 30980Sstevel@tonic-gate 30990Sstevel@tonic-gate /* 31000Sstevel@tonic-gate * preallocate now for T_DISCON_IND or T_CONN_IND 31010Sstevel@tonic-gate */ 31020Sstevel@tonic-gate /* 31030Sstevel@tonic-gate * calculate length of T_CONN_IND message 31040Sstevel@tonic-gate */ 3105*11134SCasper.Dik@Sun.COM if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED)) { 3106*11134SCasper.Dik@Sun.COM cr = msg_getcred(mp, &cpid); 3107*11134SCasper.Dik@Sun.COM ASSERT(cr != NULL); 3108*11134SCasper.Dik@Sun.COM if (peer_tep->te_flag & TL_SETCRED) { 3109*11134SCasper.Dik@Sun.COM ooff = 0; 3110*11134SCasper.Dik@Sun.COM olen = (t_scalar_t) sizeof (struct opthdr) + 3111*11134SCasper.Dik@Sun.COM OPTLEN(sizeof (tl_credopt_t)); 3112*11134SCasper.Dik@Sun.COM /* 1 option only */ 3113*11134SCasper.Dik@Sun.COM } else { 3114*11134SCasper.Dik@Sun.COM ooff = 0; 3115*11134SCasper.Dik@Sun.COM olen = (t_scalar_t)sizeof (struct opthdr) + 3116*11134SCasper.Dik@Sun.COM OPTLEN(ucredminsize(cr)); 3117*11134SCasper.Dik@Sun.COM /* 1 option only */ 3118*11134SCasper.Dik@Sun.COM } 31190Sstevel@tonic-gate } 31200Sstevel@tonic-gate ci_msz = sizeof (struct T_conn_ind) + tep->te_alen; 31210Sstevel@tonic-gate ci_msz = T_ALIGN(ci_msz) + olen; 31220Sstevel@tonic-gate size = max(ci_msz, sizeof (struct T_discon_ind)); 31230Sstevel@tonic-gate 31240Sstevel@tonic-gate /* 31250Sstevel@tonic-gate * Save options from mp - we'll need them for T_CONN_IND. 31260Sstevel@tonic-gate */ 31270Sstevel@tonic-gate if (ooff != 0) { 31280Sstevel@tonic-gate opts = kmem_alloc(olen, KM_NOSLEEP); 31290Sstevel@tonic-gate if (opts == NULL) { 31300Sstevel@tonic-gate /* 31310Sstevel@tonic-gate * roll back state changes 31320Sstevel@tonic-gate */ 31330Sstevel@tonic-gate tep->te_state = TS_IDLE; 31340Sstevel@tonic-gate tl_memrecover(wq, mp, size); 31350Sstevel@tonic-gate freemsg(ackmp); 31360Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 31370Sstevel@tonic-gate tl_serializer_exit(tep); 31380Sstevel@tonic-gate tl_refrele(tep); 31390Sstevel@tonic-gate return; 31400Sstevel@tonic-gate } 31410Sstevel@tonic-gate /* Copy options to a temp buffer */ 31420Sstevel@tonic-gate bcopy(mp->b_rptr + ooff, opts, olen); 31430Sstevel@tonic-gate } 31440Sstevel@tonic-gate 31450Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect) { 31460Sstevel@tonic-gate /* 31470Sstevel@tonic-gate * Generate a T_CONN_CON that has the identical address 31480Sstevel@tonic-gate * (and options) as the T_CONN_REQ. 31490Sstevel@tonic-gate * NOTE: assumes that the T_conn_req and T_conn_con structures 31500Sstevel@tonic-gate * are isomorphic. 
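 *
 * Both structures (see <sys/tihdr.h>) are laid out as PRIM_type
 * followed by an address length/offset pair (DEST_* vs. RES_*) and an
 * option length/offset pair, so copying the request and overwriting
 * PRIM_type with T_CONN_CON (done just below) yields a well-formed
 * confirmation:
 *
 *	struct T_conn_req { PRIM_type, DEST_length, DEST_offset,
 *	    OPT_length, OPT_offset };
 *	struct T_conn_con { PRIM_type, RES_length, RES_offset,
 *	    OPT_length, OPT_offset };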
31510Sstevel@tonic-gate */ 31520Sstevel@tonic-gate confmp = copyb(mp); 31530Sstevel@tonic-gate if (! confmp) { 31540Sstevel@tonic-gate /* 31550Sstevel@tonic-gate * roll back state changes 31560Sstevel@tonic-gate */ 31570Sstevel@tonic-gate tep->te_state = TS_IDLE; 31580Sstevel@tonic-gate tl_memrecover(wq, mp, mp->b_wptr - mp->b_rptr); 31590Sstevel@tonic-gate freemsg(ackmp); 31600Sstevel@tonic-gate if (opts != NULL) 31610Sstevel@tonic-gate kmem_free(opts, olen); 31620Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 31630Sstevel@tonic-gate tl_serializer_exit(tep); 31640Sstevel@tonic-gate tl_refrele(tep); 31650Sstevel@tonic-gate return; 31660Sstevel@tonic-gate } 31670Sstevel@tonic-gate ((struct T_conn_con *)(confmp->b_rptr))->PRIM_type = 31685240Snordmark T_CONN_CON; 31690Sstevel@tonic-gate } else { 31700Sstevel@tonic-gate confmp = NULL; 31710Sstevel@tonic-gate } 31720Sstevel@tonic-gate if ((indmp = reallocb(mp, size, 0)) == NULL) { 31730Sstevel@tonic-gate /* 31740Sstevel@tonic-gate * roll back state changes 31750Sstevel@tonic-gate */ 31760Sstevel@tonic-gate tep->te_state = TS_IDLE; 31770Sstevel@tonic-gate tl_memrecover(wq, mp, size); 31780Sstevel@tonic-gate freemsg(ackmp); 31790Sstevel@tonic-gate if (opts != NULL) 31800Sstevel@tonic-gate kmem_free(opts, olen); 31810Sstevel@tonic-gate freemsg(confmp); 31820Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 31830Sstevel@tonic-gate tl_serializer_exit(tep); 31840Sstevel@tonic-gate tl_refrele(tep); 31850Sstevel@tonic-gate return; 31860Sstevel@tonic-gate } 31870Sstevel@tonic-gate 31880Sstevel@tonic-gate tip = kmem_zalloc(sizeof (*tip), KM_NOSLEEP); 31890Sstevel@tonic-gate if (tip == NULL) { 31900Sstevel@tonic-gate /* 31910Sstevel@tonic-gate * roll back state changes 31920Sstevel@tonic-gate */ 31930Sstevel@tonic-gate tep->te_state = TS_IDLE; 31940Sstevel@tonic-gate tl_memrecover(wq, indmp, sizeof (*tip)); 31950Sstevel@tonic-gate freemsg(ackmp); 31960Sstevel@tonic-gate if (opts != NULL) 31970Sstevel@tonic-gate kmem_free(opts, olen); 31980Sstevel@tonic-gate freemsg(confmp); 31990Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 32000Sstevel@tonic-gate tl_serializer_exit(tep); 32010Sstevel@tonic-gate tl_refrele(tep); 32020Sstevel@tonic-gate return; 32030Sstevel@tonic-gate } 32040Sstevel@tonic-gate tip->ti_mp = NULL; 32050Sstevel@tonic-gate 32060Sstevel@tonic-gate /* 32070Sstevel@tonic-gate * memory is now committed for T_DISCON_IND/T_CONN_IND/T_CONN_CON 32080Sstevel@tonic-gate * and tl_icon_t cell. 32090Sstevel@tonic-gate */ 32100Sstevel@tonic-gate 32110Sstevel@tonic-gate /* 32120Sstevel@tonic-gate * ack validity of request and send the peer credential in the ACK. 32130Sstevel@tonic-gate */ 32140Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state); 32150Sstevel@tonic-gate 32160Sstevel@tonic-gate if (peer_tep != NULL && peer_tep->te_credp != NULL && 32170Sstevel@tonic-gate confmp != NULL) { 32188778SErik.Nordmark@Sun.COM mblk_setcred(confmp, peer_tep->te_credp, peer_tep->te_cpid); 32190Sstevel@tonic-gate } 32200Sstevel@tonic-gate 32210Sstevel@tonic-gate tl_ok_ack(wq, ackmp, T_CONN_REQ); 32220Sstevel@tonic-gate 32230Sstevel@tonic-gate /* 32240Sstevel@tonic-gate * prepare message to send T_CONN_IND 32250Sstevel@tonic-gate */ 32260Sstevel@tonic-gate /* 32270Sstevel@tonic-gate * allocate the message - original data blocks retained 32280Sstevel@tonic-gate * in the returned mblk 32290Sstevel@tonic-gate */ 32300Sstevel@tonic-gate cimp = tl_resizemp(indmp, size); 32310Sstevel@tonic-gate if (! 
cimp) { 32320Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 32335240Snordmark "tl_conn_req:con_ind:allocb failure")); 32340Sstevel@tonic-gate tl_merror(wq, indmp, ENOMEM); 32350Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 32360Sstevel@tonic-gate tl_serializer_exit(tep); 32370Sstevel@tonic-gate tl_refrele(tep); 32380Sstevel@tonic-gate if (opts != NULL) 32390Sstevel@tonic-gate kmem_free(opts, olen); 32400Sstevel@tonic-gate freemsg(confmp); 32410Sstevel@tonic-gate ASSERT(tip->ti_mp == NULL); 32420Sstevel@tonic-gate kmem_free(tip, sizeof (*tip)); 32430Sstevel@tonic-gate return; 32440Sstevel@tonic-gate } 32450Sstevel@tonic-gate 32460Sstevel@tonic-gate DB_TYPE(cimp) = M_PROTO; 32470Sstevel@tonic-gate ci = (struct T_conn_ind *)cimp->b_rptr; 32480Sstevel@tonic-gate ci->PRIM_type = T_CONN_IND; 32490Sstevel@tonic-gate ci->SRC_offset = (t_scalar_t)sizeof (struct T_conn_ind); 32500Sstevel@tonic-gate ci->SRC_length = tep->te_alen; 32510Sstevel@tonic-gate ci->SEQ_number = tep->te_seqno; 32520Sstevel@tonic-gate 32530Sstevel@tonic-gate addr_startp = cimp->b_rptr + ci->SRC_offset; 32540Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen); 32550Sstevel@tonic-gate if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED)) { 32568778SErik.Nordmark@Sun.COM 32570Sstevel@tonic-gate ci->OPT_offset = (t_scalar_t)T_ALIGN(ci->SRC_offset + 32585240Snordmark ci->SRC_length); 32590Sstevel@tonic-gate ci->OPT_length = olen; /* because only 1 option */ 32600Sstevel@tonic-gate tl_fill_option(cimp->b_rptr + ci->OPT_offset, 32618778SErik.Nordmark@Sun.COM cr, cpid, 32625240Snordmark peer_tep->te_flag, peer_tep->te_credp); 32630Sstevel@tonic-gate } else if (ooff != 0) { 32640Sstevel@tonic-gate /* Copy option from T_CONN_REQ */ 32650Sstevel@tonic-gate ci->OPT_offset = (t_scalar_t)T_ALIGN(ci->SRC_offset + 32665240Snordmark ci->SRC_length); 32670Sstevel@tonic-gate ci->OPT_length = olen; 32680Sstevel@tonic-gate ASSERT(opts != NULL); 32690Sstevel@tonic-gate bcopy(opts, (void *)((uintptr_t)ci + ci->OPT_offset), olen); 32700Sstevel@tonic-gate } else { 32710Sstevel@tonic-gate ci->OPT_offset = 0; 32720Sstevel@tonic-gate ci->OPT_length = 0; 32730Sstevel@tonic-gate } 32740Sstevel@tonic-gate if (opts != NULL) 32750Sstevel@tonic-gate kmem_free(opts, olen); 32760Sstevel@tonic-gate 32770Sstevel@tonic-gate /* 32780Sstevel@tonic-gate * register connection request with server peer 32790Sstevel@tonic-gate * append to list of incoming connections 32800Sstevel@tonic-gate * increment references for both peer_tep and tep: peer_tep is placed on 32810Sstevel@tonic-gate * te_oconp and tep is placed on listeners queue. 32820Sstevel@tonic-gate */ 32830Sstevel@tonic-gate tip->ti_tep = tep; 32840Sstevel@tonic-gate tip->ti_seqno = tep->te_seqno; 32850Sstevel@tonic-gate list_insert_tail(&peer_tep->te_iconp, tip); 32860Sstevel@tonic-gate peer_tep->te_nicon++; 32870Sstevel@tonic-gate 32880Sstevel@tonic-gate peer_tep->te_state = NEXTSTATE(TE_CONN_IND, peer_tep->te_state); 32890Sstevel@tonic-gate /* 32900Sstevel@tonic-gate * send the T_CONN_IND message 32910Sstevel@tonic-gate */ 32920Sstevel@tonic-gate putnext(peer_tep->te_rq, cimp); 32930Sstevel@tonic-gate 32940Sstevel@tonic-gate /* 32950Sstevel@tonic-gate * Send a T_CONN_CON message for sockets. 32960Sstevel@tonic-gate * Disable the queues until we have reached the correct state! 
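 *
 * noenable() keeps the write-side service procedure from being
 * scheduled for messages queued on wq, so data the connecting socket
 * sends right after this early T_CONN_CON is held back until the
 * endpoint has reached the proper state; the T_CONN_CON itself is
 * passed upstream immediately with putnext().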
32970Sstevel@tonic-gate */ 32980Sstevel@tonic-gate if (confmp != NULL) { 32990Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_CONN_CON, tep->te_state); 33000Sstevel@tonic-gate noenable(wq); 33010Sstevel@tonic-gate putnext(tep->te_rq, confmp); 33020Sstevel@tonic-gate } 33030Sstevel@tonic-gate /* 33040Sstevel@tonic-gate * Now we need to increment tep reference because tep is referenced by 33050Sstevel@tonic-gate * server list of pending connections. We also need to decrement 33060Sstevel@tonic-gate * reference before exiting serializer. Two operations void each other 33070Sstevel@tonic-gate * so we don't modify reference at all. 33080Sstevel@tonic-gate */ 33090Sstevel@tonic-gate ASSERT(tep->te_refcnt >= 2); 33100Sstevel@tonic-gate ASSERT(peer_tep->te_refcnt >= 2); 33110Sstevel@tonic-gate tl_serializer_exit(tep); 33120Sstevel@tonic-gate } 33130Sstevel@tonic-gate 33140Sstevel@tonic-gate 33150Sstevel@tonic-gate 33160Sstevel@tonic-gate /* 33170Sstevel@tonic-gate * Handle T_conn_res on listener stream. Called on listener serializer. 33180Sstevel@tonic-gate * tl_conn_req has already generated the T_CONN_CON. 33190Sstevel@tonic-gate * tl_conn_res is called on listener serializer. 33200Sstevel@tonic-gate * No one accesses acceptor at this point, so it is safe to modify acceptor. 33210Sstevel@tonic-gate * Switch eager serializer to acceptor's. 33220Sstevel@tonic-gate * 33230Sstevel@tonic-gate * If TL_SET[U]CRED generate the credentials options. 33240Sstevel@tonic-gate * For sockets tl_conn_req has already generated the T_CONN_CON. 33250Sstevel@tonic-gate */ 33260Sstevel@tonic-gate static void 33270Sstevel@tonic-gate tl_conn_res(mblk_t *mp, tl_endpt_t *tep) 33280Sstevel@tonic-gate { 33290Sstevel@tonic-gate queue_t *wq; 33300Sstevel@tonic-gate struct T_conn_res *cres = (struct T_conn_res *)mp->b_rptr; 33310Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 33320Sstevel@tonic-gate t_scalar_t olen, ooff, err = 0; 33330Sstevel@tonic-gate t_scalar_t prim = cres->PRIM_type; 33340Sstevel@tonic-gate uchar_t *addr_startp; 33350Sstevel@tonic-gate tl_endpt_t *acc_ep = NULL, *cl_ep = NULL; 33360Sstevel@tonic-gate tl_icon_t *tip; 33370Sstevel@tonic-gate size_t size; 33380Sstevel@tonic-gate mblk_t *ackmp, *respmp; 33390Sstevel@tonic-gate mblk_t *dimp, *ccmp = NULL; 33400Sstevel@tonic-gate struct T_discon_ind *di; 33410Sstevel@tonic-gate struct T_conn_con *cc; 33420Sstevel@tonic-gate boolean_t client_noclose_set = B_FALSE; 33430Sstevel@tonic-gate boolean_t switch_client_serializer = B_TRUE; 33440Sstevel@tonic-gate 33450Sstevel@tonic-gate ASSERT(IS_COTS(tep)); 33460Sstevel@tonic-gate 33470Sstevel@tonic-gate if (tep->te_closing) { 33480Sstevel@tonic-gate freemsg(mp); 33490Sstevel@tonic-gate return; 33500Sstevel@tonic-gate } 33510Sstevel@tonic-gate 33520Sstevel@tonic-gate wq = tep->te_wq; 33530Sstevel@tonic-gate 33540Sstevel@tonic-gate /* 33550Sstevel@tonic-gate * preallocate memory for: 33560Sstevel@tonic-gate * 1. max of T_ERROR_ACK and T_OK_ACK 33570Sstevel@tonic-gate * ==> known max T_ERROR_ACK 33580Sstevel@tonic-gate * 2. max of T_DISCON_IND and T_CONN_CON 33590Sstevel@tonic-gate */ 33600Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED); 33610Sstevel@tonic-gate if (! 
ackmp) { 33620Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 33630Sstevel@tonic-gate return; 33640Sstevel@tonic-gate } 33650Sstevel@tonic-gate /* 33660Sstevel@tonic-gate * memory committed for T_OK_ACK/T_ERROR_ACK now 33670Sstevel@tonic-gate * will be committed for T_DISCON_IND/T_CONN_CON later 33680Sstevel@tonic-gate */ 33690Sstevel@tonic-gate 33700Sstevel@tonic-gate 33710Sstevel@tonic-gate ASSERT(prim == T_CONN_RES || prim == O_T_CONN_RES); 33720Sstevel@tonic-gate 33730Sstevel@tonic-gate /* 33740Sstevel@tonic-gate * validate state 33750Sstevel@tonic-gate */ 33760Sstevel@tonic-gate if (tep->te_state != TS_WRES_CIND) { 33770Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 33785240Snordmark SL_TRACE|SL_ERROR, 33795240Snordmark "tl_wput:T_CONN_RES:out of state, state=%d", 33805240Snordmark tep->te_state)); 33810Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim); 33820Sstevel@tonic-gate freemsg(mp); 33830Sstevel@tonic-gate return; 33840Sstevel@tonic-gate } 33850Sstevel@tonic-gate 33860Sstevel@tonic-gate /* 33870Sstevel@tonic-gate * validate the message 33880Sstevel@tonic-gate * Note: dereference fields in struct inside message only 33890Sstevel@tonic-gate * after validating the message length. 33900Sstevel@tonic-gate */ 33910Sstevel@tonic-gate if (msz < sizeof (struct T_conn_res)) { 33920Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 33935240Snordmark "tl_conn_res:invalid message length")); 33940Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim); 33950Sstevel@tonic-gate freemsg(mp); 33960Sstevel@tonic-gate return; 33970Sstevel@tonic-gate } 33980Sstevel@tonic-gate olen = cres->OPT_length; 33990Sstevel@tonic-gate ooff = cres->OPT_offset; 34000Sstevel@tonic-gate if (((olen > 0) && ((ooff + olen) > msz))) { 34010Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 34025240Snordmark "tl_conn_res:invalid message")); 34030Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim); 34040Sstevel@tonic-gate freemsg(mp); 34050Sstevel@tonic-gate return; 34060Sstevel@tonic-gate } 34070Sstevel@tonic-gate if (olen) { 34080Sstevel@tonic-gate /* 34090Sstevel@tonic-gate * no opts in connect res 34100Sstevel@tonic-gate * supported in this provider 34110Sstevel@tonic-gate */ 34120Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 34135240Snordmark "tl_conn_res:options not supported in message")); 34140Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADOPT, 0, prim); 34150Sstevel@tonic-gate freemsg(mp); 34160Sstevel@tonic-gate return; 34170Sstevel@tonic-gate } 34180Sstevel@tonic-gate 34190Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_CONN_RES, tep->te_state); 34200Sstevel@tonic-gate ASSERT(tep->te_state == TS_WACK_CRES); 34210Sstevel@tonic-gate 34220Sstevel@tonic-gate if (cres->SEQ_number < TL_MINOR_START && 34235240Snordmark cres->SEQ_number >= BADSEQNUM) { 34240Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR, 34255240Snordmark "tl_conn_res:remote endpoint sequence number bad")); 34260Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 34270Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADSEQ, 0, prim); 34280Sstevel@tonic-gate freemsg(mp); 34290Sstevel@tonic-gate return; 34300Sstevel@tonic-gate } 34310Sstevel@tonic-gate 34320Sstevel@tonic-gate /* 34330Sstevel@tonic-gate * find accepting endpoint. Will have extra reference if found. 
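 *
 * The lookup keys the transport's acceptor-id hash with the
 * ACCEPTOR_id from the T_CONN_RES; tl_find_callback() takes the hold
 * as part of the lookup, so every return path below must drop it
 * again with tl_refrele(acc_ep).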
34340Sstevel@tonic-gate */ 34350Sstevel@tonic-gate if (mod_hash_find_cb(tep->te_transport->tr_ai_hash, 34365240Snordmark (mod_hash_key_t)(uintptr_t)cres->ACCEPTOR_id, 34375240Snordmark (mod_hash_val_t *)&acc_ep, tl_find_callback) != 0) { 34380Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR, 34395240Snordmark "tl_conn_res:bad accepting endpoint")); 34400Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 34410Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADF, 0, prim); 34420Sstevel@tonic-gate freemsg(mp); 34430Sstevel@tonic-gate return; 34440Sstevel@tonic-gate } 34450Sstevel@tonic-gate 34460Sstevel@tonic-gate /* 34470Sstevel@tonic-gate * Prevent acceptor from closing. 34480Sstevel@tonic-gate */ 34490Sstevel@tonic-gate if (! tl_noclose(acc_ep)) { 34500Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR, 34515240Snordmark "tl_conn_res:bad accepting endpoint")); 34520Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 34530Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADF, 0, prim); 34540Sstevel@tonic-gate tl_refrele(acc_ep); 34550Sstevel@tonic-gate freemsg(mp); 34560Sstevel@tonic-gate return; 34570Sstevel@tonic-gate } 34580Sstevel@tonic-gate 34590Sstevel@tonic-gate acc_ep->te_flag |= TL_ACCEPTOR; 34600Sstevel@tonic-gate 34610Sstevel@tonic-gate /* 34620Sstevel@tonic-gate * validate that accepting endpoint, if different from listening 34630Sstevel@tonic-gate * has address bound => state is TS_IDLE 34640Sstevel@tonic-gate * TROUBLE in XPG4 !!? 34650Sstevel@tonic-gate */ 34660Sstevel@tonic-gate if ((tep != acc_ep) && (acc_ep->te_state != TS_IDLE)) { 34670Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR, 34685240Snordmark "tl_conn_res:accepting endpoint has no address bound," 34695240Snordmark "state=%d", acc_ep->te_state)); 34700Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 34710Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim); 34720Sstevel@tonic-gate freemsg(mp); 34730Sstevel@tonic-gate tl_closeok(acc_ep); 34740Sstevel@tonic-gate tl_refrele(acc_ep); 34750Sstevel@tonic-gate return; 34760Sstevel@tonic-gate } 34770Sstevel@tonic-gate 34780Sstevel@tonic-gate /* 34790Sstevel@tonic-gate * validate if accepting endpt same as listening, then 34800Sstevel@tonic-gate * no other incoming connection should be on the queue 34810Sstevel@tonic-gate */ 34820Sstevel@tonic-gate 34830Sstevel@tonic-gate if ((tep == acc_ep) && (tep->te_nicon > 1)) { 34840Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 34855240Snordmark "tl_conn_res: > 1 conn_ind on listener-acceptor")); 34860Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 34870Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADF, 0, prim); 34880Sstevel@tonic-gate freemsg(mp); 34890Sstevel@tonic-gate tl_closeok(acc_ep); 34900Sstevel@tonic-gate tl_refrele(acc_ep); 34910Sstevel@tonic-gate return; 34920Sstevel@tonic-gate } 34930Sstevel@tonic-gate 34940Sstevel@tonic-gate /* 34950Sstevel@tonic-gate * Mark for deletion, the entry corresponding to client 34960Sstevel@tonic-gate * on list of pending connections made by the listener 34970Sstevel@tonic-gate * search list to see if client is one of the 34980Sstevel@tonic-gate * recorded as a listener. 
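 *
 * In other words: look up, by SEQ_number, the tl_icon_t entry that
 * tl_conn_req_ser() queued on this listener for the connecting client;
 * if no such pending connection exists the T_CONN_RES is rejected
 * with TBADSEQ.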
34990Sstevel@tonic-gate */ 35000Sstevel@tonic-gate tip = tl_icon_find(tep, cres->SEQ_number); 35010Sstevel@tonic-gate if (tip == NULL) { 35020Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR, 35035240Snordmark "tl_conn_res:no client in listener list")); 35040Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state); 35050Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADSEQ, 0, prim); 35060Sstevel@tonic-gate freemsg(mp); 35070Sstevel@tonic-gate tl_closeok(acc_ep); 35080Sstevel@tonic-gate tl_refrele(acc_ep); 35090Sstevel@tonic-gate return; 35100Sstevel@tonic-gate } 35110Sstevel@tonic-gate 35120Sstevel@tonic-gate /* 35130Sstevel@tonic-gate * If ti_tep is NULL the client has already closed. In this case 35140Sstevel@tonic-gate * the code below will avoid any action on the client side 35150Sstevel@tonic-gate * but complete the server and acceptor state transitions. 35160Sstevel@tonic-gate */ 35170Sstevel@tonic-gate ASSERT(tip->ti_tep == NULL || 35185240Snordmark tip->ti_tep->te_seqno == cres->SEQ_number); 35190Sstevel@tonic-gate cl_ep = tip->ti_tep; 35200Sstevel@tonic-gate 35210Sstevel@tonic-gate /* 35220Sstevel@tonic-gate * If the client is present it is switched from listener's to acceptor's 35230Sstevel@tonic-gate * serializer. We should block client closes while serializers are 35240Sstevel@tonic-gate * being switched. 35250Sstevel@tonic-gate * 35260Sstevel@tonic-gate * It is possible that the client is present but is currently being 35270Sstevel@tonic-gate * closed. There are two possible cases: 35280Sstevel@tonic-gate * 35290Sstevel@tonic-gate * 1) The client has already entered tl_close_finish_ser() and sent 35300Sstevel@tonic-gate * T_ORDREL_IND. In this case we can just ignore the client (but we 35310Sstevel@tonic-gate * still need to send all messages from tip->ti_mp to the acceptor). 35320Sstevel@tonic-gate * 35330Sstevel@tonic-gate * 2) The client started the close but has not entered 35340Sstevel@tonic-gate * tl_close_finish_ser() yet. In this case, the client is already 35350Sstevel@tonic-gate * proceeding asynchronously on the listener's serializer, so we're 35360Sstevel@tonic-gate * forced to change the acceptor to use the listener's serializer to 35370Sstevel@tonic-gate * ensure that any operations on the acceptor are serialized with 35380Sstevel@tonic-gate * respect to the close that's in-progress. 35390Sstevel@tonic-gate */ 35400Sstevel@tonic-gate if (cl_ep != NULL) { 35410Sstevel@tonic-gate if (tl_noclose(cl_ep)) { 35420Sstevel@tonic-gate client_noclose_set = B_TRUE; 35430Sstevel@tonic-gate } else { 35440Sstevel@tonic-gate /* 35450Sstevel@tonic-gate * Client is closing. If it has sent the 35460Sstevel@tonic-gate * T_ORDREL_IND, we can simply ignore it - otherwise, 35470Sstevel@tonic-gate * we have to let the client continue until it is 35480Sstevel@tonic-gate * sent. 35490Sstevel@tonic-gate * 35500Sstevel@tonic-gate * If we do continue using the client, acceptor will 35510Sstevel@tonic-gate * switch to client's serializer which is used by client 35520Sstevel@tonic-gate * for its close.
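 *
 * When the closing client cannot be used at all (it is not a socket,
 * early connects are disabled, or its state has already been
 * invalidated by the close), cl_ep is reset to NULL below so that only
 * the listener and acceptor sides of the handshake are completed.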
35530Sstevel@tonic-gate */ 35540Sstevel@tonic-gate tl_client_closing_when_accepting++; 35550Sstevel@tonic-gate switch_client_serializer = B_FALSE; 35560Sstevel@tonic-gate if (!IS_SOCKET(cl_ep) || tl_disable_early_connect || 35570Sstevel@tonic-gate cl_ep->te_state == -1) 35580Sstevel@tonic-gate cl_ep = NULL; 35590Sstevel@tonic-gate } 35600Sstevel@tonic-gate } 35610Sstevel@tonic-gate 35620Sstevel@tonic-gate if (cl_ep != NULL) { 35630Sstevel@tonic-gate /* 35640Sstevel@tonic-gate * validate client state to be TS_WCON_CREQ or TS_DATA_XFER 35650Sstevel@tonic-gate * (latter for sockets only) 35660Sstevel@tonic-gate */ 35670Sstevel@tonic-gate if (cl_ep->te_state != TS_WCON_CREQ && 35680Sstevel@tonic-gate (cl_ep->te_state != TS_DATA_XFER && 35690Sstevel@tonic-gate IS_SOCKET(cl_ep))) { 35700Sstevel@tonic-gate err = ECONNREFUSED; 35710Sstevel@tonic-gate /* 35720Sstevel@tonic-gate * T_DISCON_IND sent later after committing memory 35730Sstevel@tonic-gate * and acking validity of request 35740Sstevel@tonic-gate */ 35750Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE, 35765240Snordmark "tl_conn_res:peer in bad state")); 35770Sstevel@tonic-gate } 35780Sstevel@tonic-gate 35790Sstevel@tonic-gate /* 35800Sstevel@tonic-gate * preallocate now for T_DISCON_IND or T_CONN_CON 35810Sstevel@tonic-gate * ack validity of request (T_OK_ACK) after memory committed 35820Sstevel@tonic-gate */ 35830Sstevel@tonic-gate 35840Sstevel@tonic-gate if (err) 35850Sstevel@tonic-gate size = sizeof (struct T_discon_ind); 35860Sstevel@tonic-gate else { 35870Sstevel@tonic-gate /* 35880Sstevel@tonic-gate * calculate length of T_CONN_CON message 35890Sstevel@tonic-gate */ 35900Sstevel@tonic-gate olen = 0; 35910Sstevel@tonic-gate if (cl_ep->te_flag & TL_SETCRED) { 35920Sstevel@tonic-gate olen = (t_scalar_t)sizeof (struct opthdr) + 35935240Snordmark OPTLEN(sizeof (tl_credopt_t)); 35940Sstevel@tonic-gate } else if (cl_ep->te_flag & TL_SETUCRED) { 35950Sstevel@tonic-gate olen = (t_scalar_t)sizeof (struct opthdr) + 3596*11134SCasper.Dik@Sun.COM OPTLEN(ucredminsize(acc_ep->te_credp)); 35970Sstevel@tonic-gate } 35980Sstevel@tonic-gate size = T_ALIGN(sizeof (struct T_conn_con) + 35995240Snordmark acc_ep->te_alen) + olen; 36000Sstevel@tonic-gate } 36010Sstevel@tonic-gate if ((respmp = reallocb(mp, size, 0)) == NULL) { 36020Sstevel@tonic-gate /* 36030Sstevel@tonic-gate * roll back state changes 36040Sstevel@tonic-gate */ 36050Sstevel@tonic-gate tep->te_state = TS_WRES_CIND; 36060Sstevel@tonic-gate tl_memrecover(wq, mp, size); 36070Sstevel@tonic-gate freemsg(ackmp); 36080Sstevel@tonic-gate if (client_noclose_set) 36090Sstevel@tonic-gate tl_closeok(cl_ep); 36100Sstevel@tonic-gate tl_closeok(acc_ep); 36110Sstevel@tonic-gate tl_refrele(acc_ep); 36120Sstevel@tonic-gate return; 36130Sstevel@tonic-gate } 36140Sstevel@tonic-gate mp = NULL; 36150Sstevel@tonic-gate } 36160Sstevel@tonic-gate 36170Sstevel@tonic-gate /* 36180Sstevel@tonic-gate * Now ack validity of request 36190Sstevel@tonic-gate */ 36200Sstevel@tonic-gate if (tep->te_nicon == 1) { 36210Sstevel@tonic-gate if (tep == acc_ep) 36220Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK2, tep->te_state); 36230Sstevel@tonic-gate else 36240Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK3, tep->te_state); 36250Sstevel@tonic-gate } else 36260Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK4, tep->te_state); 36270Sstevel@tonic-gate 36280Sstevel@tonic-gate /* 36290Sstevel@tonic-gate * send T_DISCON_IND now if client state validation failed earlier 
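 * (the T_CONN_RES itself has been accepted, so a T_OK_ACK is still sent
 * to the caller; the preallocated respmp is then converted into a
 * T_DISCON_IND carrying the saved error as DISCON_reason and delivered
 * to the acceptor's read queue)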
36300Sstevel@tonic-gate */ 36310Sstevel@tonic-gate if (err) { 36320Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim); 36330Sstevel@tonic-gate /* 36340Sstevel@tonic-gate * flush the queues - why always ? 36350Sstevel@tonic-gate */ 36360Sstevel@tonic-gate (void) putnextctl1(acc_ep->te_rq, M_FLUSH, FLUSHR); 36370Sstevel@tonic-gate 36380Sstevel@tonic-gate dimp = tl_resizemp(respmp, size); 36390Sstevel@tonic-gate if (! dimp) { 36400Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 36415240Snordmark SL_TRACE|SL_ERROR, 36425240Snordmark "tl_conn_res:con_ind:allocb failure")); 36430Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM); 36440Sstevel@tonic-gate tl_closeok(acc_ep); 36450Sstevel@tonic-gate if (client_noclose_set) 36460Sstevel@tonic-gate tl_closeok(cl_ep); 36470Sstevel@tonic-gate tl_refrele(acc_ep); 36480Sstevel@tonic-gate return; 36490Sstevel@tonic-gate } 36500Sstevel@tonic-gate if (dimp->b_cont) { 36510Sstevel@tonic-gate /* no user data in provider generated discon ind */ 36520Sstevel@tonic-gate freemsg(dimp->b_cont); 36530Sstevel@tonic-gate dimp->b_cont = NULL; 36540Sstevel@tonic-gate } 36550Sstevel@tonic-gate 36560Sstevel@tonic-gate DB_TYPE(dimp) = M_PROTO; 36570Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr; 36580Sstevel@tonic-gate di->PRIM_type = T_DISCON_IND; 36590Sstevel@tonic-gate di->DISCON_reason = err; 36600Sstevel@tonic-gate di->SEQ_number = BADSEQNUM; 36610Sstevel@tonic-gate 36620Sstevel@tonic-gate tep->te_state = TS_IDLE; 36630Sstevel@tonic-gate /* 36640Sstevel@tonic-gate * send T_DISCON_IND message 36650Sstevel@tonic-gate */ 36660Sstevel@tonic-gate putnext(acc_ep->te_rq, dimp); 36670Sstevel@tonic-gate if (client_noclose_set) 36680Sstevel@tonic-gate tl_closeok(cl_ep); 36690Sstevel@tonic-gate tl_closeok(acc_ep); 36700Sstevel@tonic-gate tl_refrele(acc_ep); 36710Sstevel@tonic-gate return; 36720Sstevel@tonic-gate } 36730Sstevel@tonic-gate 36740Sstevel@tonic-gate /* 36750Sstevel@tonic-gate * now start connecting the accepting endpoint 36760Sstevel@tonic-gate */ 36770Sstevel@tonic-gate if (tep != acc_ep) 36780Sstevel@tonic-gate acc_ep->te_state = NEXTSTATE(TE_PASS_CONN, acc_ep->te_state); 36790Sstevel@tonic-gate 36800Sstevel@tonic-gate if (cl_ep == NULL) { 36810Sstevel@tonic-gate /* 36820Sstevel@tonic-gate * The client has already closed. Send up any queued messages 36830Sstevel@tonic-gate * and change the state accordingly. 36840Sstevel@tonic-gate */ 36850Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim); 36860Sstevel@tonic-gate tl_icon_sendmsgs(acc_ep, &tip->ti_mp); 36870Sstevel@tonic-gate 36880Sstevel@tonic-gate /* 36890Sstevel@tonic-gate * remove endpoint from incoming connection 36900Sstevel@tonic-gate * delete client from list of incoming connections 36910Sstevel@tonic-gate */ 36920Sstevel@tonic-gate tl_freetip(tep, tip); 36930Sstevel@tonic-gate freemsg(mp); 36940Sstevel@tonic-gate tl_closeok(acc_ep); 36950Sstevel@tonic-gate tl_refrele(acc_ep); 36960Sstevel@tonic-gate return; 36970Sstevel@tonic-gate } else if (tip->ti_mp != NULL) { 36980Sstevel@tonic-gate /* 36990Sstevel@tonic-gate * The client could have queued a T_DISCON_IND which needs 37000Sstevel@tonic-gate * to be sent up. 37010Sstevel@tonic-gate * Note that t_discon_req can not operate the same as 37020Sstevel@tonic-gate * t_data_req since it is not possible for it to putbq 37030Sstevel@tonic-gate * the message and return -1 due to the use of qwriter. 
37040Sstevel@tonic-gate */ 37050Sstevel@tonic-gate tl_icon_sendmsgs(acc_ep, &tip->ti_mp); 37060Sstevel@tonic-gate } 37070Sstevel@tonic-gate 37080Sstevel@tonic-gate /* 37090Sstevel@tonic-gate * prepare connect confirm T_CONN_CON message 37100Sstevel@tonic-gate */ 37110Sstevel@tonic-gate 37120Sstevel@tonic-gate /* 37130Sstevel@tonic-gate * allocate the message - original data blocks 37140Sstevel@tonic-gate * retained in the returned mblk 37150Sstevel@tonic-gate */ 37160Sstevel@tonic-gate if (! IS_SOCKET(cl_ep) || tl_disable_early_connect) { 37170Sstevel@tonic-gate ccmp = tl_resizemp(respmp, size); 37180Sstevel@tonic-gate if (ccmp == NULL) { 37190Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim); 37200Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 37215240Snordmark SL_TRACE|SL_ERROR, 37225240Snordmark "tl_conn_res:conn_con:allocb failure")); 37230Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM); 37240Sstevel@tonic-gate tl_closeok(acc_ep); 37250Sstevel@tonic-gate if (client_noclose_set) 37260Sstevel@tonic-gate tl_closeok(cl_ep); 37270Sstevel@tonic-gate tl_refrele(acc_ep); 37280Sstevel@tonic-gate return; 37290Sstevel@tonic-gate } 37300Sstevel@tonic-gate 37310Sstevel@tonic-gate DB_TYPE(ccmp) = M_PROTO; 37320Sstevel@tonic-gate cc = (struct T_conn_con *)ccmp->b_rptr; 37330Sstevel@tonic-gate cc->PRIM_type = T_CONN_CON; 37340Sstevel@tonic-gate cc->RES_offset = (t_scalar_t)sizeof (struct T_conn_con); 37350Sstevel@tonic-gate cc->RES_length = acc_ep->te_alen; 37360Sstevel@tonic-gate addr_startp = ccmp->b_rptr + cc->RES_offset; 37370Sstevel@tonic-gate bcopy(acc_ep->te_abuf, addr_startp, acc_ep->te_alen); 37380Sstevel@tonic-gate if (cl_ep->te_flag & (TL_SETCRED|TL_SETUCRED)) { 37390Sstevel@tonic-gate cc->OPT_offset = (t_scalar_t)T_ALIGN(cc->RES_offset + 37400Sstevel@tonic-gate cc->RES_length); 37410Sstevel@tonic-gate cc->OPT_length = olen; 37420Sstevel@tonic-gate tl_fill_option(ccmp->b_rptr + cc->OPT_offset, 37431676Sjpk acc_ep->te_credp, acc_ep->te_cpid, cl_ep->te_flag, 37441676Sjpk cl_ep->te_credp); 37450Sstevel@tonic-gate } else { 37460Sstevel@tonic-gate cc->OPT_offset = 0; 37470Sstevel@tonic-gate cc->OPT_length = 0; 37480Sstevel@tonic-gate } 37490Sstevel@tonic-gate /* 37500Sstevel@tonic-gate * Forward the credential in the packet so it can be picked up 37510Sstevel@tonic-gate * at the higher layers for more complete credential processing 37520Sstevel@tonic-gate */ 37538778SErik.Nordmark@Sun.COM mblk_setcred(ccmp, acc_ep->te_credp, acc_ep->te_cpid); 37540Sstevel@tonic-gate } else { 37550Sstevel@tonic-gate freemsg(respmp); 37560Sstevel@tonic-gate respmp = NULL; 37570Sstevel@tonic-gate } 37580Sstevel@tonic-gate 37590Sstevel@tonic-gate /* 37600Sstevel@tonic-gate * make the connection by linking 37610Sstevel@tonic-gate * the accepting and client endpoints 37620Sstevel@tonic-gate * No need to increment references: 37630Sstevel@tonic-gate * on client: it should already have one from tip->ti_tep linkage. 37640Sstevel@tonic-gate * on acceptor: it should already have one from the table lookup. 37650Sstevel@tonic-gate * 37660Sstevel@tonic-gate * At this point neither the client nor the acceptor can close. Set client 37670Sstevel@tonic-gate * serializer to acceptor's. 
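 *
 * The switch below is best-effort: if the client's serializer is
 * currently running a callback (te_ser_count > 0) the client cannot be
 * moved, so the acceptor is moved onto the client's (i.e. the
 * listener's) serializer instead and tl_serializer_noswitch is bumped.
 * Either way both endpoints end up behind the same serializer before
 * the connection is completed.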
37680Sstevel@tonic-gate */ 37690Sstevel@tonic-gate ASSERT(cl_ep->te_refcnt >= 2); 37700Sstevel@tonic-gate ASSERT(acc_ep->te_refcnt >= 2); 37710Sstevel@tonic-gate ASSERT(cl_ep->te_conp == NULL); 37720Sstevel@tonic-gate ASSERT(acc_ep->te_conp == NULL); 37730Sstevel@tonic-gate cl_ep->te_conp = acc_ep; 37740Sstevel@tonic-gate acc_ep->te_conp = cl_ep; 37750Sstevel@tonic-gate ASSERT(cl_ep->te_ser == tep->te_ser); 37760Sstevel@tonic-gate if (switch_client_serializer) { 37770Sstevel@tonic-gate mutex_enter(&cl_ep->te_ser_lock); 37780Sstevel@tonic-gate if (cl_ep->te_ser_count > 0) { 37790Sstevel@tonic-gate switch_client_serializer = B_FALSE; 37800Sstevel@tonic-gate tl_serializer_noswitch++; 37810Sstevel@tonic-gate } else { 37820Sstevel@tonic-gate /* 37830Sstevel@tonic-gate * Move client to the acceptor's serializer. 37840Sstevel@tonic-gate */ 37850Sstevel@tonic-gate tl_serializer_refhold(acc_ep->te_ser); 37860Sstevel@tonic-gate tl_serializer_refrele(cl_ep->te_ser); 37870Sstevel@tonic-gate cl_ep->te_ser = acc_ep->te_ser; 37880Sstevel@tonic-gate } 37890Sstevel@tonic-gate mutex_exit(&cl_ep->te_ser_lock); 37900Sstevel@tonic-gate } 37910Sstevel@tonic-gate if (!switch_client_serializer) { 37920Sstevel@tonic-gate /* 37930Sstevel@tonic-gate * It is not possible to switch client to use acceptor's. 37940Sstevel@tonic-gate * Move acceptor to client's serializer (which is the same as 37950Sstevel@tonic-gate * listener's). 37960Sstevel@tonic-gate */ 37970Sstevel@tonic-gate tl_serializer_refhold(cl_ep->te_ser); 37980Sstevel@tonic-gate tl_serializer_refrele(acc_ep->te_ser); 37990Sstevel@tonic-gate acc_ep->te_ser = cl_ep->te_ser; 38000Sstevel@tonic-gate } 38010Sstevel@tonic-gate 38020Sstevel@tonic-gate TL_REMOVE_PEER(cl_ep->te_oconp); 38030Sstevel@tonic-gate TL_REMOVE_PEER(acc_ep->te_oconp); 38040Sstevel@tonic-gate 38050Sstevel@tonic-gate /* 38060Sstevel@tonic-gate * remove endpoint from incoming connection 38070Sstevel@tonic-gate * delete client from list of incoming connections 38080Sstevel@tonic-gate */ 38090Sstevel@tonic-gate tip->ti_tep = NULL; 38100Sstevel@tonic-gate tl_freetip(tep, tip); 38110Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim); 38120Sstevel@tonic-gate 38130Sstevel@tonic-gate /* 38140Sstevel@tonic-gate * data blocks already linked in reallocb() 38150Sstevel@tonic-gate */ 38160Sstevel@tonic-gate 38170Sstevel@tonic-gate /* 38180Sstevel@tonic-gate * link queues so that I_SENDFD will work 38190Sstevel@tonic-gate */ 38200Sstevel@tonic-gate if (! IS_SOCKET(tep)) { 38210Sstevel@tonic-gate acc_ep->te_wq->q_next = cl_ep->te_rq; 38220Sstevel@tonic-gate cl_ep->te_wq->q_next = acc_ep->te_rq; 38230Sstevel@tonic-gate } 38240Sstevel@tonic-gate 38250Sstevel@tonic-gate /* 38260Sstevel@tonic-gate * send T_CONN_CON up on client side unless it was already 38270Sstevel@tonic-gate * done (for a socket). In case any data or ordrel req has been 38280Sstevel@tonic-gate * queued, make sure that the service procedure runs. 
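 * (for a socket using early connect the T_CONN_CON was already sent
 * while the T_CONN_REQ was processed, so below the client's queue is
 * simply re-enabled and the prebuilt confirmation, if any, is freed)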
38290Sstevel@tonic-gate */ 38300Sstevel@tonic-gate if (IS_SOCKET(cl_ep) && !tl_disable_early_connect) { 38310Sstevel@tonic-gate enableok(cl_ep->te_wq); 38320Sstevel@tonic-gate TL_QENABLE(cl_ep); 38330Sstevel@tonic-gate if (ccmp != NULL) 38340Sstevel@tonic-gate freemsg(ccmp); 38350Sstevel@tonic-gate } else { 38360Sstevel@tonic-gate /* 38370Sstevel@tonic-gate * change client state on TE_CONN_CON event 38380Sstevel@tonic-gate */ 38390Sstevel@tonic-gate cl_ep->te_state = NEXTSTATE(TE_CONN_CON, cl_ep->te_state); 38400Sstevel@tonic-gate putnext(cl_ep->te_rq, ccmp); 38410Sstevel@tonic-gate } 38420Sstevel@tonic-gate 38430Sstevel@tonic-gate /* Mark the both endpoints as accepted */ 38440Sstevel@tonic-gate cl_ep->te_flag |= TL_ACCEPTED; 38450Sstevel@tonic-gate acc_ep->te_flag |= TL_ACCEPTED; 38460Sstevel@tonic-gate 38470Sstevel@tonic-gate /* 38480Sstevel@tonic-gate * Allow client and acceptor to close. 38490Sstevel@tonic-gate */ 38500Sstevel@tonic-gate tl_closeok(acc_ep); 38510Sstevel@tonic-gate if (client_noclose_set) 38520Sstevel@tonic-gate tl_closeok(cl_ep); 38530Sstevel@tonic-gate } 38540Sstevel@tonic-gate 38550Sstevel@tonic-gate 38560Sstevel@tonic-gate 38570Sstevel@tonic-gate 38580Sstevel@tonic-gate static void 38590Sstevel@tonic-gate tl_discon_req(mblk_t *mp, tl_endpt_t *tep) 38600Sstevel@tonic-gate { 38610Sstevel@tonic-gate queue_t *wq; 38620Sstevel@tonic-gate struct T_discon_req *dr; 38630Sstevel@tonic-gate ssize_t msz; 38640Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_conp; 38650Sstevel@tonic-gate tl_endpt_t *srv_tep = tep->te_oconp; 38660Sstevel@tonic-gate tl_icon_t *tip; 38670Sstevel@tonic-gate size_t size; 38680Sstevel@tonic-gate mblk_t *ackmp, *dimp, *respmp; 38690Sstevel@tonic-gate struct T_discon_ind *di; 38700Sstevel@tonic-gate t_scalar_t save_state, new_state; 38710Sstevel@tonic-gate 38720Sstevel@tonic-gate if (tep->te_closing) { 38730Sstevel@tonic-gate freemsg(mp); 38740Sstevel@tonic-gate return; 38750Sstevel@tonic-gate } 38760Sstevel@tonic-gate 38770Sstevel@tonic-gate if ((peer_tep != NULL) && peer_tep->te_closing) { 38780Sstevel@tonic-gate TL_UNCONNECT(tep->te_conp); 38790Sstevel@tonic-gate peer_tep = NULL; 38800Sstevel@tonic-gate } 38810Sstevel@tonic-gate if ((srv_tep != NULL) && srv_tep->te_closing) { 38820Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 38830Sstevel@tonic-gate srv_tep = NULL; 38840Sstevel@tonic-gate } 38850Sstevel@tonic-gate 38860Sstevel@tonic-gate wq = tep->te_wq; 38870Sstevel@tonic-gate 38880Sstevel@tonic-gate /* 38890Sstevel@tonic-gate * preallocate memory for: 38900Sstevel@tonic-gate * 1. max of T_ERROR_ACK and T_OK_ACK 38910Sstevel@tonic-gate * ==> known max T_ERROR_ACK 38920Sstevel@tonic-gate * 2. for T_DISCON_IND 38930Sstevel@tonic-gate */ 38940Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED); 38950Sstevel@tonic-gate if (! ackmp) { 38960Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack)); 38970Sstevel@tonic-gate return; 38980Sstevel@tonic-gate } 38990Sstevel@tonic-gate /* 39000Sstevel@tonic-gate * memory committed for T_OK_ACK/T_ERROR_ACK now 39010Sstevel@tonic-gate * will be committed for T_DISCON_IND later 39020Sstevel@tonic-gate */ 39030Sstevel@tonic-gate 39040Sstevel@tonic-gate dr = (struct T_discon_req *)mp->b_rptr; 39050Sstevel@tonic-gate msz = MBLKL(mp); 39060Sstevel@tonic-gate 39070Sstevel@tonic-gate /* 39080Sstevel@tonic-gate * validate the state 39090Sstevel@tonic-gate */ 39100Sstevel@tonic-gate save_state = new_state = tep->te_state; 39110Sstevel@tonic-gate if (! 
(save_state >= TS_WCON_CREQ && save_state <= TS_WRES_CIND) && 39120Sstevel@tonic-gate ! (save_state >= TS_DATA_XFER && save_state <= TS_WREQ_ORDREL)) { 39130Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 39145240Snordmark SL_TRACE|SL_ERROR, 39155240Snordmark "tl_wput:T_DISCON_REQ:out of state, state=%d", 39165240Snordmark tep->te_state)); 39170Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_DISCON_REQ); 39180Sstevel@tonic-gate freemsg(mp); 39190Sstevel@tonic-gate return; 39200Sstevel@tonic-gate } 39210Sstevel@tonic-gate /* 39220Sstevel@tonic-gate * Defer committing the state change until it is determined if 39230Sstevel@tonic-gate * the message will be queued with the tl_icon or not. 39240Sstevel@tonic-gate */ 39250Sstevel@tonic-gate new_state = NEXTSTATE(TE_DISCON_REQ, tep->te_state); 39260Sstevel@tonic-gate 39270Sstevel@tonic-gate /* validate the message */ 39280Sstevel@tonic-gate if (msz < sizeof (struct T_discon_req)) { 39290Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 39305240Snordmark "tl_discon_req:invalid message")); 39310Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state); 39320Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_DISCON_REQ); 39330Sstevel@tonic-gate freemsg(mp); 39340Sstevel@tonic-gate return; 39350Sstevel@tonic-gate } 39360Sstevel@tonic-gate 39370Sstevel@tonic-gate /* 39380Sstevel@tonic-gate * if server, then validate that client exists 39390Sstevel@tonic-gate * by connection sequence number etc. 39400Sstevel@tonic-gate */ 39410Sstevel@tonic-gate if (tep->te_nicon > 0) { /* server */ 39420Sstevel@tonic-gate 39430Sstevel@tonic-gate /* 39440Sstevel@tonic-gate * search server list for disconnect client 39450Sstevel@tonic-gate */ 39460Sstevel@tonic-gate tip = tl_icon_find(tep, dr->SEQ_number); 39470Sstevel@tonic-gate if (tip == NULL) { 39480Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, 39495240Snordmark SL_TRACE|SL_ERROR, 39505240Snordmark "tl_discon_req:no disconnect endpoint")); 39510Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state); 39520Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADSEQ, 0, T_DISCON_REQ); 39530Sstevel@tonic-gate freemsg(mp); 39540Sstevel@tonic-gate return; 39550Sstevel@tonic-gate } 39560Sstevel@tonic-gate /* 39570Sstevel@tonic-gate * If ti_tep is NULL the client has already closed. In this case 39580Sstevel@tonic-gate * the code below will avoid any action on the client side. 
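 * The T_OK_ACK is still sent and the stale entry is removed from the
 * listener's list of pending connections via tl_freetip().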
39590Sstevel@tonic-gate */ 39600Sstevel@tonic-gate 39610Sstevel@tonic-gate ASSERT(IMPLY(tip->ti_tep != NULL, 39625240Snordmark tip->ti_tep->te_seqno == dr->SEQ_number)); 39630Sstevel@tonic-gate peer_tep = tip->ti_tep; 39640Sstevel@tonic-gate } 39650Sstevel@tonic-gate 39660Sstevel@tonic-gate /* 39670Sstevel@tonic-gate * preallocate now for T_DISCON_IND 39680Sstevel@tonic-gate * ack validity of request (T_OK_ACK) after memory committed 39690Sstevel@tonic-gate */ 39700Sstevel@tonic-gate size = sizeof (struct T_discon_ind); 39710Sstevel@tonic-gate if ((respmp = reallocb(mp, size, 0)) == NULL) { 39720Sstevel@tonic-gate tl_memrecover(wq, mp, size); 39730Sstevel@tonic-gate freemsg(ackmp); 39740Sstevel@tonic-gate return; 39750Sstevel@tonic-gate } 39760Sstevel@tonic-gate 39770Sstevel@tonic-gate /* 39780Sstevel@tonic-gate * prepare message to ack validity of request 39790Sstevel@tonic-gate */ 39800Sstevel@tonic-gate if (tep->te_nicon == 0) 39810Sstevel@tonic-gate new_state = NEXTSTATE(TE_OK_ACK1, new_state); 39820Sstevel@tonic-gate else 39830Sstevel@tonic-gate if (tep->te_nicon == 1) 39840Sstevel@tonic-gate new_state = NEXTSTATE(TE_OK_ACK2, new_state); 39850Sstevel@tonic-gate else 39860Sstevel@tonic-gate new_state = NEXTSTATE(TE_OK_ACK4, new_state); 39870Sstevel@tonic-gate 39880Sstevel@tonic-gate /* 39890Sstevel@tonic-gate * Flushing queues according to TPI. Using the old state. 39900Sstevel@tonic-gate */ 39910Sstevel@tonic-gate if ((tep->te_nicon <= 1) && 39920Sstevel@tonic-gate ((save_state == TS_DATA_XFER) || 39930Sstevel@tonic-gate (save_state == TS_WIND_ORDREL) || 39940Sstevel@tonic-gate (save_state == TS_WREQ_ORDREL))) 39950Sstevel@tonic-gate (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW); 39960Sstevel@tonic-gate 39970Sstevel@tonic-gate /* send T_OK_ACK up */ 39980Sstevel@tonic-gate tl_ok_ack(wq, ackmp, T_DISCON_REQ); 39990Sstevel@tonic-gate 40000Sstevel@tonic-gate /* 40010Sstevel@tonic-gate * now do disconnect business 40020Sstevel@tonic-gate */ 40030Sstevel@tonic-gate if (tep->te_nicon > 0) { /* listener */ 40040Sstevel@tonic-gate if (peer_tep != NULL && !peer_tep->te_closing) { 40050Sstevel@tonic-gate /* 40060Sstevel@tonic-gate * disconnect incoming connect request pending to tep 40070Sstevel@tonic-gate */ 40080Sstevel@tonic-gate if ((dimp = tl_resizemp(respmp, size)) == NULL) { 40090Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, 40105240Snordmark SL_TRACE|SL_ERROR, 40115240Snordmark "tl_discon_req: reallocb failed")); 40120Sstevel@tonic-gate tep->te_state = new_state; 40130Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM); 40140Sstevel@tonic-gate return; 40150Sstevel@tonic-gate } 40160Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr; 40170Sstevel@tonic-gate di->SEQ_number = BADSEQNUM; 40180Sstevel@tonic-gate save_state = peer_tep->te_state; 40190Sstevel@tonic-gate peer_tep->te_state = TS_IDLE; 40200Sstevel@tonic-gate 40210Sstevel@tonic-gate TL_REMOVE_PEER(peer_tep->te_oconp); 40220Sstevel@tonic-gate enableok(peer_tep->te_wq); 40230Sstevel@tonic-gate TL_QENABLE(peer_tep); 40240Sstevel@tonic-gate } else { 40250Sstevel@tonic-gate freemsg(respmp); 40260Sstevel@tonic-gate dimp = NULL; 40270Sstevel@tonic-gate } 40280Sstevel@tonic-gate 40290Sstevel@tonic-gate /* 40300Sstevel@tonic-gate * remove endpoint from incoming connection list 40310Sstevel@tonic-gate * - remove disconnect client from list on server 40320Sstevel@tonic-gate */ 40330Sstevel@tonic-gate tl_freetip(tep, tip); 40340Sstevel@tonic-gate } else if ((peer_tep = tep->te_oconp) != NULL) { /* client */ 
40350Sstevel@tonic-gate /* 40360Sstevel@tonic-gate * disconnect an outgoing request pending from tep 40370Sstevel@tonic-gate */ 40380Sstevel@tonic-gate 40390Sstevel@tonic-gate if ((dimp = tl_resizemp(respmp, size)) == NULL) { 40400Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, 40415240Snordmark SL_TRACE|SL_ERROR, 40425240Snordmark "tl_discon_req: reallocb failed")); 40430Sstevel@tonic-gate tep->te_state = new_state; 40440Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM); 40450Sstevel@tonic-gate return; 40460Sstevel@tonic-gate } 40470Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr; 40480Sstevel@tonic-gate DB_TYPE(dimp) = M_PROTO; 40490Sstevel@tonic-gate di->PRIM_type = T_DISCON_IND; 40500Sstevel@tonic-gate di->DISCON_reason = ECONNRESET; 40510Sstevel@tonic-gate di->SEQ_number = tep->te_seqno; 40520Sstevel@tonic-gate 40530Sstevel@tonic-gate /* 40540Sstevel@tonic-gate * If this is a socket the T_DISCON_IND is queued with 40550Sstevel@tonic-gate * the T_CONN_IND. Otherwise the T_CONN_IND is removed 40560Sstevel@tonic-gate * from the list of pending connections. 40570Sstevel@tonic-gate * Note that when te_oconp is set the peer better have 40580Sstevel@tonic-gate * a t_connind_t for the client. 40590Sstevel@tonic-gate */ 40600Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect) { 40610Sstevel@tonic-gate /* 40620Sstevel@tonic-gate * No need to check that 40630Sstevel@tonic-gate * ti_tep == NULL since the T_DISCON_IND 40640Sstevel@tonic-gate * takes precedence over other queued 40650Sstevel@tonic-gate * messages. 40660Sstevel@tonic-gate */ 40670Sstevel@tonic-gate tl_icon_queuemsg(peer_tep, tep->te_seqno, dimp); 40680Sstevel@tonic-gate peer_tep = NULL; 40690Sstevel@tonic-gate dimp = NULL; 40700Sstevel@tonic-gate /* 40710Sstevel@tonic-gate * Can't clear te_oconp since tl_co_unconnect needs 40720Sstevel@tonic-gate * it as a hint not to free the tep. 40730Sstevel@tonic-gate * Keep the state unchanged since tl_conn_res inspects 40740Sstevel@tonic-gate * it. 40750Sstevel@tonic-gate */ 40760Sstevel@tonic-gate new_state = tep->te_state; 40770Sstevel@tonic-gate } else { 40780Sstevel@tonic-gate /* Found - delete it */ 40790Sstevel@tonic-gate tip = tl_icon_find(peer_tep, tep->te_seqno); 40800Sstevel@tonic-gate if (tip != NULL) { 40810Sstevel@tonic-gate ASSERT(tep == tip->ti_tep); 40820Sstevel@tonic-gate save_state = peer_tep->te_state; 40830Sstevel@tonic-gate if (peer_tep->te_nicon == 1) 40840Sstevel@tonic-gate peer_tep->te_state = 40850Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND2, 40865240Snordmark peer_tep->te_state); 40870Sstevel@tonic-gate else 40880Sstevel@tonic-gate peer_tep->te_state = 40890Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND3, 40905240Snordmark peer_tep->te_state); 40910Sstevel@tonic-gate tl_freetip(peer_tep, tip); 40920Sstevel@tonic-gate } 40930Sstevel@tonic-gate ASSERT(tep->te_oconp != NULL); 40940Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 40950Sstevel@tonic-gate } 40960Sstevel@tonic-gate } else if ((peer_tep = tep->te_conp) != NULL) { /* connected! 
*/ 40970Sstevel@tonic-gate if ((dimp = tl_resizemp(respmp, size)) == NULL) { 40980Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, 40995240Snordmark SL_TRACE|SL_ERROR, 41005240Snordmark "tl_discon_req: reallocb failed")); 41010Sstevel@tonic-gate tep->te_state = new_state; 41020Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM); 41030Sstevel@tonic-gate return; 41040Sstevel@tonic-gate } 41050Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr; 41060Sstevel@tonic-gate di->SEQ_number = BADSEQNUM; 41070Sstevel@tonic-gate 41080Sstevel@tonic-gate save_state = peer_tep->te_state; 41090Sstevel@tonic-gate peer_tep->te_state = TS_IDLE; 41100Sstevel@tonic-gate } else { 41110Sstevel@tonic-gate /* Not connected */ 41120Sstevel@tonic-gate tep->te_state = new_state; 41130Sstevel@tonic-gate freemsg(respmp); 41140Sstevel@tonic-gate return; 41150Sstevel@tonic-gate } 41160Sstevel@tonic-gate 41170Sstevel@tonic-gate /* Commit state changes */ 41180Sstevel@tonic-gate tep->te_state = new_state; 41190Sstevel@tonic-gate 41200Sstevel@tonic-gate if (peer_tep == NULL) { 41210Sstevel@tonic-gate ASSERT(dimp == NULL); 41220Sstevel@tonic-gate goto done; 41230Sstevel@tonic-gate } 41240Sstevel@tonic-gate /* 41250Sstevel@tonic-gate * Flush queues on peer before sending up 41260Sstevel@tonic-gate * T_DISCON_IND according to TPI 41270Sstevel@tonic-gate */ 41280Sstevel@tonic-gate 41290Sstevel@tonic-gate if ((save_state == TS_DATA_XFER) || 41300Sstevel@tonic-gate (save_state == TS_WIND_ORDREL) || 41310Sstevel@tonic-gate (save_state == TS_WREQ_ORDREL)) 41320Sstevel@tonic-gate (void) putnextctl1(peer_tep->te_rq, M_FLUSH, FLUSHRW); 41330Sstevel@tonic-gate 41340Sstevel@tonic-gate DB_TYPE(dimp) = M_PROTO; 41350Sstevel@tonic-gate di->PRIM_type = T_DISCON_IND; 41360Sstevel@tonic-gate di->DISCON_reason = ECONNRESET; 41370Sstevel@tonic-gate 41380Sstevel@tonic-gate /* 41390Sstevel@tonic-gate * data blocks already linked into dimp by reallocb() 41400Sstevel@tonic-gate */ 41410Sstevel@tonic-gate /* 41420Sstevel@tonic-gate * send indication message to peer user module 41430Sstevel@tonic-gate */ 41440Sstevel@tonic-gate ASSERT(dimp != NULL); 41450Sstevel@tonic-gate putnext(peer_tep->te_rq, dimp); 41460Sstevel@tonic-gate done: 41470Sstevel@tonic-gate if (tep->te_conp) { /* disconnect pointers if connected */ 41480Sstevel@tonic-gate ASSERT(! peer_tep->te_closing); 41490Sstevel@tonic-gate 41500Sstevel@tonic-gate /* 41510Sstevel@tonic-gate * Messages may be queued on peer's write queue 41520Sstevel@tonic-gate * waiting to be processed by its write service 41530Sstevel@tonic-gate * procedure. Before the pointer to the peer transport 41540Sstevel@tonic-gate * structure is set to NULL, qenable the peer's write 41550Sstevel@tonic-gate * queue so that the queued up messages are processed. 41560Sstevel@tonic-gate */ 41570Sstevel@tonic-gate if ((save_state == TS_DATA_XFER) || 41580Sstevel@tonic-gate (save_state == TS_WIND_ORDREL) || 41590Sstevel@tonic-gate (save_state == TS_WREQ_ORDREL)) 41600Sstevel@tonic-gate TL_QENABLE(peer_tep); 41610Sstevel@tonic-gate ASSERT(peer_tep != NULL && peer_tep->te_conp != NULL); 41620Sstevel@tonic-gate TL_UNCONNECT(peer_tep->te_conp); 41630Sstevel@tonic-gate if (! 
IS_SOCKET(tep)) { 41640Sstevel@tonic-gate /* 41650Sstevel@tonic-gate * unlink the streams 41660Sstevel@tonic-gate */ 41670Sstevel@tonic-gate tep->te_wq->q_next = NULL; 41680Sstevel@tonic-gate peer_tep->te_wq->q_next = NULL; 41690Sstevel@tonic-gate } 41700Sstevel@tonic-gate TL_UNCONNECT(tep->te_conp); 41710Sstevel@tonic-gate } 41720Sstevel@tonic-gate } 41730Sstevel@tonic-gate 41740Sstevel@tonic-gate 41750Sstevel@tonic-gate static void 41760Sstevel@tonic-gate tl_addr_req(mblk_t *mp, tl_endpt_t *tep) 41770Sstevel@tonic-gate { 41780Sstevel@tonic-gate queue_t *wq; 41790Sstevel@tonic-gate size_t ack_sz; 41800Sstevel@tonic-gate mblk_t *ackmp; 41810Sstevel@tonic-gate struct T_addr_ack *taa; 41820Sstevel@tonic-gate 41830Sstevel@tonic-gate if (tep->te_closing) { 41840Sstevel@tonic-gate freemsg(mp); 41850Sstevel@tonic-gate return; 41860Sstevel@tonic-gate } 41870Sstevel@tonic-gate 41880Sstevel@tonic-gate wq = tep->te_wq; 41890Sstevel@tonic-gate 41900Sstevel@tonic-gate /* 41910Sstevel@tonic-gate * Note: T_ADDR_REQ message has only PRIM_type field 41920Sstevel@tonic-gate * so it is already validated earlier. 41930Sstevel@tonic-gate */ 41940Sstevel@tonic-gate 41950Sstevel@tonic-gate if (IS_CLTS(tep) || 41960Sstevel@tonic-gate (tep->te_state > TS_WREQ_ORDREL) || 41970Sstevel@tonic-gate (tep->te_state < TS_DATA_XFER)) { 41980Sstevel@tonic-gate /* 41990Sstevel@tonic-gate * Either connectionless or connection oriented but not 42000Sstevel@tonic-gate * in connected data transfer state or half-closed states. 42010Sstevel@tonic-gate */ 42020Sstevel@tonic-gate ack_sz = sizeof (struct T_addr_ack); 42030Sstevel@tonic-gate if (tep->te_state >= TS_IDLE) 42040Sstevel@tonic-gate /* is bound */ 42050Sstevel@tonic-gate ack_sz += tep->te_alen; 42060Sstevel@tonic-gate ackmp = reallocb(mp, ack_sz, 0); 42070Sstevel@tonic-gate if (ackmp == NULL) { 42080Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 42095240Snordmark SL_TRACE|SL_ERROR, 42105240Snordmark "tl_addr_req: reallocb failed")); 42110Sstevel@tonic-gate tl_memrecover(wq, mp, ack_sz); 42120Sstevel@tonic-gate return; 42130Sstevel@tonic-gate } 42140Sstevel@tonic-gate 42150Sstevel@tonic-gate taa = (struct T_addr_ack *)ackmp->b_rptr; 42160Sstevel@tonic-gate 42170Sstevel@tonic-gate bzero(taa, sizeof (struct T_addr_ack)); 42180Sstevel@tonic-gate 42190Sstevel@tonic-gate taa->PRIM_type = T_ADDR_ACK; 42200Sstevel@tonic-gate ackmp->b_datap->db_type = M_PCPROTO; 42210Sstevel@tonic-gate ackmp->b_wptr = (uchar_t *)&taa[1]; 42220Sstevel@tonic-gate 42230Sstevel@tonic-gate if (tep->te_state >= TS_IDLE) { 42240Sstevel@tonic-gate /* endpoint is bound */ 42250Sstevel@tonic-gate taa->LOCADDR_length = tep->te_alen; 42260Sstevel@tonic-gate taa->LOCADDR_offset = (t_scalar_t)sizeof (*taa); 42270Sstevel@tonic-gate 42280Sstevel@tonic-gate bcopy(tep->te_abuf, ackmp->b_wptr, 42295240Snordmark tep->te_alen); 42300Sstevel@tonic-gate ackmp->b_wptr += tep->te_alen; 42310Sstevel@tonic-gate ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim); 42320Sstevel@tonic-gate } 42330Sstevel@tonic-gate 42340Sstevel@tonic-gate (void) qreply(wq, ackmp); 42350Sstevel@tonic-gate } else { 42360Sstevel@tonic-gate ASSERT(tep->te_state == TS_DATA_XFER || 42375240Snordmark tep->te_state == TS_WIND_ORDREL || 42385240Snordmark tep->te_state == TS_WREQ_ORDREL); 42390Sstevel@tonic-gate /* connection oriented in data transfer */ 42400Sstevel@tonic-gate tl_connected_cots_addr_req(mp, tep); 42410Sstevel@tonic-gate } 42420Sstevel@tonic-gate } 42430Sstevel@tonic-gate 42440Sstevel@tonic-gate 42450Sstevel@tonic-gate static 
void 42460Sstevel@tonic-gate tl_connected_cots_addr_req(mblk_t *mp, tl_endpt_t *tep) 42470Sstevel@tonic-gate { 42480Sstevel@tonic-gate tl_endpt_t *peer_tep; 42490Sstevel@tonic-gate size_t ack_sz; 42500Sstevel@tonic-gate mblk_t *ackmp; 42510Sstevel@tonic-gate struct T_addr_ack *taa; 42520Sstevel@tonic-gate uchar_t *addr_startp; 42530Sstevel@tonic-gate 42540Sstevel@tonic-gate if (tep->te_closing) { 42550Sstevel@tonic-gate freemsg(mp); 42560Sstevel@tonic-gate return; 42570Sstevel@tonic-gate } 42580Sstevel@tonic-gate 42590Sstevel@tonic-gate ASSERT(tep->te_state >= TS_IDLE); 42600Sstevel@tonic-gate 42610Sstevel@tonic-gate ack_sz = sizeof (struct T_addr_ack); 42620Sstevel@tonic-gate ack_sz += T_ALIGN(tep->te_alen); 42630Sstevel@tonic-gate peer_tep = tep->te_conp; 42640Sstevel@tonic-gate ack_sz += peer_tep->te_alen; 42650Sstevel@tonic-gate 42660Sstevel@tonic-gate ackmp = tpi_ack_alloc(mp, ack_sz, M_PCPROTO, T_ADDR_ACK); 42670Sstevel@tonic-gate if (ackmp == NULL) { 42680Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 42695240Snordmark "tl_connected_cots_addr_req: reallocb failed")); 42700Sstevel@tonic-gate tl_memrecover(tep->te_wq, mp, ack_sz); 42710Sstevel@tonic-gate return; 42720Sstevel@tonic-gate } 42730Sstevel@tonic-gate 42740Sstevel@tonic-gate taa = (struct T_addr_ack *)ackmp->b_rptr; 42750Sstevel@tonic-gate 42760Sstevel@tonic-gate /* endpoint is bound */ 42770Sstevel@tonic-gate taa->LOCADDR_length = tep->te_alen; 42780Sstevel@tonic-gate taa->LOCADDR_offset = (t_scalar_t)sizeof (*taa); 42790Sstevel@tonic-gate 42800Sstevel@tonic-gate addr_startp = (uchar_t *)&taa[1]; 42810Sstevel@tonic-gate 42820Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, 42830Sstevel@tonic-gate tep->te_alen); 42840Sstevel@tonic-gate 42850Sstevel@tonic-gate taa->REMADDR_length = peer_tep->te_alen; 42860Sstevel@tonic-gate taa->REMADDR_offset = (t_scalar_t)T_ALIGN(taa->LOCADDR_offset + 42875240Snordmark taa->LOCADDR_length); 42880Sstevel@tonic-gate addr_startp = ackmp->b_rptr + taa->REMADDR_offset; 42890Sstevel@tonic-gate bcopy(peer_tep->te_abuf, addr_startp, 42900Sstevel@tonic-gate peer_tep->te_alen); 42910Sstevel@tonic-gate ackmp->b_wptr = (uchar_t *)ackmp->b_rptr + 42920Sstevel@tonic-gate taa->REMADDR_offset + peer_tep->te_alen; 42930Sstevel@tonic-gate ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim); 42940Sstevel@tonic-gate 42950Sstevel@tonic-gate putnext(tep->te_rq, ackmp); 42960Sstevel@tonic-gate } 42970Sstevel@tonic-gate 42980Sstevel@tonic-gate static void 42990Sstevel@tonic-gate tl_copy_info(struct T_info_ack *ia, tl_endpt_t *tep) 43000Sstevel@tonic-gate { 43010Sstevel@tonic-gate if (IS_CLTS(tep)) { 43020Sstevel@tonic-gate *ia = tl_clts_info_ack; 43030Sstevel@tonic-gate ia->TSDU_size = tl_tidusz; /* TSDU and TIDU size are same */ 43040Sstevel@tonic-gate } else { 43050Sstevel@tonic-gate *ia = tl_cots_info_ack; 43060Sstevel@tonic-gate if (IS_COTSORD(tep)) 43070Sstevel@tonic-gate ia->SERV_type = T_COTS_ORD; 43080Sstevel@tonic-gate } 43090Sstevel@tonic-gate ia->TIDU_size = tl_tidusz; 43100Sstevel@tonic-gate ia->CURRENT_state = tep->te_state; 43110Sstevel@tonic-gate } 43120Sstevel@tonic-gate 43130Sstevel@tonic-gate /* 43140Sstevel@tonic-gate * This routine responds to T_CAPABILITY_REQ messages. It is called by 43150Sstevel@tonic-gate * tl_wput. 
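 * Only the TC1_INFO and TC1_ACCEPTOR_ID capability bits are supported;
 * any other requested bits are left clear in the T_CAPABILITY_ACK.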
43160Sstevel@tonic-gate */ 43170Sstevel@tonic-gate static void 43180Sstevel@tonic-gate tl_capability_req(mblk_t *mp, tl_endpt_t *tep) 43190Sstevel@tonic-gate { 43200Sstevel@tonic-gate mblk_t *ackmp; 43210Sstevel@tonic-gate t_uscalar_t cap_bits1; 43220Sstevel@tonic-gate struct T_capability_ack *tcap; 43230Sstevel@tonic-gate 43240Sstevel@tonic-gate if (tep->te_closing) { 43250Sstevel@tonic-gate freemsg(mp); 43260Sstevel@tonic-gate return; 43270Sstevel@tonic-gate } 43280Sstevel@tonic-gate 43290Sstevel@tonic-gate cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1; 43300Sstevel@tonic-gate 43310Sstevel@tonic-gate ackmp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack), 43320Sstevel@tonic-gate M_PCPROTO, T_CAPABILITY_ACK); 43330Sstevel@tonic-gate if (ackmp == NULL) { 43340Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 43355240Snordmark "tl_capability_req: reallocb failed")); 43360Sstevel@tonic-gate tl_memrecover(tep->te_wq, mp, 43370Sstevel@tonic-gate sizeof (struct T_capability_ack)); 43380Sstevel@tonic-gate return; 43390Sstevel@tonic-gate } 43400Sstevel@tonic-gate 43410Sstevel@tonic-gate tcap = (struct T_capability_ack *)ackmp->b_rptr; 43420Sstevel@tonic-gate tcap->CAP_bits1 = 0; 43430Sstevel@tonic-gate 43440Sstevel@tonic-gate if (cap_bits1 & TC1_INFO) { 43450Sstevel@tonic-gate tl_copy_info(&tcap->INFO_ack, tep); 43460Sstevel@tonic-gate tcap->CAP_bits1 |= TC1_INFO; 43470Sstevel@tonic-gate } 43480Sstevel@tonic-gate 43490Sstevel@tonic-gate if (cap_bits1 & TC1_ACCEPTOR_ID) { 43500Sstevel@tonic-gate tcap->ACCEPTOR_id = tep->te_acceptor_id; 43510Sstevel@tonic-gate tcap->CAP_bits1 |= TC1_ACCEPTOR_ID; 43520Sstevel@tonic-gate } 43530Sstevel@tonic-gate 43540Sstevel@tonic-gate putnext(tep->te_rq, ackmp); 43550Sstevel@tonic-gate } 43560Sstevel@tonic-gate 43570Sstevel@tonic-gate static void 43580Sstevel@tonic-gate tl_info_req_ser(mblk_t *mp, tl_endpt_t *tep) 43590Sstevel@tonic-gate { 43600Sstevel@tonic-gate if (! tep->te_closing) 43610Sstevel@tonic-gate tl_info_req(mp, tep); 43620Sstevel@tonic-gate else 43630Sstevel@tonic-gate freemsg(mp); 43640Sstevel@tonic-gate 43650Sstevel@tonic-gate tl_serializer_exit(tep); 43660Sstevel@tonic-gate tl_refrele(tep); 43670Sstevel@tonic-gate } 43680Sstevel@tonic-gate 43690Sstevel@tonic-gate static void 43700Sstevel@tonic-gate tl_info_req(mblk_t *mp, tl_endpt_t *tep) 43710Sstevel@tonic-gate { 43720Sstevel@tonic-gate mblk_t *ackmp; 43730Sstevel@tonic-gate 43740Sstevel@tonic-gate ackmp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), 43750Sstevel@tonic-gate M_PCPROTO, T_INFO_ACK); 43760Sstevel@tonic-gate if (ackmp == NULL) { 43770Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 43785240Snordmark "tl_info_req: reallocb failed")); 43790Sstevel@tonic-gate tl_memrecover(tep->te_wq, mp, sizeof (struct T_info_ack)); 43800Sstevel@tonic-gate return; 43810Sstevel@tonic-gate } 43820Sstevel@tonic-gate 43830Sstevel@tonic-gate /* 43840Sstevel@tonic-gate * fill in T_INFO_ACK contents 43850Sstevel@tonic-gate */ 43860Sstevel@tonic-gate tl_copy_info((struct T_info_ack *)ackmp->b_rptr, tep); 43870Sstevel@tonic-gate 43880Sstevel@tonic-gate /* 43890Sstevel@tonic-gate * send ack message 43900Sstevel@tonic-gate */ 43910Sstevel@tonic-gate putnext(tep->te_rq, ackmp); 43920Sstevel@tonic-gate } 43930Sstevel@tonic-gate 43940Sstevel@tonic-gate /* 43950Sstevel@tonic-gate * Handle M_DATA, T_data_req and T_optdata_req. 43960Sstevel@tonic-gate * If this is a socket pass through T_optdata_req options unmodified. 
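 *
 * Rough flow, matching the state checks below: data arriving while the
 * endpoint is TS_IDLE is dropped; in TS_DATA_XFER it is forwarded to
 * the connected peer (te_conp) or, for a socket whose peer has not yet
 * accepted (only te_oconp set), either put back on our own queue or,
 * when closing, converted to the matching _IND and queued with the
 * pending T_CONN_IND; in TS_WREQ_ORDREL with no peer a T_DISCON_IND
 * with reason 0 is generated instead.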
43970Sstevel@tonic-gate */ 43980Sstevel@tonic-gate static void 43990Sstevel@tonic-gate tl_data(mblk_t *mp, tl_endpt_t *tep) 44000Sstevel@tonic-gate { 44010Sstevel@tonic-gate queue_t *wq = tep->te_wq; 44020Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr; 44030Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 44040Sstevel@tonic-gate tl_endpt_t *peer_tep; 44050Sstevel@tonic-gate queue_t *peer_rq; 44060Sstevel@tonic-gate boolean_t closing = tep->te_closing; 44070Sstevel@tonic-gate 44080Sstevel@tonic-gate if (IS_CLTS(tep)) { 44090Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, 44105240Snordmark SL_TRACE|SL_ERROR, 44115240Snordmark "tl_wput:clts:unattached M_DATA")); 44120Sstevel@tonic-gate if (!closing) { 44130Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 44140Sstevel@tonic-gate } else { 44150Sstevel@tonic-gate freemsg(mp); 44160Sstevel@tonic-gate } 44170Sstevel@tonic-gate return; 44180Sstevel@tonic-gate } 44190Sstevel@tonic-gate 44200Sstevel@tonic-gate /* 44210Sstevel@tonic-gate * If the endpoint is closing it should still forward any data to the 44220Sstevel@tonic-gate * peer (if it has one). If it is not allowed to forward it can just 44230Sstevel@tonic-gate * free the message. 44240Sstevel@tonic-gate */ 44250Sstevel@tonic-gate if (closing && 44260Sstevel@tonic-gate (tep->te_state != TS_DATA_XFER) && 44270Sstevel@tonic-gate (tep->te_state != TS_WREQ_ORDREL)) { 44280Sstevel@tonic-gate freemsg(mp); 44290Sstevel@tonic-gate return; 44300Sstevel@tonic-gate } 44310Sstevel@tonic-gate 44320Sstevel@tonic-gate if (DB_TYPE(mp) == M_PROTO) { 44330Sstevel@tonic-gate if (prim->type == T_DATA_REQ && 44340Sstevel@tonic-gate msz < sizeof (struct T_data_req)) { 44350Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 44360Sstevel@tonic-gate SL_TRACE|SL_ERROR, 44370Sstevel@tonic-gate "tl_data:T_DATA_REQ:invalid message")); 44380Sstevel@tonic-gate if (!closing) { 44390Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 44400Sstevel@tonic-gate } else { 44410Sstevel@tonic-gate freemsg(mp); 44420Sstevel@tonic-gate } 44430Sstevel@tonic-gate return; 44440Sstevel@tonic-gate } else if (prim->type == T_OPTDATA_REQ && 44457656SSherry.Moore@Sun.COM (msz < sizeof (struct T_optdata_req) || !IS_SOCKET(tep))) { 44460Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 44475240Snordmark SL_TRACE|SL_ERROR, 44485240Snordmark "tl_data:T_OPTDATA_REQ:invalid message")); 44490Sstevel@tonic-gate if (!closing) { 44500Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 44510Sstevel@tonic-gate } else { 44520Sstevel@tonic-gate freemsg(mp); 44530Sstevel@tonic-gate } 44540Sstevel@tonic-gate return; 44550Sstevel@tonic-gate } 44560Sstevel@tonic-gate } 44570Sstevel@tonic-gate 44580Sstevel@tonic-gate /* 44590Sstevel@tonic-gate * connection oriented provider 44600Sstevel@tonic-gate */ 44610Sstevel@tonic-gate switch (tep->te_state) { 44620Sstevel@tonic-gate case TS_IDLE: 44630Sstevel@tonic-gate /* 44640Sstevel@tonic-gate * Other end not here - do nothing. 
44650Sstevel@tonic-gate */ 44660Sstevel@tonic-gate freemsg(mp); 44670Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 44685240Snordmark "tl_data:cots with endpoint idle")); 44690Sstevel@tonic-gate return; 44700Sstevel@tonic-gate 44710Sstevel@tonic-gate case TS_DATA_XFER: 44720Sstevel@tonic-gate /* valid states */ 44730Sstevel@tonic-gate if (tep->te_conp != NULL) 44740Sstevel@tonic-gate break; 44750Sstevel@tonic-gate 44760Sstevel@tonic-gate if (tep->te_oconp == NULL) { 44770Sstevel@tonic-gate if (!closing) { 44780Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 44790Sstevel@tonic-gate } else { 44800Sstevel@tonic-gate freemsg(mp); 44810Sstevel@tonic-gate } 44820Sstevel@tonic-gate return; 44830Sstevel@tonic-gate } 44840Sstevel@tonic-gate /* 44850Sstevel@tonic-gate * For a socket the T_CONN_CON is sent early thus 44860Sstevel@tonic-gate * the peer might not yet have accepted the connection. 44870Sstevel@tonic-gate * If we are closing queue the packet with the T_CONN_IND. 44880Sstevel@tonic-gate * Otherwise defer processing the packet until the peer 44890Sstevel@tonic-gate * accepts the connection. 44900Sstevel@tonic-gate * Note that the queue is noenabled when we go into this 44910Sstevel@tonic-gate * state. 44920Sstevel@tonic-gate */ 44930Sstevel@tonic-gate if (!closing) { 44940Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 44955240Snordmark SL_TRACE|SL_ERROR, 44965240Snordmark "tl_data: ocon")); 44970Sstevel@tonic-gate TL_PUTBQ(tep, mp); 44980Sstevel@tonic-gate return; 44990Sstevel@tonic-gate } 45000Sstevel@tonic-gate if (DB_TYPE(mp) == M_PROTO) { 45010Sstevel@tonic-gate if (msz < sizeof (t_scalar_t)) { 45020Sstevel@tonic-gate freemsg(mp); 45030Sstevel@tonic-gate return; 45040Sstevel@tonic-gate } 45050Sstevel@tonic-gate /* reuse message block - just change REQ to IND */ 45060Sstevel@tonic-gate if (prim->type == T_DATA_REQ) 45070Sstevel@tonic-gate prim->type = T_DATA_IND; 45080Sstevel@tonic-gate else 45090Sstevel@tonic-gate prim->type = T_OPTDATA_IND; 45100Sstevel@tonic-gate } 45110Sstevel@tonic-gate tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp); 45120Sstevel@tonic-gate return; 45130Sstevel@tonic-gate 45140Sstevel@tonic-gate case TS_WREQ_ORDREL: 45150Sstevel@tonic-gate if (tep->te_conp == NULL) { 45160Sstevel@tonic-gate /* 45170Sstevel@tonic-gate * Other end closed - generate discon_ind 45180Sstevel@tonic-gate * with reason 0 to cause an EPIPE but no 45190Sstevel@tonic-gate * read side error on AF_UNIX sockets. 
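 * Messages queued with the T_CONN_IND are later handed to the acceptor
 * by tl_icon_sendmsgs() when the corresponding T_CONN_RES is processed.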
45200Sstevel@tonic-gate */ 45210Sstevel@tonic-gate freemsg(mp); 45220Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 45235240Snordmark SL_TRACE|SL_ERROR, 45245240Snordmark "tl_data: WREQ_ORDREL and no peer")); 45250Sstevel@tonic-gate tl_discon_ind(tep, 0); 45260Sstevel@tonic-gate return; 45270Sstevel@tonic-gate } 45280Sstevel@tonic-gate break; 45290Sstevel@tonic-gate 45300Sstevel@tonic-gate default: 45310Sstevel@tonic-gate /* invalid state for event TE_DATA_REQ */ 45320Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 45335240Snordmark "tl_data:cots:out of state")); 45340Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 45350Sstevel@tonic-gate return; 45360Sstevel@tonic-gate } 45370Sstevel@tonic-gate /* 45380Sstevel@tonic-gate * tep->te_state = NEXTSTATE(TE_DATA_REQ, tep->te_state); 45390Sstevel@tonic-gate * (State stays same on this event) 45400Sstevel@tonic-gate */ 45410Sstevel@tonic-gate 45420Sstevel@tonic-gate /* 45430Sstevel@tonic-gate * get connected endpoint 45440Sstevel@tonic-gate */ 45450Sstevel@tonic-gate if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) { 45460Sstevel@tonic-gate freemsg(mp); 45470Sstevel@tonic-gate /* Peer closed */ 45480Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE, 45495240Snordmark "tl_data: peer gone")); 45500Sstevel@tonic-gate return; 45510Sstevel@tonic-gate } 45520Sstevel@tonic-gate 45530Sstevel@tonic-gate ASSERT(tep->te_serializer == peer_tep->te_serializer); 45540Sstevel@tonic-gate peer_rq = peer_tep->te_rq; 45550Sstevel@tonic-gate 45560Sstevel@tonic-gate /* 45570Sstevel@tonic-gate * Put it back if flow controlled 45580Sstevel@tonic-gate * Note: Messages already on queue when we are closing is bounded 45590Sstevel@tonic-gate * so we can ignore flow control. 
45600Sstevel@tonic-gate */ 45610Sstevel@tonic-gate if (!canputnext(peer_rq) && !closing) { 45620Sstevel@tonic-gate TL_PUTBQ(tep, mp); 45630Sstevel@tonic-gate return; 45640Sstevel@tonic-gate } 45650Sstevel@tonic-gate 45660Sstevel@tonic-gate /* 45670Sstevel@tonic-gate * validate peer state 45680Sstevel@tonic-gate */ 45690Sstevel@tonic-gate switch (peer_tep->te_state) { 45700Sstevel@tonic-gate case TS_DATA_XFER: 45710Sstevel@tonic-gate case TS_WIND_ORDREL: 45720Sstevel@tonic-gate /* valid states */ 45730Sstevel@tonic-gate break; 45740Sstevel@tonic-gate default: 45750Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 45765240Snordmark "tl_data:rx side:invalid state")); 45770Sstevel@tonic-gate tl_merror(peer_tep->te_wq, mp, EPROTO); 45780Sstevel@tonic-gate return; 45790Sstevel@tonic-gate } 45800Sstevel@tonic-gate if (DB_TYPE(mp) == M_PROTO) { 45810Sstevel@tonic-gate /* reuse message block - just change REQ to IND */ 45820Sstevel@tonic-gate if (prim->type == T_DATA_REQ) 45830Sstevel@tonic-gate prim->type = T_DATA_IND; 45840Sstevel@tonic-gate else 45850Sstevel@tonic-gate prim->type = T_OPTDATA_IND; 45860Sstevel@tonic-gate } 45870Sstevel@tonic-gate /* 45880Sstevel@tonic-gate * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state); 45890Sstevel@tonic-gate * (peer state stays same on this event) 45900Sstevel@tonic-gate */ 45910Sstevel@tonic-gate /* 45920Sstevel@tonic-gate * send data to connected peer 45930Sstevel@tonic-gate */ 45940Sstevel@tonic-gate putnext(peer_rq, mp); 45950Sstevel@tonic-gate } 45960Sstevel@tonic-gate 45970Sstevel@tonic-gate 45980Sstevel@tonic-gate 45990Sstevel@tonic-gate static void 46000Sstevel@tonic-gate tl_exdata(mblk_t *mp, tl_endpt_t *tep) 46010Sstevel@tonic-gate { 46020Sstevel@tonic-gate queue_t *wq = tep->te_wq; 46030Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr; 46040Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 46050Sstevel@tonic-gate tl_endpt_t *peer_tep; 46060Sstevel@tonic-gate queue_t *peer_rq; 46070Sstevel@tonic-gate boolean_t closing = tep->te_closing; 46080Sstevel@tonic-gate 46090Sstevel@tonic-gate if (msz < sizeof (struct T_exdata_req)) { 46100Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 46115240Snordmark "tl_exdata:invalid message")); 46120Sstevel@tonic-gate if (!closing) { 46130Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 46140Sstevel@tonic-gate } else { 46150Sstevel@tonic-gate freemsg(mp); 46160Sstevel@tonic-gate } 46170Sstevel@tonic-gate return; 46180Sstevel@tonic-gate } 46190Sstevel@tonic-gate 46200Sstevel@tonic-gate /* 46210Sstevel@tonic-gate * If the endpoint is closing it should still forward any data to the 46220Sstevel@tonic-gate * peer (if it has one). If it is not allowed to forward it can just 46230Sstevel@tonic-gate * free the message. 46240Sstevel@tonic-gate */ 46250Sstevel@tonic-gate if (closing && 46260Sstevel@tonic-gate (tep->te_state != TS_DATA_XFER) && 46270Sstevel@tonic-gate (tep->te_state != TS_WREQ_ORDREL)) { 46280Sstevel@tonic-gate freemsg(mp); 46290Sstevel@tonic-gate return; 46300Sstevel@tonic-gate } 46310Sstevel@tonic-gate 46320Sstevel@tonic-gate /* 46330Sstevel@tonic-gate * validate state 46340Sstevel@tonic-gate */ 46350Sstevel@tonic-gate switch (tep->te_state) { 46360Sstevel@tonic-gate case TS_IDLE: 46370Sstevel@tonic-gate /* 46380Sstevel@tonic-gate * Other end not here - do nothing. 
46390Sstevel@tonic-gate */ 46400Sstevel@tonic-gate freemsg(mp); 46410Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 46425240Snordmark "tl_exdata:cots with endpoint idle")); 46430Sstevel@tonic-gate return; 46440Sstevel@tonic-gate 46450Sstevel@tonic-gate case TS_DATA_XFER: 46460Sstevel@tonic-gate /* valid states */ 46470Sstevel@tonic-gate if (tep->te_conp != NULL) 46480Sstevel@tonic-gate break; 46490Sstevel@tonic-gate 46500Sstevel@tonic-gate if (tep->te_oconp == NULL) { 46510Sstevel@tonic-gate if (!closing) { 46520Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 46530Sstevel@tonic-gate } else { 46540Sstevel@tonic-gate freemsg(mp); 46550Sstevel@tonic-gate } 46560Sstevel@tonic-gate return; 46570Sstevel@tonic-gate } 46580Sstevel@tonic-gate /* 46590Sstevel@tonic-gate * For a socket the T_CONN_CON is sent early thus 46600Sstevel@tonic-gate * the peer might not yet have accepted the connection. 46610Sstevel@tonic-gate * If we are closing queue the packet with the T_CONN_IND. 46620Sstevel@tonic-gate * Otherwise defer processing the packet until the peer 46630Sstevel@tonic-gate * accepts the connection. 46640Sstevel@tonic-gate * Note that the queue is noenabled when we go into this 46650Sstevel@tonic-gate * state. 46660Sstevel@tonic-gate */ 46670Sstevel@tonic-gate if (!closing) { 46680Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 46695240Snordmark SL_TRACE|SL_ERROR, 46705240Snordmark "tl_exdata: ocon")); 46710Sstevel@tonic-gate TL_PUTBQ(tep, mp); 46720Sstevel@tonic-gate return; 46730Sstevel@tonic-gate } 46740Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 46755240Snordmark "tl_exdata: closing socket ocon")); 46760Sstevel@tonic-gate prim->type = T_EXDATA_IND; 46770Sstevel@tonic-gate tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp); 46780Sstevel@tonic-gate return; 46790Sstevel@tonic-gate 46800Sstevel@tonic-gate case TS_WREQ_ORDREL: 46810Sstevel@tonic-gate if (tep->te_conp == NULL) { 46820Sstevel@tonic-gate /* 46830Sstevel@tonic-gate * Other end closed - generate discon_ind 46840Sstevel@tonic-gate * with reason 0 to cause an EPIPE but no 46850Sstevel@tonic-gate * read side error on AF_UNIX sockets. 
46860Sstevel@tonic-gate */ 46870Sstevel@tonic-gate freemsg(mp); 46880Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 46895240Snordmark SL_TRACE|SL_ERROR, 46905240Snordmark "tl_exdata: WREQ_ORDREL and no peer")); 46910Sstevel@tonic-gate tl_discon_ind(tep, 0); 46920Sstevel@tonic-gate return; 46930Sstevel@tonic-gate } 46940Sstevel@tonic-gate break; 46950Sstevel@tonic-gate 46960Sstevel@tonic-gate default: 46970Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 46985240Snordmark SL_TRACE|SL_ERROR, 46995240Snordmark "tl_wput:T_EXDATA_REQ:out of state, state=%d", 47005240Snordmark tep->te_state)); 47010Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 47020Sstevel@tonic-gate return; 47030Sstevel@tonic-gate } 47040Sstevel@tonic-gate /* 47050Sstevel@tonic-gate * tep->te_state = NEXTSTATE(TE_EXDATA_REQ, tep->te_state); 47060Sstevel@tonic-gate * (state stays same on this event) 47070Sstevel@tonic-gate */ 47080Sstevel@tonic-gate 47090Sstevel@tonic-gate /* 47100Sstevel@tonic-gate * get connected endpoint 47110Sstevel@tonic-gate */ 47120Sstevel@tonic-gate if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) { 47130Sstevel@tonic-gate freemsg(mp); 47140Sstevel@tonic-gate /* Peer closed */ 47150Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE, 47165240Snordmark "tl_exdata: peer gone")); 47170Sstevel@tonic-gate return; 47180Sstevel@tonic-gate } 47190Sstevel@tonic-gate 47200Sstevel@tonic-gate peer_rq = peer_tep->te_rq; 47210Sstevel@tonic-gate 47220Sstevel@tonic-gate /* 47230Sstevel@tonic-gate * Put it back if flow controlled 47240Sstevel@tonic-gate * Note: Messages already on queue when we are closing is bounded 47250Sstevel@tonic-gate * so we can ignore flow control. 47260Sstevel@tonic-gate */ 47270Sstevel@tonic-gate if (!canputnext(peer_rq) && !closing) { 47280Sstevel@tonic-gate TL_PUTBQ(tep, mp); 47290Sstevel@tonic-gate return; 47300Sstevel@tonic-gate } 47310Sstevel@tonic-gate 47320Sstevel@tonic-gate /* 47330Sstevel@tonic-gate * validate state on peer 47340Sstevel@tonic-gate */ 47350Sstevel@tonic-gate switch (peer_tep->te_state) { 47360Sstevel@tonic-gate case TS_DATA_XFER: 47370Sstevel@tonic-gate case TS_WIND_ORDREL: 47380Sstevel@tonic-gate /* valid states */ 47390Sstevel@tonic-gate break; 47400Sstevel@tonic-gate default: 47410Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 47425240Snordmark "tl_exdata:rx side:invalid state")); 47430Sstevel@tonic-gate tl_merror(peer_tep->te_wq, mp, EPROTO); 47440Sstevel@tonic-gate return; 47450Sstevel@tonic-gate } 47460Sstevel@tonic-gate /* 47470Sstevel@tonic-gate * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state); 47480Sstevel@tonic-gate * (peer state stays same on this event) 47490Sstevel@tonic-gate */ 47500Sstevel@tonic-gate /* 47510Sstevel@tonic-gate * reuse message block 47520Sstevel@tonic-gate */ 47530Sstevel@tonic-gate prim->type = T_EXDATA_IND; 47540Sstevel@tonic-gate 47550Sstevel@tonic-gate /* 47560Sstevel@tonic-gate * send data to connected peer 47570Sstevel@tonic-gate */ 47580Sstevel@tonic-gate putnext(peer_rq, mp); 47590Sstevel@tonic-gate } 47600Sstevel@tonic-gate 47610Sstevel@tonic-gate 47620Sstevel@tonic-gate 47630Sstevel@tonic-gate static void 47640Sstevel@tonic-gate tl_ordrel(mblk_t *mp, tl_endpt_t *tep) 47650Sstevel@tonic-gate { 47660Sstevel@tonic-gate queue_t *wq = tep->te_wq; 47670Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr; 47680Sstevel@tonic-gate ssize_t msz = MBLKL(mp); 47690Sstevel@tonic-gate tl_endpt_t *peer_tep; 
47700Sstevel@tonic-gate queue_t *peer_rq; 47710Sstevel@tonic-gate boolean_t closing = tep->te_closing; 47720Sstevel@tonic-gate 47730Sstevel@tonic-gate if (msz < sizeof (struct T_ordrel_req)) { 47740Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 47755240Snordmark "tl_ordrel:invalid message")); 47760Sstevel@tonic-gate if (!closing) { 47770Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 47780Sstevel@tonic-gate } else { 47790Sstevel@tonic-gate freemsg(mp); 47800Sstevel@tonic-gate } 47810Sstevel@tonic-gate return; 47820Sstevel@tonic-gate } 47830Sstevel@tonic-gate 47840Sstevel@tonic-gate /* 47850Sstevel@tonic-gate * validate state 47860Sstevel@tonic-gate */ 47870Sstevel@tonic-gate switch (tep->te_state) { 47880Sstevel@tonic-gate case TS_DATA_XFER: 47890Sstevel@tonic-gate case TS_WREQ_ORDREL: 47900Sstevel@tonic-gate /* valid states */ 47910Sstevel@tonic-gate if (tep->te_conp != NULL) 47920Sstevel@tonic-gate break; 47930Sstevel@tonic-gate 47940Sstevel@tonic-gate if (tep->te_oconp == NULL) 47950Sstevel@tonic-gate break; 47960Sstevel@tonic-gate 47970Sstevel@tonic-gate /* 47980Sstevel@tonic-gate * For a socket the T_CONN_CON is sent early thus 47990Sstevel@tonic-gate * the peer might not yet have accepted the connection. 48000Sstevel@tonic-gate * If we are closing queue the packet with the T_CONN_IND. 48010Sstevel@tonic-gate * Otherwise defer processing the packet until the peer 48020Sstevel@tonic-gate * accepts the connection. 48030Sstevel@tonic-gate * Note that the queue is noenabled when we go into this 48040Sstevel@tonic-gate * state. 48050Sstevel@tonic-gate */ 48060Sstevel@tonic-gate if (!closing) { 48070Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 48085240Snordmark SL_TRACE|SL_ERROR, 48095240Snordmark "tl_ordlrel: ocon")); 48100Sstevel@tonic-gate TL_PUTBQ(tep, mp); 48110Sstevel@tonic-gate return; 48120Sstevel@tonic-gate } 48130Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 48145240Snordmark "tl_ordlrel: closing socket ocon")); 48150Sstevel@tonic-gate prim->type = T_ORDREL_IND; 48160Sstevel@tonic-gate (void) tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp); 48170Sstevel@tonic-gate return; 48180Sstevel@tonic-gate 48190Sstevel@tonic-gate default: 48200Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 48215240Snordmark SL_TRACE|SL_ERROR, 48225240Snordmark "tl_wput:T_ORDREL_REQ:out of state, state=%d", 48235240Snordmark tep->te_state)); 48240Sstevel@tonic-gate if (!closing) { 48250Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 48260Sstevel@tonic-gate } else { 48270Sstevel@tonic-gate freemsg(mp); 48280Sstevel@tonic-gate } 48290Sstevel@tonic-gate return; 48300Sstevel@tonic-gate } 48310Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ORDREL_REQ, tep->te_state); 48320Sstevel@tonic-gate 48330Sstevel@tonic-gate /* 48340Sstevel@tonic-gate * get connected endpoint 48350Sstevel@tonic-gate */ 48360Sstevel@tonic-gate if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) { 48370Sstevel@tonic-gate /* Peer closed */ 48380Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE, 48395240Snordmark "tl_ordrel: peer gone")); 48400Sstevel@tonic-gate freemsg(mp); 48410Sstevel@tonic-gate return; 48420Sstevel@tonic-gate } 48430Sstevel@tonic-gate 48440Sstevel@tonic-gate peer_rq = peer_tep->te_rq; 48450Sstevel@tonic-gate 48460Sstevel@tonic-gate /* 48470Sstevel@tonic-gate * Put it back if flow controlled except when we are closing. 
48480Sstevel@tonic-gate * Note: Messages already on queue when we are closing is bounded 48490Sstevel@tonic-gate * so we can ignore flow control. 48500Sstevel@tonic-gate */ 48510Sstevel@tonic-gate if (! canputnext(peer_rq) && !closing) { 48520Sstevel@tonic-gate TL_PUTBQ(tep, mp); 48530Sstevel@tonic-gate return; 48540Sstevel@tonic-gate } 48550Sstevel@tonic-gate 48560Sstevel@tonic-gate /* 48570Sstevel@tonic-gate * validate state on peer 48580Sstevel@tonic-gate */ 48590Sstevel@tonic-gate switch (peer_tep->te_state) { 48600Sstevel@tonic-gate case TS_DATA_XFER: 48610Sstevel@tonic-gate case TS_WIND_ORDREL: 48620Sstevel@tonic-gate /* valid states */ 48630Sstevel@tonic-gate break; 48640Sstevel@tonic-gate default: 48650Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 48665240Snordmark "tl_ordrel:rx side:invalid state")); 48670Sstevel@tonic-gate tl_merror(peer_tep->te_wq, mp, EPROTO); 48680Sstevel@tonic-gate return; 48690Sstevel@tonic-gate } 48700Sstevel@tonic-gate peer_tep->te_state = NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state); 48710Sstevel@tonic-gate 48720Sstevel@tonic-gate /* 48730Sstevel@tonic-gate * reuse message block 48740Sstevel@tonic-gate */ 48750Sstevel@tonic-gate prim->type = T_ORDREL_IND; 48760Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE, 48775240Snordmark "tl_ordrel: send ordrel_ind")); 48780Sstevel@tonic-gate 48790Sstevel@tonic-gate /* 48800Sstevel@tonic-gate * send data to connected peer 48810Sstevel@tonic-gate */ 48820Sstevel@tonic-gate putnext(peer_rq, mp); 48830Sstevel@tonic-gate } 48840Sstevel@tonic-gate 48850Sstevel@tonic-gate 48860Sstevel@tonic-gate /* 48870Sstevel@tonic-gate * Send T_UDERROR_IND. The error should be from the <sys/errno.h> space. 48880Sstevel@tonic-gate */ 48890Sstevel@tonic-gate static void 48900Sstevel@tonic-gate tl_uderr(queue_t *wq, mblk_t *mp, t_scalar_t err) 48910Sstevel@tonic-gate { 48920Sstevel@tonic-gate size_t err_sz; 48930Sstevel@tonic-gate tl_endpt_t *tep; 48940Sstevel@tonic-gate struct T_unitdata_req *udreq; 48950Sstevel@tonic-gate mblk_t *err_mp; 48960Sstevel@tonic-gate t_scalar_t alen; 48970Sstevel@tonic-gate t_scalar_t olen; 48980Sstevel@tonic-gate struct T_uderror_ind *uderr; 48990Sstevel@tonic-gate uchar_t *addr_startp; 49000Sstevel@tonic-gate 49010Sstevel@tonic-gate err_sz = sizeof (struct T_uderror_ind); 49020Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr; 49030Sstevel@tonic-gate udreq = (struct T_unitdata_req *)mp->b_rptr; 49040Sstevel@tonic-gate alen = udreq->DEST_length; 49050Sstevel@tonic-gate olen = udreq->OPT_length; 49060Sstevel@tonic-gate 49070Sstevel@tonic-gate if (alen > 0) 49080Sstevel@tonic-gate err_sz = T_ALIGN(err_sz + alen); 49090Sstevel@tonic-gate if (olen > 0) 49100Sstevel@tonic-gate err_sz += olen; 49110Sstevel@tonic-gate 49120Sstevel@tonic-gate err_mp = allocb(err_sz, BPRI_MED); 49130Sstevel@tonic-gate if (! 
err_mp) { 49140Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 49155240Snordmark "tl_uderr:allocb failure")); 49160Sstevel@tonic-gate /* 49170Sstevel@tonic-gate * Note: no rollback of state needed as it does 49180Sstevel@tonic-gate * not change in connectionless transport 49190Sstevel@tonic-gate */ 49200Sstevel@tonic-gate tl_memrecover(wq, mp, err_sz); 49210Sstevel@tonic-gate return; 49220Sstevel@tonic-gate } 49230Sstevel@tonic-gate 49240Sstevel@tonic-gate DB_TYPE(err_mp) = M_PROTO; 49250Sstevel@tonic-gate err_mp->b_wptr = err_mp->b_rptr + err_sz; 49260Sstevel@tonic-gate uderr = (struct T_uderror_ind *)err_mp->b_rptr; 49270Sstevel@tonic-gate uderr->PRIM_type = T_UDERROR_IND; 49280Sstevel@tonic-gate uderr->ERROR_type = err; 49290Sstevel@tonic-gate uderr->DEST_length = alen; 49300Sstevel@tonic-gate uderr->OPT_length = olen; 49310Sstevel@tonic-gate if (alen <= 0) { 49320Sstevel@tonic-gate uderr->DEST_offset = 0; 49330Sstevel@tonic-gate } else { 49340Sstevel@tonic-gate uderr->DEST_offset = 49355240Snordmark (t_scalar_t)sizeof (struct T_uderror_ind); 49360Sstevel@tonic-gate addr_startp = mp->b_rptr + udreq->DEST_offset; 49370Sstevel@tonic-gate bcopy(addr_startp, err_mp->b_rptr + uderr->DEST_offset, 49385240Snordmark (size_t)alen); 49390Sstevel@tonic-gate } 49400Sstevel@tonic-gate if (olen <= 0) { 49410Sstevel@tonic-gate uderr->OPT_offset = 0; 49420Sstevel@tonic-gate } else { 49430Sstevel@tonic-gate uderr->OPT_offset = 49445240Snordmark (t_scalar_t)T_ALIGN(sizeof (struct T_uderror_ind) + 49455240Snordmark uderr->DEST_length); 49460Sstevel@tonic-gate addr_startp = mp->b_rptr + udreq->OPT_offset; 49470Sstevel@tonic-gate bcopy(addr_startp, err_mp->b_rptr+uderr->OPT_offset, 49485240Snordmark (size_t)olen); 49490Sstevel@tonic-gate } 49500Sstevel@tonic-gate freemsg(mp); 49510Sstevel@tonic-gate 49520Sstevel@tonic-gate /* 49530Sstevel@tonic-gate * send indication message 49540Sstevel@tonic-gate */ 49550Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_UDERROR_IND, tep->te_state); 49560Sstevel@tonic-gate 49570Sstevel@tonic-gate qreply(wq, err_mp); 49580Sstevel@tonic-gate } 49590Sstevel@tonic-gate 49600Sstevel@tonic-gate static void 49610Sstevel@tonic-gate tl_unitdata_ser(mblk_t *mp, tl_endpt_t *tep) 49620Sstevel@tonic-gate { 49630Sstevel@tonic-gate queue_t *wq = tep->te_wq; 49640Sstevel@tonic-gate 49650Sstevel@tonic-gate if (!tep->te_closing && (wq->q_first != NULL)) { 49660Sstevel@tonic-gate TL_PUTQ(tep, mp); 49670Sstevel@tonic-gate } else if (tep->te_rq != NULL) 49680Sstevel@tonic-gate tl_unitdata(mp, tep); 49690Sstevel@tonic-gate else 49700Sstevel@tonic-gate freemsg(mp); 49710Sstevel@tonic-gate 49720Sstevel@tonic-gate tl_serializer_exit(tep); 49730Sstevel@tonic-gate tl_refrele(tep); 49740Sstevel@tonic-gate } 49750Sstevel@tonic-gate 49760Sstevel@tonic-gate /* 49770Sstevel@tonic-gate * Handle T_unitdata_req. 49780Sstevel@tonic-gate * If TL_SET[U]CRED or TL_SOCKUCRED generate the credentials options. 49790Sstevel@tonic-gate * If this is a socket pass through options unmodified. 
49800Sstevel@tonic-gate */ 49810Sstevel@tonic-gate static void 49820Sstevel@tonic-gate tl_unitdata(mblk_t *mp, tl_endpt_t *tep) 49830Sstevel@tonic-gate { 49840Sstevel@tonic-gate queue_t *wq = tep->te_wq; 49850Sstevel@tonic-gate soux_addr_t ux_addr; 49860Sstevel@tonic-gate tl_addr_t destaddr; 49870Sstevel@tonic-gate uchar_t *addr_startp; 49880Sstevel@tonic-gate tl_endpt_t *peer_tep; 49890Sstevel@tonic-gate struct T_unitdata_ind *udind; 49900Sstevel@tonic-gate struct T_unitdata_req *udreq; 49910Sstevel@tonic-gate ssize_t msz, ui_sz; 49920Sstevel@tonic-gate t_scalar_t alen, aoff, olen, ooff; 49930Sstevel@tonic-gate t_scalar_t oldolen = 0; 4994*11134SCasper.Dik@Sun.COM cred_t *cr = NULL; 4995*11134SCasper.Dik@Sun.COM pid_t cpid; 49960Sstevel@tonic-gate 49970Sstevel@tonic-gate udreq = (struct T_unitdata_req *)mp->b_rptr; 49980Sstevel@tonic-gate msz = MBLKL(mp); 49990Sstevel@tonic-gate 50000Sstevel@tonic-gate /* 50010Sstevel@tonic-gate * validate the state 50020Sstevel@tonic-gate */ 50030Sstevel@tonic-gate if (tep->te_state != TS_IDLE) { 50040Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 50055240Snordmark SL_TRACE|SL_ERROR, 50065240Snordmark "tl_wput:T_CONN_REQ:out of state")); 50070Sstevel@tonic-gate tl_merror(wq, mp, EPROTO); 50080Sstevel@tonic-gate return; 50090Sstevel@tonic-gate } 50100Sstevel@tonic-gate /* 50110Sstevel@tonic-gate * tep->te_state = NEXTSTATE(TE_UNITDATA_REQ, tep->te_state); 50120Sstevel@tonic-gate * (state does not change on this event) 50130Sstevel@tonic-gate */ 50140Sstevel@tonic-gate 50150Sstevel@tonic-gate /* 50160Sstevel@tonic-gate * validate the message 50170Sstevel@tonic-gate * Note: dereference fields in struct inside message only 50180Sstevel@tonic-gate * after validating the message length. 50190Sstevel@tonic-gate */ 50200Sstevel@tonic-gate if (msz < sizeof (struct T_unitdata_req)) { 50210Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 50225240Snordmark "tl_unitdata:invalid message length")); 50230Sstevel@tonic-gate tl_merror(wq, mp, EINVAL); 50240Sstevel@tonic-gate return; 50250Sstevel@tonic-gate } 50260Sstevel@tonic-gate alen = udreq->DEST_length; 50270Sstevel@tonic-gate aoff = udreq->DEST_offset; 50280Sstevel@tonic-gate oldolen = olen = udreq->OPT_length; 50290Sstevel@tonic-gate ooff = udreq->OPT_offset; 50300Sstevel@tonic-gate if (olen == 0) 50310Sstevel@tonic-gate ooff = 0; 50320Sstevel@tonic-gate 50330Sstevel@tonic-gate if (IS_SOCKET(tep)) { 50340Sstevel@tonic-gate if ((alen != TL_SOUX_ADDRLEN) || 50350Sstevel@tonic-gate (aoff < 0) || 50360Sstevel@tonic-gate (aoff + alen > msz) || 50370Sstevel@tonic-gate (olen < 0) || (ooff < 0) || 50380Sstevel@tonic-gate ((olen > 0) && ((ooff + olen) > msz))) { 50390Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 50405240Snordmark 1, SL_TRACE|SL_ERROR, 50415240Snordmark "tl_unitdata_req: invalid socket addr " 50425240Snordmark "(msz=%d, al=%d, ao=%d, ol=%d, oo = %d)", 50435240Snordmark (int)msz, alen, aoff, olen, ooff)); 50440Sstevel@tonic-gate tl_error_ack(wq, mp, TSYSERR, EINVAL, T_UNITDATA_REQ); 50450Sstevel@tonic-gate return; 50460Sstevel@tonic-gate } 50470Sstevel@tonic-gate bcopy(mp->b_rptr + aoff, &ux_addr, TL_SOUX_ADDRLEN); 50480Sstevel@tonic-gate 50490Sstevel@tonic-gate if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) && 50500Sstevel@tonic-gate (ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) { 50510Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 50525240Snordmark 1, SL_TRACE|SL_ERROR, 50535240Snordmark "tl_conn_req: invalid socket magic")); 50540Sstevel@tonic-gate 
tl_error_ack(wq, mp, TSYSERR, EINVAL, T_UNITDATA_REQ); 50550Sstevel@tonic-gate return; 50560Sstevel@tonic-gate } 50570Sstevel@tonic-gate } else { 50580Sstevel@tonic-gate if ((alen < 0) || 50590Sstevel@tonic-gate (aoff < 0) || 50600Sstevel@tonic-gate ((alen > 0) && ((aoff + alen) > msz)) || 50610Sstevel@tonic-gate ((ssize_t)alen > (msz - sizeof (struct T_unitdata_req))) || 50620Sstevel@tonic-gate ((aoff + alen) < 0) || 50630Sstevel@tonic-gate ((olen > 0) && ((ooff + olen) > msz)) || 50640Sstevel@tonic-gate (olen < 0) || 50650Sstevel@tonic-gate (ooff < 0) || 50660Sstevel@tonic-gate ((ssize_t)olen > (msz - sizeof (struct T_unitdata_req)))) { 50670Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 50680Sstevel@tonic-gate SL_TRACE|SL_ERROR, 50690Sstevel@tonic-gate "tl_unitdata:invalid unit data message")); 50700Sstevel@tonic-gate tl_merror(wq, mp, EINVAL); 50710Sstevel@tonic-gate return; 50720Sstevel@tonic-gate } 50730Sstevel@tonic-gate } 50740Sstevel@tonic-gate 50750Sstevel@tonic-gate /* Options not supported unless it's a socket */ 50760Sstevel@tonic-gate if (alen == 0 || (olen != 0 && !IS_SOCKET(tep))) { 50770Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 50780Sstevel@tonic-gate "tl_unitdata:option use(unsupported) or zero len addr")); 50790Sstevel@tonic-gate tl_uderr(wq, mp, EPROTO); 50800Sstevel@tonic-gate return; 50810Sstevel@tonic-gate } 50820Sstevel@tonic-gate #ifdef DEBUG 50830Sstevel@tonic-gate /* 50840Sstevel@tonic-gate * Mild form of ASSERT()ion to detect broken TPI apps. 50850Sstevel@tonic-gate * if (! assertion) 50860Sstevel@tonic-gate * log warning; 50870Sstevel@tonic-gate */ 50880Sstevel@tonic-gate if (! (aoff >= (t_scalar_t)sizeof (struct T_unitdata_req))) { 50890Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 50905240Snordmark "tl_unitdata:addr overlaps TPI message")); 50910Sstevel@tonic-gate } 50920Sstevel@tonic-gate #endif 50930Sstevel@tonic-gate /* 50940Sstevel@tonic-gate * get destination endpoint 50950Sstevel@tonic-gate */ 50960Sstevel@tonic-gate destaddr.ta_alen = alen; 50970Sstevel@tonic-gate destaddr.ta_abuf = mp->b_rptr + aoff; 50980Sstevel@tonic-gate destaddr.ta_zoneid = tep->te_zoneid; 50990Sstevel@tonic-gate 51000Sstevel@tonic-gate /* 51010Sstevel@tonic-gate * Check whether the destination is the same that was used previously 51020Sstevel@tonic-gate * and the destination endpoint is in the right state. If something is 51030Sstevel@tonic-gate * wrong, find destination again and cache it. 51040Sstevel@tonic-gate */ 51050Sstevel@tonic-gate peer_tep = tep->te_lastep; 51060Sstevel@tonic-gate 51070Sstevel@tonic-gate if ((peer_tep == NULL) || peer_tep->te_closing || 51080Sstevel@tonic-gate (peer_tep->te_state != TS_IDLE) || 51090Sstevel@tonic-gate !tl_eqaddr(&destaddr, &peer_tep->te_ap)) { 51100Sstevel@tonic-gate /* 51110Sstevel@tonic-gate * Not the same as cached destination , need to find the right 51120Sstevel@tonic-gate * destination. 51130Sstevel@tonic-gate */ 51140Sstevel@tonic-gate peer_tep = (IS_SOCKET(tep) ? 
51150Sstevel@tonic-gate tl_sock_find_peer(tep, &ux_addr) : 51160Sstevel@tonic-gate tl_find_peer(tep, &destaddr)); 51170Sstevel@tonic-gate 51180Sstevel@tonic-gate if (peer_tep == NULL) { 51190Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 51205240Snordmark SL_TRACE|SL_ERROR, 51215240Snordmark "tl_unitdata:no one at destination address")); 51220Sstevel@tonic-gate tl_uderr(wq, mp, ECONNRESET); 51230Sstevel@tonic-gate return; 51240Sstevel@tonic-gate } 51250Sstevel@tonic-gate 51260Sstevel@tonic-gate /* 51270Sstevel@tonic-gate * Cache the new peer. 51280Sstevel@tonic-gate */ 51290Sstevel@tonic-gate if (tep->te_lastep != NULL) 51300Sstevel@tonic-gate tl_refrele(tep->te_lastep); 51310Sstevel@tonic-gate 51320Sstevel@tonic-gate tep->te_lastep = peer_tep; 51330Sstevel@tonic-gate } 51340Sstevel@tonic-gate 51350Sstevel@tonic-gate if (peer_tep->te_state != TS_IDLE) { 51360Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR, 51375240Snordmark "tl_unitdata:provider in invalid state")); 51380Sstevel@tonic-gate tl_uderr(wq, mp, EPROTO); 51390Sstevel@tonic-gate return; 51400Sstevel@tonic-gate } 51410Sstevel@tonic-gate 51420Sstevel@tonic-gate ASSERT(peer_tep->te_rq != NULL); 51430Sstevel@tonic-gate 51440Sstevel@tonic-gate /* 51450Sstevel@tonic-gate * Put it back if flow controlled except when we are closing. 51460Sstevel@tonic-gate * Note: Messages already on queue when we are closing is bounded 51470Sstevel@tonic-gate * so we can ignore flow control. 51480Sstevel@tonic-gate */ 51490Sstevel@tonic-gate if (!canputnext(peer_tep->te_rq) && !(tep->te_closing)) { 51500Sstevel@tonic-gate /* record what we are flow controlled on */ 51510Sstevel@tonic-gate if (tep->te_flowq != NULL) { 51520Sstevel@tonic-gate list_remove(&tep->te_flowq->te_flowlist, tep); 51530Sstevel@tonic-gate } 51540Sstevel@tonic-gate list_insert_head(&peer_tep->te_flowlist, tep); 51550Sstevel@tonic-gate tep->te_flowq = peer_tep; 51560Sstevel@tonic-gate TL_PUTBQ(tep, mp); 51570Sstevel@tonic-gate return; 51580Sstevel@tonic-gate } 51590Sstevel@tonic-gate /* 51600Sstevel@tonic-gate * prepare indication message 51610Sstevel@tonic-gate */ 51620Sstevel@tonic-gate 51630Sstevel@tonic-gate /* 51640Sstevel@tonic-gate * calculate length of message 51650Sstevel@tonic-gate */ 5166*11134SCasper.Dik@Sun.COM if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED)) { 5167*11134SCasper.Dik@Sun.COM cr = msg_getcred(mp, &cpid); 5168*11134SCasper.Dik@Sun.COM ASSERT(cr != NULL); 5169*11134SCasper.Dik@Sun.COM 5170*11134SCasper.Dik@Sun.COM if (peer_tep->te_flag & TL_SETCRED) { 5171*11134SCasper.Dik@Sun.COM ASSERT(olen == 0); 5172*11134SCasper.Dik@Sun.COM olen = (t_scalar_t)sizeof (struct opthdr) + 5173*11134SCasper.Dik@Sun.COM OPTLEN(sizeof (tl_credopt_t)); 5174*11134SCasper.Dik@Sun.COM /* 1 option only */ 5175*11134SCasper.Dik@Sun.COM } else if (peer_tep->te_flag & TL_SETUCRED) { 5176*11134SCasper.Dik@Sun.COM ASSERT(olen == 0); 5177*11134SCasper.Dik@Sun.COM olen = (t_scalar_t)sizeof (struct opthdr) + 5178*11134SCasper.Dik@Sun.COM OPTLEN(ucredminsize(cr)); 5179*11134SCasper.Dik@Sun.COM /* 1 option only */ 5180*11134SCasper.Dik@Sun.COM } else { 5181*11134SCasper.Dik@Sun.COM /* Possibly more than one option */ 5182*11134SCasper.Dik@Sun.COM olen += (t_scalar_t)sizeof (struct T_opthdr) + 5183*11134SCasper.Dik@Sun.COM OPTLEN(ucredminsize(cr)); 5184*11134SCasper.Dik@Sun.COM } 51850Sstevel@tonic-gate } 51860Sstevel@tonic-gate 51870Sstevel@tonic-gate ui_sz = T_ALIGN(sizeof (struct T_unitdata_ind) + tep->te_alen) + 51885240Snordmark olen; 
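	/*
	 * Descriptive note (added): ui_sz is the size of the T_UNITDATA_IND
	 * that may be built below: the fixed header, the sender's address
	 * (te_alen bytes, with the option area aligned after it), and olen
	 * bytes of options (the caller's options plus any credentials
	 * option sized above).
	 */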
51890Sstevel@tonic-gate /* 51900Sstevel@tonic-gate * If the unitdata_ind fits and we are not adding options 51910Sstevel@tonic-gate * reuse the udreq mblk. 51920Sstevel@tonic-gate */ 51930Sstevel@tonic-gate if (msz >= ui_sz && alen >= tep->te_alen && 51940Sstevel@tonic-gate !(peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED))) { 51950Sstevel@tonic-gate /* 51960Sstevel@tonic-gate * Reuse the original mblk. Leave options in place. 51970Sstevel@tonic-gate */ 51980Sstevel@tonic-gate udind = (struct T_unitdata_ind *)mp->b_rptr; 51990Sstevel@tonic-gate udind->PRIM_type = T_UNITDATA_IND; 52000Sstevel@tonic-gate udind->SRC_length = tep->te_alen; 52010Sstevel@tonic-gate addr_startp = mp->b_rptr + udind->SRC_offset; 52020Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen); 52030Sstevel@tonic-gate } else { 52040Sstevel@tonic-gate /* Allocate a new T_unidata_ind message */ 52050Sstevel@tonic-gate mblk_t *ui_mp; 52060Sstevel@tonic-gate 52070Sstevel@tonic-gate ui_mp = allocb(ui_sz, BPRI_MED); 52080Sstevel@tonic-gate if (! ui_mp) { 52090Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 4, SL_TRACE, 52105240Snordmark "tl_unitdata:allocb failure:message queued")); 52110Sstevel@tonic-gate tl_memrecover(wq, mp, ui_sz); 52120Sstevel@tonic-gate return; 52130Sstevel@tonic-gate } 52140Sstevel@tonic-gate 52150Sstevel@tonic-gate /* 52160Sstevel@tonic-gate * fill in T_UNITDATA_IND contents 52170Sstevel@tonic-gate */ 52180Sstevel@tonic-gate DB_TYPE(ui_mp) = M_PROTO; 52190Sstevel@tonic-gate ui_mp->b_wptr = ui_mp->b_rptr + ui_sz; 52200Sstevel@tonic-gate udind = (struct T_unitdata_ind *)ui_mp->b_rptr; 52210Sstevel@tonic-gate udind->PRIM_type = T_UNITDATA_IND; 52220Sstevel@tonic-gate udind->SRC_offset = (t_scalar_t)sizeof (struct T_unitdata_ind); 52230Sstevel@tonic-gate udind->SRC_length = tep->te_alen; 52240Sstevel@tonic-gate addr_startp = ui_mp->b_rptr + udind->SRC_offset; 52250Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen); 52260Sstevel@tonic-gate udind->OPT_offset = 52270Sstevel@tonic-gate (t_scalar_t)T_ALIGN(udind->SRC_offset + udind->SRC_length); 52280Sstevel@tonic-gate udind->OPT_length = olen; 52290Sstevel@tonic-gate if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED)) { 52308778SErik.Nordmark@Sun.COM 52310Sstevel@tonic-gate if (oldolen != 0) { 52320Sstevel@tonic-gate bcopy((void *)((uintptr_t)udreq + ooff), 52330Sstevel@tonic-gate (void *)((uintptr_t)udind + 52340Sstevel@tonic-gate udind->OPT_offset), 52350Sstevel@tonic-gate oldolen); 52360Sstevel@tonic-gate } 52378778SErik.Nordmark@Sun.COM ASSERT(cr != NULL); 52388778SErik.Nordmark@Sun.COM 52390Sstevel@tonic-gate tl_fill_option(ui_mp->b_rptr + udind->OPT_offset + 52408778SErik.Nordmark@Sun.COM oldolen, cr, cpid, 52411676Sjpk peer_tep->te_flag, peer_tep->te_credp); 52420Sstevel@tonic-gate } else { 52430Sstevel@tonic-gate bcopy((void *)((uintptr_t)udreq + ooff), 52445240Snordmark (void *)((uintptr_t)udind + udind->OPT_offset), 52455240Snordmark olen); 52460Sstevel@tonic-gate } 52470Sstevel@tonic-gate 52480Sstevel@tonic-gate /* 52490Sstevel@tonic-gate * relink data blocks from mp to ui_mp 52500Sstevel@tonic-gate */ 52510Sstevel@tonic-gate ui_mp->b_cont = mp->b_cont; 52520Sstevel@tonic-gate freeb(mp); 52530Sstevel@tonic-gate mp = ui_mp; 52540Sstevel@tonic-gate } 52550Sstevel@tonic-gate /* 52560Sstevel@tonic-gate * send indication message 52570Sstevel@tonic-gate */ 52580Sstevel@tonic-gate peer_tep->te_state = NEXTSTATE(TE_UNITDATA_IND, peer_tep->te_state); 52590Sstevel@tonic-gate putnext(peer_tep->te_rq, mp); 
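	/*
	 * Descriptive note (added): putnext() has handed the original or
	 * rebuilt message to the peer's read queue; it must not be
	 * referenced again on this path.
	 */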
52600Sstevel@tonic-gate } 52610Sstevel@tonic-gate 52620Sstevel@tonic-gate 52630Sstevel@tonic-gate 52640Sstevel@tonic-gate /* 52650Sstevel@tonic-gate * Check if a given addr is in use. 52660Sstevel@tonic-gate * Endpoint ptr returned or NULL if not found. 52670Sstevel@tonic-gate * The name space is separate for each mode. This implies that 52680Sstevel@tonic-gate * sockets get their own name space. 52690Sstevel@tonic-gate */ 52700Sstevel@tonic-gate static tl_endpt_t * 52710Sstevel@tonic-gate tl_find_peer(tl_endpt_t *tep, tl_addr_t *ap) 52720Sstevel@tonic-gate { 52730Sstevel@tonic-gate tl_endpt_t *peer_tep = NULL; 52740Sstevel@tonic-gate int rc = mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)ap, 52750Sstevel@tonic-gate (mod_hash_val_t *)&peer_tep, tl_find_callback); 52760Sstevel@tonic-gate 52770Sstevel@tonic-gate ASSERT(! IS_SOCKET(tep)); 52780Sstevel@tonic-gate 52790Sstevel@tonic-gate ASSERT(ap != NULL && ap->ta_alen > 0); 52800Sstevel@tonic-gate ASSERT(ap->ta_zoneid == tep->te_zoneid); 52810Sstevel@tonic-gate ASSERT(ap->ta_abuf != NULL); 52820Sstevel@tonic-gate ASSERT(EQUIV(rc == 0, peer_tep != NULL)); 52830Sstevel@tonic-gate ASSERT(IMPLY(rc == 0, 52845240Snordmark (tep->te_zoneid == peer_tep->te_zoneid) && 52855240Snordmark (tep->te_transport == peer_tep->te_transport))); 52860Sstevel@tonic-gate 52870Sstevel@tonic-gate if ((rc == 0) && (peer_tep->te_closing)) { 52880Sstevel@tonic-gate tl_refrele(peer_tep); 52890Sstevel@tonic-gate peer_tep = NULL; 52900Sstevel@tonic-gate } 52910Sstevel@tonic-gate 52920Sstevel@tonic-gate return (peer_tep); 52930Sstevel@tonic-gate } 52940Sstevel@tonic-gate 52950Sstevel@tonic-gate /* 52960Sstevel@tonic-gate * Find peer for a socket based on unix domain address. 52970Sstevel@tonic-gate * For implicit addresses our peer can be found by minor number in ai hash. For 52987409SRic.Aleshire@Sun.COM * explicit binds we look vnode address at addr_hash. 52990Sstevel@tonic-gate */ 53000Sstevel@tonic-gate static tl_endpt_t * 53010Sstevel@tonic-gate tl_sock_find_peer(tl_endpt_t *tep, soux_addr_t *ux_addr) 53020Sstevel@tonic-gate { 53030Sstevel@tonic-gate tl_endpt_t *peer_tep = NULL; 53040Sstevel@tonic-gate mod_hash_t *hash = ux_addr->soua_magic == SOU_MAGIC_IMPLICIT ? 53050Sstevel@tonic-gate tep->te_aihash : tep->te_addrhash; 53060Sstevel@tonic-gate int rc = mod_hash_find_cb(hash, (mod_hash_key_t)ux_addr->soua_vp, 53070Sstevel@tonic-gate (mod_hash_val_t *)&peer_tep, tl_find_callback); 53080Sstevel@tonic-gate 53090Sstevel@tonic-gate ASSERT(IS_SOCKET(tep)); 53100Sstevel@tonic-gate ASSERT(EQUIV(rc == 0, peer_tep != NULL)); 53117409SRic.Aleshire@Sun.COM ASSERT(IMPLY(rc == 0, (tep->te_transport == peer_tep->te_transport))); 53127409SRic.Aleshire@Sun.COM 53137409SRic.Aleshire@Sun.COM if (peer_tep != NULL) { 53147409SRic.Aleshire@Sun.COM /* Don't attempt to use closing peer. */ 53157409SRic.Aleshire@Sun.COM if (peer_tep->te_closing) 53167409SRic.Aleshire@Sun.COM goto errout; 53177409SRic.Aleshire@Sun.COM 53187409SRic.Aleshire@Sun.COM /* 53197409SRic.Aleshire@Sun.COM * Cross-zone unix sockets are permitted, but for Trusted 53207409SRic.Aleshire@Sun.COM * Extensions only, the "server" for these must be in the 53217409SRic.Aleshire@Sun.COM * global zone. 
53227409SRic.Aleshire@Sun.COM */ 53237409SRic.Aleshire@Sun.COM if ((peer_tep->te_zoneid != tep->te_zoneid) && 53247409SRic.Aleshire@Sun.COM is_system_labeled() && 53257409SRic.Aleshire@Sun.COM (peer_tep->te_zoneid != GLOBAL_ZONEID)) 53267409SRic.Aleshire@Sun.COM goto errout; 53270Sstevel@tonic-gate } 53280Sstevel@tonic-gate 53290Sstevel@tonic-gate return (peer_tep); 53307409SRic.Aleshire@Sun.COM 53317409SRic.Aleshire@Sun.COM errout: 53327409SRic.Aleshire@Sun.COM tl_refrele(peer_tep); 53337409SRic.Aleshire@Sun.COM return (NULL); 53340Sstevel@tonic-gate } 53350Sstevel@tonic-gate 53360Sstevel@tonic-gate /* 53370Sstevel@tonic-gate * Generate a free addr and return it in struct pointed by ap 53380Sstevel@tonic-gate * but allocating space for address buffer. 53390Sstevel@tonic-gate * The generated address will be at least 4 bytes long and, if req->ta_alen 53400Sstevel@tonic-gate * exceeds 4 bytes, be req->ta_alen bytes long. 53410Sstevel@tonic-gate * 53420Sstevel@tonic-gate * If address is found it will be inserted in the hash. 53430Sstevel@tonic-gate * 53440Sstevel@tonic-gate * If req->ta_alen is larger than the default alen (4 bytes) the last 53450Sstevel@tonic-gate * alen-4 bytes will always be the same as in req. 53460Sstevel@tonic-gate * 53470Sstevel@tonic-gate * Return 0 for failure. 53480Sstevel@tonic-gate * Return non-zero for success. 53490Sstevel@tonic-gate */ 53500Sstevel@tonic-gate static boolean_t 53510Sstevel@tonic-gate tl_get_any_addr(tl_endpt_t *tep, tl_addr_t *req) 53520Sstevel@tonic-gate { 53530Sstevel@tonic-gate t_scalar_t alen; 53540Sstevel@tonic-gate uint32_t loopcnt; /* Limit loop to 2^32 */ 53550Sstevel@tonic-gate 53560Sstevel@tonic-gate ASSERT(tep->te_hash_hndl != NULL); 53570Sstevel@tonic-gate ASSERT(! IS_SOCKET(tep)); 53580Sstevel@tonic-gate 53590Sstevel@tonic-gate if (tep->te_hash_hndl == NULL) 53600Sstevel@tonic-gate return (B_FALSE); 53610Sstevel@tonic-gate 53620Sstevel@tonic-gate /* 53630Sstevel@tonic-gate * check if default addr is in use 53640Sstevel@tonic-gate * if it is - bump it and try again 53650Sstevel@tonic-gate */ 53660Sstevel@tonic-gate if (req == NULL) { 53670Sstevel@tonic-gate alen = sizeof (uint32_t); 53680Sstevel@tonic-gate } else { 53690Sstevel@tonic-gate alen = max(req->ta_alen, sizeof (uint32_t)); 53700Sstevel@tonic-gate ASSERT(tep->te_zoneid == req->ta_zoneid); 53710Sstevel@tonic-gate } 53720Sstevel@tonic-gate 53730Sstevel@tonic-gate if (tep->te_alen < alen) { 53740Sstevel@tonic-gate void *abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP); 53750Sstevel@tonic-gate 53760Sstevel@tonic-gate /* 53770Sstevel@tonic-gate * Not enough space in tep->ta_ap to hold the address, 53780Sstevel@tonic-gate * allocate a bigger space. 53790Sstevel@tonic-gate */ 53800Sstevel@tonic-gate if (abuf == NULL) 53810Sstevel@tonic-gate return (B_FALSE); 53820Sstevel@tonic-gate 53830Sstevel@tonic-gate if (tep->te_alen > 0) 53840Sstevel@tonic-gate kmem_free(tep->te_abuf, tep->te_alen); 53850Sstevel@tonic-gate 53860Sstevel@tonic-gate tep->te_alen = alen; 53870Sstevel@tonic-gate tep->te_abuf = abuf; 53880Sstevel@tonic-gate } 53890Sstevel@tonic-gate 53900Sstevel@tonic-gate /* Copy in the address in req */ 53910Sstevel@tonic-gate if (req != NULL) { 53920Sstevel@tonic-gate ASSERT(alen >= req->ta_alen); 53930Sstevel@tonic-gate bcopy(req->ta_abuf, tep->te_abuf, (size_t)req->ta_alen); 53940Sstevel@tonic-gate } 53950Sstevel@tonic-gate 53960Sstevel@tonic-gate /* 53970Sstevel@tonic-gate * First try minor number then try default addresses. 
53980Sstevel@tonic-gate */ 53990Sstevel@tonic-gate bcopy(&tep->te_minor, tep->te_abuf, sizeof (uint32_t)); 54000Sstevel@tonic-gate 54010Sstevel@tonic-gate for (loopcnt = 0; loopcnt < UINT32_MAX; loopcnt++) { 54020Sstevel@tonic-gate if (mod_hash_insert_reserve(tep->te_addrhash, 54035240Snordmark (mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep, 54045240Snordmark tep->te_hash_hndl) == 0) { 54050Sstevel@tonic-gate /* 54060Sstevel@tonic-gate * found free address 54070Sstevel@tonic-gate */ 54080Sstevel@tonic-gate tep->te_flag |= TL_ADDRHASHED; 54090Sstevel@tonic-gate tep->te_hash_hndl = NULL; 54100Sstevel@tonic-gate 54110Sstevel@tonic-gate return (B_TRUE); /* successful return */ 54120Sstevel@tonic-gate } 54130Sstevel@tonic-gate /* 54140Sstevel@tonic-gate * Use default address. 54150Sstevel@tonic-gate */ 54160Sstevel@tonic-gate bcopy(&tep->te_defaddr, tep->te_abuf, sizeof (uint32_t)); 54170Sstevel@tonic-gate atomic_add_32(&tep->te_defaddr, 1); 54180Sstevel@tonic-gate } 54190Sstevel@tonic-gate 54200Sstevel@tonic-gate /* 54210Sstevel@tonic-gate * Failed to find anything. 54220Sstevel@tonic-gate */ 54230Sstevel@tonic-gate (void) (STRLOG(TL_ID, -1, 1, SL_ERROR, 54245240Snordmark "tl_get_any_addr:looped 2^32 times")); 54250Sstevel@tonic-gate return (B_FALSE); 54260Sstevel@tonic-gate } 54270Sstevel@tonic-gate 54280Sstevel@tonic-gate /* 54290Sstevel@tonic-gate * reallocb + set r/w ptrs to reflect size. 54300Sstevel@tonic-gate */ 54310Sstevel@tonic-gate static mblk_t * 54320Sstevel@tonic-gate tl_resizemp(mblk_t *mp, ssize_t new_size) 54330Sstevel@tonic-gate { 54340Sstevel@tonic-gate if ((mp = reallocb(mp, new_size, 0)) == NULL) 54350Sstevel@tonic-gate return (NULL); 54360Sstevel@tonic-gate 54370Sstevel@tonic-gate mp->b_rptr = DB_BASE(mp); 54380Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + new_size; 54390Sstevel@tonic-gate return (mp); 54400Sstevel@tonic-gate } 54410Sstevel@tonic-gate 54420Sstevel@tonic-gate static void 54430Sstevel@tonic-gate tl_cl_backenable(tl_endpt_t *tep) 54440Sstevel@tonic-gate { 54450Sstevel@tonic-gate list_t *l = &tep->te_flowlist; 54460Sstevel@tonic-gate tl_endpt_t *elp; 54470Sstevel@tonic-gate 54480Sstevel@tonic-gate ASSERT(IS_CLTS(tep)); 54490Sstevel@tonic-gate 54500Sstevel@tonic-gate for (elp = list_head(l); elp != NULL; elp = list_head(l)) { 54510Sstevel@tonic-gate ASSERT(tep->te_ser == elp->te_ser); 54520Sstevel@tonic-gate ASSERT(elp->te_flowq == tep); 54530Sstevel@tonic-gate if (! elp->te_closing) 54540Sstevel@tonic-gate TL_QENABLE(elp); 54550Sstevel@tonic-gate elp->te_flowq = NULL; 54560Sstevel@tonic-gate list_remove(l, elp); 54570Sstevel@tonic-gate } 54580Sstevel@tonic-gate } 54590Sstevel@tonic-gate 54600Sstevel@tonic-gate /* 54610Sstevel@tonic-gate * Unconnect endpoints. 54620Sstevel@tonic-gate */ 54630Sstevel@tonic-gate static void 54640Sstevel@tonic-gate tl_co_unconnect(tl_endpt_t *tep) 54650Sstevel@tonic-gate { 54660Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_conp; 54670Sstevel@tonic-gate tl_endpt_t *srv_tep = tep->te_oconp; 54680Sstevel@tonic-gate list_t *l; 54690Sstevel@tonic-gate tl_icon_t *tip; 54700Sstevel@tonic-gate tl_endpt_t *cl_tep; 54710Sstevel@tonic-gate mblk_t *d_mp; 54720Sstevel@tonic-gate 54730Sstevel@tonic-gate ASSERT(IS_COTS(tep)); 54740Sstevel@tonic-gate /* 54750Sstevel@tonic-gate * If our peer is closing, don't use it. 
54760Sstevel@tonic-gate */ 54770Sstevel@tonic-gate if ((peer_tep != NULL) && peer_tep->te_closing) { 54780Sstevel@tonic-gate TL_UNCONNECT(tep->te_conp); 54790Sstevel@tonic-gate peer_tep = NULL; 54800Sstevel@tonic-gate } 54810Sstevel@tonic-gate if ((srv_tep != NULL) && srv_tep->te_closing) { 54820Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 54830Sstevel@tonic-gate srv_tep = NULL; 54840Sstevel@tonic-gate } 54850Sstevel@tonic-gate 54860Sstevel@tonic-gate if (tep->te_nicon > 0) { 54870Sstevel@tonic-gate l = &tep->te_iconp; 54880Sstevel@tonic-gate /* 54890Sstevel@tonic-gate * If incoming requests pending, change state 54900Sstevel@tonic-gate * of clients on disconnect ind event and send 54910Sstevel@tonic-gate * discon_ind pdu to modules above them 54920Sstevel@tonic-gate * for server: all clients get disconnect 54930Sstevel@tonic-gate */ 54940Sstevel@tonic-gate 54950Sstevel@tonic-gate while (tep->te_nicon > 0) { 54960Sstevel@tonic-gate tip = list_head(l); 54970Sstevel@tonic-gate cl_tep = tip->ti_tep; 54980Sstevel@tonic-gate 54990Sstevel@tonic-gate if (cl_tep == NULL) { 55000Sstevel@tonic-gate tl_freetip(tep, tip); 55010Sstevel@tonic-gate continue; 55020Sstevel@tonic-gate } 55030Sstevel@tonic-gate 55040Sstevel@tonic-gate if (cl_tep->te_oconp != NULL) { 55050Sstevel@tonic-gate ASSERT(cl_tep != cl_tep->te_oconp); 55060Sstevel@tonic-gate TL_UNCONNECT(cl_tep->te_oconp); 55070Sstevel@tonic-gate } 55080Sstevel@tonic-gate 55090Sstevel@tonic-gate if (cl_tep->te_closing) { 55100Sstevel@tonic-gate tl_freetip(tep, tip); 55110Sstevel@tonic-gate continue; 55120Sstevel@tonic-gate } 55130Sstevel@tonic-gate 55140Sstevel@tonic-gate enableok(cl_tep->te_wq); 55150Sstevel@tonic-gate TL_QENABLE(cl_tep); 55160Sstevel@tonic-gate d_mp = tl_discon_ind_alloc(ECONNREFUSED, BADSEQNUM); 55170Sstevel@tonic-gate if (d_mp != NULL) { 55180Sstevel@tonic-gate cl_tep->te_state = TS_IDLE; 55190Sstevel@tonic-gate putnext(cl_tep->te_rq, d_mp); 55200Sstevel@tonic-gate } else { 55210Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 55225240Snordmark SL_TRACE|SL_ERROR, 55235240Snordmark "tl_co_unconnect:icmng: " 55245240Snordmark "allocb failure")); 55250Sstevel@tonic-gate } 55260Sstevel@tonic-gate tl_freetip(tep, tip); 55270Sstevel@tonic-gate } 55280Sstevel@tonic-gate } else if (srv_tep != NULL) { 55290Sstevel@tonic-gate /* 55300Sstevel@tonic-gate * If outgoing request pending, change state 55310Sstevel@tonic-gate * of server on discon ind event 55320Sstevel@tonic-gate */ 55330Sstevel@tonic-gate 55340Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect && 55350Sstevel@tonic-gate IS_COTSORD(srv_tep) && 55360Sstevel@tonic-gate !tl_icon_hasprim(srv_tep, tep->te_seqno, T_ORDREL_IND)) { 55370Sstevel@tonic-gate /* 55380Sstevel@tonic-gate * Queue ordrel_ind for server to be picked up 55390Sstevel@tonic-gate * when the connection is accepted. 
55400Sstevel@tonic-gate */ 55410Sstevel@tonic-gate d_mp = tl_ordrel_ind_alloc(); 55420Sstevel@tonic-gate } else { 55430Sstevel@tonic-gate /* 55440Sstevel@tonic-gate * send discon_ind to server 55450Sstevel@tonic-gate */ 55460Sstevel@tonic-gate d_mp = tl_discon_ind_alloc(ECONNRESET, tep->te_seqno); 55470Sstevel@tonic-gate } 55480Sstevel@tonic-gate if (d_mp == NULL) { 55490Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 55505240Snordmark SL_TRACE|SL_ERROR, 55515240Snordmark "tl_co_unconnect:outgoing:allocb failure")); 55520Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 55530Sstevel@tonic-gate goto discon_peer; 55540Sstevel@tonic-gate } 55550Sstevel@tonic-gate 55560Sstevel@tonic-gate /* 55570Sstevel@tonic-gate * If this is a socket the T_DISCON_IND is queued with 55580Sstevel@tonic-gate * the T_CONN_IND. Otherwise the T_CONN_IND is removed 55590Sstevel@tonic-gate * from the list of pending connections. 55600Sstevel@tonic-gate * Note that when te_oconp is set the peer better have 55610Sstevel@tonic-gate * a t_connind_t for the client. 55620Sstevel@tonic-gate */ 55630Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect) { 55640Sstevel@tonic-gate /* 55650Sstevel@tonic-gate * Queue the disconnection message. 55660Sstevel@tonic-gate */ 55670Sstevel@tonic-gate tl_icon_queuemsg(srv_tep, tep->te_seqno, d_mp); 55680Sstevel@tonic-gate } else { 55690Sstevel@tonic-gate tip = tl_icon_find(srv_tep, tep->te_seqno); 55700Sstevel@tonic-gate if (tip == NULL) { 55710Sstevel@tonic-gate freemsg(d_mp); 55720Sstevel@tonic-gate } else { 55730Sstevel@tonic-gate ASSERT(tep == tip->ti_tep); 55740Sstevel@tonic-gate ASSERT(tep->te_ser == srv_tep->te_ser); 55750Sstevel@tonic-gate /* 55760Sstevel@tonic-gate * Delete tip from the server list. 55770Sstevel@tonic-gate */ 55780Sstevel@tonic-gate if (srv_tep->te_nicon == 1) { 55790Sstevel@tonic-gate srv_tep->te_state = 55800Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND2, 55815240Snordmark srv_tep->te_state); 55820Sstevel@tonic-gate } else { 55830Sstevel@tonic-gate srv_tep->te_state = 55840Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND3, 55855240Snordmark srv_tep->te_state); 55860Sstevel@tonic-gate } 55870Sstevel@tonic-gate ASSERT(*(uint32_t *)(d_mp->b_rptr) == 55880Sstevel@tonic-gate T_DISCON_IND); 55890Sstevel@tonic-gate putnext(srv_tep->te_rq, d_mp); 55900Sstevel@tonic-gate tl_freetip(srv_tep, tip); 55910Sstevel@tonic-gate } 55920Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp); 55930Sstevel@tonic-gate srv_tep = NULL; 55940Sstevel@tonic-gate } 55950Sstevel@tonic-gate } else if (peer_tep != NULL) { 55960Sstevel@tonic-gate /* 55970Sstevel@tonic-gate * unconnect existing connection 55980Sstevel@tonic-gate * If connected, change state of peer on 55990Sstevel@tonic-gate * discon ind event and send discon ind pdu 56000Sstevel@tonic-gate * to module above it 56010Sstevel@tonic-gate */ 56020Sstevel@tonic-gate 56030Sstevel@tonic-gate ASSERT(tep->te_ser == peer_tep->te_ser); 56040Sstevel@tonic-gate if (IS_COTSORD(peer_tep) && 56050Sstevel@tonic-gate (peer_tep->te_state == TS_WIND_ORDREL || 56060Sstevel@tonic-gate peer_tep->te_state == TS_DATA_XFER)) { 56070Sstevel@tonic-gate /* 56080Sstevel@tonic-gate * send ordrel ind 56090Sstevel@tonic-gate */ 56100Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE, 56110Sstevel@tonic-gate "tl_co_unconnect:connected: ordrel_ind state %d->%d", 56125240Snordmark peer_tep->te_state, 56135240Snordmark NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state))); 56140Sstevel@tonic-gate d_mp = tl_ordrel_ind_alloc(); 
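			/*
			 * Descriptive note (added): the allocation above may
			 * fail under memory pressure; the failure path below
			 * still enables the peer queue and falls through to
			 * discon_peer so the cross-pointers get cleaned up
			 * on close.
			 */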
56150Sstevel@tonic-gate if (! d_mp) { 56160Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 56170Sstevel@tonic-gate SL_TRACE|SL_ERROR, 56180Sstevel@tonic-gate "tl_co_unconnect:connected:" 56190Sstevel@tonic-gate "allocb failure")); 56200Sstevel@tonic-gate /* 56210Sstevel@tonic-gate * Continue with cleaning up peer as 56220Sstevel@tonic-gate * this side may go away with the close 56230Sstevel@tonic-gate */ 56240Sstevel@tonic-gate TL_QENABLE(peer_tep); 56250Sstevel@tonic-gate goto discon_peer; 56260Sstevel@tonic-gate } 56270Sstevel@tonic-gate peer_tep->te_state = 56285240Snordmark NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state); 56290Sstevel@tonic-gate 56300Sstevel@tonic-gate putnext(peer_tep->te_rq, d_mp); 56310Sstevel@tonic-gate /* 56320Sstevel@tonic-gate * Handle flow control case. This will generate 56330Sstevel@tonic-gate * a t_discon_ind message with reason 0 if there 56340Sstevel@tonic-gate * is data queued on the write side. 56350Sstevel@tonic-gate */ 56360Sstevel@tonic-gate TL_QENABLE(peer_tep); 56370Sstevel@tonic-gate } else if (IS_COTSORD(peer_tep) && 56380Sstevel@tonic-gate peer_tep->te_state == TS_WREQ_ORDREL) { 56390Sstevel@tonic-gate /* 56400Sstevel@tonic-gate * Sent an ordrel_ind. We send a discon with 56410Sstevel@tonic-gate * with error 0 to inform that the peer is gone. 56420Sstevel@tonic-gate */ 56430Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 56445240Snordmark SL_TRACE|SL_ERROR, 56455240Snordmark "tl_co_unconnect: discon in state %d", 56465240Snordmark tep->te_state)); 56470Sstevel@tonic-gate tl_discon_ind(peer_tep, 0); 56480Sstevel@tonic-gate } else { 56490Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, 56505240Snordmark SL_TRACE|SL_ERROR, 56515240Snordmark "tl_co_unconnect: state %d", tep->te_state)); 56520Sstevel@tonic-gate tl_discon_ind(peer_tep, ECONNRESET); 56530Sstevel@tonic-gate } 56540Sstevel@tonic-gate 56550Sstevel@tonic-gate discon_peer: 56560Sstevel@tonic-gate /* 56570Sstevel@tonic-gate * Disconnect cross-pointers only for close 56580Sstevel@tonic-gate */ 56590Sstevel@tonic-gate if (tep->te_closing) { 56600Sstevel@tonic-gate peer_tep = tep->te_conp; 56610Sstevel@tonic-gate TL_REMOVE_PEER(peer_tep->te_conp); 56620Sstevel@tonic-gate TL_REMOVE_PEER(tep->te_conp); 56630Sstevel@tonic-gate } 56640Sstevel@tonic-gate } 56650Sstevel@tonic-gate } 56660Sstevel@tonic-gate 56670Sstevel@tonic-gate /* 56680Sstevel@tonic-gate * Note: The following routine does not recover from allocb() 56690Sstevel@tonic-gate * failures 56700Sstevel@tonic-gate * The reason should be from the <sys/errno.h> space. 56710Sstevel@tonic-gate */ 56720Sstevel@tonic-gate static void 56730Sstevel@tonic-gate tl_discon_ind(tl_endpt_t *tep, uint32_t reason) 56740Sstevel@tonic-gate { 56750Sstevel@tonic-gate mblk_t *d_mp; 56760Sstevel@tonic-gate 56770Sstevel@tonic-gate if (tep->te_closing) 56780Sstevel@tonic-gate return; 56790Sstevel@tonic-gate 56800Sstevel@tonic-gate /* 56810Sstevel@tonic-gate * flush the queues. 56820Sstevel@tonic-gate */ 56830Sstevel@tonic-gate flushq(tep->te_rq, FLUSHDATA); 56840Sstevel@tonic-gate (void) putnextctl1(tep->te_rq, M_FLUSH, FLUSHRW); 56850Sstevel@tonic-gate 56860Sstevel@tonic-gate /* 56870Sstevel@tonic-gate * send discon ind 56880Sstevel@tonic-gate */ 56890Sstevel@tonic-gate d_mp = tl_discon_ind_alloc(reason, tep->te_seqno); 56900Sstevel@tonic-gate if (! 
d_mp) { 56910Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR, 56925240Snordmark "tl_discon_ind:allocb failure")); 56930Sstevel@tonic-gate return; 56940Sstevel@tonic-gate } 56950Sstevel@tonic-gate tep->te_state = TS_IDLE; 56960Sstevel@tonic-gate putnext(tep->te_rq, d_mp); 56970Sstevel@tonic-gate } 56980Sstevel@tonic-gate 56990Sstevel@tonic-gate /* 57000Sstevel@tonic-gate * Note: The following routine does not recover from allocb() 57010Sstevel@tonic-gate * failures 57020Sstevel@tonic-gate * The reason should be from the <sys/errno.h> space. 57030Sstevel@tonic-gate */ 57040Sstevel@tonic-gate static mblk_t * 57050Sstevel@tonic-gate tl_discon_ind_alloc(uint32_t reason, t_scalar_t seqnum) 57060Sstevel@tonic-gate { 57070Sstevel@tonic-gate mblk_t *mp; 57080Sstevel@tonic-gate struct T_discon_ind *tdi; 57090Sstevel@tonic-gate 57100Sstevel@tonic-gate if (mp = allocb(sizeof (struct T_discon_ind), BPRI_MED)) { 57110Sstevel@tonic-gate DB_TYPE(mp) = M_PROTO; 57120Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (struct T_discon_ind); 57130Sstevel@tonic-gate tdi = (struct T_discon_ind *)mp->b_rptr; 57140Sstevel@tonic-gate tdi->PRIM_type = T_DISCON_IND; 57150Sstevel@tonic-gate tdi->DISCON_reason = reason; 57160Sstevel@tonic-gate tdi->SEQ_number = seqnum; 57170Sstevel@tonic-gate } 57180Sstevel@tonic-gate return (mp); 57190Sstevel@tonic-gate } 57200Sstevel@tonic-gate 57210Sstevel@tonic-gate 57220Sstevel@tonic-gate /* 57230Sstevel@tonic-gate * Note: The following routine does not recover from allocb() 57240Sstevel@tonic-gate * failures 57250Sstevel@tonic-gate */ 57260Sstevel@tonic-gate static mblk_t * 57270Sstevel@tonic-gate tl_ordrel_ind_alloc(void) 57280Sstevel@tonic-gate { 57290Sstevel@tonic-gate mblk_t *mp; 57300Sstevel@tonic-gate struct T_ordrel_ind *toi; 57310Sstevel@tonic-gate 57320Sstevel@tonic-gate if (mp = allocb(sizeof (struct T_ordrel_ind), BPRI_MED)) { 57330Sstevel@tonic-gate DB_TYPE(mp) = M_PROTO; 57340Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (struct T_ordrel_ind); 57350Sstevel@tonic-gate toi = (struct T_ordrel_ind *)mp->b_rptr; 57360Sstevel@tonic-gate toi->PRIM_type = T_ORDREL_IND; 57370Sstevel@tonic-gate } 57380Sstevel@tonic-gate return (mp); 57390Sstevel@tonic-gate } 57400Sstevel@tonic-gate 57410Sstevel@tonic-gate 57420Sstevel@tonic-gate /* 57430Sstevel@tonic-gate * Lookup the seqno in the list of queued connections. 57440Sstevel@tonic-gate */ 57450Sstevel@tonic-gate static tl_icon_t * 57460Sstevel@tonic-gate tl_icon_find(tl_endpt_t *tep, t_scalar_t seqno) 57470Sstevel@tonic-gate { 57480Sstevel@tonic-gate list_t *l = &tep->te_iconp; 57490Sstevel@tonic-gate tl_icon_t *tip = list_head(l); 57500Sstevel@tonic-gate 57510Sstevel@tonic-gate ASSERT(seqno != 0); 57520Sstevel@tonic-gate 57530Sstevel@tonic-gate for (; tip != NULL && (tip->ti_seqno != seqno); tip = list_next(l, tip)) 57540Sstevel@tonic-gate ; 57550Sstevel@tonic-gate 57560Sstevel@tonic-gate return (tip); 57570Sstevel@tonic-gate } 57580Sstevel@tonic-gate 57590Sstevel@tonic-gate /* 57600Sstevel@tonic-gate * Queue data for a given T_CONN_IND while verifying that redundant 57610Sstevel@tonic-gate * messages, such as a T_ORDREL_IND after a T_DISCON_IND, are not queued. 57620Sstevel@tonic-gate * Used when the originator of the connection closes. 
57630Sstevel@tonic-gate */ 57640Sstevel@tonic-gate static void 57650Sstevel@tonic-gate tl_icon_queuemsg(tl_endpt_t *tep, t_scalar_t seqno, mblk_t *nmp) 57660Sstevel@tonic-gate { 57670Sstevel@tonic-gate tl_icon_t *tip; 57680Sstevel@tonic-gate mblk_t **mpp, *mp; 57690Sstevel@tonic-gate int prim, nprim; 57700Sstevel@tonic-gate 57710Sstevel@tonic-gate if (nmp->b_datap->db_type == M_PROTO) 57720Sstevel@tonic-gate nprim = ((union T_primitives *)nmp->b_rptr)->type; 57730Sstevel@tonic-gate else 57740Sstevel@tonic-gate nprim = -1; /* M_DATA */ 57750Sstevel@tonic-gate 57760Sstevel@tonic-gate tip = tl_icon_find(tep, seqno); 57770Sstevel@tonic-gate if (tip == NULL) { 57780Sstevel@tonic-gate freemsg(nmp); 57790Sstevel@tonic-gate return; 57800Sstevel@tonic-gate } 57810Sstevel@tonic-gate 57820Sstevel@tonic-gate ASSERT(tip->ti_seqno != 0); 57830Sstevel@tonic-gate mpp = &tip->ti_mp; 57840Sstevel@tonic-gate while (*mpp != NULL) { 57850Sstevel@tonic-gate mp = *mpp; 57860Sstevel@tonic-gate 57870Sstevel@tonic-gate if (mp->b_datap->db_type == M_PROTO) 57880Sstevel@tonic-gate prim = ((union T_primitives *)mp->b_rptr)->type; 57890Sstevel@tonic-gate else 57900Sstevel@tonic-gate prim = -1; /* M_DATA */ 57910Sstevel@tonic-gate 57920Sstevel@tonic-gate /* 57930Sstevel@tonic-gate * Allow nothing after a T_DISCON_IND 57940Sstevel@tonic-gate */ 57950Sstevel@tonic-gate if (prim == T_DISCON_IND) { 57960Sstevel@tonic-gate freemsg(nmp); 57970Sstevel@tonic-gate return; 57980Sstevel@tonic-gate } 57990Sstevel@tonic-gate /* 58000Sstevel@tonic-gate * Only allow a T_DISCON_IND after an T_ORDREL_IND 58010Sstevel@tonic-gate */ 58020Sstevel@tonic-gate if (prim == T_ORDREL_IND && nprim != T_DISCON_IND) { 58030Sstevel@tonic-gate freemsg(nmp); 58040Sstevel@tonic-gate return; 58050Sstevel@tonic-gate } 58060Sstevel@tonic-gate mpp = &(mp->b_next); 58070Sstevel@tonic-gate } 58080Sstevel@tonic-gate *mpp = nmp; 58090Sstevel@tonic-gate } 58100Sstevel@tonic-gate 58110Sstevel@tonic-gate /* 58120Sstevel@tonic-gate * Verify if a certain TPI primitive exists on the connind queue. 58130Sstevel@tonic-gate * Use prim -1 for M_DATA. 58140Sstevel@tonic-gate * Return non-zero if found. 58150Sstevel@tonic-gate */ 58160Sstevel@tonic-gate static boolean_t 58170Sstevel@tonic-gate tl_icon_hasprim(tl_endpt_t *tep, t_scalar_t seqno, t_scalar_t prim) 58180Sstevel@tonic-gate { 58190Sstevel@tonic-gate tl_icon_t *tip = tl_icon_find(tep, seqno); 58200Sstevel@tonic-gate boolean_t found = B_FALSE; 58210Sstevel@tonic-gate 58220Sstevel@tonic-gate if (tip != NULL) { 58230Sstevel@tonic-gate mblk_t *mp; 58240Sstevel@tonic-gate for (mp = tip->ti_mp; !found && mp != NULL; mp = mp->b_next) { 58250Sstevel@tonic-gate found = (DB_TYPE(mp) == M_PROTO && 58260Sstevel@tonic-gate ((union T_primitives *)mp->b_rptr)->type == prim); 58270Sstevel@tonic-gate } 58280Sstevel@tonic-gate } 58290Sstevel@tonic-gate return (found); 58300Sstevel@tonic-gate } 58310Sstevel@tonic-gate 58320Sstevel@tonic-gate /* 58330Sstevel@tonic-gate * Send the b_next mblk chain that has accumulated before the connection 58340Sstevel@tonic-gate * was accepted. Perform the necessary state transitions. 
58350Sstevel@tonic-gate */ 58360Sstevel@tonic-gate static void 58370Sstevel@tonic-gate tl_icon_sendmsgs(tl_endpt_t *tep, mblk_t **mpp) 58380Sstevel@tonic-gate { 58390Sstevel@tonic-gate mblk_t *mp; 58400Sstevel@tonic-gate union T_primitives *primp; 58410Sstevel@tonic-gate 58420Sstevel@tonic-gate if (tep->te_closing) { 58430Sstevel@tonic-gate tl_icon_freemsgs(mpp); 58440Sstevel@tonic-gate return; 58450Sstevel@tonic-gate } 58460Sstevel@tonic-gate 58470Sstevel@tonic-gate ASSERT(tep->te_state == TS_DATA_XFER); 58480Sstevel@tonic-gate ASSERT(tep->te_rq->q_first == NULL); 58490Sstevel@tonic-gate 58500Sstevel@tonic-gate while ((mp = *mpp) != NULL) { 58510Sstevel@tonic-gate *mpp = mp->b_next; 58520Sstevel@tonic-gate mp->b_next = NULL; 58530Sstevel@tonic-gate 58540Sstevel@tonic-gate ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO)); 58550Sstevel@tonic-gate switch (DB_TYPE(mp)) { 58560Sstevel@tonic-gate default: 58570Sstevel@tonic-gate freemsg(mp); 58580Sstevel@tonic-gate break; 58590Sstevel@tonic-gate case M_DATA: 58600Sstevel@tonic-gate putnext(tep->te_rq, mp); 58610Sstevel@tonic-gate break; 58620Sstevel@tonic-gate case M_PROTO: 58630Sstevel@tonic-gate primp = (union T_primitives *)mp->b_rptr; 58640Sstevel@tonic-gate switch (primp->type) { 58650Sstevel@tonic-gate case T_UNITDATA_IND: 58660Sstevel@tonic-gate case T_DATA_IND: 58670Sstevel@tonic-gate case T_OPTDATA_IND: 58680Sstevel@tonic-gate case T_EXDATA_IND: 58690Sstevel@tonic-gate putnext(tep->te_rq, mp); 58700Sstevel@tonic-gate break; 58710Sstevel@tonic-gate case T_ORDREL_IND: 58720Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ORDREL_IND, 58735240Snordmark tep->te_state); 58740Sstevel@tonic-gate putnext(tep->te_rq, mp); 58750Sstevel@tonic-gate break; 58760Sstevel@tonic-gate case T_DISCON_IND: 58770Sstevel@tonic-gate tep->te_state = TS_IDLE; 58780Sstevel@tonic-gate putnext(tep->te_rq, mp); 58790Sstevel@tonic-gate break; 58800Sstevel@tonic-gate default: 58810Sstevel@tonic-gate #ifdef DEBUG 58820Sstevel@tonic-gate cmn_err(CE_PANIC, 58835240Snordmark "tl_icon_sendmsgs: unknown primitive"); 58840Sstevel@tonic-gate #endif /* DEBUG */ 58850Sstevel@tonic-gate freemsg(mp); 58860Sstevel@tonic-gate break; 58870Sstevel@tonic-gate } 58880Sstevel@tonic-gate break; 58890Sstevel@tonic-gate } 58900Sstevel@tonic-gate } 58910Sstevel@tonic-gate } 58920Sstevel@tonic-gate 58930Sstevel@tonic-gate /* 58940Sstevel@tonic-gate * Free the b_next mblk chain that has accumulated before the connection 58950Sstevel@tonic-gate * was accepted. 58960Sstevel@tonic-gate */ 58970Sstevel@tonic-gate static void 58980Sstevel@tonic-gate tl_icon_freemsgs(mblk_t **mpp) 58990Sstevel@tonic-gate { 59000Sstevel@tonic-gate mblk_t *mp; 59010Sstevel@tonic-gate 59020Sstevel@tonic-gate while ((mp = *mpp) != NULL) { 59030Sstevel@tonic-gate *mpp = mp->b_next; 59040Sstevel@tonic-gate mp->b_next = NULL; 59050Sstevel@tonic-gate freemsg(mp); 59060Sstevel@tonic-gate } 59070Sstevel@tonic-gate } 59080Sstevel@tonic-gate 59090Sstevel@tonic-gate /* 59100Sstevel@tonic-gate * Send M_ERROR 59110Sstevel@tonic-gate * Note: assumes caller ensured enough space in mp or enough 59120Sstevel@tonic-gate * memory available. 
Does not attempt recovery from allocb() 59130Sstevel@tonic-gate * failures 59140Sstevel@tonic-gate */ 59150Sstevel@tonic-gate 59160Sstevel@tonic-gate static void 59170Sstevel@tonic-gate tl_merror(queue_t *wq, mblk_t *mp, int error) 59180Sstevel@tonic-gate { 59190Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr; 59200Sstevel@tonic-gate 59210Sstevel@tonic-gate if (tep->te_closing) { 59220Sstevel@tonic-gate freemsg(mp); 59230Sstevel@tonic-gate return; 59240Sstevel@tonic-gate } 59250Sstevel@tonic-gate 59260Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 59275240Snordmark SL_TRACE|SL_ERROR, 59287240Srh87107 "tl_merror: tep=%p, err=%d", (void *)tep, error)); 59290Sstevel@tonic-gate 59300Sstevel@tonic-gate /* 59310Sstevel@tonic-gate * flush all messages on queue. we are shutting 59320Sstevel@tonic-gate * the stream down on fatal error 59330Sstevel@tonic-gate */ 59340Sstevel@tonic-gate flushq(wq, FLUSHALL); 59350Sstevel@tonic-gate if (IS_COTS(tep)) { 59360Sstevel@tonic-gate /* connection oriented - unconnect endpoints */ 59370Sstevel@tonic-gate tl_co_unconnect(tep); 59380Sstevel@tonic-gate } 59390Sstevel@tonic-gate if (mp->b_cont) { 59400Sstevel@tonic-gate freemsg(mp->b_cont); 59410Sstevel@tonic-gate mp->b_cont = NULL; 59420Sstevel@tonic-gate } 59430Sstevel@tonic-gate 59440Sstevel@tonic-gate if ((MBLKSIZE(mp) < 1) || (DB_REF(mp) > 1)) { 59450Sstevel@tonic-gate freemsg(mp); 59460Sstevel@tonic-gate mp = allocb(1, BPRI_HI); 59470Sstevel@tonic-gate if (!mp) { 59480Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, 59495240Snordmark SL_TRACE|SL_ERROR, 59505240Snordmark "tl_merror:M_PROTO: out of memory")); 59510Sstevel@tonic-gate return; 59520Sstevel@tonic-gate } 59530Sstevel@tonic-gate } 59540Sstevel@tonic-gate if (mp) { 59550Sstevel@tonic-gate DB_TYPE(mp) = M_ERROR; 59560Sstevel@tonic-gate mp->b_rptr = DB_BASE(mp); 59570Sstevel@tonic-gate *mp->b_rptr = (char)error; 59580Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (char); 59590Sstevel@tonic-gate qreply(wq, mp); 59600Sstevel@tonic-gate } else { 59610Sstevel@tonic-gate (void) putnextctl1(tep->te_rq, M_ERROR, error); 59620Sstevel@tonic-gate } 59630Sstevel@tonic-gate } 59640Sstevel@tonic-gate 59650Sstevel@tonic-gate static void 59661676Sjpk tl_fill_option(uchar_t *buf, cred_t *cr, pid_t cpid, int flag, cred_t *pcr) 59670Sstevel@tonic-gate { 59688778SErik.Nordmark@Sun.COM ASSERT(cr != NULL); 59698778SErik.Nordmark@Sun.COM 59700Sstevel@tonic-gate if (flag & TL_SETCRED) { 59710Sstevel@tonic-gate struct opthdr *opt = (struct opthdr *)buf; 59720Sstevel@tonic-gate tl_credopt_t *tlcred; 59730Sstevel@tonic-gate 59740Sstevel@tonic-gate opt->level = TL_PROT_LEVEL; 59750Sstevel@tonic-gate opt->name = TL_OPT_PEER_CRED; 59760Sstevel@tonic-gate opt->len = (t_uscalar_t)OPTLEN(sizeof (tl_credopt_t)); 59770Sstevel@tonic-gate 59780Sstevel@tonic-gate tlcred = (tl_credopt_t *)(opt + 1); 59790Sstevel@tonic-gate tlcred->tc_uid = crgetuid(cr); 59800Sstevel@tonic-gate tlcred->tc_gid = crgetgid(cr); 59810Sstevel@tonic-gate tlcred->tc_ruid = crgetruid(cr); 59820Sstevel@tonic-gate tlcred->tc_rgid = crgetrgid(cr); 59830Sstevel@tonic-gate tlcred->tc_suid = crgetsuid(cr); 59840Sstevel@tonic-gate tlcred->tc_sgid = crgetsgid(cr); 59850Sstevel@tonic-gate tlcred->tc_ngroups = crgetngroups(cr); 59860Sstevel@tonic-gate } else if (flag & TL_SETUCRED) { 59870Sstevel@tonic-gate struct opthdr *opt = (struct opthdr *)buf; 59880Sstevel@tonic-gate 59890Sstevel@tonic-gate opt->level = TL_PROT_LEVEL; 59900Sstevel@tonic-gate opt->name = TL_OPT_PEER_UCRED; 
5991*11134SCasper.Dik@Sun.COM opt->len = (t_uscalar_t)OPTLEN(ucredminsize(cr)); 59920Sstevel@tonic-gate 59931676Sjpk (void) cred2ucred(cr, cpid, (void *)(opt + 1), pcr); 59940Sstevel@tonic-gate } else { 59950Sstevel@tonic-gate struct T_opthdr *topt = (struct T_opthdr *)buf; 59960Sstevel@tonic-gate ASSERT(flag & TL_SOCKUCRED); 59970Sstevel@tonic-gate 59980Sstevel@tonic-gate topt->level = SOL_SOCKET; 59990Sstevel@tonic-gate topt->name = SCM_UCRED; 6000*11134SCasper.Dik@Sun.COM topt->len = ucredminsize(cr) + sizeof (*topt); 60010Sstevel@tonic-gate topt->status = 0; 60021676Sjpk (void) cred2ucred(cr, cpid, (void *)(topt + 1), pcr); 60030Sstevel@tonic-gate } 60040Sstevel@tonic-gate } 60050Sstevel@tonic-gate 60060Sstevel@tonic-gate /* ARGSUSED */ 60070Sstevel@tonic-gate static int 60080Sstevel@tonic-gate tl_default_opt(queue_t *wq, int level, int name, uchar_t *ptr) 60090Sstevel@tonic-gate { 60100Sstevel@tonic-gate /* no default value processed in protocol specific code currently */ 60110Sstevel@tonic-gate return (-1); 60120Sstevel@tonic-gate } 60130Sstevel@tonic-gate 60140Sstevel@tonic-gate /* ARGSUSED */ 60150Sstevel@tonic-gate static int 60160Sstevel@tonic-gate tl_get_opt(queue_t *wq, int level, int name, uchar_t *ptr) 60170Sstevel@tonic-gate { 60180Sstevel@tonic-gate int len; 60190Sstevel@tonic-gate tl_endpt_t *tep; 60200Sstevel@tonic-gate int *valp; 60210Sstevel@tonic-gate 60220Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr; 60230Sstevel@tonic-gate 60240Sstevel@tonic-gate len = 0; 60250Sstevel@tonic-gate 60260Sstevel@tonic-gate /* 60270Sstevel@tonic-gate * Assumes: option level and name sanity check done elsewhere 60280Sstevel@tonic-gate */ 60290Sstevel@tonic-gate 60300Sstevel@tonic-gate switch (level) { 60310Sstevel@tonic-gate case SOL_SOCKET: 60320Sstevel@tonic-gate if (! IS_SOCKET(tep)) 60330Sstevel@tonic-gate break; 60340Sstevel@tonic-gate switch (name) { 60350Sstevel@tonic-gate case SO_RECVUCRED: 60360Sstevel@tonic-gate len = sizeof (int); 60370Sstevel@tonic-gate valp = (int *)ptr; 60380Sstevel@tonic-gate *valp = (tep->te_flag & TL_SOCKUCRED) != 0; 60390Sstevel@tonic-gate break; 60400Sstevel@tonic-gate default: 60410Sstevel@tonic-gate break; 60420Sstevel@tonic-gate } 60430Sstevel@tonic-gate break; 60440Sstevel@tonic-gate case TL_PROT_LEVEL: 60450Sstevel@tonic-gate switch (name) { 60460Sstevel@tonic-gate case TL_OPT_PEER_CRED: 60470Sstevel@tonic-gate case TL_OPT_PEER_UCRED: 60480Sstevel@tonic-gate /* 60490Sstevel@tonic-gate * option not supposed to retrieved directly 60500Sstevel@tonic-gate * Only sent in T_CON_{IND,CON}, T_UNITDATA_IND 60510Sstevel@tonic-gate * when some internal flags set by other options 60520Sstevel@tonic-gate * Direct retrieval always designed to fail(ignored) 60530Sstevel@tonic-gate * for this option. 
60540Sstevel@tonic-gate */ 60550Sstevel@tonic-gate break; 60560Sstevel@tonic-gate } 60570Sstevel@tonic-gate } 60580Sstevel@tonic-gate return (len); 60590Sstevel@tonic-gate } 60600Sstevel@tonic-gate 60610Sstevel@tonic-gate /* ARGSUSED */ 60620Sstevel@tonic-gate static int 60630Sstevel@tonic-gate tl_set_opt( 60640Sstevel@tonic-gate queue_t *wq, 60650Sstevel@tonic-gate uint_t mgmt_flags, 60660Sstevel@tonic-gate int level, 60670Sstevel@tonic-gate int name, 60680Sstevel@tonic-gate uint_t inlen, 60690Sstevel@tonic-gate uchar_t *invalp, 60700Sstevel@tonic-gate uint_t *outlenp, 60710Sstevel@tonic-gate uchar_t *outvalp, 60720Sstevel@tonic-gate void *thisdg_attrs, 607311042SErik.Nordmark@Sun.COM cred_t *cr) 60740Sstevel@tonic-gate { 60750Sstevel@tonic-gate int error; 60760Sstevel@tonic-gate tl_endpt_t *tep; 60770Sstevel@tonic-gate 60780Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr; 60790Sstevel@tonic-gate 60800Sstevel@tonic-gate error = 0; /* NOERROR */ 60810Sstevel@tonic-gate 60820Sstevel@tonic-gate /* 60830Sstevel@tonic-gate * Assumes: option level and name sanity checks done elsewhere 60840Sstevel@tonic-gate */ 60850Sstevel@tonic-gate 60860Sstevel@tonic-gate switch (level) { 60870Sstevel@tonic-gate case SOL_SOCKET: 60880Sstevel@tonic-gate if (! IS_SOCKET(tep)) { 60890Sstevel@tonic-gate error = EINVAL; 60900Sstevel@tonic-gate break; 60910Sstevel@tonic-gate } 60920Sstevel@tonic-gate /* 60930Sstevel@tonic-gate * TBD: fill in other AF_UNIX socket options and then stop 60940Sstevel@tonic-gate * returning error. 60950Sstevel@tonic-gate */ 60960Sstevel@tonic-gate switch (name) { 60970Sstevel@tonic-gate case SO_RECVUCRED: 60980Sstevel@tonic-gate /* 60990Sstevel@tonic-gate * We only support this for datagram sockets; 61000Sstevel@tonic-gate * getpeerucred handles the connection oriented 61010Sstevel@tonic-gate * transports. 61020Sstevel@tonic-gate */ 61030Sstevel@tonic-gate if (! IS_CLTS(tep)) { 61040Sstevel@tonic-gate error = EINVAL; 61050Sstevel@tonic-gate break; 61060Sstevel@tonic-gate } 61070Sstevel@tonic-gate if (*(int *)invalp == 0) 61080Sstevel@tonic-gate tep->te_flag &= ~TL_SOCKUCRED; 61090Sstevel@tonic-gate else 61100Sstevel@tonic-gate tep->te_flag |= TL_SOCKUCRED; 61110Sstevel@tonic-gate break; 61120Sstevel@tonic-gate default: 61130Sstevel@tonic-gate error = EINVAL; 61140Sstevel@tonic-gate break; 61150Sstevel@tonic-gate } 61160Sstevel@tonic-gate break; 61170Sstevel@tonic-gate case TL_PROT_LEVEL: 61180Sstevel@tonic-gate switch (name) { 61190Sstevel@tonic-gate case TL_OPT_PEER_CRED: 61200Sstevel@tonic-gate case TL_OPT_PEER_UCRED: 61210Sstevel@tonic-gate /* 61220Sstevel@tonic-gate * option not supposed to be set directly 61230Sstevel@tonic-gate * Its value is initialized for each endpoint at 61240Sstevel@tonic-gate * driver open time. 61250Sstevel@tonic-gate * Direct setting always designed to fail for this 61260Sstevel@tonic-gate * option. 
static void
tl_timer(void *arg)
{
	queue_t *wq = arg;
	tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;

	ASSERT(tep);

	tep->te_timoutid = 0;

	enableok(wq);
	/*
	 * Note: we could call wsrv directly here and save a context switch.
	 * Consider this change when qtimeout (not timeout) is active.
	 */
	qenable(wq);
}

static void
tl_buffer(void *arg)
{
	queue_t *wq = arg;
	tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;

	ASSERT(tep);

	tep->te_bufcid = 0;
	tep->te_nowsrv = B_FALSE;

	enableok(wq);
	/*
	 * Note: we could call wsrv directly here and save a context switch.
	 * Consider this change when qbufcall (not bufcall) is active.
	 */
	qenable(wq);
}

static void
tl_memrecover(queue_t *wq, mblk_t *mp, size_t size)
{
	tl_endpt_t *tep;

	tep = (tl_endpt_t *)wq->q_ptr;

	if (tep->te_closing) {
		freemsg(mp);
		return;
	}
	noenable(wq);

	(void) insq(wq, wq->q_first, mp);

	if (tep->te_bufcid || tep->te_timoutid) {
		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
		    "tl_memrecover:recover %p pending", (void *)wq));
		return;
	}

	if (!(tep->te_bufcid = qbufcall(wq, size, BPRI_MED, tl_buffer, wq))) {
		tep->te_timoutid = qtimeout(wq, tl_timer, wq,
		    drv_usectohz(TL_BUFWAIT));
	}
}
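/*
 * Illustrative sketch (not part of the driver): the calling convention that
 * tl_memrecover() assumes.  When message processing on the write queue fails
 * to allocate memory, the caller hands the original mblk back to
 * tl_memrecover(), which requeues it and arranges for tl_buffer()/tl_timer()
 * to re-enable the queue so the work is retried from the service routine.
 * The helper name and the ack size below are hypothetical.
 *
 *	static void
 *	tl_example_handler(queue_t *wq, mblk_t *mp)
 *	{
 *		mblk_t *ackmp;
 *		size_t ack_sz = sizeof (struct T_ok_ack);
 *
 *		if ((ackmp = allocb(ack_sz, BPRI_MED)) == NULL) {
 *			tl_memrecover(wq, mp, ack_sz);
 *			return;
 *		}
 *		... build the ack in ackmp, send it, then consume mp ...
 *	}
 */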
static void
tl_freetip(tl_endpt_t *tep, tl_icon_t *tip)
{
	ASSERT(tip->ti_seqno != 0);

	if (tip->ti_mp != NULL) {
		tl_icon_freemsgs(&tip->ti_mp);
		tip->ti_mp = NULL;
	}
	if (tip->ti_tep != NULL) {
		tl_refrele(tip->ti_tep);
		tip->ti_tep = NULL;
	}
	list_remove(&tep->te_iconp, tip);
	kmem_free(tip, sizeof (tl_icon_t));
	tep->te_nicon--;
}

/*
 * Remove address from address hash.
 */
static void
tl_addr_unbind(tl_endpt_t *tep)
{
	tl_endpt_t *elp;

	if (tep->te_flag & TL_ADDRHASHED) {
		if (IS_SOCKET(tep)) {
			(void) mod_hash_remove(tep->te_addrhash,
			    (mod_hash_key_t)tep->te_vp,
			    (mod_hash_val_t *)&elp);
			tep->te_vp = (void *)(uintptr_t)tep->te_minor;
			tep->te_magic = SOU_MAGIC_IMPLICIT;
		} else {
			(void) mod_hash_remove(tep->te_addrhash,
			    (mod_hash_key_t)&tep->te_ap,
			    (mod_hash_val_t *)&elp);
			(void) kmem_free(tep->te_abuf, tep->te_alen);
			tep->te_alen = -1;
			tep->te_abuf = NULL;
		}
		tep->te_flag &= ~TL_ADDRHASHED;
	}
}
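/*
 * Illustrative sketch (not part of the driver): the insertion that
 * tl_addr_unbind() above undoes.  This is only an approximation using the
 * generic mod_hash_insert() interface -- the driver's actual bind path may
 * use a different mod_hash variant -- and the helper name is hypothetical.
 * Socket endpoints are keyed by te_vp, TLI endpoints by their transport
 * address (te_ap).
 *
 *	static int
 *	tl_example_addr_hash(tl_endpt_t *tep)
 *	{
 *		int rc;
 *
 *		if (IS_SOCKET(tep)) {
 *			rc = mod_hash_insert(tep->te_addrhash,
 *			    (mod_hash_key_t)tep->te_vp,
 *			    (mod_hash_val_t)tep);
 *		} else {
 *			rc = mod_hash_insert(tep->te_addrhash,
 *			    (mod_hash_key_t)&tep->te_ap,
 *			    (mod_hash_val_t)tep);
 *		}
 *		if (rc == 0)
 *			tep->te_flag |= TL_ADDRHASHED;
 *		return (rc);
 *	}
 */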