/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Multithreaded STREAMS Local Transport Provider.
 *
 * OVERVIEW
 * ========
 *
 * This driver provides TLI as well as socket semantics. It provides
 * connectionless, connection-oriented, and connection-oriented with orderly
 * release transports for TLI and sockets. Each transport type has its own
 * name space (i.e. it is not possible to connect from a socket to a TLI
 * endpoint) - this removes any name space conflicts when binding to socket
 * style transport addresses.
 *
 * NOTE: There is one exception: socket ticots and ticotsord transports share
 * the same namespace. In fact, sockets always use the ticotsord type
 * transport.
 *
 * The driver mode is specified by the minor number used for open().
 *
 * In addition, sockets have the following semantic differences:
 * No support for passing up credentials (TL_SET[U]CRED).
 *
 *	Options are passed through transparently on T_CONN_REQ to T_CONN_IND,
 *	from T_UNITDATA_REQ to T_UNITDATA_IND, and from T_OPTDATA_REQ to
 *	T_OPTDATA_IND.
 *
 *	The T_CONN_CON is generated when processing the T_CONN_REQ, i.e.
 *	before a T_CONN_RES is received from the acceptor. This means that a
 *	socket connect will complete before the peer has called accept.
 *
 * MULTITHREADING
 * ==============
 *
 * The driver does not use STREAMS protection mechanisms. Instead it uses a
 * generic "serializer" abstraction. Most of the operations are executed
 * behind the serializer and are, essentially, single-threaded. All functions
 * executed behind the same serializer are strictly serialized. So if one
 * thread calls serializer_enter(serializer, foo, mp1, arg1); and another
 * thread calls serializer_enter(serializer, bar, mp2, arg2); then (depending
 * on which one was called first) the actual sequence will be foo(mp1, arg1);
 * bar(mp2, arg2) or bar(mp2, arg2); foo(mp1, arg1). But foo() and bar() will
 * never run at the same time.
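 *
 * A minimal sketch (illustrative only, not actual driver code) of how a
 * write-side message is handed to an endpoint's serializer through the
 * tl_serializer_enter() wrapper declared later in this file:
 *
 *	static void
 *	example_wput(queue_t *wq, mblk_t *mp)
 *	{
 *		tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
 *
 *		tl_refhold(tep);
 *		tl_serializer_enter(tep, tl_wput_ser, mp);
 *	}
 *
 * The callback (tl_wput_ser() here) then runs strictly serialized with every
 * other callback queued on the same serializer, which is what makes the
 * per-endpoint state safe to touch without additional locks.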
 *
 * Connectionless transports use a single serializer per transport type (one
 * for TLI and one for sockets). Connection-oriented transports use
 * finer-grained serializers.
 *
 * All COTS-type endpoints start their life with private serializers. During
 * connection request processing the endpoint serializer is switched to the
 * listener's serializer and the rest of T_CONN_REQ processing is done on the
 * listener serializer. During T_CONN_RES processing the eager serializer is
 * switched from the listener to the acceptor serializer and after that point
 * all processing for eager and acceptor happens on this serializer. To avoid
 * races with endpoint closes while its serializer may be changing, closes are
 * blocked while serializers are manipulated.
 *
 * Reference counting
 * ------------------
 *
 * Endpoints are reference counted and freed when the last reference is
 * dropped. Functions within the serializer may access endpoint state even
 * after the endpoint has closed. te_closing being set on the endpoint
 * indicates that the endpoint has entered its close routine.
 *
 * One reference is held for each opened endpoint instance. The reference
 * counter is incremented when the endpoint is linked to another endpoint and
 * decremented when the link disappears. It is also incremented when the
 * endpoint is found by a hash table lookup. This increment is atomic with the
 * lookup itself and happens while the hash table read lock is held.
 *
 * Close synchronization
 * ---------------------
 *
 * During close the endpoint is marked as closing using the te_closing flag.
 * It is usually enough to check the te_closing flag since all other state
 * changes happen after this flag is set and the close has entered the
 * serializer. Immediately after setting the te_closing flag, tl_close()
 * enters the serializer and waits until the callback finishes. This allows
 * all functions called within the serializer to simply check te_closing
 * without any locks.
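 *
 * A minimal sketch (illustrative only) of the te_closing convention used by
 * the serialized callbacks in this file:
 *
 *	static void
 *	example_proto_ser(mblk_t *mp, tl_endpt_t *tep)
 *	{
 *		if (tep->te_closing) {
 *			freemsg(mp);
 *			tl_serializer_exit(tep);
 *			tl_refrele(tep);
 *			return;
 *		}
 *		...normal processing...
 *	}
 *
 * No lock is needed for the te_closing test because tl_close() sets the flag
 * and then itself enters the serializer before tearing anything down.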
 *
 * Serializer management
 * ---------------------
 *
 * For COTS transports serializers are created when the endpoint is
 * constructed and destroyed when the endpoint is destructed. CLTS transports
 * use global serializers - one for sockets and one for TLI.
 *
 * COTS serializers have separate reference counts to deal with several
 * endpoints sharing the same serializer. There is a subtle problem related to
 * serializer destruction: the serializer should never be destroyed by any
 * function executed inside the serializer. This means that close has to wait
 * until all serializer activity for this endpoint is finished before it can
 * drop the last reference on the endpoint (which may as well free the
 * serializer). This is only relevant for COTS transports, which manage
 * serializers dynamically. For CLTS transports close may complete without
 * waiting for all serializer activity to finish since the serializer is only
 * destroyed at driver detach time.
 *
 * COTS endpoints keep track of the number of outstanding requests on the
 * serializer for the endpoint. The code handling accept() avoids changing the
 * client serializer if it has any pending messages on the serializer and
 * instead moves the acceptor to the listener's serializer.
 *
 *
 * Use of hash tables
 * ------------------
 *
 * The driver uses the modhash hash table implementation. Each transport uses
 * two hash tables - one for finding endpoints by acceptor ID and another one
 * for finding endpoints by address. For sockets, TICOTS and TICOTSORD share
 * the same pair of hash tables since sockets only use TICOTSORD.
 *
 * All hash table lookups increment a reference count for returned endpoints,
 * so we may safely check the endpoint state even when the endpoint is removed
 * from the hash by another thread immediately after it is found.
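 *
 * A minimal sketch (illustrative only; "addr" is a hypothetical tl_addr_t
 * used as the lookup key) of the lookup-plus-refhold pattern.
 * tl_find_callback() is the mod_hash callback that takes the extra reference
 * while the hash read lock is still held:
 *
 *	tl_endpt_t *peer = NULL;
 *
 *	if (mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)&addr,
 *	    (mod_hash_val_t *)&peer, tl_find_callback) == 0) {
 *		...peer is refheld here; drop it with tl_refrele()...
 *	}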
 *
 *
 * CLOSE processing
 * ================
 *
 * The driver enters the serializer twice on close(). The close sequence is
 * the following:
 *
 * 1) Wait until closing is safe (te_closewait becomes zero).
 *	This step is needed to prevent close during serializer switches. In
 *	most cases (close happening after connection establishment)
 *	te_closewait is zero.
 * 2) Set te_closing.
 * 3) Call tl_close_ser() within the serializer and wait for it to complete.
 *
 *	tl_close_ser() simply marks the endpoint and wakes up the waiting
 *	tl_close(). It also needs to clear the write-side q_next pointers -
 *	this should be done before qprocsoff().
 *
 *	This synchronous serializer entry during close is needed to ensure
 *	that the queue is valid everywhere inside the serializer.
 *
 *	Note that in many cases close will execute tl_close_ser()
 *	synchronously, so it will not wait at all.
 *
 * 4) Call qprocsoff().
 * 5) Call tl_close_finish_ser() within the serializer and wait for it to
 *	complete (for COTS transports). For CLTS transports there is no wait.
 *
 *	tl_close_finish_ser() finishes the close process and wakes up the
 *	waiting close if there is any.
 *
 *	Note that in most cases close will enter tl_close_finish_ser()
 *	synchronously and will not wait at all.
 *
 *
 * Flow Control
 * ============
 *
 * The driver implements both read and write side service routines. No one
 * calls putq() on the read queue. The read side service routine tl_rsrv() is
 * called when the read side stream is back-enabled. It enters the serializer
 * synchronously (waits till serializer processing is complete). Within the
 * serializer it back-enables all endpoints blocked by the queue for
 * connectionless transports and enables write side service processing for
 * the peer for connection-oriented transports.
 *
 * Read and write side service routines use special mblk_sized space in the
 * endpoint structure to enter the perimeter.
 *
 * Write-side flow control
 * -----------------------
 *
 * Write side flow control is a bit tricky. The driver needs to deal with two
 * message queues - the explicit STREAMS message queue maintained by
 * putq()/getq()/putbq() and the implicit queue within the serializer. These
 * two queues should be synchronized to preserve message ordering and should
 * maintain a single order determined by the order in which messages enter
 * tl_wput(). In order to maintain the ordering between these two queues the
 * STREAMS queue is only manipulated within the serializer, so the ordering is
 * provided by the serializer.
 *
 * Functions called from tl_wsrv() may sometimes call putbq(). To immediately
 * stop any further processing of the STREAMS message queues the code calling
 * putbq() also sets the te_nowsrv flag in the endpoint. The write side
 * service processing stops when the flag is set.
 *
 * The tl_wsrv() function enters the serializer synchronously and waits for it
 * to complete. The serializer call-back tl_wsrv_ser() either drains all
 * messages on the STREAMS queue or terminates when it notices the te_nowsrv
 * flag set. Note that the maximum number of messages processed by
 * tl_wsrv_ser() is always bounded by the number of messages on the STREAMS
 * queue at the time tl_wsrv_ser() is entered. Any new messages may only
 * appear on the STREAMS queue from another serialized entry, which can't
 * happen in parallel. This guarantees that tl_wsrv_ser() completes in bounded
 * time (there is no risk of it draining forever while a writer places new
 * messages on the STREAMS queue).
 *
 * Note that a closing endpoint never sets te_nowsrv and never calls putbq().
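 *
 * A minimal sketch (illustrative only) of the draining loop described above;
 * the real work is done by tl_wsrv_ser() further down in this file:
 *
 *	while (!tep->te_nowsrv && (mp = getq(tep->te_wq)) != NULL) {
 *		tl_wput_common_ser(mp, tep);
 *	}
 *
 * Because only serialized code manipulates the STREAMS queue, the loop sees a
 * snapshot of the queue that cannot grow behind its back.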
 *
 *
 * Unix Domain Sockets
 * ===================
 *
 * The driver knows the structure of Unix Domain socket addresses and treats
 * them differently from generic TLI addresses. For sockets implicit binds are
 * requested by setting SOU_MAGIC_IMPLICIT in the soua_magic part of the
 * address instead of using an address length of zero. Explicit binds specify
 * SOU_MAGIC_EXPLICIT as magic.
 *
 * For implicit binds we always use the minor number as the soua_vp part of
 * the address and avoid any hash table lookups. This saves two hash table
 * lookups per anonymous bind.
 *
 * For explicit addresses we hash the vnode pointer instead of hashing the
 * full-scale address+zone+length. Hashing by pointer is more efficient than
 * hashing by the full address.
 *
 * For Unix Domain sockets the te_ap always points to the te_uxaddr part of
 * the tep structure, so it should never be freed.
 *
 * Also, for sockets the driver always uses the minor number as the acceptor
 * id.
 *
 * TPI VIOLATIONS
 * --------------
 *
 * This driver violates TPI in several respects for Unix Domain Sockets:
 *
 * 1) It treats O_T_BIND_REQ as T_BIND_REQ and refuses the bind if an explicit
 *    bind is requested and the endpoint is already in use. There is no point
 *    in generating an unused address since this address will be rejected by
 *    sockfs anyway. For implicit binds it always generates a new address
 *    (sets soua_vp to its minor number).
 *
 * 2) It always uses the minor number as the acceptor ID and never uses the
 *    queue pointer. This is ok since sockets get the acceptor ID from the
 *    T_CAPABILITY_REQ message and do not use the queue pointer.
 *
 * 3) For listener sockets the usual sequence is to issue bind() with zero
 *    backlog followed by listen(). The listen() should be issued with a
 *    non-zero backlog, so sotpi_listen() issues an unbind request followed by
 *    a bind request to the same address but with a non-zero qlen value. Both
 *    tl_bind() and tl_unbind() require a write lock on the hash table to
 *    insert/remove the address. The driver does not remove the address from
 *    the hash for endpoints that are bound to an explicit address and have a
 *    backlog of zero. During T_BIND_REQ processing, if the address requested
 *    is equal to the address the endpoint already has, it updates the backlog
 *    without reinserting the address in the hash table. This optimization
 *    avoids two hash table updates for each listener created. It also avoids
 *    the problem of a "stolen" address, where another listener may use the
 *    same address between the unbind and bind and listen() suddenly fails
 *    because the address is in use even though the bind() succeeded.
 *
 *
 * CONNECTIONLESS TRANSPORTS
 * =========================
 *
 * Connectionless transports all share the same serializer (one for TLI and
 * one for sockets). Functions executing behind the serializer can check or
 * modify the state of any endpoint.
 *
 * When endpoint X talks to another endpoint Y it caches the pointer to Y in
 * the te_lastep field. The next time X talks to some address A it checks
 * whether A is the same as Y's address and if it is there is no need to look
 * up Y. If the address is different or the state of Y is not appropriate
 * (e.g. closed or not idle) X does a lookup using tl_find_peer() and caches
 * the new address.
 * NOTE: tl_find_peer() never returns a closing endpoint and it places a
 * refhold on the endpoint found.
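 *
 * A minimal sketch (illustrative only; "destaddr" stands for the destination
 * tl_addr_t taken from the T_UNITDATA_REQ) of the te_lastep caching check:
 *
 *	tl_endpt_t *peer = tep->te_lastep;
 *
 *	if (peer == NULL || peer->te_closing ||
 *	    !tl_eqaddr(&peer->te_ap, &destaddr)) {
 *		if (peer != NULL)
 *			tl_refrele(peer);
 *		peer = tl_find_peer(tep, &destaddr);
 *		tep->te_lastep = peer;
 *	}
 *
 * The real logic in the unitdata path also checks the peer's TPI state, as
 * described above.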
 *
 * During close, endpoint Y does not try to remove itself from other
 * endpoints' caches. They will detect that Y is gone and will search for the
 * peer endpoint again.
 *
 * Flow Control Handling
 * ---------------------
 *
 * Each connectionless endpoint keeps a list of endpoints which are
 * flow-controlled by its queue. It also keeps a pointer to the queue which
 * flow-controls itself. Whenever flow control releases for endpoint X it
 * enables all queues from the list. During close it also back-enables
 * everyone in the list. If X is flow-controlled when it is closing, it
 * removes itself from the peer's list.
 *
 * DATA STRUCTURES
 * ===============
 *
 * Each endpoint is represented by the tl_endpt_t structure which keeps all
 * the endpoint state. For connection-oriented transports it keeps a list of
 * pending connections (tl_icon_t). For connectionless transports it keeps a
 * list of endpoints flow controlled by this one.
 *
 * Each transport type is represented by a per-transport data structure
 * tl_transport_state_t. It contains a pointer to an acceptor ID hash and the
 * endpoint address hash tables for each transport. It also contains a pointer
 * to the transport serializer for connectionless transports.
 *
 * Each endpoint keeps a link to its transport structure, so the code can find
 * all per-transport information quickly.
 */

#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/strlog.h>
#include <sys/debug.h>
#include <sys/cred.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/id_space.h>
#include <sys/modhash.h>
#include <sys/mkdev.h>
#include <sys/tl.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/strsun.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysmacros.h>
#include <sys/xti_xtiopt.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/zone.h>
#include <inet/common.h>	/* typedef int (*pfi_t)() for inet/optcom.h */
#include <inet/optcom.h>
#include <sys/strsubr.h>
#include <sys/ucred.h>
#include <sys/suntpi.h>
#include <sys/list.h>
#include <sys/serializer.h>

/*
 * TBD List
 * 14. Eliminate state changes through table
 * 16. AF_UNIX socket options
 * 17. connect() for ticlts
 * 18. Support for "netstat" to show AF_UNIX plus TLI local
 *	transport connections
 * 21. Sanity check for flushing on sending M_ERROR
 */

/*
 * CONSTANT DECLARATIONS
 * ---------------------
 */

/*
 * Local declarations
 */
#define	NEXTSTATE(EV, ST)	ti_statetbl[EV][ST]

#define	BADSEQNUM	(-1)	/* initial seq number used by T_DISCON_IND */
#define	TL_BUFWAIT	(10000)	/* usecs to wait for allocb buffer timeout */
#define	TL_TIDUSZ (64*1024)	/* tidu size when "strmsgsz" is unlimited (0) */
/*
 * Hash table size.
 */
#define	TL_HASH_SIZE 311

/*
 * Definitions for module_info
 */
#define	TL_ID		(104)		/* module ID number */
#define	TL_NAME		"tl"		/* module name */
#define	TL_MINPSZ	(0)		/* min packet size */
#define	TL_MAXPSZ	INFPSZ		/* max packet size ZZZ */
#define	TL_HIWAT	(16*1024)	/* hi water mark */
#define	TL_LOWAT	(256)		/* lo water mark */
/*
 * Definition of minor numbers/modes for new transport provider modes.
 * We view the socket use as a separate mode to get a separate name space.
 */
#define	TL_TICOTS	0	/* connection oriented transport */
#define	TL_TICOTSORD	1	/* COTS w/ orderly release */
#define	TL_TICLTS	2	/* connectionless transport */
#define	TL_UNUSED	3
#define	TL_SOCKET	4	/* Socket */
#define	TL_SOCK_COTS	(TL_SOCKET|TL_TICOTS)
#define	TL_SOCK_COTSORD	(TL_SOCKET|TL_TICOTSORD)
#define	TL_SOCK_CLTS	(TL_SOCKET|TL_TICLTS)

#define	TL_MINOR_MASK	0x7
#define	TL_MINOR_START	(TL_TICLTS + 1)

/*
 * LOCAL MACROS
 */
#define	T_ALIGN(p)	P2ROUNDUP((p), sizeof (t_scalar_t))
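
/*
 * Example (illustrative): with sizeof (t_scalar_t) == 4, T_ALIGN(5) == 8 and
 * T_ALIGN(8) == 8, so variable-length address data is padded out to a
 * t_scalar_t boundary.
 */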

/*
 * EXTERNAL VARIABLE DECLARATIONS
 * ------------------------------
 */
/*
 * state table defined in the OS space.c
 */
extern	char ti_statetbl[TE_NOEVENTS][TS_NOSTATES];

/*
 * STREAMS DRIVER ENTRY POINTS PROTOTYPES
 */
static int tl_open(queue_t *, dev_t *, int, int, cred_t *);
static int tl_close(queue_t *, int, cred_t *);
static void tl_wput(queue_t *, mblk_t *);
static void tl_wsrv(queue_t *);
static void tl_rsrv(queue_t *);

static int tl_attach(dev_info_t *, ddi_attach_cmd_t);
static int tl_detach(dev_info_t *, ddi_detach_cmd_t);
static int tl_info(dev_info_t *, ddi_info_cmd_t, void *, void **);


/*
 * GLOBAL DATA STRUCTURES AND VARIABLES
 * ------------------------------------
 */

/*
 * Table representing the database of all options managed by
 * T_SVR4_OPTMGMT_REQ. For now, we only manage the SO_RECVUCRED option, but we
 * also have harmless dummy options to make things work with some common code
 * we access.
 */
opdes_t	tl_opt_arr[] = {
	/* The SO_TYPE is needed for the hack below */
	{
		SO_TYPE,
		SOL_SOCKET,
		OA_R,
		OA_R,
		OP_NP,
		0,
		sizeof (t_scalar_t),
		0
	},
	{
		SO_RECVUCRED,
		SOL_SOCKET,
		OA_RW,
		OA_RW,
		OP_NP,
		0,
		sizeof (int),
		0
	}
};

/*
 * Table of all supported levels
 * Note: Some levels (e.g. XTI_GENERIC) may be valid but may not have
 * any supported options so we need this info separately.
 *
 * This is needed only for topmost tpi providers.
 */
optlevel_t	tl_valid_levels_arr[] = {
	XTI_GENERIC,
	SOL_SOCKET,
	TL_PROT_LEVEL
};

#define	TL_VALID_LEVELS_CNT	A_CNT(tl_valid_levels_arr)
/*
 * Current upper bound on the amount of space needed to return all options.
 * Additional options with data size of sizeof (long) are handled
 * automatically. Others need to be handled specially.
 */
#define	TL_MAX_OPT_BUF_LEN						\
		((A_CNT(tl_opt_arr) << 2) +				\
		(A_CNT(tl_opt_arr) * sizeof (struct opthdr)) +		\
		64 + sizeof (struct T_optmgmt_ack))

#define	TL_OPT_ARR_CNT	A_CNT(tl_opt_arr)

/*
 * transport addr structure
 */
typedef struct tl_addr {
	zoneid_t	ta_zoneid;		/* Zone scope of address */
	t_scalar_t	ta_alen;		/* length of abuf */
	void		*ta_abuf;		/* the addr itself */
} tl_addr_t;

/*
 * Refcounted version of serializer.
 */
typedef struct tl_serializer {
	uint_t		ts_refcnt;
	serializer_t	*ts_serializer;
} tl_serializer_t;

/*
 * Per-transport state. Each transport type has a separate copy.
 */
typedef struct tl_transport_state {
	char		*tr_name;
	minor_t		tr_minor;
	uint32_t	tr_defaddr;
	mod_hash_t	*tr_ai_hash;
	mod_hash_t	*tr_addr_hash;
	tl_serializer_t	*tr_serializer;
} tl_transport_state_t;

#define	TL_DFADDR 0x1000

static tl_transport_state_t tl_transports[] = {
	{ "ticots", TL_TICOTS, TL_DFADDR, NULL, NULL, NULL },
	{ "ticotsord", TL_TICOTSORD, TL_DFADDR, NULL, NULL, NULL },
	{ "ticlts", TL_TICLTS, TL_DFADDR, NULL, NULL, NULL },
	{ "undefined", TL_UNUSED, TL_DFADDR, NULL, NULL, NULL },
	{ "sticots", TL_SOCK_COTS, TL_DFADDR, NULL, NULL, NULL },
	{ "sticotsord", TL_SOCK_COTSORD, TL_DFADDR, NULL, NULL, NULL },
	{ "sticlts", TL_SOCK_CLTS, TL_DFADDR, NULL, NULL, NULL }
};
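
/*
 * Example (illustrative): the "ticotsord" TLI node corresponds to the entry
 * whose tr_minor is TL_TICOTSORD, while the socket variant uses the
 * "sticotsord" entry (TL_SOCK_COTSORD). The latter shares its hash tables
 * with TL_SOCK_COTS; see tl_attach() below.
 */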

#define	TL_MAXTRANSPORT A_CNT(tl_transports)

struct tl_endpt;
typedef struct tl_endpt tl_endpt_t;

typedef void (tlproc_t)(mblk_t *, tl_endpt_t *);

/*
 * Data structure used to represent pending connects.
 * Records enough information so that the connecting peer can close
 * before the connection gets accepted.
 */
typedef struct tl_icon {
	list_node_t	ti_node;
	struct tl_endpt	*ti_tep;	/* NULL if peer has already closed */
	mblk_t		*ti_mp;		/* b_next list of data + ordrel_ind */
	t_scalar_t	ti_seqno;	/* Sequence number */
} tl_icon_t;

typedef struct so_ux_addr soux_addr_t;
#define	TL_SOUX_ADDRLEN sizeof (soux_addr_t)

/*
 * Maximum number of unaccepted connection indications allowed per listener.
 */
#define	TL_MAXQLEN	4096
int tl_maxqlen = TL_MAXQLEN;

/*
 * transport endpoint structure
 */
struct tl_endpt {
	queue_t		*te_rq;		/* stream read queue */
	queue_t		*te_wq;		/* stream write queue */
	uint32_t	te_refcnt;
	int32_t		te_state;	/* TPI state of endpoint */
	minor_t		te_minor;	/* minor number */
#define	te_seqno	te_minor
	uint_t		te_flag;	/* flag field */
	boolean_t	te_nowsrv;
	tl_serializer_t	*te_ser;	/* Serializer to use */
#define	te_serializer	te_ser->ts_serializer

	soux_addr_t	te_uxaddr;	/* Socket address */
#define	te_magic	te_uxaddr.soua_magic
#define	te_vp		te_uxaddr.soua_vp
	tl_addr_t	te_ap;		/* addr bound to this endpt */
#define	te_zoneid	te_ap.ta_zoneid
#define	te_alen		te_ap.ta_alen
#define	te_abuf		te_ap.ta_abuf

	tl_transport_state_t *te_transport;
#define	te_addrhash	te_transport->tr_addr_hash
#define	te_aihash	te_transport->tr_ai_hash
#define	te_defaddr	te_transport->tr_defaddr
	cred_t		*te_credp;	/* endpoint user credentials */
	mod_hash_hndl_t	te_hash_hndl;	/* Handle for address hash */

	/*
	 * State specific for connection-oriented and connectionless
	 * transports.
	 */
	union {
		/* Connection-oriented state. */
		struct {
			t_uscalar_t _te_nicon;	/* count of conn requests */
			t_uscalar_t _te_qlen;	/* max conn requests */
			tl_endpt_t  *_te_oconp; /* conn request pending */
			tl_endpt_t  *_te_conp;	/* connected endpt */
#ifndef _ILP32
			void	    *_te_pad;
#endif
			list_t	_te_iconp;	/* list of conn ind. pending */
		} _te_cots_state;
		/* Connection-less state. */
		struct {
			tl_endpt_t *_te_lastep;	/* last dest. endpoint */
			tl_endpt_t *_te_flowq;	/* flow controlled on whom */
			list_node_t _te_flows;	/* lists of connections */
			list_t	_te_flowlist;	/* Who flowcontrols on me */
		} _te_clts_state;
	} _te_transport_state;
#define	te_nicon	_te_transport_state._te_cots_state._te_nicon
#define	te_qlen		_te_transport_state._te_cots_state._te_qlen
#define	te_oconp	_te_transport_state._te_cots_state._te_oconp
#define	te_conp		_te_transport_state._te_cots_state._te_conp
#define	te_iconp	_te_transport_state._te_cots_state._te_iconp
#define	te_lastep	_te_transport_state._te_clts_state._te_lastep
#define	te_flowq	_te_transport_state._te_clts_state._te_flowq
#define	te_flowlist	_te_transport_state._te_clts_state._te_flowlist
#define	te_flows	_te_transport_state._te_clts_state._te_flows

	bufcall_id_t	te_bufcid;	/* outstanding bufcall id */
	timeout_id_t	te_timoutid;	/* outstanding timeout id */
	pid_t		te_cpid;	/* cached pid of endpoint */
	t_uscalar_t	te_acceptor_id;	/* acceptor id for T_CONN_RES */
	/*
	 * Pieces of the endpoint state needed for closing.
	 */
	kmutex_t	te_closelock;
	kcondvar_t	te_closecv;
	uint8_t		te_closing;	/* The endpoint started closing */
	uint8_t		te_closewait;	/* Wait in close until zero */
	mblk_t		te_closemp;	/* for entering serializer on close */
	mblk_t		te_rsrvmp;	/* for entering serializer on rsrv */
	mblk_t		te_wsrvmp;	/* for entering serializer on wsrv */
	kmutex_t	te_srv_lock;
	kcondvar_t	te_srv_cv;
	uint8_t		te_rsrv_active;	/* Running in tl_rsrv() */
	uint8_t		te_wsrv_active;	/* Running in tl_wsrv() */
	/*
	 * Pieces of the endpoint state needed for serializer transitions.
	 */
	kmutex_t	te_ser_lock;	/* Protects the count below */
	uint_t		te_ser_count;	/* Number of messages on serializer */
};

/*
 * Flag values. The lower 4 bits specify the transport used.
 * TL_LISTENER, TL_ACCEPTOR, TL_ACCEPTED and TL_EAGER are for debugging only;
 * they make it easier to identify the endpoint.
 */
#define	TL_LISTENER	0x00010	/* the listener endpoint */
#define	TL_ACCEPTOR	0x00020	/* the accepting endpoint */
#define	TL_EAGER	0x00040	/* connecting endpoint */
#define	TL_ACCEPTED	0x00080	/* accepted connection */
#define	TL_SETCRED	0x00100	/* flag to indicate sending of credentials */
#define	TL_SETUCRED	0x00200	/* flag to indicate sending of ucred */
#define	TL_SOCKUCRED	0x00400	/* flag to indicate sending of SCM_UCRED */
#define	TL_ADDRHASHED	0x01000	/* Endpoint address is stored in te_addrhash */
#define	TL_CLOSE_SER	0x10000	/* Endpoint close has entered the serializer */
/*
 * Boolean checks for the endpoint type.
 */
#define	IS_CLTS(x)	(((x)->te_flag & TL_TICLTS) != 0)
#define	IS_COTS(x)	(((x)->te_flag & TL_TICLTS) == 0)
#define	IS_COTSORD(x)	(((x)->te_flag & TL_TICOTSORD) != 0)
#define	IS_SOCKET(x)	(((x)->te_flag & TL_SOCKET) != 0)

/*
 * Certain operations are always used together. These macros reduce the chance
 * of missing a part of a combination.
 */
#define	TL_UNCONNECT(x) { tl_refrele(x); x = NULL; }
#define	TL_REMOVE_PEER(x) { if ((x) != NULL) TL_UNCONNECT(x) }

#define	TL_PUTBQ(x, mp) {		\
	ASSERT(!((x)->te_flag & TL_CLOSE_SER)); \
	(x)->te_nowsrv = B_TRUE;	\
	(void) putbq((x)->te_wq, mp);	\
}

#define	TL_QENABLE(x) { (x)->te_nowsrv = B_FALSE; qenable((x)->te_wq); }
#define	TL_PUTQ(x, mp) { (x)->te_nowsrv = B_FALSE; (void)putq((x)->te_wq, mp); }
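
/*
 * Illustrative sketch only (not actual driver code): when the peer's stream
 * is flow controlled, a serialized sender typically does
 *
 *	if (!canputnext(peer_tep->te_rq)) {
 *		TL_PUTBQ(tep, mp);
 *		return;
 *	}
 *
 * and the peer's read-side service routine later re-enables the blocked
 * sender with TL_QENABLE(tep).
 */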

/*
 * STREAMS driver glue data structures.
 */
static	struct	module_info	tl_minfo = {
	TL_ID,			/* mi_idnum */
	TL_NAME,		/* mi_idname */
	TL_MINPSZ,		/* mi_minpsz */
	TL_MAXPSZ,		/* mi_maxpsz */
	TL_HIWAT,		/* mi_hiwat */
	TL_LOWAT		/* mi_lowat */
};

static	struct	qinit	tl_rinit = {
	NULL,			/* qi_putp */
	(int (*)())tl_rsrv,	/* qi_srvp */
	tl_open,		/* qi_qopen */
	tl_close,		/* qi_qclose */
	NULL,			/* qi_qadmin */
	&tl_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

static	struct	qinit	tl_winit = {
	(int (*)())tl_wput,	/* qi_putp */
	(int (*)())tl_wsrv,	/* qi_srvp */
	NULL,			/* qi_qopen */
	NULL,			/* qi_qclose */
	NULL,			/* qi_qadmin */
	&tl_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

static	struct streamtab	tlinfo = {
	&tl_rinit,		/* st_rdinit */
	&tl_winit,		/* st_wrinit */
	NULL,			/* st_muxrinit */
	NULL			/* st_muxwrinit */
};

DDI_DEFINE_STREAM_OPS(tl_devops, nulldev, nulldev, tl_attach, tl_detach,
    nulldev, tl_info, D_MP, &tlinfo, ddi_quiesce_not_supported);

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module -- pseudo driver here */
	"TPI Local Transport (tl)",
	&tl_devops,		/* driver ops */
};

/*
 * Module linkage information for the kernel.
 */
static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

/*
 * Templates for response to info request
 * Check sanity of unlimited connect data etc.
 */

#define	TL_CLTS_PROVIDER_FLAG	(XPG4_1|SENDZERO)
#define	TL_COTS_PROVIDER_FLAG	(XPG4_1|SENDZERO)

static struct T_info_ack tl_cots_info_ack =
	{
		T_INFO_ACK,	/* PRIM_type - always T_INFO_ACK */
		T_INFINITE,	/* TSDU size */
		T_INFINITE,	/* ETSDU size */
		T_INFINITE,	/* CDATA_size */
		T_INFINITE,	/* DDATA_size */
		T_INFINITE,	/* ADDR_size */
		T_INFINITE,	/* OPT_size */
		0,		/* TIDU_size - fill at run time */
		T_COTS,		/* SERV_type */
		-1,		/* CURRENT_state */
		TL_COTS_PROVIDER_FLAG	/* PROVIDER_flag */
	};

static struct T_info_ack tl_clts_info_ack =
	{
		T_INFO_ACK,	/* PRIM_type - always T_INFO_ACK */
		0,		/* TSDU_size - fill at run time */
		-2,		/* ETSDU_size -2 => not supported */
		-2,		/* CDATA_size -2 => not supported */
		-2,		/* DDATA_size -2 => not supported */
		-1,		/* ADDR_size -1 => unlimited */
		-1,		/* OPT_size */
		0,		/* TIDU_size - fill at run time */
		T_CLTS,		/* SERV_type */
		-1,		/* CURRENT_state */
		TL_CLTS_PROVIDER_FLAG	/* PROVIDER_flag */
	};

/*
 * private copy of devinfo pointer used in tl_info
 */
static dev_info_t *tl_dip;

/*
 * Endpoints cache.
 */
static kmem_cache_t *tl_cache;
/*
 * Minor number space.
 */
static id_space_t *tl_minors;

/*
 * Default Data Unit size.
 */
static t_scalar_t tl_tidusz;

/*
 * Size of hash tables.
 */
static size_t tl_hash_size = TL_HASH_SIZE;

/*
 * Debug and test variable ONLY. Turn off T_CONN_IND queueing
 * for sockets.
 */
static int tl_disable_early_connect = 0;
static int tl_client_closing_when_accepting;

static int tl_serializer_noswitch;

/*
 * LOCAL FUNCTION PROTOTYPES
 * -------------------------
 */
static boolean_t tl_eqaddr(tl_addr_t *, tl_addr_t *);
static void tl_do_proto(mblk_t *, tl_endpt_t *);
static void tl_do_ioctl(mblk_t *, tl_endpt_t *);
static void tl_do_ioctl_ser(mblk_t *, tl_endpt_t *);
static void tl_error_ack(queue_t *, mblk_t *, t_scalar_t, t_scalar_t,
    t_scalar_t);
static void tl_bind(mblk_t *, tl_endpt_t *);
static void tl_bind_ser(mblk_t *, tl_endpt_t *);
static void tl_ok_ack(queue_t *, mblk_t *mp, t_scalar_t);
static void tl_unbind(mblk_t *, tl_endpt_t *);
static void tl_optmgmt(queue_t *, mblk_t *);
static void tl_conn_req(queue_t *, mblk_t *);
static void tl_conn_req_ser(mblk_t *, tl_endpt_t *);
static void tl_conn_res(mblk_t *, tl_endpt_t *);
static void tl_discon_req(mblk_t *, tl_endpt_t *);
static void tl_capability_req(mblk_t *, tl_endpt_t *);
static void tl_info_req_ser(mblk_t *, tl_endpt_t *);
static void tl_info_req(mblk_t *, tl_endpt_t *);
static void tl_addr_req(mblk_t *, tl_endpt_t *);
static void tl_connected_cots_addr_req(mblk_t *, tl_endpt_t *);
static void tl_data(mblk_t *, tl_endpt_t *);
static void tl_exdata(mblk_t *, tl_endpt_t *);
static void tl_ordrel(mblk_t *, tl_endpt_t *);
static void tl_unitdata(mblk_t *, tl_endpt_t *);
static void tl_unitdata_ser(mblk_t *, tl_endpt_t *);
static void tl_uderr(queue_t *, mblk_t *, t_scalar_t);
static tl_endpt_t *tl_find_peer(tl_endpt_t *, tl_addr_t *);
static tl_endpt_t *tl_sock_find_peer(tl_endpt_t *, struct so_ux_addr *);
static boolean_t tl_get_any_addr(tl_endpt_t *, tl_addr_t *);
static void tl_cl_backenable(tl_endpt_t *);
static void tl_co_unconnect(tl_endpt_t *);
static mblk_t *tl_resizemp(mblk_t *, ssize_t);
static void tl_discon_ind(tl_endpt_t *, uint32_t);
static mblk_t *tl_discon_ind_alloc(uint32_t, t_scalar_t);
static mblk_t *tl_ordrel_ind_alloc(void);
static tl_icon_t *tl_icon_find(tl_endpt_t *, t_scalar_t);
static void tl_icon_queuemsg(tl_endpt_t *, t_scalar_t, mblk_t *);
static boolean_t tl_icon_hasprim(tl_endpt_t *, t_scalar_t, t_scalar_t);
static void tl_icon_sendmsgs(tl_endpt_t *, mblk_t **);
static void tl_icon_freemsgs(mblk_t **);
static void tl_merror(queue_t *, mblk_t *, int);
static void tl_fill_option(uchar_t *, cred_t *, pid_t, int, cred_t *);
static int tl_default_opt(queue_t *, int, int, uchar_t *);
static int tl_get_opt(queue_t *, int, int, uchar_t *);
static int tl_set_opt(queue_t *, uint_t, int, int, uint_t, uchar_t *, uint_t *,
    uchar_t *, void *, cred_t *);
static void tl_memrecover(queue_t *, mblk_t *, size_t);
static void tl_freetip(tl_endpt_t *, tl_icon_t *);
static void tl_free(tl_endpt_t *);
static int  tl_constructor(void *, void *, int);
static void tl_destructor(void *, void *);
static void tl_find_callback(mod_hash_key_t, mod_hash_val_t);
static tl_serializer_t *tl_serializer_alloc(int);
static void tl_serializer_refhold(tl_serializer_t *);
static void tl_serializer_refrele(tl_serializer_t *);
static void tl_serializer_enter(tl_endpt_t *, tlproc_t, mblk_t *);
static void tl_serializer_exit(tl_endpt_t *);
static boolean_t tl_noclose(tl_endpt_t *);
static void tl_closeok(tl_endpt_t *);
static void tl_refhold(tl_endpt_t *);
static void tl_refrele(tl_endpt_t *);
static int tl_hash_cmp_addr(mod_hash_key_t, mod_hash_key_t);
static uint_t tl_hash_by_addr(void *, mod_hash_key_t);
static void tl_close_ser(mblk_t *, tl_endpt_t *);
static void tl_close_finish_ser(mblk_t *, tl_endpt_t *);
static void tl_wput_data_ser(mblk_t *, tl_endpt_t *);
static void tl_proto_ser(mblk_t *, tl_endpt_t *);
static void tl_putq_ser(mblk_t *, tl_endpt_t *);
static void tl_wput_common_ser(mblk_t *, tl_endpt_t *);
static void tl_wput_ser(mblk_t *, tl_endpt_t *);
static void tl_wsrv_ser(mblk_t *, tl_endpt_t *);
static void tl_rsrv_ser(mblk_t *, tl_endpt_t *);
static void tl_addr_unbind(tl_endpt_t *);

/*
 * Initialize option database object for TL
 */

optdb_obj_t tl_opt_obj = {
	tl_default_opt,		/* TL default value function pointer */
	tl_get_opt,		/* TL get function pointer */
	tl_set_opt,		/* TL set function pointer */
	TL_OPT_ARR_CNT,		/* TL option database count of entries */
	tl_opt_arr,		/* TL option database */
	TL_VALID_LEVELS_CNT,	/* TL valid level count of entries */
	tl_valid_levels_arr	/* TL valid level array */
};

/*
 * LOCAL FUNCTIONS AND DRIVER ENTRY POINTS
 * ---------------------------------------
 */

/*
 * Loadable module routines
 */
int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Driver Entry Points and Other routines
 */
static int
tl_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int i;
	char name[32];

	/*
	 * Resume from a checkpoint state.
	 */
	if (cmd == DDI_RESUME)
		return (DDI_SUCCESS);

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Deduce the TIDU size to use. Note: a "strmsgsz" of 0 means that
	 * STREAMS message sizes are unlimited; we use a defined constant
	 * instead.
	 */
	tl_tidusz = strmsgsz != 0 ? (t_scalar_t)strmsgsz : TL_TIDUSZ;

	/*
	 * Create subdevices for each transport.
	 */
	for (i = 0; i < TL_UNUSED; i++) {
		if (ddi_create_minor_node(devi,
		    tl_transports[i].tr_name,
		    S_IFCHR, tl_transports[i].tr_minor,
		    DDI_PSEUDO, NULL) == DDI_FAILURE) {
			ddi_remove_minor_node(devi, NULL);
			return (DDI_FAILURE);
		}
	}

	tl_cache = kmem_cache_create("tl_cache", sizeof (tl_endpt_t),
	    0, tl_constructor, tl_destructor, NULL, NULL, NULL, 0);

	if (tl_cache == NULL) {
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}

	tl_minors = id_space_create("tl_minor_space",
	    TL_MINOR_START, MAXMIN32 - TL_MINOR_START + 1);

	/*
	 * Create hash tables and serializers for each transport.
	 */
	for (i = 0; i < TL_MAXTRANSPORT; i++) {
		tl_transport_state_t *t = &tl_transports[i];

		if (i == TL_UNUSED)
			continue;

		/* Socket COTSORD shares namespace with COTS */
		if (i == TL_SOCK_COTSORD) {
			t->tr_ai_hash =
			    tl_transports[TL_SOCK_COTS].tr_ai_hash;
			ASSERT(t->tr_ai_hash != NULL);
			t->tr_addr_hash =
			    tl_transports[TL_SOCK_COTS].tr_addr_hash;
			ASSERT(t->tr_addr_hash != NULL);
			continue;
		}

		/*
		 * Create hash tables.
		 */
		(void) snprintf(name, sizeof (name), "%s_ai_hash",
		    t->tr_name);
#ifdef _ILP32
		if (i & TL_SOCKET)
			t->tr_ai_hash =
			    mod_hash_create_idhash(name, tl_hash_size - 1,
			    mod_hash_null_valdtor);
		else
			t->tr_ai_hash =
			    mod_hash_create_ptrhash(name, tl_hash_size,
			    mod_hash_null_valdtor, sizeof (queue_t));
#else
		t->tr_ai_hash =
		    mod_hash_create_idhash(name, tl_hash_size - 1,
		    mod_hash_null_valdtor);
#endif /* _ILP32 */

		if (i & TL_SOCKET) {
			(void) snprintf(name, sizeof (name), "%s_sockaddr_hash",
			    t->tr_name);
			t->tr_addr_hash = mod_hash_create_ptrhash(name,
			    tl_hash_size, mod_hash_null_valdtor,
			    sizeof (uintptr_t));
		} else {
			(void) snprintf(name, sizeof (name), "%s_addr_hash",
			    t->tr_name);
			t->tr_addr_hash = mod_hash_create_extended(name,
			    tl_hash_size, mod_hash_null_keydtor,
			    mod_hash_null_valdtor,
			    tl_hash_by_addr, NULL, tl_hash_cmp_addr, KM_SLEEP);
		}

		/* Create serializer for connectionless transports. */
		if (i & TL_TICLTS)
			t->tr_serializer = tl_serializer_alloc(KM_SLEEP);
	}

	tl_dip = devi;

	return (DDI_SUCCESS);
}
10520Sstevel@tonic-gate
10530Sstevel@tonic-gate static int
tl_detach(dev_info_t * devi,ddi_detach_cmd_t cmd)10540Sstevel@tonic-gate tl_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
10550Sstevel@tonic-gate {
10560Sstevel@tonic-gate int i;
10570Sstevel@tonic-gate
10580Sstevel@tonic-gate if (cmd == DDI_SUSPEND)
10590Sstevel@tonic-gate return (DDI_SUCCESS);
10600Sstevel@tonic-gate
10610Sstevel@tonic-gate if (cmd != DDI_DETACH)
10620Sstevel@tonic-gate return (DDI_FAILURE);
10630Sstevel@tonic-gate
10640Sstevel@tonic-gate /*
10650Sstevel@tonic-gate * Destroy arenas and hash tables.
10660Sstevel@tonic-gate */
10670Sstevel@tonic-gate for (i = 0; i < TL_MAXTRANSPORT; i++) {
10680Sstevel@tonic-gate tl_transport_state_t *t = &tl_transports[i];
10690Sstevel@tonic-gate
10700Sstevel@tonic-gate if ((i == TL_UNUSED) || (i == TL_SOCK_COTSORD))
10710Sstevel@tonic-gate continue;
10720Sstevel@tonic-gate
1073*11474SJonathan.Adams@Sun.COM EQUIV(i & TL_TICLTS, t->tr_serializer != NULL);
10740Sstevel@tonic-gate if (t->tr_serializer != NULL) {
10750Sstevel@tonic-gate tl_serializer_refrele(t->tr_serializer);
10760Sstevel@tonic-gate t->tr_serializer = NULL;
10770Sstevel@tonic-gate }
10780Sstevel@tonic-gate
10790Sstevel@tonic-gate #ifdef _ILP32
10800Sstevel@tonic-gate if (i & TL_SOCKET)
10810Sstevel@tonic-gate mod_hash_destroy_idhash(t->tr_ai_hash);
10820Sstevel@tonic-gate else
10830Sstevel@tonic-gate mod_hash_destroy_ptrhash(t->tr_ai_hash);
10840Sstevel@tonic-gate #else
10850Sstevel@tonic-gate mod_hash_destroy_idhash(t->tr_ai_hash);
10860Sstevel@tonic-gate #endif /* _ILP32 */
10870Sstevel@tonic-gate t->tr_ai_hash = NULL;
10880Sstevel@tonic-gate if (i & TL_SOCKET)
10890Sstevel@tonic-gate mod_hash_destroy_ptrhash(t->tr_addr_hash);
10900Sstevel@tonic-gate else
10910Sstevel@tonic-gate mod_hash_destroy_hash(t->tr_addr_hash);
10920Sstevel@tonic-gate t->tr_addr_hash = NULL;
10930Sstevel@tonic-gate }
10940Sstevel@tonic-gate
10950Sstevel@tonic-gate kmem_cache_destroy(tl_cache);
10960Sstevel@tonic-gate tl_cache = NULL;
10970Sstevel@tonic-gate id_space_destroy(tl_minors);
10980Sstevel@tonic-gate tl_minors = NULL;
10990Sstevel@tonic-gate ddi_remove_minor_node(devi, NULL);
11000Sstevel@tonic-gate return (DDI_SUCCESS);
11010Sstevel@tonic-gate }
11020Sstevel@tonic-gate
11030Sstevel@tonic-gate /* ARGSUSED */
11040Sstevel@tonic-gate static int
tl_info(dev_info_t * dip,ddi_info_cmd_t infocmd,void * arg,void ** result)11050Sstevel@tonic-gate tl_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
11060Sstevel@tonic-gate {
11070Sstevel@tonic-gate
11080Sstevel@tonic-gate int retcode = DDI_FAILURE;
11090Sstevel@tonic-gate
11100Sstevel@tonic-gate switch (infocmd) {
11110Sstevel@tonic-gate
11120Sstevel@tonic-gate case DDI_INFO_DEVT2DEVINFO:
11130Sstevel@tonic-gate if (tl_dip != NULL) {
11140Sstevel@tonic-gate *result = (void *)tl_dip;
11150Sstevel@tonic-gate retcode = DDI_SUCCESS;
11160Sstevel@tonic-gate }
11170Sstevel@tonic-gate break;
11180Sstevel@tonic-gate
11190Sstevel@tonic-gate case DDI_INFO_DEVT2INSTANCE:
11200Sstevel@tonic-gate *result = (void *)0;
11210Sstevel@tonic-gate retcode = DDI_SUCCESS;
11220Sstevel@tonic-gate break;
11230Sstevel@tonic-gate
11240Sstevel@tonic-gate default:
11250Sstevel@tonic-gate break;
11260Sstevel@tonic-gate }
11270Sstevel@tonic-gate return (retcode);
11280Sstevel@tonic-gate }
11290Sstevel@tonic-gate
11300Sstevel@tonic-gate /*
11310Sstevel@tonic-gate * Endpoint reference management.
11320Sstevel@tonic-gate */
11330Sstevel@tonic-gate static void
tl_refhold(tl_endpt_t * tep)11340Sstevel@tonic-gate tl_refhold(tl_endpt_t *tep)
11350Sstevel@tonic-gate {
11360Sstevel@tonic-gate atomic_add_32(&tep->te_refcnt, 1);
11370Sstevel@tonic-gate }
11380Sstevel@tonic-gate
11390Sstevel@tonic-gate static void
tl_refrele(tl_endpt_t * tep)11400Sstevel@tonic-gate tl_refrele(tl_endpt_t *tep)
11410Sstevel@tonic-gate {
11420Sstevel@tonic-gate ASSERT(tep->te_refcnt != 0);
11430Sstevel@tonic-gate
11440Sstevel@tonic-gate if (atomic_add_32_nv(&tep->te_refcnt, -1) == 0)
11450Sstevel@tonic-gate tl_free(tep);
11460Sstevel@tonic-gate }
11470Sstevel@tonic-gate
11480Sstevel@tonic-gate /*ARGSUSED*/
11490Sstevel@tonic-gate static int
tl_constructor(void * buf,void * cdrarg,int kmflags)11500Sstevel@tonic-gate tl_constructor(void *buf, void *cdrarg, int kmflags)
11510Sstevel@tonic-gate {
11520Sstevel@tonic-gate tl_endpt_t *tep = buf;
11530Sstevel@tonic-gate
11540Sstevel@tonic-gate bzero(tep, sizeof (tl_endpt_t));
11550Sstevel@tonic-gate mutex_init(&tep->te_closelock, NULL, MUTEX_DEFAULT, NULL);
11560Sstevel@tonic-gate cv_init(&tep->te_closecv, NULL, CV_DEFAULT, NULL);
11570Sstevel@tonic-gate mutex_init(&tep->te_srv_lock, NULL, MUTEX_DEFAULT, NULL);
11580Sstevel@tonic-gate cv_init(&tep->te_srv_cv, NULL, CV_DEFAULT, NULL);
11590Sstevel@tonic-gate mutex_init(&tep->te_ser_lock, NULL, MUTEX_DEFAULT, NULL);
11600Sstevel@tonic-gate
11610Sstevel@tonic-gate return (0);
11620Sstevel@tonic-gate }
11630Sstevel@tonic-gate
11640Sstevel@tonic-gate /*ARGSUSED*/
11650Sstevel@tonic-gate static void
tl_destructor(void * buf,void * cdrarg)11660Sstevel@tonic-gate tl_destructor(void *buf, void *cdrarg)
11670Sstevel@tonic-gate {
11680Sstevel@tonic-gate tl_endpt_t *tep = buf;
11690Sstevel@tonic-gate
11700Sstevel@tonic-gate mutex_destroy(&tep->te_closelock);
11710Sstevel@tonic-gate cv_destroy(&tep->te_closecv);
11720Sstevel@tonic-gate mutex_destroy(&tep->te_srv_lock);
11730Sstevel@tonic-gate cv_destroy(&tep->te_srv_cv);
11740Sstevel@tonic-gate mutex_destroy(&tep->te_ser_lock);
11750Sstevel@tonic-gate }
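
/*
 * Note that the constructor/destructor pair above only sets up and tears
 * down the locks and condition variables embedded in a tl_endpt_t.  With
 * kmem object caching this work is done when objects enter and leave the
 * cache rather than on every allocation, so tl_free() below returns the
 * object to the cache with those primitives still initialized and idle.
 */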

static void
tl_free(tl_endpt_t *tep)
{
	ASSERT(tep->te_refcnt == 0);
	ASSERT(tep->te_transport != NULL);
	ASSERT(tep->te_rq == NULL);
	ASSERT(tep->te_wq == NULL);
	ASSERT(tep->te_ser != NULL);
	ASSERT(tep->te_ser_count == 0);
	ASSERT(! (tep->te_flag & TL_ADDRHASHED));

	if (IS_SOCKET(tep)) {
		ASSERT(tep->te_alen == TL_SOUX_ADDRLEN);
		ASSERT(tep->te_abuf == &tep->te_uxaddr);
		ASSERT(tep->te_vp == (void *)(uintptr_t)tep->te_minor);
		ASSERT(tep->te_magic == SOU_MAGIC_IMPLICIT);
	} else if (tep->te_abuf != NULL) {
		kmem_free(tep->te_abuf, tep->te_alen);
		tep->te_alen = -1; /* uninitialized */
		tep->te_abuf = NULL;
	} else {
		ASSERT(tep->te_alen == -1);
	}

	id_free(tl_minors, tep->te_minor);
	ASSERT(tep->te_credp == NULL);

	if (tep->te_hash_hndl != NULL)
		mod_hash_cancel(tep->te_addrhash, &tep->te_hash_hndl);

	if (IS_COTS(tep)) {
		TL_REMOVE_PEER(tep->te_conp);
		TL_REMOVE_PEER(tep->te_oconp);
		tl_serializer_refrele(tep->te_ser);
		tep->te_ser = NULL;
		ASSERT(tep->te_nicon == 0);
		ASSERT(list_head(&tep->te_iconp) == NULL);
	} else {
		ASSERT(tep->te_lastep == NULL);
		ASSERT(list_head(&tep->te_flowlist) == NULL);
		ASSERT(tep->te_flowq == NULL);
	}

	ASSERT(tep->te_bufcid == 0);
	ASSERT(tep->te_timoutid == 0);
	bzero(&tep->te_ap, sizeof (tep->te_ap));
	tep->te_acceptor_id = 0;

	ASSERT(tep->te_closewait == 0);
	ASSERT(!tep->te_rsrv_active);
	ASSERT(!tep->te_wsrv_active);
	tep->te_closing = 0;
	tep->te_nowsrv = B_FALSE;
	tep->te_flag = 0;

	kmem_cache_free(tl_cache, tep);
}

/*
 * Allocate/free reference-counted wrappers for serializers.
 */
static tl_serializer_t *
tl_serializer_alloc(int flags)
{
	tl_serializer_t *s = kmem_alloc(sizeof (tl_serializer_t), flags);
	serializer_t *ser;

	if (s == NULL)
		return (NULL);

	ser = serializer_create(flags);

	if (ser == NULL) {
		kmem_free(s, sizeof (tl_serializer_t));
		return (NULL);
	}

	s->ts_refcnt = 1;
	s->ts_serializer = ser;
	return (s);
}

static void
tl_serializer_refhold(tl_serializer_t *s)
{
	atomic_add_32(&s->ts_refcnt, 1);
}

static void
tl_serializer_refrele(tl_serializer_t *s)
{
	if (atomic_add_32_nv(&s->ts_refcnt, -1) == 0) {
		serializer_destroy(s->ts_serializer);
		kmem_free(s, sizeof (tl_serializer_t));
	}
}
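
/*
 * Lifetime of these wrappers: tl_attach() allocates one shared wrapper per
 * connectionless (TL_TICLTS) transport, referenced through tr_serializer,
 * while tl_open() allocates a private wrapper for each connection-oriented
 * endpoint.  The final tl_serializer_refrele() destroys the underlying
 * serializer_t together with the wrapper.
 */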

/*
 * Post a request on the endpoint serializer. For COTS transports keep track of
 * the number of pending requests.
 */
static void
tl_serializer_enter(tl_endpt_t *tep, tlproc_t tlproc, mblk_t *mp)
{
	if (IS_COTS(tep)) {
		mutex_enter(&tep->te_ser_lock);
		tep->te_ser_count++;
		mutex_exit(&tep->te_ser_lock);
	}
	serializer_enter(tep->te_serializer, (srproc_t *)tlproc, mp, tep);
}

/*
 * Complete processing the request on the serializer. Decrement the counter for
 * pending requests for COTS transports.
 */
static void
tl_serializer_exit(tl_endpt_t *tep)
{
	if (IS_COTS(tep)) {
		mutex_enter(&tep->te_ser_lock);
		ASSERT(tep->te_ser_count != 0);
		tep->te_ser_count--;
		mutex_exit(&tep->te_ser_lock);
	}
}
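
/*
 * Every serialized job in this driver follows the same shape: the caller
 * takes a reference on the endpoint and posts the job, and the job itself
 * calls tl_serializer_exit() followed by tl_refrele() when it is done.  A
 * minimal sketch of that convention (tl_example_ser and tl_example_caller
 * are hypothetical, not routines in this driver):
 */
#if 0
/* The job, run with the endpoint's serializer held: */
static void
tl_example_ser(mblk_t *mp, tl_endpt_t *tep)
{
	/* ... do the actual work on mp and tep here ... */
	tl_serializer_exit(tep);	/* drop the pending-request count */
	tl_refrele(tep);		/* release the caller's hold */
}

/* The caller, typically a put or service procedure: */
static void
tl_example_caller(tl_endpt_t *tep, mblk_t *mp)
{
	tl_refhold(tep);		/* keep tep alive for the job */
	tl_serializer_enter(tep, tl_example_ser, mp);
}
#endif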

/*
 * Hash management functions.
 */

/*
 * Return B_TRUE if two addresses are equal, B_FALSE otherwise.
 */
static boolean_t
tl_eqaddr(tl_addr_t *ap1, tl_addr_t *ap2)
{
	return ((ap1->ta_alen > 0) &&
	    (ap1->ta_alen == ap2->ta_alen) &&
	    (ap1->ta_zoneid == ap2->ta_zoneid) &&
	    (bcmp(ap1->ta_abuf, ap2->ta_abuf, ap1->ta_alen) == 0));
}

/*
 * This function is called whenever an endpoint is found in the hash table.
 */
/* ARGSUSED0 */
static void
tl_find_callback(mod_hash_key_t key, mod_hash_val_t val)
{
	tl_refhold((tl_endpt_t *)val);
}

/*
 * Address hash function.
 */
/* ARGSUSED */
static uint_t
tl_hash_by_addr(void *hash_data, mod_hash_key_t key)
{
	tl_addr_t *ap = (tl_addr_t *)key;
	size_t len = ap->ta_alen;
	uchar_t *p = ap->ta_abuf;
	uint_t i, g;

	ASSERT((len > 0) && (p != NULL));

	for (i = ap->ta_zoneid; len-- != 0; p++) {
		i = (i << 4) + (*p);
		if ((g = (i & 0xf0000000U)) != 0) {
			i ^= (g >> 24);
			i ^= g;
		}
	}
	return (i);
}
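
/*
 * The loop above is the classic PJW/ELF string hash, seeded with the
 * endpoint's zone ID so that identical addresses bound in different zones
 * land in different buckets, matching the zone check in tl_eqaddr().
 */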

/*
 * This function is used by hash lookups. It compares two generic addresses.
 */
static int
tl_hash_cmp_addr(mod_hash_key_t key1, mod_hash_key_t key2)
{
#ifdef DEBUG
	tl_addr_t *ap1 = (tl_addr_t *)key1;
	tl_addr_t *ap2 = (tl_addr_t *)key2;

	ASSERT(key1 != NULL);
	ASSERT(key2 != NULL);

	ASSERT(ap1->ta_abuf != NULL);
	ASSERT(ap2->ta_abuf != NULL);
	ASSERT(ap1->ta_alen > 0);
	ASSERT(ap2->ta_alen > 0);
#endif

	return (! tl_eqaddr((tl_addr_t *)key1, (tl_addr_t *)key2));
}
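
/*
 * Note the inverted sense: mod_hash comparison routines return 0 when two
 * keys match (strcmp-style), so tl_hash_cmp_addr() returns the negation of
 * tl_eqaddr().
 */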

/*
 * Prevent endpoint from closing if possible.
 * Return B_TRUE on success, B_FALSE on failure.
 */
static boolean_t
tl_noclose(tl_endpt_t *tep)
{
	boolean_t rc = B_FALSE;

	mutex_enter(&tep->te_closelock);
	if (! tep->te_closing) {
		ASSERT(tep->te_closewait == 0);
		tep->te_closewait++;
		rc = B_TRUE;
	}
	mutex_exit(&tep->te_closelock);
	return (rc);
}

/*
 * Allow endpoint to close if needed.
 */
static void
tl_closeok(tl_endpt_t *tep)
{
	ASSERT(tep->te_closewait > 0);
	mutex_enter(&tep->te_closelock);
	ASSERT(tep->te_closewait == 1);
	tep->te_closewait--;
	cv_signal(&tep->te_closecv);
	mutex_exit(&tep->te_closelock);
}
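
/*
 * These two routines bracket any section that must not race with close:
 * tl_close() blocks on te_closecv until te_closewait drops back to zero.
 * A minimal usage sketch (tl_example_peer_access is hypothetical, not a
 * routine in this driver):
 */
#if 0
static void
tl_example_peer_access(tl_endpt_t *peer_tep)
{
	if (tl_noclose(peer_tep)) {
		/* peer_tep cannot finish closing while we work on it */
		/* ... examine or modify peer_tep state here ... */
		tl_closeok(peer_tep);	/* let a pending close proceed */
	}
}
#endif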

/*
 * STREAMS open entry point.
 */
/* ARGSUSED */
static int
tl_open(queue_t *rq, dev_t *devp, int oflag, int sflag, cred_t *credp)
{
	tl_endpt_t *tep;
	minor_t minor = getminor(*devp);

	/*
	 * Driver is called directly. Both CLONEOPEN and MODOPEN
	 * are illegal
	 */
	if ((sflag == CLONEOPEN) || (sflag == MODOPEN))
		return (ENXIO);

	if (rq->q_ptr != NULL)
		return (0);

	/* Minor number should specify the mode used for the driver. */
	if ((minor >= TL_UNUSED))
		return (ENXIO);

	if (oflag & SO_SOCKSTR) {
		minor |= TL_SOCKET;
	}

	tep = kmem_cache_alloc(tl_cache, KM_SLEEP);
	tep->te_refcnt = 1;
	tep->te_cpid = curproc->p_pid;
	rq->q_ptr = WR(rq)->q_ptr = tep;
	tep->te_state = TS_UNBND;
	tep->te_credp = credp;
	crhold(credp);
	tep->te_zoneid = getzoneid();

	tep->te_flag = minor & TL_MINOR_MASK;
	tep->te_transport = &tl_transports[minor];

	/* Allocate a unique minor number for this instance. */
	tep->te_minor = (minor_t)id_alloc(tl_minors);

	/* Reserve hash handle for bind(). */
	(void) mod_hash_reserve(tep->te_addrhash, &tep->te_hash_hndl);

	/* Transport-specific initialization */
	if (IS_COTS(tep)) {
		/* Use private serializer */
		tep->te_ser = tl_serializer_alloc(KM_SLEEP);

		/* Create list for pending connections */
		list_create(&tep->te_iconp, sizeof (tl_icon_t),
		    offsetof(tl_icon_t, ti_node));
		tep->te_qlen = 0;
		tep->te_nicon = 0;
		tep->te_oconp = NULL;
		tep->te_conp = NULL;
	} else {
		/* Use shared serializer */
		tep->te_ser = tep->te_transport->tr_serializer;
		bzero(&tep->te_flows, sizeof (list_node_t));
		/* Create list for flow control */
		list_create(&tep->te_flowlist, sizeof (tl_endpt_t),
		    offsetof(tl_endpt_t, te_flows));
		tep->te_flowq = NULL;
		tep->te_lastep = NULL;
	}

	/* Initialize endpoint address */
	if (IS_SOCKET(tep)) {
		/* Socket-specific address handling. */
		tep->te_alen = TL_SOUX_ADDRLEN;
		tep->te_abuf = &tep->te_uxaddr;
		tep->te_vp = (void *)(uintptr_t)tep->te_minor;
		tep->te_magic = SOU_MAGIC_IMPLICIT;
	} else {
		tep->te_alen = -1;
		tep->te_abuf = NULL;
	}

	/* clone the driver */
	*devp = makedevice(getmajor(*devp), tep->te_minor);

	tep->te_rq = rq;
	tep->te_wq = WR(rq);

#ifdef _ILP32
	if (IS_SOCKET(tep))
		tep->te_acceptor_id = tep->te_minor;
	else
		tep->te_acceptor_id = (t_uscalar_t)rq;
#else
	tep->te_acceptor_id = tep->te_minor;
#endif /* _ILP32 */


	qprocson(rq);

	/*
	 * Insert acceptor ID in the hash. The AI hash always sleeps on
	 * insertion so insertion can't fail.
	 */
	(void) mod_hash_insert(tep->te_transport->tr_ai_hash,
	    (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id,
	    (mod_hash_val_t)tep);

	return (0);
}
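
/*
 * For reference, this entry point is reached through the per-transport
 * minor nodes created in tl_attach().  A plain TLI/XTI consumer opens one
 * of those nodes directly, while the socket framework opens the ticotsord
 * node with SO_SOCKSTR set in oflag.  Illustrative user-level sketch only
 * (not part of this driver):
 */
#if 0
#include <fcntl.h>
#include <tiuser.h>

int
example_open_loopback(void)
{
	/* Selects the connection-oriented, orderly-release transport. */
	return (t_open("/dev/ticotsord", O_RDWR, NULL));
}
#endif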

/* ARGSUSED1 */
static int
tl_close(queue_t *rq, int flag, cred_t *credp)
{
	tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr;
	tl_endpt_t *elp = NULL;
	queue_t *wq = tep->te_wq;
	int rc;

	ASSERT(wq == WR(rq));

	/*
	 * Remove the endpoint from acceptor hash.
	 */
	rc = mod_hash_remove(tep->te_transport->tr_ai_hash,
	    (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id,
	    (mod_hash_val_t *)&elp);
	ASSERT(rc == 0 && tep == elp);
	if ((rc != 0) || (tep != elp)) {
		(void) (STRLOG(TL_ID, tep->te_minor, 1,
		    SL_TRACE|SL_ERROR,
		    "tl_close:inconsistency in AI hash"));
	}

	/*
	 * Wait till close is safe, then mark endpoint as closing.
	 */
	mutex_enter(&tep->te_closelock);
	while (tep->te_closewait)
		cv_wait(&tep->te_closecv, &tep->te_closelock);
	tep->te_closing = B_TRUE;
	/*
	 * Will wait for the serializer part of the close to finish, so set
	 * te_closewait now.
	 */
	tep->te_closewait = 1;
	tep->te_nowsrv = B_FALSE;
	mutex_exit(&tep->te_closelock);

	/*
	 * tl_close_ser doesn't drop reference, so no need to tl_refhold.
	 * It is safe because close will wait for tl_close_ser to finish.
	 */
	tl_serializer_enter(tep, tl_close_ser, &tep->te_closemp);

	/*
	 * Wait for the first phase of close to complete before qprocsoff().
	 */
	mutex_enter(&tep->te_closelock);
	while (tep->te_closewait)
		cv_wait(&tep->te_closecv, &tep->te_closelock);
	mutex_exit(&tep->te_closelock);

	qprocsoff(rq);

	if (tep->te_bufcid) {
		qunbufcall(rq, tep->te_bufcid);
		tep->te_bufcid = 0;
	}
	if (tep->te_timoutid) {
		(void) quntimeout(rq, tep->te_timoutid);
		tep->te_timoutid = 0;
	}

	/*
	 * Finish close behind serializer.
	 *
	 * For a CLTS endpoint increase a refcount and continue close
	 * processing with serializer protection. This processing may happen
	 * asynchronously with the completion of tl_close().
	 *
	 * For a COTS endpoint wait before destroying tep since the serializer
	 * may go away together with tep and we need to destroy serializer
	 * outside of serializer context.
	 */
	ASSERT(tep->te_closewait == 0);
	if (IS_COTS(tep))
		tep->te_closewait = 1;
	else
		tl_refhold(tep);

	tl_serializer_enter(tep, tl_close_finish_ser, &tep->te_closemp);

	/*
	 * For connection-oriented transports wait for all serializer activity
	 * to settle down.
	 */
	if (IS_COTS(tep)) {
		mutex_enter(&tep->te_closelock);
		while (tep->te_closewait)
			cv_wait(&tep->te_closecv, &tep->te_closelock);
		mutex_exit(&tep->te_closelock);
	}

	crfree(tep->te_credp);
	tep->te_credp = NULL;
	tep->te_wq = NULL;
	tl_refrele(tep);
	/*
	 * tep is likely to be destroyed now, so can't reference it any more.
	 */

	rq->q_ptr = wq->q_ptr = NULL;
	return (0);
}

/*
 * First phase of close processing done behind the serializer.
 *
 * Do not drop the reference in the end - tl_close() wants this reference to
 * stay.
 */
/* ARGSUSED0 */
static void
tl_close_ser(mblk_t *mp, tl_endpt_t *tep)
{
	ASSERT(tep->te_closing);
	ASSERT(tep->te_closewait == 1);
	ASSERT(!(tep->te_flag & TL_CLOSE_SER));

	tep->te_flag |= TL_CLOSE_SER;

	/*
	 * Drain out all messages on queue except for TL_TICOTS where the
	 * abortive release semantics permit discarding of data on close
	 */
	if (tep->te_wq->q_first && (IS_CLTS(tep) || IS_COTSORD(tep))) {
		tl_wsrv_ser(NULL, tep);
	}

	/* Remove address from hash table. */
	tl_addr_unbind(tep);
	/*
	 * qprocsoff() gets confused when q->q_next is not NULL on the write
	 * queue of the driver, so clear these before qprocsoff() is called.
	 * Also clear q_next for the peer since this queue is going away.
	 */
	if (IS_COTS(tep) && !IS_SOCKET(tep)) {
		tl_endpt_t *peer_tep = tep->te_conp;

		tep->te_wq->q_next = NULL;
		if ((peer_tep != NULL) && !peer_tep->te_closing)
			peer_tep->te_wq->q_next = NULL;
	}

	tep->te_rq = NULL;

	/* wake up tl_close() */
	tl_closeok(tep);
	tl_serializer_exit(tep);
}

/*
 * Second phase of tl_close(). Should wakeup tl_close() for COTS mode and drop
 * the reference for CLTS.
 *
 * Called from serializer. Should drop reference count for CLTS only.
 */
/* ARGSUSED0 */
static void
tl_close_finish_ser(mblk_t *mp, tl_endpt_t *tep)
{
	ASSERT(tep->te_closing);
	IMPLY(IS_CLTS(tep), tep->te_closewait == 0);
	IMPLY(IS_COTS(tep), tep->te_closewait == 1);

	tep->te_state = -1;	/* Uninitialized */
	if (IS_COTS(tep)) {
		tl_co_unconnect(tep);
	} else {
		/* Connectionless specific cleanup */
		TL_REMOVE_PEER(tep->te_lastep);
		/*
		 * Backenable anybody that is flow controlled waiting for
		 * this endpoint.
		 */
		tl_cl_backenable(tep);
		if (tep->te_flowq != NULL) {
			list_remove(&(tep->te_flowq->te_flowlist), tep);
			tep->te_flowq = NULL;
		}
	}

	tl_serializer_exit(tep);
	if (IS_COTS(tep))
		tl_closeok(tep);
	else
		tl_refrele(tep);
}

/*
 * STREAMS write-side put procedure.
 * Enter serializer for most of the processing.
 *
 * The T_CONN_REQ is processed outside of serializer.
 */
static void
tl_wput(queue_t *wq, mblk_t *mp)
{
	tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
	ssize_t msz = MBLKL(mp);
	union T_primitives *prim = (union T_primitives *)mp->b_rptr;
	tlproc_t *tl_proc = NULL;

	switch (DB_TYPE(mp)) {
	case M_DATA:
		/* Only valid for connection-oriented transports */
		if (IS_CLTS(tep)) {
			(void) (STRLOG(TL_ID, tep->te_minor, 1,
			    SL_TRACE|SL_ERROR,
			    "tl_wput:M_DATA invalid for ticlts driver"));
			tl_merror(wq, mp, EPROTO);
			return;
		}
		tl_proc = tl_wput_data_ser;
		break;

	case M_IOCTL:
		switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
		case TL_IOC_CREDOPT:
			/* FALLTHROUGH */
		case TL_IOC_UCREDOPT:
			/*
			 * Serialize endpoint state change.
			 */
			tl_proc = tl_do_ioctl_ser;
			break;

		default:
			miocnak(wq, mp, 0, EINVAL);
			return;
		}
		break;

	case M_FLUSH:
		/*
		 * do canonical M_FLUSH processing
		 */
		if (*mp->b_rptr & FLUSHW) {
			flushq(wq, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			flushq(RD(wq), FLUSHALL);
			qreply(wq, mp);
		} else {
			freemsg(mp);
		}
		return;

	case M_PROTO:
		if (msz < sizeof (prim->type)) {
			(void) (STRLOG(TL_ID, tep->te_minor, 1,
			    SL_TRACE|SL_ERROR,
			    "tl_wput:M_PROTO data too short"));
			tl_merror(wq, mp, EPROTO);
			return;
		}
		switch (prim->type) {
		case T_OPTMGMT_REQ:
		case T_SVR4_OPTMGMT_REQ:
			/*
			 * Process TPI option management requests immediately
			 * in put procedure regardless of in-order processing
			 * of already queued messages.
			 * (Note: This driver supports the AF_UNIX socket
			 * implementation.  Without this immediate processing,
			 * setsockopt() on a socket endpoint would block on
			 * flow-controlled endpoints, which it should not.
			 * Handling it here is required for the VSU socket
			 * tests to pass and is consistent with BSD socket
			 * behavior.)
			 */
			tl_optmgmt(wq, mp);
			return;
		case O_T_BIND_REQ:
		case T_BIND_REQ:
			tl_proc = tl_bind_ser;
			break;
		case T_CONN_REQ:
			if (IS_CLTS(tep)) {
				tl_merror(wq, mp, EPROTO);
				return;
			}
			tl_conn_req(wq, mp);
			return;
		case T_DATA_REQ:
		case T_OPTDATA_REQ:
		case T_EXDATA_REQ:
		case T_ORDREL_REQ:
			tl_proc = tl_putq_ser;
			break;
		case T_UNITDATA_REQ:
			if (IS_COTS(tep) ||
			    (msz < sizeof (struct T_unitdata_req))) {
				tl_merror(wq, mp, EPROTO);
				return;
			}
			if ((tep->te_state == TS_IDLE) && !wq->q_first) {
				tl_proc = tl_unitdata_ser;
			} else {
				tl_proc = tl_putq_ser;
			}
			break;
		default:
			/*
			 * process in service procedure if message already
			 * queued (maintain in-order processing)
			 */
			if (wq->q_first != NULL) {
				tl_proc = tl_putq_ser;
			} else {
				tl_proc = tl_wput_ser;
			}
			break;
		}
		break;

	case M_PCPROTO:
		/*
		 * Check that the message has enough data to figure out TPI
		 * primitive.
		 */
		if (msz < sizeof (prim->type)) {
			(void) (STRLOG(TL_ID, tep->te_minor, 1,
			    SL_TRACE|SL_ERROR,
			    "tl_wput:M_PCPROTO data too short"));
			tl_merror(wq, mp, EPROTO);
			return;
		}
		switch (prim->type) {
		case T_CAPABILITY_REQ:
			tl_capability_req(mp, tep);
			return;
		case T_INFO_REQ:
			tl_proc = tl_info_req_ser;
			break;
		default:
			(void) (STRLOG(TL_ID, tep->te_minor, 1,
			    SL_TRACE|SL_ERROR,
			    "tl_wput:unknown TPI msg primitive"));
			tl_merror(wq, mp, EPROTO);
			return;
		}
		break;
	default:
		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
		    "tl_wput:default:unexpected Streams message"));
		freemsg(mp);
		return;
	}

	/*
	 * Continue processing via serializer.
	 */
	ASSERT(tl_proc != NULL);
	tl_refhold(tep);
	tl_serializer_enter(tep, tl_proc, mp);
}
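
/*
 * Summary of the dispatch above: T_OPTMGMT_REQ/T_SVR4_OPTMGMT_REQ,
 * T_CONN_REQ, T_CAPABILITY_REQ and M_FLUSH are handled directly in the put
 * procedure; everything else is forwarded to the serializer, either to a
 * primitive-specific routine (tl_bind_ser, tl_unitdata_ser, tl_info_req_ser,
 * tl_do_ioctl_ser, tl_wput_data_ser) or to tl_putq_ser/tl_wput_ser, which
 * preserve ordering with messages already sitting on the write queue.
 */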

/*
 * Place message on the queue while preserving order.
 */
static void
tl_putq_ser(mblk_t *mp, tl_endpt_t *tep)
{
	if (tep->te_closing) {
		tl_wput_ser(mp, tep);
	} else {
		TL_PUTQ(tep, mp);
		tl_serializer_exit(tep);
		tl_refrele(tep);
	}

}

static void
tl_wput_common_ser(mblk_t *mp, tl_endpt_t *tep)
{
	ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO));

	switch (DB_TYPE(mp)) {
	case M_DATA:
		tl_data(mp, tep);
		break;
	case M_PROTO:
		tl_do_proto(mp, tep);
		break;
	default:
		freemsg(mp);
		break;
	}
}

/*
 * Write side put procedure called from serializer.
 */
static void
tl_wput_ser(mblk_t *mp, tl_endpt_t *tep)
{
	tl_wput_common_ser(mp, tep);
	tl_serializer_exit(tep);
	tl_refrele(tep);
}

/*
 * M_DATA processing. Called from serializer.
 */
static void
tl_wput_data_ser(mblk_t *mp, tl_endpt_t *tep)
{
	tl_endpt_t *peer_tep = tep->te_conp;
	queue_t *peer_rq;

	ASSERT(DB_TYPE(mp) == M_DATA);
	ASSERT(IS_COTS(tep));

	IMPLY(peer_tep, tep->te_serializer == peer_tep->te_serializer);

	/*
	 * fastpath for data. Ignore flow control if tep is closing.
	 */
	if ((peer_tep != NULL) &&
	    !peer_tep->te_closing &&
	    ((tep->te_state == TS_DATA_XFER) ||
	    (tep->te_state == TS_WREQ_ORDREL)) &&
	    (tep->te_wq != NULL) &&
	    (tep->te_wq->q_first == NULL) &&
	    ((peer_tep->te_state == TS_DATA_XFER) ||
	    (peer_tep->te_state == TS_WREQ_ORDREL)) &&
	    ((peer_rq = peer_tep->te_rq) != NULL) &&
	    (canputnext(peer_rq) || tep->te_closing)) {
		putnext(peer_rq, mp);
	} else if (tep->te_closing) {
		/*
		 * It is possible that by the time we got here tep started to
		 * close. If the write queue is not empty, and the state is
		 * TS_DATA_XFER the data should be delivered in order, so we
		 * call putq() instead of freeing the data.
		 */
		if ((tep->te_wq != NULL) &&
		    ((tep->te_state == TS_DATA_XFER) ||
		    (tep->te_state == TS_WREQ_ORDREL))) {
			TL_PUTQ(tep, mp);
		} else {
			freemsg(mp);
		}
	} else {
		TL_PUTQ(tep, mp);
	}

	tl_serializer_exit(tep);
	tl_refrele(tep);
}

/*
 * Write side service routine.
 *
 * All actual processing happens within serializer which is entered
 * synchronously. It is possible that by the time tl_wsrv() wakes up, some new
 * messages that need processing may have arrived, so tl_wsrv repeats until
 * queue is empty or te_nowsrv is set.
 */
static void
tl_wsrv(queue_t *wq)
{
	tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;

	while ((wq->q_first != NULL) && !tep->te_nowsrv) {
		mutex_enter(&tep->te_srv_lock);
		ASSERT(tep->te_wsrv_active == B_FALSE);
		tep->te_wsrv_active = B_TRUE;
		mutex_exit(&tep->te_srv_lock);

		tl_serializer_enter(tep, tl_wsrv_ser, &tep->te_wsrvmp);

		/*
		 * Wait for serializer job to complete.
		 */
		mutex_enter(&tep->te_srv_lock);
		while (tep->te_wsrv_active) {
			cv_wait(&tep->te_srv_cv, &tep->te_srv_lock);
		}
		cv_signal(&tep->te_srv_cv);
		mutex_exit(&tep->te_srv_lock);
	}
}

/*
 * Serialized write side processing of the STREAMS queue.
 * May be called either from tl_wsrv() or from tl_close() in which case ser_mp
 * is NULL.
 */
static void
tl_wsrv_ser(mblk_t *ser_mp, tl_endpt_t *tep)
{
	mblk_t *mp;
	queue_t *wq = tep->te_wq;

	ASSERT(wq != NULL);
	while (!tep->te_nowsrv && (mp = getq(wq)) != NULL) {
		tl_wput_common_ser(mp, tep);
	}

	/*
	 * Wakeup service routine unless called from close.
	 * If ser_mp is specified, the caller is tl_wsrv().
	 * Otherwise, the caller is tl_close_ser(). Since tl_close_ser() doesn't
	 * call tl_serializer_enter() before calling tl_wsrv_ser(), there should
	 * be no matching tl_serializer_exit() in this case.
	 * Also, there is no need to wakeup anyone since tl_close_ser() is not
	 * waiting on te_srv_cv.
	 */
	if (ser_mp != NULL) {
		/*
		 * We are called from tl_wsrv.
		 */
		mutex_enter(&tep->te_srv_lock);
		ASSERT(tep->te_wsrv_active);
		tep->te_wsrv_active = B_FALSE;
		cv_signal(&tep->te_srv_cv);
		mutex_exit(&tep->te_srv_lock);
		tl_serializer_exit(tep);
	}
}

/*
 * Called when the stream is backenabled. Enter serializer and qenable everyone
 * flow controlled by tep.
 *
 * NOTE: The service routine should enter serializer synchronously. Otherwise it
 * is possible that two instances of tl_rsrv will be running, reusing the same
 * rsrv mblk.
 */
static void
tl_rsrv(queue_t *rq)
{
	tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr;

	ASSERT(rq->q_first == NULL);
	ASSERT(tep->te_rsrv_active == 0);

	tep->te_rsrv_active = B_TRUE;
	tl_serializer_enter(tep, tl_rsrv_ser, &tep->te_rsrvmp);
	/*
	 * Wait for serializer job to complete.
	 */
	mutex_enter(&tep->te_srv_lock);
	while (tep->te_rsrv_active) {
		cv_wait(&tep->te_srv_cv, &tep->te_srv_lock);
	}
	cv_signal(&tep->te_srv_cv);
	mutex_exit(&tep->te_srv_lock);
}

/* ARGSUSED */
static void
tl_rsrv_ser(mblk_t *mp, tl_endpt_t *tep)
{
	tl_endpt_t *peer_tep;

	if (IS_CLTS(tep) && tep->te_state == TS_IDLE) {
		tl_cl_backenable(tep);
	} else if (
	    IS_COTS(tep) &&
	    ((peer_tep = tep->te_conp) != NULL) &&
	    !peer_tep->te_closing &&
	    ((tep->te_state == TS_DATA_XFER) ||
	    (tep->te_state == TS_WIND_ORDREL) ||
	    (tep->te_state == TS_WREQ_ORDREL))) {
		TL_QENABLE(peer_tep);
	}

	/*
	 * Wakeup read side service routine.
	 */
	mutex_enter(&tep->te_srv_lock);
	ASSERT(tep->te_rsrv_active);
	tep->te_rsrv_active = B_FALSE;
	cv_signal(&tep->te_srv_cv);
	mutex_exit(&tep->te_srv_lock);
	tl_serializer_exit(tep);
}

/*
 * process M_PROTO messages. Always called from serializer.
 */
static void
tl_do_proto(mblk_t *mp, tl_endpt_t *tep)
{
	ssize_t msz = MBLKL(mp);
	union T_primitives *prim = (union T_primitives *)mp->b_rptr;

	/* Message size was validated by tl_wput(). */
	ASSERT(msz >= sizeof (prim->type));

	switch (prim->type) {
	case T_UNBIND_REQ:
		tl_unbind(mp, tep);
		break;

	case T_ADDR_REQ:
		tl_addr_req(mp, tep);
		break;

	case O_T_CONN_RES:
	case T_CONN_RES:
		if (IS_CLTS(tep)) {
			tl_merror(tep->te_wq, mp, EPROTO);
			break;
		}
		tl_conn_res(mp, tep);
		break;

	case T_DISCON_REQ:
		if (IS_CLTS(tep)) {
			tl_merror(tep->te_wq, mp, EPROTO);
			break;
		}
		tl_discon_req(mp, tep);
		break;

	case T_DATA_REQ:
		if (IS_CLTS(tep)) {
			tl_merror(tep->te_wq, mp, EPROTO);
			break;
		}
		tl_data(mp, tep);
		break;

	case T_OPTDATA_REQ:
		if (IS_CLTS(tep)) {
			tl_merror(tep->te_wq, mp, EPROTO);
			break;
		}
		tl_data(mp, tep);
		break;

	case T_EXDATA_REQ:
		if (IS_CLTS(tep)) {
			tl_merror(tep->te_wq, mp, EPROTO);
			break;
		}
		tl_exdata(mp, tep);
		break;

	case T_ORDREL_REQ:
		if (! IS_COTSORD(tep)) {
			tl_merror(tep->te_wq, mp, EPROTO);
			break;
		}
		tl_ordrel(mp, tep);
		break;

	case T_UNITDATA_REQ:
		if (IS_COTS(tep)) {
			tl_merror(tep->te_wq, mp, EPROTO);
			break;
		}
		tl_unitdata(mp, tep);
		break;

	default:
		tl_merror(tep->te_wq, mp, EPROTO);
		break;
	}
}

/*
 * Process ioctl from serializer.
 * This is a wrapper around tl_do_ioctl().
 */
static void
tl_do_ioctl_ser(mblk_t *mp, tl_endpt_t *tep)
{
	if (! tep->te_closing)
		tl_do_ioctl(mp, tep);
	else
		freemsg(mp);

	tl_serializer_exit(tep);
	tl_refrele(tep);
}

static void
tl_do_ioctl(mblk_t *mp, tl_endpt_t *tep)
{
	struct iocblk *iocbp = (struct iocblk *)mp->b_rptr;
	int cmd = iocbp->ioc_cmd;
	queue_t *wq = tep->te_wq;
	int error;
	int thisopt, otheropt;

	ASSERT((cmd == TL_IOC_CREDOPT) || (cmd == TL_IOC_UCREDOPT));

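	/*
	 * Note on the switch below: TL_IOC_UCREDOPT enters the shared block
	 * through the case label placed inside the else branch, so both
	 * ioctls run the same option-handling code and differ only in which
	 * of TL_SETCRED/TL_SETUCRED they select.
	 */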
22140Sstevel@tonic-gate switch (cmd) {
22150Sstevel@tonic-gate case TL_IOC_CREDOPT:
22160Sstevel@tonic-gate if (cmd == TL_IOC_CREDOPT) {
22170Sstevel@tonic-gate thisopt = TL_SETCRED;
22180Sstevel@tonic-gate otheropt = TL_SETUCRED;
22190Sstevel@tonic-gate } else {
22200Sstevel@tonic-gate /* FALLTHROUGH */
22210Sstevel@tonic-gate case TL_IOC_UCREDOPT:
22220Sstevel@tonic-gate thisopt = TL_SETUCRED;
22230Sstevel@tonic-gate otheropt = TL_SETCRED;
22240Sstevel@tonic-gate }
22250Sstevel@tonic-gate /*
22260Sstevel@tonic-gate * The credentials passing does not apply to sockets.
22270Sstevel@tonic-gate * Only one of the cred options can be set at a given time.
22280Sstevel@tonic-gate */
22290Sstevel@tonic-gate if (IS_SOCKET(tep) || (tep->te_flag & otheropt)) {
22300Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL);
22310Sstevel@tonic-gate return;
22320Sstevel@tonic-gate }
22330Sstevel@tonic-gate
22340Sstevel@tonic-gate /*
22350Sstevel@tonic-gate * Turn on generation of credential options for
22360Sstevel@tonic-gate * T_conn_req, T_conn_con, T_unidata_ind.
22370Sstevel@tonic-gate */
22380Sstevel@tonic-gate error = miocpullup(mp, sizeof (uint32_t));
22390Sstevel@tonic-gate if (error != 0) {
22400Sstevel@tonic-gate miocnak(wq, mp, 0, error);
22410Sstevel@tonic-gate return;
22420Sstevel@tonic-gate }
22430Sstevel@tonic-gate if (!IS_P2ALIGNED(mp->b_cont->b_rptr, sizeof (uint32_t))) {
22440Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL);
22450Sstevel@tonic-gate return;
22460Sstevel@tonic-gate }
22470Sstevel@tonic-gate
22480Sstevel@tonic-gate if (*(uint32_t *)mp->b_cont->b_rptr)
22490Sstevel@tonic-gate tep->te_flag |= thisopt;
22500Sstevel@tonic-gate else
22510Sstevel@tonic-gate tep->te_flag &= ~thisopt;
22520Sstevel@tonic-gate
22530Sstevel@tonic-gate miocack(wq, mp, 0, 0);
22540Sstevel@tonic-gate break;
22550Sstevel@tonic-gate
22560Sstevel@tonic-gate default:
22570Sstevel@tonic-gate /* Should not be here */
22580Sstevel@tonic-gate miocnak(wq, mp, 0, EINVAL);
22590Sstevel@tonic-gate break;
22600Sstevel@tonic-gate }
22610Sstevel@tonic-gate }
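
/*
 * Illustrative sketch (not part of the driver): how a user-level program
 * might enable ucred generation through the TL_IOC_UCREDOPT ioctl handled
 * above.  This is a hedged example only -- "fd" is assumed to be an open
 * ticotsord TLI endpoint, and the header providing TL_IOC_UCREDOPT is
 * assumed to be <sys/tl.h>.
 *
 *	#include <stropts.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *	#include <stdint.h>
 *	#include <sys/tl.h>
 *
 *	uint32_t on = 1;
 *	struct strioctl ic;
 *
 *	ic.ic_cmd = TL_IOC_UCREDOPT;
 *	ic.ic_timout = 0;
 *	ic.ic_len = sizeof (on);
 *	ic.ic_dp = (char *)&on;
 *	if (ioctl(fd, I_STR, &ic) < 0)
 *		perror("TL_IOC_UCREDOPT");
 *
 * The payload is the single uint32_t pulled up by miocpullup() above; a
 * non-zero value sets TL_SETUCRED, zero clears it, and the request is
 * nak'ed with EINVAL on sockets or when TL_SETCRED is already set.
 */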
22620Sstevel@tonic-gate
22630Sstevel@tonic-gate
22640Sstevel@tonic-gate /*
22650Sstevel@tonic-gate * send T_ERROR_ACK
22660Sstevel@tonic-gate * Note: assumes enough memory or caller passed big enough mp
22670Sstevel@tonic-gate * - no recovery from allocb failures
22680Sstevel@tonic-gate */
22690Sstevel@tonic-gate
22700Sstevel@tonic-gate static void
22710Sstevel@tonic-gate tl_error_ack(queue_t *wq, mblk_t *mp, t_scalar_t tli_err,
22720Sstevel@tonic-gate t_scalar_t unix_err, t_scalar_t type)
22730Sstevel@tonic-gate {
22740Sstevel@tonic-gate struct T_error_ack *err_ack;
22750Sstevel@tonic-gate mblk_t *ackmp = tpi_ack_alloc(mp, sizeof (struct T_error_ack),
22760Sstevel@tonic-gate M_PCPROTO, T_ERROR_ACK);
22770Sstevel@tonic-gate
22780Sstevel@tonic-gate if (ackmp == NULL) {
22790Sstevel@tonic-gate (void) (STRLOG(TL_ID, 0, 1, SL_TRACE|SL_ERROR,
22805240Snordmark "tl_error_ack:out of mblk memory"));
22810Sstevel@tonic-gate tl_merror(wq, NULL, ENOSR);
22820Sstevel@tonic-gate return;
22830Sstevel@tonic-gate }
22840Sstevel@tonic-gate err_ack = (struct T_error_ack *)ackmp->b_rptr;
22850Sstevel@tonic-gate err_ack->ERROR_prim = type;
22860Sstevel@tonic-gate err_ack->TLI_error = tli_err;
22870Sstevel@tonic-gate err_ack->UNIX_error = unix_err;
22880Sstevel@tonic-gate
22890Sstevel@tonic-gate /*
22900Sstevel@tonic-gate * send error ack message
22910Sstevel@tonic-gate */
22920Sstevel@tonic-gate qreply(wq, ackmp);
22930Sstevel@tonic-gate }
22940Sstevel@tonic-gate
22950Sstevel@tonic-gate
22960Sstevel@tonic-gate
22970Sstevel@tonic-gate /*
22980Sstevel@tonic-gate * send T_OK_ACK
22990Sstevel@tonic-gate * Note: assumes enough memory or caller passed big enough mp
23000Sstevel@tonic-gate * - no recovery from allocb failures
23010Sstevel@tonic-gate */
23020Sstevel@tonic-gate static void
23030Sstevel@tonic-gate tl_ok_ack(queue_t *wq, mblk_t *mp, t_scalar_t type)
23040Sstevel@tonic-gate {
23050Sstevel@tonic-gate struct T_ok_ack *ok_ack;
23060Sstevel@tonic-gate mblk_t *ackmp = tpi_ack_alloc(mp, sizeof (struct T_ok_ack),
23070Sstevel@tonic-gate M_PCPROTO, T_OK_ACK);
23080Sstevel@tonic-gate
23090Sstevel@tonic-gate if (ackmp == NULL) {
23100Sstevel@tonic-gate tl_merror(wq, NULL, ENOMEM);
23110Sstevel@tonic-gate return;
23120Sstevel@tonic-gate }
23130Sstevel@tonic-gate
23140Sstevel@tonic-gate ok_ack = (struct T_ok_ack *)ackmp->b_rptr;
23150Sstevel@tonic-gate ok_ack->CORRECT_prim = type;
23160Sstevel@tonic-gate
23170Sstevel@tonic-gate (void) qreply(wq, ackmp);
23180Sstevel@tonic-gate }
23190Sstevel@tonic-gate
23200Sstevel@tonic-gate /*
23210Sstevel@tonic-gate * Process T_BIND_REQ and O_T_BIND_REQ from serializer.
23220Sstevel@tonic-gate * This is a wrapper around tl_bind().
23230Sstevel@tonic-gate */
23240Sstevel@tonic-gate static void
23250Sstevel@tonic-gate tl_bind_ser(mblk_t *mp, tl_endpt_t *tep)
23260Sstevel@tonic-gate {
23270Sstevel@tonic-gate if (! tep->te_closing)
23280Sstevel@tonic-gate tl_bind(mp, tep);
23290Sstevel@tonic-gate else
23300Sstevel@tonic-gate freemsg(mp);
23310Sstevel@tonic-gate
23320Sstevel@tonic-gate tl_serializer_exit(tep);
23330Sstevel@tonic-gate tl_refrele(tep);
23340Sstevel@tonic-gate }
23350Sstevel@tonic-gate
23360Sstevel@tonic-gate /*
23370Sstevel@tonic-gate * Process T_BIND_REQ and O_T_BIND_REQ TPI requests.
23380Sstevel@tonic-gate * Assumes that the endpoint is in the unbound state.
23390Sstevel@tonic-gate */
23400Sstevel@tonic-gate static void
23410Sstevel@tonic-gate tl_bind(mblk_t *mp, tl_endpt_t *tep)
23420Sstevel@tonic-gate {
23430Sstevel@tonic-gate queue_t *wq = tep->te_wq;
23440Sstevel@tonic-gate struct T_bind_ack *b_ack;
23450Sstevel@tonic-gate struct T_bind_req *bind = (struct T_bind_req *)mp->b_rptr;
23460Sstevel@tonic-gate mblk_t *ackmp, *bamp;
23470Sstevel@tonic-gate soux_addr_t ux_addr;
23480Sstevel@tonic-gate t_uscalar_t qlen = 0;
23490Sstevel@tonic-gate t_scalar_t alen, aoff;
23500Sstevel@tonic-gate tl_addr_t addr_req;
23510Sstevel@tonic-gate void *addr_startp;
23520Sstevel@tonic-gate ssize_t msz = MBLKL(mp), basize;
23530Sstevel@tonic-gate t_scalar_t tli_err = 0, unix_err = 0;
23540Sstevel@tonic-gate t_scalar_t save_prim_type = bind->PRIM_type;
23550Sstevel@tonic-gate t_scalar_t save_state = tep->te_state;
23560Sstevel@tonic-gate
23570Sstevel@tonic-gate if (tep->te_state != TS_UNBND) {
23580Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
23595240Snordmark SL_TRACE|SL_ERROR,
23605240Snordmark "tl_wput:bind_request:out of state, state=%d",
23615240Snordmark tep->te_state));
23620Sstevel@tonic-gate tli_err = TOUTSTATE;
23630Sstevel@tonic-gate goto error;
23640Sstevel@tonic-gate }
23650Sstevel@tonic-gate
23660Sstevel@tonic-gate if (msz < sizeof (struct T_bind_req)) {
23670Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL;
23680Sstevel@tonic-gate goto error;
23690Sstevel@tonic-gate }
23700Sstevel@tonic-gate
23710Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_BIND_REQ, tep->te_state);
23720Sstevel@tonic-gate
23730Sstevel@tonic-gate ASSERT((bind->PRIM_type == O_T_BIND_REQ) ||
23740Sstevel@tonic-gate (bind->PRIM_type == T_BIND_REQ));
23750Sstevel@tonic-gate
23760Sstevel@tonic-gate alen = bind->ADDR_length;
23770Sstevel@tonic-gate aoff = bind->ADDR_offset;
23780Sstevel@tonic-gate
23790Sstevel@tonic-gate /* negotiate max conn req pending */
23800Sstevel@tonic-gate if (IS_COTS(tep)) {
23810Sstevel@tonic-gate qlen = bind->CONIND_number;
23822486Sakolb if (qlen > tl_maxqlen)
23832486Sakolb qlen = tl_maxqlen;
23840Sstevel@tonic-gate }
23850Sstevel@tonic-gate
23860Sstevel@tonic-gate /*
23870Sstevel@tonic-gate * Reserve hash handle. It can only be NULL if the endpoint is unbound
23880Sstevel@tonic-gate * and bound again.
23890Sstevel@tonic-gate */
23900Sstevel@tonic-gate if ((tep->te_hash_hndl == NULL) &&
23910Sstevel@tonic-gate ((tep->te_flag & TL_ADDRHASHED) == 0) &&
23920Sstevel@tonic-gate mod_hash_reserve_nosleep(tep->te_addrhash,
23935240Snordmark &tep->te_hash_hndl) != 0) {
23940Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOSR;
23950Sstevel@tonic-gate goto error;
23960Sstevel@tonic-gate }
23970Sstevel@tonic-gate
23980Sstevel@tonic-gate /*
23990Sstevel@tonic-gate * Verify address correctness.
24000Sstevel@tonic-gate */
24010Sstevel@tonic-gate if (IS_SOCKET(tep)) {
24020Sstevel@tonic-gate ASSERT(bind->PRIM_type == O_T_BIND_REQ);
24030Sstevel@tonic-gate
24040Sstevel@tonic-gate if ((alen != TL_SOUX_ADDRLEN) ||
24050Sstevel@tonic-gate (aoff < 0) ||
24060Sstevel@tonic-gate (aoff + alen > msz)) {
24070Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
24085240Snordmark 1, SL_TRACE|SL_ERROR,
24095240Snordmark "tl_bind: invalid socket addr"));
24100Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
24110Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL;
24120Sstevel@tonic-gate goto error;
24130Sstevel@tonic-gate }
24140Sstevel@tonic-gate /* Copy address from message to local buffer. */
24150Sstevel@tonic-gate bcopy(mp->b_rptr + aoff, &ux_addr, sizeof (ux_addr));
24160Sstevel@tonic-gate /*
24170Sstevel@tonic-gate * Check that we got correct address from sockets
24180Sstevel@tonic-gate */
24190Sstevel@tonic-gate if ((ux_addr.soua_magic != SOU_MAGIC_EXPLICIT) &&
24200Sstevel@tonic-gate (ux_addr.soua_magic != SOU_MAGIC_IMPLICIT)) {
24210Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
24225240Snordmark 1, SL_TRACE|SL_ERROR,
24235240Snordmark "tl_bind: invalid socket magic"));
24240Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
24250Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL;
24260Sstevel@tonic-gate goto error;
24270Sstevel@tonic-gate }
24280Sstevel@tonic-gate if ((ux_addr.soua_magic == SOU_MAGIC_IMPLICIT) &&
24290Sstevel@tonic-gate (ux_addr.soua_vp != NULL)) {
24300Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
24315240Snordmark 1, SL_TRACE|SL_ERROR,
24325240Snordmark "tl_bind: implicit addr non-empty"));
24330Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
24340Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL;
24350Sstevel@tonic-gate goto error;
24360Sstevel@tonic-gate }
24370Sstevel@tonic-gate if ((ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) &&
24380Sstevel@tonic-gate (ux_addr.soua_vp == NULL)) {
24390Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
24405240Snordmark 1, SL_TRACE|SL_ERROR,
24415240Snordmark "tl_bind: explicit addr empty"));
24420Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
24430Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL;
24440Sstevel@tonic-gate goto error;
24450Sstevel@tonic-gate }
24460Sstevel@tonic-gate } else {
24470Sstevel@tonic-gate if ((alen > 0) && ((aoff < 0) ||
24485240Snordmark ((ssize_t)(aoff + alen) > msz) ||
24495240Snordmark ((aoff + alen) < 0))) {
24500Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
24515240Snordmark 1, SL_TRACE|SL_ERROR,
24525240Snordmark "tl_bind: invalid message"));
24530Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
24540Sstevel@tonic-gate tli_err = TSYSERR; unix_err = EINVAL;
24550Sstevel@tonic-gate goto error;
24560Sstevel@tonic-gate }
24570Sstevel@tonic-gate if ((alen < 0) || (alen > (msz - sizeof (struct T_bind_req)))) {
24580Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
24595240Snordmark 1, SL_TRACE|SL_ERROR,
24605240Snordmark "tl_bind: bad addr in message"));
24610Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
24620Sstevel@tonic-gate tli_err = TBADADDR;
24630Sstevel@tonic-gate goto error;
24640Sstevel@tonic-gate }
24650Sstevel@tonic-gate #ifdef DEBUG
24660Sstevel@tonic-gate /*
24670Sstevel@tonic-gate * Mild form of ASSERT()ion to detect broken TPI apps.
24680Sstevel@tonic-gate * if (! assertion)
24690Sstevel@tonic-gate * log warning;
24700Sstevel@tonic-gate */
24710Sstevel@tonic-gate if (! ((alen == 0 && aoff == 0) ||
24720Sstevel@tonic-gate (aoff >= (t_scalar_t)(sizeof (struct T_bind_req))))) {
24730Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
24740Sstevel@tonic-gate 3, SL_TRACE|SL_ERROR,
24750Sstevel@tonic-gate "tl_bind: addr overlaps TPI message"));
24760Sstevel@tonic-gate }
24770Sstevel@tonic-gate #endif
24780Sstevel@tonic-gate }
24790Sstevel@tonic-gate
24800Sstevel@tonic-gate /*
24810Sstevel@tonic-gate * Bind the address provided or allocate one if requested.
24820Sstevel@tonic-gate * Allow rebinds with a new qlen value.
24830Sstevel@tonic-gate */
24840Sstevel@tonic-gate if (IS_SOCKET(tep)) {
24850Sstevel@tonic-gate /*
24860Sstevel@tonic-gate * For anonymous requests the te_ap is already set up properly
24870Sstevel@tonic-gate * so use minor number as an address.
24880Sstevel@tonic-gate * For explicit requests need to check whether the address is
24890Sstevel@tonic-gate * already in use.
24900Sstevel@tonic-gate */
24910Sstevel@tonic-gate if (ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) {
24920Sstevel@tonic-gate int rc;
24930Sstevel@tonic-gate
24940Sstevel@tonic-gate if (tep->te_flag & TL_ADDRHASHED) {
24950Sstevel@tonic-gate ASSERT(IS_COTS(tep) && tep->te_qlen == 0);
24960Sstevel@tonic-gate if (tep->te_vp == ux_addr.soua_vp)
24970Sstevel@tonic-gate goto skip_addr_bind;
24980Sstevel@tonic-gate else /* Rebind to a new address. */
24990Sstevel@tonic-gate tl_addr_unbind(tep);
25000Sstevel@tonic-gate }
25010Sstevel@tonic-gate /*
25020Sstevel@tonic-gate * Insert address in the hash if it is not already
25030Sstevel@tonic-gate * there. Since we use preallocated handle, the insert
25040Sstevel@tonic-gate * can fail only if the key is already present.
25050Sstevel@tonic-gate */
25060Sstevel@tonic-gate rc = mod_hash_insert_reserve(tep->te_addrhash,
25070Sstevel@tonic-gate (mod_hash_key_t)ux_addr.soua_vp,
25080Sstevel@tonic-gate (mod_hash_val_t)tep, tep->te_hash_hndl);
25090Sstevel@tonic-gate
25100Sstevel@tonic-gate if (rc != 0) {
25110Sstevel@tonic-gate ASSERT(rc == MH_ERR_DUPLICATE);
25120Sstevel@tonic-gate /*
25130Sstevel@tonic-gate * Violate O_T_BIND_REQ semantics and fail with
25140Sstevel@tonic-gate * TADDRBUSY - sockets will not use any address
25150Sstevel@tonic-gate * other than supplied one for explicit binds.
25160Sstevel@tonic-gate */
25170Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
25185240Snordmark SL_TRACE|SL_ERROR,
25195240Snordmark "tl_bind:requested addr %p is busy",
25205240Snordmark ux_addr.soua_vp));
25210Sstevel@tonic-gate tli_err = TADDRBUSY; unix_err = 0;
25220Sstevel@tonic-gate goto error;
25230Sstevel@tonic-gate }
25240Sstevel@tonic-gate tep->te_uxaddr = ux_addr;
25250Sstevel@tonic-gate tep->te_flag |= TL_ADDRHASHED;
25260Sstevel@tonic-gate tep->te_hash_hndl = NULL;
25270Sstevel@tonic-gate }
25280Sstevel@tonic-gate } else if (alen == 0) {
25290Sstevel@tonic-gate /*
25300Sstevel@tonic-gate * assign any free address
25310Sstevel@tonic-gate */
25320Sstevel@tonic-gate if (! tl_get_any_addr(tep, NULL)) {
25330Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
25345240Snordmark 1, SL_TRACE|SL_ERROR,
25355240Snordmark "tl_bind:failed to get buffer for any "
25365240Snordmark "address"));
25370Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOSR;
25380Sstevel@tonic-gate goto error;
25390Sstevel@tonic-gate }
25400Sstevel@tonic-gate } else {
25410Sstevel@tonic-gate addr_req.ta_alen = alen;
25420Sstevel@tonic-gate addr_req.ta_abuf = (mp->b_rptr + aoff);
25430Sstevel@tonic-gate addr_req.ta_zoneid = tep->te_zoneid;
25440Sstevel@tonic-gate
25450Sstevel@tonic-gate tep->te_abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP);
25460Sstevel@tonic-gate if (tep->te_abuf == NULL) {
25470Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOSR;
25480Sstevel@tonic-gate goto error;
25490Sstevel@tonic-gate }
25500Sstevel@tonic-gate bcopy(addr_req.ta_abuf, tep->te_abuf, addr_req.ta_alen);
25510Sstevel@tonic-gate tep->te_alen = alen;
25520Sstevel@tonic-gate
25530Sstevel@tonic-gate if (mod_hash_insert_reserve(tep->te_addrhash,
25545240Snordmark (mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep,
25555240Snordmark tep->te_hash_hndl) != 0) {
25560Sstevel@tonic-gate if (save_prim_type == T_BIND_REQ) {
25570Sstevel@tonic-gate /*
25580Sstevel@tonic-gate * The bind semantics for this primitive
25590Sstevel@tonic-gate * require a failure if the exact address
25600Sstevel@tonic-gate * requested is busy
25610Sstevel@tonic-gate */
25620Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
25635240Snordmark SL_TRACE|SL_ERROR,
25645240Snordmark "tl_bind:requested addr is busy"));
25650Sstevel@tonic-gate tli_err = TADDRBUSY; unix_err = 0;
25660Sstevel@tonic-gate goto error;
25670Sstevel@tonic-gate }
25680Sstevel@tonic-gate
25690Sstevel@tonic-gate /*
25700Sstevel@tonic-gate 			 * O_T_BIND_REQ semantics say that if the requested
25710Sstevel@tonic-gate 			 * address is busy, bind to any available free address
25720Sstevel@tonic-gate */
25730Sstevel@tonic-gate if (! tl_get_any_addr(tep, &addr_req)) {
25740Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
25755240Snordmark SL_TRACE|SL_ERROR,
25765240Snordmark "tl_bind:unable to get any addr buf"));
25770Sstevel@tonic-gate tli_err = TSYSERR; unix_err = ENOMEM;
25780Sstevel@tonic-gate goto error;
25790Sstevel@tonic-gate }
25800Sstevel@tonic-gate } else {
25810Sstevel@tonic-gate tep->te_flag |= TL_ADDRHASHED;
25820Sstevel@tonic-gate tep->te_hash_hndl = NULL;
25830Sstevel@tonic-gate }
25840Sstevel@tonic-gate }
25850Sstevel@tonic-gate
25860Sstevel@tonic-gate ASSERT(tep->te_alen >= 0);
25870Sstevel@tonic-gate
25880Sstevel@tonic-gate skip_addr_bind:
25890Sstevel@tonic-gate /*
25900Sstevel@tonic-gate * prepare T_BIND_ACK TPI message
25910Sstevel@tonic-gate */
25920Sstevel@tonic-gate basize = sizeof (struct T_bind_ack) + tep->te_alen;
25930Sstevel@tonic-gate bamp = reallocb(mp, basize, 0);
25940Sstevel@tonic-gate if (bamp == NULL) {
25950Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
25965240Snordmark "tl_wput:tl_bind: allocb failed"));
25970Sstevel@tonic-gate /*
25980Sstevel@tonic-gate * roll back state changes
25990Sstevel@tonic-gate */
26000Sstevel@tonic-gate tl_addr_unbind(tep);
26010Sstevel@tonic-gate tep->te_state = TS_UNBND;
26020Sstevel@tonic-gate tl_memrecover(wq, mp, basize);
26030Sstevel@tonic-gate return;
26040Sstevel@tonic-gate }
26050Sstevel@tonic-gate
26060Sstevel@tonic-gate DB_TYPE(bamp) = M_PCPROTO;
26070Sstevel@tonic-gate bamp->b_wptr = bamp->b_rptr + basize;
26080Sstevel@tonic-gate b_ack = (struct T_bind_ack *)bamp->b_rptr;
26090Sstevel@tonic-gate b_ack->PRIM_type = T_BIND_ACK;
26100Sstevel@tonic-gate b_ack->CONIND_number = qlen;
26110Sstevel@tonic-gate b_ack->ADDR_length = tep->te_alen;
26120Sstevel@tonic-gate b_ack->ADDR_offset = (t_scalar_t)sizeof (struct T_bind_ack);
26130Sstevel@tonic-gate addr_startp = bamp->b_rptr + b_ack->ADDR_offset;
26140Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen);
26150Sstevel@tonic-gate
26160Sstevel@tonic-gate if (IS_COTS(tep)) {
26170Sstevel@tonic-gate tep->te_qlen = qlen;
26180Sstevel@tonic-gate if (qlen > 0)
26190Sstevel@tonic-gate tep->te_flag |= TL_LISTENER;
26200Sstevel@tonic-gate }
26210Sstevel@tonic-gate
26220Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_BIND_ACK, tep->te_state);
26230Sstevel@tonic-gate /*
26240Sstevel@tonic-gate * send T_BIND_ACK message
26250Sstevel@tonic-gate */
26260Sstevel@tonic-gate (void) qreply(wq, bamp);
26270Sstevel@tonic-gate return;
26280Sstevel@tonic-gate
26290Sstevel@tonic-gate error:
26300Sstevel@tonic-gate ackmp = reallocb(mp, sizeof (struct T_error_ack), 0);
26310Sstevel@tonic-gate if (ackmp == NULL) {
26320Sstevel@tonic-gate /*
26330Sstevel@tonic-gate * roll back state changes
26340Sstevel@tonic-gate */
26350Sstevel@tonic-gate tep->te_state = save_state;
26360Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack));
26370Sstevel@tonic-gate return;
26380Sstevel@tonic-gate }
26390Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
26400Sstevel@tonic-gate tl_error_ack(wq, ackmp, tli_err, unix_err, save_prim_type);
26410Sstevel@tonic-gate }
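
/*
 * Illustrative sketch (not part of the driver): a minimal TLI listener
 * bind that exercises tl_bind() above.  Hedged example -- the flex
 * address "demo.service" is made up, and t_open(3NSL)/t_bind(3NSL) from
 * libnsl are assumed.  The qlen value supplied here arrives as
 * CONIND_number and is clipped to tl_maxqlen by the code above.
 *
 *	#include <tiuser.h>
 *	#include <fcntl.h>
 *
 *	char addr[] = "demo.service";
 *	struct t_bind req;
 *	int fd;
 *
 *	if ((fd = t_open("/dev/ticotsord", O_RDWR, NULL)) < 0)
 *		t_error("t_open");
 *	req.addr.buf = addr;
 *	req.addr.len = sizeof (addr);
 *	req.addr.maxlen = sizeof (addr);
 *	req.qlen = 5;
 *	if (t_bind(fd, &req, NULL) < 0)
 *		t_error("t_bind");
 *
 * Passing a request with addr.len == 0 instead takes the
 * tl_get_any_addr() branch above and the driver picks a free address,
 * which is returned in the T_BIND_ACK.
 */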
26420Sstevel@tonic-gate
26430Sstevel@tonic-gate /*
26440Sstevel@tonic-gate * Process T_UNBIND_REQ.
26450Sstevel@tonic-gate * Called from serializer.
26460Sstevel@tonic-gate */
26470Sstevel@tonic-gate static void
26480Sstevel@tonic-gate tl_unbind(mblk_t *mp, tl_endpt_t *tep)
26490Sstevel@tonic-gate {
26500Sstevel@tonic-gate queue_t *wq;
26510Sstevel@tonic-gate mblk_t *ackmp;
26520Sstevel@tonic-gate
26530Sstevel@tonic-gate if (tep->te_closing) {
26540Sstevel@tonic-gate freemsg(mp);
26550Sstevel@tonic-gate return;
26560Sstevel@tonic-gate }
26570Sstevel@tonic-gate
26580Sstevel@tonic-gate wq = tep->te_wq;
26590Sstevel@tonic-gate
26600Sstevel@tonic-gate /*
26610Sstevel@tonic-gate * preallocate memory for max of T_OK_ACK and T_ERROR_ACK
26620Sstevel@tonic-gate * ==> allocate for T_ERROR_ACK (known max)
26630Sstevel@tonic-gate */
26640Sstevel@tonic-gate if ((ackmp = reallocb(mp, sizeof (struct T_error_ack), 0)) == NULL) {
26650Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack));
26660Sstevel@tonic-gate return;
26670Sstevel@tonic-gate }
26680Sstevel@tonic-gate /*
26690Sstevel@tonic-gate * memory resources committed
26700Sstevel@tonic-gate * Note: no message validation. T_UNBIND_REQ message is
26710Sstevel@tonic-gate * same size as PRIM_type field so already verified earlier.
26720Sstevel@tonic-gate */
26730Sstevel@tonic-gate
26740Sstevel@tonic-gate /*
26750Sstevel@tonic-gate * validate state
26760Sstevel@tonic-gate */
26770Sstevel@tonic-gate if (tep->te_state != TS_IDLE) {
26780Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
26795240Snordmark SL_TRACE|SL_ERROR,
26805240Snordmark "tl_wput:T_UNBIND_REQ:out of state, state=%d",
26815240Snordmark tep->te_state));
26820Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_UNBIND_REQ);
26830Sstevel@tonic-gate return;
26840Sstevel@tonic-gate }
26850Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_UNBIND_REQ, tep->te_state);
26860Sstevel@tonic-gate
26870Sstevel@tonic-gate /*
26880Sstevel@tonic-gate * TPI says on T_UNBIND_REQ:
26890Sstevel@tonic-gate * send up a M_FLUSH to flush both
26900Sstevel@tonic-gate * read and write queues
26910Sstevel@tonic-gate */
26920Sstevel@tonic-gate (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
26930Sstevel@tonic-gate
26940Sstevel@tonic-gate if (! IS_SOCKET(tep) || !IS_CLTS(tep) || tep->te_qlen != 0 ||
26950Sstevel@tonic-gate tep->te_magic != SOU_MAGIC_EXPLICIT) {
26960Sstevel@tonic-gate
26970Sstevel@tonic-gate /*
26980Sstevel@tonic-gate * Sockets use bind with qlen==0 followed by bind() to
26990Sstevel@tonic-gate * the same address with qlen > 0 for listeners.
27000Sstevel@tonic-gate * We allow rebind with a new qlen value.
27010Sstevel@tonic-gate */
27020Sstevel@tonic-gate tl_addr_unbind(tep);
27030Sstevel@tonic-gate }
27040Sstevel@tonic-gate
27050Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
27060Sstevel@tonic-gate /*
27070Sstevel@tonic-gate * send T_OK_ACK
27080Sstevel@tonic-gate */
27090Sstevel@tonic-gate tl_ok_ack(wq, ackmp, T_UNBIND_REQ);
27100Sstevel@tonic-gate }
27110Sstevel@tonic-gate
27120Sstevel@tonic-gate
27130Sstevel@tonic-gate /*
27140Sstevel@tonic-gate * Option management code from drv/ip is used here
27150Sstevel@tonic-gate * Note: TL_PROT_LEVEL/TL_IOC_CREDOPT option is not part of tl_opt_arr
27160Sstevel@tonic-gate * database of options. So optcom_req() will fail T_SVR4_OPTMGMT_REQ.
27170Sstevel@tonic-gate * However, that is what we want as that option is 'unorthodox'
27180Sstevel@tonic-gate * and only valid in T_CONN_IND, T_CONN_CON and T_UNITDATA_IND
27190Sstevel@tonic-gate * and not in T_SVR4_OPTMGMT_REQ/ACK
27200Sstevel@tonic-gate * Note2: use of optcom_req means this routine is an exception to
27210Sstevel@tonic-gate * recovery from allocb() failures.
27220Sstevel@tonic-gate */
27230Sstevel@tonic-gate
27240Sstevel@tonic-gate static void
27250Sstevel@tonic-gate tl_optmgmt(queue_t *wq, mblk_t *mp)
27260Sstevel@tonic-gate {
27270Sstevel@tonic-gate tl_endpt_t *tep;
27280Sstevel@tonic-gate mblk_t *ackmp;
27290Sstevel@tonic-gate union T_primitives *prim;
27308778SErik.Nordmark@Sun.COM cred_t *cr;
27310Sstevel@tonic-gate
27320Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr;
27330Sstevel@tonic-gate prim = (union T_primitives *)mp->b_rptr;
27340Sstevel@tonic-gate
27358778SErik.Nordmark@Sun.COM /*
27368778SErik.Nordmark@Sun.COM * All Solaris components should pass a db_credp
27378778SErik.Nordmark@Sun.COM * for this TPI message, hence we ASSERT.
27388778SErik.Nordmark@Sun.COM * But in case there is some other M_PROTO that looks
27398778SErik.Nordmark@Sun.COM * like a TPI message sent by some other kernel
27408778SErik.Nordmark@Sun.COM * component, we check and return an error.
27418778SErik.Nordmark@Sun.COM */
27428778SErik.Nordmark@Sun.COM cr = msg_getcred(mp, NULL);
27438778SErik.Nordmark@Sun.COM ASSERT(cr != NULL);
27448778SErik.Nordmark@Sun.COM if (cr == NULL) {
27458778SErik.Nordmark@Sun.COM tl_error_ack(wq, mp, TSYSERR, EINVAL, prim->type);
27468778SErik.Nordmark@Sun.COM return;
27478778SErik.Nordmark@Sun.COM }
27488778SErik.Nordmark@Sun.COM
27490Sstevel@tonic-gate /* all states OK for AF_UNIX options ? */
27500Sstevel@tonic-gate if (!IS_SOCKET(tep) && tep->te_state != TS_IDLE &&
27510Sstevel@tonic-gate prim->type == T_SVR4_OPTMGMT_REQ) {
27520Sstevel@tonic-gate /*
27530Sstevel@tonic-gate * Broken TLI semantics that options can only be managed
27540Sstevel@tonic-gate * in TS_IDLE state. Needed for Sparc ABI test suite that
27550Sstevel@tonic-gate * tests this TLI (mis)feature using this device driver.
27560Sstevel@tonic-gate */
27570Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
27585240Snordmark SL_TRACE|SL_ERROR,
27595240Snordmark "tl_wput:T_SVR4_OPTMGMT_REQ:out of state, state=%d",
27605240Snordmark tep->te_state));
27610Sstevel@tonic-gate /*
27620Sstevel@tonic-gate * preallocate memory for T_ERROR_ACK
27630Sstevel@tonic-gate */
27640Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
27650Sstevel@tonic-gate if (! ackmp) {
27660Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack));
27670Sstevel@tonic-gate return;
27680Sstevel@tonic-gate }
27690Sstevel@tonic-gate
27700Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_SVR4_OPTMGMT_REQ);
27710Sstevel@tonic-gate freemsg(mp);
27720Sstevel@tonic-gate return;
27730Sstevel@tonic-gate }
27740Sstevel@tonic-gate
27750Sstevel@tonic-gate /*
27760Sstevel@tonic-gate * call common option management routine from drv/ip
27770Sstevel@tonic-gate */
27780Sstevel@tonic-gate if (prim->type == T_SVR4_OPTMGMT_REQ) {
277911042SErik.Nordmark@Sun.COM svr4_optcom_req(wq, mp, cr, &tl_opt_obj);
27800Sstevel@tonic-gate } else {
27810Sstevel@tonic-gate ASSERT(prim->type == T_OPTMGMT_REQ);
278211042SErik.Nordmark@Sun.COM tpi_optcom_req(wq, mp, cr, &tl_opt_obj);
27830Sstevel@tonic-gate }
27840Sstevel@tonic-gate }
27850Sstevel@tonic-gate
27860Sstevel@tonic-gate /*
27870Sstevel@tonic-gate * Handle T_conn_req - the driver part of connect().
27880Sstevel@tonic-gate * If TL_SET[U]CRED generate the credentials options.
27890Sstevel@tonic-gate * If this is a socket pass through options unmodified.
27900Sstevel@tonic-gate * For sockets generate the T_CONN_CON here instead of
27910Sstevel@tonic-gate * waiting for the T_CONN_RES.
27920Sstevel@tonic-gate */
27930Sstevel@tonic-gate static void
27940Sstevel@tonic-gate tl_conn_req(queue_t *wq, mblk_t *mp)
27950Sstevel@tonic-gate {
27960Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
27970Sstevel@tonic-gate struct T_conn_req *creq = (struct T_conn_req *)mp->b_rptr;
27980Sstevel@tonic-gate ssize_t msz = MBLKL(mp);
27990Sstevel@tonic-gate t_scalar_t alen, aoff, olen, ooff, err = 0;
28000Sstevel@tonic-gate tl_endpt_t *peer_tep = NULL;
28010Sstevel@tonic-gate mblk_t *ackmp;
28020Sstevel@tonic-gate mblk_t *dimp;
28030Sstevel@tonic-gate struct T_discon_ind *di;
28040Sstevel@tonic-gate soux_addr_t ux_addr;
28050Sstevel@tonic-gate tl_addr_t dst;
28060Sstevel@tonic-gate
28070Sstevel@tonic-gate ASSERT(IS_COTS(tep));
28080Sstevel@tonic-gate
28090Sstevel@tonic-gate if (tep->te_closing) {
28100Sstevel@tonic-gate freemsg(mp);
28110Sstevel@tonic-gate return;
28120Sstevel@tonic-gate }
28130Sstevel@tonic-gate
28140Sstevel@tonic-gate /*
28150Sstevel@tonic-gate * preallocate memory for:
28160Sstevel@tonic-gate * 1. max of T_ERROR_ACK and T_OK_ACK
28170Sstevel@tonic-gate * ==> known max T_ERROR_ACK
28180Sstevel@tonic-gate * 2. max of T_DISCON_IND and T_CONN_IND
28190Sstevel@tonic-gate */
28200Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
28210Sstevel@tonic-gate if (! ackmp) {
28220Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack));
28230Sstevel@tonic-gate return;
28240Sstevel@tonic-gate }
28250Sstevel@tonic-gate /*
28260Sstevel@tonic-gate * memory committed for T_OK_ACK/T_ERROR_ACK now
28270Sstevel@tonic-gate * will be committed for T_DISCON_IND/T_CONN_IND later
28280Sstevel@tonic-gate */
28290Sstevel@tonic-gate
28300Sstevel@tonic-gate if (tep->te_state != TS_IDLE) {
28310Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
28325240Snordmark SL_TRACE|SL_ERROR,
28335240Snordmark "tl_wput:T_CONN_REQ:out of state, state=%d",
28345240Snordmark tep->te_state));
28350Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ);
28360Sstevel@tonic-gate freemsg(mp);
28370Sstevel@tonic-gate return;
28380Sstevel@tonic-gate }
28390Sstevel@tonic-gate
28400Sstevel@tonic-gate /*
28410Sstevel@tonic-gate * validate the message
28420Sstevel@tonic-gate * Note: dereference fields in struct inside message only
28430Sstevel@tonic-gate * after validating the message length.
28440Sstevel@tonic-gate */
28450Sstevel@tonic-gate if (msz < sizeof (struct T_conn_req)) {
28460Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
28475240Snordmark "tl_conn_req:invalid message length"));
28480Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
28490Sstevel@tonic-gate freemsg(mp);
28500Sstevel@tonic-gate return;
28510Sstevel@tonic-gate }
28520Sstevel@tonic-gate alen = creq->DEST_length;
28530Sstevel@tonic-gate aoff = creq->DEST_offset;
28540Sstevel@tonic-gate olen = creq->OPT_length;
28550Sstevel@tonic-gate ooff = creq->OPT_offset;
28560Sstevel@tonic-gate if (olen == 0)
28570Sstevel@tonic-gate ooff = 0;
28580Sstevel@tonic-gate
28590Sstevel@tonic-gate if (IS_SOCKET(tep)) {
28600Sstevel@tonic-gate if ((alen != TL_SOUX_ADDRLEN) ||
28610Sstevel@tonic-gate (aoff < 0) ||
28620Sstevel@tonic-gate (aoff + alen > msz) ||
28630Sstevel@tonic-gate (alen > msz - sizeof (struct T_conn_req))) {
28640Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
28650Sstevel@tonic-gate 1, SL_TRACE|SL_ERROR,
28660Sstevel@tonic-gate "tl_conn_req: invalid socket addr"));
28670Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
28680Sstevel@tonic-gate freemsg(mp);
28690Sstevel@tonic-gate return;
28700Sstevel@tonic-gate }
28710Sstevel@tonic-gate bcopy(mp->b_rptr + aoff, &ux_addr, TL_SOUX_ADDRLEN);
28720Sstevel@tonic-gate if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) &&
28730Sstevel@tonic-gate (ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) {
28740Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
28755240Snordmark 1, SL_TRACE|SL_ERROR,
28765240Snordmark "tl_conn_req: invalid socket magic"));
28770Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
28780Sstevel@tonic-gate freemsg(mp);
28790Sstevel@tonic-gate return;
28800Sstevel@tonic-gate }
28810Sstevel@tonic-gate } else {
28820Sstevel@tonic-gate if ((alen > 0 && ((aoff + alen) > msz || aoff + alen < 0)) ||
28830Sstevel@tonic-gate (olen > 0 && ((ssize_t)(ooff + olen) > msz ||
28845240Snordmark ooff + olen < 0)) ||
28850Sstevel@tonic-gate olen < 0 || ooff < 0) {
28860Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
28875240Snordmark SL_TRACE|SL_ERROR,
28885240Snordmark "tl_conn_req:invalid message"));
28890Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
28900Sstevel@tonic-gate freemsg(mp);
28910Sstevel@tonic-gate return;
28920Sstevel@tonic-gate }
28930Sstevel@tonic-gate
28940Sstevel@tonic-gate if (alen <= 0 || aoff < 0 ||
28950Sstevel@tonic-gate (ssize_t)alen > msz - sizeof (struct T_conn_req)) {
28960Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
28970Sstevel@tonic-gate SL_TRACE|SL_ERROR,
28980Sstevel@tonic-gate "tl_conn_req:bad addr in message, "
28990Sstevel@tonic-gate "alen=%d, msz=%ld",
29000Sstevel@tonic-gate alen, msz));
29010Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADADDR, 0, T_CONN_REQ);
29020Sstevel@tonic-gate freemsg(mp);
29030Sstevel@tonic-gate return;
29040Sstevel@tonic-gate }
29050Sstevel@tonic-gate #ifdef DEBUG
29060Sstevel@tonic-gate /*
29070Sstevel@tonic-gate * Mild form of ASSERT()ion to detect broken TPI apps.
29080Sstevel@tonic-gate * if (! assertion)
29090Sstevel@tonic-gate * log warning;
29100Sstevel@tonic-gate */
29110Sstevel@tonic-gate if (! (aoff >= (t_scalar_t)sizeof (struct T_conn_req))) {
29120Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3,
29135240Snordmark SL_TRACE|SL_ERROR,
29145240Snordmark "tl_conn_req: addr overlaps TPI message"));
29150Sstevel@tonic-gate }
29160Sstevel@tonic-gate #endif
29170Sstevel@tonic-gate if (olen) {
29180Sstevel@tonic-gate /*
29190Sstevel@tonic-gate * no opts in connect req
29200Sstevel@tonic-gate * supported in this provider except for sockets.
29210Sstevel@tonic-gate */
29220Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
29235240Snordmark SL_TRACE|SL_ERROR,
29245240Snordmark "tl_conn_req:options not supported "
29255240Snordmark "in message"));
29260Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADOPT, 0, T_CONN_REQ);
29270Sstevel@tonic-gate freemsg(mp);
29280Sstevel@tonic-gate return;
29290Sstevel@tonic-gate }
29300Sstevel@tonic-gate }
29310Sstevel@tonic-gate
29320Sstevel@tonic-gate /*
29330Sstevel@tonic-gate * Prevent tep from closing on us.
29340Sstevel@tonic-gate */
29350Sstevel@tonic-gate if (! tl_noclose(tep)) {
29360Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
29375240Snordmark "tl_conn_req:endpoint is closing"));
29380Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ);
29390Sstevel@tonic-gate freemsg(mp);
29400Sstevel@tonic-gate return;
29410Sstevel@tonic-gate }
29420Sstevel@tonic-gate
29430Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_CONN_REQ, tep->te_state);
29440Sstevel@tonic-gate /*
29450Sstevel@tonic-gate * get endpoint to connect to
29460Sstevel@tonic-gate * check that peer with DEST addr is bound to addr
29470Sstevel@tonic-gate * and has CONIND_number > 0
29480Sstevel@tonic-gate */
29490Sstevel@tonic-gate dst.ta_alen = alen;
29500Sstevel@tonic-gate dst.ta_abuf = mp->b_rptr + aoff;
29510Sstevel@tonic-gate dst.ta_zoneid = tep->te_zoneid;
29520Sstevel@tonic-gate
29530Sstevel@tonic-gate /*
29540Sstevel@tonic-gate * Verify if remote addr is in use
29550Sstevel@tonic-gate */
29560Sstevel@tonic-gate peer_tep = (IS_SOCKET(tep) ?
29570Sstevel@tonic-gate tl_sock_find_peer(tep, &ux_addr) :
29580Sstevel@tonic-gate tl_find_peer(tep, &dst));
29590Sstevel@tonic-gate
29600Sstevel@tonic-gate if (peer_tep == NULL) {
29610Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
29625240Snordmark "tl_conn_req:no one at connect address"));
29630Sstevel@tonic-gate err = ECONNREFUSED;
29640Sstevel@tonic-gate } else if (peer_tep->te_nicon >= peer_tep->te_qlen) {
29650Sstevel@tonic-gate /*
29660Sstevel@tonic-gate 		 * validate that the number of incoming connections has
29670Sstevel@tonic-gate 		 * not reached capacity on the destination endpoint
29680Sstevel@tonic-gate */
29690Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE,
29705240Snordmark "tl_conn_req: qlen overflow connection refused"));
29710Sstevel@tonic-gate err = ECONNREFUSED;
29720Sstevel@tonic-gate }
29730Sstevel@tonic-gate
29740Sstevel@tonic-gate /*
29753661Sakolb * Send T_DISCON_IND in case of error
29760Sstevel@tonic-gate */
29770Sstevel@tonic-gate if (err != 0) {
29780Sstevel@tonic-gate if (peer_tep != NULL)
29790Sstevel@tonic-gate tl_refrele(peer_tep);
29800Sstevel@tonic-gate /* We are still expected to send T_OK_ACK */
29810Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
29820Sstevel@tonic-gate tl_ok_ack(tep->te_wq, ackmp, T_CONN_REQ);
29830Sstevel@tonic-gate tl_closeok(tep);
29840Sstevel@tonic-gate dimp = tpi_ack_alloc(mp, sizeof (struct T_discon_ind),
29850Sstevel@tonic-gate M_PROTO, T_DISCON_IND);
29860Sstevel@tonic-gate if (dimp == NULL) {
29870Sstevel@tonic-gate tl_merror(wq, NULL, ENOSR);
29880Sstevel@tonic-gate return;
29890Sstevel@tonic-gate }
29900Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr;
29910Sstevel@tonic-gate di->DISCON_reason = err;
29920Sstevel@tonic-gate di->SEQ_number = BADSEQNUM;
29930Sstevel@tonic-gate
29940Sstevel@tonic-gate tep->te_state = TS_IDLE;
29950Sstevel@tonic-gate /*
29960Sstevel@tonic-gate * send T_DISCON_IND message
29970Sstevel@tonic-gate */
29980Sstevel@tonic-gate putnext(tep->te_rq, dimp);
29990Sstevel@tonic-gate return;
30000Sstevel@tonic-gate }
30010Sstevel@tonic-gate
30020Sstevel@tonic-gate ASSERT(IS_COTS(peer_tep));
30030Sstevel@tonic-gate
30040Sstevel@tonic-gate /*
30050Sstevel@tonic-gate * Found the listener. At this point processing will continue on
30060Sstevel@tonic-gate * listener serializer. Close of the endpoint should be blocked while we
30070Sstevel@tonic-gate * switch serializers.
30080Sstevel@tonic-gate */
30090Sstevel@tonic-gate tl_serializer_refhold(peer_tep->te_ser);
30100Sstevel@tonic-gate tl_serializer_refrele(tep->te_ser);
30110Sstevel@tonic-gate tep->te_ser = peer_tep->te_ser;
30120Sstevel@tonic-gate ASSERT(tep->te_oconp == NULL);
30130Sstevel@tonic-gate tep->te_oconp = peer_tep;
30140Sstevel@tonic-gate
30150Sstevel@tonic-gate /*
30160Sstevel@tonic-gate * It is safe to close now. Close may continue on listener serializer.
30170Sstevel@tonic-gate */
30180Sstevel@tonic-gate tl_closeok(tep);
30190Sstevel@tonic-gate
30200Sstevel@tonic-gate /*
30210Sstevel@tonic-gate * Pass ackmp to tl_conn_req_ser. Note that mp->b_cont may contain user
30220Sstevel@tonic-gate * data, so we link mp to ackmp.
30230Sstevel@tonic-gate */
30240Sstevel@tonic-gate ackmp->b_cont = mp;
30250Sstevel@tonic-gate mp = ackmp;
30260Sstevel@tonic-gate
30270Sstevel@tonic-gate tl_refhold(tep);
30280Sstevel@tonic-gate tl_serializer_enter(tep, tl_conn_req_ser, mp);
30290Sstevel@tonic-gate }
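
/*
 * Illustrative sketch (not part of the driver): the TLI client call that
 * produces the T_CONN_REQ handled above.  Hedged example -- it assumes a
 * listener is already bound at the made-up address "demo.service".  The
 * options netbuf is left empty because, for non-socket endpoints, a
 * non-zero OPT_length is rejected above with TBADOPT.
 *
 *	#include <tiuser.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	char dest[] = "demo.service";
 *	struct t_call call;
 *	int fd;
 *
 *	if ((fd = t_open("/dev/ticotsord", O_RDWR, NULL)) < 0)
 *		t_error("t_open");
 *	if (t_bind(fd, NULL, NULL) < 0)
 *		t_error("t_bind");
 *	(void) memset(&call, 0, sizeof (call));
 *	call.addr.buf = dest;
 *	call.addr.len = sizeof (dest);
 *	if (t_connect(fd, &call, NULL) < 0)
 *		t_error("t_connect");
 *
 * In synchronous mode t_connect() then blocks until the listener accepts
 * (T_CONN_CON), or fails if the T_DISCON_IND path above is taken, e.g.
 * when nobody is bound at the address or the listener's backlog is full.
 */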
30300Sstevel@tonic-gate
30310Sstevel@tonic-gate /*
30320Sstevel@tonic-gate * Finish T_CONN_REQ processing on listener serializer.
30330Sstevel@tonic-gate */
30340Sstevel@tonic-gate static void
30350Sstevel@tonic-gate tl_conn_req_ser(mblk_t *mp, tl_endpt_t *tep)
30360Sstevel@tonic-gate {
30370Sstevel@tonic-gate queue_t *wq;
30380Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_oconp;
30390Sstevel@tonic-gate mblk_t *confmp, *cimp, *indmp;
30400Sstevel@tonic-gate void *opts = NULL;
30410Sstevel@tonic-gate mblk_t *ackmp = mp;
30420Sstevel@tonic-gate struct T_conn_req *creq = (struct T_conn_req *)mp->b_cont->b_rptr;
30430Sstevel@tonic-gate struct T_conn_ind *ci;
30440Sstevel@tonic-gate tl_icon_t *tip;
30450Sstevel@tonic-gate void *addr_startp;
30460Sstevel@tonic-gate t_scalar_t olen = creq->OPT_length;
30470Sstevel@tonic-gate t_scalar_t ooff = creq->OPT_offset;
30480Sstevel@tonic-gate size_t ci_msz;
30490Sstevel@tonic-gate size_t size;
305011134SCasper.Dik@Sun.COM cred_t *cr = NULL;
305111134SCasper.Dik@Sun.COM pid_t cpid;
30520Sstevel@tonic-gate
30530Sstevel@tonic-gate if (tep->te_closing) {
30540Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
30550Sstevel@tonic-gate tl_serializer_exit(tep);
30560Sstevel@tonic-gate tl_refrele(tep);
30570Sstevel@tonic-gate freemsg(mp);
30580Sstevel@tonic-gate return;
30590Sstevel@tonic-gate }
30600Sstevel@tonic-gate
30610Sstevel@tonic-gate wq = tep->te_wq;
30620Sstevel@tonic-gate tep->te_flag |= TL_EAGER;
30630Sstevel@tonic-gate
30640Sstevel@tonic-gate /*
30650Sstevel@tonic-gate * Extract preallocated ackmp from mp.
30660Sstevel@tonic-gate */
30670Sstevel@tonic-gate mp = mp->b_cont;
30680Sstevel@tonic-gate ackmp->b_cont = NULL;
30690Sstevel@tonic-gate
30700Sstevel@tonic-gate if (olen == 0)
30710Sstevel@tonic-gate ooff = 0;
30720Sstevel@tonic-gate
30730Sstevel@tonic-gate if (peer_tep->te_closing ||
30740Sstevel@tonic-gate !((peer_tep->te_state == TS_IDLE) ||
30755240Snordmark (peer_tep->te_state == TS_WRES_CIND))) {
30763661Sakolb (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
30775240Snordmark "tl_conn_req:peer in bad state (%d)",
30785240Snordmark peer_tep->te_state));
30790Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
30800Sstevel@tonic-gate tl_error_ack(wq, mp, TSYSERR, ECONNREFUSED, T_CONN_REQ);
30810Sstevel@tonic-gate freemsg(ackmp);
30820Sstevel@tonic-gate tl_serializer_exit(tep);
30830Sstevel@tonic-gate tl_refrele(tep);
30840Sstevel@tonic-gate return;
30850Sstevel@tonic-gate }
30860Sstevel@tonic-gate
30870Sstevel@tonic-gate /*
30880Sstevel@tonic-gate * preallocate now for T_DISCON_IND or T_CONN_IND
30890Sstevel@tonic-gate */
30900Sstevel@tonic-gate /*
30910Sstevel@tonic-gate * calculate length of T_CONN_IND message
30920Sstevel@tonic-gate */
309311134SCasper.Dik@Sun.COM if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED)) {
309411134SCasper.Dik@Sun.COM cr = msg_getcred(mp, &cpid);
309511134SCasper.Dik@Sun.COM ASSERT(cr != NULL);
309611134SCasper.Dik@Sun.COM if (peer_tep->te_flag & TL_SETCRED) {
309711134SCasper.Dik@Sun.COM ooff = 0;
309811134SCasper.Dik@Sun.COM olen = (t_scalar_t) sizeof (struct opthdr) +
309911134SCasper.Dik@Sun.COM OPTLEN(sizeof (tl_credopt_t));
310011134SCasper.Dik@Sun.COM /* 1 option only */
310111134SCasper.Dik@Sun.COM } else {
310211134SCasper.Dik@Sun.COM ooff = 0;
310311134SCasper.Dik@Sun.COM olen = (t_scalar_t)sizeof (struct opthdr) +
310411134SCasper.Dik@Sun.COM OPTLEN(ucredminsize(cr));
310511134SCasper.Dik@Sun.COM /* 1 option only */
310611134SCasper.Dik@Sun.COM }
31070Sstevel@tonic-gate }
31080Sstevel@tonic-gate ci_msz = sizeof (struct T_conn_ind) + tep->te_alen;
31090Sstevel@tonic-gate ci_msz = T_ALIGN(ci_msz) + olen;
31100Sstevel@tonic-gate size = max(ci_msz, sizeof (struct T_discon_ind));
31110Sstevel@tonic-gate
31120Sstevel@tonic-gate /*
31130Sstevel@tonic-gate * Save options from mp - we'll need them for T_CONN_IND.
31140Sstevel@tonic-gate */
31150Sstevel@tonic-gate if (ooff != 0) {
31160Sstevel@tonic-gate opts = kmem_alloc(olen, KM_NOSLEEP);
31170Sstevel@tonic-gate if (opts == NULL) {
31180Sstevel@tonic-gate /*
31190Sstevel@tonic-gate * roll back state changes
31200Sstevel@tonic-gate */
31210Sstevel@tonic-gate tep->te_state = TS_IDLE;
31220Sstevel@tonic-gate tl_memrecover(wq, mp, size);
31230Sstevel@tonic-gate freemsg(ackmp);
31240Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
31250Sstevel@tonic-gate tl_serializer_exit(tep);
31260Sstevel@tonic-gate tl_refrele(tep);
31270Sstevel@tonic-gate return;
31280Sstevel@tonic-gate }
31290Sstevel@tonic-gate /* Copy options to a temp buffer */
31300Sstevel@tonic-gate bcopy(mp->b_rptr + ooff, opts, olen);
31310Sstevel@tonic-gate }
31320Sstevel@tonic-gate
31330Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect) {
31340Sstevel@tonic-gate /*
31350Sstevel@tonic-gate * Generate a T_CONN_CON that has the identical address
31360Sstevel@tonic-gate * (and options) as the T_CONN_REQ.
31370Sstevel@tonic-gate * NOTE: assumes that the T_conn_req and T_conn_con structures
31380Sstevel@tonic-gate * are isomorphic.
31390Sstevel@tonic-gate */
31400Sstevel@tonic-gate confmp = copyb(mp);
31410Sstevel@tonic-gate if (! confmp) {
31420Sstevel@tonic-gate /*
31430Sstevel@tonic-gate * roll back state changes
31440Sstevel@tonic-gate */
31450Sstevel@tonic-gate tep->te_state = TS_IDLE;
31460Sstevel@tonic-gate tl_memrecover(wq, mp, mp->b_wptr - mp->b_rptr);
31470Sstevel@tonic-gate freemsg(ackmp);
31480Sstevel@tonic-gate if (opts != NULL)
31490Sstevel@tonic-gate kmem_free(opts, olen);
31500Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
31510Sstevel@tonic-gate tl_serializer_exit(tep);
31520Sstevel@tonic-gate tl_refrele(tep);
31530Sstevel@tonic-gate return;
31540Sstevel@tonic-gate }
31550Sstevel@tonic-gate ((struct T_conn_con *)(confmp->b_rptr))->PRIM_type =
31565240Snordmark T_CONN_CON;
31570Sstevel@tonic-gate } else {
31580Sstevel@tonic-gate confmp = NULL;
31590Sstevel@tonic-gate }
31600Sstevel@tonic-gate if ((indmp = reallocb(mp, size, 0)) == NULL) {
31610Sstevel@tonic-gate /*
31620Sstevel@tonic-gate * roll back state changes
31630Sstevel@tonic-gate */
31640Sstevel@tonic-gate tep->te_state = TS_IDLE;
31650Sstevel@tonic-gate tl_memrecover(wq, mp, size);
31660Sstevel@tonic-gate freemsg(ackmp);
31670Sstevel@tonic-gate if (opts != NULL)
31680Sstevel@tonic-gate kmem_free(opts, olen);
31690Sstevel@tonic-gate freemsg(confmp);
31700Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
31710Sstevel@tonic-gate tl_serializer_exit(tep);
31720Sstevel@tonic-gate tl_refrele(tep);
31730Sstevel@tonic-gate return;
31740Sstevel@tonic-gate }
31750Sstevel@tonic-gate
31760Sstevel@tonic-gate tip = kmem_zalloc(sizeof (*tip), KM_NOSLEEP);
31770Sstevel@tonic-gate if (tip == NULL) {
31780Sstevel@tonic-gate /*
31790Sstevel@tonic-gate * roll back state changes
31800Sstevel@tonic-gate */
31810Sstevel@tonic-gate tep->te_state = TS_IDLE;
31820Sstevel@tonic-gate tl_memrecover(wq, indmp, sizeof (*tip));
31830Sstevel@tonic-gate freemsg(ackmp);
31840Sstevel@tonic-gate if (opts != NULL)
31850Sstevel@tonic-gate kmem_free(opts, olen);
31860Sstevel@tonic-gate freemsg(confmp);
31870Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
31880Sstevel@tonic-gate tl_serializer_exit(tep);
31890Sstevel@tonic-gate tl_refrele(tep);
31900Sstevel@tonic-gate return;
31910Sstevel@tonic-gate }
31920Sstevel@tonic-gate tip->ti_mp = NULL;
31930Sstevel@tonic-gate
31940Sstevel@tonic-gate /*
31950Sstevel@tonic-gate * memory is now committed for T_DISCON_IND/T_CONN_IND/T_CONN_CON
31960Sstevel@tonic-gate * and tl_icon_t cell.
31970Sstevel@tonic-gate */
31980Sstevel@tonic-gate
31990Sstevel@tonic-gate /*
32000Sstevel@tonic-gate 	 * ack validity of the request; the peer credential is attached to the T_CONN_CON below.
32010Sstevel@tonic-gate */
32020Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
32030Sstevel@tonic-gate
32040Sstevel@tonic-gate if (peer_tep != NULL && peer_tep->te_credp != NULL &&
32050Sstevel@tonic-gate confmp != NULL) {
32068778SErik.Nordmark@Sun.COM mblk_setcred(confmp, peer_tep->te_credp, peer_tep->te_cpid);
32070Sstevel@tonic-gate }
32080Sstevel@tonic-gate
32090Sstevel@tonic-gate tl_ok_ack(wq, ackmp, T_CONN_REQ);
32100Sstevel@tonic-gate
32110Sstevel@tonic-gate /*
32120Sstevel@tonic-gate * prepare message to send T_CONN_IND
32130Sstevel@tonic-gate */
32140Sstevel@tonic-gate /*
32150Sstevel@tonic-gate * allocate the message - original data blocks retained
32160Sstevel@tonic-gate * in the returned mblk
32170Sstevel@tonic-gate */
32180Sstevel@tonic-gate cimp = tl_resizemp(indmp, size);
32190Sstevel@tonic-gate if (! cimp) {
32200Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
32215240Snordmark "tl_conn_req:con_ind:allocb failure"));
32220Sstevel@tonic-gate tl_merror(wq, indmp, ENOMEM);
32230Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
32240Sstevel@tonic-gate tl_serializer_exit(tep);
32250Sstevel@tonic-gate tl_refrele(tep);
32260Sstevel@tonic-gate if (opts != NULL)
32270Sstevel@tonic-gate kmem_free(opts, olen);
32280Sstevel@tonic-gate freemsg(confmp);
32290Sstevel@tonic-gate ASSERT(tip->ti_mp == NULL);
32300Sstevel@tonic-gate kmem_free(tip, sizeof (*tip));
32310Sstevel@tonic-gate return;
32320Sstevel@tonic-gate }
32330Sstevel@tonic-gate
32340Sstevel@tonic-gate DB_TYPE(cimp) = M_PROTO;
32350Sstevel@tonic-gate ci = (struct T_conn_ind *)cimp->b_rptr;
32360Sstevel@tonic-gate ci->PRIM_type = T_CONN_IND;
32370Sstevel@tonic-gate ci->SRC_offset = (t_scalar_t)sizeof (struct T_conn_ind);
32380Sstevel@tonic-gate ci->SRC_length = tep->te_alen;
32390Sstevel@tonic-gate ci->SEQ_number = tep->te_seqno;
32400Sstevel@tonic-gate
32410Sstevel@tonic-gate addr_startp = cimp->b_rptr + ci->SRC_offset;
32420Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen);
32430Sstevel@tonic-gate if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED)) {
32448778SErik.Nordmark@Sun.COM
32450Sstevel@tonic-gate ci->OPT_offset = (t_scalar_t)T_ALIGN(ci->SRC_offset +
32465240Snordmark ci->SRC_length);
32470Sstevel@tonic-gate ci->OPT_length = olen; /* because only 1 option */
32480Sstevel@tonic-gate tl_fill_option(cimp->b_rptr + ci->OPT_offset,
32498778SErik.Nordmark@Sun.COM cr, cpid,
32505240Snordmark peer_tep->te_flag, peer_tep->te_credp);
32510Sstevel@tonic-gate } else if (ooff != 0) {
32520Sstevel@tonic-gate /* Copy option from T_CONN_REQ */
32530Sstevel@tonic-gate ci->OPT_offset = (t_scalar_t)T_ALIGN(ci->SRC_offset +
32545240Snordmark ci->SRC_length);
32550Sstevel@tonic-gate ci->OPT_length = olen;
32560Sstevel@tonic-gate ASSERT(opts != NULL);
32570Sstevel@tonic-gate bcopy(opts, (void *)((uintptr_t)ci + ci->OPT_offset), olen);
32580Sstevel@tonic-gate } else {
32590Sstevel@tonic-gate ci->OPT_offset = 0;
32600Sstevel@tonic-gate ci->OPT_length = 0;
32610Sstevel@tonic-gate }
32620Sstevel@tonic-gate if (opts != NULL)
32630Sstevel@tonic-gate kmem_free(opts, olen);
32640Sstevel@tonic-gate
32650Sstevel@tonic-gate /*
32660Sstevel@tonic-gate * register connection request with server peer
32670Sstevel@tonic-gate * append to list of incoming connections
32680Sstevel@tonic-gate * increment references for both peer_tep and tep: peer_tep is placed on
32690Sstevel@tonic-gate 	 * te_oconp and tep is placed on the listener's queue.
32700Sstevel@tonic-gate */
32710Sstevel@tonic-gate tip->ti_tep = tep;
32720Sstevel@tonic-gate tip->ti_seqno = tep->te_seqno;
32730Sstevel@tonic-gate list_insert_tail(&peer_tep->te_iconp, tip);
32740Sstevel@tonic-gate peer_tep->te_nicon++;
32750Sstevel@tonic-gate
32760Sstevel@tonic-gate peer_tep->te_state = NEXTSTATE(TE_CONN_IND, peer_tep->te_state);
32770Sstevel@tonic-gate /*
32780Sstevel@tonic-gate * send the T_CONN_IND message
32790Sstevel@tonic-gate */
32800Sstevel@tonic-gate putnext(peer_tep->te_rq, cimp);
32810Sstevel@tonic-gate
32820Sstevel@tonic-gate /*
32830Sstevel@tonic-gate * Send a T_CONN_CON message for sockets.
32840Sstevel@tonic-gate * Disable the queues until we have reached the correct state!
32850Sstevel@tonic-gate */
32860Sstevel@tonic-gate if (confmp != NULL) {
32870Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_CONN_CON, tep->te_state);
32880Sstevel@tonic-gate noenable(wq);
32890Sstevel@tonic-gate putnext(tep->te_rq, confmp);
32900Sstevel@tonic-gate }
32910Sstevel@tonic-gate /*
32920Sstevel@tonic-gate 	 * Now we need to increment the tep reference because tep is referenced
32930Sstevel@tonic-gate 	 * by the server's list of pending connections. We also need to decrement
32940Sstevel@tonic-gate 	 * the reference before exiting the serializer. The two operations cancel
32950Sstevel@tonic-gate 	 * each other out, so we don't modify the reference at all.
32960Sstevel@tonic-gate */
32970Sstevel@tonic-gate ASSERT(tep->te_refcnt >= 2);
32980Sstevel@tonic-gate ASSERT(peer_tep->te_refcnt >= 2);
32990Sstevel@tonic-gate tl_serializer_exit(tep);
33000Sstevel@tonic-gate }
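
/*
 * Illustrative sketch (not part of the driver): the early-connect
 * behavior generated above, seen from an AF_UNIX client.  Hedged
 * example -- it assumes the peer has already bound "/tmp/demo.sock" and
 * called listen().  Because sockets take the confmp path (unless
 * tl_disable_early_connect is set), the T_CONN_CON is sent while the
 * T_CONN_REQ is processed, so connect() returns success before the
 * server ever calls accept().
 *
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *
 *	struct sockaddr_un sa;
 *	int s;
 *
 *	(void) memset(&sa, 0, sizeof (sa));
 *	sa.sun_family = AF_UNIX;
 *	(void) strlcpy(sa.sun_path, "/tmp/demo.sock", sizeof (sa.sun_path));
 *	s = socket(AF_UNIX, SOCK_STREAM, 0);
 *	if (connect(s, (struct sockaddr *)&sa, sizeof (sa)) != 0)
 *		perror("connect");
 *
 * If the listener's backlog (CONIND_number) is already full, the qlen
 * overflow check in tl_conn_req() generates a T_DISCON_IND instead and
 * connect() fails with ECONNREFUSED.
 */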
33010Sstevel@tonic-gate
33020Sstevel@tonic-gate
33030Sstevel@tonic-gate
33040Sstevel@tonic-gate /*
33050Sstevel@tonic-gate * Handle T_conn_res on the listener stream. Called on the listener
33060Sstevel@tonic-gate * serializer.
33070Sstevel@tonic-gate * No one accesses the acceptor at this point, so it is safe to modify it.
33080Sstevel@tonic-gate * Switch the eager's serializer to the acceptor's.
33090Sstevel@tonic-gate *
33100Sstevel@tonic-gate * If TL_SET[U]CRED is set, generate the credentials options.
33110Sstevel@tonic-gate * For sockets tl_conn_req has already generated the T_CONN_CON.
33130Sstevel@tonic-gate */
33140Sstevel@tonic-gate static void
33150Sstevel@tonic-gate tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
33160Sstevel@tonic-gate {
33170Sstevel@tonic-gate queue_t *wq;
33180Sstevel@tonic-gate struct T_conn_res *cres = (struct T_conn_res *)mp->b_rptr;
33190Sstevel@tonic-gate ssize_t msz = MBLKL(mp);
33200Sstevel@tonic-gate t_scalar_t olen, ooff, err = 0;
33210Sstevel@tonic-gate t_scalar_t prim = cres->PRIM_type;
33220Sstevel@tonic-gate uchar_t *addr_startp;
33230Sstevel@tonic-gate tl_endpt_t *acc_ep = NULL, *cl_ep = NULL;
33240Sstevel@tonic-gate tl_icon_t *tip;
33250Sstevel@tonic-gate size_t size;
33260Sstevel@tonic-gate mblk_t *ackmp, *respmp;
33270Sstevel@tonic-gate mblk_t *dimp, *ccmp = NULL;
33280Sstevel@tonic-gate struct T_discon_ind *di;
33290Sstevel@tonic-gate struct T_conn_con *cc;
33300Sstevel@tonic-gate boolean_t client_noclose_set = B_FALSE;
33310Sstevel@tonic-gate boolean_t switch_client_serializer = B_TRUE;
33320Sstevel@tonic-gate
33330Sstevel@tonic-gate ASSERT(IS_COTS(tep));
33340Sstevel@tonic-gate
33350Sstevel@tonic-gate if (tep->te_closing) {
33360Sstevel@tonic-gate freemsg(mp);
33370Sstevel@tonic-gate return;
33380Sstevel@tonic-gate }
33390Sstevel@tonic-gate
33400Sstevel@tonic-gate wq = tep->te_wq;
33410Sstevel@tonic-gate
33420Sstevel@tonic-gate /*
33430Sstevel@tonic-gate * preallocate memory for:
33440Sstevel@tonic-gate * 1. max of T_ERROR_ACK and T_OK_ACK
33450Sstevel@tonic-gate * ==> known max T_ERROR_ACK
33460Sstevel@tonic-gate * 2. max of T_DISCON_IND and T_CONN_CON
33470Sstevel@tonic-gate */
33480Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
33490Sstevel@tonic-gate if (! ackmp) {
33500Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack));
33510Sstevel@tonic-gate return;
33520Sstevel@tonic-gate }
33530Sstevel@tonic-gate /*
33540Sstevel@tonic-gate * memory committed for T_OK_ACK/T_ERROR_ACK now
33550Sstevel@tonic-gate * will be committed for T_DISCON_IND/T_CONN_CON later
33560Sstevel@tonic-gate */
33570Sstevel@tonic-gate
33580Sstevel@tonic-gate
33590Sstevel@tonic-gate ASSERT(prim == T_CONN_RES || prim == O_T_CONN_RES);
33600Sstevel@tonic-gate
33610Sstevel@tonic-gate /*
33620Sstevel@tonic-gate * validate state
33630Sstevel@tonic-gate */
33640Sstevel@tonic-gate if (tep->te_state != TS_WRES_CIND) {
33650Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
33665240Snordmark SL_TRACE|SL_ERROR,
33675240Snordmark "tl_wput:T_CONN_RES:out of state, state=%d",
33685240Snordmark tep->te_state));
33690Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim);
33700Sstevel@tonic-gate freemsg(mp);
33710Sstevel@tonic-gate return;
33720Sstevel@tonic-gate }
33730Sstevel@tonic-gate
33740Sstevel@tonic-gate /*
33750Sstevel@tonic-gate * validate the message
33760Sstevel@tonic-gate * Note: dereference fields in struct inside message only
33770Sstevel@tonic-gate * after validating the message length.
33780Sstevel@tonic-gate */
33790Sstevel@tonic-gate if (msz < sizeof (struct T_conn_res)) {
33800Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
33815240Snordmark "tl_conn_res:invalid message length"));
33820Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim);
33830Sstevel@tonic-gate freemsg(mp);
33840Sstevel@tonic-gate return;
33850Sstevel@tonic-gate }
33860Sstevel@tonic-gate olen = cres->OPT_length;
33870Sstevel@tonic-gate ooff = cres->OPT_offset;
33880Sstevel@tonic-gate if (((olen > 0) && ((ooff + olen) > msz))) {
33890Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
33905240Snordmark "tl_conn_res:invalid message"));
33910Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim);
33920Sstevel@tonic-gate freemsg(mp);
33930Sstevel@tonic-gate return;
33940Sstevel@tonic-gate }
33950Sstevel@tonic-gate if (olen) {
33960Sstevel@tonic-gate /*
33970Sstevel@tonic-gate * no opts in connect res
33980Sstevel@tonic-gate * supported in this provider
33990Sstevel@tonic-gate */
34000Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
34015240Snordmark "tl_conn_res:options not supported in message"));
34020Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADOPT, 0, prim);
34030Sstevel@tonic-gate freemsg(mp);
34040Sstevel@tonic-gate return;
34050Sstevel@tonic-gate }
34060Sstevel@tonic-gate
34070Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_CONN_RES, tep->te_state);
34080Sstevel@tonic-gate ASSERT(tep->te_state == TS_WACK_CRES);
34090Sstevel@tonic-gate
34100Sstevel@tonic-gate if (cres->SEQ_number < TL_MINOR_START &&
34115240Snordmark cres->SEQ_number >= BADSEQNUM) {
34120Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
34135240Snordmark "tl_conn_res:remote endpoint sequence number bad"));
34140Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
34150Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
34160Sstevel@tonic-gate freemsg(mp);
34170Sstevel@tonic-gate return;
34180Sstevel@tonic-gate }
34190Sstevel@tonic-gate
34200Sstevel@tonic-gate /*
34210Sstevel@tonic-gate * find accepting endpoint. Will have extra reference if found.
34220Sstevel@tonic-gate */
34230Sstevel@tonic-gate if (mod_hash_find_cb(tep->te_transport->tr_ai_hash,
34245240Snordmark (mod_hash_key_t)(uintptr_t)cres->ACCEPTOR_id,
34255240Snordmark (mod_hash_val_t *)&acc_ep, tl_find_callback) != 0) {
34260Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
34275240Snordmark "tl_conn_res:bad accepting endpoint"));
34280Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
34290Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADF, 0, prim);
34300Sstevel@tonic-gate freemsg(mp);
34310Sstevel@tonic-gate return;
34320Sstevel@tonic-gate }
34330Sstevel@tonic-gate
34340Sstevel@tonic-gate /*
34350Sstevel@tonic-gate * Prevent acceptor from closing.
34360Sstevel@tonic-gate */
34370Sstevel@tonic-gate if (! tl_noclose(acc_ep)) {
34380Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
34395240Snordmark "tl_conn_res:bad accepting endpoint"));
34400Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
34410Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADF, 0, prim);
34420Sstevel@tonic-gate tl_refrele(acc_ep);
34430Sstevel@tonic-gate freemsg(mp);
34440Sstevel@tonic-gate return;
34450Sstevel@tonic-gate }
34460Sstevel@tonic-gate
34470Sstevel@tonic-gate acc_ep->te_flag |= TL_ACCEPTOR;
34480Sstevel@tonic-gate
34490Sstevel@tonic-gate /*
34500Sstevel@tonic-gate 	 * validate that the accepting endpoint, if different from the
34510Sstevel@tonic-gate 	 * listening one, has an address bound (i.e. state is TS_IDLE)
34520Sstevel@tonic-gate * TROUBLE in XPG4 !!?
34530Sstevel@tonic-gate */
34540Sstevel@tonic-gate if ((tep != acc_ep) && (acc_ep->te_state != TS_IDLE)) {
34550Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
34565240Snordmark "tl_conn_res:accepting endpoint has no address bound,"
34575240Snordmark "state=%d", acc_ep->te_state));
34580Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
34590Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim);
34600Sstevel@tonic-gate freemsg(mp);
34610Sstevel@tonic-gate tl_closeok(acc_ep);
34620Sstevel@tonic-gate tl_refrele(acc_ep);
34630Sstevel@tonic-gate return;
34640Sstevel@tonic-gate }
34650Sstevel@tonic-gate
34660Sstevel@tonic-gate /*
34670Sstevel@tonic-gate 	 * validate that, if the accepting endpoint is the same as the
34680Sstevel@tonic-gate 	 * listening one, no other incoming connection is on the queue
34690Sstevel@tonic-gate */
34700Sstevel@tonic-gate
34710Sstevel@tonic-gate if ((tep == acc_ep) && (tep->te_nicon > 1)) {
34720Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
34735240Snordmark "tl_conn_res: > 1 conn_ind on listener-acceptor"));
34740Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
34750Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADF, 0, prim);
34760Sstevel@tonic-gate freemsg(mp);
34770Sstevel@tonic-gate tl_closeok(acc_ep);
34780Sstevel@tonic-gate tl_refrele(acc_ep);
34790Sstevel@tonic-gate return;
34800Sstevel@tonic-gate }
34810Sstevel@tonic-gate
34820Sstevel@tonic-gate /*
34830Sstevel@tonic-gate 	 * Mark for deletion the entry corresponding to the client
34840Sstevel@tonic-gate 	 * on the listener's list of pending connections: search the
34850Sstevel@tonic-gate 	 * list to see if the client is among the connections
34860Sstevel@tonic-gate 	 * recorded by the listener.
34870Sstevel@tonic-gate */
34880Sstevel@tonic-gate tip = tl_icon_find(tep, cres->SEQ_number);
34890Sstevel@tonic-gate if (tip == NULL) {
34900Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
34915240Snordmark "tl_conn_res:no client in listener list"));
34920Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
34930Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
34940Sstevel@tonic-gate freemsg(mp);
34950Sstevel@tonic-gate tl_closeok(acc_ep);
34960Sstevel@tonic-gate tl_refrele(acc_ep);
34970Sstevel@tonic-gate return;
34980Sstevel@tonic-gate }
34990Sstevel@tonic-gate
35000Sstevel@tonic-gate /*
35010Sstevel@tonic-gate * If ti_tep is NULL the client has already closed. In this case
35020Sstevel@tonic-gate * the code below will avoid any action on the client side
35030Sstevel@tonic-gate * but complete the server and acceptor state transitions.
35040Sstevel@tonic-gate */
35050Sstevel@tonic-gate ASSERT(tip->ti_tep == NULL ||
35065240Snordmark tip->ti_tep->te_seqno == cres->SEQ_number);
35070Sstevel@tonic-gate cl_ep = tip->ti_tep;
35080Sstevel@tonic-gate
35090Sstevel@tonic-gate /*
35100Sstevel@tonic-gate * If the client is present it is switched from listener's to acceptor's
35110Sstevel@tonic-gate * serializer. We should block client closes while serializers are
35120Sstevel@tonic-gate * being switched.
35130Sstevel@tonic-gate *
35140Sstevel@tonic-gate * It is possible that the client is present but is currently being
35150Sstevel@tonic-gate * closed. There are two possible cases:
35160Sstevel@tonic-gate *
35170Sstevel@tonic-gate * 1) The client has already entered tl_close_finish_ser() and sent
35180Sstevel@tonic-gate * T_ORDREL_IND. In this case we can just ignore the client (but we
35190Sstevel@tonic-gate * still need to send all messages from tip->ti_mp to the acceptor).
35200Sstevel@tonic-gate *
35210Sstevel@tonic-gate * 2) The client started the close but has not entered
35220Sstevel@tonic-gate * tl_close_finish_ser() yet. In this case, the client is already
35230Sstevel@tonic-gate * proceeding asynchronously on the listener's serializer, so we're
35240Sstevel@tonic-gate * forced to change the acceptor to use the listener's serializer to
35250Sstevel@tonic-gate * ensure that any operations on the acceptor are serialized with
35260Sstevel@tonic-gate * respect to the close that's in-progress.
35270Sstevel@tonic-gate */
35280Sstevel@tonic-gate if (cl_ep != NULL) {
35290Sstevel@tonic-gate if (tl_noclose(cl_ep)) {
35300Sstevel@tonic-gate client_noclose_set = B_TRUE;
35310Sstevel@tonic-gate } else {
35320Sstevel@tonic-gate /*
35330Sstevel@tonic-gate 			 * Client is closing. If it has sent the
35340Sstevel@tonic-gate 			 * T_ORDREL_IND, we can simply ignore it - otherwise,
35350Sstevel@tonic-gate 			 * we have to let the client continue until it is
35360Sstevel@tonic-gate * sent.
35370Sstevel@tonic-gate *
35380Sstevel@tonic-gate 			 * If we do continue using the client, the acceptor
35390Sstevel@tonic-gate 			 * will switch to the client's serializer, which is
35400Sstevel@tonic-gate 			 * used by the client for its close.
35410Sstevel@tonic-gate */
35420Sstevel@tonic-gate tl_client_closing_when_accepting++;
35430Sstevel@tonic-gate switch_client_serializer = B_FALSE;
35440Sstevel@tonic-gate if (!IS_SOCKET(cl_ep) || tl_disable_early_connect ||
35450Sstevel@tonic-gate cl_ep->te_state == -1)
35460Sstevel@tonic-gate cl_ep = NULL;
35470Sstevel@tonic-gate }
35480Sstevel@tonic-gate }
35490Sstevel@tonic-gate
35500Sstevel@tonic-gate if (cl_ep != NULL) {
35510Sstevel@tonic-gate /*
35520Sstevel@tonic-gate * validate client state to be TS_WCON_CREQ or TS_DATA_XFER
35530Sstevel@tonic-gate * (latter for sockets only)
35540Sstevel@tonic-gate */
35550Sstevel@tonic-gate if (cl_ep->te_state != TS_WCON_CREQ &&
35560Sstevel@tonic-gate (cl_ep->te_state != TS_DATA_XFER &&
35570Sstevel@tonic-gate IS_SOCKET(cl_ep))) {
35580Sstevel@tonic-gate err = ECONNREFUSED;
35590Sstevel@tonic-gate /*
35600Sstevel@tonic-gate * T_DISCON_IND sent later after committing memory
35610Sstevel@tonic-gate * and acking validity of request
35620Sstevel@tonic-gate */
35630Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE,
35645240Snordmark "tl_conn_res:peer in bad state"));
35650Sstevel@tonic-gate }
35660Sstevel@tonic-gate
35670Sstevel@tonic-gate /*
35680Sstevel@tonic-gate 		 * preallocate now for T_DISCON_IND or T_CONN_CON
35690Sstevel@tonic-gate * ack validity of request (T_OK_ACK) after memory committed
35700Sstevel@tonic-gate */
35710Sstevel@tonic-gate
35720Sstevel@tonic-gate if (err)
35730Sstevel@tonic-gate size = sizeof (struct T_discon_ind);
35740Sstevel@tonic-gate else {
35750Sstevel@tonic-gate /*
35760Sstevel@tonic-gate * calculate length of T_CONN_CON message
35770Sstevel@tonic-gate */
35780Sstevel@tonic-gate olen = 0;
35790Sstevel@tonic-gate if (cl_ep->te_flag & TL_SETCRED) {
35800Sstevel@tonic-gate olen = (t_scalar_t)sizeof (struct opthdr) +
35815240Snordmark OPTLEN(sizeof (tl_credopt_t));
35820Sstevel@tonic-gate } else if (cl_ep->te_flag & TL_SETUCRED) {
35830Sstevel@tonic-gate olen = (t_scalar_t)sizeof (struct opthdr) +
358411134SCasper.Dik@Sun.COM OPTLEN(ucredminsize(acc_ep->te_credp));
35850Sstevel@tonic-gate }
35860Sstevel@tonic-gate size = T_ALIGN(sizeof (struct T_conn_con) +
35875240Snordmark acc_ep->te_alen) + olen;
35880Sstevel@tonic-gate }
35890Sstevel@tonic-gate if ((respmp = reallocb(mp, size, 0)) == NULL) {
35900Sstevel@tonic-gate /*
35910Sstevel@tonic-gate * roll back state changes
35920Sstevel@tonic-gate */
35930Sstevel@tonic-gate tep->te_state = TS_WRES_CIND;
35940Sstevel@tonic-gate tl_memrecover(wq, mp, size);
35950Sstevel@tonic-gate freemsg(ackmp);
35960Sstevel@tonic-gate if (client_noclose_set)
35970Sstevel@tonic-gate tl_closeok(cl_ep);
35980Sstevel@tonic-gate tl_closeok(acc_ep);
35990Sstevel@tonic-gate tl_refrele(acc_ep);
36000Sstevel@tonic-gate return;
36010Sstevel@tonic-gate }
36020Sstevel@tonic-gate mp = NULL;
36030Sstevel@tonic-gate }
36040Sstevel@tonic-gate
36050Sstevel@tonic-gate /*
36060Sstevel@tonic-gate * Now ack validity of request
36070Sstevel@tonic-gate */
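	/*
	 * The T_OK_ACK event depends on how many connect indications are
	 * outstanding and on whether the connection is accepted on the
	 * listening endpoint itself.  Roughly, per the TPI state tables
	 * (a summary, not authoritative): TE_OK_ACK2 - one indication,
	 * accepted on the listener; TE_OK_ACK3 - one indication, accepted
	 * on another endpoint; TE_OK_ACK4 - more than one indication
	 * outstanding.
	 */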
36080Sstevel@tonic-gate if (tep->te_nicon == 1) {
36090Sstevel@tonic-gate if (tep == acc_ep)
36100Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK2, tep->te_state);
36110Sstevel@tonic-gate else
36120Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK3, tep->te_state);
36130Sstevel@tonic-gate } else
36140Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_OK_ACK4, tep->te_state);
36150Sstevel@tonic-gate
36160Sstevel@tonic-gate /*
36170Sstevel@tonic-gate * send T_DISCON_IND now if client state validation failed earlier
36180Sstevel@tonic-gate */
36190Sstevel@tonic-gate if (err) {
36200Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim);
36210Sstevel@tonic-gate /*
36220Sstevel@tonic-gate * flush the queues - why always ?
36230Sstevel@tonic-gate */
36240Sstevel@tonic-gate (void) putnextctl1(acc_ep->te_rq, M_FLUSH, FLUSHR);
36250Sstevel@tonic-gate
36260Sstevel@tonic-gate dimp = tl_resizemp(respmp, size);
36270Sstevel@tonic-gate if (! dimp) {
36280Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3,
36295240Snordmark SL_TRACE|SL_ERROR,
36305240Snordmark "tl_conn_res:con_ind:allocb failure"));
36310Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM);
36320Sstevel@tonic-gate tl_closeok(acc_ep);
36330Sstevel@tonic-gate if (client_noclose_set)
36340Sstevel@tonic-gate tl_closeok(cl_ep);
36350Sstevel@tonic-gate tl_refrele(acc_ep);
36360Sstevel@tonic-gate return;
36370Sstevel@tonic-gate }
36380Sstevel@tonic-gate if (dimp->b_cont) {
36390Sstevel@tonic-gate /* no user data in provider generated discon ind */
36400Sstevel@tonic-gate freemsg(dimp->b_cont);
36410Sstevel@tonic-gate dimp->b_cont = NULL;
36420Sstevel@tonic-gate }
36430Sstevel@tonic-gate
36440Sstevel@tonic-gate DB_TYPE(dimp) = M_PROTO;
36450Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr;
36460Sstevel@tonic-gate di->PRIM_type = T_DISCON_IND;
36470Sstevel@tonic-gate di->DISCON_reason = err;
36480Sstevel@tonic-gate di->SEQ_number = BADSEQNUM;
36490Sstevel@tonic-gate
36500Sstevel@tonic-gate tep->te_state = TS_IDLE;
36510Sstevel@tonic-gate /*
36520Sstevel@tonic-gate * send T_DISCON_IND message
36530Sstevel@tonic-gate */
36540Sstevel@tonic-gate putnext(acc_ep->te_rq, dimp);
36550Sstevel@tonic-gate if (client_noclose_set)
36560Sstevel@tonic-gate tl_closeok(cl_ep);
36570Sstevel@tonic-gate tl_closeok(acc_ep);
36580Sstevel@tonic-gate tl_refrele(acc_ep);
36590Sstevel@tonic-gate return;
36600Sstevel@tonic-gate }
36610Sstevel@tonic-gate
36620Sstevel@tonic-gate /*
36630Sstevel@tonic-gate * now start connecting the accepting endpoint
36640Sstevel@tonic-gate */
36650Sstevel@tonic-gate if (tep != acc_ep)
36660Sstevel@tonic-gate acc_ep->te_state = NEXTSTATE(TE_PASS_CONN, acc_ep->te_state);
36670Sstevel@tonic-gate
36680Sstevel@tonic-gate if (cl_ep == NULL) {
36690Sstevel@tonic-gate /*
36700Sstevel@tonic-gate * The client has already closed. Send up any queued messages
36710Sstevel@tonic-gate * and change the state accordingly.
36720Sstevel@tonic-gate */
36730Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim);
36740Sstevel@tonic-gate tl_icon_sendmsgs(acc_ep, &tip->ti_mp);
36750Sstevel@tonic-gate
36760Sstevel@tonic-gate /*
36770Sstevel@tonic-gate 		 * remove the endpoint from the incoming connection list:
36780Sstevel@tonic-gate 		 * delete the client from the list of incoming connections
36790Sstevel@tonic-gate */
36800Sstevel@tonic-gate tl_freetip(tep, tip);
36810Sstevel@tonic-gate freemsg(mp);
36820Sstevel@tonic-gate tl_closeok(acc_ep);
36830Sstevel@tonic-gate tl_refrele(acc_ep);
36840Sstevel@tonic-gate return;
36850Sstevel@tonic-gate } else if (tip->ti_mp != NULL) {
36860Sstevel@tonic-gate /*
36870Sstevel@tonic-gate * The client could have queued a T_DISCON_IND which needs
36880Sstevel@tonic-gate * to be sent up.
36890Sstevel@tonic-gate * Note that t_discon_req can not operate the same as
36900Sstevel@tonic-gate * t_data_req since it is not possible for it to putbq
36910Sstevel@tonic-gate * the message and return -1 due to the use of qwriter.
36920Sstevel@tonic-gate */
36930Sstevel@tonic-gate tl_icon_sendmsgs(acc_ep, &tip->ti_mp);
36940Sstevel@tonic-gate }
36950Sstevel@tonic-gate
36960Sstevel@tonic-gate /*
36970Sstevel@tonic-gate * prepare connect confirm T_CONN_CON message
36980Sstevel@tonic-gate */
36990Sstevel@tonic-gate
37000Sstevel@tonic-gate /*
37010Sstevel@tonic-gate * allocate the message - original data blocks
37020Sstevel@tonic-gate * retained in the returned mblk
37030Sstevel@tonic-gate */
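	/*
	 * Illustrative layout of the T_CONN_CON message assembled below
	 * (a sketch only; the exact padding comes from T_ALIGN):
	 *
	 *	b_rptr ->	struct T_conn_con
	 *	RES_offset ->	acceptor's address (RES_length bytes)
	 *	OPT_offset ->	struct opthdr + credentials (OPT_length
	 *			bytes, present only for TL_SETCRED or
	 *			TL_SETUCRED endpoints)
	 */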
37040Sstevel@tonic-gate if (! IS_SOCKET(cl_ep) || tl_disable_early_connect) {
37050Sstevel@tonic-gate ccmp = tl_resizemp(respmp, size);
37060Sstevel@tonic-gate if (ccmp == NULL) {
37070Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim);
37080Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3,
37095240Snordmark SL_TRACE|SL_ERROR,
37105240Snordmark "tl_conn_res:conn_con:allocb failure"));
37110Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM);
37120Sstevel@tonic-gate tl_closeok(acc_ep);
37130Sstevel@tonic-gate if (client_noclose_set)
37140Sstevel@tonic-gate tl_closeok(cl_ep);
37150Sstevel@tonic-gate tl_refrele(acc_ep);
37160Sstevel@tonic-gate return;
37170Sstevel@tonic-gate }
37180Sstevel@tonic-gate
37190Sstevel@tonic-gate DB_TYPE(ccmp) = M_PROTO;
37200Sstevel@tonic-gate cc = (struct T_conn_con *)ccmp->b_rptr;
37210Sstevel@tonic-gate cc->PRIM_type = T_CONN_CON;
37220Sstevel@tonic-gate cc->RES_offset = (t_scalar_t)sizeof (struct T_conn_con);
37230Sstevel@tonic-gate cc->RES_length = acc_ep->te_alen;
37240Sstevel@tonic-gate addr_startp = ccmp->b_rptr + cc->RES_offset;
37250Sstevel@tonic-gate bcopy(acc_ep->te_abuf, addr_startp, acc_ep->te_alen);
37260Sstevel@tonic-gate if (cl_ep->te_flag & (TL_SETCRED|TL_SETUCRED)) {
37270Sstevel@tonic-gate cc->OPT_offset = (t_scalar_t)T_ALIGN(cc->RES_offset +
37280Sstevel@tonic-gate cc->RES_length);
37290Sstevel@tonic-gate cc->OPT_length = olen;
37300Sstevel@tonic-gate tl_fill_option(ccmp->b_rptr + cc->OPT_offset,
37311676Sjpk acc_ep->te_credp, acc_ep->te_cpid, cl_ep->te_flag,
37321676Sjpk cl_ep->te_credp);
37330Sstevel@tonic-gate } else {
37340Sstevel@tonic-gate cc->OPT_offset = 0;
37350Sstevel@tonic-gate cc->OPT_length = 0;
37360Sstevel@tonic-gate }
37370Sstevel@tonic-gate /*
37380Sstevel@tonic-gate * Forward the credential in the packet so it can be picked up
37390Sstevel@tonic-gate * at the higher layers for more complete credential processing
37400Sstevel@tonic-gate */
37418778SErik.Nordmark@Sun.COM mblk_setcred(ccmp, acc_ep->te_credp, acc_ep->te_cpid);
37420Sstevel@tonic-gate } else {
37430Sstevel@tonic-gate freemsg(respmp);
37440Sstevel@tonic-gate respmp = NULL;
37450Sstevel@tonic-gate }
37460Sstevel@tonic-gate
37470Sstevel@tonic-gate /*
37480Sstevel@tonic-gate 	 * Make the connection by linking the
37490Sstevel@tonic-gate 	 * accepting and client endpoints.
37500Sstevel@tonic-gate 	 * No need to increment references:
37510Sstevel@tonic-gate 	 * on client: it should already have one from tip->ti_tep linkage.
37520Sstevel@tonic-gate 	 * on acceptor: it should already have one from the table lookup.
37530Sstevel@tonic-gate *
37540Sstevel@tonic-gate * At this point both client and acceptor can't close. Set client
37550Sstevel@tonic-gate * serializer to acceptor's.
37560Sstevel@tonic-gate */
37570Sstevel@tonic-gate ASSERT(cl_ep->te_refcnt >= 2);
37580Sstevel@tonic-gate ASSERT(acc_ep->te_refcnt >= 2);
37590Sstevel@tonic-gate ASSERT(cl_ep->te_conp == NULL);
37600Sstevel@tonic-gate ASSERT(acc_ep->te_conp == NULL);
37610Sstevel@tonic-gate cl_ep->te_conp = acc_ep;
37620Sstevel@tonic-gate acc_ep->te_conp = cl_ep;
37630Sstevel@tonic-gate ASSERT(cl_ep->te_ser == tep->te_ser);
37640Sstevel@tonic-gate if (switch_client_serializer) {
37650Sstevel@tonic-gate mutex_enter(&cl_ep->te_ser_lock);
37660Sstevel@tonic-gate if (cl_ep->te_ser_count > 0) {
37670Sstevel@tonic-gate switch_client_serializer = B_FALSE;
37680Sstevel@tonic-gate tl_serializer_noswitch++;
37690Sstevel@tonic-gate } else {
37700Sstevel@tonic-gate /*
37710Sstevel@tonic-gate * Move client to the acceptor's serializer.
37720Sstevel@tonic-gate */
37730Sstevel@tonic-gate tl_serializer_refhold(acc_ep->te_ser);
37740Sstevel@tonic-gate tl_serializer_refrele(cl_ep->te_ser);
37750Sstevel@tonic-gate cl_ep->te_ser = acc_ep->te_ser;
37760Sstevel@tonic-gate }
37770Sstevel@tonic-gate mutex_exit(&cl_ep->te_ser_lock);
37780Sstevel@tonic-gate }
37790Sstevel@tonic-gate if (!switch_client_serializer) {
37800Sstevel@tonic-gate /*
37810Sstevel@tonic-gate 		 * It is not possible to switch the client to the acceptor's
37820Sstevel@tonic-gate 		 * serializer. Move the acceptor to the client's serializer
37830Sstevel@tonic-gate 		 * (which is the same as the listener's).
37840Sstevel@tonic-gate */
37850Sstevel@tonic-gate tl_serializer_refhold(cl_ep->te_ser);
37860Sstevel@tonic-gate tl_serializer_refrele(acc_ep->te_ser);
37870Sstevel@tonic-gate acc_ep->te_ser = cl_ep->te_ser;
37880Sstevel@tonic-gate }
37890Sstevel@tonic-gate
37900Sstevel@tonic-gate TL_REMOVE_PEER(cl_ep->te_oconp);
37910Sstevel@tonic-gate TL_REMOVE_PEER(acc_ep->te_oconp);
37920Sstevel@tonic-gate
37930Sstevel@tonic-gate /*
37940Sstevel@tonic-gate 	 * remove the endpoint from the incoming connection list:
37950Sstevel@tonic-gate 	 * delete the client from the list of incoming connections
37960Sstevel@tonic-gate */
37970Sstevel@tonic-gate tip->ti_tep = NULL;
37980Sstevel@tonic-gate tl_freetip(tep, tip);
37990Sstevel@tonic-gate tl_ok_ack(wq, ackmp, prim);
38000Sstevel@tonic-gate
38010Sstevel@tonic-gate /*
38020Sstevel@tonic-gate * data blocks already linked in reallocb()
38030Sstevel@tonic-gate */
38040Sstevel@tonic-gate
38050Sstevel@tonic-gate /*
38060Sstevel@tonic-gate * link queues so that I_SENDFD will work
38070Sstevel@tonic-gate */
38080Sstevel@tonic-gate if (! IS_SOCKET(tep)) {
38090Sstevel@tonic-gate acc_ep->te_wq->q_next = cl_ep->te_rq;
38100Sstevel@tonic-gate cl_ep->te_wq->q_next = acc_ep->te_rq;
38110Sstevel@tonic-gate }
38120Sstevel@tonic-gate
38130Sstevel@tonic-gate /*
38140Sstevel@tonic-gate * send T_CONN_CON up on client side unless it was already
38150Sstevel@tonic-gate 	 * done (for a socket). In case any data or ordrel req has been
38160Sstevel@tonic-gate 	 * queued, make sure that the service procedure runs.
38170Sstevel@tonic-gate */
38180Sstevel@tonic-gate if (IS_SOCKET(cl_ep) && !tl_disable_early_connect) {
38190Sstevel@tonic-gate enableok(cl_ep->te_wq);
38200Sstevel@tonic-gate TL_QENABLE(cl_ep);
38210Sstevel@tonic-gate if (ccmp != NULL)
38220Sstevel@tonic-gate freemsg(ccmp);
38230Sstevel@tonic-gate } else {
38240Sstevel@tonic-gate /*
38250Sstevel@tonic-gate * change client state on TE_CONN_CON event
38260Sstevel@tonic-gate */
38270Sstevel@tonic-gate cl_ep->te_state = NEXTSTATE(TE_CONN_CON, cl_ep->te_state);
38280Sstevel@tonic-gate putnext(cl_ep->te_rq, ccmp);
38290Sstevel@tonic-gate }
38300Sstevel@tonic-gate
38310Sstevel@tonic-gate 	/* Mark both endpoints as accepted */
38320Sstevel@tonic-gate cl_ep->te_flag |= TL_ACCEPTED;
38330Sstevel@tonic-gate acc_ep->te_flag |= TL_ACCEPTED;
38340Sstevel@tonic-gate
38350Sstevel@tonic-gate /*
38360Sstevel@tonic-gate * Allow client and acceptor to close.
38370Sstevel@tonic-gate */
38380Sstevel@tonic-gate tl_closeok(acc_ep);
38390Sstevel@tonic-gate if (client_noclose_set)
38400Sstevel@tonic-gate tl_closeok(cl_ep);
38410Sstevel@tonic-gate }
38420Sstevel@tonic-gate
38430Sstevel@tonic-gate
38440Sstevel@tonic-gate
38450Sstevel@tonic-gate
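/*
 * Handle T_DISCON_REQ.  Depending on the endpoint's role this tears down a
 * pending connect indication on a listener (selected by SEQ_number), an
 * outgoing connect request on a client, or an established connection.
 * The requester gets a T_OK_ACK (or T_ERROR_ACK) and, where a peer still
 * exists, the peer gets a T_DISCON_IND.
 */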
38460Sstevel@tonic-gate static void
38470Sstevel@tonic-gate tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
38480Sstevel@tonic-gate {
38490Sstevel@tonic-gate queue_t *wq;
38500Sstevel@tonic-gate struct T_discon_req *dr;
38510Sstevel@tonic-gate ssize_t msz;
38520Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_conp;
38530Sstevel@tonic-gate tl_endpt_t *srv_tep = tep->te_oconp;
38540Sstevel@tonic-gate tl_icon_t *tip;
38550Sstevel@tonic-gate size_t size;
38560Sstevel@tonic-gate mblk_t *ackmp, *dimp, *respmp;
38570Sstevel@tonic-gate struct T_discon_ind *di;
38580Sstevel@tonic-gate t_scalar_t save_state, new_state;
38590Sstevel@tonic-gate
38600Sstevel@tonic-gate if (tep->te_closing) {
38610Sstevel@tonic-gate freemsg(mp);
38620Sstevel@tonic-gate return;
38630Sstevel@tonic-gate }
38640Sstevel@tonic-gate
38650Sstevel@tonic-gate if ((peer_tep != NULL) && peer_tep->te_closing) {
38660Sstevel@tonic-gate TL_UNCONNECT(tep->te_conp);
38670Sstevel@tonic-gate peer_tep = NULL;
38680Sstevel@tonic-gate }
38690Sstevel@tonic-gate if ((srv_tep != NULL) && srv_tep->te_closing) {
38700Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
38710Sstevel@tonic-gate srv_tep = NULL;
38720Sstevel@tonic-gate }
38730Sstevel@tonic-gate
38740Sstevel@tonic-gate wq = tep->te_wq;
38750Sstevel@tonic-gate
38760Sstevel@tonic-gate /*
38770Sstevel@tonic-gate * preallocate memory for:
38780Sstevel@tonic-gate * 1. max of T_ERROR_ACK and T_OK_ACK
38790Sstevel@tonic-gate * ==> known max T_ERROR_ACK
38800Sstevel@tonic-gate * 2. for T_DISCON_IND
38810Sstevel@tonic-gate */
38820Sstevel@tonic-gate ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
38830Sstevel@tonic-gate if (! ackmp) {
38840Sstevel@tonic-gate tl_memrecover(wq, mp, sizeof (struct T_error_ack));
38850Sstevel@tonic-gate return;
38860Sstevel@tonic-gate }
38870Sstevel@tonic-gate /*
38880Sstevel@tonic-gate * memory committed for T_OK_ACK/T_ERROR_ACK now
38890Sstevel@tonic-gate * will be committed for T_DISCON_IND later
38900Sstevel@tonic-gate */
38910Sstevel@tonic-gate
38920Sstevel@tonic-gate dr = (struct T_discon_req *)mp->b_rptr;
38930Sstevel@tonic-gate msz = MBLKL(mp);
38940Sstevel@tonic-gate
38950Sstevel@tonic-gate /*
38960Sstevel@tonic-gate * validate the state
38970Sstevel@tonic-gate */
38980Sstevel@tonic-gate save_state = new_state = tep->te_state;
38990Sstevel@tonic-gate if (! (save_state >= TS_WCON_CREQ && save_state <= TS_WRES_CIND) &&
39000Sstevel@tonic-gate ! (save_state >= TS_DATA_XFER && save_state <= TS_WREQ_ORDREL)) {
39010Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
39025240Snordmark SL_TRACE|SL_ERROR,
39035240Snordmark "tl_wput:T_DISCON_REQ:out of state, state=%d",
39045240Snordmark tep->te_state));
39050Sstevel@tonic-gate tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_DISCON_REQ);
39060Sstevel@tonic-gate freemsg(mp);
39070Sstevel@tonic-gate return;
39080Sstevel@tonic-gate }
39090Sstevel@tonic-gate /*
39100Sstevel@tonic-gate * Defer committing the state change until it is determined if
39110Sstevel@tonic-gate * the message will be queued with the tl_icon or not.
39120Sstevel@tonic-gate */
39130Sstevel@tonic-gate new_state = NEXTSTATE(TE_DISCON_REQ, tep->te_state);
39140Sstevel@tonic-gate
39150Sstevel@tonic-gate /* validate the message */
39160Sstevel@tonic-gate if (msz < sizeof (struct T_discon_req)) {
39170Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
39185240Snordmark "tl_discon_req:invalid message"));
39190Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
39200Sstevel@tonic-gate tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_DISCON_REQ);
39210Sstevel@tonic-gate freemsg(mp);
39220Sstevel@tonic-gate return;
39230Sstevel@tonic-gate }
39240Sstevel@tonic-gate
39250Sstevel@tonic-gate /*
39260Sstevel@tonic-gate * if server, then validate that client exists
39270Sstevel@tonic-gate * by connection sequence number etc.
39280Sstevel@tonic-gate */
39290Sstevel@tonic-gate if (tep->te_nicon > 0) { /* server */
39300Sstevel@tonic-gate
39310Sstevel@tonic-gate /*
39320Sstevel@tonic-gate * search server list for disconnect client
39330Sstevel@tonic-gate */
39340Sstevel@tonic-gate tip = tl_icon_find(tep, dr->SEQ_number);
39350Sstevel@tonic-gate if (tip == NULL) {
39360Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2,
39375240Snordmark SL_TRACE|SL_ERROR,
39385240Snordmark "tl_discon_req:no disconnect endpoint"));
39390Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
39400Sstevel@tonic-gate tl_error_ack(wq, ackmp, TBADSEQ, 0, T_DISCON_REQ);
39410Sstevel@tonic-gate freemsg(mp);
39420Sstevel@tonic-gate return;
39430Sstevel@tonic-gate }
39440Sstevel@tonic-gate /*
39450Sstevel@tonic-gate * If ti_tep is NULL the client has already closed. In this case
39460Sstevel@tonic-gate * the code below will avoid any action on the client side.
39470Sstevel@tonic-gate */
39480Sstevel@tonic-gate
3949*11474SJonathan.Adams@Sun.COM IMPLY(tip->ti_tep != NULL,
3950*11474SJonathan.Adams@Sun.COM tip->ti_tep->te_seqno == dr->SEQ_number);
39510Sstevel@tonic-gate peer_tep = tip->ti_tep;
39520Sstevel@tonic-gate }
39530Sstevel@tonic-gate
39540Sstevel@tonic-gate /*
39550Sstevel@tonic-gate * preallocate now for T_DISCON_IND
39560Sstevel@tonic-gate * ack validity of request (T_OK_ACK) after memory committed
39570Sstevel@tonic-gate */
39580Sstevel@tonic-gate size = sizeof (struct T_discon_ind);
39590Sstevel@tonic-gate if ((respmp = reallocb(mp, size, 0)) == NULL) {
39600Sstevel@tonic-gate tl_memrecover(wq, mp, size);
39610Sstevel@tonic-gate freemsg(ackmp);
39620Sstevel@tonic-gate return;
39630Sstevel@tonic-gate }
39640Sstevel@tonic-gate
39650Sstevel@tonic-gate /*
39660Sstevel@tonic-gate * prepare message to ack validity of request
39670Sstevel@tonic-gate */
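	/*
	 * As in tl_conn_res(), the T_OK_ACK event is chosen by the number
	 * of outstanding connect indications (roughly: TE_OK_ACK1 - none,
	 * TE_OK_ACK2 - exactly one, TE_OK_ACK4 - more than one).
	 */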
39680Sstevel@tonic-gate if (tep->te_nicon == 0)
39690Sstevel@tonic-gate new_state = NEXTSTATE(TE_OK_ACK1, new_state);
39700Sstevel@tonic-gate else
39710Sstevel@tonic-gate if (tep->te_nicon == 1)
39720Sstevel@tonic-gate new_state = NEXTSTATE(TE_OK_ACK2, new_state);
39730Sstevel@tonic-gate else
39740Sstevel@tonic-gate new_state = NEXTSTATE(TE_OK_ACK4, new_state);
39750Sstevel@tonic-gate
39760Sstevel@tonic-gate /*
39770Sstevel@tonic-gate * Flushing queues according to TPI. Using the old state.
39780Sstevel@tonic-gate */
39790Sstevel@tonic-gate if ((tep->te_nicon <= 1) &&
39800Sstevel@tonic-gate ((save_state == TS_DATA_XFER) ||
39810Sstevel@tonic-gate (save_state == TS_WIND_ORDREL) ||
39820Sstevel@tonic-gate (save_state == TS_WREQ_ORDREL)))
39830Sstevel@tonic-gate (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
39840Sstevel@tonic-gate
39850Sstevel@tonic-gate /* send T_OK_ACK up */
39860Sstevel@tonic-gate tl_ok_ack(wq, ackmp, T_DISCON_REQ);
39870Sstevel@tonic-gate
39880Sstevel@tonic-gate /*
39890Sstevel@tonic-gate * now do disconnect business
39900Sstevel@tonic-gate */
39910Sstevel@tonic-gate if (tep->te_nicon > 0) { /* listener */
39920Sstevel@tonic-gate if (peer_tep != NULL && !peer_tep->te_closing) {
39930Sstevel@tonic-gate /*
39940Sstevel@tonic-gate * disconnect incoming connect request pending to tep
39950Sstevel@tonic-gate */
39960Sstevel@tonic-gate if ((dimp = tl_resizemp(respmp, size)) == NULL) {
39970Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2,
39985240Snordmark SL_TRACE|SL_ERROR,
39995240Snordmark "tl_discon_req: reallocb failed"));
40000Sstevel@tonic-gate tep->te_state = new_state;
40010Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM);
40020Sstevel@tonic-gate return;
40030Sstevel@tonic-gate }
40040Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr;
40050Sstevel@tonic-gate di->SEQ_number = BADSEQNUM;
40060Sstevel@tonic-gate save_state = peer_tep->te_state;
40070Sstevel@tonic-gate peer_tep->te_state = TS_IDLE;
40080Sstevel@tonic-gate
40090Sstevel@tonic-gate TL_REMOVE_PEER(peer_tep->te_oconp);
40100Sstevel@tonic-gate enableok(peer_tep->te_wq);
40110Sstevel@tonic-gate TL_QENABLE(peer_tep);
40120Sstevel@tonic-gate } else {
40130Sstevel@tonic-gate freemsg(respmp);
40140Sstevel@tonic-gate dimp = NULL;
40150Sstevel@tonic-gate }
40160Sstevel@tonic-gate
40170Sstevel@tonic-gate /*
40180Sstevel@tonic-gate * remove endpoint from incoming connection list
40190Sstevel@tonic-gate * - remove disconnect client from list on server
40200Sstevel@tonic-gate */
40210Sstevel@tonic-gate tl_freetip(tep, tip);
40220Sstevel@tonic-gate } else if ((peer_tep = tep->te_oconp) != NULL) { /* client */
40230Sstevel@tonic-gate /*
40240Sstevel@tonic-gate * disconnect an outgoing request pending from tep
40250Sstevel@tonic-gate */
40260Sstevel@tonic-gate
40270Sstevel@tonic-gate if ((dimp = tl_resizemp(respmp, size)) == NULL) {
40280Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2,
40295240Snordmark SL_TRACE|SL_ERROR,
40305240Snordmark "tl_discon_req: reallocb failed"));
40310Sstevel@tonic-gate tep->te_state = new_state;
40320Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM);
40330Sstevel@tonic-gate return;
40340Sstevel@tonic-gate }
40350Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr;
40360Sstevel@tonic-gate DB_TYPE(dimp) = M_PROTO;
40370Sstevel@tonic-gate di->PRIM_type = T_DISCON_IND;
40380Sstevel@tonic-gate di->DISCON_reason = ECONNRESET;
40390Sstevel@tonic-gate di->SEQ_number = tep->te_seqno;
40400Sstevel@tonic-gate
40410Sstevel@tonic-gate /*
40420Sstevel@tonic-gate * If this is a socket the T_DISCON_IND is queued with
40430Sstevel@tonic-gate * the T_CONN_IND. Otherwise the T_CONN_IND is removed
40440Sstevel@tonic-gate * from the list of pending connections.
40450Sstevel@tonic-gate 		 * Note that when te_oconp is set the peer must have
40460Sstevel@tonic-gate 		 * a t_connind_t for the client.
40470Sstevel@tonic-gate */
40480Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect) {
40490Sstevel@tonic-gate /*
40500Sstevel@tonic-gate * No need to check that
40510Sstevel@tonic-gate * ti_tep == NULL since the T_DISCON_IND
40520Sstevel@tonic-gate * takes precedence over other queued
40530Sstevel@tonic-gate * messages.
40540Sstevel@tonic-gate */
40550Sstevel@tonic-gate tl_icon_queuemsg(peer_tep, tep->te_seqno, dimp);
40560Sstevel@tonic-gate peer_tep = NULL;
40570Sstevel@tonic-gate dimp = NULL;
40580Sstevel@tonic-gate /*
40590Sstevel@tonic-gate * Can't clear te_oconp since tl_co_unconnect needs
40600Sstevel@tonic-gate * it as a hint not to free the tep.
40610Sstevel@tonic-gate * Keep the state unchanged since tl_conn_res inspects
40620Sstevel@tonic-gate * it.
40630Sstevel@tonic-gate */
40640Sstevel@tonic-gate new_state = tep->te_state;
40650Sstevel@tonic-gate } else {
40660Sstevel@tonic-gate /* Found - delete it */
40670Sstevel@tonic-gate tip = tl_icon_find(peer_tep, tep->te_seqno);
40680Sstevel@tonic-gate if (tip != NULL) {
40690Sstevel@tonic-gate ASSERT(tep == tip->ti_tep);
40700Sstevel@tonic-gate save_state = peer_tep->te_state;
40710Sstevel@tonic-gate if (peer_tep->te_nicon == 1)
40720Sstevel@tonic-gate peer_tep->te_state =
40730Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND2,
40745240Snordmark peer_tep->te_state);
40750Sstevel@tonic-gate else
40760Sstevel@tonic-gate peer_tep->te_state =
40770Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND3,
40785240Snordmark peer_tep->te_state);
40790Sstevel@tonic-gate tl_freetip(peer_tep, tip);
40800Sstevel@tonic-gate }
40810Sstevel@tonic-gate ASSERT(tep->te_oconp != NULL);
40820Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
40830Sstevel@tonic-gate }
40840Sstevel@tonic-gate } else if ((peer_tep = tep->te_conp) != NULL) { /* connected! */
40850Sstevel@tonic-gate if ((dimp = tl_resizemp(respmp, size)) == NULL) {
40860Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2,
40875240Snordmark SL_TRACE|SL_ERROR,
40885240Snordmark "tl_discon_req: reallocb failed"));
40890Sstevel@tonic-gate tep->te_state = new_state;
40900Sstevel@tonic-gate tl_merror(wq, respmp, ENOMEM);
40910Sstevel@tonic-gate return;
40920Sstevel@tonic-gate }
40930Sstevel@tonic-gate di = (struct T_discon_ind *)dimp->b_rptr;
40940Sstevel@tonic-gate di->SEQ_number = BADSEQNUM;
40950Sstevel@tonic-gate
40960Sstevel@tonic-gate save_state = peer_tep->te_state;
40970Sstevel@tonic-gate peer_tep->te_state = TS_IDLE;
40980Sstevel@tonic-gate } else {
40990Sstevel@tonic-gate /* Not connected */
41000Sstevel@tonic-gate tep->te_state = new_state;
41010Sstevel@tonic-gate freemsg(respmp);
41020Sstevel@tonic-gate return;
41030Sstevel@tonic-gate }
41040Sstevel@tonic-gate
41050Sstevel@tonic-gate /* Commit state changes */
41060Sstevel@tonic-gate tep->te_state = new_state;
41070Sstevel@tonic-gate
41080Sstevel@tonic-gate if (peer_tep == NULL) {
41090Sstevel@tonic-gate ASSERT(dimp == NULL);
41100Sstevel@tonic-gate goto done;
41110Sstevel@tonic-gate }
41120Sstevel@tonic-gate /*
41130Sstevel@tonic-gate * Flush queues on peer before sending up
41140Sstevel@tonic-gate * T_DISCON_IND according to TPI
41150Sstevel@tonic-gate */
41160Sstevel@tonic-gate
41170Sstevel@tonic-gate if ((save_state == TS_DATA_XFER) ||
41180Sstevel@tonic-gate (save_state == TS_WIND_ORDREL) ||
41190Sstevel@tonic-gate (save_state == TS_WREQ_ORDREL))
41200Sstevel@tonic-gate (void) putnextctl1(peer_tep->te_rq, M_FLUSH, FLUSHRW);
41210Sstevel@tonic-gate
41220Sstevel@tonic-gate DB_TYPE(dimp) = M_PROTO;
41230Sstevel@tonic-gate di->PRIM_type = T_DISCON_IND;
41240Sstevel@tonic-gate di->DISCON_reason = ECONNRESET;
41250Sstevel@tonic-gate
41260Sstevel@tonic-gate /*
41270Sstevel@tonic-gate * data blocks already linked into dimp by reallocb()
41280Sstevel@tonic-gate */
41290Sstevel@tonic-gate /*
41300Sstevel@tonic-gate * send indication message to peer user module
41310Sstevel@tonic-gate */
41320Sstevel@tonic-gate ASSERT(dimp != NULL);
41330Sstevel@tonic-gate putnext(peer_tep->te_rq, dimp);
41340Sstevel@tonic-gate done:
41350Sstevel@tonic-gate if (tep->te_conp) { /* disconnect pointers if connected */
41360Sstevel@tonic-gate ASSERT(! peer_tep->te_closing);
41370Sstevel@tonic-gate
41380Sstevel@tonic-gate /*
41390Sstevel@tonic-gate * Messages may be queued on peer's write queue
41400Sstevel@tonic-gate * waiting to be processed by its write service
41410Sstevel@tonic-gate * procedure. Before the pointer to the peer transport
41420Sstevel@tonic-gate * structure is set to NULL, qenable the peer's write
41430Sstevel@tonic-gate * queue so that the queued up messages are processed.
41440Sstevel@tonic-gate */
41450Sstevel@tonic-gate if ((save_state == TS_DATA_XFER) ||
41460Sstevel@tonic-gate (save_state == TS_WIND_ORDREL) ||
41470Sstevel@tonic-gate (save_state == TS_WREQ_ORDREL))
41480Sstevel@tonic-gate TL_QENABLE(peer_tep);
41490Sstevel@tonic-gate ASSERT(peer_tep != NULL && peer_tep->te_conp != NULL);
41500Sstevel@tonic-gate TL_UNCONNECT(peer_tep->te_conp);
41510Sstevel@tonic-gate if (! IS_SOCKET(tep)) {
41520Sstevel@tonic-gate /*
41530Sstevel@tonic-gate * unlink the streams
41540Sstevel@tonic-gate */
41550Sstevel@tonic-gate tep->te_wq->q_next = NULL;
41560Sstevel@tonic-gate peer_tep->te_wq->q_next = NULL;
41570Sstevel@tonic-gate }
41580Sstevel@tonic-gate TL_UNCONNECT(tep->te_conp);
41590Sstevel@tonic-gate }
41600Sstevel@tonic-gate }
41610Sstevel@tonic-gate
41620Sstevel@tonic-gate
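/*
 * Handle T_ADDR_REQ.  For connectionless endpoints, or connection oriented
 * endpoints that are not in data transfer (or orderly release) state, reply
 * with a T_ADDR_ACK carrying the local address if the endpoint is bound.
 * Connected COTS endpoints are handled by tl_connected_cots_addr_req(),
 * which also returns the peer's address.
 */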
41630Sstevel@tonic-gate static void
41640Sstevel@tonic-gate tl_addr_req(mblk_t *mp, tl_endpt_t *tep)
41650Sstevel@tonic-gate {
41660Sstevel@tonic-gate queue_t *wq;
41670Sstevel@tonic-gate size_t ack_sz;
41680Sstevel@tonic-gate mblk_t *ackmp;
41690Sstevel@tonic-gate struct T_addr_ack *taa;
41700Sstevel@tonic-gate
41710Sstevel@tonic-gate if (tep->te_closing) {
41720Sstevel@tonic-gate freemsg(mp);
41730Sstevel@tonic-gate return;
41740Sstevel@tonic-gate }
41750Sstevel@tonic-gate
41760Sstevel@tonic-gate wq = tep->te_wq;
41770Sstevel@tonic-gate
41780Sstevel@tonic-gate /*
41790Sstevel@tonic-gate * Note: T_ADDR_REQ message has only PRIM_type field
41800Sstevel@tonic-gate * so it is already validated earlier.
41810Sstevel@tonic-gate */
41820Sstevel@tonic-gate
41830Sstevel@tonic-gate if (IS_CLTS(tep) ||
41840Sstevel@tonic-gate (tep->te_state > TS_WREQ_ORDREL) ||
41850Sstevel@tonic-gate (tep->te_state < TS_DATA_XFER)) {
41860Sstevel@tonic-gate /*
41870Sstevel@tonic-gate * Either connectionless or connection oriented but not
41880Sstevel@tonic-gate * in connected data transfer state or half-closed states.
41890Sstevel@tonic-gate */
41900Sstevel@tonic-gate ack_sz = sizeof (struct T_addr_ack);
41910Sstevel@tonic-gate if (tep->te_state >= TS_IDLE)
41920Sstevel@tonic-gate /* is bound */
41930Sstevel@tonic-gate ack_sz += tep->te_alen;
41940Sstevel@tonic-gate ackmp = reallocb(mp, ack_sz, 0);
41950Sstevel@tonic-gate if (ackmp == NULL) {
41960Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
41975240Snordmark SL_TRACE|SL_ERROR,
41985240Snordmark "tl_addr_req: reallocb failed"));
41990Sstevel@tonic-gate tl_memrecover(wq, mp, ack_sz);
42000Sstevel@tonic-gate return;
42010Sstevel@tonic-gate }
42020Sstevel@tonic-gate
42030Sstevel@tonic-gate taa = (struct T_addr_ack *)ackmp->b_rptr;
42040Sstevel@tonic-gate
42050Sstevel@tonic-gate bzero(taa, sizeof (struct T_addr_ack));
42060Sstevel@tonic-gate
42070Sstevel@tonic-gate taa->PRIM_type = T_ADDR_ACK;
42080Sstevel@tonic-gate ackmp->b_datap->db_type = M_PCPROTO;
42090Sstevel@tonic-gate ackmp->b_wptr = (uchar_t *)&taa[1];
42100Sstevel@tonic-gate
42110Sstevel@tonic-gate if (tep->te_state >= TS_IDLE) {
42120Sstevel@tonic-gate /* endpoint is bound */
42130Sstevel@tonic-gate taa->LOCADDR_length = tep->te_alen;
42140Sstevel@tonic-gate taa->LOCADDR_offset = (t_scalar_t)sizeof (*taa);
42150Sstevel@tonic-gate
42160Sstevel@tonic-gate bcopy(tep->te_abuf, ackmp->b_wptr,
42175240Snordmark tep->te_alen);
42180Sstevel@tonic-gate ackmp->b_wptr += tep->te_alen;
42190Sstevel@tonic-gate ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim);
42200Sstevel@tonic-gate }
42210Sstevel@tonic-gate
42220Sstevel@tonic-gate (void) qreply(wq, ackmp);
42230Sstevel@tonic-gate } else {
42240Sstevel@tonic-gate ASSERT(tep->te_state == TS_DATA_XFER ||
42255240Snordmark tep->te_state == TS_WIND_ORDREL ||
42265240Snordmark tep->te_state == TS_WREQ_ORDREL);
42270Sstevel@tonic-gate /* connection oriented in data transfer */
42280Sstevel@tonic-gate tl_connected_cots_addr_req(mp, tep);
42290Sstevel@tonic-gate }
42300Sstevel@tonic-gate }
42310Sstevel@tonic-gate
42320Sstevel@tonic-gate
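/*
 * Handle T_ADDR_REQ for a connected connection oriented endpoint.  The
 * T_ADDR_ACK built here carries both addresses; an illustrative layout
 * (sketch only, padding comes from T_ALIGN):
 *
 *	b_rptr ->		struct T_addr_ack
 *	LOCADDR_offset ->	local address (LOCADDR_length bytes)
 *	REMADDR_offset ->	peer address (REMADDR_length bytes)
 */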
42330Sstevel@tonic-gate static void
42340Sstevel@tonic-gate tl_connected_cots_addr_req(mblk_t *mp, tl_endpt_t *tep)
42350Sstevel@tonic-gate {
42360Sstevel@tonic-gate tl_endpt_t *peer_tep;
42370Sstevel@tonic-gate size_t ack_sz;
42380Sstevel@tonic-gate mblk_t *ackmp;
42390Sstevel@tonic-gate struct T_addr_ack *taa;
42400Sstevel@tonic-gate uchar_t *addr_startp;
42410Sstevel@tonic-gate
42420Sstevel@tonic-gate if (tep->te_closing) {
42430Sstevel@tonic-gate freemsg(mp);
42440Sstevel@tonic-gate return;
42450Sstevel@tonic-gate }
42460Sstevel@tonic-gate
42470Sstevel@tonic-gate ASSERT(tep->te_state >= TS_IDLE);
42480Sstevel@tonic-gate
42490Sstevel@tonic-gate ack_sz = sizeof (struct T_addr_ack);
42500Sstevel@tonic-gate ack_sz += T_ALIGN(tep->te_alen);
42510Sstevel@tonic-gate peer_tep = tep->te_conp;
42520Sstevel@tonic-gate ack_sz += peer_tep->te_alen;
42530Sstevel@tonic-gate
42540Sstevel@tonic-gate ackmp = tpi_ack_alloc(mp, ack_sz, M_PCPROTO, T_ADDR_ACK);
42550Sstevel@tonic-gate if (ackmp == NULL) {
42560Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
42575240Snordmark "tl_connected_cots_addr_req: reallocb failed"));
42580Sstevel@tonic-gate tl_memrecover(tep->te_wq, mp, ack_sz);
42590Sstevel@tonic-gate return;
42600Sstevel@tonic-gate }
42610Sstevel@tonic-gate
42620Sstevel@tonic-gate taa = (struct T_addr_ack *)ackmp->b_rptr;
42630Sstevel@tonic-gate
42640Sstevel@tonic-gate /* endpoint is bound */
42650Sstevel@tonic-gate taa->LOCADDR_length = tep->te_alen;
42660Sstevel@tonic-gate taa->LOCADDR_offset = (t_scalar_t)sizeof (*taa);
42670Sstevel@tonic-gate
42680Sstevel@tonic-gate addr_startp = (uchar_t *)&taa[1];
42690Sstevel@tonic-gate
42700Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp,
42710Sstevel@tonic-gate tep->te_alen);
42720Sstevel@tonic-gate
42730Sstevel@tonic-gate taa->REMADDR_length = peer_tep->te_alen;
42740Sstevel@tonic-gate taa->REMADDR_offset = (t_scalar_t)T_ALIGN(taa->LOCADDR_offset +
42755240Snordmark taa->LOCADDR_length);
42760Sstevel@tonic-gate addr_startp = ackmp->b_rptr + taa->REMADDR_offset;
42770Sstevel@tonic-gate bcopy(peer_tep->te_abuf, addr_startp,
42780Sstevel@tonic-gate peer_tep->te_alen);
42790Sstevel@tonic-gate ackmp->b_wptr = (uchar_t *)ackmp->b_rptr +
42800Sstevel@tonic-gate taa->REMADDR_offset + peer_tep->te_alen;
42810Sstevel@tonic-gate ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim);
42820Sstevel@tonic-gate
42830Sstevel@tonic-gate putnext(tep->te_rq, ackmp);
42840Sstevel@tonic-gate }
42850Sstevel@tonic-gate
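/*
 * Fill in a T_info_ack for this endpoint: start from the connectionless or
 * connection oriented template, adjust SERV_type for T_COTS_ORD, and set
 * the TIDU size and the current TPI state.
 */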
42860Sstevel@tonic-gate static void
42870Sstevel@tonic-gate tl_copy_info(struct T_info_ack *ia, tl_endpt_t *tep)
42880Sstevel@tonic-gate {
42890Sstevel@tonic-gate if (IS_CLTS(tep)) {
42900Sstevel@tonic-gate *ia = tl_clts_info_ack;
42910Sstevel@tonic-gate ia->TSDU_size = tl_tidusz; /* TSDU and TIDU size are same */
42920Sstevel@tonic-gate } else {
42930Sstevel@tonic-gate *ia = tl_cots_info_ack;
42940Sstevel@tonic-gate if (IS_COTSORD(tep))
42950Sstevel@tonic-gate ia->SERV_type = T_COTS_ORD;
42960Sstevel@tonic-gate }
42970Sstevel@tonic-gate ia->TIDU_size = tl_tidusz;
42980Sstevel@tonic-gate ia->CURRENT_state = tep->te_state;
42990Sstevel@tonic-gate }
43000Sstevel@tonic-gate
43010Sstevel@tonic-gate /*
43020Sstevel@tonic-gate * This routine responds to T_CAPABILITY_REQ messages. It is called by
43030Sstevel@tonic-gate * tl_wput.
43040Sstevel@tonic-gate */
43050Sstevel@tonic-gate static void
43060Sstevel@tonic-gate tl_capability_req(mblk_t *mp, tl_endpt_t *tep)
43070Sstevel@tonic-gate {
43080Sstevel@tonic-gate mblk_t *ackmp;
43090Sstevel@tonic-gate t_uscalar_t cap_bits1;
43100Sstevel@tonic-gate struct T_capability_ack *tcap;
43110Sstevel@tonic-gate
43120Sstevel@tonic-gate if (tep->te_closing) {
43130Sstevel@tonic-gate freemsg(mp);
43140Sstevel@tonic-gate return;
43150Sstevel@tonic-gate }
43160Sstevel@tonic-gate
43170Sstevel@tonic-gate cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
43180Sstevel@tonic-gate
43190Sstevel@tonic-gate ackmp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
43200Sstevel@tonic-gate M_PCPROTO, T_CAPABILITY_ACK);
43210Sstevel@tonic-gate if (ackmp == NULL) {
43220Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
43235240Snordmark "tl_capability_req: reallocb failed"));
43240Sstevel@tonic-gate tl_memrecover(tep->te_wq, mp,
43250Sstevel@tonic-gate sizeof (struct T_capability_ack));
43260Sstevel@tonic-gate return;
43270Sstevel@tonic-gate }
43280Sstevel@tonic-gate
43290Sstevel@tonic-gate tcap = (struct T_capability_ack *)ackmp->b_rptr;
43300Sstevel@tonic-gate tcap->CAP_bits1 = 0;
43310Sstevel@tonic-gate
43320Sstevel@tonic-gate if (cap_bits1 & TC1_INFO) {
43330Sstevel@tonic-gate tl_copy_info(&tcap->INFO_ack, tep);
43340Sstevel@tonic-gate tcap->CAP_bits1 |= TC1_INFO;
43350Sstevel@tonic-gate }
43360Sstevel@tonic-gate
43370Sstevel@tonic-gate if (cap_bits1 & TC1_ACCEPTOR_ID) {
43380Sstevel@tonic-gate tcap->ACCEPTOR_id = tep->te_acceptor_id;
43390Sstevel@tonic-gate tcap->CAP_bits1 |= TC1_ACCEPTOR_ID;
43400Sstevel@tonic-gate }
43410Sstevel@tonic-gate
43420Sstevel@tonic-gate putnext(tep->te_rq, ackmp);
43430Sstevel@tonic-gate }
43440Sstevel@tonic-gate
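/*
 * T_INFO_REQ handling run on the serializer: do the work in tl_info_req()
 * unless the endpoint is closing, then leave the serializer and drop the
 * reference taken when the request was dispatched.
 */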
43450Sstevel@tonic-gate static void
43460Sstevel@tonic-gate tl_info_req_ser(mblk_t *mp, tl_endpt_t *tep)
43470Sstevel@tonic-gate {
43480Sstevel@tonic-gate if (! tep->te_closing)
43490Sstevel@tonic-gate tl_info_req(mp, tep);
43500Sstevel@tonic-gate else
43510Sstevel@tonic-gate freemsg(mp);
43520Sstevel@tonic-gate
43530Sstevel@tonic-gate tl_serializer_exit(tep);
43540Sstevel@tonic-gate tl_refrele(tep);
43550Sstevel@tonic-gate }
43560Sstevel@tonic-gate
43570Sstevel@tonic-gate static void
43580Sstevel@tonic-gate tl_info_req(mblk_t *mp, tl_endpt_t *tep)
43590Sstevel@tonic-gate {
43600Sstevel@tonic-gate mblk_t *ackmp;
43610Sstevel@tonic-gate
43620Sstevel@tonic-gate ackmp = tpi_ack_alloc(mp, sizeof (struct T_info_ack),
43630Sstevel@tonic-gate M_PCPROTO, T_INFO_ACK);
43640Sstevel@tonic-gate if (ackmp == NULL) {
43650Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
43665240Snordmark "tl_info_req: reallocb failed"));
43670Sstevel@tonic-gate tl_memrecover(tep->te_wq, mp, sizeof (struct T_info_ack));
43680Sstevel@tonic-gate return;
43690Sstevel@tonic-gate }
43700Sstevel@tonic-gate
43710Sstevel@tonic-gate /*
43720Sstevel@tonic-gate * fill in T_INFO_ACK contents
43730Sstevel@tonic-gate */
43740Sstevel@tonic-gate tl_copy_info((struct T_info_ack *)ackmp->b_rptr, tep);
43750Sstevel@tonic-gate
43760Sstevel@tonic-gate /*
43770Sstevel@tonic-gate * send ack message
43780Sstevel@tonic-gate */
43790Sstevel@tonic-gate putnext(tep->te_rq, ackmp);
43800Sstevel@tonic-gate }
43810Sstevel@tonic-gate
43820Sstevel@tonic-gate /*
43830Sstevel@tonic-gate * Handle M_DATA, T_data_req and T_optdata_req.
43840Sstevel@tonic-gate * If this is a socket pass through T_optdata_req options unmodified.
43850Sstevel@tonic-gate */
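/*
 * A rough outline of the forwarding path below (a sketch only, not a
 * substitute for the state checks in the code):
 *
 *	validate tep->te_state (TS_IDLE / TS_DATA_XFER / TS_WREQ_ORDREL)
 *	peer_tep = tep->te_conp;
 *	if (!canputnext(peer_tep->te_rq) && !closing)
 *		TL_PUTBQ(tep, mp);		(flow controlled, retry later)
 *	else
 *		rewrite T_*DATA_REQ into T_*DATA_IND and
 *		putnext(peer_tep->te_rq, mp);
 */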
43860Sstevel@tonic-gate static void
43870Sstevel@tonic-gate tl_data(mblk_t *mp, tl_endpt_t *tep)
43880Sstevel@tonic-gate {
43890Sstevel@tonic-gate queue_t *wq = tep->te_wq;
43900Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr;
43910Sstevel@tonic-gate ssize_t msz = MBLKL(mp);
43920Sstevel@tonic-gate tl_endpt_t *peer_tep;
43930Sstevel@tonic-gate queue_t *peer_rq;
43940Sstevel@tonic-gate boolean_t closing = tep->te_closing;
43950Sstevel@tonic-gate
43960Sstevel@tonic-gate if (IS_CLTS(tep)) {
43970Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 2,
43985240Snordmark SL_TRACE|SL_ERROR,
43995240Snordmark "tl_wput:clts:unattached M_DATA"));
44000Sstevel@tonic-gate if (!closing) {
44010Sstevel@tonic-gate tl_merror(wq, mp, EPROTO);
44020Sstevel@tonic-gate } else {
44030Sstevel@tonic-gate freemsg(mp);
44040Sstevel@tonic-gate }
44050Sstevel@tonic-gate return;
44060Sstevel@tonic-gate }
44070Sstevel@tonic-gate
44080Sstevel@tonic-gate /*
44090Sstevel@tonic-gate * If the endpoint is closing it should still forward any data to the
44100Sstevel@tonic-gate * peer (if it has one). If it is not allowed to forward it can just
44110Sstevel@tonic-gate * free the message.
44120Sstevel@tonic-gate */
44130Sstevel@tonic-gate if (closing &&
44140Sstevel@tonic-gate (tep->te_state != TS_DATA_XFER) &&
44150Sstevel@tonic-gate (tep->te_state != TS_WREQ_ORDREL)) {
44160Sstevel@tonic-gate freemsg(mp);
44170Sstevel@tonic-gate return;
44180Sstevel@tonic-gate }
44190Sstevel@tonic-gate
44200Sstevel@tonic-gate if (DB_TYPE(mp) == M_PROTO) {
44210Sstevel@tonic-gate if (prim->type == T_DATA_REQ &&
44220Sstevel@tonic-gate msz < sizeof (struct T_data_req)) {
44230Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
44240Sstevel@tonic-gate SL_TRACE|SL_ERROR,
44250Sstevel@tonic-gate "tl_data:T_DATA_REQ:invalid message"));
44260Sstevel@tonic-gate if (!closing) {
44270Sstevel@tonic-gate tl_merror(wq, mp, EPROTO);
44280Sstevel@tonic-gate } else {
44290Sstevel@tonic-gate freemsg(mp);
44300Sstevel@tonic-gate }
44310Sstevel@tonic-gate return;
44320Sstevel@tonic-gate } else if (prim->type == T_OPTDATA_REQ &&
44337656SSherry.Moore@Sun.COM (msz < sizeof (struct T_optdata_req) || !IS_SOCKET(tep))) {
44340Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
44355240Snordmark SL_TRACE|SL_ERROR,
44365240Snordmark "tl_data:T_OPTDATA_REQ:invalid message"));
44370Sstevel@tonic-gate if (!closing) {
44380Sstevel@tonic-gate tl_merror(wq, mp, EPROTO);
44390Sstevel@tonic-gate } else {
44400Sstevel@tonic-gate freemsg(mp);
44410Sstevel@tonic-gate }
44420Sstevel@tonic-gate return;
44430Sstevel@tonic-gate }
44440Sstevel@tonic-gate }
44450Sstevel@tonic-gate
44460Sstevel@tonic-gate /*
44470Sstevel@tonic-gate * connection oriented provider
44480Sstevel@tonic-gate */
44490Sstevel@tonic-gate switch (tep->te_state) {
44500Sstevel@tonic-gate case TS_IDLE:
44510Sstevel@tonic-gate /*
44520Sstevel@tonic-gate * Other end not here - do nothing.
44530Sstevel@tonic-gate */
44540Sstevel@tonic-gate freemsg(mp);
44550Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
44565240Snordmark "tl_data:cots with endpoint idle"));
44570Sstevel@tonic-gate return;
44580Sstevel@tonic-gate
44590Sstevel@tonic-gate case TS_DATA_XFER:
44600Sstevel@tonic-gate /* valid states */
44610Sstevel@tonic-gate if (tep->te_conp != NULL)
44620Sstevel@tonic-gate break;
44630Sstevel@tonic-gate
44640Sstevel@tonic-gate if (tep->te_oconp == NULL) {
44650Sstevel@tonic-gate if (!closing) {
44660Sstevel@tonic-gate tl_merror(wq, mp, EPROTO);
44670Sstevel@tonic-gate } else {
44680Sstevel@tonic-gate freemsg(mp);
44690Sstevel@tonic-gate }
44700Sstevel@tonic-gate return;
44710Sstevel@tonic-gate }
44720Sstevel@tonic-gate /*
44730Sstevel@tonic-gate * For a socket the T_CONN_CON is sent early thus
44740Sstevel@tonic-gate * the peer might not yet have accepted the connection.
44750Sstevel@tonic-gate * If we are closing queue the packet with the T_CONN_IND.
44760Sstevel@tonic-gate * Otherwise defer processing the packet until the peer
44770Sstevel@tonic-gate * accepts the connection.
44780Sstevel@tonic-gate * Note that the queue is noenabled when we go into this
44790Sstevel@tonic-gate * state.
44800Sstevel@tonic-gate */
44810Sstevel@tonic-gate if (!closing) {
44820Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
44835240Snordmark SL_TRACE|SL_ERROR,
44845240Snordmark "tl_data: ocon"));
44850Sstevel@tonic-gate TL_PUTBQ(tep, mp);
44860Sstevel@tonic-gate return;
44870Sstevel@tonic-gate }
44880Sstevel@tonic-gate if (DB_TYPE(mp) == M_PROTO) {
44890Sstevel@tonic-gate if (msz < sizeof (t_scalar_t)) {
44900Sstevel@tonic-gate freemsg(mp);
44910Sstevel@tonic-gate return;
44920Sstevel@tonic-gate }
44930Sstevel@tonic-gate /* reuse message block - just change REQ to IND */
44940Sstevel@tonic-gate if (prim->type == T_DATA_REQ)
44950Sstevel@tonic-gate prim->type = T_DATA_IND;
44960Sstevel@tonic-gate else
44970Sstevel@tonic-gate prim->type = T_OPTDATA_IND;
44980Sstevel@tonic-gate }
44990Sstevel@tonic-gate tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
45000Sstevel@tonic-gate return;
45010Sstevel@tonic-gate
45020Sstevel@tonic-gate case TS_WREQ_ORDREL:
45030Sstevel@tonic-gate if (tep->te_conp == NULL) {
45040Sstevel@tonic-gate /*
45050Sstevel@tonic-gate * Other end closed - generate discon_ind
45060Sstevel@tonic-gate * with reason 0 to cause an EPIPE but no
45070Sstevel@tonic-gate * read side error on AF_UNIX sockets.
45080Sstevel@tonic-gate */
45090Sstevel@tonic-gate freemsg(mp);
45100Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3,
45115240Snordmark SL_TRACE|SL_ERROR,
45125240Snordmark "tl_data: WREQ_ORDREL and no peer"));
45130Sstevel@tonic-gate tl_discon_ind(tep, 0);
45140Sstevel@tonic-gate return;
45150Sstevel@tonic-gate }
45160Sstevel@tonic-gate break;
45170Sstevel@tonic-gate
45180Sstevel@tonic-gate default:
45190Sstevel@tonic-gate /* invalid state for event TE_DATA_REQ */
45200Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
45215240Snordmark "tl_data:cots:out of state"));
45220Sstevel@tonic-gate tl_merror(wq, mp, EPROTO);
45230Sstevel@tonic-gate return;
45240Sstevel@tonic-gate }
45250Sstevel@tonic-gate /*
45260Sstevel@tonic-gate * tep->te_state = NEXTSTATE(TE_DATA_REQ, tep->te_state);
45270Sstevel@tonic-gate * (State stays same on this event)
45280Sstevel@tonic-gate */
45290Sstevel@tonic-gate
45300Sstevel@tonic-gate /*
45310Sstevel@tonic-gate * get connected endpoint
45320Sstevel@tonic-gate */
45330Sstevel@tonic-gate if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
45340Sstevel@tonic-gate freemsg(mp);
45350Sstevel@tonic-gate /* Peer closed */
45360Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
45375240Snordmark "tl_data: peer gone"));
45380Sstevel@tonic-gate return;
45390Sstevel@tonic-gate }
45400Sstevel@tonic-gate
45410Sstevel@tonic-gate 	ASSERT(tep->te_ser == peer_tep->te_ser);
45420Sstevel@tonic-gate peer_rq = peer_tep->te_rq;
45430Sstevel@tonic-gate
45440Sstevel@tonic-gate /*
45450Sstevel@tonic-gate * Put it back if flow controlled
45460Sstevel@tonic-gate 	 * Note: the number of messages already queued when we are closing
45470Sstevel@tonic-gate 	 * is bounded, so we can ignore flow control.
45480Sstevel@tonic-gate */
45490Sstevel@tonic-gate if (!canputnext(peer_rq) && !closing) {
45500Sstevel@tonic-gate TL_PUTBQ(tep, mp);
45510Sstevel@tonic-gate return;
45520Sstevel@tonic-gate }
45530Sstevel@tonic-gate
45540Sstevel@tonic-gate /*
45550Sstevel@tonic-gate * validate peer state
45560Sstevel@tonic-gate */
45570Sstevel@tonic-gate switch (peer_tep->te_state) {
45580Sstevel@tonic-gate case TS_DATA_XFER:
45590Sstevel@tonic-gate case TS_WIND_ORDREL:
45600Sstevel@tonic-gate /* valid states */
45610Sstevel@tonic-gate break;
45620Sstevel@tonic-gate default:
45630Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
45645240Snordmark "tl_data:rx side:invalid state"));
45650Sstevel@tonic-gate tl_merror(peer_tep->te_wq, mp, EPROTO);
45660Sstevel@tonic-gate return;
45670Sstevel@tonic-gate }
45680Sstevel@tonic-gate if (DB_TYPE(mp) == M_PROTO) {
45690Sstevel@tonic-gate /* reuse message block - just change REQ to IND */
45700Sstevel@tonic-gate if (prim->type == T_DATA_REQ)
45710Sstevel@tonic-gate prim->type = T_DATA_IND;
45720Sstevel@tonic-gate else
45730Sstevel@tonic-gate prim->type = T_OPTDATA_IND;
45740Sstevel@tonic-gate }
45750Sstevel@tonic-gate /*
45760Sstevel@tonic-gate * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state);
45770Sstevel@tonic-gate * (peer state stays same on this event)
45780Sstevel@tonic-gate */
45790Sstevel@tonic-gate /*
45800Sstevel@tonic-gate * send data to connected peer
45810Sstevel@tonic-gate */
45820Sstevel@tonic-gate putnext(peer_rq, mp);
45830Sstevel@tonic-gate }
45840Sstevel@tonic-gate
45850Sstevel@tonic-gate
45860Sstevel@tonic-gate
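/*
 * Handle T_EXDATA_REQ (expedited data).  The logic mirrors tl_data(): the
 * message is validated, the request is rewritten into a T_EXDATA_IND and
 * forwarded to the connected peer, subject to the same state and flow
 * control checks.
 */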
45870Sstevel@tonic-gate static void
45880Sstevel@tonic-gate tl_exdata(mblk_t *mp, tl_endpt_t *tep)
45890Sstevel@tonic-gate {
45900Sstevel@tonic-gate queue_t *wq = tep->te_wq;
45910Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr;
45920Sstevel@tonic-gate ssize_t msz = MBLKL(mp);
45930Sstevel@tonic-gate tl_endpt_t *peer_tep;
45940Sstevel@tonic-gate queue_t *peer_rq;
45950Sstevel@tonic-gate boolean_t closing = tep->te_closing;
45960Sstevel@tonic-gate
45970Sstevel@tonic-gate if (msz < sizeof (struct T_exdata_req)) {
45980Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
45995240Snordmark "tl_exdata:invalid message"));
46000Sstevel@tonic-gate if (!closing) {
46010Sstevel@tonic-gate tl_merror(wq, mp, EPROTO);
46020Sstevel@tonic-gate } else {
46030Sstevel@tonic-gate freemsg(mp);
46040Sstevel@tonic-gate }
46050Sstevel@tonic-gate return;
46060Sstevel@tonic-gate }
46070Sstevel@tonic-gate
46080Sstevel@tonic-gate /*
46090Sstevel@tonic-gate * If the endpoint is closing it should still forward any data to the
46100Sstevel@tonic-gate * peer (if it has one). If it is not allowed to forward it can just
46110Sstevel@tonic-gate * free the message.
46120Sstevel@tonic-gate */
46130Sstevel@tonic-gate if (closing &&
46140Sstevel@tonic-gate (tep->te_state != TS_DATA_XFER) &&
46150Sstevel@tonic-gate (tep->te_state != TS_WREQ_ORDREL)) {
46160Sstevel@tonic-gate freemsg(mp);
46170Sstevel@tonic-gate return;
46180Sstevel@tonic-gate }
46190Sstevel@tonic-gate
46200Sstevel@tonic-gate /*
46210Sstevel@tonic-gate * validate state
46220Sstevel@tonic-gate */
46230Sstevel@tonic-gate switch (tep->te_state) {
46240Sstevel@tonic-gate case TS_IDLE:
46250Sstevel@tonic-gate /*
46260Sstevel@tonic-gate * Other end not here - do nothing.
46270Sstevel@tonic-gate */
46280Sstevel@tonic-gate freemsg(mp);
46290Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
46305240Snordmark "tl_exdata:cots with endpoint idle"));
46310Sstevel@tonic-gate return;
46320Sstevel@tonic-gate
46330Sstevel@tonic-gate case TS_DATA_XFER:
46340Sstevel@tonic-gate /* valid states */
46350Sstevel@tonic-gate if (tep->te_conp != NULL)
46360Sstevel@tonic-gate break;
46370Sstevel@tonic-gate
46380Sstevel@tonic-gate if (tep->te_oconp == NULL) {
46390Sstevel@tonic-gate if (!closing) {
46400Sstevel@tonic-gate tl_merror(wq, mp, EPROTO);
46410Sstevel@tonic-gate } else {
46420Sstevel@tonic-gate freemsg(mp);
46430Sstevel@tonic-gate }
46440Sstevel@tonic-gate return;
46450Sstevel@tonic-gate }
46460Sstevel@tonic-gate /*
46470Sstevel@tonic-gate * For a socket the T_CONN_CON is sent early thus
46480Sstevel@tonic-gate * the peer might not yet have accepted the connection.
46490Sstevel@tonic-gate * If we are closing, queue the packet with the T_CONN_IND.
46500Sstevel@tonic-gate * Otherwise defer processing the packet until the peer
46510Sstevel@tonic-gate * accepts the connection.
46520Sstevel@tonic-gate * Note that the queue is noenabled when we go into this
46530Sstevel@tonic-gate * state.
46540Sstevel@tonic-gate */
46550Sstevel@tonic-gate if (!closing) {
46560Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
46575240Snordmark SL_TRACE|SL_ERROR,
46585240Snordmark "tl_exdata: ocon"));
46590Sstevel@tonic-gate TL_PUTBQ(tep, mp);
46600Sstevel@tonic-gate return;
46610Sstevel@tonic-gate }
46620Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
46635240Snordmark "tl_exdata: closing socket ocon"));
46640Sstevel@tonic-gate prim->type = T_EXDATA_IND;
46650Sstevel@tonic-gate tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
46660Sstevel@tonic-gate return;
46670Sstevel@tonic-gate
46680Sstevel@tonic-gate case TS_WREQ_ORDREL:
46690Sstevel@tonic-gate if (tep->te_conp == NULL) {
46700Sstevel@tonic-gate /*
46710Sstevel@tonic-gate * Other end closed - generate discon_ind
46720Sstevel@tonic-gate * with reason 0 to cause an EPIPE but no
46730Sstevel@tonic-gate * read side error on AF_UNIX sockets.
46740Sstevel@tonic-gate */
46750Sstevel@tonic-gate freemsg(mp);
46760Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3,
46775240Snordmark SL_TRACE|SL_ERROR,
46785240Snordmark "tl_exdata: WREQ_ORDREL and no peer"));
46790Sstevel@tonic-gate tl_discon_ind(tep, 0);
46800Sstevel@tonic-gate return;
46810Sstevel@tonic-gate }
46820Sstevel@tonic-gate break;
46830Sstevel@tonic-gate
46840Sstevel@tonic-gate default:
46850Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
46865240Snordmark SL_TRACE|SL_ERROR,
46875240Snordmark "tl_wput:T_EXDATA_REQ:out of state, state=%d",
46885240Snordmark tep->te_state));
46890Sstevel@tonic-gate tl_merror(wq, mp, EPROTO);
46900Sstevel@tonic-gate return;
46910Sstevel@tonic-gate }
46920Sstevel@tonic-gate /*
46930Sstevel@tonic-gate * tep->te_state = NEXTSTATE(TE_EXDATA_REQ, tep->te_state);
46940Sstevel@tonic-gate * (state stays same on this event)
46950Sstevel@tonic-gate */
46960Sstevel@tonic-gate
46970Sstevel@tonic-gate /*
46980Sstevel@tonic-gate * get connected endpoint
46990Sstevel@tonic-gate */
47000Sstevel@tonic-gate if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
47010Sstevel@tonic-gate freemsg(mp);
47020Sstevel@tonic-gate /* Peer closed */
47030Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
47045240Snordmark "tl_exdata: peer gone"));
47050Sstevel@tonic-gate return;
47060Sstevel@tonic-gate }
47070Sstevel@tonic-gate
47080Sstevel@tonic-gate peer_rq = peer_tep->te_rq;
47090Sstevel@tonic-gate
47100Sstevel@tonic-gate /*
47110Sstevel@tonic-gate * Put it back if flow controlled
47120Sstevel@tonic-gate * Note: The number of messages already on the queue when we are closing
47130Sstevel@tonic-gate * is bounded, so we can ignore flow control.
47140Sstevel@tonic-gate */
47150Sstevel@tonic-gate if (!canputnext(peer_rq) && !closing) {
47160Sstevel@tonic-gate TL_PUTBQ(tep, mp);
47170Sstevel@tonic-gate return;
47180Sstevel@tonic-gate }
47190Sstevel@tonic-gate
47200Sstevel@tonic-gate /*
47210Sstevel@tonic-gate * validate state on peer
47220Sstevel@tonic-gate */
47230Sstevel@tonic-gate switch (peer_tep->te_state) {
47240Sstevel@tonic-gate case TS_DATA_XFER:
47250Sstevel@tonic-gate case TS_WIND_ORDREL:
47260Sstevel@tonic-gate /* valid states */
47270Sstevel@tonic-gate break;
47280Sstevel@tonic-gate default:
47290Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
47305240Snordmark "tl_exdata:rx side:invalid state"));
47310Sstevel@tonic-gate tl_merror(peer_tep->te_wq, mp, EPROTO);
47320Sstevel@tonic-gate return;
47330Sstevel@tonic-gate }
47340Sstevel@tonic-gate /*
47350Sstevel@tonic-gate * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state);
47360Sstevel@tonic-gate * (peer state stays same on this event)
47370Sstevel@tonic-gate */
47380Sstevel@tonic-gate /*
47390Sstevel@tonic-gate * reuse message block
47400Sstevel@tonic-gate */
47410Sstevel@tonic-gate prim->type = T_EXDATA_IND;
47420Sstevel@tonic-gate
47430Sstevel@tonic-gate /*
47440Sstevel@tonic-gate * send data to connected peer
47450Sstevel@tonic-gate */
47460Sstevel@tonic-gate putnext(peer_rq, mp);
47470Sstevel@tonic-gate }
47480Sstevel@tonic-gate
47490Sstevel@tonic-gate
47500Sstevel@tonic-gate
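/*
 * Orderly-release path (rough summary of the function below): tl_ordrel()
 * handles T_ORDREL_REQ. Unlike the data paths, it does advance the sender's
 * state via NEXTSTATE(TE_ORDREL_REQ, ...) and the peer's state via
 * NEXTSTATE(TE_ORDREL_IND, ...) before forwarding the mblk, rewritten as a
 * T_ORDREL_IND, to the peer's read queue.
 */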
47510Sstevel@tonic-gate static void
47520Sstevel@tonic-gate tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
47530Sstevel@tonic-gate {
47540Sstevel@tonic-gate queue_t *wq = tep->te_wq;
47550Sstevel@tonic-gate union T_primitives *prim = (union T_primitives *)mp->b_rptr;
47560Sstevel@tonic-gate ssize_t msz = MBLKL(mp);
47570Sstevel@tonic-gate tl_endpt_t *peer_tep;
47580Sstevel@tonic-gate queue_t *peer_rq;
47590Sstevel@tonic-gate boolean_t closing = tep->te_closing;
47600Sstevel@tonic-gate
47610Sstevel@tonic-gate if (msz < sizeof (struct T_ordrel_req)) {
47620Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
47635240Snordmark "tl_ordrel:invalid message"));
47640Sstevel@tonic-gate if (!closing) {
47650Sstevel@tonic-gate tl_merror(wq, mp, EPROTO);
47660Sstevel@tonic-gate } else {
47670Sstevel@tonic-gate freemsg(mp);
47680Sstevel@tonic-gate }
47690Sstevel@tonic-gate return;
47700Sstevel@tonic-gate }
47710Sstevel@tonic-gate
47720Sstevel@tonic-gate /*
47730Sstevel@tonic-gate * validate state
47740Sstevel@tonic-gate */
47750Sstevel@tonic-gate switch (tep->te_state) {
47760Sstevel@tonic-gate case TS_DATA_XFER:
47770Sstevel@tonic-gate case TS_WREQ_ORDREL:
47780Sstevel@tonic-gate /* valid states */
47790Sstevel@tonic-gate if (tep->te_conp != NULL)
47800Sstevel@tonic-gate break;
47810Sstevel@tonic-gate
47820Sstevel@tonic-gate if (tep->te_oconp == NULL)
47830Sstevel@tonic-gate break;
47840Sstevel@tonic-gate
47850Sstevel@tonic-gate /*
47860Sstevel@tonic-gate * For a socket the T_CONN_CON is sent early thus
47870Sstevel@tonic-gate * the peer might not yet have accepted the connection.
47880Sstevel@tonic-gate * If we are closing, queue the packet with the T_CONN_IND.
47890Sstevel@tonic-gate * Otherwise defer processing the packet until the peer
47900Sstevel@tonic-gate * accepts the connection.
47910Sstevel@tonic-gate * Note that the queue is noenabled when we go into this
47920Sstevel@tonic-gate * state.
47930Sstevel@tonic-gate */
47940Sstevel@tonic-gate if (!closing) {
47950Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
47965240Snordmark SL_TRACE|SL_ERROR,
47975240Snordmark "tl_ordrel: ocon"));
47980Sstevel@tonic-gate TL_PUTBQ(tep, mp);
47990Sstevel@tonic-gate return;
48000Sstevel@tonic-gate }
48010Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
48025240Snordmark "tl_ordrel: closing socket ocon"));
48030Sstevel@tonic-gate prim->type = T_ORDREL_IND;
48040Sstevel@tonic-gate (void) tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
48050Sstevel@tonic-gate return;
48060Sstevel@tonic-gate
48070Sstevel@tonic-gate default:
48080Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
48095240Snordmark SL_TRACE|SL_ERROR,
48105240Snordmark "tl_wput:T_ORDREL_REQ:out of state, state=%d",
48115240Snordmark tep->te_state));
48120Sstevel@tonic-gate if (!closing) {
48130Sstevel@tonic-gate tl_merror(wq, mp, EPROTO);
48140Sstevel@tonic-gate } else {
48150Sstevel@tonic-gate freemsg(mp);
48160Sstevel@tonic-gate }
48170Sstevel@tonic-gate return;
48180Sstevel@tonic-gate }
48190Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ORDREL_REQ, tep->te_state);
48200Sstevel@tonic-gate
48210Sstevel@tonic-gate /*
48220Sstevel@tonic-gate * get connected endpoint
48230Sstevel@tonic-gate */
48240Sstevel@tonic-gate if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
48250Sstevel@tonic-gate /* Peer closed */
48260Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
48275240Snordmark "tl_ordrel: peer gone"));
48280Sstevel@tonic-gate freemsg(mp);
48290Sstevel@tonic-gate return;
48300Sstevel@tonic-gate }
48310Sstevel@tonic-gate
48320Sstevel@tonic-gate peer_rq = peer_tep->te_rq;
48330Sstevel@tonic-gate
48340Sstevel@tonic-gate /*
48350Sstevel@tonic-gate * Put it back if flow controlled except when we are closing.
48360Sstevel@tonic-gate * Note: The number of messages already on the queue when we are closing
48370Sstevel@tonic-gate * is bounded, so we can ignore flow control.
48380Sstevel@tonic-gate */
48390Sstevel@tonic-gate if (! canputnext(peer_rq) && !closing) {
48400Sstevel@tonic-gate TL_PUTBQ(tep, mp);
48410Sstevel@tonic-gate return;
48420Sstevel@tonic-gate }
48430Sstevel@tonic-gate
48440Sstevel@tonic-gate /*
48450Sstevel@tonic-gate * validate state on peer
48460Sstevel@tonic-gate */
48470Sstevel@tonic-gate switch (peer_tep->te_state) {
48480Sstevel@tonic-gate case TS_DATA_XFER:
48490Sstevel@tonic-gate case TS_WIND_ORDREL:
48500Sstevel@tonic-gate /* valid states */
48510Sstevel@tonic-gate break;
48520Sstevel@tonic-gate default:
48530Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
48545240Snordmark "tl_ordrel:rx side:invalid state"));
48550Sstevel@tonic-gate tl_merror(peer_tep->te_wq, mp, EPROTO);
48560Sstevel@tonic-gate return;
48570Sstevel@tonic-gate }
48580Sstevel@tonic-gate peer_tep->te_state = NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state);
48590Sstevel@tonic-gate
48600Sstevel@tonic-gate /*
48610Sstevel@tonic-gate * reuse message block
48620Sstevel@tonic-gate */
48630Sstevel@tonic-gate prim->type = T_ORDREL_IND;
48640Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
48655240Snordmark "tl_ordrel: send ordrel_ind"));
48660Sstevel@tonic-gate
48670Sstevel@tonic-gate /*
48680Sstevel@tonic-gate * send data to connected peer
48690Sstevel@tonic-gate */
48700Sstevel@tonic-gate putnext(peer_rq, mp);
48710Sstevel@tonic-gate }
48720Sstevel@tonic-gate
48730Sstevel@tonic-gate
48740Sstevel@tonic-gate /*
48750Sstevel@tonic-gate * Send T_UDERROR_IND. The error should be from the <sys/errno.h> space.
48760Sstevel@tonic-gate */
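/*
 * The T_uderror_ind built below is laid out roughly as:
 *
 *   struct T_uderror_ind | dest address (padded to T_ALIGN) | options
 *
 * err_sz is sized accordingly: T_ALIGN(header + DEST_length) plus OPT_length,
 * with DEST_offset and OPT_offset pointing at the destination address and
 * options copied back out of the failed T_unitdata_req.
 */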
48770Sstevel@tonic-gate static void
48780Sstevel@tonic-gate tl_uderr(queue_t *wq, mblk_t *mp, t_scalar_t err)
48790Sstevel@tonic-gate {
48800Sstevel@tonic-gate size_t err_sz;
48810Sstevel@tonic-gate tl_endpt_t *tep;
48820Sstevel@tonic-gate struct T_unitdata_req *udreq;
48830Sstevel@tonic-gate mblk_t *err_mp;
48840Sstevel@tonic-gate t_scalar_t alen;
48850Sstevel@tonic-gate t_scalar_t olen;
48860Sstevel@tonic-gate struct T_uderror_ind *uderr;
48870Sstevel@tonic-gate uchar_t *addr_startp;
48880Sstevel@tonic-gate
48890Sstevel@tonic-gate err_sz = sizeof (struct T_uderror_ind);
48900Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr;
48910Sstevel@tonic-gate udreq = (struct T_unitdata_req *)mp->b_rptr;
48920Sstevel@tonic-gate alen = udreq->DEST_length;
48930Sstevel@tonic-gate olen = udreq->OPT_length;
48940Sstevel@tonic-gate
48950Sstevel@tonic-gate if (alen > 0)
48960Sstevel@tonic-gate err_sz = T_ALIGN(err_sz + alen);
48970Sstevel@tonic-gate if (olen > 0)
48980Sstevel@tonic-gate err_sz += olen;
48990Sstevel@tonic-gate
49000Sstevel@tonic-gate err_mp = allocb(err_sz, BPRI_MED);
49010Sstevel@tonic-gate if (! err_mp) {
49020Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
49035240Snordmark "tl_uderr:allocb failure"));
49040Sstevel@tonic-gate /*
49050Sstevel@tonic-gate * Note: no rollback of state needed as it does
49060Sstevel@tonic-gate * not change in connectionless transport
49070Sstevel@tonic-gate */
49080Sstevel@tonic-gate tl_memrecover(wq, mp, err_sz);
49090Sstevel@tonic-gate return;
49100Sstevel@tonic-gate }
49110Sstevel@tonic-gate
49120Sstevel@tonic-gate DB_TYPE(err_mp) = M_PROTO;
49130Sstevel@tonic-gate err_mp->b_wptr = err_mp->b_rptr + err_sz;
49140Sstevel@tonic-gate uderr = (struct T_uderror_ind *)err_mp->b_rptr;
49150Sstevel@tonic-gate uderr->PRIM_type = T_UDERROR_IND;
49160Sstevel@tonic-gate uderr->ERROR_type = err;
49170Sstevel@tonic-gate uderr->DEST_length = alen;
49180Sstevel@tonic-gate uderr->OPT_length = olen;
49190Sstevel@tonic-gate if (alen <= 0) {
49200Sstevel@tonic-gate uderr->DEST_offset = 0;
49210Sstevel@tonic-gate } else {
49220Sstevel@tonic-gate uderr->DEST_offset =
49235240Snordmark (t_scalar_t)sizeof (struct T_uderror_ind);
49240Sstevel@tonic-gate addr_startp = mp->b_rptr + udreq->DEST_offset;
49250Sstevel@tonic-gate bcopy(addr_startp, err_mp->b_rptr + uderr->DEST_offset,
49265240Snordmark (size_t)alen);
49270Sstevel@tonic-gate }
49280Sstevel@tonic-gate if (olen <= 0) {
49290Sstevel@tonic-gate uderr->OPT_offset = 0;
49300Sstevel@tonic-gate } else {
49310Sstevel@tonic-gate uderr->OPT_offset =
49325240Snordmark (t_scalar_t)T_ALIGN(sizeof (struct T_uderror_ind) +
49335240Snordmark uderr->DEST_length);
49340Sstevel@tonic-gate addr_startp = mp->b_rptr + udreq->OPT_offset;
49350Sstevel@tonic-gate bcopy(addr_startp, err_mp->b_rptr+uderr->OPT_offset,
49365240Snordmark (size_t)olen);
49370Sstevel@tonic-gate }
49380Sstevel@tonic-gate freemsg(mp);
49390Sstevel@tonic-gate
49400Sstevel@tonic-gate /*
49410Sstevel@tonic-gate * send indication message
49420Sstevel@tonic-gate */
49430Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_UDERROR_IND, tep->te_state);
49440Sstevel@tonic-gate
49450Sstevel@tonic-gate qreply(wq, err_mp);
49460Sstevel@tonic-gate }
49470Sstevel@tonic-gate
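/*
 * Serializer callback for T_UNITDATA_REQ (summary of the function below):
 * if the endpoint is not closing and already has queued messages, this one
 * is queued too to preserve ordering; otherwise it is processed immediately
 * via tl_unitdata(), or dropped if the read queue is already gone. The
 * callback then leaves the serializer and releases the endpoint reference,
 * which appears to have been taken when the callback was scheduled.
 */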
49480Sstevel@tonic-gate static void
49490Sstevel@tonic-gate tl_unitdata_ser(mblk_t *mp, tl_endpt_t *tep)
49500Sstevel@tonic-gate {
49510Sstevel@tonic-gate queue_t *wq = tep->te_wq;
49520Sstevel@tonic-gate
49530Sstevel@tonic-gate if (!tep->te_closing && (wq->q_first != NULL)) {
49540Sstevel@tonic-gate TL_PUTQ(tep, mp);
49550Sstevel@tonic-gate } else if (tep->te_rq != NULL)
49560Sstevel@tonic-gate tl_unitdata(mp, tep);
49570Sstevel@tonic-gate else
49580Sstevel@tonic-gate freemsg(mp);
49590Sstevel@tonic-gate
49600Sstevel@tonic-gate tl_serializer_exit(tep);
49610Sstevel@tonic-gate tl_refrele(tep);
49620Sstevel@tonic-gate }
49630Sstevel@tonic-gate
49640Sstevel@tonic-gate /*
49650Sstevel@tonic-gate * Handle T_unitdata_req.
49660Sstevel@tonic-gate * If TL_SET[U]CRED or TL_SOCKUCRED generate the credentials options.
49670Sstevel@tonic-gate * If this is a socket pass through options unmodified.
49680Sstevel@tonic-gate */
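/*
 * Rough flow of tl_unitdata(), as implemented below: validate the endpoint
 * state and the T_unitdata_req layout, locate the destination endpoint
 * (using the te_lastep cache when it is still valid), queue the message back
 * if the peer's read queue is flow controlled, then either reuse the request
 * mblk in place or allocate a fresh T_unitdata_ind, append credential options
 * when the peer asked for TL_SETCRED/TL_SETUCRED/TL_SOCKUCRED, and putnext()
 * the indication to the peer.
 */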
49690Sstevel@tonic-gate static void
49700Sstevel@tonic-gate tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
49710Sstevel@tonic-gate {
49720Sstevel@tonic-gate queue_t *wq = tep->te_wq;
49730Sstevel@tonic-gate soux_addr_t ux_addr;
49740Sstevel@tonic-gate tl_addr_t destaddr;
49750Sstevel@tonic-gate uchar_t *addr_startp;
49760Sstevel@tonic-gate tl_endpt_t *peer_tep;
49770Sstevel@tonic-gate struct T_unitdata_ind *udind;
49780Sstevel@tonic-gate struct T_unitdata_req *udreq;
49790Sstevel@tonic-gate ssize_t msz, ui_sz;
49800Sstevel@tonic-gate t_scalar_t alen, aoff, olen, ooff;
49810Sstevel@tonic-gate t_scalar_t oldolen = 0;
498211134SCasper.Dik@Sun.COM cred_t *cr = NULL;
498311134SCasper.Dik@Sun.COM pid_t cpid;
49840Sstevel@tonic-gate
49850Sstevel@tonic-gate udreq = (struct T_unitdata_req *)mp->b_rptr;
49860Sstevel@tonic-gate msz = MBLKL(mp);
49870Sstevel@tonic-gate
49880Sstevel@tonic-gate /*
49890Sstevel@tonic-gate * validate the state
49900Sstevel@tonic-gate */
49910Sstevel@tonic-gate if (tep->te_state != TS_IDLE) {
49920Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
49935240Snordmark SL_TRACE|SL_ERROR,
49945240Snordmark "tl_unitdata:T_UNITDATA_REQ:out of state"));
49950Sstevel@tonic-gate tl_merror(wq, mp, EPROTO);
49960Sstevel@tonic-gate return;
49970Sstevel@tonic-gate }
49980Sstevel@tonic-gate /*
49990Sstevel@tonic-gate * tep->te_state = NEXTSTATE(TE_UNITDATA_REQ, tep->te_state);
50000Sstevel@tonic-gate * (state does not change on this event)
50010Sstevel@tonic-gate */
50020Sstevel@tonic-gate
50030Sstevel@tonic-gate /*
50040Sstevel@tonic-gate * validate the message
50050Sstevel@tonic-gate * Note: dereference fields in struct inside message only
50060Sstevel@tonic-gate * after validating the message length.
50070Sstevel@tonic-gate */
50080Sstevel@tonic-gate if (msz < sizeof (struct T_unitdata_req)) {
50090Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
50105240Snordmark "tl_unitdata:invalid message length"));
50110Sstevel@tonic-gate tl_merror(wq, mp, EINVAL);
50120Sstevel@tonic-gate return;
50130Sstevel@tonic-gate }
50140Sstevel@tonic-gate alen = udreq->DEST_length;
50150Sstevel@tonic-gate aoff = udreq->DEST_offset;
50160Sstevel@tonic-gate oldolen = olen = udreq->OPT_length;
50170Sstevel@tonic-gate ooff = udreq->OPT_offset;
50180Sstevel@tonic-gate if (olen == 0)
50190Sstevel@tonic-gate ooff = 0;
50200Sstevel@tonic-gate
50210Sstevel@tonic-gate if (IS_SOCKET(tep)) {
50220Sstevel@tonic-gate if ((alen != TL_SOUX_ADDRLEN) ||
50230Sstevel@tonic-gate (aoff < 0) ||
50240Sstevel@tonic-gate (aoff + alen > msz) ||
50250Sstevel@tonic-gate (olen < 0) || (ooff < 0) ||
50260Sstevel@tonic-gate ((olen > 0) && ((ooff + olen) > msz))) {
50270Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
50285240Snordmark 1, SL_TRACE|SL_ERROR,
50295240Snordmark "tl_unitdata_req: invalid socket addr "
50305240Snordmark "(msz=%d, al=%d, ao=%d, ol=%d, oo = %d)",
50315240Snordmark (int)msz, alen, aoff, olen, ooff));
50320Sstevel@tonic-gate tl_error_ack(wq, mp, TSYSERR, EINVAL, T_UNITDATA_REQ);
50330Sstevel@tonic-gate return;
50340Sstevel@tonic-gate }
50350Sstevel@tonic-gate bcopy(mp->b_rptr + aoff, &ux_addr, TL_SOUX_ADDRLEN);
50360Sstevel@tonic-gate
50370Sstevel@tonic-gate if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) &&
50380Sstevel@tonic-gate (ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) {
50390Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor,
50405240Snordmark 1, SL_TRACE|SL_ERROR,
50415240Snordmark "tl_unitdata: invalid socket magic"));
50420Sstevel@tonic-gate tl_error_ack(wq, mp, TSYSERR, EINVAL, T_UNITDATA_REQ);
50430Sstevel@tonic-gate return;
50440Sstevel@tonic-gate }
50450Sstevel@tonic-gate } else {
50460Sstevel@tonic-gate if ((alen < 0) ||
50470Sstevel@tonic-gate (aoff < 0) ||
50480Sstevel@tonic-gate ((alen > 0) && ((aoff + alen) > msz)) ||
50490Sstevel@tonic-gate ((ssize_t)alen > (msz - sizeof (struct T_unitdata_req))) ||
50500Sstevel@tonic-gate ((aoff + alen) < 0) ||
50510Sstevel@tonic-gate ((olen > 0) && ((ooff + olen) > msz)) ||
50520Sstevel@tonic-gate (olen < 0) ||
50530Sstevel@tonic-gate (ooff < 0) ||
50540Sstevel@tonic-gate ((ssize_t)olen > (msz - sizeof (struct T_unitdata_req)))) {
50550Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
50560Sstevel@tonic-gate SL_TRACE|SL_ERROR,
50570Sstevel@tonic-gate "tl_unitdata:invalid unit data message"));
50580Sstevel@tonic-gate tl_merror(wq, mp, EINVAL);
50590Sstevel@tonic-gate return;
50600Sstevel@tonic-gate }
50610Sstevel@tonic-gate }
50620Sstevel@tonic-gate
50630Sstevel@tonic-gate /* Options not supported unless it's a socket */
50640Sstevel@tonic-gate if (alen == 0 || (olen != 0 && !IS_SOCKET(tep))) {
50650Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
50660Sstevel@tonic-gate "tl_unitdata:option use(unsupported) or zero len addr"));
50670Sstevel@tonic-gate tl_uderr(wq, mp, EPROTO);
50680Sstevel@tonic-gate return;
50690Sstevel@tonic-gate }
50700Sstevel@tonic-gate #ifdef DEBUG
50710Sstevel@tonic-gate /*
50720Sstevel@tonic-gate * Mild form of ASSERT()ion to detect broken TPI apps.
50730Sstevel@tonic-gate * if (! assertion)
50740Sstevel@tonic-gate * log warning;
50750Sstevel@tonic-gate */
50760Sstevel@tonic-gate if (! (aoff >= (t_scalar_t)sizeof (struct T_unitdata_req))) {
50770Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
50785240Snordmark "tl_unitdata:addr overlaps TPI message"));
50790Sstevel@tonic-gate }
50800Sstevel@tonic-gate #endif
50810Sstevel@tonic-gate /*
50820Sstevel@tonic-gate * get destination endpoint
50830Sstevel@tonic-gate */
50840Sstevel@tonic-gate destaddr.ta_alen = alen;
50850Sstevel@tonic-gate destaddr.ta_abuf = mp->b_rptr + aoff;
50860Sstevel@tonic-gate destaddr.ta_zoneid = tep->te_zoneid;
50870Sstevel@tonic-gate
50880Sstevel@tonic-gate /*
50890Sstevel@tonic-gate * Check whether the destination is the same as the one used previously
50900Sstevel@tonic-gate * and the destination endpoint is in the right state. If something is
50910Sstevel@tonic-gate * wrong, look the destination up again and cache it.
50920Sstevel@tonic-gate */
50930Sstevel@tonic-gate peer_tep = tep->te_lastep;
50940Sstevel@tonic-gate
50950Sstevel@tonic-gate if ((peer_tep == NULL) || peer_tep->te_closing ||
50960Sstevel@tonic-gate (peer_tep->te_state != TS_IDLE) ||
50970Sstevel@tonic-gate !tl_eqaddr(&destaddr, &peer_tep->te_ap)) {
50980Sstevel@tonic-gate /*
50990Sstevel@tonic-gate * Not the same as the cached destination; need to find the right
51000Sstevel@tonic-gate * destination.
51010Sstevel@tonic-gate */
51020Sstevel@tonic-gate peer_tep = (IS_SOCKET(tep) ?
51030Sstevel@tonic-gate tl_sock_find_peer(tep, &ux_addr) :
51040Sstevel@tonic-gate tl_find_peer(tep, &destaddr));
51050Sstevel@tonic-gate
51060Sstevel@tonic-gate if (peer_tep == NULL) {
51070Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3,
51085240Snordmark SL_TRACE|SL_ERROR,
51095240Snordmark "tl_unitdata:no one at destination address"));
51100Sstevel@tonic-gate tl_uderr(wq, mp, ECONNRESET);
51110Sstevel@tonic-gate return;
51120Sstevel@tonic-gate }
51130Sstevel@tonic-gate
51140Sstevel@tonic-gate /*
51150Sstevel@tonic-gate * Cache the new peer.
51160Sstevel@tonic-gate */
51170Sstevel@tonic-gate if (tep->te_lastep != NULL)
51180Sstevel@tonic-gate tl_refrele(tep->te_lastep);
51190Sstevel@tonic-gate
51200Sstevel@tonic-gate tep->te_lastep = peer_tep;
51210Sstevel@tonic-gate }
51220Sstevel@tonic-gate
51230Sstevel@tonic-gate if (peer_tep->te_state != TS_IDLE) {
51240Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
51255240Snordmark "tl_unitdata:provider in invalid state"));
51260Sstevel@tonic-gate tl_uderr(wq, mp, EPROTO);
51270Sstevel@tonic-gate return;
51280Sstevel@tonic-gate }
51290Sstevel@tonic-gate
51300Sstevel@tonic-gate ASSERT(peer_tep->te_rq != NULL);
51310Sstevel@tonic-gate
51320Sstevel@tonic-gate /*
51330Sstevel@tonic-gate * Put it back if flow controlled except when we are closing.
51340Sstevel@tonic-gate * Note: The number of messages already on the queue when we are closing
51350Sstevel@tonic-gate * is bounded, so we can ignore flow control.
51360Sstevel@tonic-gate */
51370Sstevel@tonic-gate if (!canputnext(peer_tep->te_rq) && !(tep->te_closing)) {
51380Sstevel@tonic-gate /* record what we are flow controlled on */
51390Sstevel@tonic-gate if (tep->te_flowq != NULL) {
51400Sstevel@tonic-gate list_remove(&tep->te_flowq->te_flowlist, tep);
51410Sstevel@tonic-gate }
51420Sstevel@tonic-gate list_insert_head(&peer_tep->te_flowlist, tep);
51430Sstevel@tonic-gate tep->te_flowq = peer_tep;
51440Sstevel@tonic-gate TL_PUTBQ(tep, mp);
51450Sstevel@tonic-gate return;
51460Sstevel@tonic-gate }
51470Sstevel@tonic-gate /*
51480Sstevel@tonic-gate * prepare indication message
51490Sstevel@tonic-gate */
51500Sstevel@tonic-gate
51510Sstevel@tonic-gate /*
51520Sstevel@tonic-gate * calculate length of message
51530Sstevel@tonic-gate */
515411134SCasper.Dik@Sun.COM if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED)) {
515511134SCasper.Dik@Sun.COM cr = msg_getcred(mp, &cpid);
515611134SCasper.Dik@Sun.COM ASSERT(cr != NULL);
515711134SCasper.Dik@Sun.COM
515811134SCasper.Dik@Sun.COM if (peer_tep->te_flag & TL_SETCRED) {
515911134SCasper.Dik@Sun.COM ASSERT(olen == 0);
516011134SCasper.Dik@Sun.COM olen = (t_scalar_t)sizeof (struct opthdr) +
516111134SCasper.Dik@Sun.COM OPTLEN(sizeof (tl_credopt_t));
516211134SCasper.Dik@Sun.COM /* 1 option only */
516311134SCasper.Dik@Sun.COM } else if (peer_tep->te_flag & TL_SETUCRED) {
516411134SCasper.Dik@Sun.COM ASSERT(olen == 0);
516511134SCasper.Dik@Sun.COM olen = (t_scalar_t)sizeof (struct opthdr) +
516611134SCasper.Dik@Sun.COM OPTLEN(ucredminsize(cr));
516711134SCasper.Dik@Sun.COM /* 1 option only */
516811134SCasper.Dik@Sun.COM } else {
516911134SCasper.Dik@Sun.COM /* Possibly more than one option */
517011134SCasper.Dik@Sun.COM olen += (t_scalar_t)sizeof (struct T_opthdr) +
517111134SCasper.Dik@Sun.COM OPTLEN(ucredminsize(cr));
517211134SCasper.Dik@Sun.COM }
51730Sstevel@tonic-gate }
51740Sstevel@tonic-gate
51750Sstevel@tonic-gate ui_sz = T_ALIGN(sizeof (struct T_unitdata_ind) + tep->te_alen) +
51765240Snordmark olen;
51770Sstevel@tonic-gate /*
51780Sstevel@tonic-gate * If the unitdata_ind fits and we are not adding options,
51790Sstevel@tonic-gate * reuse the udreq mblk.
51800Sstevel@tonic-gate */
51810Sstevel@tonic-gate if (msz >= ui_sz && alen >= tep->te_alen &&
51820Sstevel@tonic-gate !(peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED))) {
51830Sstevel@tonic-gate /*
51840Sstevel@tonic-gate * Reuse the original mblk. Leave options in place.
51850Sstevel@tonic-gate */
51860Sstevel@tonic-gate udind = (struct T_unitdata_ind *)mp->b_rptr;
51870Sstevel@tonic-gate udind->PRIM_type = T_UNITDATA_IND;
51880Sstevel@tonic-gate udind->SRC_length = tep->te_alen;
51890Sstevel@tonic-gate addr_startp = mp->b_rptr + udind->SRC_offset;
51900Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen);
51910Sstevel@tonic-gate } else {
51920Sstevel@tonic-gate /* Allocate a new T_unitdata_ind message */
51930Sstevel@tonic-gate mblk_t *ui_mp;
51940Sstevel@tonic-gate
51950Sstevel@tonic-gate ui_mp = allocb(ui_sz, BPRI_MED);
51960Sstevel@tonic-gate if (! ui_mp) {
51970Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 4, SL_TRACE,
51985240Snordmark "tl_unitdata:allocb failure:message queued"));
51990Sstevel@tonic-gate tl_memrecover(wq, mp, ui_sz);
52000Sstevel@tonic-gate return;
52010Sstevel@tonic-gate }
52020Sstevel@tonic-gate
52030Sstevel@tonic-gate /*
52040Sstevel@tonic-gate * fill in T_UNITDATA_IND contents
52050Sstevel@tonic-gate */
52060Sstevel@tonic-gate DB_TYPE(ui_mp) = M_PROTO;
52070Sstevel@tonic-gate ui_mp->b_wptr = ui_mp->b_rptr + ui_sz;
52080Sstevel@tonic-gate udind = (struct T_unitdata_ind *)ui_mp->b_rptr;
52090Sstevel@tonic-gate udind->PRIM_type = T_UNITDATA_IND;
52100Sstevel@tonic-gate udind->SRC_offset = (t_scalar_t)sizeof (struct T_unitdata_ind);
52110Sstevel@tonic-gate udind->SRC_length = tep->te_alen;
52120Sstevel@tonic-gate addr_startp = ui_mp->b_rptr + udind->SRC_offset;
52130Sstevel@tonic-gate bcopy(tep->te_abuf, addr_startp, tep->te_alen);
52140Sstevel@tonic-gate udind->OPT_offset =
52150Sstevel@tonic-gate (t_scalar_t)T_ALIGN(udind->SRC_offset + udind->SRC_length);
52160Sstevel@tonic-gate udind->OPT_length = olen;
52170Sstevel@tonic-gate if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED)) {
52188778SErik.Nordmark@Sun.COM
52190Sstevel@tonic-gate if (oldolen != 0) {
52200Sstevel@tonic-gate bcopy((void *)((uintptr_t)udreq + ooff),
52210Sstevel@tonic-gate (void *)((uintptr_t)udind +
52220Sstevel@tonic-gate udind->OPT_offset),
52230Sstevel@tonic-gate oldolen);
52240Sstevel@tonic-gate }
52258778SErik.Nordmark@Sun.COM ASSERT(cr != NULL);
52268778SErik.Nordmark@Sun.COM
52270Sstevel@tonic-gate tl_fill_option(ui_mp->b_rptr + udind->OPT_offset +
52288778SErik.Nordmark@Sun.COM oldolen, cr, cpid,
52291676Sjpk peer_tep->te_flag, peer_tep->te_credp);
52300Sstevel@tonic-gate } else {
52310Sstevel@tonic-gate bcopy((void *)((uintptr_t)udreq + ooff),
52325240Snordmark (void *)((uintptr_t)udind + udind->OPT_offset),
52335240Snordmark olen);
52340Sstevel@tonic-gate }
52350Sstevel@tonic-gate
52360Sstevel@tonic-gate /*
52370Sstevel@tonic-gate * relink data blocks from mp to ui_mp
52380Sstevel@tonic-gate */
52390Sstevel@tonic-gate ui_mp->b_cont = mp->b_cont;
52400Sstevel@tonic-gate freeb(mp);
52410Sstevel@tonic-gate mp = ui_mp;
52420Sstevel@tonic-gate }
52430Sstevel@tonic-gate /*
52440Sstevel@tonic-gate * send indication message
52450Sstevel@tonic-gate */
52460Sstevel@tonic-gate peer_tep->te_state = NEXTSTATE(TE_UNITDATA_IND, peer_tep->te_state);
52470Sstevel@tonic-gate putnext(peer_tep->te_rq, mp);
52480Sstevel@tonic-gate }
52490Sstevel@tonic-gate
52500Sstevel@tonic-gate
52510Sstevel@tonic-gate
52520Sstevel@tonic-gate /*
52530Sstevel@tonic-gate * Check if a given addr is in use.
52540Sstevel@tonic-gate * Endpoint ptr returned or NULL if not found.
52550Sstevel@tonic-gate * The name space is separate for each mode. This implies that
52560Sstevel@tonic-gate * sockets get their own name space.
52570Sstevel@tonic-gate */
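/*
 * Note: the lookup below is a mod_hash_find_cb() on te_addrhash keyed by the
 * tl_addr_t. tl_find_callback is defined elsewhere in this file; judging from
 * the tl_refrele() on a closing peer below, it appears to take a reference on
 * the matched endpoint, which the caller is responsible for releasing.
 */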
52580Sstevel@tonic-gate static tl_endpt_t *
52590Sstevel@tonic-gate tl_find_peer(tl_endpt_t *tep, tl_addr_t *ap)
52600Sstevel@tonic-gate {
52610Sstevel@tonic-gate tl_endpt_t *peer_tep = NULL;
52620Sstevel@tonic-gate int rc = mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)ap,
52630Sstevel@tonic-gate (mod_hash_val_t *)&peer_tep, tl_find_callback);
52640Sstevel@tonic-gate
52650Sstevel@tonic-gate ASSERT(! IS_SOCKET(tep));
52660Sstevel@tonic-gate
52670Sstevel@tonic-gate ASSERT(ap != NULL && ap->ta_alen > 0);
52680Sstevel@tonic-gate ASSERT(ap->ta_zoneid == tep->te_zoneid);
52690Sstevel@tonic-gate ASSERT(ap->ta_abuf != NULL);
5270*11474SJonathan.Adams@Sun.COM EQUIV(rc == 0, peer_tep != NULL);
5271*11474SJonathan.Adams@Sun.COM IMPLY(rc == 0,
52725240Snordmark (tep->te_zoneid == peer_tep->te_zoneid) &&
5273*11474SJonathan.Adams@Sun.COM (tep->te_transport == peer_tep->te_transport));
52740Sstevel@tonic-gate
52750Sstevel@tonic-gate if ((rc == 0) && (peer_tep->te_closing)) {
52760Sstevel@tonic-gate tl_refrele(peer_tep);
52770Sstevel@tonic-gate peer_tep = NULL;
52780Sstevel@tonic-gate }
52790Sstevel@tonic-gate
52800Sstevel@tonic-gate return (peer_tep);
52810Sstevel@tonic-gate }
52820Sstevel@tonic-gate
52830Sstevel@tonic-gate /*
52840Sstevel@tonic-gate * Find peer for a socket based on unix domain address.
52850Sstevel@tonic-gate * For implicit addresses our peer can be found by minor number in ai hash. For
52867409SRic.Aleshire@Sun.COM * explicit binds we look vnode address at addr_hash.
52870Sstevel@tonic-gate * explicit binds we look up the vnode address in the addr_hash.
52880Sstevel@tonic-gate static tl_endpt_t *
52890Sstevel@tonic-gate tl_sock_find_peer(tl_endpt_t *tep, soux_addr_t *ux_addr)
52900Sstevel@tonic-gate {
52910Sstevel@tonic-gate tl_endpt_t *peer_tep = NULL;
52920Sstevel@tonic-gate mod_hash_t *hash = ux_addr->soua_magic == SOU_MAGIC_IMPLICIT ?
52930Sstevel@tonic-gate tep->te_aihash : tep->te_addrhash;
52940Sstevel@tonic-gate int rc = mod_hash_find_cb(hash, (mod_hash_key_t)ux_addr->soua_vp,
52950Sstevel@tonic-gate (mod_hash_val_t *)&peer_tep, tl_find_callback);
52960Sstevel@tonic-gate
52970Sstevel@tonic-gate ASSERT(IS_SOCKET(tep));
5298*11474SJonathan.Adams@Sun.COM EQUIV(rc == 0, peer_tep != NULL);
5299*11474SJonathan.Adams@Sun.COM IMPLY(rc == 0, (tep->te_transport == peer_tep->te_transport));
53007409SRic.Aleshire@Sun.COM
53017409SRic.Aleshire@Sun.COM if (peer_tep != NULL) {
53027409SRic.Aleshire@Sun.COM /* Don't attempt to use closing peer. */
53037409SRic.Aleshire@Sun.COM if (peer_tep->te_closing)
53047409SRic.Aleshire@Sun.COM goto errout;
53057409SRic.Aleshire@Sun.COM
53067409SRic.Aleshire@Sun.COM /*
53077409SRic.Aleshire@Sun.COM * Cross-zone unix sockets are permitted, but for Trusted
53087409SRic.Aleshire@Sun.COM * Extensions only, the "server" for these must be in the
53097409SRic.Aleshire@Sun.COM * global zone.
53107409SRic.Aleshire@Sun.COM */
53117409SRic.Aleshire@Sun.COM if ((peer_tep->te_zoneid != tep->te_zoneid) &&
53127409SRic.Aleshire@Sun.COM is_system_labeled() &&
53137409SRic.Aleshire@Sun.COM (peer_tep->te_zoneid != GLOBAL_ZONEID))
53147409SRic.Aleshire@Sun.COM goto errout;
53150Sstevel@tonic-gate }
53160Sstevel@tonic-gate
53170Sstevel@tonic-gate return (peer_tep);
53187409SRic.Aleshire@Sun.COM
53197409SRic.Aleshire@Sun.COM errout:
53207409SRic.Aleshire@Sun.COM tl_refrele(peer_tep);
53217409SRic.Aleshire@Sun.COM return (NULL);
53220Sstevel@tonic-gate }
53230Sstevel@tonic-gate
53240Sstevel@tonic-gate /*
53250Sstevel@tonic-gate * Generate a free addr and return it in the struct pointed to by ap,
53260Sstevel@tonic-gate * allocating space for the address buffer.
53270Sstevel@tonic-gate * The generated address will be at least 4 bytes long and, if req->ta_alen
53280Sstevel@tonic-gate * exceeds 4 bytes, be req->ta_alen bytes long.
53290Sstevel@tonic-gate *
53300Sstevel@tonic-gate * If address is found it will be inserted in the hash.
53310Sstevel@tonic-gate *
53320Sstevel@tonic-gate * If req->ta_alen is larger than the default alen (4 bytes) the last
53330Sstevel@tonic-gate * alen-4 bytes will always be the same as in req.
53340Sstevel@tonic-gate *
53350Sstevel@tonic-gate * Return 0 for failure.
53360Sstevel@tonic-gate * Return non-zero for success.
53370Sstevel@tonic-gate */
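/*
 * A sketch of the search loop below: the first candidate address is the
 * endpoint's own minor number (copied into the first 4 bytes of te_abuf);
 * if the reserved hash insert fails because that address is already taken,
 * the shared te_defaddr counter is copied in and atomically bumped, and the
 * insert is retried, up to 2^32 times.
 */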
53380Sstevel@tonic-gate static boolean_t
53390Sstevel@tonic-gate tl_get_any_addr(tl_endpt_t *tep, tl_addr_t *req)
53400Sstevel@tonic-gate {
53410Sstevel@tonic-gate t_scalar_t alen;
53420Sstevel@tonic-gate uint32_t loopcnt; /* Limit loop to 2^32 */
53430Sstevel@tonic-gate
53440Sstevel@tonic-gate ASSERT(tep->te_hash_hndl != NULL);
53450Sstevel@tonic-gate ASSERT(! IS_SOCKET(tep));
53460Sstevel@tonic-gate
53470Sstevel@tonic-gate if (tep->te_hash_hndl == NULL)
53480Sstevel@tonic-gate return (B_FALSE);
53490Sstevel@tonic-gate
53500Sstevel@tonic-gate /*
53510Sstevel@tonic-gate * check if default addr is in use
53520Sstevel@tonic-gate * if it is - bump it and try again
53530Sstevel@tonic-gate */
53540Sstevel@tonic-gate if (req == NULL) {
53550Sstevel@tonic-gate alen = sizeof (uint32_t);
53560Sstevel@tonic-gate } else {
53570Sstevel@tonic-gate alen = max(req->ta_alen, sizeof (uint32_t));
53580Sstevel@tonic-gate ASSERT(tep->te_zoneid == req->ta_zoneid);
53590Sstevel@tonic-gate }
53600Sstevel@tonic-gate
53610Sstevel@tonic-gate if (tep->te_alen < alen) {
53620Sstevel@tonic-gate void *abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP);
53630Sstevel@tonic-gate
53640Sstevel@tonic-gate /*
53650Sstevel@tonic-gate * Not enough space in tep->te_abuf to hold the address,
53660Sstevel@tonic-gate * allocate a bigger space.
53670Sstevel@tonic-gate */
53680Sstevel@tonic-gate if (abuf == NULL)
53690Sstevel@tonic-gate return (B_FALSE);
53700Sstevel@tonic-gate
53710Sstevel@tonic-gate if (tep->te_alen > 0)
53720Sstevel@tonic-gate kmem_free(tep->te_abuf, tep->te_alen);
53730Sstevel@tonic-gate
53740Sstevel@tonic-gate tep->te_alen = alen;
53750Sstevel@tonic-gate tep->te_abuf = abuf;
53760Sstevel@tonic-gate }
53770Sstevel@tonic-gate
53780Sstevel@tonic-gate /* Copy in the address in req */
53790Sstevel@tonic-gate if (req != NULL) {
53800Sstevel@tonic-gate ASSERT(alen >= req->ta_alen);
53810Sstevel@tonic-gate bcopy(req->ta_abuf, tep->te_abuf, (size_t)req->ta_alen);
53820Sstevel@tonic-gate }
53830Sstevel@tonic-gate
53840Sstevel@tonic-gate /*
53850Sstevel@tonic-gate * First try minor number then try default addresses.
53860Sstevel@tonic-gate */
53870Sstevel@tonic-gate bcopy(&tep->te_minor, tep->te_abuf, sizeof (uint32_t));
53880Sstevel@tonic-gate
53890Sstevel@tonic-gate for (loopcnt = 0; loopcnt < UINT32_MAX; loopcnt++) {
53900Sstevel@tonic-gate if (mod_hash_insert_reserve(tep->te_addrhash,
53915240Snordmark (mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep,
53925240Snordmark tep->te_hash_hndl) == 0) {
53930Sstevel@tonic-gate /*
53940Sstevel@tonic-gate * found free address
53950Sstevel@tonic-gate */
53960Sstevel@tonic-gate tep->te_flag |= TL_ADDRHASHED;
53970Sstevel@tonic-gate tep->te_hash_hndl = NULL;
53980Sstevel@tonic-gate
53990Sstevel@tonic-gate return (B_TRUE); /* successful return */
54000Sstevel@tonic-gate }
54010Sstevel@tonic-gate /*
54020Sstevel@tonic-gate * Use default address.
54030Sstevel@tonic-gate */
54040Sstevel@tonic-gate bcopy(&tep->te_defaddr, tep->te_abuf, sizeof (uint32_t));
54050Sstevel@tonic-gate atomic_add_32(&tep->te_defaddr, 1);
54060Sstevel@tonic-gate }
54070Sstevel@tonic-gate
54080Sstevel@tonic-gate /*
54090Sstevel@tonic-gate * Failed to find anything.
54100Sstevel@tonic-gate */
54110Sstevel@tonic-gate (void) (STRLOG(TL_ID, -1, 1, SL_ERROR,
54125240Snordmark "tl_get_any_addr:looped 2^32 times"));
54130Sstevel@tonic-gate return (B_FALSE);
54140Sstevel@tonic-gate }
54150Sstevel@tonic-gate
54160Sstevel@tonic-gate /*
54170Sstevel@tonic-gate * reallocb + set r/w ptrs to reflect size.
54180Sstevel@tonic-gate */
54190Sstevel@tonic-gate static mblk_t *
54200Sstevel@tonic-gate tl_resizemp(mblk_t *mp, ssize_t new_size)
54210Sstevel@tonic-gate {
54220Sstevel@tonic-gate if ((mp = reallocb(mp, new_size, 0)) == NULL)
54230Sstevel@tonic-gate return (NULL);
54240Sstevel@tonic-gate
54250Sstevel@tonic-gate mp->b_rptr = DB_BASE(mp);
54260Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + new_size;
54270Sstevel@tonic-gate return (mp);
54280Sstevel@tonic-gate }
54290Sstevel@tonic-gate
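/*
 * Back-enable all connectionless endpoints that recorded themselves on this
 * endpoint's te_flowlist while flow controlled (see the function below):
 * each is re-enabled via TL_QENABLE unless it is closing, and is removed
 * from the list with its te_flowq pointer cleared.
 */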
54300Sstevel@tonic-gate static void
54310Sstevel@tonic-gate tl_cl_backenable(tl_endpt_t *tep)
54320Sstevel@tonic-gate {
54330Sstevel@tonic-gate list_t *l = &tep->te_flowlist;
54340Sstevel@tonic-gate tl_endpt_t *elp;
54350Sstevel@tonic-gate
54360Sstevel@tonic-gate ASSERT(IS_CLTS(tep));
54370Sstevel@tonic-gate
54380Sstevel@tonic-gate for (elp = list_head(l); elp != NULL; elp = list_head(l)) {
54390Sstevel@tonic-gate ASSERT(tep->te_ser == elp->te_ser);
54400Sstevel@tonic-gate ASSERT(elp->te_flowq == tep);
54410Sstevel@tonic-gate if (! elp->te_closing)
54420Sstevel@tonic-gate TL_QENABLE(elp);
54430Sstevel@tonic-gate elp->te_flowq = NULL;
54440Sstevel@tonic-gate list_remove(l, elp);
54450Sstevel@tonic-gate }
54460Sstevel@tonic-gate }
54470Sstevel@tonic-gate
54480Sstevel@tonic-gate /*
54490Sstevel@tonic-gate * Unconnect endpoints.
54500Sstevel@tonic-gate */
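/*
 * Expanded summary of the cases handled below: (1) a listener with pending
 * T_CONN_INDs sends a T_DISCON_IND(ECONNREFUSED) to every queued client;
 * (2) a client with an outstanding connect (te_oconp set) queues or sends a
 * T_DISCON_IND/T_ORDREL_IND toward the server; (3) an established connection
 * sends an ordrel or discon indication to the connected peer and, on close,
 * clears the te_conp cross-pointers.
 */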
54510Sstevel@tonic-gate static void
54520Sstevel@tonic-gate tl_co_unconnect(tl_endpt_t *tep)
54530Sstevel@tonic-gate {
54540Sstevel@tonic-gate tl_endpt_t *peer_tep = tep->te_conp;
54550Sstevel@tonic-gate tl_endpt_t *srv_tep = tep->te_oconp;
54560Sstevel@tonic-gate list_t *l;
54570Sstevel@tonic-gate tl_icon_t *tip;
54580Sstevel@tonic-gate tl_endpt_t *cl_tep;
54590Sstevel@tonic-gate mblk_t *d_mp;
54600Sstevel@tonic-gate
54610Sstevel@tonic-gate ASSERT(IS_COTS(tep));
54620Sstevel@tonic-gate /*
54630Sstevel@tonic-gate * If our peer is closing, don't use it.
54640Sstevel@tonic-gate */
54650Sstevel@tonic-gate if ((peer_tep != NULL) && peer_tep->te_closing) {
54660Sstevel@tonic-gate TL_UNCONNECT(tep->te_conp);
54670Sstevel@tonic-gate peer_tep = NULL;
54680Sstevel@tonic-gate }
54690Sstevel@tonic-gate if ((srv_tep != NULL) && srv_tep->te_closing) {
54700Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
54710Sstevel@tonic-gate srv_tep = NULL;
54720Sstevel@tonic-gate }
54730Sstevel@tonic-gate
54740Sstevel@tonic-gate if (tep->te_nicon > 0) {
54750Sstevel@tonic-gate l = &tep->te_iconp;
54760Sstevel@tonic-gate /*
54770Sstevel@tonic-gate * If incoming connect requests are pending, change the state
54780Sstevel@tonic-gate * of the clients on the disconnect ind event and send a
54790Sstevel@tonic-gate * discon_ind pdu to the modules above them.
54800Sstevel@tonic-gate * For the server: all clients get disconnected.
54810Sstevel@tonic-gate */
54820Sstevel@tonic-gate
54830Sstevel@tonic-gate while (tep->te_nicon > 0) {
54840Sstevel@tonic-gate tip = list_head(l);
54850Sstevel@tonic-gate cl_tep = tip->ti_tep;
54860Sstevel@tonic-gate
54870Sstevel@tonic-gate if (cl_tep == NULL) {
54880Sstevel@tonic-gate tl_freetip(tep, tip);
54890Sstevel@tonic-gate continue;
54900Sstevel@tonic-gate }
54910Sstevel@tonic-gate
54920Sstevel@tonic-gate if (cl_tep->te_oconp != NULL) {
54930Sstevel@tonic-gate ASSERT(cl_tep != cl_tep->te_oconp);
54940Sstevel@tonic-gate TL_UNCONNECT(cl_tep->te_oconp);
54950Sstevel@tonic-gate }
54960Sstevel@tonic-gate
54970Sstevel@tonic-gate if (cl_tep->te_closing) {
54980Sstevel@tonic-gate tl_freetip(tep, tip);
54990Sstevel@tonic-gate continue;
55000Sstevel@tonic-gate }
55010Sstevel@tonic-gate
55020Sstevel@tonic-gate enableok(cl_tep->te_wq);
55030Sstevel@tonic-gate TL_QENABLE(cl_tep);
55040Sstevel@tonic-gate d_mp = tl_discon_ind_alloc(ECONNREFUSED, BADSEQNUM);
55050Sstevel@tonic-gate if (d_mp != NULL) {
55060Sstevel@tonic-gate cl_tep->te_state = TS_IDLE;
55070Sstevel@tonic-gate putnext(cl_tep->te_rq, d_mp);
55080Sstevel@tonic-gate } else {
55090Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3,
55105240Snordmark SL_TRACE|SL_ERROR,
55115240Snordmark "tl_co_unconnect:icmng: "
55125240Snordmark "allocb failure"));
55130Sstevel@tonic-gate }
55140Sstevel@tonic-gate tl_freetip(tep, tip);
55150Sstevel@tonic-gate }
55160Sstevel@tonic-gate } else if (srv_tep != NULL) {
55170Sstevel@tonic-gate /*
55180Sstevel@tonic-gate * If outgoing request pending, change state
55190Sstevel@tonic-gate * of server on discon ind event
55200Sstevel@tonic-gate */
55210Sstevel@tonic-gate
55220Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect &&
55230Sstevel@tonic-gate IS_COTSORD(srv_tep) &&
55240Sstevel@tonic-gate !tl_icon_hasprim(srv_tep, tep->te_seqno, T_ORDREL_IND)) {
55250Sstevel@tonic-gate /*
55260Sstevel@tonic-gate * Queue ordrel_ind for server to be picked up
55270Sstevel@tonic-gate * when the connection is accepted.
55280Sstevel@tonic-gate */
55290Sstevel@tonic-gate d_mp = tl_ordrel_ind_alloc();
55300Sstevel@tonic-gate } else {
55310Sstevel@tonic-gate /*
55320Sstevel@tonic-gate * send discon_ind to server
55330Sstevel@tonic-gate */
55340Sstevel@tonic-gate d_mp = tl_discon_ind_alloc(ECONNRESET, tep->te_seqno);
55350Sstevel@tonic-gate }
55360Sstevel@tonic-gate if (d_mp == NULL) {
55370Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3,
55385240Snordmark SL_TRACE|SL_ERROR,
55395240Snordmark "tl_co_unconnect:outgoing:allocb failure"));
55400Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
55410Sstevel@tonic-gate goto discon_peer;
55420Sstevel@tonic-gate }
55430Sstevel@tonic-gate
55440Sstevel@tonic-gate /*
55450Sstevel@tonic-gate * If this is a socket the T_DISCON_IND is queued with
55460Sstevel@tonic-gate * the T_CONN_IND. Otherwise the T_CONN_IND is removed
55470Sstevel@tonic-gate * from the list of pending connections.
55480Sstevel@tonic-gate * Note that when te_oconp is set the peer better have
55490Sstevel@tonic-gate * a t_connind_t for the client.
55500Sstevel@tonic-gate */
55510Sstevel@tonic-gate if (IS_SOCKET(tep) && !tl_disable_early_connect) {
55520Sstevel@tonic-gate /*
55530Sstevel@tonic-gate * Queue the disconnection message.
55540Sstevel@tonic-gate */
55550Sstevel@tonic-gate tl_icon_queuemsg(srv_tep, tep->te_seqno, d_mp);
55560Sstevel@tonic-gate } else {
55570Sstevel@tonic-gate tip = tl_icon_find(srv_tep, tep->te_seqno);
55580Sstevel@tonic-gate if (tip == NULL) {
55590Sstevel@tonic-gate freemsg(d_mp);
55600Sstevel@tonic-gate } else {
55610Sstevel@tonic-gate ASSERT(tep == tip->ti_tep);
55620Sstevel@tonic-gate ASSERT(tep->te_ser == srv_tep->te_ser);
55630Sstevel@tonic-gate /*
55640Sstevel@tonic-gate * Delete tip from the server list.
55650Sstevel@tonic-gate */
55660Sstevel@tonic-gate if (srv_tep->te_nicon == 1) {
55670Sstevel@tonic-gate srv_tep->te_state =
55680Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND2,
55695240Snordmark srv_tep->te_state);
55700Sstevel@tonic-gate } else {
55710Sstevel@tonic-gate srv_tep->te_state =
55720Sstevel@tonic-gate NEXTSTATE(TE_DISCON_IND3,
55735240Snordmark srv_tep->te_state);
55740Sstevel@tonic-gate }
55750Sstevel@tonic-gate ASSERT(*(uint32_t *)(d_mp->b_rptr) ==
55760Sstevel@tonic-gate T_DISCON_IND);
55770Sstevel@tonic-gate putnext(srv_tep->te_rq, d_mp);
55780Sstevel@tonic-gate tl_freetip(srv_tep, tip);
55790Sstevel@tonic-gate }
55800Sstevel@tonic-gate TL_UNCONNECT(tep->te_oconp);
55810Sstevel@tonic-gate srv_tep = NULL;
55820Sstevel@tonic-gate }
55830Sstevel@tonic-gate } else if (peer_tep != NULL) {
55840Sstevel@tonic-gate /*
55850Sstevel@tonic-gate * unconnect existing connection
55860Sstevel@tonic-gate * If connected, change state of peer on
55870Sstevel@tonic-gate * discon ind event and send discon ind pdu
55880Sstevel@tonic-gate * to module above it
55890Sstevel@tonic-gate */
55900Sstevel@tonic-gate
55910Sstevel@tonic-gate ASSERT(tep->te_ser == peer_tep->te_ser);
55920Sstevel@tonic-gate if (IS_COTSORD(peer_tep) &&
55930Sstevel@tonic-gate (peer_tep->te_state == TS_WIND_ORDREL ||
55940Sstevel@tonic-gate peer_tep->te_state == TS_DATA_XFER)) {
55950Sstevel@tonic-gate /*
55960Sstevel@tonic-gate * send ordrel ind
55970Sstevel@tonic-gate */
55980Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
55990Sstevel@tonic-gate "tl_co_unconnect:connected: ordrel_ind state %d->%d",
56005240Snordmark peer_tep->te_state,
56015240Snordmark NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state)));
56020Sstevel@tonic-gate d_mp = tl_ordrel_ind_alloc();
56030Sstevel@tonic-gate if (! d_mp) {
56040Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3,
56050Sstevel@tonic-gate SL_TRACE|SL_ERROR,
56060Sstevel@tonic-gate "tl_co_unconnect:connected:"
56070Sstevel@tonic-gate "allocb failure"));
56080Sstevel@tonic-gate /*
56090Sstevel@tonic-gate * Continue with cleaning up peer as
56100Sstevel@tonic-gate * this side may go away with the close
56110Sstevel@tonic-gate */
56120Sstevel@tonic-gate TL_QENABLE(peer_tep);
56130Sstevel@tonic-gate goto discon_peer;
56140Sstevel@tonic-gate }
56150Sstevel@tonic-gate peer_tep->te_state =
56165240Snordmark NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state);
56170Sstevel@tonic-gate
56180Sstevel@tonic-gate putnext(peer_tep->te_rq, d_mp);
56190Sstevel@tonic-gate /*
56200Sstevel@tonic-gate * Handle flow control case. This will generate
56210Sstevel@tonic-gate * a t_discon_ind message with reason 0 if there
56220Sstevel@tonic-gate * is data queued on the write side.
56230Sstevel@tonic-gate */
56240Sstevel@tonic-gate TL_QENABLE(peer_tep);
56250Sstevel@tonic-gate } else if (IS_COTSORD(peer_tep) &&
56260Sstevel@tonic-gate peer_tep->te_state == TS_WREQ_ORDREL) {
56270Sstevel@tonic-gate /*
56280Sstevel@tonic-gate * Sent an ordrel_ind. We send a discon
56290Sstevel@tonic-gate * with error 0 to inform that the peer is gone.
56300Sstevel@tonic-gate */
56310Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3,
56325240Snordmark SL_TRACE|SL_ERROR,
56335240Snordmark "tl_co_unconnect: discon in state %d",
56345240Snordmark tep->te_state));
56350Sstevel@tonic-gate tl_discon_ind(peer_tep, 0);
56360Sstevel@tonic-gate } else {
56370Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3,
56385240Snordmark SL_TRACE|SL_ERROR,
56395240Snordmark "tl_co_unconnect: state %d", tep->te_state));
56400Sstevel@tonic-gate tl_discon_ind(peer_tep, ECONNRESET);
56410Sstevel@tonic-gate }
56420Sstevel@tonic-gate
56430Sstevel@tonic-gate discon_peer:
56440Sstevel@tonic-gate /*
56450Sstevel@tonic-gate * Disconnect cross-pointers only for close
56460Sstevel@tonic-gate */
56470Sstevel@tonic-gate if (tep->te_closing) {
56480Sstevel@tonic-gate peer_tep = tep->te_conp;
56490Sstevel@tonic-gate TL_REMOVE_PEER(peer_tep->te_conp);
56500Sstevel@tonic-gate TL_REMOVE_PEER(tep->te_conp);
56510Sstevel@tonic-gate }
56520Sstevel@tonic-gate }
56530Sstevel@tonic-gate }
56540Sstevel@tonic-gate
56550Sstevel@tonic-gate /*
56560Sstevel@tonic-gate * Note: The following routine does not recover from allocb()
56570Sstevel@tonic-gate * failures.
56580Sstevel@tonic-gate * The reason should be from the <sys/errno.h> space.
56590Sstevel@tonic-gate */
56600Sstevel@tonic-gate static void
56610Sstevel@tonic-gate tl_discon_ind(tl_endpt_t *tep, uint32_t reason)
56620Sstevel@tonic-gate {
56630Sstevel@tonic-gate mblk_t *d_mp;
56640Sstevel@tonic-gate
56650Sstevel@tonic-gate if (tep->te_closing)
56660Sstevel@tonic-gate return;
56670Sstevel@tonic-gate
56680Sstevel@tonic-gate /*
56690Sstevel@tonic-gate * flush the queues.
56700Sstevel@tonic-gate */
56710Sstevel@tonic-gate flushq(tep->te_rq, FLUSHDATA);
56720Sstevel@tonic-gate (void) putnextctl1(tep->te_rq, M_FLUSH, FLUSHRW);
56730Sstevel@tonic-gate
56740Sstevel@tonic-gate /*
56750Sstevel@tonic-gate * send discon ind
56760Sstevel@tonic-gate */
56770Sstevel@tonic-gate d_mp = tl_discon_ind_alloc(reason, tep->te_seqno);
56780Sstevel@tonic-gate if (! d_mp) {
56790Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
56805240Snordmark "tl_discon_ind:allocb failure"));
56810Sstevel@tonic-gate return;
56820Sstevel@tonic-gate }
56830Sstevel@tonic-gate tep->te_state = TS_IDLE;
56840Sstevel@tonic-gate putnext(tep->te_rq, d_mp);
56850Sstevel@tonic-gate }
56860Sstevel@tonic-gate
56870Sstevel@tonic-gate /*
56880Sstevel@tonic-gate * Note: The following routine does not recover from allocb()
56890Sstevel@tonic-gate * failures.
56900Sstevel@tonic-gate * The reason should be from the <sys/errno.h> space.
56910Sstevel@tonic-gate */
56920Sstevel@tonic-gate static mblk_t *
56930Sstevel@tonic-gate tl_discon_ind_alloc(uint32_t reason, t_scalar_t seqnum)
56940Sstevel@tonic-gate {
56950Sstevel@tonic-gate mblk_t *mp;
56960Sstevel@tonic-gate struct T_discon_ind *tdi;
56970Sstevel@tonic-gate
56980Sstevel@tonic-gate if (mp = allocb(sizeof (struct T_discon_ind), BPRI_MED)) {
56990Sstevel@tonic-gate DB_TYPE(mp) = M_PROTO;
57000Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (struct T_discon_ind);
57010Sstevel@tonic-gate tdi = (struct T_discon_ind *)mp->b_rptr;
57020Sstevel@tonic-gate tdi->PRIM_type = T_DISCON_IND;
57030Sstevel@tonic-gate tdi->DISCON_reason = reason;
57040Sstevel@tonic-gate tdi->SEQ_number = seqnum;
57050Sstevel@tonic-gate }
57060Sstevel@tonic-gate return (mp);
57070Sstevel@tonic-gate }
57080Sstevel@tonic-gate
57090Sstevel@tonic-gate
57100Sstevel@tonic-gate /*
57110Sstevel@tonic-gate * Note: The following routine does not recover from allocb()
57120Sstevel@tonic-gate * failures.
57130Sstevel@tonic-gate */
57140Sstevel@tonic-gate static mblk_t *
57150Sstevel@tonic-gate tl_ordrel_ind_alloc(void)
57160Sstevel@tonic-gate {
57170Sstevel@tonic-gate mblk_t *mp;
57180Sstevel@tonic-gate struct T_ordrel_ind *toi;
57190Sstevel@tonic-gate
57200Sstevel@tonic-gate if (mp = allocb(sizeof (struct T_ordrel_ind), BPRI_MED)) {
57210Sstevel@tonic-gate DB_TYPE(mp) = M_PROTO;
57220Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (struct T_ordrel_ind);
57230Sstevel@tonic-gate toi = (struct T_ordrel_ind *)mp->b_rptr;
57240Sstevel@tonic-gate toi->PRIM_type = T_ORDREL_IND;
57250Sstevel@tonic-gate }
57260Sstevel@tonic-gate return (mp);
57270Sstevel@tonic-gate }
57280Sstevel@tonic-gate
57290Sstevel@tonic-gate
57300Sstevel@tonic-gate /*
57310Sstevel@tonic-gate * Lookup the seqno in the list of queued connections.
57320Sstevel@tonic-gate */
57330Sstevel@tonic-gate static tl_icon_t *
57340Sstevel@tonic-gate tl_icon_find(tl_endpt_t *tep, t_scalar_t seqno)
57350Sstevel@tonic-gate {
57360Sstevel@tonic-gate list_t *l = &tep->te_iconp;
57370Sstevel@tonic-gate tl_icon_t *tip = list_head(l);
57380Sstevel@tonic-gate
57390Sstevel@tonic-gate ASSERT(seqno != 0);
57400Sstevel@tonic-gate
57410Sstevel@tonic-gate for (; tip != NULL && (tip->ti_seqno != seqno); tip = list_next(l, tip))
57420Sstevel@tonic-gate ;
57430Sstevel@tonic-gate
57440Sstevel@tonic-gate return (tip);
57450Sstevel@tonic-gate }
57460Sstevel@tonic-gate
57470Sstevel@tonic-gate /*
57480Sstevel@tonic-gate * Queue data for a given T_CONN_IND while verifying that redundant
57490Sstevel@tonic-gate * messages, such as a T_ORDREL_IND after a T_DISCON_IND, are not queued.
57500Sstevel@tonic-gate * Used when the originator of the connection closes.
57510Sstevel@tonic-gate */
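/*
 * Ordering rules enforced by the loop below: nothing may be queued after a
 * T_DISCON_IND, and once a T_ORDREL_IND has been queued only a T_DISCON_IND
 * may follow; anything else is silently freed.
 */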
57520Sstevel@tonic-gate static void
57530Sstevel@tonic-gate tl_icon_queuemsg(tl_endpt_t *tep, t_scalar_t seqno, mblk_t *nmp)
57540Sstevel@tonic-gate {
57550Sstevel@tonic-gate tl_icon_t *tip;
57560Sstevel@tonic-gate mblk_t **mpp, *mp;
57570Sstevel@tonic-gate int prim, nprim;
57580Sstevel@tonic-gate
57590Sstevel@tonic-gate if (nmp->b_datap->db_type == M_PROTO)
57600Sstevel@tonic-gate nprim = ((union T_primitives *)nmp->b_rptr)->type;
57610Sstevel@tonic-gate else
57620Sstevel@tonic-gate nprim = -1; /* M_DATA */
57630Sstevel@tonic-gate
57640Sstevel@tonic-gate tip = tl_icon_find(tep, seqno);
57650Sstevel@tonic-gate if (tip == NULL) {
57660Sstevel@tonic-gate freemsg(nmp);
57670Sstevel@tonic-gate return;
57680Sstevel@tonic-gate }
57690Sstevel@tonic-gate
57700Sstevel@tonic-gate ASSERT(tip->ti_seqno != 0);
57710Sstevel@tonic-gate mpp = &tip->ti_mp;
57720Sstevel@tonic-gate while (*mpp != NULL) {
57730Sstevel@tonic-gate mp = *mpp;
57740Sstevel@tonic-gate
57750Sstevel@tonic-gate if (mp->b_datap->db_type == M_PROTO)
57760Sstevel@tonic-gate prim = ((union T_primitives *)mp->b_rptr)->type;
57770Sstevel@tonic-gate else
57780Sstevel@tonic-gate prim = -1; /* M_DATA */
57790Sstevel@tonic-gate
57800Sstevel@tonic-gate /*
57810Sstevel@tonic-gate * Allow nothing after a T_DISCON_IND
57820Sstevel@tonic-gate */
57830Sstevel@tonic-gate if (prim == T_DISCON_IND) {
57840Sstevel@tonic-gate freemsg(nmp);
57850Sstevel@tonic-gate return;
57860Sstevel@tonic-gate }
57870Sstevel@tonic-gate /*
57880Sstevel@tonic-gate * Only allow a T_DISCON_IND after an T_ORDREL_IND
57890Sstevel@tonic-gate */
57900Sstevel@tonic-gate if (prim == T_ORDREL_IND && nprim != T_DISCON_IND) {
57910Sstevel@tonic-gate freemsg(nmp);
57920Sstevel@tonic-gate return;
57930Sstevel@tonic-gate }
57940Sstevel@tonic-gate mpp = &(mp->b_next);
57950Sstevel@tonic-gate }
57960Sstevel@tonic-gate *mpp = nmp;
57970Sstevel@tonic-gate }
57980Sstevel@tonic-gate
57990Sstevel@tonic-gate /*
58000Sstevel@tonic-gate * Verify if a certain TPI primitive exists on the connind queue.
58010Sstevel@tonic-gate * Use prim -1 for M_DATA.
58020Sstevel@tonic-gate * Return non-zero if found.
58030Sstevel@tonic-gate */
58040Sstevel@tonic-gate static boolean_t
58050Sstevel@tonic-gate tl_icon_hasprim(tl_endpt_t *tep, t_scalar_t seqno, t_scalar_t prim)
58060Sstevel@tonic-gate {
58070Sstevel@tonic-gate tl_icon_t *tip = tl_icon_find(tep, seqno);
58080Sstevel@tonic-gate boolean_t found = B_FALSE;
58090Sstevel@tonic-gate
58100Sstevel@tonic-gate if (tip != NULL) {
58110Sstevel@tonic-gate mblk_t *mp;
58120Sstevel@tonic-gate for (mp = tip->ti_mp; !found && mp != NULL; mp = mp->b_next) {
58130Sstevel@tonic-gate found = (DB_TYPE(mp) == M_PROTO &&
58140Sstevel@tonic-gate ((union T_primitives *)mp->b_rptr)->type == prim);
58150Sstevel@tonic-gate }
58160Sstevel@tonic-gate }
58170Sstevel@tonic-gate return (found);
58180Sstevel@tonic-gate }
58190Sstevel@tonic-gate
58200Sstevel@tonic-gate /*
58210Sstevel@tonic-gate * Send the b_next mblk chain that has accumulated before the connection
58220Sstevel@tonic-gate * was accepted. Perform the necessary state transitions.
58230Sstevel@tonic-gate */
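/*
 * While draining the chain below, M_DATA and most indications are simply
 * putnext()ed; a queued T_ORDREL_IND also advances te_state through
 * NEXTSTATE(TE_ORDREL_IND, ...), and a queued T_DISCON_IND forces te_state
 * to TS_IDLE before delivery.
 */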
58240Sstevel@tonic-gate static void
58250Sstevel@tonic-gate tl_icon_sendmsgs(tl_endpt_t *tep, mblk_t **mpp)
58260Sstevel@tonic-gate {
58270Sstevel@tonic-gate mblk_t *mp;
58280Sstevel@tonic-gate union T_primitives *primp;
58290Sstevel@tonic-gate
58300Sstevel@tonic-gate if (tep->te_closing) {
58310Sstevel@tonic-gate tl_icon_freemsgs(mpp);
58320Sstevel@tonic-gate return;
58330Sstevel@tonic-gate }
58340Sstevel@tonic-gate
58350Sstevel@tonic-gate ASSERT(tep->te_state == TS_DATA_XFER);
58360Sstevel@tonic-gate ASSERT(tep->te_rq->q_first == NULL);
58370Sstevel@tonic-gate
58380Sstevel@tonic-gate while ((mp = *mpp) != NULL) {
58390Sstevel@tonic-gate *mpp = mp->b_next;
58400Sstevel@tonic-gate mp->b_next = NULL;
58410Sstevel@tonic-gate
58420Sstevel@tonic-gate ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO));
58430Sstevel@tonic-gate switch (DB_TYPE(mp)) {
58440Sstevel@tonic-gate default:
58450Sstevel@tonic-gate freemsg(mp);
58460Sstevel@tonic-gate break;
58470Sstevel@tonic-gate case M_DATA:
58480Sstevel@tonic-gate putnext(tep->te_rq, mp);
58490Sstevel@tonic-gate break;
58500Sstevel@tonic-gate case M_PROTO:
58510Sstevel@tonic-gate primp = (union T_primitives *)mp->b_rptr;
58520Sstevel@tonic-gate switch (primp->type) {
58530Sstevel@tonic-gate case T_UNITDATA_IND:
58540Sstevel@tonic-gate case T_DATA_IND:
58550Sstevel@tonic-gate case T_OPTDATA_IND:
58560Sstevel@tonic-gate case T_EXDATA_IND:
58570Sstevel@tonic-gate putnext(tep->te_rq, mp);
58580Sstevel@tonic-gate break;
58590Sstevel@tonic-gate case T_ORDREL_IND:
58600Sstevel@tonic-gate tep->te_state = NEXTSTATE(TE_ORDREL_IND,
58615240Snordmark tep->te_state);
58620Sstevel@tonic-gate putnext(tep->te_rq, mp);
58630Sstevel@tonic-gate break;
58640Sstevel@tonic-gate case T_DISCON_IND:
58650Sstevel@tonic-gate tep->te_state = TS_IDLE;
58660Sstevel@tonic-gate putnext(tep->te_rq, mp);
58670Sstevel@tonic-gate break;
58680Sstevel@tonic-gate default:
58690Sstevel@tonic-gate #ifdef DEBUG
58700Sstevel@tonic-gate cmn_err(CE_PANIC,
58715240Snordmark "tl_icon_sendmsgs: unknown primitive");
58720Sstevel@tonic-gate #endif /* DEBUG */
58730Sstevel@tonic-gate freemsg(mp);
58740Sstevel@tonic-gate break;
58750Sstevel@tonic-gate }
58760Sstevel@tonic-gate break;
58770Sstevel@tonic-gate }
58780Sstevel@tonic-gate }
58790Sstevel@tonic-gate }
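/*
 * Illustrative accept-path usage (a sketch under assumed placeholder
 * names, not code from this driver): once the connection has been
 * accepted, the mblks that accumulated on the pending-connection entry
 * are pushed upstream in order, roughly:
 *
 *	tl_icon_t *tip = tl_icon_find(listener_tep, seqno);
 *	if (tip != NULL && tip->ti_mp != NULL)
 *		tl_icon_sendmsgs(accepting_tep, &tip->ti_mp);
 *
 * "listener_tep", "accepting_tep" and "seqno" are placeholders; the
 * actual accept code performs additional state transitions first.
 */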
58800Sstevel@tonic-gate
58810Sstevel@tonic-gate /*
58820Sstevel@tonic-gate * Free the b_next mblk chain that has accumulated before the connection
58830Sstevel@tonic-gate * was accepted.
58840Sstevel@tonic-gate */
58850Sstevel@tonic-gate static void
58860Sstevel@tonic-gate tl_icon_freemsgs(mblk_t **mpp)
58870Sstevel@tonic-gate {
58880Sstevel@tonic-gate mblk_t *mp;
58890Sstevel@tonic-gate
58900Sstevel@tonic-gate while ((mp = *mpp) != NULL) {
58910Sstevel@tonic-gate *mpp = mp->b_next;
58920Sstevel@tonic-gate mp->b_next = NULL;
58930Sstevel@tonic-gate freemsg(mp);
58940Sstevel@tonic-gate }
58950Sstevel@tonic-gate }
58960Sstevel@tonic-gate
58970Sstevel@tonic-gate /*
58980Sstevel@tonic-gate * Send M_ERROR
58990Sstevel@tonic-gate * Note: assumes the caller ensured there is enough space in mp or
59000Sstevel@tonic-gate * enough memory available. Does not attempt to recover from
59010Sstevel@tonic-gate * allocb() failures.
59020Sstevel@tonic-gate */
59030Sstevel@tonic-gate
59040Sstevel@tonic-gate static void
59050Sstevel@tonic-gate tl_merror(queue_t *wq, mblk_t *mp, int error)
59060Sstevel@tonic-gate {
59070Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
59080Sstevel@tonic-gate
59090Sstevel@tonic-gate if (tep->te_closing) {
59100Sstevel@tonic-gate freemsg(mp);
59110Sstevel@tonic-gate return;
59120Sstevel@tonic-gate }
59130Sstevel@tonic-gate
59140Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
59155240Snordmark SL_TRACE|SL_ERROR,
59167240Srh87107 "tl_merror: tep=%p, err=%d", (void *)tep, error));
59170Sstevel@tonic-gate
59180Sstevel@tonic-gate /*
59190Sstevel@tonic-gate * flush all messages on queue. we are shutting
59200Sstevel@tonic-gate * the stream down on fatal error
59210Sstevel@tonic-gate */
59220Sstevel@tonic-gate flushq(wq, FLUSHALL);
59230Sstevel@tonic-gate if (IS_COTS(tep)) {
59240Sstevel@tonic-gate /* connection oriented - unconnect endpoints */
59250Sstevel@tonic-gate tl_co_unconnect(tep);
59260Sstevel@tonic-gate }
59270Sstevel@tonic-gate if (mp->b_cont) {
59280Sstevel@tonic-gate freemsg(mp->b_cont);
59290Sstevel@tonic-gate mp->b_cont = NULL;
59300Sstevel@tonic-gate }
59310Sstevel@tonic-gate
59320Sstevel@tonic-gate if ((MBLKSIZE(mp) < 1) || (DB_REF(mp) > 1)) {
59330Sstevel@tonic-gate freemsg(mp);
59340Sstevel@tonic-gate mp = allocb(1, BPRI_HI);
59350Sstevel@tonic-gate if (!mp) {
59360Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
59375240Snordmark SL_TRACE|SL_ERROR,
59385240Snordmark "tl_merror:M_PROTO: out of memory"));
59390Sstevel@tonic-gate return;
59400Sstevel@tonic-gate }
59410Sstevel@tonic-gate }
59420Sstevel@tonic-gate if (mp) {
59430Sstevel@tonic-gate DB_TYPE(mp) = M_ERROR;
59440Sstevel@tonic-gate mp->b_rptr = DB_BASE(mp);
59450Sstevel@tonic-gate *mp->b_rptr = (char)error;
59460Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (char);
59470Sstevel@tonic-gate qreply(wq, mp);
59480Sstevel@tonic-gate } else {
59490Sstevel@tonic-gate (void) putnextctl1(tep->te_rq, M_ERROR, error);
59500Sstevel@tonic-gate }
59510Sstevel@tonic-gate }
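/*
 * Background note (general STREAMS behavior, not specific to this
 * driver): once the M_ERROR built above reaches the stream head,
 * subsequent read(2)/write(2)/getmsg(2)/putmsg(2) calls on the
 * endpoint fail with the errno carried in the message. A typical
 * fatal-error call therefore looks like tl_merror(wq, mp, EPROTO)
 * (illustrative), after which the endpoint is effectively unusable.
 */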
59520Sstevel@tonic-gate
59530Sstevel@tonic-gate static void
59541676Sjpk tl_fill_option(uchar_t *buf, cred_t *cr, pid_t cpid, int flag, cred_t *pcr)
59550Sstevel@tonic-gate {
59568778SErik.Nordmark@Sun.COM ASSERT(cr != NULL);
59578778SErik.Nordmark@Sun.COM
59580Sstevel@tonic-gate if (flag & TL_SETCRED) {
59590Sstevel@tonic-gate struct opthdr *opt = (struct opthdr *)buf;
59600Sstevel@tonic-gate tl_credopt_t *tlcred;
59610Sstevel@tonic-gate
59620Sstevel@tonic-gate opt->level = TL_PROT_LEVEL;
59630Sstevel@tonic-gate opt->name = TL_OPT_PEER_CRED;
59640Sstevel@tonic-gate opt->len = (t_uscalar_t)OPTLEN(sizeof (tl_credopt_t));
59650Sstevel@tonic-gate
59660Sstevel@tonic-gate tlcred = (tl_credopt_t *)(opt + 1);
59670Sstevel@tonic-gate tlcred->tc_uid = crgetuid(cr);
59680Sstevel@tonic-gate tlcred->tc_gid = crgetgid(cr);
59690Sstevel@tonic-gate tlcred->tc_ruid = crgetruid(cr);
59700Sstevel@tonic-gate tlcred->tc_rgid = crgetrgid(cr);
59710Sstevel@tonic-gate tlcred->tc_suid = crgetsuid(cr);
59720Sstevel@tonic-gate tlcred->tc_sgid = crgetsgid(cr);
59730Sstevel@tonic-gate tlcred->tc_ngroups = crgetngroups(cr);
59740Sstevel@tonic-gate } else if (flag & TL_SETUCRED) {
59750Sstevel@tonic-gate struct opthdr *opt = (struct opthdr *)buf;
59760Sstevel@tonic-gate
59770Sstevel@tonic-gate opt->level = TL_PROT_LEVEL;
59780Sstevel@tonic-gate opt->name = TL_OPT_PEER_UCRED;
597911134SCasper.Dik@Sun.COM opt->len = (t_uscalar_t)OPTLEN(ucredminsize(cr));
59800Sstevel@tonic-gate
59811676Sjpk (void) cred2ucred(cr, cpid, (void *)(opt + 1), pcr);
59820Sstevel@tonic-gate } else {
59830Sstevel@tonic-gate struct T_opthdr *topt = (struct T_opthdr *)buf;
59840Sstevel@tonic-gate ASSERT(flag & TL_SOCKUCRED);
59850Sstevel@tonic-gate
59860Sstevel@tonic-gate topt->level = SOL_SOCKET;
59870Sstevel@tonic-gate topt->name = SCM_UCRED;
598811134SCasper.Dik@Sun.COM topt->len = ucredminsize(cr) + sizeof (*topt);
59890Sstevel@tonic-gate topt->status = 0;
59901676Sjpk (void) cred2ucred(cr, cpid, (void *)(topt + 1), pcr);
59910Sstevel@tonic-gate }
59920Sstevel@tonic-gate }
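/*
 * Userland view (illustrative sketch, not part of this driver): the
 * credential option blocks built above are what ultimately back
 * interfaces such as getpeerucred(3C) for connection-oriented loopback
 * endpoints, e.g.:
 *
 *	ucred_t *uc = NULL;
 *	if (getpeerucred(fd, &uc) == 0) {
 *		uid_t euid = ucred_geteuid(uc);
 *		pid_t pid = ucred_getpid(uc);
 *		ucred_free(uc);
 *	}
 *
 * The plumbing between these option blocks and getpeerucred() passes
 * through other layers and is only summarized here.
 */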
59930Sstevel@tonic-gate
59940Sstevel@tonic-gate /* ARGSUSED */
59950Sstevel@tonic-gate static int
59960Sstevel@tonic-gate tl_default_opt(queue_t *wq, int level, int name, uchar_t *ptr)
59970Sstevel@tonic-gate {
59980Sstevel@tonic-gate /* no default value processed in protocol specific code currently */
59990Sstevel@tonic-gate return (-1);
60000Sstevel@tonic-gate }
60010Sstevel@tonic-gate
60020Sstevel@tonic-gate /* ARGSUSED */
60030Sstevel@tonic-gate static int
60040Sstevel@tonic-gate tl_get_opt(queue_t *wq, int level, int name, uchar_t *ptr)
60050Sstevel@tonic-gate {
60060Sstevel@tonic-gate int len;
60070Sstevel@tonic-gate tl_endpt_t *tep;
60080Sstevel@tonic-gate int *valp;
60090Sstevel@tonic-gate
60100Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr;
60110Sstevel@tonic-gate
60120Sstevel@tonic-gate len = 0;
60130Sstevel@tonic-gate
60140Sstevel@tonic-gate /*
60150Sstevel@tonic-gate * Assumes: option level and name sanity check done elsewhere
60160Sstevel@tonic-gate */
60170Sstevel@tonic-gate
60180Sstevel@tonic-gate switch (level) {
60190Sstevel@tonic-gate case SOL_SOCKET:
60200Sstevel@tonic-gate if (! IS_SOCKET(tep))
60210Sstevel@tonic-gate break;
60220Sstevel@tonic-gate switch (name) {
60230Sstevel@tonic-gate case SO_RECVUCRED:
60240Sstevel@tonic-gate len = sizeof (int);
60250Sstevel@tonic-gate valp = (int *)ptr;
60260Sstevel@tonic-gate *valp = (tep->te_flag & TL_SOCKUCRED) != 0;
60270Sstevel@tonic-gate break;
60280Sstevel@tonic-gate default:
60290Sstevel@tonic-gate break;
60300Sstevel@tonic-gate }
60310Sstevel@tonic-gate break;
60320Sstevel@tonic-gate case TL_PROT_LEVEL:
60330Sstevel@tonic-gate switch (name) {
60340Sstevel@tonic-gate case TL_OPT_PEER_CRED:
60350Sstevel@tonic-gate case TL_OPT_PEER_UCRED:
60360Sstevel@tonic-gate /*
60370Sstevel@tonic-gate * This option is not supposed to be retrieved directly.
60380Sstevel@tonic-gate * It is only sent in T_CONN_{IND,CON} and T_UNITDATA_IND
60390Sstevel@tonic-gate * when certain internal flags are set by other options.
60400Sstevel@tonic-gate * Direct retrieval is always designed to fail (be ignored)
60410Sstevel@tonic-gate * for this option.
60420Sstevel@tonic-gate */
60430Sstevel@tonic-gate break;
60440Sstevel@tonic-gate }
60450Sstevel@tonic-gate }
60460Sstevel@tonic-gate return (len);
60470Sstevel@tonic-gate }
60480Sstevel@tonic-gate
60490Sstevel@tonic-gate /* ARGSUSED */
60500Sstevel@tonic-gate static int
60510Sstevel@tonic-gate tl_set_opt(
60520Sstevel@tonic-gate queue_t *wq,
60530Sstevel@tonic-gate uint_t mgmt_flags,
60540Sstevel@tonic-gate int level,
60550Sstevel@tonic-gate int name,
60560Sstevel@tonic-gate uint_t inlen,
60570Sstevel@tonic-gate uchar_t *invalp,
60580Sstevel@tonic-gate uint_t *outlenp,
60590Sstevel@tonic-gate uchar_t *outvalp,
60600Sstevel@tonic-gate void *thisdg_attrs,
606111042SErik.Nordmark@Sun.COM cred_t *cr)
60620Sstevel@tonic-gate {
60630Sstevel@tonic-gate int error;
60640Sstevel@tonic-gate tl_endpt_t *tep;
60650Sstevel@tonic-gate
60660Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr;
60670Sstevel@tonic-gate
60680Sstevel@tonic-gate error = 0; /* NOERROR */
60690Sstevel@tonic-gate
60700Sstevel@tonic-gate /*
60710Sstevel@tonic-gate * Assumes: option level and name sanity checks done elsewhere
60720Sstevel@tonic-gate */
60730Sstevel@tonic-gate
60740Sstevel@tonic-gate switch (level) {
60750Sstevel@tonic-gate case SOL_SOCKET:
60760Sstevel@tonic-gate if (! IS_SOCKET(tep)) {
60770Sstevel@tonic-gate error = EINVAL;
60780Sstevel@tonic-gate break;
60790Sstevel@tonic-gate }
60800Sstevel@tonic-gate /*
60810Sstevel@tonic-gate * TBD: fill in other AF_UNIX socket options and then stop
60820Sstevel@tonic-gate * returning error.
60830Sstevel@tonic-gate */
60840Sstevel@tonic-gate switch (name) {
60850Sstevel@tonic-gate case SO_RECVUCRED:
60860Sstevel@tonic-gate /*
60870Sstevel@tonic-gate * We only support this for datagram sockets;
60880Sstevel@tonic-gate * getpeerucred handles the connection oriented
60890Sstevel@tonic-gate * transports.
60900Sstevel@tonic-gate */
60910Sstevel@tonic-gate if (! IS_CLTS(tep)) {
60920Sstevel@tonic-gate error = EINVAL;
60930Sstevel@tonic-gate break;
60940Sstevel@tonic-gate }
60950Sstevel@tonic-gate if (*(int *)invalp == 0)
60960Sstevel@tonic-gate tep->te_flag &= ~TL_SOCKUCRED;
60970Sstevel@tonic-gate else
60980Sstevel@tonic-gate tep->te_flag |= TL_SOCKUCRED;
60990Sstevel@tonic-gate break;
61000Sstevel@tonic-gate default:
61010Sstevel@tonic-gate error = EINVAL;
61020Sstevel@tonic-gate break;
61030Sstevel@tonic-gate }
61040Sstevel@tonic-gate break;
61050Sstevel@tonic-gate case TL_PROT_LEVEL:
61060Sstevel@tonic-gate switch (name) {
61070Sstevel@tonic-gate case TL_OPT_PEER_CRED:
61080Sstevel@tonic-gate case TL_OPT_PEER_UCRED:
61090Sstevel@tonic-gate /*
61100Sstevel@tonic-gate * This option is not supposed to be set directly.
61110Sstevel@tonic-gate * Its value is initialized for each endpoint at
61120Sstevel@tonic-gate * driver open time.
61130Sstevel@tonic-gate * Direct setting is always designed to fail for
61140Sstevel@tonic-gate * this option.
61150Sstevel@tonic-gate */
61160Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1,
61175240Snordmark SL_TRACE|SL_ERROR,
61185240Snordmark "tl_set_opt: option is not supported"));
61190Sstevel@tonic-gate error = EPROTO;
61200Sstevel@tonic-gate break;
61210Sstevel@tonic-gate }
61220Sstevel@tonic-gate }
61230Sstevel@tonic-gate return (error);
61240Sstevel@tonic-gate }
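/*
 * Illustrative userland counterpart of the SO_RECVUCRED handling above
 * (a sketch using the standard socket API, not code from this file):
 *
 *	int on = 1;
 *	(void) setsockopt(fd, SOL_SOCKET, SO_RECVUCRED, &on, sizeof (on));
 *
 * After this, each datagram received on the AF_UNIX socket carries an
 * SCM_UCRED control message (built by tl_fill_option() above) that can
 * be located with CMSG_FIRSTHDR()/CMSG_NXTHDR() on the msghdr filled
 * in by recvmsg(3SOCKET).
 */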
61250Sstevel@tonic-gate
61260Sstevel@tonic-gate
61270Sstevel@tonic-gate static void
61280Sstevel@tonic-gate tl_timer(void *arg)
61290Sstevel@tonic-gate {
61300Sstevel@tonic-gate queue_t *wq = arg;
61310Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
61320Sstevel@tonic-gate
61330Sstevel@tonic-gate ASSERT(tep);
61340Sstevel@tonic-gate
61350Sstevel@tonic-gate tep->te_timoutid = 0;
61360Sstevel@tonic-gate
61370Sstevel@tonic-gate enableok(wq);
61380Sstevel@tonic-gate /*
61390Sstevel@tonic-gate * Note: we could call wsrv directly here and save a context switch.
61400Sstevel@tonic-gate * Consider changing this when qtimeout (not timeout) is active.
61410Sstevel@tonic-gate */
61420Sstevel@tonic-gate qenable(wq);
61430Sstevel@tonic-gate }
61440Sstevel@tonic-gate
61450Sstevel@tonic-gate static void
61460Sstevel@tonic-gate tl_buffer(void *arg)
61470Sstevel@tonic-gate {
61480Sstevel@tonic-gate queue_t *wq = arg;
61490Sstevel@tonic-gate tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
61500Sstevel@tonic-gate
61510Sstevel@tonic-gate ASSERT(tep);
61520Sstevel@tonic-gate
61530Sstevel@tonic-gate tep->te_bufcid = 0;
61540Sstevel@tonic-gate tep->te_nowsrv = B_FALSE;
61550Sstevel@tonic-gate
61560Sstevel@tonic-gate enableok(wq);
61570Sstevel@tonic-gate /*
61580Sstevel@tonic-gate * Note: we could call wsrv directly here and save a context switch.
61590Sstevel@tonic-gate * Consider changing this when qbufcall (not bufcall) is active.
61600Sstevel@tonic-gate */
61610Sstevel@tonic-gate qenable(wq);
61620Sstevel@tonic-gate }
61630Sstevel@tonic-gate
61640Sstevel@tonic-gate static void
61650Sstevel@tonic-gate tl_memrecover(queue_t *wq, mblk_t *mp, size_t size)
61660Sstevel@tonic-gate {
61670Sstevel@tonic-gate tl_endpt_t *tep;
61680Sstevel@tonic-gate
61690Sstevel@tonic-gate tep = (tl_endpt_t *)wq->q_ptr;
61700Sstevel@tonic-gate
61710Sstevel@tonic-gate if (tep->te_closing) {
61720Sstevel@tonic-gate freemsg(mp);
61730Sstevel@tonic-gate return;
61740Sstevel@tonic-gate }
61750Sstevel@tonic-gate noenable(wq);
61760Sstevel@tonic-gate
61770Sstevel@tonic-gate (void) insq(wq, wq->q_first, mp);
61780Sstevel@tonic-gate
61790Sstevel@tonic-gate if (tep->te_bufcid || tep->te_timoutid) {
61800Sstevel@tonic-gate (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
61815240Snordmark "tl_memrecover:recover %p pending", (void *)wq));
61820Sstevel@tonic-gate return;
61830Sstevel@tonic-gate }
61840Sstevel@tonic-gate
61850Sstevel@tonic-gate if (!(tep->te_bufcid = qbufcall(wq, size, BPRI_MED, tl_buffer, wq))) {
61860Sstevel@tonic-gate tep->te_timoutid = qtimeout(wq, tl_timer, wq,
61870Sstevel@tonic-gate drv_usectohz(TL_BUFWAIT));
61880Sstevel@tonic-gate }
61890Sstevel@tonic-gate }
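/*
 * Illustrative call pattern (a sketch; the real call sites elsewhere
 * in this driver differ in detail): an allocation failure in a write
 * service path parks the original message and schedules recovery
 * instead of dropping it, roughly:
 *
 *	if ((ackmp = allocb(ack_sz, BPRI_MED)) == NULL) {
 *		tl_memrecover(wq, mp, ack_sz);
 *		return;
 *	}
 *
 * The queue stays disabled (noenable()) until tl_buffer() or tl_timer()
 * fires and re-enables it with enableok()/qenable() to retry the
 * deferred work.
 */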
61900Sstevel@tonic-gate
61910Sstevel@tonic-gate static void
61920Sstevel@tonic-gate tl_freetip(tl_endpt_t *tep, tl_icon_t *tip)
61930Sstevel@tonic-gate {
61940Sstevel@tonic-gate ASSERT(tip->ti_seqno != 0);
61950Sstevel@tonic-gate
61960Sstevel@tonic-gate if (tip->ti_mp != NULL) {
61970Sstevel@tonic-gate tl_icon_freemsgs(&tip->ti_mp);
61980Sstevel@tonic-gate tip->ti_mp = NULL;
61990Sstevel@tonic-gate }
62000Sstevel@tonic-gate if (tip->ti_tep != NULL) {
62010Sstevel@tonic-gate tl_refrele(tip->ti_tep);
62020Sstevel@tonic-gate tip->ti_tep = NULL;
62030Sstevel@tonic-gate }
62040Sstevel@tonic-gate list_remove(&tep->te_iconp, tip);
62050Sstevel@tonic-gate kmem_free(tip, sizeof (tl_icon_t));
62060Sstevel@tonic-gate tep->te_nicon--;
62070Sstevel@tonic-gate }
62080Sstevel@tonic-gate
62090Sstevel@tonic-gate /*
62100Sstevel@tonic-gate * Remove address from address hash.
62110Sstevel@tonic-gate */
62120Sstevel@tonic-gate static void
62130Sstevel@tonic-gate tl_addr_unbind(tl_endpt_t *tep)
62140Sstevel@tonic-gate {
62150Sstevel@tonic-gate tl_endpt_t *elp;
62160Sstevel@tonic-gate
62170Sstevel@tonic-gate if (tep->te_flag & TL_ADDRHASHED) {
62180Sstevel@tonic-gate if (IS_SOCKET(tep)) {
62190Sstevel@tonic-gate (void) mod_hash_remove(tep->te_addrhash,
62200Sstevel@tonic-gate (mod_hash_key_t)tep->te_vp,
62210Sstevel@tonic-gate (mod_hash_val_t *)&elp);
62220Sstevel@tonic-gate tep->te_vp = (void *)(uintptr_t)tep->te_minor;
62230Sstevel@tonic-gate tep->te_magic = SOU_MAGIC_IMPLICIT;
62240Sstevel@tonic-gate } else {
62250Sstevel@tonic-gate (void) mod_hash_remove(tep->te_addrhash,
62260Sstevel@tonic-gate (mod_hash_key_t)&tep->te_ap,
62270Sstevel@tonic-gate (mod_hash_val_t *)&elp);
62280Sstevel@tonic-gate (void) kmem_free(tep->te_abuf, tep->te_alen);
62290Sstevel@tonic-gate tep->te_alen = -1;
62300Sstevel@tonic-gate tep->te_abuf = NULL;
62310Sstevel@tonic-gate }
62320Sstevel@tonic-gate tep->te_flag &= ~TL_ADDRHASHED;
62330Sstevel@tonic-gate }
62340Sstevel@tonic-gate }