1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
23 /* All Rights Reserved */
24
25 /*
26 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
27 * Use is subject to license terms.
28 */
29
30 #include "mt.h"
31 #include <stdlib.h>
32 #include <string.h>
33 #include <strings.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stropts.h>
37 #include <sys/stream.h>
38 #define _SUN_TPI_VERSION 2
39 #include <sys/tihdr.h>
40 #include <sys/timod.h>
41 #include <sys/stat.h>
42 #include <xti.h>
43 #include <fcntl.h>
44 #include <signal.h>
45 #include <assert.h>
46 #include <syslog.h>
47 #include <limits.h>
48 #include <ucred.h>
49 #include "tx.h"
50
51 #define DEFSIZE 2048
52
53 /*
54 * The following used to be in tiuser.h, but was causing too much namespace
55 * pollution.
56 */
57 #define ROUNDUP32(X) (((X) + 0x03) & ~0x03)
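/*
* For example, ROUNDUP32(5) and ROUNDUP32(8) both evaluate to 8, so any
* offset produced with it is a multiple of 4 bytes (32 bits).
*/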
58
59 static struct _ti_user *find_tilink(int s);
60 static struct _ti_user *add_tilink(int s);
61 static void _t_free_lookbufs(struct _ti_user *tiptr);
62 static unsigned int _t_setsize(t_scalar_t infosize, boolean_t option);
63 static int _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf);
64 static int _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf);
65 static int _t_adjust_state(int fd, int instate);
66 static int _t_alloc_bufs(int fd, struct _ti_user *tiptr,
67 struct T_info_ack *tsap);
68
69 mutex_t _ti_userlock = DEFAULTMUTEX; /* Protects hash_bucket[] */
70
71 /*
72 * Checkfd - checks validity of file descriptor
73 */
74 struct _ti_user *
75 _t_checkfd(int fd, int force_sync, int api_semantics)
76 {
77 sigset_t mask;
78 struct _ti_user *tiptr;
79 int retval, timodpushed;
80
81 if (fd < 0) {
82 t_errno = TBADF;
83 return (NULL);
84 }
85 tiptr = NULL;
86 sig_mutex_lock(&_ti_userlock);
87 if ((tiptr = find_tilink(fd)) != NULL) {
88 if (!force_sync) {
89 sig_mutex_unlock(&_ti_userlock);
90 return (tiptr);
91 }
92 }
93 sig_mutex_unlock(&_ti_userlock);
94
95 /*
96 * Not found, or a forced sync is required.
97 * Check if this is a valid TLI/XTI descriptor.
98 */
99 timodpushed = 0;
100 do {
101 retval = ioctl(fd, I_FIND, "timod");
102 } while (retval < 0 && errno == EINTR);
103
104 if (retval < 0 || (retval == 0 && _T_IS_TLI(api_semantics))) {
105 /*
106 * Not a stream, or a TLI endpoint with no timod.
107 * XXX Note: If it is an XTI call, we push "timod" and
108 * try to convert it into a transport endpoint later.
109 * We do not do it for TLI and "retain" the old buggy
110 * behavior because ypbind and a lot of other daemons seem
111 * to use a buggy logic test of the form
112 * "(t_getstate(0) != -1 || t_errno != TBADF)" to see if
113 * they were ever invoked with a request on stdin, and drop into
114 * untested code. This test is in code generated by rpcgen,
115 * which is why the test is replicated in many daemons too.
116 * We will need to fix that test too with an "IsaTLIendpoint"
117 * test if we ever fix this for TLI.
118 */
119 t_errno = TBADF;
120 return (NULL);
121 }
122
123 if (retval == 0) {
124 /*
125 * "timod" not already on stream, then push it
126 */
127 do {
128 /*
129 * Assumes (correctly) that I_PUSH is
130 * atomic w.r.t signals (EINTR error)
131 */
132 retval = ioctl(fd, I_PUSH, "timod");
133 } while (retval < 0 && errno == EINTR);
134
135 if (retval < 0) {
136 t_errno = TSYSERR;
137 return (NULL);
138 }
139 timodpushed = 1;
140 }
141 /*
142 * Try to (re)constitute the info at user level from state
143 * in the kernel. This could be information that was lost due
144 * to an exec, or the endpoint being instantiated at a new
145 * descriptor due to open(), dup2(), etc.
146 *
147 * _t_create() requires that all signals be blocked.
148 * Note that sig_mutex_lock() only defers signals, it does not
149 * block them, so interruptible syscalls could still get EINTR.
150 */
151 (void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
152 sig_mutex_lock(&_ti_userlock);
153 tiptr = _t_create(fd, NULL, api_semantics, NULL);
154 if (tiptr == NULL) {
155 int sv_errno = errno;
156 sig_mutex_unlock(&_ti_userlock);
157 (void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
158 /*
159 * Restore the stream to its state before timod was
160 * pushed. It may not have been a network transport stream.
161 */
162 if (timodpushed)
163 (void) ioctl(fd, I_POP, 0);
164 errno = sv_errno;
165 return (NULL);
166 }
167 sig_mutex_unlock(&_ti_userlock);
168 (void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
169 return (tiptr);
170 }
171
172 /*
173 * Copy data to the output buffer, making sure the output offset is
174 * 32-bit aligned, even though the input buffer may not be.
175 */
176 int
177 _t_aligned_copy(
178 struct strbuf *strbufp,
179 int len,
180 int init_offset,
181 char *datap,
182 t_scalar_t *rtn_offset)
183 {
184 *rtn_offset = ROUNDUP32(init_offset);
185 if ((*rtn_offset + len) > strbufp->maxlen) {
186 /*
187 * Aligned copy will overflow buffer
188 */
189 return (-1);
190 }
191 (void) memcpy(strbufp->buf + *rtn_offset, datap, (size_t)len);
192
193 return (0);
194 }
195
196
197 /*
198 * Append data and control info to the look buffer (a list in the MT case).
199 *
200 * The only things that can be in the look buffer are a T_DISCON_IND,
201 * a T_ORDREL_IND or a T_UDERROR_IND.
202 *
203 * It also enforces priority of T_DISCON_IND over any T_ORDREL_IND
204 * already in the buffer. It assumes no T_ORDREL_IND is appended
205 * when there is already something on the looklist (error case) and
206 * that a T_ORDREL_IND if present will always be the first on the
207 * list.
208 *
209 * This also assumes ti_lock is held via sig_mutex_lock(),
210 * so signals are deferred here.
211 */
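/*
* Illustrative sketch (not part of the interface): the head of the
* look-buffer list is embedded in the instance structure and the chain
* only grows beyond one node in the MT case, e.g. with two queued events:
*
*	tiptr->ti_lookbufs (embedded head, event #1)
*	    tl_next ---> malloc'd struct _ti_lookbufs (event #2)
*		tl_next ---> NULL
*
* tiptr->ti_lookcnt holds the number of queued events (2 here).
*/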
212 int
213 _t_register_lookevent(
214 struct _ti_user *tiptr,
215 caddr_t dptr,
216 int dsize,
217 caddr_t cptr,
218 int csize)
219 {
220 struct _ti_lookbufs *tlbs;
221 int cbuf_size, dbuf_size;
222
223 assert(MUTEX_HELD(&tiptr->ti_lock));
224
225 cbuf_size = tiptr->ti_ctlsize;
226 dbuf_size = tiptr->ti_rcvsize;
227
228 if ((csize > cbuf_size) || dsize > dbuf_size) {
229 /* can't fit - return error */
230 return (-1); /* error */
231 }
232 /*
233 * Enforce priority of T_DISCON_IND over T_ORDREL_IND
234 * queued earlier.
235 * Note: Since there can be only at most one T_ORDREL_IND
236 * queued (more than one is error case), and we look for it
237 * on each append of T_DISCON_IND, it can only be at the
238 * head of the list if it is there.
239 */
240 if (tiptr->ti_lookcnt > 0) { /* something already on looklist */
241 if (cptr && csize >= (int)sizeof (struct T_discon_ind) &&
242 /* LINTED pointer cast */
243 *(t_scalar_t *)cptr == T_DISCON_IND) {
244 /* appending discon ind */
245 assert(tiptr->ti_servtype != T_CLTS);
246 /* LINTED pointer cast */
247 if (*(t_scalar_t *)tiptr->ti_lookbufs.tl_lookcbuf ==
248 T_ORDREL_IND) { /* T_ORDREL_IND is on list */
249 /*
250 * Blow away T_ORDREL_IND
251 */
252 _t_free_looklist_head(tiptr);
253 }
254 }
255 }
256 tlbs = &tiptr->ti_lookbufs;
257 if (tiptr->ti_lookcnt > 0) {
258 int listcount = 0;
259 /*
260 * Allocate and append a new lookbuf to the
261 * existing list. (Should only happen in MT case)
262 */
263 while (tlbs->tl_next != NULL) {
264 listcount++;
265 tlbs = tlbs->tl_next;
266 }
267 assert(tiptr->ti_lookcnt == listcount);
268
269 /*
270 * signals are deferred, calls to malloc() are safe.
271 */
272 if ((tlbs->tl_next = malloc(sizeof (struct _ti_lookbufs))) ==
273 NULL)
274 return (-1); /* error */
275 tlbs = tlbs->tl_next;
276 /*
277 * Allocate the buffers. The sizes are derived from the
278 * sizes of other related buffers. See _t_alloc_bufs()
279 * for details.
280 */
281 if ((tlbs->tl_lookcbuf = malloc(cbuf_size)) == NULL) {
282 /* giving up - free other memory chunks */
283 free(tlbs);
284 return (-1); /* error */
285 }
286 if ((dsize > 0) &&
287 ((tlbs->tl_lookdbuf = malloc(dbuf_size)) == NULL)) {
288 /* giving up - free other memory chunks */
289 free(tlbs->tl_lookcbuf);
290 free(tlbs);
291 return (-1); /* error */
292 }
293 }
294
295 (void) memcpy(tlbs->tl_lookcbuf, cptr, csize);
296 if (dsize > 0)
297 (void) memcpy(tlbs->tl_lookdbuf, dptr, dsize);
298 tlbs->tl_lookdlen = dsize;
299 tlbs->tl_lookclen = csize;
300 tlbs->tl_next = NULL;
301 tiptr->ti_lookcnt++;
302 return (0); /* ok return */
303 }
304
305 /*
306 * Is there something that needs attention?
307 * Assumes tiptr->ti_lock held and this thread's signals blocked
308 * in MT case.
309 */
310 int
311 _t_is_event(int fd, struct _ti_user *tiptr)
312 {
313 int size, retval;
314
315 assert(MUTEX_HELD(&tiptr->ti_lock));
316 if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
317 t_errno = TSYSERR;
318 return (-1);
319 }
320
321 if ((retval > 0) || (tiptr->ti_lookcnt > 0)) {
322 t_errno = TLOOK;
323 return (-1);
324 }
325 return (0);
326 }
327
328 /*
329 * wait for T_OK_ACK
330 * assumes tiptr->ti_lock held in MT case
331 */
332 int
333 _t_is_ok(int fd, struct _ti_user *tiptr, t_scalar_t type)
334 {
335 struct strbuf ctlbuf;
336 struct strbuf databuf;
337 union T_primitives *pptr;
338 int retval, cntlflag;
339 int size;
340 int didalloc, didralloc;
341 int flags = 0;
342
343 assert(MUTEX_HELD(&tiptr->ti_lock));
344 /*
345 * Acquire ctlbuf for use in sending/receiving control part
346 * of the message.
347 */
348 if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0)
349 return (-1);
350 /*
351 * Acquire databuf for use in sending/receiving data part
352 */
353 if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
354 if (didalloc)
355 free(ctlbuf.buf);
356 else
357 tiptr->ti_ctlbuf = ctlbuf.buf;
358 return (-1);
359 }
360
361 /*
362 * Temporarily convert a non-blocking endpoint to a
363 * blocking one and restore the status later.
364 */
365 cntlflag = fcntl(fd, F_GETFL, 0);
366 if (cntlflag & (O_NDELAY | O_NONBLOCK))
367 (void) fcntl(fd, F_SETFL, cntlflag & ~(O_NDELAY | O_NONBLOCK));
368
369 flags = RS_HIPRI;
370
371 while ((retval = getmsg(fd, &ctlbuf, &databuf, &flags)) < 0) {
372 if (errno == EINTR)
373 continue;
374 if (cntlflag & (O_NDELAY | O_NONBLOCK))
375 (void) fcntl(fd, F_SETFL, cntlflag);
376 t_errno = TSYSERR;
377 goto err_out;
378 }
379
380 /* did I get entire message */
381 if (retval > 0) {
382 if (cntlflag & (O_NDELAY | O_NONBLOCK))
383 (void) fcntl(fd, F_SETFL, cntlflag);
384 t_errno = TSYSERR;
385 errno = EIO;
386 goto err_out;
387 }
388
389 /*
390 * is ctl part large enough to determine type?
391 */
392 if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
393 if (cntlflag & (O_NDELAY | O_NONBLOCK))
394 (void) fcntl(fd, F_SETFL, cntlflag);
395 t_errno = TSYSERR;
396 errno = EPROTO;
397 goto err_out;
398 }
399
400 if (cntlflag & (O_NDELAY | O_NONBLOCK))
401 (void) fcntl(fd, F_SETFL, cntlflag);
402
403 /* LINTED pointer cast */
404 pptr = (union T_primitives *)ctlbuf.buf;
405
406 switch (pptr->type) {
407 case T_OK_ACK:
408 if ((ctlbuf.len < (int)sizeof (struct T_ok_ack)) ||
409 (pptr->ok_ack.CORRECT_prim != type)) {
410 t_errno = TSYSERR;
411 errno = EPROTO;
412 goto err_out;
413 }
414 if (didalloc)
415 free(ctlbuf.buf);
416 else
417 tiptr->ti_ctlbuf = ctlbuf.buf;
418 if (didralloc)
419 free(databuf.buf);
420 else
421 tiptr->ti_rcvbuf = databuf.buf;
422 return (0);
423
424 case T_ERROR_ACK:
425 if ((ctlbuf.len < (int)sizeof (struct T_error_ack)) ||
426 (pptr->error_ack.ERROR_prim != type)) {
427 t_errno = TSYSERR;
428 errno = EPROTO;
429 goto err_out;
430 }
431 /*
432 * If the error is out-of-state and there is something
433 * on the read queue, then indicate to the user that
434 * there is something that needs attention.
435 */
436 if (pptr->error_ack.TLI_error == TOUTSTATE) {
437 if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
438 t_errno = TSYSERR;
439 goto err_out;
440 }
441 if (retval > 0)
442 t_errno = TLOOK;
443 else
444 t_errno = TOUTSTATE;
445 } else {
446 t_errno = pptr->error_ack.TLI_error;
447 if (t_errno == TSYSERR)
448 errno = pptr->error_ack.UNIX_error;
449 }
450 goto err_out;
451 default:
452 t_errno = TSYSERR;
453 errno = EPROTO;
454 /* fallthru to err_out: */
455 }
456 err_out:
457 if (didalloc)
458 free(ctlbuf.buf);
459 else
460 tiptr->ti_ctlbuf = ctlbuf.buf;
461 if (didralloc)
462 free(databuf.buf);
463 else
464 tiptr->ti_rcvbuf = databuf.buf;
465 return (-1);
466 }
467
468 /*
469 * timod ioctl
470 */
471 int
472 _t_do_ioctl(int fd, char *buf, int size, int cmd, int *retlenp)
473 {
474 int retval;
475 struct strioctl strioc;
476
477 strioc.ic_cmd = cmd;
478 strioc.ic_timout = -1;
479 strioc.ic_len = size;
480 strioc.ic_dp = buf;
481
482 if ((retval = ioctl(fd, I_STR, &strioc)) < 0) {
483 t_errno = TSYSERR;
484 return (-1);
485 }
486
487 if (retval > 0) {
488 t_errno = retval&0xff;
489 if (t_errno == TSYSERR)
490 errno = (retval >> 8)&0xff;
491 return (-1);
492 }
493 if (retlenp)
494 *retlenp = strioc.ic_len;
495 return (0);
496 }
497
498 /*
499 * alloc scratch buffers and look buffers
500 */
501 /* ARGSUSED */
502 static int
503 _t_alloc_bufs(int fd, struct _ti_user *tiptr, struct T_info_ack *tsap)
504 {
505 unsigned int size1, size2;
506 t_scalar_t optsize;
507 unsigned int csize, dsize, asize, osize;
508 char *ctlbuf, *rcvbuf;
509 char *lookdbuf, *lookcbuf;
510
511 csize = _t_setsize(tsap->CDATA_size, B_FALSE);
512 dsize = _t_setsize(tsap->DDATA_size, B_FALSE);
513
514 size1 = _T_MAX(csize, dsize);
515
516 if (size1 != 0) {
517 if ((rcvbuf = malloc(size1)) == NULL)
518 return (-1);
519 if ((lookdbuf = malloc(size1)) == NULL) {
520 free(rcvbuf);
521 return (-1);
522 }
523 } else {
524 rcvbuf = NULL;
525 lookdbuf = NULL;
526 }
527
528 asize = _t_setsize(tsap->ADDR_size, B_FALSE);
529 if (tsap->OPT_size >= 0)
530 /* compensate for XTI level options */
531 optsize = tsap->OPT_size + TX_XTI_LEVEL_MAX_OPTBUF;
532 else
533 optsize = tsap->OPT_size;
534 osize = _t_setsize(optsize, B_TRUE);
535
536 /*
537 * We compute the largest buffer size needed for this provider by
538 * adding the components. [An extra sizeof (t_scalar_t) is added for
539 * each buffer to take care of rounding up for alignment.]
540 * The goal here is to compute the size of the largest possible buffer that
541 * might be needed to hold a TPI message for the transport provider
542 * on this endpoint.
543 * Note: T_ADDR_ACK contains potentially two address buffers.
544 */
545
546 size2 = (unsigned int)sizeof (union T_primitives) /* TPI struct */
547 + asize + (unsigned int)sizeof (t_scalar_t) +
548 /* first addr buffer plus alignment */
549 asize + (unsigned int)sizeof (t_scalar_t) +
550 /* second addr buffer plus alignment */
551 osize + (unsigned int)sizeof (t_scalar_t);
552 /* option buffer plus alignment */
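/*
* For illustration, a provider advertising ADDR_size 16 and an effective
* option size of 2048 would get (with a 4-byte t_scalar_t):
*	size2 = sizeof (union T_primitives) + (16 + 4) + (16 + 4) + (2048 + 4)
*/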
553
554 if ((ctlbuf = malloc(size2)) == NULL) {
555 if (size1 != 0) {
556 free(rcvbuf);
557 free(lookdbuf);
558 }
559 return (-1);
560 }
561
562 if ((lookcbuf = malloc(size2)) == NULL) {
563 if (size1 != 0) {
564 free(rcvbuf);
565 free(lookdbuf);
566 }
567 free(ctlbuf);
568 return (-1);
569 }
570
571 tiptr->ti_rcvsize = size1;
572 tiptr->ti_rcvbuf = rcvbuf;
573 tiptr->ti_ctlsize = size2;
574 tiptr->ti_ctlbuf = ctlbuf;
575
576 /*
577 * Note: The head of the lookbuffers list (and associated buffers)
578 * is allocated here on initialization.
579 * More allocated on demand.
580 */
581 tiptr->ti_lookbufs.tl_lookclen = 0;
582 tiptr->ti_lookbufs.tl_lookcbuf = lookcbuf;
583 tiptr->ti_lookbufs.tl_lookdlen = 0;
584 tiptr->ti_lookbufs.tl_lookdbuf = lookdbuf;
585
586 return (0);
587 }
588
589
590 /*
591 * set sizes of buffers
592 */
593 static unsigned int
594 _t_setsize(t_scalar_t infosize, boolean_t option)
595 {
596 static size_t optinfsize;
597
598 switch (infosize) {
599 case T_INFINITE /* -1 */:
600 if (option) {
601 if (optinfsize == 0) {
602 size_t uc = ucred_size();
603 if (uc < DEFSIZE/2)
604 optinfsize = DEFSIZE;
605 else
606 optinfsize = ucred_size() + DEFSIZE/2;
607 }
608 return ((unsigned int)optinfsize);
609 }
610 return (DEFSIZE);
611 case T_INVALID /* -2 */:
612 return (0);
613 default:
614 return ((unsigned int) infosize);
615 }
616 }
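/*
* For illustration, with DEFSIZE of 2048: _t_setsize(T_INFINITE, B_FALSE)
* returns 2048, _t_setsize(T_INVALID, B_TRUE) returns 0, and
* _t_setsize(512, B_FALSE) returns 512. For the option case (B_TRUE),
* T_INFINITE maps to at least DEFSIZE, padded by ucred_size() when
* credentials are larger than DEFSIZE/2.
*/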
617
618 static void
619 _t_reinit_tiptr(struct _ti_user *tiptr)
620 {
621 /*
622 * Note: This routine is designed for "reinitialization".
623 * The following fields are not modified here and are preserved:
624 * - ti_fd field
625 * - ti_lock
626 * - ti_next
627 * - ti_prev
628 * The above fields have to be separately initialized if this
629 * is used for a fresh initialization.
630 */
631
632 tiptr->ti_flags = 0;
633 tiptr->ti_rcvsize = 0;
634 tiptr->ti_rcvbuf = NULL;
635 tiptr->ti_ctlsize = 0;
636 tiptr->ti_ctlbuf = NULL;
637 tiptr->ti_lookbufs.tl_lookdbuf = NULL;
638 tiptr->ti_lookbufs.tl_lookcbuf = NULL;
639 tiptr->ti_lookbufs.tl_lookdlen = 0;
640 tiptr->ti_lookbufs.tl_lookclen = 0;
641 tiptr->ti_lookbufs.tl_next = NULL;
642 tiptr->ti_maxpsz = 0;
643 tiptr->ti_tsdusize = 0;
644 tiptr->ti_etsdusize = 0;
645 tiptr->ti_cdatasize = 0;
646 tiptr->ti_ddatasize = 0;
647 tiptr->ti_servtype = 0;
648 tiptr->ti_lookcnt = 0;
649 tiptr->ti_state = 0;
650 tiptr->ti_ocnt = 0;
651 tiptr->ti_prov_flag = 0;
652 tiptr->ti_qlen = 0;
653 }
654
655 /*
656 * Link manipulation routines.
657 *
658 * NBUCKETS hash buckets are used to give fast
659 * access. The number is derived from the file descriptor
660 * soft limit (64).
661 */
662
663 #define NBUCKETS 64
664 static struct _ti_user *hash_bucket[NBUCKETS];
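/*
* For example, descriptors 3 and 67 both hash to bucket 3 (3 % 64 and
* 67 % 64), so their _ti_user structures are chained off hash_bucket[3]
* via the ti_next/ti_prev links.
*/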
665
666 /*
667 * Allocates a new link and returns a pointer to it.
668 * Assumes that the caller is holding _ti_userlock via sig_mutex_lock(),
669 * so signals are deferred here.
670 */
671 static struct _ti_user *
672 add_tilink(int s)
673 {
674 struct _ti_user *tiptr;
675 struct _ti_user *prevptr;
676 struct _ti_user *curptr;
677 int x;
678 struct stat stbuf;
679
680 assert(MUTEX_HELD(&_ti_userlock));
681
682 if (s < 0 || fstat(s, &stbuf) != 0)
683 return (NULL);
684
685 x = s % NBUCKETS;
686 if (hash_bucket[x] != NULL) {
687 /*
688 * Walk along the bucket looking for
689 * duplicate entry or the end.
690 */
691 for (curptr = hash_bucket[x]; curptr != NULL;
692 curptr = curptr->ti_next) {
693 if (curptr->ti_fd == s) {
694 /*
695 * This can happen when the user has close(2)'ed
696 * a descriptor and it has then been allocated
697 * again via t_open().
698 *
699 * We re-use the existing _ti_user struct
700 * in this case rather than allocating a
701 * new one. If there are buffers
702 * associated with the existing _ti_user
703 * struct, they may not be the correct size,
704 * so we cannot use them. We free them
705 * here and re-allocate new ones
706 * later on.
707 */
708 if (curptr->ti_rcvbuf != NULL)
709 free(curptr->ti_rcvbuf);
710 free(curptr->ti_ctlbuf);
711 _t_free_lookbufs(curptr);
712 _t_reinit_tiptr(curptr);
713 curptr->ti_rdev = stbuf.st_rdev;
714 curptr->ti_ino = stbuf.st_ino;
715 return (curptr);
716 }
717 prevptr = curptr;
718 }
719 /*
720 * Allocate and link in a new one.
721 */
722 if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
723 return (NULL);
724 /*
725 * First initialize the fields common with reinitialization,
726 * then the other fields too.
727 */
728 _t_reinit_tiptr(tiptr);
729 prevptr->ti_next = tiptr;
730 tiptr->ti_prev = prevptr;
731 } else {
732 /*
733 * First entry.
734 */
735 if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
736 return (NULL);
737 _t_reinit_tiptr(tiptr);
738 hash_bucket[x] = tiptr;
739 tiptr->ti_prev = NULL;
740 }
741 tiptr->ti_next = NULL;
742 tiptr->ti_fd = s;
743 tiptr->ti_rdev = stbuf.st_rdev;
744 tiptr->ti_ino = stbuf.st_ino;
745 (void) mutex_init(&tiptr->ti_lock, USYNC_THREAD, NULL);
746 return (tiptr);
747 }
748
749 /*
750 * Find a link by descriptor
751 * Assumes that the caller is holding _ti_userlock.
752 */
753 static struct _ti_user *
754 find_tilink(int s)
755 {
756 struct _ti_user *curptr;
757 int x;
758 struct stat stbuf;
759
760 assert(MUTEX_HELD(&_ti_userlock));
761
762 if (s < 0 || fstat(s, &stbuf) != 0)
763 return (NULL);
764
765 x = s % NBUCKETS;
766 /*
767 * Walk along the bucket looking for the descriptor.
768 */
769 for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
770 if (curptr->ti_fd == s) {
771 if (curptr->ti_rdev == stbuf.st_rdev &&
772 curptr->ti_ino == stbuf.st_ino)
773 return (curptr);
774 (void) _t_delete_tilink(s);
775 }
776 }
777 return (NULL);
778 }
779
780 /*
781 * Assumes that the caller is holding _ti_userlock.
782 * Also assumes that all signals are blocked.
783 */
784 int
785 _t_delete_tilink(int s)
786 {
787 struct _ti_user *curptr;
788 int x;
789
790 /*
791 * Find the link.
792 */
793 assert(MUTEX_HELD(&_ti_userlock));
794 if (s < 0)
795 return (-1);
796 x = s % NBUCKETS;
797 /*
798 * Walk along the bucket looking for
799 * the descriptor.
800 */
801 for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
802 if (curptr->ti_fd == s) {
803 struct _ti_user *nextptr;
804 struct _ti_user *prevptr;
805
806 nextptr = curptr->ti_next;
807 prevptr = curptr->ti_prev;
808 if (prevptr)
809 prevptr->ti_next = nextptr;
810 else
811 hash_bucket[x] = nextptr;
812 if (nextptr)
813 nextptr->ti_prev = prevptr;
814
815 /*
816 * free resource associated with the curptr
817 */
818 if (curptr->ti_rcvbuf != NULL)
819 free(curptr->ti_rcvbuf);
820 free(curptr->ti_ctlbuf);
821 _t_free_lookbufs(curptr);
822 (void) mutex_destroy(&curptr->ti_lock);
823 free(curptr);
824 return (0);
825 }
826 }
827 return (-1);
828 }
829
830 /*
831 * Allocate a TLI state structure and synch it with the kernel
832 * *tiptr is returned
833 * Assumes that the caller is holding the _ti_userlock and has blocked signals.
834 *
835 * This function may fail the first time it is called with a given transport
836 * if it doesn't support the T_CAPABILITY_REQ TPI message.
837 */
838 struct _ti_user *
839 _t_create(int fd, struct t_info *info, int api_semantics, int *t_capreq_failed)
840 {
841 /*
842 * Aligned data buffer for ioctl.
843 */
844 union {
845 struct ti_sync_req ti_req;
846 struct ti_sync_ack ti_ack;
847 union T_primitives t_prim;
848 char pad[128];
849 } ioctl_data;
850 void *ioctlbuf = &ioctl_data; /* TI_SYNC/GETINFO with room to grow */
851 /* preferred location first local variable */
852 /* see note below */
853 /*
854 * Note: We use "ioctlbuf" allocated on stack above with
855 * room to grow since (struct ti_sync_ack) can grow in size
856 * on future kernels. (We do not use the malloc'd "ti_ctlbuf" as that
857 * is part of the instance structure, which may not exist yet.)
858 * Its preferred declaration location is first local variable in this
859 * procedure as bugs causing overruns will be detectable on
860 * platforms where procedure calling conventions place return
861 * address on stack (such as x86) instead of causing silent
862 * memory corruption.
863 */
864 struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
865 struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
866 struct T_capability_req *tcrp = (struct T_capability_req *)ioctlbuf;
867 struct T_capability_ack *tcap = (struct T_capability_ack *)ioctlbuf;
868 struct T_info_ack *tiap = &tcap->INFO_ack;
869 struct _ti_user *ntiptr;
870 int expected_acksize;
871 int retlen, rstate, sv_errno, rval;
872
873 assert(MUTEX_HELD(&_ti_userlock));
874
875 /*
876 * Use the ioctls required for sync'ing state with the kernel.
877 * We use two ioctls. TI_CAPABILITY is used to get TPI information and
878 * TI_SYNC is used to synchronise state with timod. Statically linked
879 * TLI applications will no longer work on older releases where there
880 * are no TI_SYNC and TI_CAPABILITY.
881 */
882
883 /*
884 * Request info about transport.
885 * Assumes that TC1_INFO should always be implemented.
886 * For TI_CAPABILITY, the size argument to ioctl specifies the maximum
887 * buffer size.
888 */
889 tcrp->PRIM_type = T_CAPABILITY_REQ;
890 tcrp->CAP_bits1 = TC1_INFO | TC1_ACCEPTOR_ID;
891 rval = _t_do_ioctl(fd, (char *)ioctlbuf,
892 (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
893 expected_acksize = (int)sizeof (struct T_capability_ack);
894
895 if (rval < 0) {
896 /*
897 * TI_CAPABILITY may fail when transport provider doesn't
898 * support T_CAPABILITY_REQ message type. In this case file
899 * descriptor may be unusable (when transport provider sent
900 * M_ERROR in response to T_CAPABILITY_REQ). This should only
901 * happen once during system lifetime for given transport
902 * provider since timod will emulate TI_CAPABILITY after it
903 * detected the failure.
904 */
905 if (t_capreq_failed != NULL)
906 *t_capreq_failed = 1;
907 return (NULL);
908 }
909
910 if (retlen != expected_acksize) {
911 t_errno = TSYSERR;
912 errno = EIO;
913 return (NULL);
914 }
915
916 if ((tcap->CAP_bits1 & TC1_INFO) == 0) {
917 t_errno = TSYSERR;
918 errno = EPROTO;
919 return (NULL);
920 }
921 if (info != NULL) {
922 if (tiap->PRIM_type != T_INFO_ACK) {
923 t_errno = TSYSERR;
924 errno = EPROTO;
925 return (NULL);
926 }
927 info->addr = tiap->ADDR_size;
928 info->options = tiap->OPT_size;
929 info->tsdu = tiap->TSDU_size;
930 info->etsdu = tiap->ETSDU_size;
931 info->connect = tiap->CDATA_size;
932 info->discon = tiap->DDATA_size;
933 info->servtype = tiap->SERV_type;
934 if (_T_IS_XTI(api_semantics)) {
935 /*
936 * XTI ONLY - TLI "struct t_info" does not
937 * have "flags"
938 */
939 info->flags = 0;
940 if (tiap->PROVIDER_flag & (SENDZERO|OLD_SENDZERO))
941 info->flags |= T_SENDZERO;
942 /*
943 * Some day there MAY be a NEW bit in T_info_ack
944 * PROVIDER_flag namespace exposed by TPI header
945 * <sys/tihdr.h> which will functionally correspond to
946 * role played by T_ORDRELDATA in info->flags namespace
947 * When that bit exists, we can add a test to see if
948 * it is set and set T_ORDRELDATA.
949 * Note: Currently only mOSI ("minimal OSI") provider
950 * is specified to use T_ORDRELDATA so probability of
951 * needing it is minimal.
952 */
953 }
954 }
955
956 /*
957 * If this is the first time, or there is no instance (after
958 * fork/exec, dup, etc.), then create and initialize the data
959 * structure and allocate buffers.
960 */
961 ntiptr = add_tilink(fd);
962 if (ntiptr == NULL) {
963 t_errno = TSYSERR;
964 errno = ENOMEM;
965 return (NULL);
966 }
967 sig_mutex_lock(&ntiptr->ti_lock);
968
969 /*
970 * Allocate buffers for the new descriptor
971 */
972 if (_t_alloc_bufs(fd, ntiptr, tiap) < 0) {
973 sv_errno = errno;
974 (void) _t_delete_tilink(fd);
975 t_errno = TSYSERR;
976 sig_mutex_unlock(&ntiptr->ti_lock);
977 errno = sv_errno;
978 return (NULL);
979 }
980
981 /* Fill instance structure */
982
983 ntiptr->ti_lookcnt = 0;
984 ntiptr->ti_flags = USED;
985 ntiptr->ti_state = T_UNINIT;
986 ntiptr->ti_ocnt = 0;
987
988 assert(tiap->TIDU_size > 0);
989 ntiptr->ti_maxpsz = tiap->TIDU_size;
990 assert(tiap->TSDU_size >= -2);
991 ntiptr->ti_tsdusize = tiap->TSDU_size;
992 assert(tiap->ETSDU_size >= -2);
993 ntiptr->ti_etsdusize = tiap->ETSDU_size;
994 assert(tiap->CDATA_size >= -2);
995 ntiptr->ti_cdatasize = tiap->CDATA_size;
996 assert(tiap->DDATA_size >= -2);
997 ntiptr->ti_ddatasize = tiap->DDATA_size;
998 ntiptr->ti_servtype = tiap->SERV_type;
999 ntiptr->ti_prov_flag = tiap->PROVIDER_flag;
1000
1001 if ((tcap->CAP_bits1 & TC1_ACCEPTOR_ID) != 0) {
1002 ntiptr->acceptor_id = tcap->ACCEPTOR_id;
1003 ntiptr->ti_flags |= V_ACCEPTOR_ID;
1004 } else
1006 ntiptr->ti_flags &= ~V_ACCEPTOR_ID;
1007
1008 /*
1009 * Restore state from kernel (caveat some heuristics)
1010 */
1011 switch (tiap->CURRENT_state) {
1012
1013 case TS_UNBND:
1014 ntiptr->ti_state = T_UNBND;
1015 break;
1016
1017 case TS_IDLE:
1018 if ((rstate = _t_adjust_state(fd, T_IDLE)) < 0) {
1019 sv_errno = errno;
1020 (void) _t_delete_tilink(fd);
1021 sig_mutex_unlock(&ntiptr->ti_lock);
1022 errno = sv_errno;
1023 return (NULL);
1024 }
1025 ntiptr->ti_state = rstate;
1026 break;
1027
1028 case TS_WRES_CIND:
1029 ntiptr->ti_state = T_INCON;
1030 break;
1031
1032 case TS_WCON_CREQ:
1033 ntiptr->ti_state = T_OUTCON;
1034 break;
1035
1036 case TS_DATA_XFER:
1037 if ((rstate = _t_adjust_state(fd, T_DATAXFER)) < 0) {
1038 sv_errno = errno;
1039 (void) _t_delete_tilink(fd);
1040 sig_mutex_unlock(&ntiptr->ti_lock);
1041 errno = sv_errno;
1042 return (NULL);
1043 }
1044 ntiptr->ti_state = rstate;
1045 break;
1046
1047 case TS_WIND_ORDREL:
1048 ntiptr->ti_state = T_OUTREL;
1049 break;
1050
1051 case TS_WREQ_ORDREL:
1052 if ((rstate = _t_adjust_state(fd, T_INREL)) < 0) {
1053 sv_errno = errno;
1054 (void) _t_delete_tilink(fd);
1055 sig_mutex_unlock(&ntiptr->ti_lock);
1056 errno = sv_errno;
1057 return (NULL);
1058 }
1059 ntiptr->ti_state = rstate;
1060 break;
1061 default:
1062 t_errno = TSTATECHNG;
1063 (void) _t_delete_tilink(fd);
1064 sig_mutex_unlock(&ntiptr->ti_lock);
1065 return (NULL);
1066 }
1067
1068 /*
1069 * Sync information with timod.
1070 */
1071 tsrp->tsr_flags = TSRF_QLEN_REQ;
1072
1073 rval = _t_do_ioctl(fd, ioctlbuf,
1074 (int)sizeof (struct ti_sync_req), TI_SYNC, &retlen);
1075 expected_acksize = (int)sizeof (struct ti_sync_ack);
1076
1077 if (rval < 0) {
1078 sv_errno = errno;
1079 (void) _t_delete_tilink(fd);
1080 t_errno = TSYSERR;
1081 sig_mutex_unlock(&ntiptr->ti_lock);
1082 errno = sv_errno;
1083 return (NULL);
1084 }
1085
1086 /*
1087 * This is a "less than" check as "struct ti_sync_ack" returned by
1088 * TI_SYNC can grow in size in future kernels. If/when a statically
1089 * linked application is run on a future kernel, it should not fail.
1090 */
1091 if (retlen < expected_acksize) {
1092 sv_errno = errno;
1093 (void) _t_delete_tilink(fd);
1094 t_errno = TSYSERR;
1095 sig_mutex_unlock(&ntiptr->ti_lock);
1096 errno = sv_errno;
1097 return (NULL);
1098 }
1099
1100 if (_T_IS_TLI(api_semantics))
1101 tsap->tsa_qlen = 0; /* not needed for TLI */
1102
1103 ntiptr->ti_qlen = tsap->tsa_qlen;
1104 sig_mutex_unlock(&ntiptr->ti_lock);
1105 return (ntiptr);
1106 }
1107
1108
1109 static int
1110 _t_adjust_state(int fd, int instate)
1111 {
1112 char ctlbuf[sizeof (t_scalar_t)];
1113 char databuf[sizeof (int)]; /* size unimportant - anything > 0 */
1114 struct strpeek arg;
1115 int outstate, retval;
1116
1117 /*
1118 * Peek at message on stream head (if any)
1119 * and see if it is data
1120 */
1121 arg.ctlbuf.buf = ctlbuf;
1122 arg.ctlbuf.maxlen = (int)sizeof (ctlbuf);
1123 arg.ctlbuf.len = 0;
1124
1125 arg.databuf.buf = databuf;
1126 arg.databuf.maxlen = (int)sizeof (databuf);
1127 arg.databuf.len = 0;
1128
1129 arg.flags = 0;
1130
1131 if ((retval = ioctl(fd, I_PEEK, &arg)) < 0) {
1132 t_errno = TSYSERR;
1133 return (-1);
1134 }
1135 outstate = instate;
1136 /*
1137 * If peek shows something at stream head, then
1138 * Adjust "outstate" based on some heuristics.
1139 */
1140 if (retval > 0) {
1141 switch (instate) {
1142 case T_IDLE:
1143 /*
1144 * The following heuristic is to handle data
1145 * ahead of T_DISCON_IND indications that might
1146 * be at the stream head waiting to be
1147 * read (T_DATA_IND or M_DATA)
1148 */
1149 if (((arg.ctlbuf.len == 4) &&
1150 /* LINTED pointer cast */
1151 ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
1152 ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
1153 outstate = T_DATAXFER;
1154 }
1155 break;
1156 case T_DATAXFER:
1157 /*
1158 * The following heuristic is to handle
1159 * the case where the connection is established
1160 * and in data transfer state at the provider
1161 * but the T_CONN_CON has not yet been read
1162 * from the stream head.
1163 */
1164 if ((arg.ctlbuf.len == 4) &&
1165 /* LINTED pointer cast */
1166 ((*(int32_t *)arg.ctlbuf.buf) == T_CONN_CON))
1167 outstate = T_OUTCON;
1168 break;
1169 case T_INREL:
1170 /*
1171 * The following heuristic is to handle data
1172 * ahead of T_ORDREL_IND indications that might
1173 * be at the stream head waiting to be
1174 * read (T_DATA_IND or M_DATA)
1175 */
1176 if (((arg.ctlbuf.len == 4) &&
1177 /* LINTED pointer cast */
1178 ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
1179 ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
1180 outstate = T_DATAXFER;
1181 }
1182 break;
1183 default:
1184 break;
1185 }
1186 }
1187 return (outstate);
1188 }
1189
1190 /*
1191 * Assumes caller has blocked signals at least in this thread (for safe
1192 * malloc/free operations)
1193 */
1194 static int
1195 _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf)
1196 {
1197 unsigned size2;
1198
1199 assert(MUTEX_HELD(&tiptr->ti_lock));
1200 size2 = tiptr->ti_ctlsize; /* same size as default ctlbuf */
1201
1202 if ((*retbuf = malloc(size2)) == NULL) {
1203 return (-1);
1204 }
1205 return (size2);
1206 }
1207
1208
1209 /*
1210 * Assumes caller has blocked signals at least in this thread (for safe
1211 * malloc/free operations)
1212 */
1213 int
1214 _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf)
1215 {
1216 unsigned size1;
1217
1218 assert(MUTEX_HELD(&tiptr->ti_lock));
1219 size1 = tiptr->ti_rcvsize; /* same size as default rcvbuf */
1220
1221 if ((*retbuf = malloc(size1)) == NULL) {
1222 return (-1);
1223 }
1224 return (size1);
1225 }
1226
1227 /*
1228 * Free lookbuffer structures and associated resources
1229 * Assumes ti_lock held for MT case.
1230 */
1231 static void
1232 _t_free_lookbufs(struct _ti_user *tiptr)
1233 {
1234 struct _ti_lookbufs *tlbs, *prev_tlbs, *head_tlbs;
1235
1236 /*
1237 * Assertion:
1238 * Either the structure lock or the global list
1239 * manipulation lock should be held. The assumption is that nothing
1240 * else can access the descriptor while the global list manipulation
1241 * lock is held, so it is OK to manipulate fields without the
1242 * structure lock.
1243 */
1244 assert(MUTEX_HELD(&tiptr->ti_lock) || MUTEX_HELD(&_ti_userlock));
1245
1246 /*
1247 * Free only the buffers in the first lookbuf
1248 */
1249 head_tlbs = &tiptr->ti_lookbufs;
1250 if (head_tlbs->tl_lookdbuf != NULL) {
1251 free(head_tlbs->tl_lookdbuf);
1252 head_tlbs->tl_lookdbuf = NULL;
1253 }
1254 free(head_tlbs->tl_lookcbuf);
1255 head_tlbs->tl_lookcbuf = NULL;
1256 /*
1257 * Free the node and the buffers in the rest of the
1258 * list
1259 */
1260
1261 tlbs = head_tlbs->tl_next;
1262 head_tlbs->tl_next = NULL;
1263
1264 while (tlbs != NULL) {
1265 if (tlbs->tl_lookdbuf != NULL)
1266 free(tlbs->tl_lookdbuf);
1267 free(tlbs->tl_lookcbuf);
1268 prev_tlbs = tlbs;
1269 tlbs = tlbs->tl_next;
1270 free(prev_tlbs);
1271 }
1272 }
1273
1274 /*
1275 * Free lookbuffer event list head.
1276 * Consume current lookbuffer event
1277 * Assumes ti_lock held for MT case.
1278 * Note: The head of this list is part of the instance
1279 * structure so the code is a little unorthodox.
1280 */
1281 void
1282 _t_free_looklist_head(struct _ti_user *tiptr)
1283 {
1284 struct _ti_lookbufs *tlbs, *next_tlbs;
1285
1286 tlbs = &tiptr->ti_lookbufs;
1287
1288 if (tlbs->tl_next) {
1289 /*
1290 * Free the control and data buffers
1291 */
1292 if (tlbs->tl_lookdbuf != NULL)
1293 free(tlbs->tl_lookdbuf);
1294 free(tlbs->tl_lookcbuf);
1295 /*
1296 * Replace with next lookbuf event contents
1297 */
1298 next_tlbs = tlbs->tl_next;
1299 tlbs->tl_next = next_tlbs->tl_next;
1300 tlbs->tl_lookcbuf = next_tlbs->tl_lookcbuf;
1301 tlbs->tl_lookclen = next_tlbs->tl_lookclen;
1302 tlbs->tl_lookdbuf = next_tlbs->tl_lookdbuf;
1303 tlbs->tl_lookdlen = next_tlbs->tl_lookdlen;
1304 free(next_tlbs);
1305 /*
1306 * Decrement the count - it should never reach zero
1307 * in this path.
1308 */
1309 tiptr->ti_lookcnt--;
1310 assert(tiptr->ti_lookcnt > 0);
1311 } else {
1312 /*
1313 * No more look buffer events - just clear the flag
1314 * and leave the buffers alone
1315 */
1316 assert(tiptr->ti_lookcnt == 1);
1317 tiptr->ti_lookcnt = 0;
1318 }
1319 }
1320
1321 /*
1322 * Discard lookbuffer events.
1323 * Assumes ti_lock held for MT case.
1324 */
1325 void
1326 _t_flush_lookevents(struct _ti_user *tiptr)
1327 {
1328 struct _ti_lookbufs *tlbs, *prev_tlbs;
1329
1330 /*
1331 * Leave the first node's buffers alone (i.e. allocated)
1332 * but reset the flag.
1333 */
1334 assert(MUTEX_HELD(&tiptr->ti_lock));
1335 tiptr->ti_lookcnt = 0;
1336 /*
1337 * Blow away the rest of the list
1338 */
1339 tlbs = tiptr->ti_lookbufs.tl_next;
1340 tiptr->ti_lookbufs.tl_next = NULL;
1341 while (tlbs != NULL) {
1342 if (tlbs->tl_lookdbuf != NULL)
1343 free(tlbs->tl_lookdbuf);
1344 free(tlbs->tl_lookcbuf);
1345 prev_tlbs = tlbs;
1346 tlbs = tlbs->tl_next;
1347 free(prev_tlbs);
1348 }
1349 }
1350
1351
1352 /*
1353 * This routine checks if the control buffer in the instance structure
1354 * is available (non-null). If it is, the buffer is acquired and marked busy
1355 * (null). If it is busy (possible in MT programs), it allocates a new
1356 * buffer and sets a flag indicating new memory was allocated and the caller
1357 * has to free it.
1358 */
1359 int
1360 _t_acquire_ctlbuf(
1361 struct _ti_user *tiptr,
1362 struct strbuf *ctlbufp,
1363 int *didallocp)
1364 {
1365 *didallocp = 0;
1366
1367 ctlbufp->len = 0;
1368 if (tiptr->ti_ctlbuf) {
1369 ctlbufp->buf = tiptr->ti_ctlbuf;
1370 tiptr->ti_ctlbuf = NULL;
1371 ctlbufp->maxlen = tiptr->ti_ctlsize;
1372 } else {
1373 /*
1374 * tiptr->ti_ctlbuf is in use
1375 * allocate new buffer and free after use.
1376 */
1377 if ((ctlbufp->maxlen = _t_cbuf_alloc(tiptr,
1378 &ctlbufp->buf)) < 0) {
1379 t_errno = TSYSERR;
1380 return (-1);
1381 }
1382 *didallocp = 1;
1383 }
1384 return (0);
1385 }
1386
1387 /*
1388 * This routine checks if the receive buffer in the instance structure
1389 * is available (non-null). If it is, the buffer is acquired and marked busy
1390 * (null). If it is busy (possible in MT programs), it allocates a new
1391 * buffer and sets a flag indicating new memory was allocated and the caller
1392 * has to free it.
1393 * Note: The receive buffer pointer can also be null if the transport
1394 * provider does not support connect/disconnect data, (e.g. TCP) - not
1395 * just when it is "busy". In that case, ti_rcvsize will be 0 and that is
1396 * used to instantiate the databuf which points to a null buffer of
1397 * length 0 which is the right thing to do for that case.
1398 */
1399 int
1400 _t_acquire_databuf(
1401 struct _ti_user *tiptr,
1402 struct strbuf *databufp,
1403 int *didallocp)
1404 {
1405 *didallocp = 0;
1406
1407 databufp->len = 0;
1408 if (tiptr->ti_rcvbuf) {
1409 assert(tiptr->ti_rcvsize != 0);
1410 databufp->buf = tiptr->ti_rcvbuf;
1411 tiptr->ti_rcvbuf = NULL;
1412 databufp->maxlen = tiptr->ti_rcvsize;
1413 } else if (tiptr->ti_rcvsize == 0) {
1414 databufp->buf = NULL;
1415 databufp->maxlen = 0;
1416 } else {
1417 /*
1418 * tiptr->ti_rcvbuf is in use
1419 * allocate new buffer and free after use.
1420 */
1421 if ((databufp->maxlen = _t_rbuf_alloc(tiptr,
1422 &databufp->buf)) < 0) {
1423 t_errno = TSYSERR;
1424 return (-1);
1425 }
1426 *didallocp = 1;
1427 }
1428 return (0);
1429 }
1430
1431 /*
1432 * This routine requests timod to look for any expedited data
1433 * queued in the "receive buffers" in the kernel. Used for XTI
1434 * t_look() semantics for transports that send expedited
1435 * data inline (e.g. TCP).
1436 * Returns -1 for failure
1437 * Returns 0 for success
1438 * On a successful return, the location pointed by "expedited_queuedp"
1439 * contains
1440 * 0 if no expedited data is found queued in "receive buffers"
1441 * 1 if expedited data is found queued in "receive buffers"
1442 */
1443
1444 int
1445 _t_expinline_queued(int fd, int *expedited_queuedp)
1446 {
1447 union {
1448 struct ti_sync_req ti_req;
1449 struct ti_sync_ack ti_ack;
1450 char pad[128];
1451 } ioctl_data;
1452 void *ioctlbuf = &ioctl_data; /* for TI_SYNC with room to grow */
1453 /* preferred location first local variable */
1454 /* see note in _t_create above */
1455 struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
1456 struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
1457 int rval, retlen;
1458
1459 *expedited_queuedp = 0;
1460 /* request info on rq expinds */
1461 tsrp->tsr_flags = TSRF_IS_EXP_IN_RCVBUF;
1462 do {
1463 rval = _t_do_ioctl(fd, ioctlbuf,
1464 (int)sizeof (struct T_info_req), TI_SYNC, &retlen);
1465 } while (rval < 0 && errno == EINTR);
1466
1467 if (rval < 0)
1468 return (-1);
1469
1470 /*
1471 * This is a "less than" check as "struct ti_sync_ack" returned by
1472 * TI_SYNC can grow in size in future kernels. If/when a statically
1473 * linked application is run on a future kernel, it should not fail.
1474 */
1475 if (retlen < (int)sizeof (struct ti_sync_ack)) {
1476 t_errno = TSYSERR;
1477 errno = EIO;
1478 return (-1);
1479 }
1480 if (tsap->tsa_flags & TSAF_EXP_QUEUED)
1481 *expedited_queuedp = 1;
1482 return (0);
1483 }
1484
1485 /*
1486 * Support functions for use by functions that do scatter/gather
1487 * like t_sndv(), t_rcvv() etc..follow below.
1488 */
1489
1490 /*
1491 * _t_bytecount_upto_intmax() :
1492 * Sum of the lengths of the individual buffers in
1493 * the t_iovec array. If the sum exceeds INT_MAX
1494 * it is truncated to INT_MAX.
1495 */
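/*
* For example, a two-element array with iov_len values 100 and 200 yields
* 300; if the running total would exceed INT_MAX, the result is clamped
* to INT_MAX.
*/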
1496 unsigned int
1497 _t_bytecount_upto_intmax(const struct t_iovec *tiov, unsigned int tiovcount)
1498 {
1499 size_t nbytes;
1500 int i;
1501
1502 nbytes = 0;
1503 for (i = 0; i < tiovcount && nbytes < INT_MAX; i++) {
1504 if (tiov[i].iov_len >= INT_MAX) {
1505 nbytes = INT_MAX;
1506 break;
1507 }
1508 nbytes += tiov[i].iov_len;
1509 }
1510
1511 if (nbytes > INT_MAX)
1512 nbytes = INT_MAX;
1513
1514 return ((unsigned int)nbytes);
1515 }
1516
1517 /*
1518 * Gather the data in the t_iovec buffers, into a single linear buffer
1519 * starting at dataptr. Caller must have allocated sufficient space
1520 * starting at dataptr. The total amount of data that is gathered is
1521 * limited to INT_MAX. Any remaining data in the t_iovec buffers is
1522 * not copied.
1523 */
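/*
* A minimal usage sketch (hypothetical caller):
*
*	struct t_iovec tiov[2];
*	char buf[5];
*
*	tiov[0].iov_base = "he";  tiov[0].iov_len = 2;
*	tiov[1].iov_base = "llo"; tiov[1].iov_len = 3;
*	_t_gather(buf, tiov, 2);	-- buf now holds the 5 bytes "hello"
*/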
1524 void
1525 _t_gather(char *dataptr, const struct t_iovec *tiov, unsigned int tiovcount)
1526 {
1527 char *curptr;
1528 unsigned int cur_count;
1529 unsigned int nbytes_remaining;
1530 int i;
1531
1532 curptr = dataptr;
1533 cur_count = 0;
1534
1535 nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
1536 for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
1537 if (tiov[i].iov_len <= nbytes_remaining)
1538 cur_count = (int)tiov[i].iov_len;
1539 else
1540 cur_count = nbytes_remaining;
1541 (void) memcpy(curptr, tiov[i].iov_base, cur_count);
1542 curptr += cur_count;
1543 nbytes_remaining -= cur_count;
1544 }
1545 }
1546
1547 /*
1548 * Scatter the data from the single linear buffer at pdatabuf->buf into
1549 * the t_iovec buffers.
1550 */
1551 void
1552 _t_scatter(struct strbuf *pdatabuf, struct t_iovec *tiov, int tiovcount)
1553 {
1554 char *curptr;
1555 unsigned int nbytes_remaining;
1556 unsigned int curlen;
1557 int i;
1558
1559 /*
1560 * There cannot be any uncopied data leftover in pdatabuf
1561 * at the conclusion of this function. (asserted below)
1562 */
1563 assert(pdatabuf->len <= _t_bytecount_upto_intmax(tiov, tiovcount));
1564 curptr = pdatabuf->buf;
1565 nbytes_remaining = pdatabuf->len;
1566 for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
1567 if (tiov[i].iov_len < nbytes_remaining)
1568 curlen = (unsigned int)tiov[i].iov_len;
1569 else
1570 curlen = nbytes_remaining;
1571 (void) memcpy(tiov[i].iov_base, curptr, curlen);
1572 curptr += curlen;
1573 nbytes_remaining -= curlen;
1574 }
1575 }
1576
1577 /*
1578 * Adjust the iovec array for subsequent use. Examine each element in the
1579 * iovec array, and zero out the iov_len if the buffer was sent fully;
1580 * otherwise the buffer was only partially sent, so adjust both iov_len and
1581 * iov_base.
1582 *
1583 */
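/*
* For example, with two iovecs of length 3 and 4 and bytes_sent == 5,
* the first entry's iov_len is zeroed and the second is left with
* iov_len 2 and its iov_base advanced by 2.
*/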
1584 void
1585 _t_adjust_iov(int bytes_sent, struct iovec *iov, int *iovcountp)
1586 {
1587
1588 int i;
1589
1590 for (i = 0; i < *iovcountp && bytes_sent; i++) {
1591 if (iov[i].iov_len == 0)
1592 continue;
1593 if (bytes_sent < iov[i].iov_len)
1594 break;
1595 else {
1596 bytes_sent -= iov[i].iov_len;
1597 iov[i].iov_len = 0;
1598 }
1599 }
1600 iov[i].iov_len -= bytes_sent;
1601 iov[i].iov_base += bytes_sent;
1602 }
1603
1604 /*
1605 * Copy the t_iovec array to the iovec array while taking care to see
1606 * that the sum of the buffer lengths in the result is not more than
1607 * INT_MAX. This function requires that T_IOV_MAX is no larger than
1608 * IOV_MAX. Otherwise the resulting array is not a suitable input to
1609 * writev(). If the sum of the lengths in t_iovec is zero, so is the
1610 * resulting iovec.
1611 */
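/*
* For example, t_iovec lengths of { 10, INT_MAX } copy over as iovec
* lengths of { 10, INT_MAX - 10 }, keeping the total at INT_MAX.
*/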
1612 void
1613 _t_copy_tiov_to_iov(const struct t_iovec *tiov, int tiovcount,
1614 struct iovec *iov, int *iovcountp)
1615 {
1616 int i;
1617 unsigned int nbytes_remaining;
1618
1619 nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
1620 i = 0;
1621 do {
1622 iov[i].iov_base = tiov[i].iov_base;
1623 if (tiov[i].iov_len > nbytes_remaining)
1624 iov[i].iov_len = nbytes_remaining;
1625 else
1626 iov[i].iov_len = tiov[i].iov_len;
1627 nbytes_remaining -= iov[i].iov_len;
1628 i++;
1629 } while (nbytes_remaining != 0 && i < tiovcount);
1630
1631 *iovcountp = i;
1632 }
1633
1634 /*
1635 * Routine called after connection establishment on transports where
1636 * connection establishment changes certain transport attributes such as
1637 * TIDU_size
1638 */
1639 int
1640 _t_do_postconn_sync(int fd, struct _ti_user *tiptr)
1641 {
1642 union {
1643 struct T_capability_req tc_req;
1644 struct T_capability_ack tc_ack;
1645 } ioctl_data;
1646
1647 void *ioctlbuf = &ioctl_data;
1648 int expected_acksize;
1649 int retlen, rval;
1650 struct T_capability_req *tc_reqp = (struct T_capability_req *)ioctlbuf;
1651 struct T_capability_ack *tc_ackp = (struct T_capability_ack *)ioctlbuf;
1652 struct T_info_ack *tiap;
1653
1654 /*
1655 * This T_CAPABILITY_REQ should not fail, even if it is unsupported
1656 * by the transport provider. timod will emulate it in that case.
1657 */
1658 tc_reqp->PRIM_type = T_CAPABILITY_REQ;
1659 tc_reqp->CAP_bits1 = TC1_INFO;
1660 rval = _t_do_ioctl(fd, (char *)ioctlbuf,
1661 (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
1662 expected_acksize = (int)sizeof (struct T_capability_ack);
1663
1664 if (rval < 0)
1665 return (-1);
1666
1667 /*
1668 * T_capability TPI messages are extensible and can grow in future.
1669 * However timod will take care of returning no more information
1670 * than what was requested, and truncating the "extended"
1671 * information towards the end of the T_capability_ack, if necessary.
1672 */
1673 if (retlen != expected_acksize) {
1674 t_errno = TSYSERR;
1675 errno = EIO;
1676 return (-1);
1677 }
1678
1679 /*
1680 * The T_info_ack part of the T_capability_ack is guaranteed to be
1681 * present only if the corresponding TC1_INFO bit is set
1682 */
1683 if ((tc_ackp->CAP_bits1 & TC1_INFO) == 0) {
1684 t_errno = TSYSERR;
1685 errno = EPROTO;
1686 return (-1);
1687 }
1688
1689 tiap = &tc_ackp->INFO_ack;
1690 if (tiap->PRIM_type != T_INFO_ACK) {
1691 t_errno = TSYSERR;
1692 errno = EPROTO;
1693 return (-1);
1694 }
1695
1696 /*
1697 * Note: Sync with the latest information returned in "struct T_info_ack"
1698 * but deliberately do not sync the state here, as user level state
1699 * construction is not required; only update attributes which
1700 * may have changed because of negotiations during connection
1701 * establishment.
1702 */
1703 assert(tiap->TIDU_size > 0);
1704 tiptr->ti_maxpsz = tiap->TIDU_size;
1705 assert(tiap->TSDU_size >= T_INVALID);
1706 tiptr->ti_tsdusize = tiap->TSDU_size;
1707 assert(tiap->ETSDU_size >= T_INVALID);
1708 tiptr->ti_etsdusize = tiap->ETSDU_size;
1709 assert(tiap->CDATA_size >= T_INVALID);
1710 tiptr->ti_cdatasize = tiap->CDATA_size;
1711 assert(tiap->DDATA_size >= T_INVALID);
1712 tiptr->ti_ddatasize = tiap->DDATA_size;
1713 tiptr->ti_prov_flag = tiap->PROVIDER_flag;
1714
1715 return (0);
1716 }
1717