/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/sdt.h>

#include <netsmb/smb_osdep.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_rq.h>

/*
 * How long to wait before restarting a request (after reconnect)
 */
#define	SMB_RCNDELAY		2	/* seconds */

/*
 * leave this zero - we can't second guess server side effects of
 * duplicate ops, this isn't nfs!
 */
#define	SMBMAXRESTARTS		0


static int  smb_rq_reply(struct smb_rq *rqp);
static int  smb_rq_enqueue(struct smb_rq *rqp);
static int  smb_rq_getenv(struct smb_connobj *layer,
		struct smb_vc **vcpp, struct smb_share **sspp);
static int  smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
static int  smb_t2_reply(struct smb_t2rq *t2p);
static int  smb_nt_reply(struct smb_ntrq *ntp);


/*
 * Done with a request object.  Free its contents.
 * If it was allocated (SMBR_ALLOCED) free it too.
 * Some of these are stack locals, not allocated.
 *
 * No locks here - this is the last ref.
 */
void
smb_rq_done(struct smb_rq *rqp)
{

	/*
	 * No smb_vc_rele() here - see smb_rq_init()
	 */
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	mutex_destroy(&rqp->sr_lock);
	cv_destroy(&rqp->sr_cond);
	if (rqp->sr_flags & SMBR_ALLOCED)
		kmem_free(rqp, sizeof (*rqp));
}

int
smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
	struct smb_rq **rqpp)
{
	struct smb_rq *rqp;
	int error;

	rqp = (struct smb_rq *)kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
	if (rqp == NULL)
		return (ENOMEM);
	error = smb_rq_init(rqp, layer, cmd, scred);
	if (error) {
		smb_rq_done(rqp);
		return (error);
	}
	rqp->sr_flags |= SMBR_ALLOCED;
	*rqpp = rqp;
	return (0);
}

int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof (*rqp));
	mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);

	error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return (error);

	/*
	 * We copied a VC pointer (vcp) into rqp->sr_vc,
	 * but we do NOT do a smb_vc_hold here.  Instead,
	 * the caller is responsible for the hold on the
	 * share or the VC as needed.  For smbfs callers,
	 * the hold is on the share, via the smbfs mount.
	 * For nsmb ioctl callers, the hold is done when
	 * the driver handle gets VC or share references.
	 * This design avoids frequent hold/rele activity
	 * when creating and completing requests.
	 */

	rqp->sr_rexmit = SMBMAXRESTARTS;
	rqp->sr_cred = scred;	/* Note: ref hold done by caller. */
	rqp->sr_pid = (uint16_t)ddi_get_pid();
	error = smb_rq_new(rqp, cmd);

	return (error);
}

static int
smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
{
	struct mbchain *mbp = &rqp->sr_rq;
	struct smb_vc *vcp = rqp->sr_vc;
	int error;

	ASSERT(rqp != NULL);

	rqp->sr_sendcnt = 0;
	rqp->sr_cmd = cmd;

	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return (error);

	/*
	 * Is this the right place to save the flags?
	 */
	rqp->sr_rqflags = vcp->vc_hflags;
	rqp->sr_rqflags2 = vcp->vc_hflags2;

	/*
	 * The SMB header is filled in later by
	 * smb_rq_fillhdr (see below)
	 * Just reserve space here.
	 */
	mb_put_mem(mbp, NULL, SMB_HDRLEN, MB_MZERO);

	return (0);
}

/*
 * Given a request with its body already composed,
 * rewind to the start and fill in the SMB header.
 * This is called after the request is enqueued,
 * so we have the final MID, sequence number, etc.
 */
void
smb_rq_fillhdr(struct smb_rq *rqp)
{
	struct mbchain mbtmp, *mbp = &mbtmp;
	mblk_t *m;

	/*
	 * Fill in the SMB header using a dup of the first mblk,
	 * which points at the same data but has its own wptr,
	 * so we can rewind without trashing the message.
	 */
	m = dupb(rqp->sr_rq.mb_top);
	m->b_wptr = m->b_rptr;	/* rewind */
	mb_initm(mbp, m);

	mb_put_mem(mbp, SMB_SIGNATURE, 4, MB_MSYSTEM);
	mb_put_uint8(mbp, rqp->sr_cmd);
	mb_put_uint32le(mbp, 0);	/* status */
	mb_put_uint8(mbp, rqp->sr_rqflags);
	mb_put_uint16le(mbp, rqp->sr_rqflags2);
	mb_put_uint16le(mbp, 0);	/* pid-high */
	mb_put_mem(mbp, NULL, 8, MB_MZERO);	/* MAC sig. (later) */
	mb_put_uint16le(mbp, 0);	/* reserved */
	mb_put_uint16le(mbp, rqp->sr_rqtid);
	mb_put_uint16le(mbp, rqp->sr_pid);
	mb_put_uint16le(mbp, rqp->sr_rquid);
	mb_put_uint16le(mbp, rqp->sr_mid);

	/* This will free the mblk from dupb. */
	mb_done(mbp);
}
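
/*
 * For reference (a summary of the puts above, not an additional
 * requirement): the fixed header written by smb_rq_fillhdr() is
 * signature "\xffSMB" (4), command (1), status (4), flags (1),
 * flags2 (2), pid-high (2), MAC signature (8), reserved (2),
 * tid (2), pid (2), uid (2), mid (2) -- 32 bytes total, which
 * should match the SMB_HDRLEN space reserved in smb_rq_new().
 */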
22710023SGordon.Ross@Sun.COM
22810023SGordon.Ross@Sun.COM int
smb_rq_simple(struct smb_rq * rqp)22910023SGordon.Ross@Sun.COM smb_rq_simple(struct smb_rq *rqp)
23010023SGordon.Ross@Sun.COM {
23110023SGordon.Ross@Sun.COM return (smb_rq_simple_timed(rqp, smb_timo_default));
2326007Sthurlow }
2336007Sthurlow
2346007Sthurlow /*
2356007Sthurlow * Simple request-reply exchange
2366007Sthurlow */
2376007Sthurlow int
smb_rq_simple_timed(struct smb_rq * rqp,int timeout)2386007Sthurlow smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
2396007Sthurlow {
2406007Sthurlow int error = EINVAL;
2416007Sthurlow
2426007Sthurlow for (; ; ) {
2436007Sthurlow /*
2446007Sthurlow * Don't send any new requests if force unmount is underway.
2456007Sthurlow * This check was moved into smb_rq_enqueue.
2466007Sthurlow */
2476007Sthurlow rqp->sr_flags &= ~SMBR_RESTART;
2486007Sthurlow rqp->sr_timo = timeout; /* in seconds */
2496007Sthurlow rqp->sr_state = SMBRQ_NOTSENT;
2506007Sthurlow error = smb_rq_enqueue(rqp);
2516007Sthurlow if (error) {
2526007Sthurlow break;
2536007Sthurlow }
2546007Sthurlow error = smb_rq_reply(rqp);
2556007Sthurlow if (!error)
2566007Sthurlow break;
2576007Sthurlow if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) !=
2586007Sthurlow SMBR_RESTART)
2596007Sthurlow break;
2606007Sthurlow if (rqp->sr_rexmit <= 0)
2616007Sthurlow break;
2626007Sthurlow SMBRQ_LOCK(rqp);
26310023SGordon.Ross@Sun.COM if (rqp->sr_share) {
26411332SGordon.Ross@Sun.COM (void) cv_reltimedwait(&rqp->sr_cond, &(rqp)->sr_lock,
26511332SGordon.Ross@Sun.COM SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
2666007Sthurlow
2676007Sthurlow } else {
26811332SGordon.Ross@Sun.COM delay(SEC_TO_TICK(SMB_RCNDELAY));
2696007Sthurlow }
2706007Sthurlow SMBRQ_UNLOCK(rqp);
2716007Sthurlow rqp->sr_rexmit--;
2726007Sthurlow }
2736007Sthurlow return (error);
2746007Sthurlow }
2756007Sthurlow
2766007Sthurlow
2776007Sthurlow static int
smb_rq_enqueue(struct smb_rq * rqp)2786007Sthurlow smb_rq_enqueue(struct smb_rq *rqp)
2796007Sthurlow {
2806007Sthurlow struct smb_vc *vcp = rqp->sr_vc;
2816007Sthurlow struct smb_share *ssp = rqp->sr_share;
2826007Sthurlow int error = 0;
2836007Sthurlow
2846007Sthurlow /*
28510023SGordon.Ross@Sun.COM * Normal requests may initiate a reconnect,
28610023SGordon.Ross@Sun.COM * and/or wait for state changes to finish.
28710023SGordon.Ross@Sun.COM * Some requests set the NORECONNECT flag
28810023SGordon.Ross@Sun.COM * to avoid all that (i.e. tree discon)
2896007Sthurlow */
29010023SGordon.Ross@Sun.COM if (rqp->sr_flags & SMBR_NORECONNECT) {
29110023SGordon.Ross@Sun.COM if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
29210023SGordon.Ross@Sun.COM SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
29310023SGordon.Ross@Sun.COM return (ENOTCONN);
29410023SGordon.Ross@Sun.COM }
29510023SGordon.Ross@Sun.COM if (ssp != NULL &&
29610023SGordon.Ross@Sun.COM ((ssp->ss_flags & SMBS_CONNECTED) == 0))
29710023SGordon.Ross@Sun.COM return (ENOTCONN);
29810023SGordon.Ross@Sun.COM goto ok_out;
2996007Sthurlow }
3006007Sthurlow
3016007Sthurlow /*
30210023SGordon.Ross@Sun.COM * If we're not connected, initiate a reconnect
30310023SGordon.Ross@Sun.COM * and/or wait for an existing one to finish.
3046007Sthurlow */
3056007Sthurlow if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
30610023SGordon.Ross@Sun.COM error = smb_iod_reconnect(vcp);
30710023SGordon.Ross@Sun.COM if (error != 0)
30810023SGordon.Ross@Sun.COM return (error);
3096007Sthurlow }
3106007Sthurlow
3116007Sthurlow /*
31210023SGordon.Ross@Sun.COM * If this request has a "share" object
31310023SGordon.Ross@Sun.COM * that needs a tree connect, do it now.
3146007Sthurlow */
31510023SGordon.Ross@Sun.COM if (ssp != NULL && (ssp->ss_flags & SMBS_CONNECTED) == 0) {
31610023SGordon.Ross@Sun.COM error = smb_share_tcon(ssp, rqp->sr_cred);
31710023SGordon.Ross@Sun.COM if (error)
31810023SGordon.Ross@Sun.COM return (error);
3196007Sthurlow }
3206007Sthurlow
32110023SGordon.Ross@Sun.COM /*
32210023SGordon.Ross@Sun.COM * We now know what UID + TID to use.
32310023SGordon.Ross@Sun.COM * Store them in the request.
32410023SGordon.Ross@Sun.COM */
32510023SGordon.Ross@Sun.COM ok_out:
32610023SGordon.Ross@Sun.COM rqp->sr_rquid = vcp->vc_smbuid;
32710023SGordon.Ross@Sun.COM rqp->sr_rqtid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
32810023SGordon.Ross@Sun.COM error = smb_iod_addrq(rqp);
3296007Sthurlow
3306007Sthurlow return (error);
3316007Sthurlow }
3326007Sthurlow
/*
 * Mark location of the word count, which is filled in later by
 * smb_rq_wend().  Also initialize the counter that it uses
 * to figure out what value to fill in.
 *
 * Note that the word count happens to be 8-bit.
 */
void
smb_rq_wstart(struct smb_rq *rqp)
{
	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_wend(struct smb_rq *rqp)
{
	uint_t wcnt;

	if (rqp->sr_wcount == NULL) {
		SMBSDEBUG("no wcount\n");
		return;
	}
	wcnt = rqp->sr_rq.mb_count;
	if (wcnt > 0x1ff)
		SMBSDEBUG("word count too large (%d)\n", wcnt);
	if (wcnt & 1)
		SMBSDEBUG("odd word count\n");
	/* Fill in the word count (8-bits) */
	*rqp->sr_wcount = (wcnt >> 1);
}

/*
 * Mark location of the byte count, which is filled in later by
 * smb_rq_bend().  Also initialize the counter that it uses
 * to figure out what value to fill in.
 *
 * Note that the byte count happens to be 16-bit.
 */
void
smb_rq_bstart(struct smb_rq *rqp)
{
	rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof (uint16_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_bend(struct smb_rq *rqp)
{
	uint_t bcnt;

	if (rqp->sr_bcount == NULL) {
		SMBSDEBUG("no bcount\n");
		return;
	}
	bcnt = rqp->sr_rq.mb_count;
	if (bcnt > 0xffff)
		SMBSDEBUG("byte count too large (%d)\n", bcnt);
	/*
	 * Fill in the byte count (16-bits)
	 * The pointer is char * type due to
	 * typical off-by-one alignment.
	 */
	rqp->sr_bcount[0] = bcnt & 0xFF;
	rqp->sr_bcount[1] = (bcnt >> 8);
}
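
/*
 * A minimal sketch of the expected caller pattern for the four
 * helpers above (this is how the trans routines later in this
 * file use them):
 *
 *	smb_rq_wstart(rqp);
 *	mb_put_uint16le(mbp, ...);	(parameter words)
 *	smb_rq_wend(rqp);
 *	smb_rq_bstart(rqp);
 *	mb_put_...(mbp, ...);		(data bytes)
 *	smb_rq_bend(rqp);
 */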

int
smb_rq_intr(struct smb_rq *rqp)
{
	if (rqp->sr_flags & SMBR_INTR)
		return (EINTR);

	return (0);
}

static int
smb_rq_getenv(struct smb_connobj *co,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	int error = EINVAL;

	if (co->co_flags & SMBO_GONE) {
		SMBSDEBUG("zombie CO\n");
		error = EINVAL;
		goto out;
	}

	switch (co->co_level) {
	case SMBL_SHARE:
		ssp = CPTOSS(co);
		if ((co->co_flags & SMBO_GONE) ||
		    co->co_parent == NULL) {
			SMBSDEBUG("zombie share %s\n", ssp->ss_name);
			break;
		}
		/* instead of recursion... */
		co = co->co_parent;
		/* FALLTHROUGH */
	case SMBL_VC:
		vcp = CPTOVC(co);
		if ((co->co_flags & SMBO_GONE) ||
		    co->co_parent == NULL) {
			SMBSDEBUG("zombie VC %s\n", vcp->vc_srvname);
			break;
		}
		error = 0;
		break;

	default:
		SMBSDEBUG("invalid level %d passed\n", co->co_level);
	}

out:
	if (!error) {
		if (vcpp)
			*vcpp = vcp;
		if (sspp)
			*sspp = ssp;
	}

	return (error);
}

/*
 * Wait for reply on the request
 */
static int
smb_rq_reply(struct smb_rq *rqp)
{
	struct mdchain *mdp = &rqp->sr_rp;
	u_int8_t tb;
	int error, rperror = 0;

	if (rqp->sr_timo == SMBNOREPLYWAIT) {
		smb_iod_removerq(rqp);
		return (0);
	}

	error = smb_iod_waitrq(rqp);
	if (error)
		return (error);

	/*
	 * If the request was signed, validate the
	 * signature on the response.
	 */
	if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
		error = smb_rq_verify(rqp);
		if (error)
			return (error);
	}

	/*
	 * Parse the SMB header
	 */
	error = md_get_uint32le(mdp, NULL);
	if (error)
		return (error);
	error = md_get_uint8(mdp, &tb);
	error = md_get_uint32le(mdp, &rqp->sr_error);
	error = md_get_uint8(mdp, &rqp->sr_rpflags);
	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
	if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
		/*
		 * Do a special check for STATUS_BUFFER_OVERFLOW;
		 * it's not an error.
		 */
		if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
			/*
			 * Don't report it as an error to our caller;
			 * they can look at rqp->sr_error if they
			 * need to know whether we got a
			 * STATUS_BUFFER_OVERFLOW.
			 * XXX - should we do that for all errors
			 * where (error & 0xC0000000) is 0x80000000,
			 * i.e. all warnings?
			 */
			rperror = 0;
		} else
			rperror = smb_maperr32(rqp->sr_error);
	} else {
		rqp->sr_errclass = rqp->sr_error & 0xff;
		rqp->sr_serror = rqp->sr_error >> 16;
		rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
	}
	if (rperror == EMOREDATA) {
		rperror = E2BIG;
		rqp->sr_flags |= SMBR_MOREDATA;
	} else
		rqp->sr_flags &= ~SMBR_MOREDATA;

	error = md_get_uint32le(mdp, NULL);
	error = md_get_uint32le(mdp, NULL);
	error = md_get_uint32le(mdp, NULL);

	error = md_get_uint16le(mdp, &rqp->sr_rptid);
	error = md_get_uint16le(mdp, &rqp->sr_rppid);
	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
	error = md_get_uint16le(mdp, &rqp->sr_rpmid);

	return ((error) ? error : rperror);
}


#define	ALIGN4(a)	(((a) + 3) & ~3)
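/*
 * ALIGN4 rounds up to the next multiple of 4, e.g.
 * ALIGN4(5) == 8, ALIGN4(8) == 8, ALIGN4(9) == 12.
 */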

/*
 * TRANS2 request implementation
 * TRANS implementation is in the "t2" routines
 * NT_TRANSACTION implementation is the separate "nt" stuff
 */
int
smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
	struct smb_t2rq **t2pp)
{
	struct smb_t2rq *t2p;
	int error;

	t2p = (struct smb_t2rq *)kmem_alloc(sizeof (*t2p), KM_SLEEP);
	if (t2p == NULL)
		return (ENOMEM);
	error = smb_t2_init(t2p, layer, &setup, 1, scred);
	t2p->t2_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_t2_done(t2p);
		return (error);
	}
	*t2pp = t2p;
	return (0);
}

int
smb_nt_alloc(struct smb_connobj *layer, ushort_t fn, struct smb_cred *scred,
	struct smb_ntrq **ntpp)
{
	struct smb_ntrq *ntp;
	int error;

	ntp = (struct smb_ntrq *)kmem_alloc(sizeof (*ntp), KM_SLEEP);
	if (ntp == NULL)
		return (ENOMEM);
	error = smb_nt_init(ntp, layer, fn, scred);
	mutex_init(&ntp->nt_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&ntp->nt_cond, NULL, CV_DEFAULT, NULL);
	ntp->nt_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_nt_done(ntp);
		return (error);
	}
	*ntpp = ntp;
	return (0);
}

int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
	int setupcnt, struct smb_cred *scred)
{
	int i;
	int error;

	bzero(t2p, sizeof (*t2p));
	mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);

	t2p->t2_source = source;
	t2p->t2_setupcount = (u_int16_t)setupcnt;
	t2p->t2_setupdata = t2p->t2_setup;
	for (i = 0; i < setupcnt; i++)
		t2p->t2_setup[i] = setup[i];
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	t2p->t2_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL); /* for smb up/down */
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return (error);
	return (0);
}

int
smb_nt_init(struct smb_ntrq *ntp, struct smb_connobj *source, ushort_t fn,
	struct smb_cred *scred)
{
	int error;

	bzero(ntp, sizeof (*ntp));
	ntp->nt_source = source;
	ntp->nt_function = fn;
	ntp->nt_cred = scred;
	ntp->nt_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL); /* for smb up/down */
	error = smb_rq_getenv(source, &ntp->nt_vc, NULL);
	if (error)
		return (error);
	return (0);
}

void
smb_t2_done(struct smb_t2rq *t2p)
{
	mb_done(&t2p->t2_tparam);
	mb_done(&t2p->t2_tdata);
	md_done(&t2p->t2_rparam);
	md_done(&t2p->t2_rdata);
	mutex_destroy(&t2p->t2_lock);
	cv_destroy(&t2p->t2_cond);
	if (t2p->t2_flags & SMBT2_ALLOCED)
		kmem_free(t2p, sizeof (*t2p));
}

void
smb_nt_done(struct smb_ntrq *ntp)
{
	mb_done(&ntp->nt_tsetup);
	mb_done(&ntp->nt_tparam);
	mb_done(&ntp->nt_tdata);
	md_done(&ntp->nt_rparam);
	md_done(&ntp->nt_rdata);
	cv_destroy(&ntp->nt_cond);
	mutex_destroy(&ntp->nt_lock);
	if (ntp->nt_flags & SMBT2_ALLOCED)
		kmem_free(ntp, sizeof (*ntp));
}

/*
 * Extract data [offset,count] from mtop and add to mdp.
 */
static int
smb_t2_placedata(mblk_t *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	mblk_t *n;

	n = m_copym(mtop, offset, count, M_WAITOK);
	if (n == NULL)
		return (EBADRPC);

	if (mdp->md_top == NULL) {
		md_initm(mdp, n);
	} else
		m_cat(mdp->md_top, n);

	return (0);
}

static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, error2, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	t2p->t2_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		t2p->t2_flags |= SMBT2_MOREDATA;
	t2p->t2_sr_errclass = rqp->sr_errclass;
	t2p->t2_sr_serror = rqp->sr_serror;
	t2p->t2_sr_error = rqp->sr_error;
	t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses, if any.
	 * The CIFS specification says that they can be misordered,
	 * which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {
			error2 = ENOENT;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error2 = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		md_get_uint8(mdp, NULL); /* Reserved2 */
		tmp = wc;
		while (tmp--)
			md_get_uint16le(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff,
			    pcount, &t2p->t2_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff,
			    dcount, &t2p->t2_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			t2p->t2_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		t2p->t2_sr_errclass = rqp->sr_errclass;
		t2p->t2_sr_serror = rqp->sr_serror;
		t2p->t2_sr_error = rqp->sr_error;
		t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}

static int
smb_nt_reply(struct smb_ntrq *ntp)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = ntp->nt_rq;
	int error, error2;
	u_int32_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int32_t tmp, dcount, totpgot, totdgot;
	u_int16_t bc;
	u_int8_t wc;

	ntp->nt_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		ntp->nt_flags |= SMBT2_MOREDATA;
	ntp->nt_sr_error = rqp->sr_error;
	ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses.  The CIFS
	 * specification says that they can be misordered, which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffffffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 18) {
			error2 = ENOENT;
			break;
		}
		md_get_mem(mdp, NULL, 3, MB_MSYSTEM); /* reserved */
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint32le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		tmp = wc;
		while (tmp--)
			md_get_uint16le(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &ntp->nt_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &ntp->nt_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			ntp->nt_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			ntp->nt_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		ntp->nt_sr_error = rqp->sr_error;
		ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}

/*
 * Perform a full round of TRANS2 request
 */
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	mblk_t *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen, nmsize;

	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)		/* maxvalue for ushort_t */
			return (EINVAL);
	} else
		totpcount = 0;
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return (EINVAL);
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_timo = smb_timo_default;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);	/* reserved */
	mb_put_uint16le(mbp, 0);	/* flags */
	mb_put_uint32le(mbp, 0);	/* Timeout */
	mb_put_uint16le(mbp, 0);	/* reserved 2 */
	len = mb_fixhdr(mbp);

	/*
	 * Now we know the size of the trans overhead stuff:
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + nmsize),
	 * where nmsize is the OTW size of the name, including
	 * the unicode null terminator and any alignment.
	 * Use this to decide which parts (and how much)
	 * can go into this request: params, data
	 */
	nmlen = t2p->t_name ? t2p->t_name_len : 0;
	nmsize = nmlen + 1; /* null term. */
	if (SMB_UNICODE_STRINGS(vcp)) {
		nmsize *= 2;
		/* we know put_dmem will need to align */
		nmsize += 1;
	}
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmsize);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		/*
		 * Other client traffic seems to "ALIGN2" here.  The extra
		 * 2 byte pad we use has no observed downside and may be
		 * required for some old servers(?)
		 */
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++) {
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	}
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	if (t2p->t_name) {
		/* Put the string and terminating null. */
		error = smb_put_dmem(mbp, vcp, t2p->t_name, nmlen + 1,
		    SMB_CS_NONE, NULL);
	} else {
		/* nmsize accounts for padding, char size. */
		error = mb_put_mem(mbp, NULL, nmsize, MB_MZERO);
	}
	if (error)
		goto freerq;
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount || leftdcount) {
		error = smb_rq_reply(rqp);
		if (error)
			goto bad;
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
	}
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY :
		    SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * now we have known packet size as
		 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
		 * and need to decide which parts should go into request
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		error = smb_iod_multirq(rqp);
		if (error)
			goto bad;
	}	/* while left params or data */
	error = smb_t2_reply(t2p);
	if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
		goto bad;
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);
	return (error);
}


/*
 * Perform a full round of NT_TRANSACTION request
 */
static int
smb_nt_request_int(struct smb_ntrq *ntp)
{
	struct smb_vc *vcp = ntp->nt_vc;
	struct smb_cred *scred = ntp->nt_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbsetup, mbparam, mbdata;
	mblk_t *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax;
	int error, doff, poff, txdcount, txpcount;
	int totscount;

	m = ntp->nt_tsetup.mb_top;
	if (m) {
		md_initm(&mbsetup, m);	/* do not free it! */
		totscount = m_fixhdr(m);
		if (totscount > 2 * 0xff)
			return (EINVAL);
	} else
		totscount = 0;
	m = ntp->nt_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0x7fffffff)
			return (EINVAL);
	} else
		totpcount = 0;
	m = ntp->nt_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0x7fffffff)
			return (EINVAL);
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(ntp->nt_source, SMB_COM_NT_TRANSACT, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_timo = smb_timo_default;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	ntp->nt_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, ntp->nt_maxscount);
	mb_put_uint16le(mbp, 0);	/* reserved (flags?) */
	mb_put_uint32le(mbp, totpcount);
	mb_put_uint32le(mbp, totdcount);
	mb_put_uint32le(mbp, ntp->nt_maxpcount);
	mb_put_uint32le(mbp, ntp->nt_maxdcount);
	len = mb_fixhdr(mbp);
	/*
	 * now we have known packet size as
	 * ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2),
	 * and need to decide which parts should go into the first request
	 */
	len = ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint32le(mbp, txpcount);
	mb_put_uint32le(mbp, poff);
	mb_put_uint32le(mbp, txdcount);
	mb_put_uint32le(mbp, doff);
	mb_put_uint8(mbp, (totscount+1)/2);
	mb_put_uint16le(mbp, ntp->nt_function);
	if (totscount) {
		error = md_get_mbuf(&mbsetup, totscount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, totscount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
		if (totscount & 1)
			mb_put_uint8(mbp, 0); /* setup is in words */
	}
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount || leftdcount) {
		error = smb_rq_reply(rqp);
		if (error)
			goto bad;
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
	}
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, SMB_COM_NT_TRANSACT_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_mem(mbp, NULL, 3, MB_MZERO);
		mb_put_uint32le(mbp, totpcount);
		mb_put_uint32le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * now we have known packet size as
		 * ALIGN4(len + 6 * 4 + 2)
		 * and need to decide which parts should go into request
		 */
		len = ALIGN4(len + 6 * 4 + 2);
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint32le(mbp, txpcount);
		mb_put_uint32le(mbp, poff);
		mb_put_uint32le(mbp, totpcount - leftpcount);
		mb_put_uint32le(mbp, txdcount);
		mb_put_uint32le(mbp, doff);
		mb_put_uint32le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		error = smb_iod_multirq(rqp);
		if (error)
			goto bad;
	}	/* while left params or data */
	error = smb_nt_reply(ntp);
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
		goto bad;
	mdp = &ntp->nt_rdata;
	if (mdp->md_top) {
		md_initm(mdp, mdp->md_top);
	}
	mdp = &ntp->nt_rparam;
	if (mdp->md_top) {
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
		if (rqp->sr_flags & SMBR_RESTART)
			ntp->nt_flags |= SMBT2_RESTART;
		md_done(&ntp->nt_rparam);
		md_done(&ntp->nt_rdata);
	}
	smb_rq_done(rqp);
	return (error);
}

int
smb_t2_request(struct smb_t2rq *t2p)
{
	int error = EINVAL, i;

	for (i = 0; ; ) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue, called by
		 * smb_t2_request_int()
		 */
		t2p->t2_flags &= ~SMBT2_RESTART;
		error = smb_t2_request_int(t2p);
		if (!error)
			break;
		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
		    SMBT2_RESTART)
			break;
		if (++i > SMBMAXRESTARTS)
			break;
		mutex_enter(&(t2p)->t2_lock);
		if (t2p->t2_share) {
			(void) cv_reltimedwait(&t2p->t2_cond, &(t2p)->t2_lock,
			    SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
		} else {
			delay(SEC_TO_TICK(SMB_RCNDELAY));
		}
		mutex_exit(&(t2p)->t2_lock);
	}
	return (error);
}


int
smb_nt_request(struct smb_ntrq *ntp)
{
	int error = EINVAL, i;

	for (i = 0; ; ) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue, called by
		 * smb_nt_request_int()
		 */
		ntp->nt_flags &= ~SMBT2_RESTART;
		error = smb_nt_request_int(ntp);
		if (!error)
			break;
		if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
		    SMBT2_RESTART)
			break;
		if (++i > SMBMAXRESTARTS)
			break;
		mutex_enter(&(ntp)->nt_lock);
		if (ntp->nt_share) {
			(void) cv_reltimedwait(&ntp->nt_cond, &(ntp)->nt_lock,
			    SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);

		} else {
			delay(SEC_TO_TICK(SMB_RCNDELAY));
		}
		mutex_exit(&(ntp)->nt_lock);
	}
	return (error);
}