/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Utility routines
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <libintl.h>
#include <assert.h>
#include <ucontext.h>
#include <pthread.h>
#include "idmap_impl.h"

#define	_UDT_SIZE_INCR		1

#define	_GET_IDS_SIZE_INCR	1

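/* Timeout used for RPC calls to idmapd (25 seconds). */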
static struct timeval TIMEOUT = { 25, 0 };

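/*
 * Per-process connection to idmapd.  See the block comment above
 * _idmap_clnt_call() for the connection state and locking protocol.
 */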
struct idmap_handle {
	CLIENT *client;
	boolean_t failed;
	rwlock_t lock;
};

static struct idmap_handle idmap_handle = {
	NULL,		/* client */
	B_TRUE,		/* failed */
	DEFAULTRWLOCK,	/* lock */
};

static idmap_stat _idmap_clnt_connect(void);
static void _idmap_clnt_disconnect(void);

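/*
 * Ensure that the update batch has room for one more request:
 * grow the request array by _UDT_SIZE_INCR if necessary, zero the
 * new slots, and mark the next slot's operation as OP_NONE.
 */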
idmap_retcode
_udt_extend_batch(idmap_udt_handle_t *udthandle)
{
	idmap_update_op *tmplist;
	size_t nsize;

	if (udthandle->next >= udthandle->batch.idmap_update_batch_len) {
		nsize = (udthandle->batch.idmap_update_batch_len +
		    _UDT_SIZE_INCR) * sizeof (*tmplist);
		tmplist = realloc(
		    udthandle->batch.idmap_update_batch_val, nsize);
		if (tmplist == NULL)
			return (IDMAP_ERR_MEMORY);
		(void) memset((uchar_t *)tmplist +
		    (udthandle->batch.idmap_update_batch_len *
		    sizeof (*tmplist)), 0,
		    _UDT_SIZE_INCR * sizeof (*tmplist));
		udthandle->batch.idmap_update_batch_val = tmplist;
		udthandle->batch.idmap_update_batch_len += _UDT_SIZE_INCR;
	}
	udthandle->batch.idmap_update_batch_val[udthandle->next].opnum =
	    OP_NONE;
	return (IDMAP_SUCCESS);
}

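/*
 * Ensure that the get-IDs batch has room for one more request:
 * grow both the request array and the parallel result list by
 * _GET_IDS_SIZE_INCR if necessary, zeroing the new slots.
 */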
idmap_retcode
_get_ids_extend_batch(idmap_get_handle_t *gh)
{
	idmap_mapping *t1;
	idmap_get_res_t *t2;
	size_t nsize, len;

	len = gh->batch.idmap_mapping_batch_len;
	if (gh->next >= len) {
		/* extend the request array */
		nsize = (len + _GET_IDS_SIZE_INCR) * sizeof (*t1);
		t1 = realloc(gh->batch.idmap_mapping_batch_val, nsize);
		if (t1 == NULL)
			return (IDMAP_ERR_MEMORY);
		(void) memset((uchar_t *)t1 + (len * sizeof (*t1)), 0,
		    _GET_IDS_SIZE_INCR * sizeof (*t1));
		gh->batch.idmap_mapping_batch_val = t1;

		/* extend the return list */
		nsize = (len + _GET_IDS_SIZE_INCR) * sizeof (*t2);
		t2 = realloc(gh->retlist, nsize);
		if (t2 == NULL)
			return (IDMAP_ERR_MEMORY);
		(void) memset((uchar_t *)t2 + (len * sizeof (*t2)), 0,
		    _GET_IDS_SIZE_INCR * sizeof (*t2));
		gh->retlist = t2;

		gh->batch.idmap_mapping_batch_len += _GET_IDS_SIZE_INCR;
	}
	return (IDMAP_SUCCESS);
}

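/*
 * Fetch the next chunk of results for an iterator.  The result buffer
 * is allocated (or its previous contents freed via xdr_free()) and
 * zeroed, the RPC identified by "type" is issued, and on success the
 * buffer is attached to the iterator as its current return list.
 */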
idmap_stat
_iter_get_next_list(int type, idmap_iter_t *iter,
    void *arg, uchar_t **list, size_t valsize,
    xdrproc_t xdr_arg_proc, xdrproc_t xdr_res_proc)
{
	idmap_stat rc;

	iter->next = 0;
	iter->retlist = NULL;

	/* init the result */
	if (*list) {
		xdr_free(xdr_res_proc, (caddr_t)*list);
	} else {
		if ((*list = malloc(valsize)) == NULL) {
			errno = ENOMEM;
			return (IDMAP_ERR_MEMORY);
		}
	}
	(void) memset(*list, 0, valsize);

	rc = _idmap_clnt_call(type,
	    xdr_arg_proc, (caddr_t)arg,
	    xdr_res_proc, (caddr_t)*list,
	    TIMEOUT);
	if (rc != IDMAP_SUCCESS) {
		free(*list);
		return (rc);
	}
	iter->retlist = *list;
	return (IDMAP_SUCCESS);
}

/*
 * Convert the return values from an RPC request into an idmap return code.
 * Set errno on error.
 */
static
idmap_stat
_idmap_rpc2stat(enum clnt_stat clntstat, CLIENT *clnt)
{
	/*
	 * We only deal with door_call(3C) errors here.  We look at
	 * r_err.re_errno instead of r_err.re_status because we need
	 * to differentiate between RPC failures caused by a bad door
	 * fd and others.
	 */
	struct rpc_err r_err;

	if (clntstat == RPC_SUCCESS)
		return (IDMAP_SUCCESS);

	clnt_geterr(clnt, &r_err);
	errno = r_err.re_errno;
	switch (r_err.re_errno) {
	case ENOMEM:
		return (IDMAP_ERR_MEMORY);
	case EBADF:
		return (IDMAP_ERR_RPC_HANDLE);
	default:
		return (IDMAP_ERR_RPC);
	}
}

/*
 * Management of the connection to idmapd.
 *
 * The intent is that connections to idmapd are automatically maintained,
 * reconnecting if necessary.  Failed connection attempts are not retried;
 * a failure to connect yields an immediate error return.
 *
 * State of the connection is maintained through the "client" and "failed"
 * elements of the handle structure:
 *
 *	client	failed
 *	NULL	true	Failed on a previous request and was not recovered.
 *	NULL	false	Should never happen.
 *	nonNULL	true	Structure exists, but an error has occurred.  Waiting
 *			for a chance to attempt to reconnect.
 *	nonNULL	false	Connection is good.
 *
 * Note that the initial state is NULL/true, so that the first request
 * will establish the initial connection.
 *
 * Concurrency is managed through the rw lock "lock".  Only the writer is
 * allowed to connect or disconnect, and thus only the writer can set
 * "failed" to "false".  Readers are allowed to use the "client" pointer,
 * and to set "failed" to "true", indicating that they have encountered a
 * failure.  The "client" pointer is only valid while one holds a reader
 * lock.  Once "failed" has been set to "true", all requests (including
 * the retry of the failing request) will attempt to gain the writer lock.
 * When they succeed, indicating that there are no requests in flight and
 * thus no outstanding references to the CLIENT structure, they check
 * again to see if the connection is still failed (since another thread
 * might have fixed it), and then if it is still failed they disconnect
 * and reconnect.
 */

/*
 * Make an RPC call.  Automatically reconnect if the connection to idmapd
 * fails.  Convert RPC results to idmap return codes.
 */
idmap_stat
_idmap_clnt_call(
    const rpcproc_t procnum,
    const xdrproc_t inproc,
    const caddr_t in,
    const xdrproc_t outproc,
    caddr_t out,
    const struct timeval tout)
{
	enum clnt_stat clntstat;
	idmap_stat rc;

	(void) rw_rdlock(&idmap_handle.lock);
	for (;;) {
		if (idmap_handle.failed) {
			/* No connection.  Bid to see if we should fix it. */
			(void) rw_unlock(&idmap_handle.lock);
			/* Somebody else might fix it here. */
			(void) rw_wrlock(&idmap_handle.lock);
			/*
			 * At this point, everybody else is asleep waiting
			 * for us.  Check to see if somebody else has already
			 * fixed the problem.
			 */
			if (idmap_handle.failed) {
				/* It's our job to fix. */
				_idmap_clnt_disconnect();
				rc = _idmap_clnt_connect();
				if (rc != IDMAP_SUCCESS) {
					/* We couldn't fix it. */
					assert(idmap_handle.failed);
					assert(idmap_handle.client == NULL);
					break;
				}
				/* We fixed it. */
				idmap_handle.failed = B_FALSE;
			}

			/* It's fixed now. */
			(void) rw_unlock(&idmap_handle.lock);
			/*
			 * Starting here, somebody might declare it failed
			 * again.
			 */
			(void) rw_rdlock(&idmap_handle.lock);
			continue;
		}

		clntstat = clnt_call(idmap_handle.client, procnum, inproc, in,
		    outproc, out, tout);
		rc = _idmap_rpc2stat(clntstat, idmap_handle.client);
		if (rc == IDMAP_ERR_RPC_HANDLE) {
			/* Failed.  Needs to be reconnected. */
			idmap_handle.failed = B_TRUE;
			continue;
		}

		/* Success or unrecoverable failure. */
		break;
	}
	(void) rw_unlock(&idmap_handle.lock);
	return (rc);
}

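/*
 * Stack space reserved for our own needs when sizing the door-call
 * buffers in _idmap_clnt_connect() below.
 */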
#define	MIN_STACK_NEEDS	65536

/*
 * Connect to idmapd.
 * Must be single-threaded through rw_wrlock(&idmap_handle.lock).
 */
static
idmap_stat
_idmap_clnt_connect(void)
{
	uint_t sendsz = 0;
	stack_t st;

	/*
	 * clnt_door_call() alloca()s sendsz bytes (twice too, once for
	 * the call args buffer and once for the call result buffer), so
	 * we want to pick a sendsz that will be large enough, but not
	 * too large.
	 */
	if (stack_getbounds(&st) == 0) {
		/*
		 * Estimate how much stack space is left;
		 * st.ss_sp is the top of stack.
		 */
		if ((char *)&sendsz < (char *)st.ss_sp)
			/* stack grows up */
			sendsz = ((char *)st.ss_sp - (char *)&sendsz);
		else
			/* stack grows down */
			sendsz = ((char *)&sendsz - (char *)st.ss_sp);

		if (sendsz <= MIN_STACK_NEEDS) {
			sendsz = 0;	/* RPC call may fail */
		} else {
			/* Leave 64Kb (just a guess) for our needs */
			sendsz -= MIN_STACK_NEEDS;

			/* Divide the stack space left by two */
			sendsz = RNDUP(sendsz / 2);

			/* Limit sendsz to 256KB */
			if (sendsz > IDMAP_MAX_DOOR_RPC)
				sendsz = IDMAP_MAX_DOOR_RPC;
		}
	}

	idmap_handle.client = clnt_door_create(IDMAP_PROG, IDMAP_V1, sendsz);
	if (idmap_handle.client == NULL)
		return (IDMAP_ERR_RPC);

	return (IDMAP_SUCCESS);
}

/*
 * Disconnect from idmapd, if we're connected.
 */
static
void
_idmap_clnt_disconnect(void)
{
	CLIENT *clnt;

	clnt = idmap_handle.client;
	if (clnt != NULL) {
		if (clnt->cl_auth)
			auth_destroy(clnt->cl_auth);
		clnt_destroy(clnt);
		idmap_handle.client = NULL;
	}
}