13941Svenki /*
23941Svenki * CDDL HEADER START
33941Svenki *
43941Svenki * The contents of this file are subject to the terms of the
53941Svenki * Common Development and Distribution License (the "License").
63941Svenki * You may not use this file except in compliance with the License.
73941Svenki *
83941Svenki * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93941Svenki * or http://www.opensolaris.org/os/licensing.
103941Svenki * See the License for the specific language governing permissions
113941Svenki * and limitations under the License.
123941Svenki *
133941Svenki * When distributing Covered Code, include this CDDL HEADER in each
143941Svenki * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153941Svenki * If applicable, add the following below this CDDL HEADER, with the
163941Svenki * fields enclosed by brackets "[]" replaced with your own identifying
173941Svenki * information: Portions Copyright [yyyy] [name of copyright owner]
183941Svenki *
193941Svenki * CDDL HEADER END
203941Svenki */
213941Svenki
223941Svenki /*
235995Sfw157321 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
243941Svenki * Use is subject to license terms.
253941Svenki */
263941Svenki
273941Svenki /*
283941Svenki * The snmp library helps to prepare the PDUs and communicate with
293941Svenki * the snmp agent on the SP side via the ds_snmp driver.
303941Svenki */
313941Svenki
323941Svenki #include <stdio.h>
333941Svenki #include <stdlib.h>
343941Svenki #include <string.h>
353941Svenki #include <unistd.h>
363941Svenki #include <thread.h>
373941Svenki #include <synch.h>
383941Svenki #include <errno.h>
393941Svenki #include <sys/time.h>
403941Svenki #include <sys/types.h>
413941Svenki #include <sys/stat.h>
423941Svenki #include <fcntl.h>
433941Svenki #include <libnvpair.h>
443941Svenki #include <sys/ds_snmp.h>
453941Svenki
463941Svenki #include "libpiclsnmp.h"
473941Svenki #include "snmplib.h"
483941Svenki #include "asn1.h"
493941Svenki #include "pdu.h"
503941Svenki #include "debug.h"
513941Svenki
523941Svenki #pragma init(libpiclsnmp_init) /* need this in .init */
533941Svenki
543941Svenki /*
553941Svenki * Data from the MIB is fetched based on the hints about object
563941Svenki * groups received from (possibly many threads in) the application.
573941Svenki * However, the fetched data is kept in a common cache for use across
583941Svenki * all threads, so even a GETBULK is issued only when absolutely
593941Svenki * necessary.
603941Svenki *
613941Svenki * Note that locking is not fine grained (there's no locking per row)
623941Svenki * since we don't expect too many MT consumers right away.
633941Svenki *
643941Svenki */
653941Svenki static mutex_t mibcache_lock;
663941Svenki static nvlist_t **mibcache = NULL;
673941Svenki static uint_t n_mibcache_rows = 0;
683941Svenki
693941Svenki static mutex_t snmp_reqid_lock;
703941Svenki static int snmp_reqid = 1;
713941Svenki
723941Svenki #ifdef SNMP_DEBUG
733941Svenki uint_t snmp_nsends = 0;
743941Svenki uint_t snmp_sentbytes = 0;
753941Svenki uint_t snmp_nrecvs = 0;
763941Svenki uint_t snmp_rcvdbytes = 0;
773941Svenki #endif
783941Svenki
793941Svenki #ifdef USE_SOCKETS
803941Svenki #define SNMP_DEFAULT_PORT 161
813941Svenki #define SNMP_MAX_RECV_PKTSZ (64 * 1024)
823941Svenki #endif
833941Svenki
843941Svenki /*
857746SKelly.Moyer@Sun.COM * We need a reliably monotonic and stable source of time values to age
867746SKelly.Moyer@Sun.COM * entries in the mibcache toward expiration. The code originally used
877746SKelly.Moyer@Sun.COM * gettimeofday(), but since that is subject to time-of-day changes made by
887746SKelly.Moyer@Sun.COM * the administrator, the values it returns do not satisfy our needs.
897746SKelly.Moyer@Sun.COM * Instead, we use gethrtime(), which is immune to time-of-day changes.
907746SKelly.Moyer@Sun.COM * However, since gethrtime() returns a signed 64-bit value in units of
917746SKelly.Moyer@Sun.COM * nanoseconds and we are using signed 32-bit timestamps, we always divide
927746SKelly.Moyer@Sun.COM * the result by (HRTIME_SCALE * NANOSEC) to scale it down into units of 10
937746SKelly.Moyer@Sun.COM * seconds.
947746SKelly.Moyer@Sun.COM *
957746SKelly.Moyer@Sun.COM * Note that the scaling factor means that the value of MAX_INCACHE_TIME
967746SKelly.Moyer@Sun.COM * from snmplib.h should also be in units of 10 seconds.
977746SKelly.Moyer@Sun.COM */
987746SKelly.Moyer@Sun.COM #define GET_SCALED_HRTIME() (int)(gethrtime() / (HRTIME_SCALE * NANOSEC))
997746SKelly.Moyer@Sun.COM
1007746SKelly.Moyer@Sun.COM /*
1017746SKelly.Moyer@Sun.COM * The mibcache code originally cached values for 300 seconds after fetching
1027746SKelly.Moyer@Sun.COM * data via SNMP. Subsequent reads within that 300 second window would come
1037746SKelly.Moyer@Sun.COM * from the cache - which is quite a bit faster than an SNMP query - but the
1047746SKelly.Moyer@Sun.COM * first request that came in more than 300 seconds after the previous SNMP
1057746SKelly.Moyer@Sun.COM * query would trigger a new SNMP query. This worked well as an
1067746SKelly.Moyer@Sun.COM * optimization for frequent queries, but when data was only queried less
1077746SKelly.Moyer@Sun.COM * frequently than every 300 seconds (as proved to be the case at multiple
1087746SKelly.Moyer@Sun.COM * customer sites), the cache didn't help at all.
1097746SKelly.Moyer@Sun.COM *
1107746SKelly.Moyer@Sun.COM * To improve the performance of infrequent queries, code was added to the
1117746SKelly.Moyer@Sun.COM * library to allow a client (i.e. a thread in the picl plugin) to proactively
1127746SKelly.Moyer@Sun.COM * refresh cache entries without waiting for them to expire, thereby ensuring
1137746SKelly.Moyer@Sun.COM * that all volatile entries in the cache at any given time are less than 300
1147746SKelly.Moyer@Sun.COM * seconds old. Whenever an SNMP query is generated to retrieve volatile data
1157746SKelly.Moyer@Sun.COM * that will be cached, an entry is added in a refresh queue that tracks the
1167746SKelly.Moyer@Sun.COM * parameters of the query and the time that it was made. A client can query
1177746SKelly.Moyer@Sun.COM * the age of the oldest item in the refresh queue and - at its discretion - can
1187746SKelly.Moyer@Sun.COM * then force that query to be repeated in a manner that will update the
1197746SKelly.Moyer@Sun.COM * mibcache entry even though it hasn't expired.
1207746SKelly.Moyer@Sun.COM */
1217746SKelly.Moyer@Sun.COM typedef struct {
1227746SKelly.Moyer@Sun.COM struct picl_snmphdl *smd;
1237746SKelly.Moyer@Sun.COM char *oidstrs;
1247746SKelly.Moyer@Sun.COM int n_oids;
1257746SKelly.Moyer@Sun.COM int row;
1267746SKelly.Moyer@Sun.COM int last_fetch_time; /* in scaled hrtime */
1277746SKelly.Moyer@Sun.COM } refreshq_job_t;
1287746SKelly.Moyer@Sun.COM
1297746SKelly.Moyer@Sun.COM static mutex_t refreshq_lock;
1307746SKelly.Moyer@Sun.COM static refreshq_job_t *refreshq = NULL;
1317746SKelly.Moyer@Sun.COM static uint_t n_refreshq_slots = 0; /* # of alloc'ed job slots */
1327746SKelly.Moyer@Sun.COM static uint_t n_refreshq_jobs = 0; /* # of unprocessed jobs */
1337746SKelly.Moyer@Sun.COM static uint_t refreshq_next_job = 0; /* oldest unprocessed job */
1347746SKelly.Moyer@Sun.COM static uint_t refreshq_next_slot = 0; /* next available job slot */
1357746SKelly.Moyer@Sun.COM
1367746SKelly.Moyer@Sun.COM
1377746SKelly.Moyer@Sun.COM /*
1383941Svenki * Static function declarations
1393941Svenki */
1403941Svenki static void libpiclsnmp_init(void);
1413941Svenki
1423941Svenki static int lookup_int(char *, int, int *, int);
1433941Svenki static int lookup_str(char *, int, char **, int);
1443941Svenki static int lookup_bitstr(char *, int, uchar_t **, uint_t *, int);
1453941Svenki
1463941Svenki static oidgroup_t *locate_oid_group(struct picl_snmphdl *, char *);
1473941Svenki static int search_oid_in_group(char *, char *, int);
1483941Svenki
1493941Svenki static snmp_pdu_t *fetch_single(struct picl_snmphdl *, char *, int, int *);
1503941Svenki static snmp_pdu_t *fetch_next(struct picl_snmphdl *, char *, int, int *);
1513941Svenki static void fetch_bulk(struct picl_snmphdl *, char *, int, int, int, int *);
1523941Svenki static int fetch_single_str(struct picl_snmphdl *, char *, int,
1533941Svenki char **, int *);
1543941Svenki static int fetch_single_int(struct picl_snmphdl *, char *, int,
1553941Svenki int *, int *);
1563941Svenki static int fetch_single_bitstr(struct picl_snmphdl *, char *, int,
1573941Svenki uchar_t **, uint_t *, int *);
1583941Svenki
1593941Svenki static int snmp_send_request(struct picl_snmphdl *, snmp_pdu_t *, int *);
1603941Svenki static int snmp_recv_reply(struct picl_snmphdl *, snmp_pdu_t *, int *);
1613941Svenki
1623941Svenki static int mibcache_realloc(int);
1633941Svenki static void mibcache_populate(snmp_pdu_t *, int);
1643941Svenki static char *oid_to_oidstr(oid *, size_t);
1653941Svenki
1667746SKelly.Moyer@Sun.COM static int refreshq_realloc(int);
1677746SKelly.Moyer@Sun.COM static int refreshq_add_job(struct picl_snmphdl *, char *, int, int);
1687746SKelly.Moyer@Sun.COM
1693941Svenki
1703941Svenki static void
libpiclsnmp_init(void)1713941Svenki libpiclsnmp_init(void)
1723941Svenki {
1733941Svenki (void) mutex_init(&mibcache_lock, USYNC_THREAD, NULL);
1743941Svenki if (mibcache_realloc(0) < 0)
1753941Svenki (void) mutex_destroy(&mibcache_lock);
1763941Svenki
1777746SKelly.Moyer@Sun.COM (void) mutex_init(&refreshq_lock, USYNC_THREAD, NULL);
1783941Svenki (void) mutex_init(&snmp_reqid_lock, USYNC_THREAD, NULL);
1793941Svenki
1803941Svenki LOGINIT();
1813941Svenki }
1823941Svenki
1833941Svenki picl_snmphdl_t
snmp_init()1843941Svenki snmp_init()
1853941Svenki {
1863941Svenki struct picl_snmphdl *smd;
1873941Svenki #ifdef USE_SOCKETS
1883941Svenki int sbuf = (1 << 15); /* 16K */
1893941Svenki int rbuf = (1 << 17); /* 64K */
1903941Svenki char *snmp_agent_addr;
1913941Svenki #endif
1923941Svenki
1933941Svenki smd = (struct picl_snmphdl *)calloc(1, sizeof (struct picl_snmphdl));
1943941Svenki if (smd == NULL)
1953941Svenki return (NULL);
1963941Svenki
1973941Svenki #ifdef USE_SOCKETS
1983941Svenki if ((snmp_agent_addr = getenv("SNMP_AGENT_IPADDR")) == NULL)
1993941Svenki return (NULL);
2003941Svenki
2013941Svenki if ((smd->fd = socket(PF_INET, SOCK_DGRAM, 0)) < 0)
2023941Svenki return (NULL);
2033941Svenki
2043941Svenki (void) setsockopt(smd->fd, SOL_SOCKET, SO_SNDBUF, &sbuf, sizeof (int));
2053941Svenki (void) setsockopt(smd->fd, SOL_SOCKET, SO_RCVBUF, &rbuf, sizeof (int));
2063941Svenki
2073941Svenki memset(&smd->agent_addr, 0, sizeof (struct sockaddr_in));
2083941Svenki smd->agent_addr.sin_family = AF_INET;
2093941Svenki smd->agent_addr.sin_port = htons(SNMP_DEFAULT_PORT);
2103941Svenki smd->agent_addr.sin_addr.s_addr = inet_addr(snmp_agent_addr);
2113941Svenki #else
2123941Svenki smd->fd = open(DS_SNMP_DRIVER, O_RDWR);
2133941Svenki if (smd->fd < 0) {
2143941Svenki free(smd);
2153941Svenki return (NULL);
2163941Svenki }
2173941Svenki #endif
2183941Svenki
2193941Svenki return ((picl_snmphdl_t)smd);
2203941Svenki }
2213941Svenki
2223941Svenki void
snmp_fini(picl_snmphdl_t hdl)2233941Svenki snmp_fini(picl_snmphdl_t hdl)
2243941Svenki {
2253941Svenki struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
2263941Svenki
2273941Svenki if (smd) {
2283941Svenki if (smd->fd >= 0) {
2293941Svenki (void) close(smd->fd);
2303941Svenki }
2313941Svenki free(smd);
2323941Svenki }
2333941Svenki }
2343941Svenki
2353941Svenki int
snmp_reinit(picl_snmphdl_t hdl,int clr_linkreset)2363941Svenki snmp_reinit(picl_snmphdl_t hdl, int clr_linkreset)
2373941Svenki {
2383941Svenki struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
2393941Svenki nvlist_t *nvl;
2403941Svenki int i;
2413941Svenki
2423941Svenki (void) mutex_lock(&mibcache_lock);
2433941Svenki
2443941Svenki for (i = 0; i < n_mibcache_rows; i++) {
2453941Svenki if ((nvl = mibcache[i]) != NULL)
2463941Svenki nvlist_free(nvl);
2473941Svenki }
2483941Svenki
2493941Svenki n_mibcache_rows = 0;
2503941Svenki if (mibcache) {
2513941Svenki free(mibcache);
2523941Svenki mibcache = NULL;
2533941Svenki }
2543941Svenki
2553941Svenki (void) mutex_unlock(&mibcache_lock);
2563941Svenki
2573941Svenki if (clr_linkreset) {
2583941Svenki if (smd == NULL || smd->fd < 0)
2593941Svenki return (-1);
2603941Svenki else
2613941Svenki return (ioctl(smd->fd, DSSNMP_CLRLNKRESET, NULL));
2623941Svenki }
2633941Svenki
2643941Svenki return (0);
2653941Svenki }
2663941Svenki
2673941Svenki void
snmp_register_group(picl_snmphdl_t hdl,char * oidstrs,int n_oids,int is_vol)2683941Svenki snmp_register_group(picl_snmphdl_t hdl, char *oidstrs, int n_oids, int is_vol)
2693941Svenki {
2703941Svenki struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
2713941Svenki oidgroup_t *oidg;
2723941Svenki oidgroup_t *curr, *prev;
2733941Svenki char *p;
2743941Svenki int i, sz;
2753941Svenki
2763941Svenki /*
2773941Svenki * Allocate a new oidgroup_t
2783941Svenki */
2793941Svenki oidg = (oidgroup_t *)calloc(1, sizeof (struct oidgroup));
2803941Svenki if (oidg == NULL)
2813941Svenki return;
2823941Svenki
2833941Svenki /*
2843941Svenki * Determine how much space is required to register this group
2853941Svenki */
2863941Svenki sz = 0;
2873941Svenki p = oidstrs;
2883941Svenki for (i = 0; i < n_oids; i++) {
2893941Svenki sz += strlen(p) + 1;
2903941Svenki p = oidstrs + sz;
2913941Svenki }
2923941Svenki
2933941Svenki /*
2943941Svenki * Create this oid group
2953941Svenki */
2963941Svenki if ((p = (char *)malloc(sz)) == NULL) {
2973941Svenki free((void *) oidg);
2983941Svenki return;
2993941Svenki }
3003941Svenki
3013941Svenki (void) memcpy(p, oidstrs, sz);
3023941Svenki
3033941Svenki oidg->next = NULL;
3043941Svenki oidg->oidstrs = p;
3053941Svenki oidg->n_oids = n_oids;
3063941Svenki oidg->is_volatile = is_vol;
3073941Svenki
3083941Svenki /*
3093941Svenki * Link it to the tail of the list of oid groups
3103941Svenki */
3113941Svenki for (prev = NULL, curr = smd->group; curr; curr = curr->next)
3123941Svenki prev = curr;
3133941Svenki
3143941Svenki if (prev == NULL)
3153941Svenki smd->group = oidg;
3163941Svenki else
3173941Svenki prev->next = oidg;
3183941Svenki }
3193941Svenki
3203941Svenki /*
3213941Svenki * snmp_get_int() takes in an OID and returns the integer value
3223941Svenki * of the object referenced in the passed arg. It returns 0 on
3233941Svenki * success and -1 on failure.
3243941Svenki */
3253941Svenki int
snmp_get_int(picl_snmphdl_t hdl,char * prefix,int row,int * val,int * snmp_syserr)3263941Svenki snmp_get_int(picl_snmphdl_t hdl, char *prefix, int row, int *val,
3273941Svenki int *snmp_syserr)
3283941Svenki {
3293941Svenki struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
3303941Svenki oidgroup_t *grp;
3313941Svenki int ret;
3323941Svenki int err = 0;
3333941Svenki
3343941Svenki if (smd == NULL || prefix == NULL || val == NULL)
3353941Svenki return (-1);
3363941Svenki
3373941Svenki /*
3383941Svenki * If this item should not be cached, fetch it directly from
3393941Svenki * the agent using fetch_single_xxx()
3403941Svenki */
3413941Svenki if ((grp = locate_oid_group(smd, prefix)) == NULL) {
3423941Svenki ret = fetch_single_int(smd, prefix, row, val, &err);
3433941Svenki
3443941Svenki if (snmp_syserr)
3453941Svenki *snmp_syserr = err;
3463941Svenki
3473941Svenki return (ret);
3483941Svenki }
3493941Svenki
3503941Svenki /*
3513941Svenki * is it in the cache ?
3523941Svenki */
3533941Svenki if (lookup_int(prefix, row, val, grp->is_volatile) == 0)
3543941Svenki return (0);
3553941Svenki
3563941Svenki /*
3573941Svenki * fetch it from the agent and populate the cache
3583941Svenki */
3593941Svenki fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
3603941Svenki if (snmp_syserr)
3613941Svenki *snmp_syserr = err;
3623941Svenki
3633941Svenki /*
3643941Svenki * look it up again and return it
3653941Svenki */
3663941Svenki if (lookup_int(prefix, row, val, grp->is_volatile) < 0)
3673941Svenki return (-1);
3683941Svenki
3693941Svenki return (0);
3703941Svenki }
3713941Svenki
3723941Svenki /*
3733941Svenki * snmp_get_str() takes in an OID and returns the string value
3743941Svenki * of the object referenced in the passed arg. Memory for the string
3753941Svenki * is allocated within snmp_get_str() and is expected to be freed by
3763941Svenki * the caller when it is no longer needed. The function returns 0
3773941Svenki * on success and -1 on failure.
3783941Svenki */
3793941Svenki int
snmp_get_str(picl_snmphdl_t hdl,char * prefix,int row,char ** strp,int * snmp_syserr)3803941Svenki snmp_get_str(picl_snmphdl_t hdl, char *prefix, int row, char **strp,
3813941Svenki int *snmp_syserr)
3823941Svenki {
3833941Svenki struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
3843941Svenki oidgroup_t *grp;
3853941Svenki char *val;
3863941Svenki int ret;
3873941Svenki int err = 0;
3883941Svenki
3893941Svenki if (smd == NULL || prefix == NULL || strp == NULL)
3903941Svenki return (-1);
3913941Svenki
3926328Sjfrank *strp = NULL;
3933941Svenki /*
3943941Svenki * Check if this item is cacheable or not. If not, call
3953941Svenki * fetch_single_* to get it directly from the agent
3963941Svenki */
3973941Svenki if ((grp = locate_oid_group(smd, prefix)) == NULL) {
3983941Svenki ret = fetch_single_str(smd, prefix, row, strp, &err);
3993941Svenki
4003941Svenki if (snmp_syserr)
4013941Svenki *snmp_syserr = err;
4023941Svenki
4033941Svenki return (ret);
4043941Svenki }
4053941Svenki
4063941Svenki /*
4073941Svenki * See if it's in the cache already
4083941Svenki */
4093941Svenki if (lookup_str(prefix, row, &val, grp->is_volatile) == 0) {
4103941Svenki if ((*strp = strdup(val)) == NULL)
4113941Svenki return (-1);
4123941Svenki else
4133941Svenki return (0);
4143941Svenki }
4153941Svenki
4163941Svenki /*
4173941Svenki * Fetch it from the agent and populate cache
4183941Svenki */
4193941Svenki fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
4203941Svenki if (snmp_syserr)
4213941Svenki *snmp_syserr = err;
4223941Svenki
4233941Svenki /*
4243941Svenki * Retry lookup
4253941Svenki */
4263941Svenki if (lookup_str(prefix, row, &val, grp->is_volatile) < 0)
4273941Svenki return (-1);
4283941Svenki
4293941Svenki
4303941Svenki if ((*strp = strdup(val)) == NULL)
4313941Svenki return (-1);
4323941Svenki else
4333941Svenki return (0);
4343941Svenki }
4353941Svenki
4363941Svenki /*
4373941Svenki * snmp_get_bitstr() takes in an OID and returns the bit string value
4383941Svenki * of the object referenced in the passed args. Memory for the bitstring
4393941Svenki * is allocated within the function and is expected to be freed by
4403941Svenki * the caller when it is no longer needed. The function returns 0
4413941Svenki * on success and -1 on failure.
4423941Svenki */
4433941Svenki int
snmp_get_bitstr(picl_snmphdl_t hdl,char * prefix,int row,uchar_t ** bitstrp,uint_t * nbytes,int * snmp_syserr)4443941Svenki snmp_get_bitstr(picl_snmphdl_t hdl, char *prefix, int row, uchar_t **bitstrp,
4453941Svenki uint_t *nbytes, int *snmp_syserr)
4463941Svenki {
4473941Svenki struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
4483941Svenki oidgroup_t *grp;
4493941Svenki uchar_t *val;
4503941Svenki int ret;
4513941Svenki int err = 0;
4523941Svenki
4533941Svenki if (smd == NULL || prefix == NULL || bitstrp == NULL || nbytes == NULL)
4543941Svenki return (-1);
4553941Svenki
4566328Sjfrank *bitstrp = NULL;
4573941Svenki /*
4583941Svenki * Check if this item is cacheable or not. If not, call
4593941Svenki * fetch_single_* to get it directly from the agent
4603941Svenki */
4613941Svenki if ((grp = locate_oid_group(smd, prefix)) == NULL) {
4623941Svenki ret = fetch_single_bitstr(smd, prefix, row, bitstrp,
4633941Svenki nbytes, &err);
4643941Svenki
4653941Svenki if (snmp_syserr)
4663941Svenki *snmp_syserr = err;
4673941Svenki
4683941Svenki return (ret);
4693941Svenki }
4703941Svenki
4713941Svenki /*
4723941Svenki * See if it's in the cache already
4733941Svenki */
4743941Svenki if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) == 0) {
4753941Svenki if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
4763941Svenki return (-1);
4773941Svenki (void) memcpy(*bitstrp, (const void *)val, *nbytes);
4783941Svenki return (0);
4793941Svenki }
4803941Svenki
4813941Svenki /*
4823941Svenki * Fetch it from the agent and populate cache
4833941Svenki */
4843941Svenki fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
4853941Svenki if (snmp_syserr)
4863941Svenki *snmp_syserr = err;
4873941Svenki
4883941Svenki /*
4893941Svenki * Retry lookup
4903941Svenki */
4913941Svenki if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) < 0)
4923941Svenki return (-1);
4933941Svenki
4943941Svenki if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
4953941Svenki return (-1);
4963941Svenki (void) memcpy(*bitstrp, (const void *)val, *nbytes);
4973941Svenki
4983941Svenki return (0);
4993941Svenki }
5003941Svenki
5013941Svenki /*
5023941Svenki * snmp_get_nextrow() is similar in operation to SNMP_GETNEXT, but
5033941Svenki * only just. In particular, this is only expected to return the next
5043941Svenki * valid row number for the same object, not its value. Since we don't
5053941Svenki * have any other means, we use this to determine the number of rows
5063941Svenki * in the table (and the valid ones). This function returns 0 on success
5073941Svenki * and -1 on failure.
5083941Svenki */
5093941Svenki int
snmp_get_nextrow(picl_snmphdl_t hdl,char * prefix,int row,int * nextrow,int * snmp_syserr)5103941Svenki snmp_get_nextrow(picl_snmphdl_t hdl, char *prefix, int row, int *nextrow,
5113941Svenki int *snmp_syserr)
5123941Svenki {
5133941Svenki struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
5143941Svenki snmp_pdu_t *reply_pdu;
5153941Svenki pdu_varlist_t *vp;
5163941Svenki char *nxt_oidstr;
5173941Svenki int err = 0;
5183941Svenki
5195995Sfw157321 if (smd == NULL || prefix == NULL || nextrow == NULL) {
5205995Sfw157321 if (snmp_syserr)
5215995Sfw157321 *snmp_syserr = EINVAL;
5223941Svenki return (-1);
5235995Sfw157321 }
5243941Svenki
5253941Svenki /*
5263941Svenki * The get_nextrow results should *never* go into any cache,
5273941Svenki * since these relationships are dynamically discovered each time.
5283941Svenki */
5293941Svenki if ((reply_pdu = fetch_next(smd, prefix, row, &err)) == NULL) {
5303941Svenki if (snmp_syserr)
5313941Svenki *snmp_syserr = err;
5323941Svenki return (-1);
5333941Svenki }
5343941Svenki
5353941Svenki /*
5363941Svenki * We are not concerned about the "value" of the lexicographically
5373941Svenki * next object; we only care about the name of that object and
5383941Svenki * its row number (and whether such an object exists or not).
5393941Svenki */
5403941Svenki vp = reply_pdu->vars;
5415995Sfw157321
5425995Sfw157321 /*
5435995Sfw157321 * This indicates that we're at the end of the MIB view.
5445995Sfw157321 */
5453941Svenki if (vp == NULL || vp->name == NULL || vp->type == SNMP_NOSUCHOBJECT ||
5463941Svenki vp->type == SNMP_NOSUCHINSTANCE || vp->type == SNMP_ENDOFMIBVIEW) {
5473941Svenki snmp_free_pdu(reply_pdu);
5485995Sfw157321 if (snmp_syserr)
5495995Sfw157321 *snmp_syserr = ENOSPC;
5503941Svenki return (-1);
5513941Svenki }
5525995Sfw157321
5535995Sfw157321 /*
5545995Sfw157321 * need to be able to convert the OID
5555995Sfw157321 */
5563941Svenki if ((nxt_oidstr = oid_to_oidstr(vp->name, vp->name_len - 1)) == NULL) {
5573941Svenki snmp_free_pdu(reply_pdu);
5585995Sfw157321 if (snmp_syserr)
5595995Sfw157321 *snmp_syserr = ENOMEM;
5603941Svenki return (-1);
5613941Svenki }
5625995Sfw157321
5635995Sfw157321 /*
5645995Sfw157321 * We're on to the next table.
5655995Sfw157321 */
5663941Svenki if (strcmp(nxt_oidstr, prefix) != 0) {
5673941Svenki free(nxt_oidstr);
5683941Svenki snmp_free_pdu(reply_pdu);
5695995Sfw157321 if (snmp_syserr)
5705995Sfw157321 *snmp_syserr = ENOENT;
5713941Svenki return (-1);
5723941Svenki }
5733941Svenki
5743941Svenki /*
5753941Svenki * Ok, so we've got an oid that's simply the next valid row of the
5763941Svenki * passed on object, return this row number.
5773941Svenki */
5783941Svenki *nextrow = (vp->name)[vp->name_len-1];
5793941Svenki
5803941Svenki free(nxt_oidstr);
5813941Svenki snmp_free_pdu(reply_pdu);
5823941Svenki
5833941Svenki return (0);
5843941Svenki }
5853941Svenki
5863941Svenki /*
5873941Svenki * Request ids for snmp messages to the agent are sequenced here.
5883941Svenki */
5893941Svenki int
snmp_get_reqid(void)5903941Svenki snmp_get_reqid(void)
5913941Svenki {
5923941Svenki int ret;
5933941Svenki
5943941Svenki (void) mutex_lock(&snmp_reqid_lock);
5953941Svenki
5963941Svenki ret = snmp_reqid++;
5973941Svenki
5983941Svenki (void) mutex_unlock(&snmp_reqid_lock);
5993941Svenki
6003941Svenki return (ret);
6013941Svenki }
6023941Svenki
6033941Svenki static int
lookup_int(char * prefix,int row,int * valp,int is_vol)6043941Svenki lookup_int(char *prefix, int row, int *valp, int is_vol)
6053941Svenki {
6063941Svenki int32_t *val_arr;
6073941Svenki uint_t nelem;
6087746SKelly.Moyer@Sun.COM int now;
6093941Svenki int elapsed;
6103941Svenki
6113941Svenki (void) mutex_lock(&mibcache_lock);
6123941Svenki
6133941Svenki if (row >= n_mibcache_rows) {
6143941Svenki (void) mutex_unlock(&mibcache_lock);
6153941Svenki return (-1);
6163941Svenki }
6173941Svenki
6183941Svenki if (mibcache[row] == NULL) {
6193941Svenki (void) mutex_unlock(&mibcache_lock);
6203941Svenki return (-1);
6213941Svenki }
6223941Svenki
6233941Svenki /*
6243941Svenki * If this is a volatile property, we should be searching
6253941Svenki * for an integer-timestamp pair
6263941Svenki */
6273941Svenki if (is_vol) {
6283941Svenki if (nvlist_lookup_int32_array(mibcache[row], prefix,
6293941Svenki &val_arr, &nelem) != 0) {
6303941Svenki (void) mutex_unlock(&mibcache_lock);
6313941Svenki return (-1);
6323941Svenki }
6333941Svenki if (nelem != 2 || val_arr[1] < 0) {
6343941Svenki (void) mutex_unlock(&mibcache_lock);
6353941Svenki return (-1);
6363941Svenki }
6377746SKelly.Moyer@Sun.COM now = GET_SCALED_HRTIME();
6387746SKelly.Moyer@Sun.COM elapsed = now - val_arr[1];
6393941Svenki if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
6403941Svenki (void) mutex_unlock(&mibcache_lock);
6413941Svenki return (-1);
6423941Svenki }
6433941Svenki
6443941Svenki *valp = (int)val_arr[0];
6453941Svenki } else {
6463941Svenki if (nvlist_lookup_int32(mibcache[row], prefix, valp) != 0) {
6473941Svenki (void) mutex_unlock(&mibcache_lock);
6483941Svenki return (-1);
6493941Svenki }
6503941Svenki }
6513941Svenki
6523941Svenki (void) mutex_unlock(&mibcache_lock);
6533941Svenki
6543941Svenki return (0);
6553941Svenki }
6563941Svenki
6573941Svenki static int
lookup_str(char * prefix,int row,char ** valp,int is_vol)6583941Svenki lookup_str(char *prefix, int row, char **valp, int is_vol)
6593941Svenki {
6603941Svenki char **val_arr;
6613941Svenki uint_t nelem;
6627746SKelly.Moyer@Sun.COM int now;
6633941Svenki int elapsed;
6643941Svenki
6653941Svenki (void) mutex_lock(&mibcache_lock);
6663941Svenki
6673941Svenki if (row >= n_mibcache_rows) {
6683941Svenki (void) mutex_unlock(&mibcache_lock);
6693941Svenki return (-1);
6703941Svenki }
6713941Svenki
6723941Svenki if (mibcache[row] == NULL) {
6733941Svenki (void) mutex_unlock(&mibcache_lock);
6743941Svenki return (-1);
6753941Svenki }
6763941Svenki
6773941Svenki /*
6783941Svenki * If this is a volatile property, we should be searching
6793941Svenki * for a string-timestamp pair
6803941Svenki */
6813941Svenki if (is_vol) {
6823941Svenki if (nvlist_lookup_string_array(mibcache[row], prefix,
6833941Svenki &val_arr, &nelem) != 0) {
6843941Svenki (void) mutex_unlock(&mibcache_lock);
6853941Svenki return (-1);
6863941Svenki }
6873941Svenki if (nelem != 2 || atoi(val_arr[1]) <= 0) {
6883941Svenki (void) mutex_unlock(&mibcache_lock);
6893941Svenki return (-1);
6903941Svenki }
6917746SKelly.Moyer@Sun.COM now = GET_SCALED_HRTIME();
6927746SKelly.Moyer@Sun.COM elapsed = now - atoi(val_arr[1]);
6933941Svenki if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
6943941Svenki (void) mutex_unlock(&mibcache_lock);
6953941Svenki return (-1);
6963941Svenki }
6973941Svenki
6983941Svenki *valp = val_arr[0];
6993941Svenki } else {
7005995Sfw157321 if (nvlist_lookup_string(mibcache[row], prefix, valp) != 0) {
7015995Sfw157321 (void) mutex_unlock(&mibcache_lock);
7025995Sfw157321 return (-1);
7035995Sfw157321 }
7043941Svenki }
7053941Svenki
7063941Svenki (void) mutex_unlock(&mibcache_lock);
7073941Svenki
7083941Svenki return (0);
7093941Svenki }
7103941Svenki
7113941Svenki static int
lookup_bitstr(char * prefix,int row,uchar_t ** valp,uint_t * nelem,int is_vol)7123941Svenki lookup_bitstr(char *prefix, int row, uchar_t **valp, uint_t *nelem, int is_vol)
7133941Svenki {
7143941Svenki (void) mutex_lock(&mibcache_lock);
7153941Svenki
7163941Svenki if (row >= n_mibcache_rows) {
7173941Svenki (void) mutex_unlock(&mibcache_lock);
7183941Svenki return (-1);
7193941Svenki }
7203941Svenki
7213941Svenki if (mibcache[row] == NULL) {
7223941Svenki (void) mutex_unlock(&mibcache_lock);
7233941Svenki return (-1);
7243941Svenki }
7253941Svenki
7263941Svenki /*
7273941Svenki * We don't support volatile bit string values yet. The nvlist
7283941Svenki * functions don't support bitstring arrays like they do charstring
7293941Svenki * arrays, so we would need to do things in a convoluted way,
7303941Svenki * probably by attaching the timestamp as part of the byte array
7313941Svenki * itself. However, the need for volatile bitstrings isn't there
7323941Svenki * yet, to justify the effort.
7333941Svenki */
7343941Svenki if (is_vol) {
7353941Svenki (void) mutex_unlock(&mibcache_lock);
7363941Svenki return (-1);
7373941Svenki }
7383941Svenki
7393941Svenki if (nvlist_lookup_byte_array(mibcache[row], prefix, valp, nelem) != 0) {
7403941Svenki (void) mutex_unlock(&mibcache_lock);
7413941Svenki return (-1);
7423941Svenki }
7433941Svenki
7443941Svenki (void) mutex_unlock(&mibcache_lock);
7453941Svenki
7463941Svenki return (0);
7473941Svenki }
7483941Svenki
/*
 * Check whether prefix appears among the n_oids consecutive
 * NUL-terminated strings starting at oidstrs. Returns 0 when found,
 * -1 otherwise.
 */
static int
search_oid_in_group(char *prefix, char *oidstrs, int n_oids)
{
	char	*oid = oidstrs;
	int	i;

	for (i = 0; i < n_oids; i++, oid += strlen(oid) + 1) {
		if (strcmp(oid, prefix) == 0)
			return (0);
	}

	return (-1);
}
7653941Svenki
7663941Svenki static oidgroup_t *
locate_oid_group(struct picl_snmphdl * smd,char * prefix)7673941Svenki locate_oid_group(struct picl_snmphdl *smd, char *prefix)
7683941Svenki {
7693941Svenki oidgroup_t *grp;
7703941Svenki
7713941Svenki if (smd == NULL)
7723941Svenki return (NULL);
7733941Svenki
7743941Svenki if (smd->group == NULL)
7753941Svenki return (NULL);
7763941Svenki
7773941Svenki for (grp = smd->group; grp; grp = grp->next) {
7783941Svenki if (search_oid_in_group(prefix, grp->oidstrs,
7793941Svenki grp->n_oids) == 0) {
7803941Svenki return (grp);
7813941Svenki }
7823941Svenki }
7833941Svenki
7843941Svenki return (NULL);
7853941Svenki }
7863941Svenki
7873941Svenki static int
fetch_single_int(struct picl_snmphdl * smd,char * prefix,int row,int * ival,int * snmp_syserr)7883941Svenki fetch_single_int(struct picl_snmphdl *smd, char *prefix, int row, int *ival,
7893941Svenki int *snmp_syserr)
7903941Svenki {
7913941Svenki snmp_pdu_t *reply_pdu;
7923941Svenki pdu_varlist_t *vp;
7933941Svenki
7943941Svenki if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
7953941Svenki return (-1);
7963941Svenki
7973941Svenki /*
7983941Svenki * Note that we don't make any distinction between unsigned int
7993941Svenki * value and signed int value at this point, since we provide
8003941Svenki * only snmp_get_int() at the higher level. While it is possible
8013941Svenki * to provide an entirely separate interface such as snmp_get_uint(),
8023941Svenki * that's quite unnecessary, because we don't do any interpretation
8033941Svenki * of the received value. Besides, the sizes of int and uint are
8043941Svenki * the same and the sizes of all pointers are the same (so val.iptr
8053941Svenki * would be the same as val.uiptr in pdu_varlist_t). If/when we
8063941Svenki * violate any of these assumptions, it will be time to add
8073941Svenki * snmp_get_uint().
8083941Svenki */
8093941Svenki vp = reply_pdu->vars;
8103941Svenki if (vp == NULL || vp->val.iptr == NULL) {
8113941Svenki snmp_free_pdu(reply_pdu);
8123941Svenki return (-1);
8133941Svenki }
8143941Svenki
8153941Svenki *ival = *(vp->val.iptr);
8163941Svenki
8173941Svenki snmp_free_pdu(reply_pdu);
8183941Svenki
8193941Svenki return (0);
8203941Svenki }
8213941Svenki
8223941Svenki static int
fetch_single_str(struct picl_snmphdl * smd,char * prefix,int row,char ** valp,int * snmp_syserr)8233941Svenki fetch_single_str(struct picl_snmphdl *smd, char *prefix, int row, char **valp,
8243941Svenki int *snmp_syserr)
8253941Svenki {
8263941Svenki snmp_pdu_t *reply_pdu;
8273941Svenki pdu_varlist_t *vp;
8283941Svenki
8293941Svenki if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
8303941Svenki return (-1);
8313941Svenki
8323941Svenki vp = reply_pdu->vars;
8333941Svenki if (vp == NULL || vp->val.str == NULL) {
8343941Svenki snmp_free_pdu(reply_pdu);
8353941Svenki return (-1);
8363941Svenki }
8373941Svenki
8383941Svenki *valp = strdup((const char *)(vp->val.str));
8393941Svenki
8403941Svenki snmp_free_pdu(reply_pdu);
8413941Svenki
8423941Svenki return (0);
8433941Svenki }
8443941Svenki
8453941Svenki static int
fetch_single_bitstr(struct picl_snmphdl * smd,char * prefix,int row,uchar_t ** valp,uint_t * nelem,int * snmp_syserr)8463941Svenki fetch_single_bitstr(struct picl_snmphdl *smd, char *prefix, int row,
8473941Svenki uchar_t **valp, uint_t *nelem, int *snmp_syserr)
8483941Svenki {
8493941Svenki snmp_pdu_t *reply_pdu;
8503941Svenki pdu_varlist_t *vp;
8513941Svenki
8523941Svenki if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
8533941Svenki return (-1);
8543941Svenki
8553941Svenki vp = reply_pdu->vars;
8563941Svenki if (vp == NULL || vp->val.str == NULL) {
8573941Svenki snmp_free_pdu(reply_pdu);
8583941Svenki return (-1);
8593941Svenki }
8603941Svenki
8613941Svenki if ((*valp = (uchar_t *)calloc(vp->val_len, 1)) == NULL) {
8623941Svenki snmp_free_pdu(reply_pdu);
8633941Svenki return (-1);
8643941Svenki }
8653941Svenki
8663941Svenki *nelem = vp->val_len;
8673941Svenki (void) memcpy(*valp, (const void *)(vp->val.str),
8683941Svenki (size_t)(vp->val_len));
8693941Svenki
8703941Svenki snmp_free_pdu(reply_pdu);
8713941Svenki
8723941Svenki return (0);
8733941Svenki }
8743941Svenki
8753941Svenki static snmp_pdu_t *
fetch_single(struct picl_snmphdl * smd,char * prefix,int row,int * snmp_syserr)8763941Svenki fetch_single(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
8773941Svenki {
8783941Svenki snmp_pdu_t *pdu, *reply_pdu;
8793941Svenki
8803941Svenki LOGGET(TAG_CMD_REQUEST, prefix, row);
8813941Svenki
8823941Svenki if ((pdu = snmp_create_pdu(SNMP_MSG_GET, 0, prefix, 1, row)) == NULL)
8833941Svenki return (NULL);
8843941Svenki
8853941Svenki LOGPDU(TAG_REQUEST_PDU, pdu);
8863941Svenki
8873941Svenki if (snmp_make_packet(pdu) < 0) {
8883941Svenki snmp_free_pdu(pdu);
8893941Svenki return (NULL);
8903941Svenki }
8913941Svenki
8923941Svenki LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);
8933941Svenki
8943941Svenki if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
8953941Svenki snmp_free_pdu(pdu);
8963941Svenki return (NULL);
8973941Svenki }
8983941Svenki
8993941Svenki if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
9003941Svenki snmp_free_pdu(pdu);
9013941Svenki return (NULL);
9023941Svenki }
9033941Svenki
9043941Svenki LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);
9053941Svenki
9063941Svenki reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
9073941Svenki pdu->reply_pktsz);
9083941Svenki
9093941Svenki LOGPDU(TAG_RESPONSE_PDU, reply_pdu);
9103941Svenki
9113941Svenki snmp_free_pdu(pdu);
9123941Svenki
9133941Svenki return (reply_pdu);
9143941Svenki }
9153941Svenki
9163941Svenki static void
fetch_bulk(struct picl_snmphdl * smd,char * oidstrs,int n_oids,int row,int is_vol,int * snmp_syserr)9173941Svenki fetch_bulk(struct picl_snmphdl *smd, char *oidstrs, int n_oids,
9183941Svenki int row, int is_vol, int *snmp_syserr)
9193941Svenki {
9203941Svenki snmp_pdu_t *pdu, *reply_pdu;
9213941Svenki int max_reps;
9223941Svenki
9233941Svenki LOGBULK(TAG_CMD_REQUEST, n_oids, oidstrs, row);
9243941Svenki
9253941Svenki /*
9263941Svenki * If we're fetching volatile properties using BULKGET, don't
9273941Svenki * venture to get multiple rows (passing max_reps=0 will make
9283941Svenki * snmp_create_pdu() fetch SNMP_DEF_MAX_REPETITIONS rows)
9293941Svenki */
9303941Svenki max_reps = is_vol ? 1 : 0;
9313941Svenki
9323941Svenki pdu = snmp_create_pdu(SNMP_MSG_GETBULK, max_reps, oidstrs, n_oids, row);
9333941Svenki if (pdu == NULL)
9343941Svenki return;
9353941Svenki
9363941Svenki LOGPDU(TAG_REQUEST_PDU, pdu);
9373941Svenki
9383941Svenki /*
9393941Svenki * Make an ASN.1 encoded packet from the PDU information
9403941Svenki */
9413941Svenki if (snmp_make_packet(pdu) < 0) {
9423941Svenki snmp_free_pdu(pdu);
9433941Svenki return;
9443941Svenki }
9453941Svenki
9463941Svenki LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);
9473941Svenki
9483941Svenki /*
9493941Svenki * Send the request packet to the agent
9503941Svenki */
9513941Svenki if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
9523941Svenki snmp_free_pdu(pdu);
9533941Svenki return;
9543941Svenki }
9553941Svenki
9563941Svenki /*
9573941Svenki * Receive response from the agent into the reply packet buffer
9583941Svenki * in the request PDU
9593941Svenki */
9603941Svenki if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
9613941Svenki snmp_free_pdu(pdu);
9623941Svenki return;
9633941Svenki }
9643941Svenki
9653941Svenki LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);
9663941Svenki
9673941Svenki /*
9683941Svenki * Parse the reply, validate the response and create a
9693941Svenki * reply-PDU out of the information. Populate the mibcache
9703941Svenki * with the received values.
9713941Svenki */
9723941Svenki reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
9733941Svenki pdu->reply_pktsz);
9743941Svenki if (reply_pdu) {
9753941Svenki LOGPDU(TAG_RESPONSE_PDU, reply_pdu);
9763941Svenki
9777746SKelly.Moyer@Sun.COM if (reply_pdu->errstat == SNMP_ERR_NOERROR) {
9787746SKelly.Moyer@Sun.COM if (is_vol) {
9797746SKelly.Moyer@Sun.COM /* Add a job to the cache refresh work queue */
9807746SKelly.Moyer@Sun.COM (void) refreshq_add_job(smd, oidstrs, n_oids,
9817746SKelly.Moyer@Sun.COM row);
9827746SKelly.Moyer@Sun.COM }
9837746SKelly.Moyer@Sun.COM
9843941Svenki mibcache_populate(reply_pdu, is_vol);
9857746SKelly.Moyer@Sun.COM }
9863941Svenki
9873941Svenki snmp_free_pdu(reply_pdu);
9883941Svenki }
9893941Svenki
9903941Svenki snmp_free_pdu(pdu);
9913941Svenki }
9923941Svenki
9933941Svenki static snmp_pdu_t *
fetch_next(struct picl_snmphdl * smd,char * prefix,int row,int * snmp_syserr)9943941Svenki fetch_next(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
9953941Svenki {
9963941Svenki snmp_pdu_t *pdu, *reply_pdu;
9973941Svenki
9983941Svenki LOGNEXT(TAG_CMD_REQUEST, prefix, row);
9993941Svenki
10003941Svenki pdu = snmp_create_pdu(SNMP_MSG_GETNEXT, 0, prefix, 1, row);
10013941Svenki if (pdu == NULL)
10023941Svenki return (NULL);
10033941Svenki
10043941Svenki LOGPDU(TAG_REQUEST_PDU, pdu);
10053941Svenki
10063941Svenki if (snmp_make_packet(pdu) < 0) {
10073941Svenki snmp_free_pdu(pdu);
10083941Svenki return (NULL);
10093941Svenki }
10103941Svenki
10113941Svenki LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);
10123941Svenki
10133941Svenki if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
10143941Svenki snmp_free_pdu(pdu);
10153941Svenki return (NULL);
10163941Svenki }
10173941Svenki
10183941Svenki if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
10193941Svenki snmp_free_pdu(pdu);
10203941Svenki return (NULL);
10213941Svenki }
10223941Svenki
10233941Svenki LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);
10243941Svenki
10253941Svenki reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
10263941Svenki pdu->reply_pktsz);
10273941Svenki
10283941Svenki LOGPDU(TAG_RESPONSE_PDU, reply_pdu);
10293941Svenki
10303941Svenki snmp_free_pdu(pdu);
10313941Svenki
10323941Svenki return (reply_pdu);
10333941Svenki }
10343941Svenki
10353941Svenki static int
snmp_send_request(struct picl_snmphdl * smd,snmp_pdu_t * pdu,int * snmp_syserr)10363941Svenki snmp_send_request(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
10373941Svenki {
10383941Svenki extern int errno;
10393941Svenki #ifdef USE_SOCKETS
10403941Svenki int ret;
10413941Svenki #endif
10423941Svenki
10433941Svenki if (smd->fd < 0)
10443941Svenki return (-1);
10453941Svenki
10463941Svenki if (pdu == NULL || pdu->req_pkt == NULL)
10473941Svenki return (-1);
10483941Svenki
10493941Svenki #ifdef USE_SOCKETS
10503941Svenki ret = -1;
10513941Svenki while (ret < 0) {
10523941Svenki LOGIO(TAG_SENDTO, smd->fd, pdu->req_pkt, pdu->req_pktsz);
10533941Svenki
10543941Svenki ret = sendto(smd->fd, pdu->req_pkt, pdu->req_pktsz, 0,
10553941Svenki (struct sockaddr *)&smd->agent_addr,
10563941Svenki sizeof (struct sockaddr));
10573941Svenki if (ret < 0 && errno != EINTR) {
10583941Svenki return (-1);
10593941Svenki }
10603941Svenki }
10613941Svenki #else
10623941Svenki LOGIO(TAG_WRITE, smd->fd, pdu->req_pkt, pdu->req_pktsz);
10633941Svenki
10643941Svenki if (write(smd->fd, pdu->req_pkt, pdu->req_pktsz) < 0) {
10653941Svenki if (snmp_syserr)
10663941Svenki *snmp_syserr = errno;
10673941Svenki return (-1);
10683941Svenki }
10693941Svenki #endif
10703941Svenki
10713941Svenki #ifdef SNMP_DEBUG
10723941Svenki snmp_nsends++;
10733941Svenki snmp_sentbytes += pdu->req_pktsz;
10743941Svenki #endif
10753941Svenki
10763941Svenki return (0);
10773941Svenki }
10783941Svenki
10793941Svenki static int
snmp_recv_reply(struct picl_snmphdl * smd,snmp_pdu_t * pdu,int * snmp_syserr)10803941Svenki snmp_recv_reply(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
10813941Svenki {
10823941Svenki struct dssnmp_info snmp_info;
10833941Svenki size_t pktsz;
10843941Svenki uchar_t *pkt;
10853941Svenki extern int errno;
10863941Svenki #ifdef USE_SOCKETS
10873941Svenki struct sockaddr_in from;
10883941Svenki int fromlen;
10893941Svenki ssize_t msgsz;
10903941Svenki #endif
10913941Svenki
10923941Svenki if (smd->fd < 0 || pdu == NULL)
10933941Svenki return (-1);
10943941Svenki
10953941Svenki #ifdef USE_SOCKETS
10963941Svenki if ((pkt = (uchar_t *)calloc(1, SNMP_MAX_RECV_PKTSZ)) == NULL)
10973941Svenki return (-1);
10983941Svenki
10993941Svenki fromlen = sizeof (struct sockaddr_in);
11003941Svenki
11013941Svenki LOGIO(TAG_RECVFROM, smd->fd, pkt, SNMP_MAX_RECV_PKTSZ);
11023941Svenki
11033941Svenki msgsz = recvfrom(smd->fd, pkt, SNMP_MAX_RECV_PKTSZ, 0,
11043941Svenki (struct sockaddr *)&from, &fromlen);
11053941Svenki if (msgsz < 0 || msgsz >= SNMP_MAX_RECV_PKTSZ) {
11063941Svenki free(pkt);
11073941Svenki return (-1);
11083941Svenki }
11093941Svenki
11103941Svenki pktsz = (size_t)msgsz;
11113941Svenki #else
11123941Svenki LOGIO(TAG_IOCTL, smd->fd, DSSNMP_GETINFO, &snmp_info);
11133941Svenki
11143941Svenki /*
11153941Svenki * The ioctl will block until we have snmp data available
11163941Svenki */
11173941Svenki if (ioctl(smd->fd, DSSNMP_GETINFO, &snmp_info) < 0) {
11183941Svenki if (snmp_syserr)
11193941Svenki *snmp_syserr = errno;
11203941Svenki return (-1);
11213941Svenki }
11223941Svenki
11233941Svenki pktsz = snmp_info.size;
11243941Svenki if ((pkt = (uchar_t *)calloc(1, pktsz)) == NULL)
11253941Svenki return (-1);
11263941Svenki
11273941Svenki LOGIO(TAG_READ, smd->fd, pkt, pktsz);
11283941Svenki
11293941Svenki if (read(smd->fd, pkt, pktsz) < 0) {
11303941Svenki free(pkt);
11313941Svenki if (snmp_syserr)
11323941Svenki *snmp_syserr = errno;
11333941Svenki return (-1);
11343941Svenki }
11353941Svenki #endif
11363941Svenki
11373941Svenki pdu->reply_pkt = pkt;
11383941Svenki pdu->reply_pktsz = pktsz;
11393941Svenki
11403941Svenki #ifdef SNMP_DEBUG
11413941Svenki snmp_nrecvs++;
11423941Svenki snmp_rcvdbytes += pktsz;
11433941Svenki #endif
11443941Svenki
11453941Svenki return (0);
11463941Svenki }
11473941Svenki
11483941Svenki static int
mibcache_realloc(int hint)11493941Svenki mibcache_realloc(int hint)
11503941Svenki {
11513941Svenki uint_t count = (uint_t)hint;
11523941Svenki nvlist_t **p;
11533941Svenki
11543941Svenki if (hint < 0)
11553941Svenki return (-1);
11563941Svenki
11573941Svenki (void) mutex_lock(&mibcache_lock);
11583941Svenki
11593941Svenki if (hint < n_mibcache_rows) {
11603941Svenki (void) mutex_unlock(&mibcache_lock);
11613941Svenki return (0);
11623941Svenki }
11633941Svenki
11643941Svenki count = ((count >> MIBCACHE_BLK_SHIFT) + 1) << MIBCACHE_BLK_SHIFT;
11653941Svenki
11663941Svenki p = (nvlist_t **)calloc(count, sizeof (nvlist_t *));
11673941Svenki if (p == NULL) {
11683941Svenki (void) mutex_unlock(&mibcache_lock);
11693941Svenki return (-1);
11703941Svenki }
11713941Svenki
11723941Svenki if (mibcache) {
11733941Svenki (void) memcpy((void *) p, (void *) mibcache,
11743941Svenki n_mibcache_rows * sizeof (nvlist_t *));
11753941Svenki free((void *) mibcache);
11763941Svenki }
11773941Svenki
11783941Svenki mibcache = p;
11793941Svenki n_mibcache_rows = count;
11803941Svenki
11813941Svenki (void) mutex_unlock(&mibcache_lock);
11823941Svenki
11833941Svenki return (0);
11843941Svenki }
11853941Svenki
11863941Svenki
/*
 * Scan each variable in the returned PDU's bindings and populate
 * the cache appropriately
 */
static void
mibcache_populate(snmp_pdu_t *pdu, int is_vol)
{
	pdu_varlist_t	*vp;
	int		row, ret;
	char		*oidstr;
	int		tod;	/* in secs */
	char		tod_str[MAX_INT_LEN];
	int		ival_arr[2];
	char		*sval_arr[2];

	/*
	 * If we're populating volatile properties, we also store a
	 * timestamp with each property value. When we lookup, we check the
	 * current time against this timestamp to determine if we need to
	 * refetch the value or not (refetch if it has been in for far too
	 * long).  Element [1] of each stored 2-element array carries the
	 * timestamp; element [0] carries the value itself.
	 */

	if (is_vol) {
		tod = GET_SCALED_HRTIME();

		tod_str[0] = 0;
		(void) snprintf(tod_str, MAX_INT_LEN, "%d", tod);

		ival_arr[1] = tod;
		sval_arr[1] = (char *)tod_str;
	}

	for (vp = pdu->vars; vp; vp = vp->nextvar) {
		/* only integer, octet-string and bit-string values are cached */
		if (vp->type != ASN_INTEGER && vp->type != ASN_OCTET_STR &&
		    vp->type != ASN_BIT_STR) {
			continue;
		}

		if (vp->name == NULL || vp->val.str == NULL)
			continue;

		/* the leaf subid of the OID selects the cache row */
		row = (vp->name)[vp->name_len-1];

		(void) mutex_lock(&mibcache_lock);

		if (row >= n_mibcache_rows) {
			/*
			 * Drop the lock before growing the cache, since
			 * mibcache_realloc() acquires mibcache_lock itself,
			 * then re-take it to use the (possibly new) array.
			 */
			(void) mutex_unlock(&mibcache_lock);
			if (mibcache_realloc(row) < 0)
				continue;
			(void) mutex_lock(&mibcache_lock);
		}
		ret = 0;
		if (mibcache[row] == NULL)
			ret = nvlist_alloc(&mibcache[row], NV_UNIQUE_NAME, 0);

		(void) mutex_unlock(&mibcache_lock);

		/* couldn't allocate an nvlist for this row; skip the value */
		if (ret != 0)
			continue;

		/*
		 * Convert the standard OID form into an oid string that
		 * we can use as the key to lookup. Since we only search
		 * by the prefix (mibcache is really an array of nvlist_t
		 * pointers), ignore the leaf subid.
		 */
		oidstr = oid_to_oidstr(vp->name, vp->name_len - 1);
		if (oidstr == NULL)
			continue;

		(void) mutex_lock(&mibcache_lock);

		if (vp->type == ASN_INTEGER) {
			if (is_vol) {
				/* volatile: store { value, timestamp } */
				ival_arr[0] = *(vp->val.iptr);
				(void) nvlist_add_int32_array(mibcache[row],
				    oidstr, ival_arr, 2);
			} else {
				(void) nvlist_add_int32(mibcache[row],
				    oidstr, *(vp->val.iptr));
			}

		} else if (vp->type == ASN_OCTET_STR) {
			if (is_vol) {
				/* volatile: store { value, timestamp } */
				sval_arr[0] = (char *)vp->val.str;
				(void) nvlist_add_string_array(mibcache[row],
				    oidstr, sval_arr, 2);
			} else {
				(void) nvlist_add_string(mibcache[row],
				    oidstr, (const char *)(vp->val.str));
			}
		} else if (vp->type == ASN_BIT_STR) {
			/*
			 * We don't support yet bit string objects that are
			 * volatile values.
			 */
			if (!is_vol) {
				(void) nvlist_add_byte_array(mibcache[row],
				    oidstr, (uchar_t *)(vp->val.str),
				    (uint_t)vp->val_len);
			}
		}
		(void) mutex_unlock(&mibcache_lock);

		free(oidstr);
	}
}
12953941Svenki
12963941Svenki static char *
oid_to_oidstr(oid * objid,size_t n_subids)12973941Svenki oid_to_oidstr(oid *objid, size_t n_subids)
12983941Svenki {
12993941Svenki char *oidstr;
13003941Svenki char subid_str[MAX_INT_LEN];
13013941Svenki int i, isize;
13024003Svivek size_t oidstr_sz;
13033941Svenki
13043941Svenki /*
13053941Svenki * ugly, but for now this will have to do.
13063941Svenki */
13074003Svivek oidstr_sz = sizeof (subid_str) * n_subids;
13084003Svivek oidstr = calloc(1, oidstr_sz);
13093941Svenki
13103941Svenki for (i = 0; i < n_subids; i++) {
13114003Svivek (void) memset(subid_str, 0, sizeof (subid_str));
13124003Svivek isize = snprintf(subid_str, sizeof (subid_str), "%d",
13135995Sfw157321 objid[i]);
13144003Svivek if (isize >= sizeof (subid_str))
13153941Svenki return (NULL);
13163941Svenki
13174003Svivek (void) strlcat(oidstr, subid_str, oidstr_sz);
13183941Svenki if (i < (n_subids - 1))
13194003Svivek (void) strlcat(oidstr, ".", oidstr_sz);
13203941Svenki }
13213941Svenki
13223941Svenki return (oidstr);
13233941Svenki }
13247746SKelly.Moyer@Sun.COM
/*
 * Expand the refreshq to hold more cache refresh jobs.  Caller must already
 * hold refreshq_lock mutex.  Every expansion of the refreshq will add
 * REFRESH_BLK_SZ job slots, rather than expanding by one slot every time more
 * space is needed.  Returns 0 on success (including when the queue is
 * already large enough), -1 on a negative hint or allocation failure.
 */
static int
refreshq_realloc(int hint)
{
	uint_t		count = (uint_t)hint;
	refreshq_job_t	*p;

	if (hint < 0)
		return (-1);

	if (hint < n_refreshq_slots) {
		return (0);
	}

	/* Round count up to next multiple of REFRESHQ_BLK_SHIFT */
	count = ((count >> REFRESHQ_BLK_SHIFT) + 1) << REFRESHQ_BLK_SHIFT;

	p = (refreshq_job_t *)calloc(count, sizeof (refreshq_job_t));
	if (p == NULL) {
		return (-1);
	}

	if (refreshq) {
		if (n_refreshq_jobs == 0) {
			/* Simple case, nothing to copy */
			refreshq_next_job = 0;
			refreshq_next_slot = 0;
		} else if (refreshq_next_slot > refreshq_next_job) {
			/*
			 * Simple case, single copy preserves everything.
			 * NOTE(review): refreshq_next_job/next_slot are not
			 * reset here even though the jobs are copied to the
			 * start of the new array; this branch appears
			 * unreachable from the current callers (which only
			 * expand a completely full queue, where next_slot ==
			 * next_job) -- confirm before relying on it.
			 */
			(void) memcpy((void *) p,
			    (void *) &(refreshq[refreshq_next_job]),
			    n_refreshq_jobs * sizeof (refreshq_job_t));
		} else {
			/*
			 * Complex case.  The jobs in the refresh queue wrap
			 * around the end of the array in which they are stored.
			 * To preserve chronological order in the new allocated
			 * array, we need to copy the jobs at the end of the old
			 * array to the beginning of the new one and place the
			 * jobs from the beginning of the old array after them.
			 */
			uint_t tail_jobs, head_jobs;

			tail_jobs = n_refreshq_slots - refreshq_next_job;
			head_jobs = n_refreshq_jobs - tail_jobs;

			/* Copy the jobs from the end of the old array */
			(void) memcpy((void *) p,
			    (void *) &(refreshq[refreshq_next_job]),
			    tail_jobs * sizeof (refreshq_job_t));

			/* Copy the jobs from the beginning of the old array */
			(void) memcpy((void *) &(p[tail_jobs]),
			    (void *) &(refreshq[0]),
			    head_jobs * sizeof (refreshq_job_t));

			/* update the job and slot indices to match */
			refreshq_next_job = 0;
			refreshq_next_slot = n_refreshq_jobs;
		}
		free((void *) refreshq);
	} else {
		/* First initialization */
		refreshq_next_job = 0;
		refreshq_next_slot = 0;
		n_refreshq_jobs = 0;
	}

	refreshq = p;
	n_refreshq_slots = count;

	return (0);
}
14037746SKelly.Moyer@Sun.COM
/*
 * Add a new job to the refreshq.  If there aren't any open slots, attempt to
 * expand the queue first.  Return -1 if unable to add the job to the work
 * queue, or 0 if the job was added OR if an existing job with the same
 * parameters is already pending.
 */
static int
refreshq_add_job(struct picl_snmphdl *smd, char *oidstrs, int n_oids, int row)
{
	int	i;
	int	job;

	(void) mutex_lock(&refreshq_lock);

	/*
	 * Can't do anything without a queue.  Either the client never
	 * initialized the refresh queue or the initial memory allocation
	 * failed.
	 */
	if (refreshq == NULL) {
		(void) mutex_unlock(&refreshq_lock);
		return (-1);
	}

	/*
	 * If there is already a job pending with the same parameters as the job
	 * we have been asked to add, we apparently let an entry expire and it
	 * is now being reloaded.  Rather than add another job for the same
	 * entry, we skip adding the new job and let the existing job address
	 * it.
	 *
	 * NOTE(review): the scan compares the oidstrs POINTER, not the string
	 * contents -- presumably callers always pass the same group's oidstrs
	 * pointer for the same entry; confirm against fetch_bulk()'s callers.
	 */
	for (i = 0, job = refreshq_next_job; i < n_refreshq_jobs; i++,
	    job = (job + 1) % n_refreshq_slots) {
		if ((refreshq[job].row == row) &&
		    (refreshq[job].n_oids == n_oids) &&
		    (refreshq[job].oidstrs == oidstrs)) {
			(void) mutex_unlock(&refreshq_lock);
			return (0);
		}
	}


	/*
	 * If the queue is full, we need to expand it
	 * (refreshq_realloc() expects refreshq_lock to be held, which it is).
	 */
	if (n_refreshq_jobs == n_refreshq_slots) {
		if (refreshq_realloc(n_refreshq_slots + 1) < 0) {
			/*
			 * Can't expand the job queue, so we drop this job on
			 * the floor.  No data is lost... we just allow some
			 * data in the mibcache to expire.
			 */
			(void) mutex_unlock(&refreshq_lock);
			return (-1);
		}
	}

	/*
	 * There is room in the queue, so add the new job.  We are actually
	 * taking a timestamp for this job that is slightly earlier than when
	 * the mibcache entry will be updated, but since we're trying to update
	 * the mibcache entry before it expires anyway, the earlier timestamp
	 * here is acceptable.
	 */
	refreshq[refreshq_next_slot].smd = smd;
	refreshq[refreshq_next_slot].oidstrs = oidstrs;
	refreshq[refreshq_next_slot].n_oids = n_oids;
	refreshq[refreshq_next_slot].row = row;
	refreshq[refreshq_next_slot].last_fetch_time = GET_SCALED_HRTIME();

	/*
	 * Update queue management variables
	 */
	n_refreshq_jobs += 1;
	refreshq_next_slot = (refreshq_next_slot + 1) % n_refreshq_slots;

	(void) mutex_unlock(&refreshq_lock);

	return (0);
}
14847746SKelly.Moyer@Sun.COM
14857746SKelly.Moyer@Sun.COM /*
14867746SKelly.Moyer@Sun.COM * Almost all of the refresh code remains dormant unless specifically
14877746SKelly.Moyer@Sun.COM * initialized by a client (the exception being that fetch_bulk() will still
14887746SKelly.Moyer@Sun.COM * call refreshq_add_job(), but the latter will return without doing anything).
14897746SKelly.Moyer@Sun.COM */
14907746SKelly.Moyer@Sun.COM int
snmp_refresh_init(void)14917746SKelly.Moyer@Sun.COM snmp_refresh_init(void)
14927746SKelly.Moyer@Sun.COM {
14937746SKelly.Moyer@Sun.COM int ret;
14947746SKelly.Moyer@Sun.COM
14957746SKelly.Moyer@Sun.COM (void) mutex_lock(&refreshq_lock);
14967746SKelly.Moyer@Sun.COM
14977746SKelly.Moyer@Sun.COM ret = refreshq_realloc(0);
14987746SKelly.Moyer@Sun.COM
14997746SKelly.Moyer@Sun.COM (void) mutex_unlock(&refreshq_lock);
15007746SKelly.Moyer@Sun.COM
15017746SKelly.Moyer@Sun.COM return (ret);
15027746SKelly.Moyer@Sun.COM }
15037746SKelly.Moyer@Sun.COM
15047746SKelly.Moyer@Sun.COM /*
15057746SKelly.Moyer@Sun.COM * If the client is going away, we don't want to keep doing refresh work, so
15067746SKelly.Moyer@Sun.COM * clean everything up.
15077746SKelly.Moyer@Sun.COM */
15087746SKelly.Moyer@Sun.COM void
snmp_refresh_fini(void)15097746SKelly.Moyer@Sun.COM snmp_refresh_fini(void)
15107746SKelly.Moyer@Sun.COM {
15117746SKelly.Moyer@Sun.COM (void) mutex_lock(&refreshq_lock);
15127746SKelly.Moyer@Sun.COM
15137746SKelly.Moyer@Sun.COM n_refreshq_jobs = 0;
15147746SKelly.Moyer@Sun.COM n_refreshq_slots = 0;
15157746SKelly.Moyer@Sun.COM refreshq_next_job = 0;
15167746SKelly.Moyer@Sun.COM refreshq_next_slot = 0;
15177746SKelly.Moyer@Sun.COM free(refreshq);
15187746SKelly.Moyer@Sun.COM refreshq = NULL;
15197746SKelly.Moyer@Sun.COM
15207746SKelly.Moyer@Sun.COM (void) mutex_unlock(&refreshq_lock);
15217746SKelly.Moyer@Sun.COM }
15227746SKelly.Moyer@Sun.COM
15237746SKelly.Moyer@Sun.COM /*
15247746SKelly.Moyer@Sun.COM * Return the number of seconds remaining before the mibcache entry associated
15257746SKelly.Moyer@Sun.COM * with the next job in the queue will expire. Note that this requires
15267746SKelly.Moyer@Sun.COM * reversing the scaling normally done on hrtime values. (The need for scaling
15277746SKelly.Moyer@Sun.COM * is purely internal, and should be hidden from clients.) If there are no jobs
15287746SKelly.Moyer@Sun.COM * in the queue, return -1. If the next job has already expired, return 0.
15297746SKelly.Moyer@Sun.COM */
15307746SKelly.Moyer@Sun.COM int
snmp_refresh_get_next_expiration(void)15317746SKelly.Moyer@Sun.COM snmp_refresh_get_next_expiration(void)
15327746SKelly.Moyer@Sun.COM {
15337746SKelly.Moyer@Sun.COM int ret;
15347746SKelly.Moyer@Sun.COM int elapsed;
15357746SKelly.Moyer@Sun.COM
15367746SKelly.Moyer@Sun.COM (void) mutex_lock(&refreshq_lock);
15377746SKelly.Moyer@Sun.COM
15387746SKelly.Moyer@Sun.COM if (n_refreshq_jobs == 0) {
15397746SKelly.Moyer@Sun.COM ret = -1;
15407746SKelly.Moyer@Sun.COM } else {
15417746SKelly.Moyer@Sun.COM elapsed = GET_SCALED_HRTIME() -
15427746SKelly.Moyer@Sun.COM refreshq[refreshq_next_job].last_fetch_time;
15437746SKelly.Moyer@Sun.COM
15447746SKelly.Moyer@Sun.COM if (elapsed >= MAX_INCACHE_TIME) {
15457746SKelly.Moyer@Sun.COM ret = 0;
15467746SKelly.Moyer@Sun.COM } else {
15477746SKelly.Moyer@Sun.COM ret = (MAX_INCACHE_TIME - elapsed) * HRTIME_SCALE;
15487746SKelly.Moyer@Sun.COM }
15497746SKelly.Moyer@Sun.COM }
15507746SKelly.Moyer@Sun.COM
15517746SKelly.Moyer@Sun.COM (void) mutex_unlock(&refreshq_lock);
15527746SKelly.Moyer@Sun.COM
15537746SKelly.Moyer@Sun.COM return (ret);
15547746SKelly.Moyer@Sun.COM }
15557746SKelly.Moyer@Sun.COM
15567746SKelly.Moyer@Sun.COM /*
15577746SKelly.Moyer@Sun.COM * Given the number of seconds the client wants to spend on each cyle of
15587746SKelly.Moyer@Sun.COM * processing jobs and then sleeping, return a suggestion for the number of jobs
15597746SKelly.Moyer@Sun.COM * the client should process, calculated by dividing the client's cycle duration
15607746SKelly.Moyer@Sun.COM * by MAX_INCACHE_TIME and multiplying the result by the total number of jobs in
15617746SKelly.Moyer@Sun.COM * the queue. (Note that the actual implementation of that calculation is done
15627746SKelly.Moyer@Sun.COM * in a different order to avoid losing fractional values during integer
15637746SKelly.Moyer@Sun.COM * arithmetic.)
15647746SKelly.Moyer@Sun.COM */
15657746SKelly.Moyer@Sun.COM int
snmp_refresh_get_cycle_hint(int secs)15667746SKelly.Moyer@Sun.COM snmp_refresh_get_cycle_hint(int secs)
15677746SKelly.Moyer@Sun.COM {
15687746SKelly.Moyer@Sun.COM int jobs;
15697746SKelly.Moyer@Sun.COM
15707746SKelly.Moyer@Sun.COM (void) mutex_lock(&refreshq_lock);
15717746SKelly.Moyer@Sun.COM
15727746SKelly.Moyer@Sun.COM /*
15737746SKelly.Moyer@Sun.COM * First, we need to scale the client's cycle time to get it into the
15747746SKelly.Moyer@Sun.COM * same units we use internally (i.e. tens of seconds). We round up, as
15757746SKelly.Moyer@Sun.COM * it makes more sense for the client to process extra jobs than
15767746SKelly.Moyer@Sun.COM * insufficient jobs. If the client's desired cycle time is greater
15777746SKelly.Moyer@Sun.COM * than MAX_INCACHE_TIME, we just return the current total number of
15787746SKelly.Moyer@Sun.COM * jobs.
15797746SKelly.Moyer@Sun.COM */
15807746SKelly.Moyer@Sun.COM secs = (secs + HRTIME_SCALE - 1) / HRTIME_SCALE;
15817746SKelly.Moyer@Sun.COM
15827746SKelly.Moyer@Sun.COM jobs = (n_refreshq_jobs * secs) / MAX_INCACHE_TIME;
15837746SKelly.Moyer@Sun.COM if (jobs > n_refreshq_jobs) {
15847746SKelly.Moyer@Sun.COM jobs = n_refreshq_jobs;
15857746SKelly.Moyer@Sun.COM }
15867746SKelly.Moyer@Sun.COM
15877746SKelly.Moyer@Sun.COM (void) mutex_unlock(&refreshq_lock);
15887746SKelly.Moyer@Sun.COM
15897746SKelly.Moyer@Sun.COM return (jobs);
15907746SKelly.Moyer@Sun.COM }
15917746SKelly.Moyer@Sun.COM
15927746SKelly.Moyer@Sun.COM /*
15937746SKelly.Moyer@Sun.COM * Process the next job on the refresh queue by invoking fetch_bulk() with the
15947746SKelly.Moyer@Sun.COM * recorded parameters. Return -1 if no job was processed (e.g. because there
15957746SKelly.Moyer@Sun.COM * aren't any available), or 0 if a job was processed. We don't actually care
15967746SKelly.Moyer@Sun.COM * if fetch_bulk() fails, since we're just working on cache entry refreshing and
15977746SKelly.Moyer@Sun.COM * the worst case result of failing here is a longer delay getting that data the
15987746SKelly.Moyer@Sun.COM * next time it is requested.
15997746SKelly.Moyer@Sun.COM */
16007746SKelly.Moyer@Sun.COM int
snmp_refresh_process_job(void)16017746SKelly.Moyer@Sun.COM snmp_refresh_process_job(void)
16027746SKelly.Moyer@Sun.COM {
16037746SKelly.Moyer@Sun.COM struct picl_snmphdl *smd;
16047746SKelly.Moyer@Sun.COM char *oidstrs;
16057746SKelly.Moyer@Sun.COM int n_oids;
16067746SKelly.Moyer@Sun.COM int row;
16077746SKelly.Moyer@Sun.COM int err;
16087746SKelly.Moyer@Sun.COM
16097746SKelly.Moyer@Sun.COM (void) mutex_lock(&refreshq_lock);
16107746SKelly.Moyer@Sun.COM
16117746SKelly.Moyer@Sun.COM if (n_refreshq_jobs == 0) {
16127746SKelly.Moyer@Sun.COM (void) mutex_unlock(&refreshq_lock);
16137746SKelly.Moyer@Sun.COM
16147746SKelly.Moyer@Sun.COM return (-1);
16157746SKelly.Moyer@Sun.COM }
16167746SKelly.Moyer@Sun.COM
16177746SKelly.Moyer@Sun.COM smd = refreshq[refreshq_next_job].smd;
16187746SKelly.Moyer@Sun.COM oidstrs = refreshq[refreshq_next_job].oidstrs;
16197746SKelly.Moyer@Sun.COM n_oids = refreshq[refreshq_next_job].n_oids;
16207746SKelly.Moyer@Sun.COM row = refreshq[refreshq_next_job].row;
16217746SKelly.Moyer@Sun.COM
16227746SKelly.Moyer@Sun.COM refreshq_next_job = (refreshq_next_job + 1) % n_refreshq_slots;
16237746SKelly.Moyer@Sun.COM n_refreshq_jobs--;
16247746SKelly.Moyer@Sun.COM
16257746SKelly.Moyer@Sun.COM (void) mutex_unlock(&refreshq_lock);
16267746SKelly.Moyer@Sun.COM
16277746SKelly.Moyer@Sun.COM
16287746SKelly.Moyer@Sun.COM /*
16297746SKelly.Moyer@Sun.COM * fetch_bulk() is going to come right back into the refresh code to add
16307746SKelly.Moyer@Sun.COM * a new job for the entry we just loaded, which means we have to make
16317746SKelly.Moyer@Sun.COM * the call without holding the refreshq_lock mutex.
16327746SKelly.Moyer@Sun.COM */
16337746SKelly.Moyer@Sun.COM fetch_bulk(smd, oidstrs, n_oids, row, 1, &err);
16347746SKelly.Moyer@Sun.COM
16357746SKelly.Moyer@Sun.COM return (0);
16367746SKelly.Moyer@Sun.COM }
1637