1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * rc_node.c - In-memory SCF object management
28 *
29 * This layer manages the in-memory cache (the Repository Cache) of SCF
30 * data. Read requests are usually satisfied from here, but may require
31 * load calls to the "object" layer. Modify requests always write-through
32 * to the object layer.
33 *
34 * SCF data comprises scopes, services, instances, snapshots, snaplevels,
35 * property groups, properties, and property values. All but the last are
36 * known here as "entities" and are represented by rc_node_t data
37 * structures. (Property values are kept in the rn_values member of the
38 * respective property, not as separate objects.) All entities besides
39 * the "localhost" scope have some entity as a parent, and therefore form
40 * a tree.
41 *
42 * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
43 * the "localhost" scope. The tree is filled in from the database on-demand
44 * by rc_node_fill_children().
45 *
46 * rc_node_t's are also placed in the cache_hash[] hash table, for rapid
47 * lookup.
48 *
49 * Multiple threads may service client requests, so access to each
50 * rc_node_t is synchronized by its rn_lock member. Some fields are
51 * protected by bits in the rn_flags field instead, to support operations
52 * which need to drop rn_lock, for example to respect locking order. Such
53 * flags should be manipulated with the rc_node_{hold,rele}_flag()
54 * functions.
55 *
56 * We track references to nodes to tell when they can be free()d. rn_refs
57 * should be incremented with rc_node_hold() on the creation of client
58 * references (rc_node_ptr_t's and rc_iter_t's). rn_erefs ("ephemeral
59 * references") should be incremented when a pointer is read into a local
60 * variable of a thread, with rc_node_hold_ephemeral_locked(). This
61 * hasn't been fully implemented, however, so rc_node_rele() tolerates
62 * rn_erefs being 0. Some code which predates rn_erefs counts ephemeral
63 * references in rn_refs. Other references are tracked by the
64 * rn_other_refs field and the RC_NODE_DEAD, RC_NODE_IN_PARENT,
65 * RC_NODE_OLD, and RC_NODE_ON_FORMER flags.
66 *
67 * Locking rules: To dereference an rc_node_t * (usually to lock it), you must
68 * have a hold (rc_node_hold()) on it or otherwise be sure that it hasn't been
69 * rc_node_destroy()ed (hold a lock on its parent or child, hold a flag,
70 * etc.). Once you have locked an rc_node_t you must check its rn_flags for
71 * RC_NODE_DEAD before you can use it. This is usually done with the
72 * rc_node_{wait,hold}_flag() functions (often via the rc_node_check_*()
73 * functions & RC_NODE_*() macros), which fail if the object has died.
74 *
75 * When a transactional node (property group or snapshot) is updated,
76 * a new node takes the place of the old node in the global hash and the
77 * old node is hung off of the rn_former list of the new node. At the
78 * same time, all of its children have their rn_parent_ref pointer set,
79 * and any holds they have are reflected in the old node's rn_other_refs
80 * count. This is automatically kept up to date until the final reference
81 * to the subgraph is dropped, at which point the node is unrefed and
82 * destroyed, along with all of its children.
83 *
84 * Because name service lookups may take a long time and, more importantly,
85 * may trigger additional accesses to the repository, perm_granted() must be
86 * called without holding any locks.
87 *
88 * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
89 * call via rc_node_setup_iter() to populate the rn_children uu_list of the
90 * rc_node_t * in question and a call to uu_list_walk_start() on that list. For
91 * ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
92 * appropriate child.
93 *
94 * An ITER_START for an ENTITY_VALUE makes sure the node has its values
95 * filled, and sets up the iterator. An ITER_READ_VALUE just copies out
96 * the proper values and updates the offset information.
97 *
98 * To allow aliases, snapshots are implemented with a level of indirection.
99 * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
100 * snapshot.c which contains the authoritative snaplevel information. The
101 * snapid is "assigned" by rc_attach_snapshot().
102 *
103 * We provide the client layer with rc_node_ptr_t's to reference objects.
104 * Objects referred to by them are automatically held & released by
105 * rc_node_assign() & rc_node_clear(). The RC_NODE_PTR_*() macros are used at
106 * client.c entry points to read the pointers. They fetch the pointer to the
107 * object, return (from the function) if it is dead, and lock, hold, or hold
108 * a flag of the object.
109 */
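
/*
 * Illustration only: a minimal sketch (not code from this file) of the
 * dereference pattern described above, assuming the caller already
 * guarantees that np cannot be destroyed (e.g. via a held rc_node_ptr_t).
 * The helper name example_node_is_pg() is hypothetical; rc_node_hold(),
 * rc_node_rele(), RC_NODE_DEAD, and rn_id.rl_type are the real interfaces
 * the comment above refers to.
 *
 *	static int
 *	example_node_is_pg(rc_node_t *np)
 *	{
 *		int ispg;
 *
 *		rc_node_hold(np);
 *		(void) pthread_mutex_lock(&np->rn_lock);
 *		if (np->rn_flags & RC_NODE_DEAD) {
 *			(void) pthread_mutex_unlock(&np->rn_lock);
 *			rc_node_rele(np);
 *			return (REP_PROTOCOL_FAIL_DELETED);
 *		}
 *		ispg = (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP);
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		rc_node_rele(np);
 *		return (ispg);
 *	}
 */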
110
111 /*
112 * Permission checking is authorization-based: some operations may only
113 * proceed if the user has been assigned at least one of a set of
114 * authorization strings. The set of enabling authorizations depends on the
115 * operation and the target object. The set of authorizations assigned to
116 * a user is determined by an algorithm defined in libsecdb.
117 *
118 * The fastest way to decide whether the two sets intersect is by entering the
119 * strings into a hash table and detecting collisions, which takes linear time
120 * in the total size of the sets. The exception is the authorization
121 * patterns which may be assigned to users: without advanced
122 * pattern-matching algorithms, each pattern costs O(n) in the number of
123 * enabling authorizations.
124 *
125 * We can achieve some practical speed-ups by noting that if we enter all of
126 * the authorizations from one of the sets into the hash table we can merely
127 * check the elements of the second set for existence without adding them.
128 * This reduces memory requirements and hash table clutter. The enabling set
129 * is well suited for this because it is internal to configd (for now, at
130 * least). Combine this with short-circuiting and we can even minimize the
131 * number of queries to the security databases (user_attr & prof_attr).
132 *
133 * To force this usage onto clients we provide functions for adding
134 * authorizations to the enabling set of a permission context structure
135 * (perm_add_*()) and one to decide whether the user associated with the
136 * current door call client possesses any of them (perm_granted()).
137 *
138 * At some point, a generic version of this should move to libsecdb.
139 *
140 * While entering the enabling strings into the hash table, we keep track
141 * of which is the most specific for use in generating auditing events.
142 * See the "Collecting the Authorization String" section of the "SMF Audit
143 * Events" block comment below.
144 */
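
/*
 * Illustration only: a hypothetical caller of the permission-checking
 * interfaces described above.  pc_create(), perm_add_enabling(),
 * perm_granted(), and pc_free() are all defined later in this file;
 * example_check_manage() is invented for the sketch.  Note that
 * perm_granted() must be called without holding any locks, and real
 * callers also record pcp->pc_auth_string for the audit event (see
 * map_granted_status()).
 *
 *	static perm_status_t
 *	example_check_manage(void)
 *	{
 *		permcheck_t *pcp;
 *		perm_status_t res;
 *
 *		if ((pcp = pc_create()) == NULL)
 *			return (PERM_FAIL);
 *		if (perm_add_enabling(pcp, AUTH_MANAGE) !=
 *		    REP_PROTOCOL_SUCCESS ||
 *		    perm_add_enabling(pcp, AUTH_MODIFY) !=
 *		    REP_PROTOCOL_SUCCESS) {
 *			pc_free(pcp);
 *			return (PERM_FAIL);
 *		}
 *		res = perm_granted(pcp);
 *		pc_free(pcp);
 *		return (res);
 *	}
 */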
145
146 /*
147 * Composition is the combination of sets of properties. The sets are ordered
148 * and properties in higher sets obscure properties of the same name in lower
149 * sets. Here we present a composed view of an instance's properties as the
150 * union of its properties and its service's properties. Similarly the
151 * properties of snaplevels are combined to form a composed view of the
152 * properties of a snapshot (which should match the composed view of the
153 * properties of the instance when the snapshot was taken).
154 *
155 * In terms of the client interface, the client may request that a property
156 * group iterator for an instance or snapshot be composed. Property groups
157 * traversed by such an iterator may not have the target entity as a parent.
158 * Similarly, the properties traversed by a property iterator for those
159 * property groups may not have the property groups iterated as parents.
160 *
161 * Implementation requires that iterators for instances and snapshots be
162 * composition-savvy, and that we have a "composed property group" entity
163 * which represents the composition of a number of property groups. Iteration
164 * over "composed property groups" yields properties which may have different
165 * parents, but for all other operations a composed property group behaves
166 * like the top-most property group it represents.
167 *
168 * The implementation is based on the rn_cchain[] array of rc_node_t pointers
169 * in rc_node_t. For instances, the pointers point to the instance and its
170 * parent service. For snapshots they point to the child snaplevels, and for
171 * composed property groups they point to property groups. A composed
172 * iterator carries an index into rn_cchain[]. Thus most of the magic ends up
173 * in the rc_iter_*() code.
174 */
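
/*
 * Illustration only: the composed-lookup idea from the comment above in
 * pseudocode form.  rn_cchain[] is the real member; the helper
 * lookup_child() and the bound composition_depth are invented for the
 * example.
 *
 *	Given an instance I with parent service S, I's rn_cchain[] is
 *	{ I, S }.  A composed lookup of property group "pg" tries each
 *	entry in order and returns the first match, so a group defined on
 *	the instance obscures one of the same name on the service:
 *
 *	for (i = 0; i < composition_depth; i++) {
 *		if (cp->rn_cchain[i] != NULL &&
 *		    lookup_child(cp->rn_cchain[i], "pg", &res) == 0)
 *			return (res);
 *	}
 */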
175 /*
176 * SMF Audit Events:
177 * ================
178 *
179 * To maintain security, SMF generates audit events whenever
180 * privileged operations are attempted. See the System Administration
181 * Guide: Security Services answerbook for a discussion of the Solaris
182 * audit system.
183 *
184 * The SMF audit event codes are defined in adt_event.h by symbols
185 * starting with ADT_smf_ and are described in audit_event.txt. The
186 * audit record structures are defined in the SMF section of adt.xml.
187 * adt.xml is used to automatically generate adt_event.h which
188 * contains the definitions that we code to in this file. For the
189 * most part the audit events map closely to actions that you would
190 * perform with svcadm or svccfg, but there are some special cases
191 * which we'll discuss later.
192 *
193 * The software associated with SMF audit events falls into three
194 * categories:
195 * - collecting information to be written to the audit
196 * records
197 * - using the adt_* functions in
198 * usr/src/lib/libbsm/common/adt.c to generate the audit
199 * records.
200 * - handling special cases
201 *
202 * Collecting Information:
203 * ----------------------
204 *
205 * Almost all of the audit events require the FMRI of the affected
206 * object and the authorization string that was used. The one
207 * exception is ADT_smf_annotation which we'll talk about later.
208 *
209 * Collecting the FMRI:
210 *
211 * The rc_node structure has a member called rn_fmri which points to
212 * its FMRI. This is initialized by a call to rc_node_build_fmri()
213 * when the node's parent is established. The reason for doing it
214 * at this time is that a node's FMRI is basically the concatenation
215 * of the parent's FMRI and the node's name with the appropriate
216 * decoration. rc_node_build_fmri() does this concatenation and
217 * decorating. It is called from rc_node_link_child() and
218 * rc_node_relink_child() where a node is linked to its parent.
219 *
220 * rc_node_get_fmri_or_fragment() is called to retrieve a node's FMRI
221 * when it is needed. It returns rn_fmri if it is set. If the node
222 * is at the top level, however, rn_fmri won't be set because it was
223 * never linked to a parent. In this case,
224 * rc_node_get_fmri_or_fragment() constructs an FMRI fragment based on
225 * its node type and its name, rn_name.
226 *
227 * Collecting the Authorization String:
228 *
229 * Naturally, the authorization string is captured during the
230 * authorization checking process. Acceptable authorization strings
231 * are added to a permcheck_t hash table as noted in the section on
232 * permission checking above. Once all entries have been added to the
233 * hash table, perm_granted() is called. If the client is authorized,
234 * perm_granted() returns with pc_auth_string of the permcheck_t
235 * structure pointing to the authorization string.
236 *
237 * This works fine if the client is authorized, but what happens if
238 * the client is not authorized? We need to report the required
239 * authorization string. This is the authorization that would have
240 * been used if permission had been granted. perm_granted() will
241 * find no match, so it needs to decide which string in the hash
242 * table to use as the required authorization string. It needs to do
243 * this because configd is still going to generate an event. A
244 * design decision was made to use the most specific authorization
245 * in the hash table. The pc_auth_type enum designates the
246 * specificity of an authorization string. For example, an
247 * authorization string that is declared in an instance PG is more
248 * specific than one that is declared in a service PG.
249 *
250 * The pc_add() function keeps track of the most specific
251 * authorization in the hash table. It does this using the
252 * pc_specific and pc_specific_type members of the permcheck
253 * structure. pc_add() updates these members whenever a more
254 * specific authorization string is added to the hash table. Thus, if
255 * an authorization match is not found, perm_granted() will return
256 * with pc_auth_string in the permcheck_t pointing to the string that
257 * is referenced by pc_specific.
258 *
259 * Generating the Audit Events:
260 * ===========================
261 *
262 * As the functions in this file process requests for clients of
263 * configd, they gather the information that is required for an audit
264 * event. Eventually, the request processing gets to the point where
265 * the authorization is rejected or to the point where the requested
266 * action was attempted. At these two points smf_audit_event() is
267 * called.
268 *
269 * smf_audit_event() takes 4 parameters:
270 * - the event ID which is one of the ADT_smf_* symbols from
271 * adt_event.h.
272 * - status to pass to adt_put_event()
273 * - return value to pass to adt_put_event()
274 * - the event data (see audit_event_data structure)
275 *
276 * All interactions with the auditing software require an audit
277 * session. We use one audit session per configd client. We keep
278 * track of the audit session in the repcache_client structure.
279 * smf_audit_event() calls get_audit_session() to get the session
280 * pointer.
281 *
282 * smf_audit_event() then calls adt_alloc_event() to allocate an
283 * adt_event_data union which is defined in adt_event.h, copies the
284 * data into the appropriate members of the union and calls
285 * adt_put_event() to generate the event.
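 *
 * For illustration, a hypothetical call site might look like the
 * following (ADT_smf_refresh and ADT_SUCCESS are real identifiers; the
 * variables fmri_buf and auth_string are invented for the example):
 *
 *	audit_event_data_t audit_data;
 *
 *	bzero(&audit_data, sizeof (audit_data));
 *	audit_data.ed_fmri = fmri_buf;
 *	audit_data.ed_auth = auth_string;
 *	smf_audit_event(ADT_smf_refresh, ADT_SUCCESS, ADT_SUCCESS,
 *	    &audit_data);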
286 *
287 * Special Cases:
288 * =============
289 *
290 * There are three major types of special cases:
291 *
292 * - gathering event information for each action in a
293 * transaction
294 * - Higher level events represented by special property
295 * group/property name combinations. Many of these are
296 * restarter actions.
297 * - ADT_smf_annotation event
298 *
299 * Processing Transaction Actions:
300 * ------------------------------
301 *
302 * A transaction can contain multiple actions to modify, create or
303 * delete one or more properties. We need to capture information so
304 * that we can generate an event for each property action. The
305 * transaction information is stored in a tx_commit_data_t, and
306 * object.c provides accessor functions to retrieve data from this
307 * structure. rc_tx_commit() obtains a tx_commit_data_t by calling
308 * tx_commit_data_new() and passes this to object_tx_commit() to
309 * commit the transaction. Then we call generate_property_events() to
310 * generate an audit event for each property action.
311 *
312 * Special Properties:
313 * ------------------
314 *
315 * There are combinations of property group/property name that are special.
316 * They are special because they have specific meaning to startd. startd
317 * interprets them in a service-independent fashion.
318 * restarter_actions/refresh and general/enabled are two examples of these.
319 * A special event is generated for these properties in addition to the
320 * regular property event described in the previous section. The special
321 * properties are declared as an array of audit_special_prop_item
322 * structures at special_props_list in rc_node.c.
323 *
324 * In the previous section, we mentioned the
325 * generate_property_events() function that generates an event for
326 * every property action. Before generating the event,
327 * generate_property_events() calls special_property_event().
328 * special_property_event() checks to see if the action involves a
329 * special property. If it does, it generates a special audit
330 * event.
331 *
332 * ADT_smf_annotation event:
333 * ------------------------
334 *
335 * This is a special event unlike any other. It allows the svccfg
336 * program to store an annotation in the event log before a series
337 * of transactions is processed. It is used with the import and
338 * apply svccfg commands. svccfg uses the rep_protocol_annotation
339 * message to pass the operation (import or apply) and the file name
340 * to configd. The set_annotation() function in client.c stores
341 * these away in the repcache_client structure. The address of
342 * this structure is saved in the thread_info structure.
343 *
344 * Before it generates any events, smf_audit_event() calls
345 * smf_annotation_event(). smf_annotation_event() calls
346 * client_annotation_needed() which is defined in client.c. If an
347 * annotation is needed client_annotation_needed() returns the
348 * operation and filename strings that were saved from the
349 * rep_protocol_annotation message. smf_annotation_event() then
350 * generates the ADT_smf_annotation event.
351 */
352
353 #include <assert.h>
354 #include <atomic.h>
355 #include <bsm/adt_event.h>
356 #include <errno.h>
357 #include <libuutil.h>
358 #include <libscf.h>
359 #include <libscf_priv.h>
360 #include <pthread.h>
361 #include <pwd.h>
362 #include <stdio.h>
363 #include <stdlib.h>
364 #include <strings.h>
365 #include <sys/types.h>
366 #include <syslog.h>
367 #include <unistd.h>
368 #include <secdb.h>
369
370 #include "configd.h"
371
372 #define AUTH_PREFIX "solaris.smf."
373 #define AUTH_MANAGE AUTH_PREFIX "manage"
374 #define AUTH_MODIFY AUTH_PREFIX "modify"
375 #define AUTH_MODIFY_PREFIX AUTH_MODIFY "."
376 #define AUTH_PG_ACTIONS SCF_PG_RESTARTER_ACTIONS
377 #define AUTH_PG_ACTIONS_TYPE SCF_PG_RESTARTER_ACTIONS_TYPE
378 #define AUTH_PG_GENERAL SCF_PG_GENERAL
379 #define AUTH_PG_GENERAL_TYPE SCF_PG_GENERAL_TYPE
380 #define AUTH_PG_GENERAL_OVR SCF_PG_GENERAL_OVR
381 #define AUTH_PG_GENERAL_OVR_TYPE SCF_PG_GENERAL_OVR_TYPE
382 #define AUTH_PROP_ACTION "action_authorization"
383 #define AUTH_PROP_ENABLED "enabled"
384 #define AUTH_PROP_MODIFY "modify_authorization"
385 #define AUTH_PROP_VALUE "value_authorization"
386 #define AUTH_PROP_READ "read_authorization"
387
388 #define MAX_VALID_CHILDREN 3
389
390 /*
391 * The ADT_smf_* symbols may not be defined on the build machine. Because
392 * of this, we do not want to compile the _smf_audit_event() function when
393 * doing native builds.
394 */
395 #ifdef NATIVE_BUILD
396 #define smf_audit_event(i, s, r, d)
397 #else
398 #define smf_audit_event(i, s, r, d) _smf_audit_event(i, s, r, d)
399 #endif /* NATIVE_BUILD */
400
401 typedef struct rc_type_info {
402 uint32_t rt_type; /* matches array index */
403 uint32_t rt_num_ids;
404 uint32_t rt_name_flags;
405 uint32_t rt_valid_children[MAX_VALID_CHILDREN];
406 } rc_type_info_t;
407
408 #define RT_NO_NAME -1U
409
410 static rc_type_info_t rc_types[] = {
411 {REP_PROTOCOL_ENTITY_NONE, 0, RT_NO_NAME},
412 {REP_PROTOCOL_ENTITY_SCOPE, 0, 0,
413 {REP_PROTOCOL_ENTITY_SERVICE, REP_PROTOCOL_ENTITY_SCOPE}},
414 {REP_PROTOCOL_ENTITY_SERVICE, 0, UU_NAME_DOMAIN | UU_NAME_PATH,
415 {REP_PROTOCOL_ENTITY_INSTANCE, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
416 {REP_PROTOCOL_ENTITY_INSTANCE, 1, UU_NAME_DOMAIN,
417 {REP_PROTOCOL_ENTITY_SNAPSHOT, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
418 {REP_PROTOCOL_ENTITY_SNAPSHOT, 2, UU_NAME_DOMAIN,
419 {REP_PROTOCOL_ENTITY_SNAPLEVEL, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
420 {REP_PROTOCOL_ENTITY_SNAPLEVEL, 4, RT_NO_NAME,
421 {REP_PROTOCOL_ENTITY_PROPERTYGRP}},
422 {REP_PROTOCOL_ENTITY_PROPERTYGRP, 5, UU_NAME_DOMAIN,
423 {REP_PROTOCOL_ENTITY_PROPERTY}},
424 {REP_PROTOCOL_ENTITY_CPROPERTYGRP, 0, UU_NAME_DOMAIN,
425 {REP_PROTOCOL_ENTITY_PROPERTY}},
426 {REP_PROTOCOL_ENTITY_PROPERTY, 7, UU_NAME_DOMAIN},
427 {-1UL}
428 };
429 #define NUM_TYPES ((sizeof (rc_types) / sizeof (*rc_types)))
430
431 /* Element of a permcheck_t hash table. */
432 struct pc_elt {
433 struct pc_elt *pce_next;
434 char pce_auth[1];
435 };
436
437 /*
438 * If an authorization fails, we must decide which of the elements in the
439 * permcheck hash table to use in the audit event. That is to say, of all
440 * the strings in the hash table, we must choose one and use it in the audit
441 * event. It is desirable to use the most specific string in the audit
442 * event.
443 *
444 * The pc_auth_type specifies the types (sources) of authorization
445 * strings. The enum is ordered in increasing specificity.
446 */
447 typedef enum pc_auth_type {
448 PC_AUTH_NONE = 0, /* no auth string available. */
449 PC_AUTH_SMF, /* strings coded into SMF. */
450 PC_AUTH_SVC, /* strings specified in PG of a service. */
451 PC_AUTH_INST /* strings specified in PG of an instance. */
452 } pc_auth_type_t;
453
454 /*
455 * The following enum is used to represent the results of the checks to see
456 * if the client has the appropriate permissions to perform an action.
457 */
458 typedef enum perm_status {
459 PERM_DENIED = 0, /* Permission denied. */
460 PERM_GRANTED, /* Client has authorizations. */
461 PERM_GONE, /* Door client went away. */
462 PERM_FAIL /* Generic failure. e.g. resources */
463 } perm_status_t;
464
465 /* An authorization set hash table. */
466 typedef struct {
467 struct pc_elt **pc_buckets;
468 uint_t pc_bnum; /* number of buckets */
469 uint_t pc_enum; /* number of elements */
470 struct pc_elt *pc_specific; /* most specific element */
471 pc_auth_type_t pc_specific_type; /* type of pc_specific */
472 char *pc_auth_string; /* authorization string */
473 /* for audit events */
474 } permcheck_t;
475
476 /*
477 * Structure for holding audit event data. Not all events use all members
478 * of the structure.
479 */
480 typedef struct audit_event_data {
481 char *ed_auth; /* authorization string. */
482 char *ed_fmri; /* affected FMRI. */
483 char *ed_snapname; /* name of snapshot. */
484 char *ed_old_fmri; /* old fmri in attach case. */
485 char *ed_old_name; /* old snapshot in attach case. */
486 char *ed_type; /* prop. group or prop. type. */
487 char *ed_prop_value; /* property value. */
488 } audit_event_data_t;
489
490 /*
491 * Pointer to function to do special processing to get audit event ID.
492 * Audit event IDs are defined in /usr/include/bsm/adt_event.h. Function
493 * returns 0 if ID successfully retrieved. Otherwise it returns -1.
494 */
495 typedef int (*spc_getid_fn_t)(tx_commit_data_t *, size_t, const char *,
496 au_event_t *);
497 static int general_enable_id(tx_commit_data_t *, size_t, const char *,
498 au_event_t *);
499
500 static uu_list_pool_t *rc_children_pool;
501 static uu_list_pool_t *rc_pg_notify_pool;
502 static uu_list_pool_t *rc_notify_pool;
503 static uu_list_pool_t *rc_notify_info_pool;
504
505 static rc_node_t *rc_scope;
506
507 static pthread_mutex_t rc_pg_notify_lock = PTHREAD_MUTEX_INITIALIZER;
508 static pthread_cond_t rc_pg_notify_cv = PTHREAD_COND_INITIALIZER;
509 static uint_t rc_notify_in_use; /* blocks removals */
510
511 /*
512 * Some combinations of property group/property name require a special
513 * audit event to be generated when there is a change.
514 * audit_special_prop_item_t is used to specify these special cases. The
515 * special_props_list array defines a list of these special properties.
516 */
517 typedef struct audit_special_prop_item {
518 const char *api_pg_name; /* property group name. */
519 const char *api_prop_name; /* property name. */
520 au_event_t api_event_id; /* event id or 0. */
521 spc_getid_fn_t api_event_func; /* function to get event id. */
522 } audit_special_prop_item_t;
523
524 /*
525 * Native builds are done using the build machine's standard include
526 * files. These files may not yet have the definitions for the ADT_smf_*
527 * symbols. Thus, we do not compile this table when doing native builds.
528 */
529 #ifndef NATIVE_BUILD
530 /*
531 * The following special_props_list array specifies property group/property
532 * name combinations that have specific meaning to startd. A special event
533 * is generated for these combinations in addition to the regular property
534 * event.
535 *
536 * At run time this array gets sorted. See the call to qsort(3C) in
537 * rc_node_init(). The array is sorted, so that bsearch(3C) can be used
538 * to do lookups.
539 */
540 static audit_special_prop_item_t special_props_list[] = {
541 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADED, ADT_smf_degrade,
542 NULL},
543 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADE_IMMEDIATE,
544 ADT_smf_immediate_degrade, NULL},
545 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_OFF, ADT_smf_clear, NULL},
546 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON,
547 ADT_smf_maintenance, NULL},
548 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMEDIATE,
549 ADT_smf_immediate_maintenance, NULL},
550 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMTEMP,
551 ADT_smf_immtmp_maintenance, NULL},
552 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_TEMPORARY,
553 ADT_smf_tmp_maintenance, NULL},
554 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_REFRESH, ADT_smf_refresh, NULL},
555 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTART, ADT_smf_restart, NULL},
556 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTORE, ADT_smf_clear, NULL},
557 {SCF_PG_OPTIONS, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
558 {SCF_PG_OPTIONS_OVR, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
559 {SCF_PG_GENERAL, SCF_PROPERTY_ENABLED, 0, general_enable_id},
560 {SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 0, general_enable_id}
561 };
562 #define SPECIAL_PROP_COUNT (sizeof (special_props_list) /\
563 sizeof (audit_special_prop_item_t))
564 #endif /* NATIVE_BUILD */
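
/*
 * Illustration only: the sort/lookup arrangement mentioned in the comment
 * above might use a comparator of roughly this shape.  The name
 * special_prop_compare() is used here only for the example; see
 * rc_node_init() for the actual qsort(3C) call.
 *
 *	static int
 *	special_prop_compare(const void *l, const void *r)
 *	{
 *		const audit_special_prop_item_t *a = l;
 *		const audit_special_prop_item_t *b = r;
 *		int rc;
 *
 *		if ((rc = strcmp(a->api_pg_name, b->api_pg_name)) != 0)
 *			return (rc);
 *		return (strcmp(a->api_prop_name, b->api_prop_name));
 *	}
 *
 * A lookup then fills in a key item with the property group and property
 * names and calls bsearch(3C) over the SPECIAL_PROP_COUNT entries of
 * special_props_list.
 */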
565
566 /*
567 * We support an arbitrary number of clients interested in events for certain
568 * types of changes. Each client is represented by an rc_notify_info_t, and
569 * all clients are chained onto the rc_notify_info_list.
570 *
571 * The rc_notify_list is the global notification list. Each entry is of
572 * type rc_notify_t, which is embedded in one of three other structures:
573 *
574 * rc_node_t property group update notification
575 * rc_notify_delete_t object deletion notification
576 * rc_notify_info_t notification clients
577 *
578 * The type of object is determined by which pointer in the rc_notify_t is
579 * non-NULL.
580 *
581 * New notifications and clients are added to the end of the list.
582 * Notifications no-one is interested in are never added to the list.
583 *
584 * Clients use their position in the list to track which notifications they
585 * have not yet reported. As they process notifications, they move forward
586 * in the list past them. There is always a client at the beginning of the
587 * list -- as it moves past notifications, it removes them from the list and
588 * cleans them up.
589 *
590 * The rc_pg_notify_lock protects all notification state. The rc_pg_notify_cv
591 * is used for global signalling, and each client has a cv on which it
592 * waits for events of interest.
593 *
594 * rc_notify_in_use is used to protect rc_notify_list from deletions when
595 * the rc_pg_notify_lock is dropped. Specifically, rc_notify_info_wait()
596 * must drop the lock to call rc_node_assign(), and then it reacquires the
597 * lock. Deletions from rc_notify_list during this period are not
598 * allowed. Insertions do not matter, because they are always done at the
599 * end of the list.
600 */
601 static uu_list_t *rc_notify_info_list;
602 static uu_list_t *rc_notify_list;
603
604 #define HASH_SIZE 512
605 #define HASH_MASK (HASH_SIZE - 1)
606
607 #pragma align 64(cache_hash)
608 static cache_bucket_t cache_hash[HASH_SIZE];
609
610 #define CACHE_BUCKET(h) (&cache_hash[(h) & HASH_MASK])
611
612
613 static void rc_node_no_client_refs(rc_node_t *np);
614
615
616 static uint32_t
617 rc_node_hash(rc_node_lookup_t *lp)
618 {
619 uint32_t type = lp->rl_type;
620 uint32_t backend = lp->rl_backend;
621 uint32_t mainid = lp->rl_main_id;
622 uint32_t *ids = lp->rl_ids;
623
624 rc_type_info_t *tp = &rc_types[type];
625 uint32_t num_ids;
626 uint32_t left;
627 uint32_t hash;
628
629 assert(backend == BACKEND_TYPE_NORMAL ||
630 backend == BACKEND_TYPE_NONPERSIST);
631
632 assert(type > 0 && type < NUM_TYPES);
633 num_ids = tp->rt_num_ids;
634
635 left = MAX_IDS - num_ids;
636 assert(num_ids <= MAX_IDS);
637
638 hash = type * 7 + mainid * 5 + backend;
639
640 while (num_ids-- > 0)
641 hash = hash * 11 + *ids++ * 7;
642
643 /*
644 * the rest should be zeroed
645 */
646 while (left-- > 0)
647 assert(*ids++ == 0);
648
649 return (hash);
650 }
651
652 static int
653 rc_node_match(rc_node_t *np, rc_node_lookup_t *l)
654 {
655 rc_node_lookup_t *r = &np->rn_id;
656 rc_type_info_t *tp;
657 uint32_t type;
658 uint32_t num_ids;
659
660 if (r->rl_main_id != l->rl_main_id)
661 return (0);
662
663 type = r->rl_type;
664 if (type != l->rl_type)
665 return (0);
666
667 assert(type > 0 && type < NUM_TYPES);
668
669 tp = &rc_types[r->rl_type];
670 num_ids = tp->rt_num_ids;
671
672 assert(num_ids <= MAX_IDS);
673 while (num_ids-- > 0)
674 if (r->rl_ids[num_ids] != l->rl_ids[num_ids])
675 return (0);
676
677 return (1);
678 }
679
680 /*
681 * Register an ephemeral reference to np. This should be done while both
682 * the persistent reference from which the np pointer was read is locked
683 * and np itself is locked. This guarantees that another thread which
684 * thinks it has the last reference will yield without destroying the
685 * node.
686 */
687 static void
688 rc_node_hold_ephemeral_locked(rc_node_t *np)
689 {
690 assert(MUTEX_HELD(&np->rn_lock));
691
692 ++np->rn_erefs;
693 }
694
695 /*
696 * the "other" references on a node are maintained in an atomically
697 * updated refcount, rn_other_refs. This can be bumped from arbitrary
698 * context, and tracks references to a possibly out-of-date node's children.
699 *
700 * To prevent the node from disappearing between the final drop of
701 * rn_other_refs and the unref handling, rn_other_refs_held is bumped on
702 * 0->1 transitions and decremented (with the node lock held) on 1->0
703 * transitions.
704 */
705 static void
706 rc_node_hold_other(rc_node_t *np)
707 {
708 if (atomic_add_32_nv(&np->rn_other_refs, 1) == 1) {
709 atomic_add_32(&np->rn_other_refs_held, 1);
710 assert(np->rn_other_refs_held > 0);
711 }
712 assert(np->rn_other_refs > 0);
713 }
714
715 /*
716 * No node locks may be held
717 */
718 static void
719 rc_node_rele_other(rc_node_t *np)
720 {
721 assert(np->rn_other_refs > 0);
722 if (atomic_add_32_nv(&np->rn_other_refs, -1) == 0) {
723 (void) pthread_mutex_lock(&np->rn_lock);
724 assert(np->rn_other_refs_held > 0);
725 if (atomic_add_32_nv(&np->rn_other_refs_held, -1) == 0 &&
726 np->rn_refs == 0 && (np->rn_flags & RC_NODE_OLD)) {
727 /*
728 * This was the last client reference. Destroy
729 * any other references and free() the node.
730 */
731 rc_node_no_client_refs(np);
732 } else {
733 (void) pthread_mutex_unlock(&np->rn_lock);
734 }
735 }
736 }
737
738 static void
739 rc_node_hold_locked(rc_node_t *np)
740 {
741 assert(MUTEX_HELD(&np->rn_lock));
742
743 if (np->rn_refs == 0 && (np->rn_flags & RC_NODE_PARENT_REF))
744 rc_node_hold_other(np->rn_parent_ref);
745 np->rn_refs++;
746 assert(np->rn_refs > 0);
747 }
748
749 static void
750 rc_node_hold(rc_node_t *np)
751 {
752 (void) pthread_mutex_lock(&np->rn_lock);
753 rc_node_hold_locked(np);
754 (void) pthread_mutex_unlock(&np->rn_lock);
755 }
756
757 static void
758 rc_node_rele_locked(rc_node_t *np)
759 {
760 int unref = 0;
761 rc_node_t *par_ref = NULL;
762
763 assert(MUTEX_HELD(&np->rn_lock));
764 assert(np->rn_refs > 0);
765
766 if (--np->rn_refs == 0) {
767 if (np->rn_flags & RC_NODE_PARENT_REF)
768 par_ref = np->rn_parent_ref;
769
770 /*
771 * Composed property groups are only as good as their
772 * references.
773 */
774 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
775 np->rn_flags |= RC_NODE_DEAD;
776
777 if ((np->rn_flags & (RC_NODE_DEAD|RC_NODE_OLD)) &&
778 np->rn_other_refs == 0 && np->rn_other_refs_held == 0)
779 unref = 1;
780 }
781
782 if (unref) {
783 /*
784 * This was the last client reference. Destroy any other
785 * references and free() the node.
786 */
787 rc_node_no_client_refs(np);
788 } else {
789 /*
790 * rn_erefs can be 0 if we acquired the reference in
791 * a path which hasn't been updated to increment rn_erefs.
792 * When all paths which end here are updated, we should
793 * assert rn_erefs > 0 and always decrement it.
794 */
795 if (np->rn_erefs > 0)
796 --np->rn_erefs;
797 (void) pthread_mutex_unlock(&np->rn_lock);
798 }
799
800 if (par_ref != NULL)
801 rc_node_rele_other(par_ref);
802 }
803
804 void
805 rc_node_rele(rc_node_t *np)
806 {
807 (void) pthread_mutex_lock(&np->rn_lock);
808 rc_node_rele_locked(np);
809 }
810
811 static cache_bucket_t *
812 cache_hold(uint32_t h)
813 {
814 cache_bucket_t *bp = CACHE_BUCKET(h);
815 (void) pthread_mutex_lock(&bp->cb_lock);
816 return (bp);
817 }
818
819 static void
820 cache_release(cache_bucket_t *bp)
821 {
822 (void) pthread_mutex_unlock(&bp->cb_lock);
823 }
824
825 static rc_node_t *
826 cache_lookup_unlocked(cache_bucket_t *bp, rc_node_lookup_t *lp)
827 {
828 uint32_t h = rc_node_hash(lp);
829 rc_node_t *np;
830
831 assert(MUTEX_HELD(&bp->cb_lock));
832 assert(bp == CACHE_BUCKET(h));
833
834 for (np = bp->cb_head; np != NULL; np = np->rn_hash_next) {
835 if (np->rn_hash == h && rc_node_match(np, lp)) {
836 rc_node_hold(np);
837 return (np);
838 }
839 }
840
841 return (NULL);
842 }
843
844 static rc_node_t *
845 cache_lookup(rc_node_lookup_t *lp)
846 {
847 uint32_t h;
848 cache_bucket_t *bp;
849 rc_node_t *np;
850
851 h = rc_node_hash(lp);
852 bp = cache_hold(h);
853
854 np = cache_lookup_unlocked(bp, lp);
855
856 cache_release(bp);
857
858 return (np);
859 }
860
861 static void
862 cache_insert_unlocked(cache_bucket_t *bp, rc_node_t *np)
863 {
864 assert(MUTEX_HELD(&bp->cb_lock));
865 assert(np->rn_hash == rc_node_hash(&np->rn_id));
866 assert(bp == CACHE_BUCKET(np->rn_hash));
867
868 assert(np->rn_hash_next == NULL);
869
870 np->rn_hash_next = bp->cb_head;
871 bp->cb_head = np;
872 }
873
874 static void
875 cache_remove_unlocked(cache_bucket_t *bp, rc_node_t *np)
876 {
877 rc_node_t **npp;
878
879 assert(MUTEX_HELD(&bp->cb_lock));
880 assert(np->rn_hash == rc_node_hash(&np->rn_id));
881 assert(bp == CACHE_BUCKET(np->rn_hash));
882
883 for (npp = &bp->cb_head; *npp != NULL; npp = &(*npp)->rn_hash_next)
884 if (*npp == np)
885 break;
886
887 assert(*npp == np);
888 *npp = np->rn_hash_next;
889 np->rn_hash_next = NULL;
890 }
891
892 /*
893 * verify that the 'parent' type can have a child typed 'child'
894 * Fails with
895 * _INVALID_TYPE - argument is invalid
896 * _TYPE_MISMATCH - parent type cannot have children of type child
897 */
898 static int
899 rc_check_parent_child(uint32_t parent, uint32_t child)
900 {
901 int idx;
902 uint32_t type;
903
904 if (parent == 0 || parent >= NUM_TYPES ||
905 child == 0 || child >= NUM_TYPES)
906 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
907
908 for (idx = 0; idx < MAX_VALID_CHILDREN; idx++) {
909 type = rc_types[parent].rt_valid_children[idx];
910 if (type == child)
911 return (REP_PROTOCOL_SUCCESS);
912 }
913
914 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
915 }
916
917 /*
918 * Fails with
919 * _INVALID_TYPE - type is invalid
920 * _BAD_REQUEST - name is an invalid name for a node of type type
921 */
922 int
923 rc_check_type_name(uint32_t type, const char *name)
924 {
925 if (type == 0 || type >= NUM_TYPES)
926 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
927
928 if (uu_check_name(name, rc_types[type].rt_name_flags) == -1)
929 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
930
931 return (REP_PROTOCOL_SUCCESS);
932 }
933
934 static int
935 rc_check_pgtype_name(const char *name)
936 {
937 if (uu_check_name(name, UU_NAME_DOMAIN) == -1)
938 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
939
940 return (REP_PROTOCOL_SUCCESS);
941 }
942
943 /*
944 * rc_node_free_fmri should be called whenever a node loses its parent.
945 * The reason is that the node's fmri string is built up by concatenating
946 * its name to the parent's fmri. Thus, when the node no longer has a
947 * parent, its fmri is no longer valid.
948 */
949 static void
950 rc_node_free_fmri(rc_node_t *np)
951 {
952 if (np->rn_fmri != NULL) {
953 free((void *)np->rn_fmri);
954 np->rn_fmri = NULL;
955 }
956 }
957
958 /*
959 * Concatenate the appropriate separator and the FMRI element to the base
960 * FMRI string at fmri.
961 *
962 * Fails with
963 * _TRUNCATED Not enough room in buffer at fmri.
964 */
965 static int
966 rc_concat_fmri_element(
967 char *fmri, /* base fmri */
968 size_t bufsize, /* size of buf at fmri */
969 size_t *sz_out, /* receives result size. */
970 const char *element, /* element name to concat */
971 rep_protocol_entity_t type) /* type of element */
972 {
973 size_t actual;
974 const char *name = element;
975 int rc;
976 const char *separator;
977
978 if (bufsize > 0)
979 *sz_out = strlen(fmri);
980 else
981 *sz_out = 0;
982
983 switch (type) {
984 case REP_PROTOCOL_ENTITY_SCOPE:
985 if (strcmp(element, SCF_FMRI_LOCAL_SCOPE) == 0) {
986 /*
987 * No need to display scope information if we are
988 * in the local scope.
989 */
990 separator = SCF_FMRI_SVC_PREFIX;
991 name = NULL;
992 } else {
993 /*
994 * Need to display scope information, because it is
995 * not the local scope.
996 */
997 separator = SCF_FMRI_SVC_PREFIX SCF_FMRI_SCOPE_PREFIX;
998 }
999 break;
1000 case REP_PROTOCOL_ENTITY_SERVICE:
1001 separator = SCF_FMRI_SERVICE_PREFIX;
1002 break;
1003 case REP_PROTOCOL_ENTITY_INSTANCE:
1004 separator = SCF_FMRI_INSTANCE_PREFIX;
1005 break;
1006 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
1007 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
1008 separator = SCF_FMRI_PROPERTYGRP_PREFIX;
1009 break;
1010 case REP_PROTOCOL_ENTITY_PROPERTY:
1011 separator = SCF_FMRI_PROPERTY_PREFIX;
1012 break;
1013 case REP_PROTOCOL_ENTITY_VALUE:
1014 /*
1015 * A value does not have a separate FMRI from its property,
1016 * so there is nothing to concat.
1017 */
1018 return (REP_PROTOCOL_SUCCESS);
1019 case REP_PROTOCOL_ENTITY_SNAPSHOT:
1020 case REP_PROTOCOL_ENTITY_SNAPLEVEL:
1021 /* Snapshots do not have FMRIs, so there is nothing to do. */
1022 return (REP_PROTOCOL_SUCCESS);
1023 default:
1024 (void) fprintf(stderr, "%s:%d: Unknown protocol type %d.\n",
1025 __FILE__, __LINE__, type);
1026 abort(); /* Missing a case in switch if we get here. */
1027 }
1028
1029 /* Concatenate separator and element to the fmri buffer. */
1030
1031 actual = strlcat(fmri, separator, bufsize);
1032 if (name != NULL) {
1033 if (actual < bufsize) {
1034 actual = strlcat(fmri, name, bufsize);
1035 } else {
1036 actual += strlen(name);
1037 }
1038 }
1039 if (actual < bufsize) {
1040 rc = REP_PROTOCOL_SUCCESS;
1041 } else {
1042 rc = REP_PROTOCOL_FAIL_TRUNCATED;
1043 }
1044 *sz_out = actual;
1045 return (rc);
1046 }
1047
1048 /*
1049 * Get the FMRI for the node at np. The fmri will be placed in buf. On
1050 * success sz_out will be set to the size of the fmri in buf. If
1051 * REP_PROTOCOL_FAIL_TRUNCATED is returned, sz_out will be set to the size
1052 * of the buffer that would be required to avoid truncation.
1053 *
1054 * Fails with
1055 * _TRUNCATED not enough room in buf for the FMRI.
1056 */
1057 static int
1058 rc_node_get_fmri_or_fragment(rc_node_t *np, char *buf, size_t bufsize,
1059 size_t *sz_out)
1060 {
1061 size_t fmri_len = 0;
1062 int r;
1063
1064 if (bufsize > 0)
1065 *buf = 0;
1066 *sz_out = 0;
1067
1068 if (np->rn_fmri == NULL) {
1069 /*
1070 * A NULL rn_fmri implies that this is a top level scope.
1071 * Child nodes will always have an rn_fmri established
1072 * because both rc_node_link_child() and
1073 * rc_node_relink_child() call rc_node_build_fmri(). In
1074 * this case, we'll just return our name preceded by the
1075 * appropriate FMRI decorations.
1076 */
1077 assert(np->rn_parent == NULL);
1078 r = rc_concat_fmri_element(buf, bufsize, &fmri_len, np->rn_name,
1079 np->rn_id.rl_type);
1080 if (r != REP_PROTOCOL_SUCCESS)
1081 return (r);
1082 } else {
1083 /* We have an fmri, so return it. */
1084 fmri_len = strlcpy(buf, np->rn_fmri, bufsize);
1085 }
1086
1087 *sz_out = fmri_len;
1088
1089 if (fmri_len >= bufsize)
1090 return (REP_PROTOCOL_FAIL_TRUNCATED);
1091
1092 return (REP_PROTOCOL_SUCCESS);
1093 }
1094
1095 /*
1096 * Build an FMRI string for this node and save it in rn_fmri.
1097 *
1098 * The basic strategy here is to get the fmri of our parent and then
1099 * concatenate the appropriate separator followed by our name. If our name
1100 * is null, the resulting fmri will just be a copy of the parent fmri.
1101 * rc_node_build_fmri() should be called with the RC_NODE_USING_PARENT flag
1102 * set. Also the rn_lock for this node should be held.
1103 *
1104 * Fails with
1105 * _NO_RESOURCES Could not allocate memory.
1106 */
1107 static int
1108 rc_node_build_fmri(rc_node_t *np)
1109 {
1110 size_t actual;
1111 char fmri[REP_PROTOCOL_FMRI_LEN];
1112 int rc;
1113 size_t sz = REP_PROTOCOL_FMRI_LEN;
1114
1115 assert(MUTEX_HELD(&np->rn_lock));
1116 assert(np->rn_flags & RC_NODE_USING_PARENT);
1117
1118 rc_node_free_fmri(np);
1119
1120 rc = rc_node_get_fmri_or_fragment(np->rn_parent, fmri, sz, &actual);
1121 assert(rc == REP_PROTOCOL_SUCCESS);
1122
1123 if (np->rn_name != NULL) {
1124 rc = rc_concat_fmri_element(fmri, sz, &actual, np->rn_name,
1125 np->rn_id.rl_type);
1126 assert(rc == REP_PROTOCOL_SUCCESS);
1127 np->rn_fmri = strdup(fmri);
1128 } else {
1129 np->rn_fmri = strdup(fmri);
1130 }
1131 if (np->rn_fmri == NULL) {
1132 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1133 } else {
1134 rc = REP_PROTOCOL_SUCCESS;
1135 }
1136
1137 return (rc);
1138 }
1139
1140 /*
1141 * Get the FMRI of the node at np placing the result in fmri. Then
1142 * concatenate the additional element to fmri. The type variable indicates
1143 * the type of element, so that the appropriate separator can be
1144 * generated. size is the number of bytes in the buffer at fmri, and
1145 * sz_out receives the size of the generated string. If the result is
1146 * truncated, sz_out will receive the size of the buffer that would be
1147 * required to avoid truncation.
1148 *
1149 * Fails with
1150 * _TRUNCATED Not enough room in buffer at fmri.
1151 */
1152 static int
1153 rc_get_fmri_and_concat(rc_node_t *np, char *fmri, size_t size, size_t *sz_out,
1154 const char *element, rep_protocol_entity_t type)
1155 {
1156 int rc;
1157
1158 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, size, sz_out)) !=
1159 REP_PROTOCOL_SUCCESS) {
1160 return (rc);
1161 }
1162 if ((rc = rc_concat_fmri_element(fmri, size, sz_out, element, type)) !=
1163 REP_PROTOCOL_SUCCESS) {
1164 return (rc);
1165 }
1166
1167 return (REP_PROTOCOL_SUCCESS);
1168 }
1169
1170 static int
1171 rc_notify_info_interested(rc_notify_info_t *rnip, rc_notify_t *np)
1172 {
1173 rc_node_t *nnp = np->rcn_node;
1174 int i;
1175
1176 assert(MUTEX_HELD(&rc_pg_notify_lock));
1177
1178 if (np->rcn_delete != NULL) {
1179 assert(np->rcn_info == NULL && np->rcn_node == NULL);
1180 return (1); /* everyone likes deletes */
1181 }
1182 if (np->rcn_node == NULL) {
1183 assert(np->rcn_info != NULL || np->rcn_delete != NULL);
1184 return (0);
1185 }
1186 assert(np->rcn_info == NULL);
1187
1188 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
1189 if (rnip->rni_namelist[i] != NULL) {
1190 if (strcmp(nnp->rn_name, rnip->rni_namelist[i]) == 0)
1191 return (1);
1192 }
1193 if (rnip->rni_typelist[i] != NULL) {
1194 if (strcmp(nnp->rn_type, rnip->rni_typelist[i]) == 0)
1195 return (1);
1196 }
1197 }
1198 return (0);
1199 }
1200
1201 static void
1202 rc_notify_insert_node(rc_node_t *nnp)
1203 {
1204 rc_notify_t *np = &nnp->rn_notify;
1205 rc_notify_info_t *nip;
1206 int found = 0;
1207
1208 assert(np->rcn_info == NULL);
1209
1210 if (nnp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
1211 return;
1212
1213 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1214 np->rcn_node = nnp;
1215 for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1216 nip = uu_list_next(rc_notify_info_list, nip)) {
1217 if (rc_notify_info_interested(nip, np)) {
1218 (void) pthread_cond_broadcast(&nip->rni_cv);
1219 found++;
1220 }
1221 }
1222 if (found)
1223 (void) uu_list_insert_before(rc_notify_list, NULL, np);
1224 else
1225 np->rcn_node = NULL;
1226
1227 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1228 }
1229
1230 static void
1231 rc_notify_deletion(rc_notify_delete_t *ndp, const char *service,
1232 const char *instance, const char *pg)
1233 {
1234 rc_notify_info_t *nip;
1235
1236 uu_list_node_init(&ndp->rnd_notify, &ndp->rnd_notify.rcn_list_node,
1237 rc_notify_pool);
1238 ndp->rnd_notify.rcn_delete = ndp;
1239
1240 (void) snprintf(ndp->rnd_fmri, sizeof (ndp->rnd_fmri),
1241 "svc:/%s%s%s%s%s", service,
1242 (instance != NULL)? ":" : "", (instance != NULL)? instance : "",
1243 (pg != NULL)? "/:properties/" : "", (pg != NULL)? pg : "");
1244
1245 /*
1246 * add to notification list, notify watchers
1247 */
1248 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1249 for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1250 nip = uu_list_next(rc_notify_info_list, nip))
1251 (void) pthread_cond_broadcast(&nip->rni_cv);
1252 (void) uu_list_insert_before(rc_notify_list, NULL, ndp);
1253 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1254 }
1255
1256 static void
1257 rc_notify_remove_node(rc_node_t *nnp)
1258 {
1259 rc_notify_t *np = &nnp->rn_notify;
1260
1261 assert(np->rcn_info == NULL);
1262 assert(!MUTEX_HELD(&nnp->rn_lock));
1263
1264 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1265 while (np->rcn_node != NULL) {
1266 if (rc_notify_in_use) {
1267 (void) pthread_cond_wait(&rc_pg_notify_cv,
1268 &rc_pg_notify_lock);
1269 continue;
1270 }
1271 (void) uu_list_remove(rc_notify_list, np);
1272 np->rcn_node = NULL;
1273 break;
1274 }
1275 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1276 }
1277
1278 static void
1279 rc_notify_remove_locked(rc_notify_t *np)
1280 {
1281 assert(MUTEX_HELD(&rc_pg_notify_lock));
1282 assert(rc_notify_in_use == 0);
1283
1284 (void) uu_list_remove(rc_notify_list, np);
1285 if (np->rcn_node) {
1286 np->rcn_node = NULL;
1287 } else if (np->rcn_delete) {
1288 uu_free(np->rcn_delete);
1289 } else {
1290 assert(0); /* CAN'T HAPPEN */
1291 }
1292 }
1293
1294 /*
1295 * Permission checking functions. See comment atop this file.
1296 */
1297 #ifndef NATIVE_BUILD
1298 static permcheck_t *
1299 pc_create()
1300 {
1301 permcheck_t *p;
1302
1303 p = uu_zalloc(sizeof (*p));
1304 if (p == NULL)
1305 return (NULL);
1306 p->pc_bnum = 8; /* Normal case will only have 2 elts. */
1307 p->pc_buckets = uu_zalloc(sizeof (*p->pc_buckets) * p->pc_bnum);
1308 if (p->pc_buckets == NULL) {
1309 uu_free(p);
1310 return (NULL);
1311 }
1312
1313 p->pc_enum = 0;
1314 return (p);
1315 }
1316
1317 static void
1318 pc_free(permcheck_t *pcp)
1319 {
1320 uint_t i;
1321 struct pc_elt *ep, *next;
1322
1323 for (i = 0; i < pcp->pc_bnum; ++i) {
1324 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1325 next = ep->pce_next;
1326 free(ep);
1327 }
1328 }
1329
1330 free(pcp->pc_buckets);
1331 free(pcp);
1332 }
1333
1334 static uint32_t
1335 pc_hash(const char *auth)
1336 {
1337 uint32_t h = 0, g;
1338 const char *p;
1339
1340 /*
1341 * Generic hash function from uts/common/os/modhash.c.
1342 */
1343 for (p = auth; *p != '\0'; ++p) {
1344 h = (h << 4) + *p;
1345 g = (h & 0xf0000000);
1346 if (g != 0) {
1347 h ^= (g >> 24);
1348 h ^= g;
1349 }
1350 }
1351
1352 return (h);
1353 }
1354
1355 static perm_status_t
1356 pc_exists(permcheck_t *pcp, const char *auth)
1357 {
1358 uint32_t h;
1359 struct pc_elt *ep;
1360
1361 h = pc_hash(auth);
1362 for (ep = pcp->pc_buckets[h & (pcp->pc_bnum - 1)];
1363 ep != NULL;
1364 ep = ep->pce_next) {
1365 if (strcmp(auth, ep->pce_auth) == 0) {
1366 pcp->pc_auth_string = ep->pce_auth;
1367 return (PERM_GRANTED);
1368 }
1369 }
1370
1371 return (PERM_DENIED);
1372 }
1373
1374 static perm_status_t
1375 pc_match(permcheck_t *pcp, const char *pattern)
1376 {
1377 uint_t i;
1378 struct pc_elt *ep;
1379
1380 for (i = 0; i < pcp->pc_bnum; ++i) {
1381 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = ep->pce_next) {
1382 if (_auth_match(pattern, ep->pce_auth)) {
1383 pcp->pc_auth_string = ep->pce_auth;
1384 return (PERM_GRANTED);
1385 }
1386 }
1387 }
1388
1389 return (PERM_DENIED);
1390 }
1391
1392 static int
1393 pc_grow(permcheck_t *pcp)
1394 {
1395 uint_t new_bnum, i, j;
1396 struct pc_elt **new_buckets;
1397 struct pc_elt *ep, *next;
1398
1399 new_bnum = pcp->pc_bnum * 2;
1400 if (new_bnum < pcp->pc_bnum)
1401 /* Homey don't play that. */
1402 return (-1);
1403
1404 new_buckets = uu_zalloc(sizeof (*new_buckets) * new_bnum);
1405 if (new_buckets == NULL)
1406 return (-1);
1407
1408 for (i = 0; i < pcp->pc_bnum; ++i) {
1409 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1410 next = ep->pce_next;
1411 j = pc_hash(ep->pce_auth) & (new_bnum - 1);
1412 ep->pce_next = new_buckets[j];
1413 new_buckets[j] = ep;
1414 }
1415 }
1416
1417 uu_free(pcp->pc_buckets);
1418 pcp->pc_buckets = new_buckets;
1419 pcp->pc_bnum = new_bnum;
1420
1421 return (0);
1422 }
1423
1424 static int
1425 pc_add(permcheck_t *pcp, const char *auth, pc_auth_type_t auth_type)
1426 {
1427 struct pc_elt *ep;
1428 uint_t i;
1429
1430 ep = uu_zalloc(offsetof(struct pc_elt, pce_auth) + strlen(auth) + 1);
1431 if (ep == NULL)
1432 return (-1);
1433
1434 /* Grow if pc_enum / pc_bnum > 3/4. */
1435 if (pcp->pc_enum * 4 > 3 * pcp->pc_bnum)
1436 /* Failure is not a stopper; we'll try again next time. */
1437 (void) pc_grow(pcp);
1438
1439 (void) strcpy(ep->pce_auth, auth);
1440
1441 i = pc_hash(auth) & (pcp->pc_bnum - 1);
1442 ep->pce_next = pcp->pc_buckets[i];
1443 pcp->pc_buckets[i] = ep;
1444
1445 if (auth_type > pcp->pc_specific_type) {
1446 pcp->pc_specific_type = auth_type;
1447 pcp->pc_specific = ep;
1448 }
1449
1450 ++pcp->pc_enum;
1451
1452 return (0);
1453 }
1454
1455 /*
1456 * For the type of a property group, return the authorization which may be
1457 * used to modify it.
1458 */
1459 static const char *
1460 perm_auth_for_pgtype(const char *pgtype)
1461 {
1462 if (strcmp(pgtype, SCF_GROUP_METHOD) == 0)
1463 return (AUTH_MODIFY_PREFIX "method");
1464 else if (strcmp(pgtype, SCF_GROUP_DEPENDENCY) == 0)
1465 return (AUTH_MODIFY_PREFIX "dependency");
1466 else if (strcmp(pgtype, SCF_GROUP_APPLICATION) == 0)
1467 return (AUTH_MODIFY_PREFIX "application");
1468 else if (strcmp(pgtype, SCF_GROUP_FRAMEWORK) == 0)
1469 return (AUTH_MODIFY_PREFIX "framework");
1470 else
1471 return (NULL);
1472 }
1473
1474 /*
1475 * Fails with
1476 * _NO_RESOURCES - out of memory
1477 */
1478 static int
1479 perm_add_enabling_type(permcheck_t *pcp, const char *auth,
1480 pc_auth_type_t auth_type)
1481 {
1482 return (pc_add(pcp, auth, auth_type) == 0 ? REP_PROTOCOL_SUCCESS :
1483 REP_PROTOCOL_FAIL_NO_RESOURCES);
1484 }
1485
1486 /*
1487 * Fails with
1488 * _NO_RESOURCES - out of memory
1489 */
1490 static int
1491 perm_add_enabling(permcheck_t *pcp, const char *auth)
1492 {
1493 return (perm_add_enabling_type(pcp, auth, PC_AUTH_SMF));
1494 }
1495
1496 /* Note that perm_add_enabling_values() is defined below. */
1497
1498 /*
1499 * perm_granted() returns PERM_GRANTED if the current door caller has one of
1500 * the enabling authorizations in pcp, PERM_DENIED if it doesn't, PERM_GONE if
1501 * the door client went away and PERM_FAIL if an error (usually lack of
1502 * memory) occurs. auth_cb() checks each authorization enumerated by
1503 * _enum_auths(). When we find a result other than PERM_DENIED, we
1504 * short-cut the enumeration and return non-zero.
1505 */
1506
1507 static int
1508 auth_cb(const char *auth, void *ctxt, void *vres)
1509 {
1510 permcheck_t *pcp = ctxt;
1511 int *pret = vres;
1512
1513 if (strchr(auth, KV_WILDCHAR) == NULL)
1514 *pret = pc_exists(pcp, auth);
1515 else
1516 *pret = pc_match(pcp, auth);
1517
1518 if (*pret != PERM_DENIED)
1519 return (1);
1520 /*
1521 * If we failed, choose the most specific auth string for use in
1522 * the audit event.
1523 */
1524 assert(pcp->pc_specific != NULL);
1525 pcp->pc_auth_string = pcp->pc_specific->pce_auth;
1526
1527 return (0); /* Tells that we need to continue */
1528 }
1529
1530 static perm_status_t
1531 perm_granted(permcheck_t *pcp)
1532 {
1533 ucred_t *uc;
1534
1535 perm_status_t ret = PERM_DENIED;
1536 uid_t uid;
1537 struct passwd pw;
1538 char pwbuf[1024]; /* XXX should be NSS_BUFLEN_PASSWD */
1539
1540 /* Get the uid */
1541 if ((uc = get_ucred()) == NULL) {
1542 if (errno == EINVAL) {
1543 /*
1544 * Client is no longer waiting for our response (e.g.,
1545 * it received a signal & resumed with EINTR).
1546 * Punting with door_return() would be nice but we
1547 * need to release all of the locks & references we
1548 * hold. And we must report failure to the client
1549 * layer to keep it from ignoring retries as
1550 * already-done (idempotency & all that). None of the
1551 * error codes fit very well, so we might as well
1552 * force the return of _PERMISSION_DENIED since we
1553 * couldn't determine the user.
1554 */
1555 return (PERM_GONE);
1556 }
1557 assert(0);
1558 abort();
1559 }
1560
1561 uid = ucred_geteuid(uc);
1562 assert(uid != (uid_t)-1);
1563
1564 if (getpwuid_r(uid, &pw, pwbuf, sizeof (pwbuf)) == NULL) {
1565 return (PERM_FAIL);
1566 }
1567
1568 /*
1569 * Enumerate all the auths defined for the user and return the
1570 * result in ret.
1571 */
1572 if (_enum_auths(pw.pw_name, auth_cb, pcp, &ret) < 0)
1573 return (PERM_FAIL);
1574
1575 return (ret);
1576 }
1577
1578 static int
1579 map_granted_status(perm_status_t status, permcheck_t *pcp,
1580 char **match_auth)
1581 {
1582 int rc;
1583
1584 *match_auth = NULL;
1585 switch (status) {
1586 case PERM_DENIED:
1587 *match_auth = strdup(pcp->pc_auth_string);
1588 if (*match_auth == NULL)
1589 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1590 else
1591 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1592 break;
1593 case PERM_GRANTED:
1594 *match_auth = strdup(pcp->pc_auth_string);
1595 if (*match_auth == NULL)
1596 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1597 else
1598 rc = REP_PROTOCOL_SUCCESS;
1599 break;
1600 case PERM_GONE:
1601 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1602 break;
1603 case PERM_FAIL:
1604 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1605 break;
1606 }
1607 return (rc);
1608 }
1609 #endif /* NATIVE_BUILD */
1610
1611 /*
1612 * flags in RC_NODE_WAITING_FLAGS are broadcast when unset, and are used to
1613 * serialize certain actions, and to wait for certain operations to complete
1614 *
1615 * The waiting flags are:
1616 * RC_NODE_CHILDREN_CHANGING
1617 * The child list is being built or changed (due to creation
1618 * or deletion). All iterators pause.
1619 *
1620 * RC_NODE_USING_PARENT
1621 * Someone is actively using the parent pointer, so we can't
1622 * be removed from the parent list.
1623 *
1624 * RC_NODE_CREATING_CHILD
1625 * A child is being created -- locks out other creations, to
1626 * prevent insert-insert races.
1627 *
1628 * RC_NODE_IN_TX
1629 * This object is running a transaction.
1630 *
1631 * RC_NODE_DYING
1632  *		This node might be dying.  Always set together with the
1633  *		other flags in RC_NODE_DYING_FLAGS (which is everything but
1634  *		RC_NODE_USING_PARENT).
1635 */
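/*
 * The usual hold/release pattern for these flags looks roughly like the
 * sketch below (modeled on rc_node_fill_children() further down);
 * rc_node_hold_flag() returns 0 if the node goes RC_NODE_DEAD while waiting:
 *
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING)) {
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		return (REP_PROTOCOL_FAIL_DELETED);
 *	}
 *	... work which may drop and re-take rn_lock ...
 *	rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
 *	(void) pthread_mutex_unlock(&np->rn_lock);
 */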
1636 static int
1637 rc_node_hold_flag(rc_node_t *np, uint32_t flag)
1638 {
1639 assert(MUTEX_HELD(&np->rn_lock));
1640 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1641
1642 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) {
1643 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1644 }
1645 if (np->rn_flags & RC_NODE_DEAD)
1646 return (0);
1647
1648 np->rn_flags |= flag;
1649 return (1);
1650 }
1651
1652 static void
1653 rc_node_rele_flag(rc_node_t *np, uint32_t flag)
1654 {
1655 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1656 assert(MUTEX_HELD(&np->rn_lock));
1657 assert((np->rn_flags & flag) == flag);
1658 np->rn_flags &= ~flag;
1659 (void) pthread_cond_broadcast(&np->rn_cv);
1660 }
1661
1662 /*
1663 * wait until a particular flag has cleared. Fails if the object dies.
1664 */
1665 static int
1666 rc_node_wait_flag(rc_node_t *np, uint32_t flag)
1667 {
1668 assert(MUTEX_HELD(&np->rn_lock));
1669 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag))
1670 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1671
1672 return (!(np->rn_flags & RC_NODE_DEAD));
1673 }
1674
1675 /*
1676 * On entry, np's lock must be held, and this thread must be holding
1677 * RC_NODE_USING_PARENT. On return, both of them are released.
1678 *
1679 * If the return value is NULL, np either does not have a parent, or
1680 * the parent has been marked DEAD.
1681 *
1682 * If the return value is non-NULL, it is the parent of np, and both
1683 * its lock and the requested flags are held.
1684 */
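/*
 * A minimal caller sketch (assuming np is locked and this thread already
 * holds RC_NODE_USING_PARENT on it; treating an absent or dead parent as a
 * deletion is just one possible policy):
 *
 *	pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
 *	if (pp == NULL)
 *		return (REP_PROTOCOL_FAIL_DELETED);
 *	... np's lock has been dropped; pp is locked with the flag held ...
 *	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
 *	(void) pthread_mutex_unlock(&pp->rn_lock);
 */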
1685 static rc_node_t *
1686 rc_node_hold_parent_flag(rc_node_t *np, uint32_t flag)
1687 {
1688 rc_node_t *pp;
1689
1690 assert(MUTEX_HELD(&np->rn_lock));
1691 assert(np->rn_flags & RC_NODE_USING_PARENT);
1692
1693 if ((pp = np->rn_parent) == NULL) {
1694 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1695 (void) pthread_mutex_unlock(&np->rn_lock);
1696 return (NULL);
1697 }
1698 (void) pthread_mutex_unlock(&np->rn_lock);
1699
1700 (void) pthread_mutex_lock(&pp->rn_lock);
1701 (void) pthread_mutex_lock(&np->rn_lock);
1702 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1703 (void) pthread_mutex_unlock(&np->rn_lock);
1704
1705 if (!rc_node_hold_flag(pp, flag)) {
1706 (void) pthread_mutex_unlock(&pp->rn_lock);
1707 return (NULL);
1708 }
1709 return (pp);
1710 }
1711
1712 rc_node_t *
1713 rc_node_alloc(void)
1714 {
1715 rc_node_t *np = uu_zalloc(sizeof (*np));
1716
1717 if (np == NULL)
1718 return (NULL);
1719
1720 (void) pthread_mutex_init(&np->rn_lock, NULL);
1721 (void) pthread_cond_init(&np->rn_cv, NULL);
1722
1723 np->rn_children = uu_list_create(rc_children_pool, np, 0);
1724 np->rn_pg_notify_list = uu_list_create(rc_pg_notify_pool, np, 0);
1725
1726 uu_list_node_init(np, &np->rn_sibling_node, rc_children_pool);
1727
1728 uu_list_node_init(&np->rn_notify, &np->rn_notify.rcn_list_node,
1729 rc_notify_pool);
1730
1731 return (np);
1732 }
1733
1734 void
1735 rc_node_destroy(rc_node_t *np)
1736 {
1737 int i;
1738
1739 if (np->rn_flags & RC_NODE_UNREFED)
1740 return; /* being handled elsewhere */
1741
1742 assert(np->rn_refs == 0 && np->rn_other_refs == 0);
1743 assert(np->rn_former == NULL);
1744
1745 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
1746 /* Release the holds from rc_iter_next(). */
1747 for (i = 0; i < COMPOSITION_DEPTH; ++i) {
1748 /* rn_cchain[i] may be NULL for empty snapshots. */
1749 if (np->rn_cchain[i] != NULL)
1750 rc_node_rele(np->rn_cchain[i]);
1751 }
1752 }
1753
1754 if (np->rn_name != NULL)
1755 free((void *)np->rn_name);
1756 np->rn_name = NULL;
1757 if (np->rn_type != NULL)
1758 free((void *)np->rn_type);
1759 np->rn_type = NULL;
1760 if (np->rn_values != NULL)
1761 object_free_values(np->rn_values, np->rn_valtype,
1762 np->rn_values_count, np->rn_values_size);
1763 np->rn_values = NULL;
1764 rc_node_free_fmri(np);
1765
1766 if (np->rn_snaplevel != NULL)
1767 rc_snaplevel_rele(np->rn_snaplevel);
1768 np->rn_snaplevel = NULL;
1769
1770 uu_list_node_fini(np, &np->rn_sibling_node, rc_children_pool);
1771
1772 uu_list_node_fini(&np->rn_notify, &np->rn_notify.rcn_list_node,
1773 rc_notify_pool);
1774
1775 assert(uu_list_first(np->rn_children) == NULL);
1776 uu_list_destroy(np->rn_children);
1777 uu_list_destroy(np->rn_pg_notify_list);
1778
1779 (void) pthread_mutex_destroy(&np->rn_lock);
1780 (void) pthread_cond_destroy(&np->rn_cv);
1781
1782 uu_free(np);
1783 }
1784
1785 /*
1786 * Link in a child node.
1787 *
1788 * Because of the lock ordering, cp has to already be in the hash table with
1789 * its lock dropped before we get it. To prevent anyone from noticing that
1790  * it is parentless, the creation code sets the RC_NODE_USING_PARENT flag.
1791 * we've linked it in, we release the flag.
1792 */
1793 static void
1794 rc_node_link_child(rc_node_t *np, rc_node_t *cp)
1795 {
1796 assert(!MUTEX_HELD(&np->rn_lock));
1797 assert(!MUTEX_HELD(&cp->rn_lock));
1798
1799 (void) pthread_mutex_lock(&np->rn_lock);
1800 (void) pthread_mutex_lock(&cp->rn_lock);
1801 assert(!(cp->rn_flags & RC_NODE_IN_PARENT) &&
1802 (cp->rn_flags & RC_NODE_USING_PARENT));
1803
1804 assert(rc_check_parent_child(np->rn_id.rl_type, cp->rn_id.rl_type) ==
1805 REP_PROTOCOL_SUCCESS);
1806
1807 cp->rn_parent = np;
1808 cp->rn_flags |= RC_NODE_IN_PARENT;
1809 (void) uu_list_insert_before(np->rn_children, NULL, cp);
1810 (void) rc_node_build_fmri(cp);
1811
1812 (void) pthread_mutex_unlock(&np->rn_lock);
1813
1814 rc_node_rele_flag(cp, RC_NODE_USING_PARENT);
1815 (void) pthread_mutex_unlock(&cp->rn_lock);
1816 }
1817
1818 /*
1819 * Sets the rn_parent_ref field of all the children of np to pp -- always
1820 * initially invoked as rc_node_setup_parent_ref(np, np), we then recurse.
1821 *
1822 * This is used when we mark a node RC_NODE_OLD, so that when the object and
1823 * its children are no longer referenced, they will all be deleted as a unit.
1824 */
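/*
 * For instance, when rc_node_relink_child() below swaps a new node in for an
 * old one, the old node is marked RC_NODE_OLD and
 * rc_node_setup_parent_ref(old, old) is run on it, so everything beneath the
 * old node holds a reference back to it and the whole stale subtree is
 * reclaimed as a unit once the last client reference is dropped.
 */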
1825 static void
1826 rc_node_setup_parent_ref(rc_node_t *np, rc_node_t *pp)
1827 {
1828 rc_node_t *cp;
1829
1830 assert(MUTEX_HELD(&np->rn_lock));
1831
1832 for (cp = uu_list_first(np->rn_children); cp != NULL;
1833 cp = uu_list_next(np->rn_children, cp)) {
1834 (void) pthread_mutex_lock(&cp->rn_lock);
1835 if (cp->rn_flags & RC_NODE_PARENT_REF) {
1836 assert(cp->rn_parent_ref == pp);
1837 } else {
1838 assert(cp->rn_parent_ref == NULL);
1839
1840 cp->rn_flags |= RC_NODE_PARENT_REF;
1841 cp->rn_parent_ref = pp;
1842 if (cp->rn_refs != 0)
1843 rc_node_hold_other(pp);
1844 }
1845 rc_node_setup_parent_ref(cp, pp); /* recurse */
1846 (void) pthread_mutex_unlock(&cp->rn_lock);
1847 }
1848 }
1849
1850 /*
1851 * Atomically replace 'np' with 'newp', with a parent of 'pp'.
1852 *
1853 * Requirements:
1854 * *no* node locks may be held.
1855 * pp must be held with RC_NODE_CHILDREN_CHANGING
1856 * newp and np must be held with RC_NODE_IN_TX
1857 * np must be marked RC_NODE_IN_PARENT, newp must not be
1858 * np must be marked RC_NODE_OLD
1859 *
1860 * Afterwards:
1861 * pp's RC_NODE_CHILDREN_CHANGING is dropped
1862 * newp and np's RC_NODE_IN_TX is dropped
1863 * newp->rn_former = np;
1864 * newp is RC_NODE_IN_PARENT, np is not.
1865 * interested notify subscribers have been notified of newp's new status.
1866 */
1867 static void
1868 rc_node_relink_child(rc_node_t *pp, rc_node_t *np, rc_node_t *newp)
1869 {
1870 cache_bucket_t *bp;
1871 /*
1872 	 * First, swap np and newp in the cache.  newp's RC_NODE_IN_TX flag
1873 * keeps rc_node_update() from seeing it until we are done.
1874 */
1875 bp = cache_hold(newp->rn_hash);
1876 cache_remove_unlocked(bp, np);
1877 cache_insert_unlocked(bp, newp);
1878 cache_release(bp);
1879
1880 /*
1881 * replace np with newp in pp's list, and attach it to newp's rn_former
1882 * link.
1883 */
1884 (void) pthread_mutex_lock(&pp->rn_lock);
1885 assert(pp->rn_flags & RC_NODE_CHILDREN_CHANGING);
1886
1887 (void) pthread_mutex_lock(&newp->rn_lock);
1888 assert(!(newp->rn_flags & RC_NODE_IN_PARENT));
1889 assert(newp->rn_flags & RC_NODE_IN_TX);
1890
1891 (void) pthread_mutex_lock(&np->rn_lock);
1892 assert(np->rn_flags & RC_NODE_IN_PARENT);
1893 assert(np->rn_flags & RC_NODE_OLD);
1894 assert(np->rn_flags & RC_NODE_IN_TX);
1895
1896 newp->rn_parent = pp;
1897 newp->rn_flags |= RC_NODE_IN_PARENT;
1898
1899 /*
1900 * Note that we carefully add newp before removing np -- this
1901 * keeps iterators on the list from missing us.
1902 */
1903 (void) uu_list_insert_after(pp->rn_children, np, newp);
1904 (void) rc_node_build_fmri(newp);
1905 (void) uu_list_remove(pp->rn_children, np);
1906
1907 /*
1908 * re-set np
1909 */
1910 newp->rn_former = np;
1911 np->rn_parent = NULL;
1912 np->rn_flags &= ~RC_NODE_IN_PARENT;
1913 np->rn_flags |= RC_NODE_ON_FORMER;
1914
1915 rc_notify_insert_node(newp);
1916
1917 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
1918 (void) pthread_mutex_unlock(&pp->rn_lock);
1919 rc_node_rele_flag(newp, RC_NODE_USING_PARENT | RC_NODE_IN_TX);
1920 (void) pthread_mutex_unlock(&newp->rn_lock);
1921 rc_node_setup_parent_ref(np, np);
1922 rc_node_rele_flag(np, RC_NODE_IN_TX);
1923 (void) pthread_mutex_unlock(&np->rn_lock);
1924 }
1925
1926 /*
1927 * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists.
1928 * 'cp' is used (and returned) if the node does not yet exist. If it does
1929 * exist, 'cp' is freed, and the existent node is returned instead.
1930 */
1931 rc_node_t *
1932 rc_node_setup(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
1933 rc_node_t *pp)
1934 {
1935 rc_node_t *np;
1936 cache_bucket_t *bp;
1937 uint32_t h = rc_node_hash(nip);
1938
1939 assert(cp->rn_refs == 0);
1940
1941 bp = cache_hold(h);
1942 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
1943 cache_release(bp);
1944
1945 /*
1946 * make sure it matches our expectations
1947 */
1948 (void) pthread_mutex_lock(&np->rn_lock);
1949 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
1950 assert(np->rn_parent == pp);
1951 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
1952 assert(strcmp(np->rn_name, name) == 0);
1953 assert(np->rn_type == NULL);
1954 assert(np->rn_flags & RC_NODE_IN_PARENT);
1955 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1956 }
1957 (void) pthread_mutex_unlock(&np->rn_lock);
1958
1959 rc_node_destroy(cp);
1960 return (np);
1961 }
1962
1963 /*
1964 * No one is there -- setup & install the new node.
1965 */
1966 np = cp;
1967 rc_node_hold(np);
1968 np->rn_id = *nip;
1969 np->rn_hash = h;
1970 np->rn_name = strdup(name);
1971
1972 np->rn_flags |= RC_NODE_USING_PARENT;
1973
1974 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE) {
1975 #if COMPOSITION_DEPTH == 2
1976 np->rn_cchain[0] = np;
1977 np->rn_cchain[1] = pp;
1978 #else
1979 #error This code must be updated.
1980 #endif
1981 }
1982
1983 cache_insert_unlocked(bp, np);
1984 cache_release(bp); /* we are now visible */
1985
1986 rc_node_link_child(pp, np);
1987
1988 return (np);
1989 }
1990
1991 /*
1992 * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp' exists.
1993 * 'cp' is used (and returned) if the node does not yet exist. If it does
1994 * exist, 'cp' is freed, and the existent node is returned instead.
1995 */
1996 rc_node_t *
1997 rc_node_setup_snapshot(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
1998 uint32_t snap_id, rc_node_t *pp)
1999 {
2000 rc_node_t *np;
2001 cache_bucket_t *bp;
2002 uint32_t h = rc_node_hash(nip);
2003
2004 assert(cp->rn_refs == 0);
2005
2006 bp = cache_hold(h);
2007 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2008 cache_release(bp);
2009
2010 /*
2011 * make sure it matches our expectations
2012 */
2013 (void) pthread_mutex_lock(&np->rn_lock);
2014 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2015 assert(np->rn_parent == pp);
2016 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2017 assert(strcmp(np->rn_name, name) == 0);
2018 assert(np->rn_type == NULL);
2019 assert(np->rn_flags & RC_NODE_IN_PARENT);
2020 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2021 }
2022 (void) pthread_mutex_unlock(&np->rn_lock);
2023
2024 rc_node_destroy(cp);
2025 return (np);
2026 }
2027
2028 /*
2029 * No one is there -- create a new node.
2030 */
2031 np = cp;
2032 rc_node_hold(np);
2033 np->rn_id = *nip;
2034 np->rn_hash = h;
2035 np->rn_name = strdup(name);
2036 np->rn_snapshot_id = snap_id;
2037
2038 np->rn_flags |= RC_NODE_USING_PARENT;
2039
2040 cache_insert_unlocked(bp, np);
2041 cache_release(bp); /* we are now visible */
2042
2043 rc_node_link_child(pp, np);
2044
2045 return (np);
2046 }
2047
2048 /*
2049 * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists. 'cp' is
2050 * used (and returned) if the node does not yet exist. If it does exist, 'cp'
2051 * is freed, and the existent node is returned instead.
2052 */
2053 rc_node_t *
2054 rc_node_setup_snaplevel(rc_node_t *cp, rc_node_lookup_t *nip,
2055 rc_snaplevel_t *lvl, rc_node_t *pp)
2056 {
2057 rc_node_t *np;
2058 cache_bucket_t *bp;
2059 uint32_t h = rc_node_hash(nip);
2060
2061 assert(cp->rn_refs == 0);
2062
2063 bp = cache_hold(h);
2064 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2065 cache_release(bp);
2066
2067 /*
2068 * make sure it matches our expectations
2069 */
2070 (void) pthread_mutex_lock(&np->rn_lock);
2071 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2072 assert(np->rn_parent == pp);
2073 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2074 assert(np->rn_name == NULL);
2075 assert(np->rn_type == NULL);
2076 assert(np->rn_flags & RC_NODE_IN_PARENT);
2077 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2078 }
2079 (void) pthread_mutex_unlock(&np->rn_lock);
2080
2081 rc_node_destroy(cp);
2082 return (np);
2083 }
2084
2085 /*
2086 * No one is there -- create a new node.
2087 */
2088 np = cp;
2089 rc_node_hold(np); /* released in snapshot_fill_children() */
2090 np->rn_id = *nip;
2091 np->rn_hash = h;
2092
2093 rc_snaplevel_hold(lvl);
2094 np->rn_snaplevel = lvl;
2095
2096 np->rn_flags |= RC_NODE_USING_PARENT;
2097
2098 cache_insert_unlocked(bp, np);
2099 cache_release(bp); /* we are now visible */
2100
2101 /* Add this snaplevel to the snapshot's composition chain. */
2102 assert(pp->rn_cchain[lvl->rsl_level_num - 1] == NULL);
2103 pp->rn_cchain[lvl->rsl_level_num - 1] = np;
2104
2105 rc_node_link_child(pp, np);
2106
2107 return (np);
2108 }
2109
2110 /*
2111 * Returns NULL if strdup() fails.
2112 */
2113 rc_node_t *
2114 rc_node_setup_pg(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
2115 const char *type, uint32_t flags, uint32_t gen_id, rc_node_t *pp)
2116 {
2117 rc_node_t *np;
2118 cache_bucket_t *bp;
2119
2120 uint32_t h = rc_node_hash(nip);
2121 bp = cache_hold(h);
2122 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2123 cache_release(bp);
2124
2125 /*
2126 * make sure it matches our expectations (don't check
2127 * the generation number or parent, since someone could
2128 * have gotten a transaction through while we weren't
2129 * looking)
2130 */
2131 (void) pthread_mutex_lock(&np->rn_lock);
2132 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2133 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2134 assert(strcmp(np->rn_name, name) == 0);
2135 assert(strcmp(np->rn_type, type) == 0);
2136 assert(np->rn_pgflags == flags);
2137 assert(np->rn_flags & RC_NODE_IN_PARENT);
2138 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2139 }
2140 (void) pthread_mutex_unlock(&np->rn_lock);
2141
2142 rc_node_destroy(cp);
2143 return (np);
2144 }
2145
2146 np = cp;
2147 rc_node_hold(np); /* released in fill_pg_callback() */
2148 np->rn_id = *nip;
2149 np->rn_hash = h;
2150 np->rn_name = strdup(name);
2151 if (np->rn_name == NULL) {
2152 rc_node_rele(np);
2153 return (NULL);
2154 }
2155 np->rn_type = strdup(type);
2156 if (np->rn_type == NULL) {
2157 free((void *)np->rn_name);
2158 rc_node_rele(np);
2159 return (NULL);
2160 }
2161 np->rn_pgflags = flags;
2162 np->rn_gen_id = gen_id;
2163
2164 np->rn_flags |= RC_NODE_USING_PARENT;
2165
2166 cache_insert_unlocked(bp, np);
2167 cache_release(bp); /* we are now visible */
2168
2169 rc_node_link_child(pp, np);
2170
2171 return (np);
2172 }
2173
2174 #if COMPOSITION_DEPTH == 2
2175 /*
2176 * Initialize a "composed property group" which represents the composition of
2177 * property groups pg1 & pg2. It is ephemeral: once created & returned for an
2178 * ITER_READ request, keeping it out of cache_hash and any child lists
2179 * prevents it from being looked up. Operations besides iteration are passed
2180 * through to pg1.
2181 *
2182 * pg1 & pg2 should be held before entering this function. They will be
2183 * released in rc_node_destroy().
2184 */
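/*
 * Typically pg1 is the more specific property group (e.g. the instance's)
 * and pg2 the group it is composed with (e.g. the service's), matching the
 * rn_cchain[] ordering that rc_node_setup() establishes for instances; only
 * composed iteration consults pg2.
 */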
2185 static int
2186 rc_node_setup_cpg(rc_node_t *cpg, rc_node_t *pg1, rc_node_t *pg2)
2187 {
2188 if (strcmp(pg1->rn_type, pg2->rn_type) != 0)
2189 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2190
2191 cpg->rn_id.rl_type = REP_PROTOCOL_ENTITY_CPROPERTYGRP;
2192 cpg->rn_name = strdup(pg1->rn_name);
2193 if (cpg->rn_name == NULL)
2194 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2195
2196 cpg->rn_cchain[0] = pg1;
2197 cpg->rn_cchain[1] = pg2;
2198
2199 return (REP_PROTOCOL_SUCCESS);
2200 }
2201 #else
2202 #error This code must be updated.
2203 #endif
2204
2205 /*
2206 * Fails with _NO_RESOURCES.
2207 */
2208 int
2209 rc_node_create_property(rc_node_t *pp, rc_node_lookup_t *nip,
2210 const char *name, rep_protocol_value_type_t type,
2211 const char *vals, size_t count, size_t size)
2212 {
2213 rc_node_t *np;
2214 cache_bucket_t *bp;
2215
2216 uint32_t h = rc_node_hash(nip);
2217 bp = cache_hold(h);
2218 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2219 cache_release(bp);
2220 /*
2221 * make sure it matches our expectations
2222 */
2223 (void) pthread_mutex_lock(&np->rn_lock);
2224 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2225 assert(np->rn_parent == pp);
2226 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2227 assert(strcmp(np->rn_name, name) == 0);
2228 assert(np->rn_valtype == type);
2229 assert(np->rn_values_count == count);
2230 assert(np->rn_values_size == size);
2231 assert(vals == NULL ||
2232 memcmp(np->rn_values, vals, size) == 0);
2233 assert(np->rn_flags & RC_NODE_IN_PARENT);
2234 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2235 }
2236 rc_node_rele_locked(np);
2237 object_free_values(vals, type, count, size);
2238 return (REP_PROTOCOL_SUCCESS);
2239 }
2240
2241 /*
2242 * No one is there -- create a new node.
2243 */
2244 np = rc_node_alloc();
2245 if (np == NULL) {
2246 cache_release(bp);
2247 object_free_values(vals, type, count, size);
2248 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2249 }
2250 np->rn_id = *nip;
2251 np->rn_hash = h;
2252 np->rn_name = strdup(name);
2253 if (np->rn_name == NULL) {
2254 cache_release(bp);
2255 object_free_values(vals, type, count, size);
2256 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2257 }
2258
2259 np->rn_valtype = type;
2260 np->rn_values = vals;
2261 np->rn_values_count = count;
2262 np->rn_values_size = size;
2263
2264 np->rn_flags |= RC_NODE_USING_PARENT;
2265
2266 cache_insert_unlocked(bp, np);
2267 cache_release(bp); /* we are now visible */
2268
2269 rc_node_link_child(pp, np);
2270
2271 return (REP_PROTOCOL_SUCCESS);
2272 }
2273
2274 /*
2275 * This function implements a decision table to determine the event ID for
2276 * changes to the enabled (SCF_PROPERTY_ENABLED) property. The event ID is
2277 * determined by the value of the first property in the command specified
2278 * by cmd_no and the name of the property group. Here is the decision
2279 * table:
2280 *
2281 * Property Group Name
2282 * Property ------------------------------------------
2283 * Value SCF_PG_GENERAL SCF_PG_GENERAL_OVR
2284 * -------- -------------- ------------------
2285 * "0" ADT_smf_disable ADT_smf_tmp_disable
2286 * "1" ADT_smf_enable ADT_smf_tmp_enable
2287 *
2288 * This function is called by special_property_event through a function
2289 * pointer in the special_props_list array.
2290 *
2291 * Since the ADT_smf_* symbols may not be defined in the build machine's
2292 * include files, this function is not compiled when doing native builds.
2293 */
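/*
 * For example, a command writing "1" to the enabled property of an
 * SCF_PG_GENERAL_OVR property group yields ADT_smf_tmp_enable, while the
 * same write in SCF_PG_GENERAL yields ADT_smf_enable.
 */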
2294 #ifndef NATIVE_BUILD
2295 static int
2296 general_enable_id(tx_commit_data_t *tx_data, size_t cmd_no, const char *pg,
2297 au_event_t *event_id)
2298 {
2299 const char *value;
2300 uint32_t nvalues;
2301 int enable;
2302
2303 /*
2304 * First, check property value.
2305 */
2306 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
2307 return (-1);
2308 if (nvalues == 0)
2309 return (-1);
2310 if (tx_cmd_value(tx_data, cmd_no, 0, &value) != REP_PROTOCOL_SUCCESS)
2311 return (-1);
2312 if (strcmp(value, "0") == 0) {
2313 enable = 0;
2314 } else if (strcmp(value, "1") == 0) {
2315 enable = 1;
2316 } else {
2317 return (-1);
2318 }
2319
2320 /*
2321 * Now check property group name.
2322 */
2323 if (strcmp(pg, SCF_PG_GENERAL) == 0) {
2324 *event_id = enable ? ADT_smf_enable : ADT_smf_disable;
2325 return (0);
2326 } else if (strcmp(pg, SCF_PG_GENERAL_OVR) == 0) {
2327 *event_id = enable ? ADT_smf_tmp_enable : ADT_smf_tmp_disable;
2328 return (0);
2329 }
2330 return (-1);
2331 }
2332 #endif /* NATIVE_BUILD */
2333
2334 /*
2335 * This function compares two audit_special_prop_item_t structures
2336 * represented by item1 and item2. It returns an integer greater than 0 if
2337 * item1 is greater than item2. It returns 0 if they are equal and an
2338 * integer less than 0 if item1 is less than item2. api_prop_name and
2339 * api_pg_name are the key fields for sorting.
2340 *
2341 * This function is suitable for calls to bsearch(3C) and qsort(3C).
2342 */
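/*
 * A hypothetical lookup using this comparator (special_props_list and
 * SPECIAL_PROP_COUNT are the sorted array and count used in rc_node_init()
 * below; "key", "prop_name" and "pg_name" are illustrative locals only):
 *
 *	audit_special_prop_item_t key, *item;
 *
 *	key.api_prop_name = prop_name;
 *	key.api_pg_name = pg_name;
 *	item = bsearch(&key, special_props_list, SPECIAL_PROP_COUNT,
 *	    sizeof (special_props_list[0]), special_prop_compare);
 *	if (item != NULL)
 *		... the property is one of the audited special properties ...
 */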
2343 static int
2344 special_prop_compare(const void *item1, const void *item2)
2345 {
2346 const audit_special_prop_item_t *a = (audit_special_prop_item_t *)item1;
2347 const audit_special_prop_item_t *b = (audit_special_prop_item_t *)item2;
2348 int r;
2349
2350 r = strcmp(a->api_prop_name, b->api_prop_name);
2351 if (r == 0) {
2352 /*
2353 * Primary keys are the same, so check the secondary key.
2354 */
2355 r = strcmp(a->api_pg_name, b->api_pg_name);
2356 }
2357 return (r);
2358 }
2359
2360 int
2361 rc_node_init(void)
2362 {
2363 rc_node_t *np;
2364 cache_bucket_t *bp;
2365
2366 rc_children_pool = uu_list_pool_create("rc_children_pool",
2367 sizeof (rc_node_t), offsetof(rc_node_t, rn_sibling_node),
2368 NULL, UU_LIST_POOL_DEBUG);
2369
2370 rc_pg_notify_pool = uu_list_pool_create("rc_pg_notify_pool",
2371 sizeof (rc_node_pg_notify_t),
2372 offsetof(rc_node_pg_notify_t, rnpn_node),
2373 NULL, UU_LIST_POOL_DEBUG);
2374
2375 rc_notify_pool = uu_list_pool_create("rc_notify_pool",
2376 sizeof (rc_notify_t), offsetof(rc_notify_t, rcn_list_node),
2377 NULL, UU_LIST_POOL_DEBUG);
2378
2379 rc_notify_info_pool = uu_list_pool_create("rc_notify_info_pool",
2380 sizeof (rc_notify_info_t),
2381 offsetof(rc_notify_info_t, rni_list_node),
2382 NULL, UU_LIST_POOL_DEBUG);
2383
2384 if (rc_children_pool == NULL || rc_pg_notify_pool == NULL ||
2385 rc_notify_pool == NULL || rc_notify_info_pool == NULL)
2386 uu_die("out of memory");
2387
2388 rc_notify_list = uu_list_create(rc_notify_pool,
2389 &rc_notify_list, 0);
2390
2391 rc_notify_info_list = uu_list_create(rc_notify_info_pool,
2392 &rc_notify_info_list, 0);
2393
2394 if (rc_notify_list == NULL || rc_notify_info_list == NULL)
2395 uu_die("out of memory");
2396
2397 /*
2398 * Sort the special_props_list array so that it can be searched
2399 * with bsearch(3C).
2400 *
2401 * The special_props_list array is not compiled into the native
2402 * build code, so there is no need to call qsort if NATIVE_BUILD is
2403 * defined.
2404 */
2405 #ifndef NATIVE_BUILD
2406 qsort(special_props_list, SPECIAL_PROP_COUNT,
2407 sizeof (special_props_list[0]), special_prop_compare);
2408 #endif /* NATIVE_BUILD */
2409
2410 if ((np = rc_node_alloc()) == NULL)
2411 uu_die("out of memory");
2412
2413 rc_node_hold(np);
2414 np->rn_id.rl_type = REP_PROTOCOL_ENTITY_SCOPE;
2415 np->rn_id.rl_backend = BACKEND_TYPE_NORMAL;
2416 np->rn_hash = rc_node_hash(&np->rn_id);
2417 np->rn_name = "localhost";
2418
2419 bp = cache_hold(np->rn_hash);
2420 cache_insert_unlocked(bp, np);
2421 cache_release(bp);
2422
2423 rc_scope = np;
2424 return (1);
2425 }
2426
2427 /*
2428 * Fails with
2429 * _INVALID_TYPE - type is invalid
2430 * _TYPE_MISMATCH - np doesn't carry children of type type
2431 * _DELETED - np has been deleted
2432 * _NO_RESOURCES
2433 */
2434 static int
2435 rc_node_fill_children(rc_node_t *np, uint32_t type)
2436 {
2437 int rc;
2438
2439 assert(MUTEX_HELD(&np->rn_lock));
2440
2441 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
2442 REP_PROTOCOL_SUCCESS)
2443 return (rc);
2444
2445 if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING))
2446 return (REP_PROTOCOL_FAIL_DELETED);
2447
2448 if (np->rn_flags & RC_NODE_HAS_CHILDREN) {
2449 rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2450 return (REP_PROTOCOL_SUCCESS);
2451 }
2452
2453 (void) pthread_mutex_unlock(&np->rn_lock);
2454 rc = object_fill_children(np);
2455 (void) pthread_mutex_lock(&np->rn_lock);
2456
2457 if (rc == REP_PROTOCOL_SUCCESS) {
2458 np->rn_flags |= RC_NODE_HAS_CHILDREN;
2459 }
2460 rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2461
2462 return (rc);
2463 }
2464
2465 /*
2466 * Returns
2467 * _INVALID_TYPE - type is invalid
2468 * _TYPE_MISMATCH - np doesn't carry children of type type
2469 * _DELETED - np has been deleted
2470 * _NO_RESOURCES
2471 * _SUCCESS - if *cpp is not NULL, it is held
2472 */
2473 static int
2474 rc_node_find_named_child(rc_node_t *np, const char *name, uint32_t type,
2475 rc_node_t **cpp)
2476 {
2477 int ret;
2478 rc_node_t *cp;
2479
2480 assert(MUTEX_HELD(&np->rn_lock));
2481 assert(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP);
2482
2483 ret = rc_node_fill_children(np, type);
2484 if (ret != REP_PROTOCOL_SUCCESS)
2485 return (ret);
2486
2487 for (cp = uu_list_first(np->rn_children);
2488 cp != NULL;
2489 cp = uu_list_next(np->rn_children, cp)) {
2490 if (cp->rn_id.rl_type == type && strcmp(cp->rn_name, name) == 0)
2491 break;
2492 }
2493
2494 if (cp != NULL)
2495 rc_node_hold(cp);
2496 *cpp = cp;
2497
2498 return (REP_PROTOCOL_SUCCESS);
2499 }
2500
2501 static int rc_node_parent(rc_node_t *, rc_node_t **);
2502
2503 /*
2504 * Returns
2505 * _INVALID_TYPE - type is invalid
2506 * _DELETED - np or an ancestor has been deleted
2507 * _NOT_FOUND - no ancestor of specified type exists
2508 * _SUCCESS - *app is held
2509 */
2510 static int
2511 rc_node_find_ancestor(rc_node_t *np, uint32_t type, rc_node_t **app)
2512 {
2513 int ret;
2514 rc_node_t *parent, *np_orig;
2515
2516 if (type >= REP_PROTOCOL_ENTITY_MAX)
2517 return (REP_PROTOCOL_FAIL_INVALID_TYPE);
2518
2519 np_orig = np;
2520
2521 while (np->rn_id.rl_type > type) {
2522 ret = rc_node_parent(np, &parent);
2523 if (np != np_orig)
2524 rc_node_rele(np);
2525 if (ret != REP_PROTOCOL_SUCCESS)
2526 return (ret);
2527 np = parent;
2528 }
2529
2530 if (np->rn_id.rl_type == type) {
2531 *app = parent;
2532 return (REP_PROTOCOL_SUCCESS);
2533 }
2534
2535 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2536 }
2537
2538 #ifndef NATIVE_BUILD
2539 /*
2540 * If the propname property exists in pg, and it is of type string, add its
2541 * values as authorizations to pcp. pg must not be locked on entry, and it is
2542 * returned unlocked. Returns
2543 * _DELETED - pg was deleted
2544 * _NO_RESOURCES
2545 * _NOT_FOUND - pg has no property named propname
2546 * _SUCCESS
2547 */
2548 static int
2549 perm_add_pg_prop_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2550 {
2551 rc_node_t *prop;
2552 int result;
2553
2554 uint_t count;
2555 const char *cp;
2556
2557 assert(!MUTEX_HELD(&pg->rn_lock));
2558 assert(pg->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP);
2559
2560 (void) pthread_mutex_lock(&pg->rn_lock);
2561 result = rc_node_find_named_child(pg, propname,
2562 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
2563 (void) pthread_mutex_unlock(&pg->rn_lock);
2564 if (result != REP_PROTOCOL_SUCCESS) {
2565 switch (result) {
2566 case REP_PROTOCOL_FAIL_DELETED:
2567 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2568 return (result);
2569
2570 case REP_PROTOCOL_FAIL_INVALID_TYPE:
2571 case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
2572 default:
2573 bad_error("rc_node_find_named_child", result);
2574 }
2575 }
2576
2577 if (prop == NULL)
2578 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2579
2580 /* rn_valtype is immutable, so no locking. */
2581 if (prop->rn_valtype != REP_PROTOCOL_TYPE_STRING) {
2582 rc_node_rele(prop);
2583 return (REP_PROTOCOL_SUCCESS);
2584 }
2585
2586 (void) pthread_mutex_lock(&prop->rn_lock);
2587 for (count = prop->rn_values_count, cp = prop->rn_values;
2588 count > 0;
2589 --count) {
2590 result = perm_add_enabling_type(pcp, cp,
2591 (pg->rn_id.rl_ids[ID_INSTANCE]) ? PC_AUTH_INST :
2592 PC_AUTH_SVC);
2593 if (result != REP_PROTOCOL_SUCCESS)
2594 break;
2595
2596 cp = strchr(cp, '\0') + 1;
2597 }
2598
2599 rc_node_rele_locked(prop);
2600
2601 return (result);
2602 }
2603
2604 /*
2605 * Assuming that ent is a service or instance node, if the pgname property
2606 * group has type pgtype, and it has a propname property with string type, add
2607 * its values as authorizations to pcp. If pgtype is NULL, it is not checked.
2608 * Returns
2609 * _SUCCESS
2610 * _DELETED - ent was deleted
2611 * _NO_RESOURCES - no resources
2612 * _NOT_FOUND - ent does not have pgname pg or propname property
2613 */
2614 static int
2615 perm_add_ent_prop_values(permcheck_t *pcp, rc_node_t *ent, const char *pgname,
2616 const char *pgtype, const char *propname)
2617 {
2618 int r;
2619 rc_node_t *pg;
2620
2621 assert(!MUTEX_HELD(&ent->rn_lock));
2622
2623 (void) pthread_mutex_lock(&ent->rn_lock);
2624 r = rc_node_find_named_child(ent, pgname,
2625 REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
2626 (void) pthread_mutex_unlock(&ent->rn_lock);
2627
2628 switch (r) {
2629 case REP_PROTOCOL_SUCCESS:
2630 break;
2631
2632 case REP_PROTOCOL_FAIL_DELETED:
2633 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2634 return (r);
2635
2636 default:
2637 bad_error("rc_node_find_named_child", r);
2638 }
2639
2640 if (pg == NULL)
2641 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2642
2643 if (pgtype == NULL || strcmp(pg->rn_type, pgtype) == 0) {
2644 r = perm_add_pg_prop_values(pcp, pg, propname);
2645 switch (r) {
2646 case REP_PROTOCOL_FAIL_DELETED:
2647 r = REP_PROTOCOL_FAIL_NOT_FOUND;
2648 break;
2649
2650 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2651 case REP_PROTOCOL_SUCCESS:
2652 case REP_PROTOCOL_FAIL_NOT_FOUND:
2653 break;
2654
2655 default:
2656 bad_error("perm_add_pg_prop_values", r);
2657 }
2658 }
2659
2660 rc_node_rele(pg);
2661
2662 return (r);
2663 }
2664
2665 /*
2666 * If pg has a property named propname, and is string typed, add its values as
2667 * authorizations to pcp. If pg has no such property, and its parent is an
2668 * instance, walk up to the service and try doing the same with the property
2669 * of the same name from the property group of the same name. Returns
2670 * _SUCCESS
2671 * _NO_RESOURCES
2672 * _DELETED - pg (or an ancestor) was deleted
2673 */
2674 static int
2675 perm_add_enabling_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2676 {
2677 int r;
2678 char pgname[REP_PROTOCOL_NAME_LEN + 1];
2679 rc_node_t *svc;
2680 size_t sz;
2681
2682 r = perm_add_pg_prop_values(pcp, pg, propname);
2683
2684 if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2685 return (r);
2686
2687 assert(!MUTEX_HELD(&pg->rn_lock));
2688
2689 if (pg->rn_id.rl_ids[ID_INSTANCE] == 0)
2690 return (REP_PROTOCOL_SUCCESS);
2691
2692 sz = strlcpy(pgname, pg->rn_name, sizeof (pgname));
2693 assert(sz < sizeof (pgname));
2694
2695 /*
2696 * If pg is a child of an instance or snapshot, we want to compose the
2697 * authorization property with the service's (if it exists). The
2698 * snapshot case applies only to read_authorization. In all other
2699 * cases, the pg's parent will be the instance.
2700 */
2701 r = rc_node_find_ancestor(pg, REP_PROTOCOL_ENTITY_SERVICE, &svc);
2702 if (r != REP_PROTOCOL_SUCCESS) {
2703 assert(r == REP_PROTOCOL_FAIL_DELETED);
2704 return (r);
2705 }
2706 assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
2707
2708 r = perm_add_ent_prop_values(pcp, svc, pgname, NULL, propname);
2709
2710 rc_node_rele(svc);
2711
2712 if (r == REP_PROTOCOL_FAIL_NOT_FOUND)
2713 r = REP_PROTOCOL_SUCCESS;
2714
2715 return (r);
2716 }
2717
2718 /*
2719 * Call perm_add_enabling_values() for the "action_authorization" property of
2720 * the "general" property group of inst. Returns
2721 * _DELETED - inst (or an ancestor) was deleted
2722 * _NO_RESOURCES
2723 * _SUCCESS
2724 */
2725 static int
2726 perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst)
2727 {
2728 int r;
2729 rc_node_t *svc;
2730
2731 assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
2732
2733 r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL,
2734 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2735
2736 if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2737 return (r);
2738
2739 r = rc_node_parent(inst, &svc);
2740 if (r != REP_PROTOCOL_SUCCESS) {
2741 assert(r == REP_PROTOCOL_FAIL_DELETED);
2742 return (r);
2743 }
2744
2745 r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL,
2746 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2747
2748 return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r);
2749 }
2750 #endif /* NATIVE_BUILD */
2751
2752 void
2753 rc_node_ptr_init(rc_node_ptr_t *out)
2754 {
2755 out->rnp_node = NULL;
2756 out->rnp_auth_string = NULL;
2757 out->rnp_authorized = RC_AUTH_UNKNOWN;
2758 out->rnp_deleted = 0;
2759 }
2760
2761 void
2762 rc_node_ptr_free_mem(rc_node_ptr_t *npp)
2763 {
2764 if (npp->rnp_auth_string != NULL) {
2765 free((void *)npp->rnp_auth_string);
2766 npp->rnp_auth_string = NULL;
2767 }
2768 }
2769
2770 static void
2771 rc_node_assign(rc_node_ptr_t *out, rc_node_t *val)
2772 {
2773 rc_node_t *cur = out->rnp_node;
2774 if (val != NULL)
2775 rc_node_hold(val);
2776 out->rnp_node = val;
2777 if (cur != NULL) {
2778 NODE_LOCK(cur);
2779
2780 /*
2781 * Register the ephemeral reference created by reading
2782 * out->rnp_node into cur. Note that the persistent
2783 * reference we're destroying is locked by the client
2784 * layer.
2785 */
2786 rc_node_hold_ephemeral_locked(cur);
2787
2788 rc_node_rele_locked(cur);
2789 }
2790 out->rnp_authorized = RC_AUTH_UNKNOWN;
2791 rc_node_ptr_free_mem(out);
2792 out->rnp_deleted = 0;
2793 }
2794
2795 void
2796 rc_node_clear(rc_node_ptr_t *out, int deleted)
2797 {
2798 rc_node_assign(out, NULL);
2799 out->rnp_deleted = deleted;
2800 }
2801
2802 void
2803 rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val)
2804 {
2805 rc_node_assign(out, val->rnp_node);
2806 }
2807
2808 /*
2809 * rc_node_check()/RC_NODE_CHECK()
2810 * generic "entry" checks, run before the use of an rc_node pointer.
2811 *
2812 * Fails with
2813 * _NOT_SET
2814 * _DELETED
2815 */
2816 static int
2817 rc_node_check_and_lock(rc_node_t *np)
2818 {
2819 int result = REP_PROTOCOL_SUCCESS;
2820 if (np == NULL)
2821 return (REP_PROTOCOL_FAIL_NOT_SET);
2822
2823 (void) pthread_mutex_lock(&np->rn_lock);
2824 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2825 result = REP_PROTOCOL_FAIL_DELETED;
2826 (void) pthread_mutex_unlock(&np->rn_lock);
2827 }
2828
2829 return (result);
2830 }
2831
2832 /*
2833 * Fails with
2834 * _NOT_SET - ptr is reset
2835 * _DELETED - node has been deleted
2836 */
2837 static rc_node_t *
2838 rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res)
2839 {
2840 rc_node_t *np = npp->rnp_node;
2841 if (np == NULL) {
2842 if (npp->rnp_deleted)
2843 *res = REP_PROTOCOL_FAIL_DELETED;
2844 else
2845 *res = REP_PROTOCOL_FAIL_NOT_SET;
2846 return (NULL);
2847 }
2848
2849 (void) pthread_mutex_lock(&np->rn_lock);
2850 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2851 (void) pthread_mutex_unlock(&np->rn_lock);
2852 rc_node_clear(npp, 1);
2853 *res = REP_PROTOCOL_FAIL_DELETED;
2854 return (NULL);
2855 }
2856 return (np);
2857 }
2858
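/*
 * The macros below wrap the two check functions above.  On failure they
 * cause the calling function to return _NOT_SET or _DELETED; on success the
 * _AND_LOCK forms leave rn_lock held and the _AND_HOLD forms leave a
 * reference on the node.  HOLD_FLAG_OR_RETURN and
 * HOLD_PTR_FLAG_OR_FREE_AND_RETURN do the analogous thing for
 * rc_node_hold_flag(), with the _PTR_ form also clearing the client's
 * pointer (and freeing any passed-in memory) if the node has died.
 */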
2859 #define RC_NODE_CHECK_AND_LOCK(n) { \
2860 int rc__res; \
2861 if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \
2862 return (rc__res); \
2863 }
2864
2865 #define RC_NODE_CHECK(n) { \
2866 RC_NODE_CHECK_AND_LOCK(n); \
2867 (void) pthread_mutex_unlock(&(n)->rn_lock); \
2868 }
2869
2870 #define RC_NODE_CHECK_AND_HOLD(n) { \
2871 RC_NODE_CHECK_AND_LOCK(n); \
2872 rc_node_hold_locked(n); \
2873 (void) pthread_mutex_unlock(&(n)->rn_lock); \
2874 }
2875
2876 #define RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) { \
2877 int rc__res; \
2878 if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL) \
2879 return (rc__res); \
2880 }
2881
2882 #define RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, mem) { \
2883 int rc__res; \
2884 if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == \
2885 NULL) { \
2886 if ((mem) != NULL) \
2887 free((mem)); \
2888 return (rc__res); \
2889 } \
2890 }
2891
2892 #define RC_NODE_PTR_GET_CHECK(np, npp) { \
2893 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
2894 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2895 }
2896
2897 #define RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) { \
2898 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
2899 rc_node_hold_locked(np); \
2900 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2901 }
2902
2903 #define HOLD_FLAG_OR_RETURN(np, flag) { \
2904 assert(MUTEX_HELD(&(np)->rn_lock)); \
2905 assert(!((np)->rn_flags & RC_NODE_DEAD)); \
2906 if (!rc_node_hold_flag((np), flag)) { \
2907 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2908 return (REP_PROTOCOL_FAIL_DELETED); \
2909 } \
2910 }
2911
2912 #define HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, flag, mem) { \
2913 assert(MUTEX_HELD(&(np)->rn_lock)); \
2914 if (!rc_node_hold_flag((np), flag)) { \
2915 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2916 assert((np) == (npp)->rnp_node); \
2917 rc_node_clear(npp, 1); \
2918 if ((mem) != NULL) \
2919 free((mem)); \
2920 return (REP_PROTOCOL_FAIL_DELETED); \
2921 } \
2922 }
2923
2924 int
2925 rc_local_scope(uint32_t type, rc_node_ptr_t *out)
2926 {
2927 if (type != REP_PROTOCOL_ENTITY_SCOPE) {
2928 rc_node_clear(out, 0);
2929 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2930 }
2931
2932 /*
2933 * the main scope never gets destroyed
2934 */
2935 rc_node_assign(out, rc_scope);
2936
2937 return (REP_PROTOCOL_SUCCESS);
2938 }
2939
2940 /*
2941 * Fails with
2942 * _NOT_SET - npp is not set
2943 * _DELETED - the node npp pointed at has been deleted
2944 * _TYPE_MISMATCH - type is not _SCOPE
2945 * _NOT_FOUND - scope has no parent
2946 */
2947 static int
2948 rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
2949 {
2950 rc_node_t *np;
2951
2952 rc_node_clear(out, 0);
2953
2954 RC_NODE_PTR_GET_CHECK(np, npp);
2955
2956 if (type != REP_PROTOCOL_ENTITY_SCOPE)
2957 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2958
2959 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2960 }
2961
2962 static int rc_node_pg_check_read_protect(rc_node_t *);
2963
2964 /*
2965 * Fails with
2966 * _NOT_SET
2967 * _DELETED
2968 * _NOT_APPLICABLE
2969 * _NOT_FOUND
2970 * _BAD_REQUEST
2971 * _TRUNCATED
2972 * _NO_RESOURCES
2973 */
2974 int
2975 rc_node_name(rc_node_ptr_t *npp, char *buf, size_t sz, uint32_t answertype,
2976 size_t *sz_out)
2977 {
2978 size_t actual;
2979 rc_node_t *np;
2980
2981 assert(sz == *sz_out);
2982
2983 RC_NODE_PTR_GET_CHECK(np, npp);
2984
2985 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
2986 np = np->rn_cchain[0];
2987 RC_NODE_CHECK(np);
2988 }
2989
2990 switch (answertype) {
2991 case RP_ENTITY_NAME_NAME:
2992 if (np->rn_name == NULL)
2993 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2994 actual = strlcpy(buf, np->rn_name, sz);
2995 break;
2996 case RP_ENTITY_NAME_PGTYPE:
2997 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
2998 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2999 actual = strlcpy(buf, np->rn_type, sz);
3000 break;
3001 case RP_ENTITY_NAME_PGFLAGS:
3002 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
3003 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3004 actual = snprintf(buf, sz, "%d", np->rn_pgflags);
3005 break;
3006 case RP_ENTITY_NAME_SNAPLEVEL_SCOPE:
3007 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3008 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3009 actual = strlcpy(buf, np->rn_snaplevel->rsl_scope, sz);
3010 break;
3011 case RP_ENTITY_NAME_SNAPLEVEL_SERVICE:
3012 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3013 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3014 actual = strlcpy(buf, np->rn_snaplevel->rsl_service, sz);
3015 break;
3016 case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE:
3017 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3018 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3019 if (np->rn_snaplevel->rsl_instance == NULL)
3020 return (REP_PROTOCOL_FAIL_NOT_FOUND);
3021 actual = strlcpy(buf, np->rn_snaplevel->rsl_instance, sz);
3022 break;
3023 case RP_ENTITY_NAME_PGREADPROT:
3024 {
3025 int ret;
3026
3027 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
3028 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3029 ret = rc_node_pg_check_read_protect(np);
3030 assert(ret != REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3031 switch (ret) {
3032 case REP_PROTOCOL_FAIL_PERMISSION_DENIED:
3033 actual = snprintf(buf, sz, "1");
3034 break;
3035 case REP_PROTOCOL_SUCCESS:
3036 actual = snprintf(buf, sz, "0");
3037 break;
3038 default:
3039 return (ret);
3040 }
3041 break;
3042 }
3043 default:
3044 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3045 }
3046 if (actual >= sz)
3047 return (REP_PROTOCOL_FAIL_TRUNCATED);
3048
3049 *sz_out = actual;
3050 return (REP_PROTOCOL_SUCCESS);
3051 }
3052
3053 int
3054 rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out)
3055 {
3056 rc_node_t *np;
3057
3058 RC_NODE_PTR_GET_CHECK(np, npp);
3059
3060 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
3061 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3062
3063 *out = np->rn_valtype;
3064
3065 return (REP_PROTOCOL_SUCCESS);
3066 }
3067
3068 /*
3069 * Get np's parent. If np is deleted, returns _DELETED. Otherwise puts a hold
3070 * on the parent, returns a pointer to it in *out, and returns _SUCCESS.
3071 */
3072 static int
3073 rc_node_parent(rc_node_t *np, rc_node_t **out)
3074 {
3075 rc_node_t *pnp;
3076 rc_node_t *np_orig;
3077
3078 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3079 RC_NODE_CHECK_AND_LOCK(np);
3080 } else {
3081 np = np->rn_cchain[0];
3082 RC_NODE_CHECK_AND_LOCK(np);
3083 }
3084
3085 np_orig = np;
3086 rc_node_hold_locked(np); /* simplifies the remainder */
3087
3088 for (;;) {
3089 if (!rc_node_wait_flag(np,
3090 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
3091 rc_node_rele_locked(np);
3092 return (REP_PROTOCOL_FAIL_DELETED);
3093 }
3094
3095 if (!(np->rn_flags & RC_NODE_OLD))
3096 break;
3097
3098 rc_node_rele_locked(np);
3099 np = cache_lookup(&np_orig->rn_id);
3100 assert(np != np_orig);
3101
3102 if (np == NULL)
3103 goto deleted;
3104 (void) pthread_mutex_lock(&np->rn_lock);
3105 }
3106
3107 /* guaranteed to succeed without dropping the lock */
3108 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
3109 (void) pthread_mutex_unlock(&np->rn_lock);
3110 *out = NULL;
3111 rc_node_rele(np);
3112 return (REP_PROTOCOL_FAIL_DELETED);
3113 }
3114
3115 assert(np->rn_parent != NULL);
3116 pnp = np->rn_parent;
3117 (void) pthread_mutex_unlock(&np->rn_lock);
3118
3119 (void) pthread_mutex_lock(&pnp->rn_lock);
3120 (void) pthread_mutex_lock(&np->rn_lock);
3121 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
3122 (void) pthread_mutex_unlock(&np->rn_lock);
3123
3124 rc_node_hold_locked(pnp);
3125
3126 (void) pthread_mutex_unlock(&pnp->rn_lock);
3127
3128 rc_node_rele(np);
3129 *out = pnp;
3130 return (REP_PROTOCOL_SUCCESS);
3131
3132 deleted:
3133 rc_node_rele(np);
3134 return (REP_PROTOCOL_FAIL_DELETED);
3135 }
3136
3137 /*
3138 * Fails with
3139 * _NOT_SET
3140 * _DELETED
3141 */
3142 static int
3143 rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out)
3144 {
3145 rc_node_t *np;
3146
3147 RC_NODE_PTR_GET_CHECK(np, npp);
3148
3149 return (rc_node_parent(np, out));
3150 }
3151
3152 /*
3153 * Fails with
3154 * _NOT_SET - npp is not set
3155 * _DELETED - the node npp pointed at has been deleted
3156 * _TYPE_MISMATCH - npp's node's parent is not of type type
3157 *
3158 * If npp points to a scope, can also fail with
3159 * _NOT_FOUND - scope has no parent
3160 */
3161 int
3162 rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
3163 {
3164 rc_node_t *pnp;
3165 int rc;
3166
3167 if (npp->rnp_node != NULL &&
3168 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE)
3169 return (rc_scope_parent_scope(npp, type, out));
3170
3171 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) {
3172 rc_node_clear(out, 0);
3173 return (rc);
3174 }
3175
3176 if (type != pnp->rn_id.rl_type) {
3177 rc_node_rele(pnp);
3178 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3179 }
3180
3181 rc_node_assign(out, pnp);
3182 rc_node_rele(pnp);
3183
3184 return (REP_PROTOCOL_SUCCESS);
3185 }
3186
3187 int
3188 rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out)
3189 {
3190 rc_node_t *pnp;
3191 int rc;
3192
3193 if (npp->rnp_node != NULL &&
3194 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) {
3195 *type_out = REP_PROTOCOL_ENTITY_SCOPE;
3196 return (REP_PROTOCOL_SUCCESS);
3197 }
3198
3199 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS)
3200 return (rc);
3201
3202 *type_out = pnp->rn_id.rl_type;
3203
3204 rc_node_rele(pnp);
3205
3206 return (REP_PROTOCOL_SUCCESS);
3207 }
3208
3209 /*
3210 * Fails with
3211 * _INVALID_TYPE - type is invalid
3212 * _TYPE_MISMATCH - np doesn't carry children of type type
3213 * _DELETED - np has been deleted
3214 * _NOT_FOUND - no child with that name/type combo found
3215 * _NO_RESOURCES
3216 * _BACKEND_ACCESS
3217 */
3218 int
3219 rc_node_get_child(rc_node_ptr_t *npp, const char *name, uint32_t type,
3220 rc_node_ptr_t *outp)
3221 {
3222 rc_node_t *np, *cp;
3223 rc_node_t *child = NULL;
3224 int ret, idx;
3225
3226 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
3227 if ((ret = rc_check_type_name(type, name)) == REP_PROTOCOL_SUCCESS) {
3228 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3229 ret = rc_node_find_named_child(np, name, type, &child);
3230 } else {
3231 (void) pthread_mutex_unlock(&np->rn_lock);
3232 ret = REP_PROTOCOL_SUCCESS;
3233 for (idx = 0; idx < COMPOSITION_DEPTH; idx++) {
3234 cp = np->rn_cchain[idx];
3235 if (cp == NULL)
3236 break;
3237 RC_NODE_CHECK_AND_LOCK(cp);
3238 ret = rc_node_find_named_child(cp, name, type,
3239 &child);
3240 (void) pthread_mutex_unlock(&cp->rn_lock);
3241 /*
3242 * loop only if we succeeded, but no child of
3243 * the correct name was found.
3244 */
3245 if (ret != REP_PROTOCOL_SUCCESS ||
3246 child != NULL)
3247 break;
3248 }
3249 (void) pthread_mutex_lock(&np->rn_lock);
3250 }
3251 }
3252 (void) pthread_mutex_unlock(&np->rn_lock);
3253
3254 if (ret == REP_PROTOCOL_SUCCESS) {
3255 rc_node_assign(outp, child);
3256 if (child != NULL)
3257 rc_node_rele(child);
3258 else
3259 ret = REP_PROTOCOL_FAIL_NOT_FOUND;
3260 } else {
3261 rc_node_assign(outp, NULL);
3262 }
3263 return (ret);
3264 }
3265
3266 int
3267 rc_node_update(rc_node_ptr_t *npp)
3268 {
3269 cache_bucket_t *bp;
3270 rc_node_t *np = npp->rnp_node;
3271 rc_node_t *nnp;
3272 rc_node_t *cpg = NULL;
3273
3274 if (np != NULL &&
3275 np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3276 /*
3277 * If we're updating a composed property group, actually
3278 * update the top-level property group & return the
3279 		 * appropriate value.  But leave *npp pointing at us.
3280 */
3281 cpg = np;
3282 np = np->rn_cchain[0];
3283 }
3284
3285 RC_NODE_CHECK(np);
3286
3287 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP &&
3288 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)
3289 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3290
3291 for (;;) {
3292 bp = cache_hold(np->rn_hash);
3293 nnp = cache_lookup_unlocked(bp, &np->rn_id);
3294 if (nnp == NULL) {
3295 cache_release(bp);
3296 rc_node_clear(npp, 1);
3297 return (REP_PROTOCOL_FAIL_DELETED);
3298 }
3299 /*
3300 * grab the lock before dropping the cache bucket, so
3301 * that no one else can sneak in
3302 */
3303 (void) pthread_mutex_lock(&nnp->rn_lock);
3304 cache_release(bp);
3305
3306 if (!(nnp->rn_flags & RC_NODE_IN_TX) ||
3307 !rc_node_wait_flag(nnp, RC_NODE_IN_TX))
3308 break;
3309
3310 rc_node_rele_locked(nnp);
3311 }
3312
3313 /*
3314 * If it is dead, we want to update it so that it will continue to
3315 * report being dead.
3316 */
3317 if (nnp->rn_flags & RC_NODE_DEAD) {
3318 (void) pthread_mutex_unlock(&nnp->rn_lock);
3319 if (nnp != np && cpg == NULL)
3320 rc_node_assign(npp, nnp); /* updated */
3321 rc_node_rele(nnp);
3322 return (REP_PROTOCOL_FAIL_DELETED);
3323 }
3324
3325 assert(!(nnp->rn_flags & RC_NODE_OLD));
3326 (void) pthread_mutex_unlock(&nnp->rn_lock);
3327
3328 if (nnp != np && cpg == NULL)
3329 rc_node_assign(npp, nnp); /* updated */
3330
3331 rc_node_rele(nnp);
3332
3333 return ((nnp == np)? REP_PROTOCOL_SUCCESS : REP_PROTOCOL_DONE);
3334 }
3335
3336 /*
3337 * does a generic modification check, for creation, deletion, and snapshot
3338 * management only. Property group transactions have different checks.
3339 *
3340 * The string returned to *match_auth must be freed.
3341 */
3342 static perm_status_t
3343 rc_node_modify_permission_check(char **match_auth)
3344 {
3345 permcheck_t *pcp;
3346 perm_status_t granted = PERM_GRANTED;
3347 int rc;
3348
3349 *match_auth = NULL;
3350 #ifdef NATIVE_BUILD
3351 if (!client_is_privileged()) {
3352 granted = PERM_DENIED;
3353 }
3354 return (granted);
3355 #else
3356 if (is_main_repository == 0)
3357 return (PERM_GRANTED);
3358 pcp = pc_create();
3359 if (pcp != NULL) {
3360 rc = perm_add_enabling(pcp, AUTH_MODIFY);
3361
3362 if (rc == REP_PROTOCOL_SUCCESS) {
3363 granted = perm_granted(pcp);
3364
3365 if ((granted == PERM_GRANTED) ||
3366 (granted == PERM_DENIED)) {
3367 /*
3368 * Copy off the authorization
3369 * string before freeing pcp.
3370 */
3371 *match_auth =
3372 strdup(pcp->pc_auth_string);
3373 if (*match_auth == NULL)
3374 granted = PERM_FAIL;
3375 }
3376 } else {
3377 granted = PERM_FAIL;
3378 }
3379
3380 pc_free(pcp);
3381 } else {
3382 granted = PERM_FAIL;
3383 }
3384
3385 return (granted);
3386 #endif /* NATIVE_BUILD */
3387 }
3388
3389 /*
3390 * Native builds are done to create svc.configd-native. This program runs
3391 * only on the Solaris build machines to create the seed repository, and it
3392 * is compiled against the build machine's header files. The ADT_smf_*
3393 * symbols may not be defined in these header files. For this reason
3394 * smf_annotation_event(), _smf_audit_event() and special_property_event()
3395 * are not compiled for native builds.
3396 */
3397 #ifndef NATIVE_BUILD
3398
3399 /*
3400 * This function generates an annotation audit event if one has been setup.
3401 * Annotation events should only be generated immediately before the audit
3402 * record from the first attempt to modify the repository from a client
3403 * which has requested an annotation.
3404 */
3405 static void
3406 smf_annotation_event(int status, int return_val)
3407 {
3408 adt_session_data_t *session;
3409 adt_event_data_t *event = NULL;
3410 char file[MAXPATHLEN];
3411 char operation[REP_PROTOCOL_NAME_LEN];
3412
3413 /* Don't audit if we're using an alternate repository. */
3414 if (is_main_repository == 0)
3415 return;
3416
3417 if (client_annotation_needed(operation, sizeof (operation), file,
3418 sizeof (file)) == 0) {
3419 return;
3420 }
3421 if (file[0] == 0) {
3422 (void) strlcpy(file, "NO FILE", sizeof (file));
3423 }
3424 if (operation[0] == 0) {
3425 (void) strlcpy(operation, "NO OPERATION",
3426 sizeof (operation));
3427 }
3428 if ((session = get_audit_session()) == NULL)
3429 return;
3430 if ((event = adt_alloc_event(session, ADT_smf_annotation)) == NULL) {
3431 uu_warn("smf_annotation_event cannot allocate event "
3432 "data. %s\n", strerror(errno));
3433 return;
3434 }
3435 event->adt_smf_annotation.operation = operation;
3436 event->adt_smf_annotation.file = file;
3437 if (adt_put_event(event, status, return_val) == 0) {
3438 client_annotation_finished();
3439 } else {
3440 uu_warn("smf_annotation_event failed to put event. "
3441 "%s\n", strerror(errno));
3442 }
3443 adt_free_event(event);
3444 }
3445
3446 /*
3447 * _smf_audit_event interacts with the security auditing system to generate
3448 * an audit event structure. It establishes an audit session and allocates
3449 * an audit event. The event is filled in from the audit data, and
3450 * adt_put_event is called to generate the event.
3451 */
3452 static void
3453 _smf_audit_event(au_event_t event_id, int status, int return_val,
3454 audit_event_data_t *data)
3455 {
3456 char *auth_used;
3457 char *fmri;
3458 char *prop_value;
3459 adt_session_data_t *session;
3460 adt_event_data_t *event = NULL;
3461
3462 /* Don't audit if we're using an alternate repository */
3463 if (is_main_repository == 0)
3464 return;
3465
3466 smf_annotation_event(status, return_val);
3467 if ((session = get_audit_session()) == NULL)
3468 return;
3469 if ((event = adt_alloc_event(session, event_id)) == NULL) {
3470 uu_warn("_smf_audit_event cannot allocate event "
3471 "data. %s\n", strerror(errno));
3472 return;
3473 }
3474
3475 /*
3476 * Handle possibility of NULL authorization strings, FMRIs and
3477 * property values.
3478 */
3479 if (data->ed_auth == NULL) {
3480 auth_used = "PRIVILEGED";
3481 } else {
3482 auth_used = data->ed_auth;
3483 }
3484 if (data->ed_fmri == NULL) {
3485 syslog(LOG_WARNING, "_smf_audit_event called with "
3486 "empty FMRI string");
3487 fmri = "UNKNOWN FMRI";
3488 } else {
3489 fmri = data->ed_fmri;
3490 }
3491 if (data->ed_prop_value == NULL) {
3492 prop_value = "";
3493 } else {
3494 prop_value = data->ed_prop_value;
3495 }
3496
3497 /* Fill in the event data. */
3498 switch (event_id) {
3499 case ADT_smf_attach_snap:
3500 event->adt_smf_attach_snap.auth_used = auth_used;
3501 event->adt_smf_attach_snap.old_fmri = data->ed_old_fmri;
3502 event->adt_smf_attach_snap.old_name = data->ed_old_name;
3503 event->adt_smf_attach_snap.new_fmri = fmri;
3504 event->adt_smf_attach_snap.new_name = data->ed_snapname;
3505 break;
3506 case ADT_smf_change_prop:
3507 event->adt_smf_change_prop.auth_used = auth_used;
3508 event->adt_smf_change_prop.fmri = fmri;
3509 event->adt_smf_change_prop.type = data->ed_type;
3510 event->adt_smf_change_prop.value = prop_value;
3511 break;
3512 case ADT_smf_clear:
3513 event->adt_smf_clear.auth_used = auth_used;
3514 event->adt_smf_clear.fmri = fmri;
3515 break;
3516 case ADT_smf_create:
3517 event->adt_smf_create.fmri = fmri;
3518 event->adt_smf_create.auth_used = auth_used;
3519 break;
3520 case ADT_smf_create_npg:
3521 event->adt_smf_create_npg.auth_used = auth_used;
3522 event->adt_smf_create_npg.fmri = fmri;
3523 event->adt_smf_create_npg.type = data->ed_type;
3524 break;
3525 case ADT_smf_create_pg:
3526 event->adt_smf_create_pg.auth_used = auth_used;
3527 event->adt_smf_create_pg.fmri = fmri;
3528 event->adt_smf_create_pg.type = data->ed_type;
3529 break;
3530 case ADT_smf_create_prop:
3531 event->adt_smf_create_prop.auth_used = auth_used;
3532 event->adt_smf_create_prop.fmri = fmri;
3533 event->adt_smf_create_prop.type = data->ed_type;
3534 event->adt_smf_create_prop.value = prop_value;
3535 break;
3536 case ADT_smf_create_snap:
3537 event->adt_smf_create_snap.auth_used = auth_used;
3538 event->adt_smf_create_snap.fmri = fmri;
3539 event->adt_smf_create_snap.name = data->ed_snapname;
3540 break;
3541 case ADT_smf_degrade:
3542 event->adt_smf_degrade.auth_used = auth_used;
3543 event->adt_smf_degrade.fmri = fmri;
3544 break;
3545 case ADT_smf_delete:
3546 event->adt_smf_delete.fmri = fmri;
3547 event->adt_smf_delete.auth_used = auth_used;
3548 break;
3549 case ADT_smf_delete_npg:
3550 event->adt_smf_delete_npg.auth_used = auth_used;
3551 event->adt_smf_delete_npg.fmri = fmri;
3552 event->adt_smf_delete_npg.type = data->ed_type;
3553 break;
3554 case ADT_smf_delete_pg:
3555 event->adt_smf_delete_pg.auth_used = auth_used;
3556 event->adt_smf_delete_pg.fmri = fmri;
3557 event->adt_smf_delete_pg.type = data->ed_type;
3558 break;
3559 case ADT_smf_delete_prop:
3560 event->adt_smf_delete_prop.auth_used = auth_used;
3561 event->adt_smf_delete_prop.fmri = fmri;
3562 break;
3563 case ADT_smf_delete_snap:
3564 event->adt_smf_delete_snap.auth_used = auth_used;
3565 event->adt_smf_delete_snap.fmri = fmri;
3566 event->adt_smf_delete_snap.name = data->ed_snapname;
3567 break;
3568 case ADT_smf_disable:
3569 event->adt_smf_disable.auth_used = auth_used;
3570 event->adt_smf_disable.fmri = fmri;
3571 break;
3572 case ADT_smf_enable:
3573 event->adt_smf_enable.auth_used = auth_used;
3574 event->adt_smf_enable.fmri = fmri;
3575 break;
3576 case ADT_smf_immediate_degrade:
3577 event->adt_smf_immediate_degrade.auth_used = auth_used;
3578 event->adt_smf_immediate_degrade.fmri = fmri;
3579 break;
3580 case ADT_smf_immediate_maintenance:
3581 event->adt_smf_immediate_maintenance.auth_used = auth_used;
3582 event->adt_smf_immediate_maintenance.fmri = fmri;
3583 break;
3584 case ADT_smf_immtmp_maintenance:
3585 event->adt_smf_immtmp_maintenance.auth_used = auth_used;
3586 event->adt_smf_immtmp_maintenance.fmri = fmri;
3587 break;
3588 case ADT_smf_maintenance:
3589 event->adt_smf_maintenance.auth_used = auth_used;
3590 event->adt_smf_maintenance.fmri = fmri;
3591 break;
3592 case ADT_smf_milestone:
3593 event->adt_smf_milestone.auth_used = auth_used;
3594 event->adt_smf_milestone.fmri = fmri;
3595 break;
3596 case ADT_smf_read_prop:
3597 event->adt_smf_read_prop.auth_used = auth_used;
3598 event->adt_smf_read_prop.fmri = fmri;
3599 break;
3600 case ADT_smf_refresh:
3601 event->adt_smf_refresh.auth_used = auth_used;
3602 event->adt_smf_refresh.fmri = fmri;
3603 break;
3604 case ADT_smf_restart:
3605 event->adt_smf_restart.auth_used = auth_used;
3606 event->adt_smf_restart.fmri = fmri;
3607 break;
3608 case ADT_smf_tmp_disable:
3609 event->adt_smf_tmp_disable.auth_used = auth_used;
3610 event->adt_smf_tmp_disable.fmri = fmri;
3611 break;
3612 case ADT_smf_tmp_enable:
3613 event->adt_smf_tmp_enable.auth_used = auth_used;
3614 event->adt_smf_tmp_enable.fmri = fmri;
3615 break;
3616 case ADT_smf_tmp_maintenance:
3617 event->adt_smf_tmp_maintenance.auth_used = auth_used;
3618 event->adt_smf_tmp_maintenance.fmri = fmri;
3619 break;
3620 default:
3621 abort(); /* Need to cover all SMF event IDs */
3622 }
3623
3624 if (adt_put_event(event, status, return_val) != 0) {
3625 uu_warn("_smf_audit_event failed to put event. %s\n",
3626 strerror(errno));
3627 }
3628 adt_free_event(event);
3629 }
3630
3631 /*
3632 * Determine whether the combination of the property group at pg_name and the
3633 * property at prop_name is in the set of special startd properties. If it
3634 * is, a special audit event will be generated.
3635 */
3636 static void
3637 special_property_event(audit_event_data_t *evdp, const char *prop_name,
3638 char *pg_name, int status, int return_val, tx_commit_data_t *tx_data,
3639 size_t cmd_no)
3640 {
3641 au_event_t event_id;
3642 audit_special_prop_item_t search_key;
3643 audit_special_prop_item_t *found;
3644
3645 /* Use bsearch to find the special property information. */
3646 search_key.api_prop_name = prop_name;
3647 search_key.api_pg_name = pg_name;
3648 found = (audit_special_prop_item_t *)bsearch(&search_key,
3649 special_props_list, SPECIAL_PROP_COUNT,
3650 sizeof (special_props_list[0]), special_prop_compare);
3651 if (found == NULL) {
3652 /* Not a special property. */
3653 return;
3654 }
3655
3656 /* Get the event id */
3657 if (found->api_event_func == NULL) {
3658 event_id = found->api_event_id;
3659 } else {
3660 if ((*found->api_event_func)(tx_data, cmd_no,
3661 found->api_pg_name, &event_id) < 0)
3662 return;
3663 }
3664
3665 /* Generate the event. */
3666 smf_audit_event(event_id, status, return_val, evdp);
3667 }
3668 #endif /* NATIVE_BUILD */
3669
3670 /*
3671 * Return a pointer to a string containing all the values of the command
3672 * specified by cmd_no with each value enclosed in quotes. It is up to the
3673 * caller to free the memory at the returned pointer.
3674 */
3675 static char *
3676 generate_value_list(tx_commit_data_t *tx_data, size_t cmd_no)
3677 {
3678 const char *cp;
3679 const char *cur_value;
3680 size_t byte_count = 0;
3681 uint32_t i;
3682 uint32_t nvalues;
3683 size_t str_size = 0;
3684 char *values = NULL;
3685 char *vp;
3686
3687 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
3688 return (NULL);
3689 /*
3690 * First determine the size of the buffer that we will need. We
3691 * will represent each property value surrounded by quotes with a
3692 * space separating the values. Thus, we need to find the total
3693 * size of all the value strings and add 3 for each value.
3694 *
3695 * There is one catch, though. We need to escape any internal
3696 * quote marks in the values. So for each quote in the value we
3697 * need to add another byte to the buffer size.
3698 */
3699 for (i = 0; i < nvalues; i++) {
3700 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3701 REP_PROTOCOL_SUCCESS)
3702 return (NULL);
3703 for (cp = cur_value; *cp != 0; cp++) {
3704 byte_count += (*cp == '"') ? 2 : 1;
3705 }
3706 byte_count += 3; /* surrounding quotes & space */
3707 }
3708 byte_count++; /* nul terminator */
3709 values = malloc(byte_count);
3710 if (values == NULL)
3711 return (NULL);
3712 *values = 0;
3713
3714 /* Now build up the string of values. */
3715 for (i = 0; i < nvalues; i++) {
3716 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3717 REP_PROTOCOL_SUCCESS) {
3718 free(values);
3719 return (NULL);
3720 }
3721 (void) strlcat(values, "\"", byte_count);
3722 for (cp = cur_value, vp = values + strlen(values);
3723 *cp != 0; cp++) {
3724 if (*cp == '"') {
3725 *vp++ = '\\';
3726 *vp++ = '"';
3727 } else {
3728 *vp++ = *cp;
3729 }
3730 }
3731 *vp = 0;
3732 str_size = strlcat(values, "\" ", byte_count);
3733 assert(str_size < byte_count);
3734 }
3735 if (str_size > 0)
3736 values[str_size - 1] = 0; /* get rid of trailing space */
3737 return (values);
3738 }
3739
3740 /*
3741 * generate_property_events takes the transaction commit data at tx_data
3742 * and generates an audit event for each command.
3743 *
3744 * Native builds are done to create svc.configd-native. This program runs
3745 * only on the Solaris build machines to create the seed repository. Thus,
3746 * no audit events should be generated when running svc.configd-native.
3747 */
3748 static void
3749 generate_property_events(
3750 tx_commit_data_t *tx_data,
3751 char *pg_fmri, /* FMRI of property group */
3752 char *auth_string,
3753 int auth_status,
3754 int auth_ret_value)
3755 {
3756 #ifndef NATIVE_BUILD
3757 enum rep_protocol_transaction_action action;
3758 audit_event_data_t audit_data;
3759 size_t count;
3760 size_t cmd_no;
3761 char *cp;
3762 au_event_t event_id;
3763 char fmri[REP_PROTOCOL_FMRI_LEN];
3764 char pg_name[REP_PROTOCOL_NAME_LEN];
3765 char *pg_end; /* End of prop. group fmri */
3766 const char *prop_name;
3767 uint32_t ptype;
3768 char prop_type[3];
3769 enum rep_protocol_responseid rc;
3770 size_t sz_out;
3771
3772 /* Make sure we have something to do. */
3773 if (tx_data == NULL)
3774 return;
3775 if ((count = tx_cmd_count(tx_data)) == 0)
3776 return;
3777
3778 /* Copy the property group fmri */
3779 pg_end = fmri;
3780 pg_end += strlcpy(fmri, pg_fmri, sizeof (fmri));
3781
3782 /*
3783 * Get the property group name. It is the first component after
3784 * the last occurrence of SCF_FMRI_PROPERTYGRP_PREFIX in the fmri.
3785 */
3786 cp = strstr(pg_fmri, SCF_FMRI_PROPERTYGRP_PREFIX);
3787 if (cp == NULL) {
3788 pg_name[0] = 0;
3789 } else {
3790 cp += strlen(SCF_FMRI_PROPERTYGRP_PREFIX);
3791 (void) strlcpy(pg_name, cp, sizeof (pg_name));
3792 }
3793
3794 audit_data.ed_auth = auth_string;
3795 audit_data.ed_fmri = fmri;
3796 audit_data.ed_type = prop_type;
3797
3798 /*
3799 * Property type is two characters (see
3800 * rep_protocol_value_type_t), so terminate the string.
3801 */
3802 prop_type[2] = 0;
3803
3804 for (cmd_no = 0; cmd_no < count; cmd_no++) {
3805 /* Construct FMRI of the property */
3806 *pg_end = 0;
3807 if (tx_cmd_prop(tx_data, cmd_no, &prop_name) !=
3808 REP_PROTOCOL_SUCCESS) {
3809 continue;
3810 }
3811 rc = rc_concat_fmri_element(fmri, sizeof (fmri), &sz_out,
3812 prop_name, REP_PROTOCOL_ENTITY_PROPERTY);
3813 if (rc != REP_PROTOCOL_SUCCESS) {
3814 /*
3815 * If we can't get the FMRI, we'll abandon this
3816 * command
3817 */
3818 continue;
3819 }
3820
3821 /* Generate special property event if necessary. */
3822 special_property_event(&audit_data, prop_name, pg_name,
3823 auth_status, auth_ret_value, tx_data, cmd_no);
3824
3825 /* Capture rest of audit data. */
3826 if (tx_cmd_prop_type(tx_data, cmd_no, &ptype) !=
3827 REP_PROTOCOL_SUCCESS) {
3828 continue;
3829 }
3830 prop_type[0] = REP_PROTOCOL_BASE_TYPE(ptype);
3831 prop_type[1] = REP_PROTOCOL_SUBTYPE(ptype);
3832 audit_data.ed_prop_value = generate_value_list(tx_data, cmd_no);
3833
3834 /* Determine the event type. */
3835 if (tx_cmd_action(tx_data, cmd_no, &action) !=
3836 REP_PROTOCOL_SUCCESS) {
3837 free(audit_data.ed_prop_value);
3838 continue;
3839 }
3840 switch (action) {
3841 case REP_PROTOCOL_TX_ENTRY_NEW:
3842 event_id = ADT_smf_create_prop;
3843 break;
3844 case REP_PROTOCOL_TX_ENTRY_CLEAR:
3845 event_id = ADT_smf_change_prop;
3846 break;
3847 case REP_PROTOCOL_TX_ENTRY_REPLACE:
3848 event_id = ADT_smf_change_prop;
3849 break;
3850 case REP_PROTOCOL_TX_ENTRY_DELETE:
3851 event_id = ADT_smf_delete_prop;
3852 break;
3853 default:
3854 assert(0); /* Missing a case */
3855 free(audit_data.ed_prop_value);
3856 continue;
3857 }
3858
3859 /* Generate the event. */
3860 smf_audit_event(event_id, auth_status, auth_ret_value,
3861 &audit_data);
3862 free(audit_data.ed_prop_value);
3863 }
3864 #endif /* NATIVE_BUILD */
3865 }
3866
3867 /*
3868 * Fails with
3869 * _DELETED - node has been deleted
3870 * _NOT_SET - npp is reset
3871 * _NOT_APPLICABLE - type is _PROPERTYGRP
3872 * _INVALID_TYPE - node is corrupt or type is invalid
3873 * _TYPE_MISMATCH - node cannot have children of type type
3874 * _BAD_REQUEST - name is invalid
3875 * cannot create children for this type of node
3876 * _NO_RESOURCES - out of memory, or could not allocate new id
3877 * _PERMISSION_DENIED
3878 * _BACKEND_ACCESS
3879 * _BACKEND_READONLY
3880 * _EXISTS - child already exists
3881 * _TRUNCATED - truncated FMRI for the audit record
3882 */
3883 int
3884 rc_node_create_child(rc_node_ptr_t *npp, uint32_t type, const char *name,
3885 rc_node_ptr_t *cpp)
3886 {
3887 rc_node_t *np;
3888 rc_node_t *cp = NULL;
3889 int rc;
3890 perm_status_t perm_rc;
3891 size_t sz_out;
3892 char fmri[REP_PROTOCOL_FMRI_LEN];
3893 audit_event_data_t audit_data;
3894
3895 rc_node_clear(cpp, 0);
3896
3897 /*
3898 * rc_node_modify_permission_check() must be called before the node
3899 * is locked. This is because the library functions that check
3900 * authorizations can trigger calls back into configd.
3901 */
3902 perm_rc = rc_node_modify_permission_check(&audit_data.ed_auth);
3903 switch (perm_rc) {
3904 case PERM_DENIED:
3905 /*
3906 * We continue in this case, so that an audit event can be
3907 * generated later in the function.
3908 */
3909 break;
3910 case PERM_GRANTED:
3911 break;
3912 case PERM_GONE:
3913 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3914 case PERM_FAIL:
3915 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
3916 default:
3917 bad_error(rc_node_modify_permission_check, perm_rc);
3918 }
3919
3920 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
3921
3922 audit_data.ed_fmri = fmri;
3923
3924 /*
3925 * there is a separate interface for creating property groups
3926 */
3927 if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP) {
3928 (void) pthread_mutex_unlock(&np->rn_lock);
3929 free(audit_data.ed_auth);
3930 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3931 }
3932
3933 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3934 (void) pthread_mutex_unlock(&np->rn_lock);
3935 np = np->rn_cchain[0];
3936 if ((rc = rc_node_check_and_lock(np)) != REP_PROTOCOL_SUCCESS) {
3937 free(audit_data.ed_auth);
3938 return (rc);
3939 }
3940 }
3941
3942 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
3943 REP_PROTOCOL_SUCCESS) {
3944 (void) pthread_mutex_unlock(&np->rn_lock);
3945 free(audit_data.ed_auth);
3946 return (rc);
3947 }
3948 if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS) {
3949 (void) pthread_mutex_unlock(&np->rn_lock);
3950 free(audit_data.ed_auth);
3951 return (rc);
3952 }
3953
3954 if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
3955 name, type)) != REP_PROTOCOL_SUCCESS) {
3956 (void) pthread_mutex_unlock(&np->rn_lock);
3957 free(audit_data.ed_auth);
3958 return (rc);
3959 }
3960 if (perm_rc == PERM_DENIED) {
3961 (void) pthread_mutex_unlock(&np->rn_lock);
3962 smf_audit_event(ADT_smf_create, ADT_FAILURE,
3963 ADT_FAIL_VALUE_AUTH, &audit_data);
3964 free(audit_data.ed_auth);
3965 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3966 }
3967
3968 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
3969 audit_data.ed_auth);
3970 (void) pthread_mutex_unlock(&np->rn_lock);
3971
3972 rc = object_create(np, type, name, &cp);
3973 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3974
3975 if (rc == REP_PROTOCOL_SUCCESS) {
3976 rc_node_assign(cpp, cp);
3977 rc_node_rele(cp);
3978 }
3979
3980 (void) pthread_mutex_lock(&np->rn_lock);
3981 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
3982 (void) pthread_mutex_unlock(&np->rn_lock);
3983
3984 if (rc == REP_PROTOCOL_SUCCESS) {
3985 smf_audit_event(ADT_smf_create, ADT_SUCCESS, ADT_SUCCESS,
3986 &audit_data);
3987 }
3988
3989 free(audit_data.ed_auth);
3990
3991 return (rc);
3992 }
3993
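/*
 * Create a property group child of *npp named name, with type pgtype and
 * creation flags (currently only SCF_PG_FLAG_NONPERSISTENT is accepted).
 * Modification requires the smf.modify authorization, plus
 * smf.modify.<type> where one exists for pgtype; the non-persistent
 * "actions" and "general_ovr" groups of an instance may instead be
 * created with smf.manage or the instance's action_authorization.  An
 * ADT_smf_create_pg or ADT_smf_create_npg audit event is generated.
 */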
3994 int
3995 rc_node_create_child_pg(rc_node_ptr_t *npp, uint32_t type, const char *name,
3996 const char *pgtype, uint32_t flags, rc_node_ptr_t *cpp)
3997 {
3998 rc_node_t *np;
3999 rc_node_t *cp;
4000 int rc;
4001 permcheck_t *pcp;
4002 perm_status_t granted;
4003 char fmri[REP_PROTOCOL_FMRI_LEN];
4004 audit_event_data_t audit_data;
4005 au_event_t event_id;
4006 size_t sz_out;
4007
4008 audit_data.ed_auth = NULL;
4009 audit_data.ed_fmri = fmri;
4010 audit_data.ed_type = (char *)pgtype;
4011
4012 rc_node_clear(cpp, 0);
4013
4014 /* verify flags is valid */
4015 if (flags & ~SCF_PG_FLAG_NONPERSISTENT)
4016 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4017
4018 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
4019
4020 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4021 rc_node_rele(np);
4022 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
4023 }
4024
4025 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
4026 REP_PROTOCOL_SUCCESS) {
4027 rc_node_rele(np);
4028 return (rc);
4029 }
4030 if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS ||
4031 (rc = rc_check_pgtype_name(pgtype)) != REP_PROTOCOL_SUCCESS) {
4032 rc_node_rele(np);
4033 return (rc);
4034 }
4035
4036 #ifdef NATIVE_BUILD
4037 if (!client_is_privileged()) {
4038 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4039 }
4040 #else
4041 if (flags & SCF_PG_FLAG_NONPERSISTENT) {
4042 event_id = ADT_smf_create_npg;
4043 } else {
4044 event_id = ADT_smf_create_pg;
4045 }
4046 if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
4047 name, REP_PROTOCOL_ENTITY_PROPERTYGRP)) != REP_PROTOCOL_SUCCESS) {
4048 rc_node_rele(np);
4049 return (rc);
4050 }
4051
4052 if (is_main_repository) {
4053 /* Must have .smf.modify or smf.modify.<type> authorization */
4054 pcp = pc_create();
4055 if (pcp != NULL) {
4056 rc = perm_add_enabling(pcp, AUTH_MODIFY);
4057
4058 if (rc == REP_PROTOCOL_SUCCESS) {
4059 const char * const auth =
4060 perm_auth_for_pgtype(pgtype);
4061
4062 if (auth != NULL)
4063 rc = perm_add_enabling(pcp, auth);
4064 }
4065
4066 /*
4067 * .manage or $action_authorization can be used to
4068 * create the actions pg and the general_ovr pg.
4069 */
4070 if (rc == REP_PROTOCOL_SUCCESS &&
4071 (flags & SCF_PG_FLAG_NONPERSISTENT) != 0 &&
4072 np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE &&
4073 ((strcmp(name, AUTH_PG_ACTIONS) == 0 &&
4074 strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) ||
4075 (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 &&
4076 strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
4077 rc = perm_add_enabling(pcp, AUTH_MANAGE);
4078
4079 if (rc == REP_PROTOCOL_SUCCESS)
4080 rc = perm_add_inst_action_auth(pcp, np);
4081 }
4082
4083 if (rc == REP_PROTOCOL_SUCCESS) {
4084 granted = perm_granted(pcp);
4085
4086 rc = map_granted_status(granted, pcp,
4087 &audit_data.ed_auth);
4088 if (granted == PERM_GONE) {
4089 /* No auditing if client gone. */
4090 pc_free(pcp);
4091 rc_node_rele(np);
4092 return (rc);
4093 }
4094 }
4095
4096 pc_free(pcp);
4097 } else {
4098 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4099 }
4100
4101 } else {
4102 rc = REP_PROTOCOL_SUCCESS;
4103 }
4104 #endif /* NATIVE_BUILD */
4105
4106
4107 if (rc != REP_PROTOCOL_SUCCESS) {
4108 rc_node_rele(np);
4109 if (rc != REP_PROTOCOL_FAIL_NO_RESOURCES) {
4110 smf_audit_event(event_id, ADT_FAILURE,
4111 ADT_FAIL_VALUE_AUTH, &audit_data);
4112 }
4113 if (audit_data.ed_auth != NULL)
4114 free(audit_data.ed_auth);
4115 return (rc);
4116 }
4117
4118 (void) pthread_mutex_lock(&np->rn_lock);
4119 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
4120 audit_data.ed_auth);
4121 (void) pthread_mutex_unlock(&np->rn_lock);
4122
4123 rc = object_create_pg(np, type, name, pgtype, flags, &cp);
4124
4125 if (rc == REP_PROTOCOL_SUCCESS) {
4126 rc_node_assign(cpp, cp);
4127 rc_node_rele(cp);
4128 }
4129
4130 (void) pthread_mutex_lock(&np->rn_lock);
4131 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
4132 (void) pthread_mutex_unlock(&np->rn_lock);
4133
4134 if (rc == REP_PROTOCOL_SUCCESS) {
4135 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4136 &audit_data);
4137 }
4138 if (audit_data.ed_auth != NULL)
4139 free(audit_data.ed_auth);
4140
4141 return (rc);
4142 }
4143
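/*
 * Detach a property group notification: remove pnp from its property
 * group's rn_pg_notify_list and close the client's file descriptor.
 * Caller must hold rc_pg_notify_lock.
 */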
4144 static void
4145 rc_pg_notify_fire(rc_node_pg_notify_t *pnp)
4146 {
4147 assert(MUTEX_HELD(&rc_pg_notify_lock));
4148
4149 if (pnp->rnpn_pg != NULL) {
4150 uu_list_remove(pnp->rnpn_pg->rn_pg_notify_list, pnp);
4151 (void) close(pnp->rnpn_fd);
4152
4153 pnp->rnpn_pg = NULL;
4154 pnp->rnpn_fd = -1;
4155 } else {
4156 assert(pnp->rnpn_fd == -1);
4157 }
4158 }
4159
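/*
 * Walk from np_arg up toward its enclosing service, recording the
 * property group, instance, and service names along the way, and hand
 * them to rc_notify_deletion() (which consumes ndp).  If the walk fails
 * because a parent is gone or an unexpected entity type is met, ndp is
 * freed instead.  All RC_NODE_USING_PARENT holds taken during the walk
 * are released before returning.
 */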
4160 static void
4161 rc_notify_node_delete(rc_notify_delete_t *ndp, rc_node_t *np_arg)
4162 {
4163 rc_node_t *svc = NULL;
4164 rc_node_t *inst = NULL;
4165 rc_node_t *pg = NULL;
4166 rc_node_t *np = np_arg;
4167 rc_node_t *nnp;
4168
4169 while (svc == NULL) {
4170 (void) pthread_mutex_lock(&np->rn_lock);
4171 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4172 (void) pthread_mutex_unlock(&np->rn_lock);
4173 goto cleanup;
4174 }
4175 nnp = np->rn_parent;
4176 rc_node_hold_locked(np); /* hold it in place */
4177
4178 switch (np->rn_id.rl_type) {
4179 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4180 assert(pg == NULL);
4181 pg = np;
4182 break;
4183 case REP_PROTOCOL_ENTITY_INSTANCE:
4184 assert(inst == NULL);
4185 inst = np;
4186 break;
4187 case REP_PROTOCOL_ENTITY_SERVICE:
4188 assert(svc == NULL);
4189 svc = np;
4190 break;
4191 default:
4192 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4193 rc_node_rele_locked(np);
4194 goto cleanup;
4195 }
4196
4197 (void) pthread_mutex_unlock(&np->rn_lock);
4198
4199 np = nnp;
4200 if (np == NULL)
4201 goto cleanup;
4202 }
4203
4204 rc_notify_deletion(ndp,
4205 svc->rn_name,
4206 inst != NULL ? inst->rn_name : NULL,
4207 pg != NULL ? pg->rn_name : NULL);
4208
4209 ndp = NULL;
4210
4211 cleanup:
4212 if (ndp != NULL)
4213 uu_free(ndp);
4214
4215 for (;;) {
4216 if (svc != NULL) {
4217 np = svc;
4218 svc = NULL;
4219 } else if (inst != NULL) {
4220 np = inst;
4221 inst = NULL;
4222 } else if (pg != NULL) {
4223 np = pg;
4224 pg = NULL;
4225 } else
4226 break;
4227
4228 (void) pthread_mutex_lock(&np->rn_lock);
4229 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4230 rc_node_rele_locked(np);
4231 }
4232 }
4233
4234 /*
4235 * Hold RC_NODE_DYING_FLAGS on np's descendants. If andformer is true, do
4236 * the same down the rn_former chain.
4237 */
4238 static void
4239 rc_node_delete_hold(rc_node_t *np, int andformer)
4240 {
4241 rc_node_t *cp;
4242
4243 again:
4244 assert(MUTEX_HELD(&np->rn_lock));
4245 assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4246
4247 for (cp = uu_list_first(np->rn_children); cp != NULL;
4248 cp = uu_list_next(np->rn_children, cp)) {
4249 (void) pthread_mutex_lock(&cp->rn_lock);
4250 (void) pthread_mutex_unlock(&np->rn_lock);
4251 if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) {
4252 /*
4253 * already marked as dead -- can't happen, since that
4254 * would require setting RC_NODE_CHILDREN_CHANGING
4255 * in np, and we're holding that...
4256 */
4257 abort();
4258 }
4259 rc_node_delete_hold(cp, andformer); /* recurse, drop lock */
4260
4261 (void) pthread_mutex_lock(&np->rn_lock);
4262 }
4263 if (andformer && (cp = np->rn_former) != NULL) {
4264 (void) pthread_mutex_lock(&cp->rn_lock);
4265 (void) pthread_mutex_unlock(&np->rn_lock);
4266 if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS))
4267 abort(); /* can't happen, see above */
4268 np = cp;
4269 goto again; /* tail-recurse down rn_former */
4270 }
4271 (void) pthread_mutex_unlock(&np->rn_lock);
4272 }
4273
4274 /*
4275 * N.B.: this function drops np->rn_lock on the way out.
4276 */
4277 static void
4278 rc_node_delete_rele(rc_node_t *np, int andformer)
4279 {
4280 rc_node_t *cp;
4281
4282 again:
4283 assert(MUTEX_HELD(&np->rn_lock));
4284 assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4285
4286 for (cp = uu_list_first(np->rn_children); cp != NULL;
4287 cp = uu_list_next(np->rn_children, cp)) {
4288 (void) pthread_mutex_lock(&cp->rn_lock);
4289 (void) pthread_mutex_unlock(&np->rn_lock);
4290 rc_node_delete_rele(cp, andformer); /* recurse, drop lock */
4291 (void) pthread_mutex_lock(&np->rn_lock);
4292 }
4293 if (andformer && (cp = np->rn_former) != NULL) {
4294 (void) pthread_mutex_lock(&cp->rn_lock);
4295 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4296 (void) pthread_mutex_unlock(&np->rn_lock);
4297
4298 np = cp;
4299 goto again; /* tail-recurse down rn_former */
4300 }
4301 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4302 (void) pthread_mutex_unlock(&np->rn_lock);
4303 }
4304
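/*
 * Mark cp RC_NODE_DEAD.  If cp is the current (non-OLD) version, also
 * detach it from its parent, fire any pending property group
 * notifications, and remove it from the notification list and the cache
 * hash table.  Called and returns with cp->rn_lock held, although the
 * lock may be dropped and reacquired on the way.
 */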
4305 static void
4306 rc_node_finish_delete(rc_node_t *cp)
4307 {
4308 cache_bucket_t *bp;
4309 rc_node_pg_notify_t *pnp;
4310
4311 assert(MUTEX_HELD(&cp->rn_lock));
4312
4313 if (!(cp->rn_flags & RC_NODE_OLD)) {
4314 assert(cp->rn_flags & RC_NODE_IN_PARENT);
4315 if (!rc_node_wait_flag(cp, RC_NODE_USING_PARENT)) {
4316 abort(); /* can't happen, see above */
4317 }
4318 cp->rn_flags &= ~RC_NODE_IN_PARENT;
4319 cp->rn_parent = NULL;
4320 rc_node_free_fmri(cp);
4321 }
4322
4323 cp->rn_flags |= RC_NODE_DEAD;
4324
4325 /*
4326 * If this node is not outdated, we need to remove it from
4327 * the notify list and cache hash table.
4328 */
4329 if (!(cp->rn_flags & RC_NODE_OLD)) {
4330 assert(cp->rn_refs > 0); /* can't go away yet */
4331 (void) pthread_mutex_unlock(&cp->rn_lock);
4332
4333 (void) pthread_mutex_lock(&rc_pg_notify_lock);
4334 while ((pnp = uu_list_first(cp->rn_pg_notify_list)) != NULL)
4335 rc_pg_notify_fire(pnp);
4336 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
4337 rc_notify_remove_node(cp);
4338
4339 bp = cache_hold(cp->rn_hash);
4340 (void) pthread_mutex_lock(&cp->rn_lock);
4341 cache_remove_unlocked(bp, cp);
4342 cache_release(bp);
4343 }
4344 }
4345
4346 /*
4347 * For each child, call rc_node_finish_delete() and recurse. If andformer
4348 * is set, also recurse down rn_former. Finally release np, which might
4349 * free it.
4350 */
4351 static void
4352 rc_node_delete_children(rc_node_t *np, int andformer)
4353 {
4354 rc_node_t *cp;
4355
4356 again:
4357 assert(np->rn_refs > 0);
4358 assert(MUTEX_HELD(&np->rn_lock));
4359 assert(np->rn_flags & RC_NODE_DEAD);
4360
4361 while ((cp = uu_list_first(np->rn_children)) != NULL) {
4362 uu_list_remove(np->rn_children, cp);
4363 (void) pthread_mutex_lock(&cp->rn_lock);
4364 (void) pthread_mutex_unlock(&np->rn_lock);
4365 rc_node_hold_locked(cp); /* hold while we recurse */
4366 rc_node_finish_delete(cp);
4367 rc_node_delete_children(cp, andformer); /* drops lock + ref */
4368 (void) pthread_mutex_lock(&np->rn_lock);
4369 }
4370
4371 /*
4372 * When we drop cp's lock, all the children will be gone, so we
4373 * can release DYING_FLAGS.
4374 */
4375 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4376 if (andformer && (cp = np->rn_former) != NULL) {
4377 np->rn_former = NULL; /* unlink */
4378 (void) pthread_mutex_lock(&cp->rn_lock);
4379
4380 /*
4381 * Register the ephemeral reference created by reading
4382 * np->rn_former into cp. Note that the persistent
4383 * reference (np->rn_former) is locked because we haven't
4384 * dropped np's lock since we dropped its RC_NODE_IN_TX
4385 * (via RC_NODE_DYING_FLAGS).
4386 */
4387 rc_node_hold_ephemeral_locked(cp);
4388
4389 (void) pthread_mutex_unlock(&np->rn_lock);
4390 cp->rn_flags &= ~RC_NODE_ON_FORMER;
4391
4392 rc_node_hold_locked(cp); /* hold while we loop */
4393
4394 rc_node_finish_delete(cp);
4395
4396 rc_node_rele(np); /* drop the old reference */
4397
4398 np = cp;
4399 goto again; /* tail-recurse down rn_former */
4400 }
4401 rc_node_rele_locked(np);
4402 }
4403
4404 /*
4405 * The last client or child reference to np, which must be either
4406 * RC_NODE_OLD or RC_NODE_DEAD, has been destroyed. We'll destroy any
4407 * remaining references (e.g., rn_former) and call rc_node_destroy() to
4408 * free np.
4409 */
4410 static void
4411 rc_node_no_client_refs(rc_node_t *np)
4412 {
4413 int unrefed;
4414 rc_node_t *current, *cur;
4415
4416 assert(MUTEX_HELD(&np->rn_lock));
4417 assert(np->rn_refs == 0);
4418 assert(np->rn_other_refs == 0);
4419 assert(np->rn_other_refs_held == 0);
4420
4421 if (np->rn_flags & RC_NODE_DEAD) {
4422 /*
4423 * The node is DEAD, so the deletion code should have
4424 * destroyed all rn_children or rn_former references.
4425 * Since the last client or child reference has been
4426 * destroyed, we're free to destroy np. Unless another
4427 * thread has an ephemeral reference, in which case we'll
4428 * pass the buck.
4429 */
4430 if (np->rn_erefs > 1) {
4431 --np->rn_erefs;
4432 NODE_UNLOCK(np);
4433 return;
4434 }
4435
4436 (void) pthread_mutex_unlock(&np->rn_lock);
4437 rc_node_destroy(np);
4438 return;
4439 }
4440
4441 /* We only collect DEAD and OLD nodes, thank you. */
4442 assert(np->rn_flags & RC_NODE_OLD);
4443
4444 /*
4445 * RC_NODE_UNREFED keeps multiple threads from processing OLD
4446 * nodes. But it's vulnerable to unfriendly scheduling, so full
4447 * use of rn_erefs should supersede it someday.
4448 */
4449 if (np->rn_flags & RC_NODE_UNREFED) {
4450 (void) pthread_mutex_unlock(&np->rn_lock);
4451 return;
4452 }
4453 np->rn_flags |= RC_NODE_UNREFED;
4454
4455 /*
4456 * Now we'll remove the node from the rn_former chain and take its
4457 * DYING_FLAGS.
4458 */
4459
4460 /*
4461 * Since this node is OLD, it should be on an rn_former chain. To
4462 * remove it, we must find the current in-hash object and grab its
4463 * RC_NODE_IN_TX flag to protect the entire rn_former chain.
4464 */
4465
4466 (void) pthread_mutex_unlock(&np->rn_lock);
4467
4468 for (;;) {
4469 current = cache_lookup(&np->rn_id);
4470
4471 if (current == NULL) {
4472 (void) pthread_mutex_lock(&np->rn_lock);
4473
4474 if (np->rn_flags & RC_NODE_DEAD)
4475 goto died;
4476
4477 /*
4478 * We are trying to unreference this node, but the
4479 * owner of the former list does not exist. It must
4480 * be the case that another thread is deleting this
4481 * entire sub-branch, but has not yet reached us.
4482 * We will in short order be deleted.
4483 */
4484 np->rn_flags &= ~RC_NODE_UNREFED;
4485 (void) pthread_mutex_unlock(&np->rn_lock);
4486 return;
4487 }
4488
4489 if (current == np) {
4490 /*
4491 * no longer unreferenced
4492 */
4493 (void) pthread_mutex_lock(&np->rn_lock);
4494 np->rn_flags &= ~RC_NODE_UNREFED;
4495 /* held in cache_lookup() */
4496 rc_node_rele_locked(np);
4497 return;
4498 }
4499
4500 (void) pthread_mutex_lock(&current->rn_lock);
4501 if (current->rn_flags & RC_NODE_OLD) {
4502 /*
4503 * current has been replaced since we looked it
4504 * up. Try again.
4505 */
4506 /* held in cache_lookup() */
4507 rc_node_rele_locked(current);
4508 continue;
4509 }
4510
4511 if (!rc_node_hold_flag(current, RC_NODE_IN_TX)) {
4512 /*
4513 * current has been deleted since we looked it up. Try
4514 * again.
4515 */
4516 /* held in cache_lookup() */
4517 rc_node_rele_locked(current);
4518 continue;
4519 }
4520
4521 /*
4522 * rc_node_hold_flag() might have dropped current's lock, so
4523 * check OLD again.
4524 */
4525 if (!(current->rn_flags & RC_NODE_OLD)) {
4526 /* Not old. Stop looping. */
4527 (void) pthread_mutex_unlock(&current->rn_lock);
4528 break;
4529 }
4530
4531 rc_node_rele_flag(current, RC_NODE_IN_TX);
4532 rc_node_rele_locked(current);
4533 }
4534
4535 /* To take np's RC_NODE_DYING_FLAGS, we need its lock. */
4536 (void) pthread_mutex_lock(&np->rn_lock);
4537
4538 /*
4539 * While we didn't have the lock, a thread may have added
4540 * a reference or changed the flags.
4541 */
4542 if (!(np->rn_flags & (RC_NODE_OLD | RC_NODE_DEAD)) ||
4543 np->rn_refs != 0 || np->rn_other_refs != 0 ||
4544 np->rn_other_refs_held != 0) {
4545 np->rn_flags &= ~RC_NODE_UNREFED;
4546
4547 (void) pthread_mutex_lock(&current->rn_lock);
4548 rc_node_rele_flag(current, RC_NODE_IN_TX);
4549 /* held by cache_lookup() */
4550 rc_node_rele_locked(current);
4551 return;
4552 }
4553
4554 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4555 /*
4556 * Someone deleted the node while we were waiting for
4557 * DYING_FLAGS. Undo the modifications to current.
4558 */
4559 (void) pthread_mutex_unlock(&np->rn_lock);
4560
4561 rc_node_rele_flag(current, RC_NODE_IN_TX);
4562 /* held by cache_lookup() */
4563 rc_node_rele_locked(current);
4564
4565 (void) pthread_mutex_lock(&np->rn_lock);
4566 goto died;
4567 }
4568
4569 /* Take RC_NODE_DYING_FLAGS on np's descendants. */
4570 rc_node_delete_hold(np, 0); /* drops np->rn_lock */
4571
4572 /* Mark np DEAD. This requires the lock. */
4573 (void) pthread_mutex_lock(&np->rn_lock);
4574
4575 /* Recheck for new references. */
4576 if (!(np->rn_flags & RC_NODE_OLD) ||
4577 np->rn_refs != 0 || np->rn_other_refs != 0 ||
4578 np->rn_other_refs_held != 0) {
4579 np->rn_flags &= ~RC_NODE_UNREFED;
4580 rc_node_delete_rele(np, 0); /* drops np's lock */
4581
4582 (void) pthread_mutex_lock(&current->rn_lock);
4583 rc_node_rele_flag(current, RC_NODE_IN_TX);
4584 /* held by cache_lookup() */
4585 rc_node_rele_locked(current);
4586 return;
4587 }
4588
4589 np->rn_flags |= RC_NODE_DEAD;
4590
4591 /*
4592 * Delete the children. This calls rc_node_rele_locked() on np at
4593 * the end, so add a reference to keep the count from going
4594 * negative. It will recurse with RC_NODE_DEAD set, so we'll call
4595 * rc_node_destroy() above, but RC_NODE_UNREFED is also set, so it
4596 * shouldn't actually free() np.
4597 */
4598 rc_node_hold_locked(np);
4599 rc_node_delete_children(np, 0); /* unlocks np */
4600
4601 /* Remove np from current's rn_former chain. */
4602 (void) pthread_mutex_lock(&current->rn_lock);
4603 for (cur = current; cur != NULL && cur->rn_former != np;
4604 cur = cur->rn_former)
4605 ;
4606 assert(cur != NULL && cur != np);
4607
4608 cur->rn_former = np->rn_former;
4609 np->rn_former = NULL;
4610
4611 rc_node_rele_flag(current, RC_NODE_IN_TX);
4612 /* held by cache_lookup() */
4613 rc_node_rele_locked(current);
4614
4615 /* Clear ON_FORMER and UNREFED, and destroy. */
4616 (void) pthread_mutex_lock(&np->rn_lock);
4617 assert(np->rn_flags & RC_NODE_ON_FORMER);
4618 np->rn_flags &= ~(RC_NODE_UNREFED | RC_NODE_ON_FORMER);
4619
4620 if (np->rn_erefs > 1) {
4621 /* Still referenced. Stay execution. */
4622 --np->rn_erefs;
4623 NODE_UNLOCK(np);
4624 return;
4625 }
4626
4627 (void) pthread_mutex_unlock(&np->rn_lock);
4628 rc_node_destroy(np);
4629 return;
4630
4631 died:
4632 /*
4633 * Another thread marked np DEAD. If there still aren't any
4634 * persistent references, destroy the node.
4635 */
4636 np->rn_flags &= ~RC_NODE_UNREFED;
4637
4638 unrefed = (np->rn_refs == 0 && np->rn_other_refs == 0 &&
4639 np->rn_other_refs_held == 0);
4640
4641 if (np->rn_erefs > 0)
4642 --np->rn_erefs;
4643
4644 if (unrefed && np->rn_erefs > 0) {
4645 NODE_UNLOCK(np);
4646 return;
4647 }
4648
4649 (void) pthread_mutex_unlock(&np->rn_lock);
4650
4651 if (unrefed)
4652 rc_node_destroy(np);
4653 }
4654
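/*
 * Map the type of the entity being deleted (and, for property groups,
 * its flags) to the corresponding ADT_smf_delete* audit event id.
 * Native builds generate no audit events, so 0 is returned there.
 */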
4655 static au_event_t
4656 get_delete_event_id(rep_protocol_entity_t entity, uint32_t pgflags)
4657 {
4658 au_event_t id = 0;
4659
4660 #ifndef NATIVE_BUILD
4661 switch (entity) {
4662 case REP_PROTOCOL_ENTITY_SERVICE:
4663 case REP_PROTOCOL_ENTITY_INSTANCE:
4664 id = ADT_smf_delete;
4665 break;
4666 case REP_PROTOCOL_ENTITY_SNAPSHOT:
4667 id = ADT_smf_delete_snap;
4668 break;
4669 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4670 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4671 if (pgflags & SCF_PG_FLAG_NONPERSISTENT) {
4672 id = ADT_smf_delete_npg;
4673 } else {
4674 id = ADT_smf_delete_pg;
4675 }
4676 break;
4677 default:
4678 abort();
4679 }
4680 #endif /* NATIVE_BUILD */
4681 return (id);
4682 }
4683
4684 /*
4685 * Fails with
4686 * _NOT_SET
4687 * _DELETED
4688 * _BAD_REQUEST
4689 * _PERMISSION_DENIED
4690 * _NO_RESOURCES
4691 * _TRUNCATED
4692 * and whatever object_delete() fails with.
4693 */
4694 int
4695 rc_node_delete(rc_node_ptr_t *npp)
4696 {
4697 rc_node_t *np, *np_orig;
4698 rc_node_t *pp = NULL;
4699 int rc;
4700 rc_node_pg_notify_t *pnp;
4701 cache_bucket_t *bp;
4702 rc_notify_delete_t *ndp;
4703 permcheck_t *pcp;
4704 int granted;
4705 au_event_t event_id = 0;
4706 size_t sz_out;
4707 audit_event_data_t audit_data;
4708 int audit_failure = 0;
4709
4710 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
4711
4712 audit_data.ed_fmri = NULL;
4713 audit_data.ed_auth = NULL;
4714 audit_data.ed_snapname = NULL;
4715 audit_data.ed_type = NULL;
4716
4717 switch (np->rn_id.rl_type) {
4718 case REP_PROTOCOL_ENTITY_SERVICE:
4719 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SERVICE,
4720 np->rn_pgflags);
4721 break;
4722 case REP_PROTOCOL_ENTITY_INSTANCE:
4723 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_INSTANCE,
4724 np->rn_pgflags);
4725 break;
4726 case REP_PROTOCOL_ENTITY_SNAPSHOT:
4727 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SNAPSHOT,
4728 np->rn_pgflags);
4729 audit_data.ed_snapname = strdup(np->rn_name);
4730 if (audit_data.ed_snapname == NULL) {
4731 (void) pthread_mutex_unlock(&np->rn_lock);
4732 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4733 }
4734 break; /* deletable */
4735
4736 case REP_PROTOCOL_ENTITY_SCOPE:
4737 case REP_PROTOCOL_ENTITY_SNAPLEVEL:
4738 /* Scopes and snaplevels are indelible. */
4739 (void) pthread_mutex_unlock(&np->rn_lock);
4740 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4741
4742 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4743 (void) pthread_mutex_unlock(&np->rn_lock);
4744 np = np->rn_cchain[0];
4745 RC_NODE_CHECK_AND_LOCK(np);
4746 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_CPROPERTYGRP,
4747 np->rn_pgflags);
4748 break;
4749
4750 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4751 if (np->rn_id.rl_ids[ID_SNAPSHOT] == 0) {
4752 event_id =
4753 get_delete_event_id(REP_PROTOCOL_ENTITY_PROPERTYGRP,
4754 np->rn_pgflags);
4755 audit_data.ed_type = strdup(np->rn_type);
4756 if (audit_data.ed_type == NULL) {
4757 (void) pthread_mutex_unlock(&np->rn_lock);
4758 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4759 }
4760 break;
4761 }
4762
4763 /* Snapshot property groups are indelible. */
4764 (void) pthread_mutex_unlock(&np->rn_lock);
4765 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4766
4767 case REP_PROTOCOL_ENTITY_PROPERTY:
4768 (void) pthread_mutex_unlock(&np->rn_lock);
4769 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4770
4771 default:
4772 assert(0);
4773 abort();
4774 break;
4775 }
4776
4777 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
4778 if (audit_data.ed_fmri == NULL) {
4779 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4780 goto cleanout;
4781 }
4782 np_orig = np;
4783 rc_node_hold_locked(np); /* simplifies rest of the code */
4784
4785 again:
4786 /*
4787 * The following loop is to deal with the fact that snapshots and
4788 * property groups are moving targets -- changes to them result
4789 * in a new "child" node. Since we can only delete from the top node,
4790 * we have to loop until we have a non-RC_NODE_OLD version.
4791 */
4792 for (;;) {
4793 if (!rc_node_wait_flag(np,
4794 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
4795 rc_node_rele_locked(np);
4796 rc = REP_PROTOCOL_FAIL_DELETED;
4797 goto cleanout;
4798 }
4799
4800 if (np->rn_flags & RC_NODE_OLD) {
4801 rc_node_rele_locked(np);
4802 np = cache_lookup(&np_orig->rn_id);
4803 assert(np != np_orig);
4804
4805 if (np == NULL) {
4806 rc = REP_PROTOCOL_FAIL_DELETED;
4807 goto fail;
4808 }
4809 (void) pthread_mutex_lock(&np->rn_lock);
4810 continue;
4811 }
4812
4813 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4814 rc_node_rele_locked(np);
4815 rc_node_clear(npp, 1);
4816 rc = REP_PROTOCOL_FAIL_DELETED;
4817 }
4818
4819 /*
4820 * Mark our parent as children changing. This call drops our
4821 * lock and the RC_NODE_USING_PARENT flag, and returns with
4822 * pp's lock held
4823 */
4824 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
4825 if (pp == NULL) {
4826 /* our parent is gone, we're going next... */
4827 rc_node_rele(np);
4828
4829 rc_node_clear(npp, 1);
4830 rc = REP_PROTOCOL_FAIL_DELETED;
4831 goto cleanout;
4832 }
4833
4834 rc_node_hold_locked(pp); /* hold for later */
4835 (void) pthread_mutex_unlock(&pp->rn_lock);
4836
4837 (void) pthread_mutex_lock(&np->rn_lock);
4838 if (!(np->rn_flags & RC_NODE_OLD))
4839 break; /* not old -- we're done */
4840
4841 (void) pthread_mutex_unlock(&np->rn_lock);
4842 (void) pthread_mutex_lock(&pp->rn_lock);
4843 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4844 rc_node_rele_locked(pp);
4845 (void) pthread_mutex_lock(&np->rn_lock);
4846 continue; /* loop around and try again */
4847 }
4848 /*
4849 * Everyone out of the pool -- we grab everything but
4850 * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep
4851 * any changes from occurring while we are attempting to
4852 * delete the node.
4853 */
4854 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4855 (void) pthread_mutex_unlock(&np->rn_lock);
4856 rc = REP_PROTOCOL_FAIL_DELETED;
4857 goto fail;
4858 }
4859
4860 assert(!(np->rn_flags & RC_NODE_OLD));
4861
4862 if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
4863 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
4864 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4865 (void) pthread_mutex_unlock(&np->rn_lock);
4866 goto fail;
4867 }
4868
4869 #ifdef NATIVE_BUILD
4870 if (!client_is_privileged()) {
4871 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4872 }
4873 #else
4874 if (is_main_repository) {
4875 /* permission check */
4876 (void) pthread_mutex_unlock(&np->rn_lock);
4877 pcp = pc_create();
4878 if (pcp != NULL) {
4879 rc = perm_add_enabling(pcp, AUTH_MODIFY);
4880
4881 /* add .smf.modify.<type> for pgs. */
4882 if (rc == REP_PROTOCOL_SUCCESS && np->rn_id.rl_type ==
4883 REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4884 const char * const auth =
4885 perm_auth_for_pgtype(np->rn_type);
4886
4887 if (auth != NULL)
4888 rc = perm_add_enabling(pcp, auth);
4889 }
4890
4891 if (rc == REP_PROTOCOL_SUCCESS) {
4892 granted = perm_granted(pcp);
4893
4894 rc = map_granted_status(granted, pcp,
4895 &audit_data.ed_auth);
4896 if (granted == PERM_GONE) {
4897 /* No need to audit if client gone. */
4898 pc_free(pcp);
4899 rc_node_rele_flag(np,
4900 RC_NODE_DYING_FLAGS);
4901 return (rc);
4902 }
4903 if (granted == PERM_DENIED)
4904 audit_failure = 1;
4905 }
4906
4907 pc_free(pcp);
4908 } else {
4909 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4910 }
4911
4912 (void) pthread_mutex_lock(&np->rn_lock);
4913 } else {
4914 rc = REP_PROTOCOL_SUCCESS;
4915 }
4916 #endif /* NATIVE_BUILD */
4917
4918 if (rc != REP_PROTOCOL_SUCCESS) {
4919 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4920 (void) pthread_mutex_unlock(&np->rn_lock);
4921 goto fail;
4922 }
4923
4924 ndp = uu_zalloc(sizeof (*ndp));
4925 if (ndp == NULL) {
4926 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4927 (void) pthread_mutex_unlock(&np->rn_lock);
4928 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4929 goto fail;
4930 }
4931
4932 rc_node_delete_hold(np, 1); /* hold entire subgraph, drop lock */
4933
4934 rc = object_delete(np);
4935
4936 if (rc != REP_PROTOCOL_SUCCESS) {
4937 (void) pthread_mutex_lock(&np->rn_lock);
4938 rc_node_delete_rele(np, 1); /* drops lock */
4939 uu_free(ndp);
4940 goto fail;
4941 }
4942
4943 /*
4944 * Now, delicately unlink and delete the object.
4945 *
4946 * Create the delete notification, atomically remove
4947 * from the hash table and set the NODE_DEAD flag, and
4948 * remove from the parent's children list.
4949 */
4950 rc_notify_node_delete(ndp, np); /* frees or uses ndp */
4951
4952 bp = cache_hold(np->rn_hash);
4953
4954 (void) pthread_mutex_lock(&np->rn_lock);
4955 cache_remove_unlocked(bp, np);
4956 cache_release(bp);
4957
4958 np->rn_flags |= RC_NODE_DEAD;
4959
4960 if (pp != NULL) {
4961 /*
4962 * Remove from pp's rn_children. This requires pp's lock,
4963 * so we must drop np's lock to respect lock order.
4964 */
4965 (void) pthread_mutex_unlock(&np->rn_lock);
4966 (void) pthread_mutex_lock(&pp->rn_lock);
4967 (void) pthread_mutex_lock(&np->rn_lock);
4968
4969 uu_list_remove(pp->rn_children, np);
4970
4971 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4972
4973 (void) pthread_mutex_unlock(&pp->rn_lock);
4974
4975 np->rn_flags &= ~RC_NODE_IN_PARENT;
4976 }
4977
4978 /*
4979 * finally, propagate death to our children (including marking
4980 * them DEAD), handle notifications, and release our hold.
4981 */
4982 rc_node_hold_locked(np); /* hold for delete */
4983 rc_node_delete_children(np, 1); /* drops DYING_FLAGS, lock, ref */
4984
4985 rc_node_clear(npp, 1);
4986
4987 (void) pthread_mutex_lock(&rc_pg_notify_lock);
4988 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
4989 rc_pg_notify_fire(pnp);
4990 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
4991 rc_notify_remove_node(np);
4992
4993 rc_node_rele(np);
4994
4995 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4996 &audit_data);
4997 free(audit_data.ed_auth);
4998 free(audit_data.ed_snapname);
4999 free(audit_data.ed_type);
5000 free(audit_data.ed_fmri);
5001 return (rc);
5002
5003 fail:
5004 rc_node_rele(np);
5005 if (rc == REP_PROTOCOL_FAIL_DELETED)
5006 rc_node_clear(npp, 1);
5007 if (pp != NULL) {
5008 (void) pthread_mutex_lock(&pp->rn_lock);
5009 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5010 rc_node_rele_locked(pp); /* drop ref and lock */
5011 }
5012 if (audit_failure) {
5013 smf_audit_event(event_id, ADT_FAILURE,
5014 ADT_FAIL_VALUE_AUTH, &audit_data);
5015 }
5016 cleanout:
5017 free(audit_data.ed_auth);
5018 free(audit_data.ed_snapname);
5019 free(audit_data.ed_type);
5020 free(audit_data.ed_fmri);
5021 return (rc);
5022 }
5023
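/*
 * If *npp is a snapshot, return its first snaplevel in *cpp; if it is a
 * snaplevel, return the next snaplevel of the same snapshot.  Fails with
 * _NOT_APPLICABLE for other entity types, _DELETED if the node or its
 * parent is gone, _NOT_FOUND when there is no further snaplevel, and
 * whatever rc_node_fill_children() fails with.
 */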
5024 int
5025 rc_node_next_snaplevel(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5026 {
5027 rc_node_t *np;
5028 rc_node_t *cp, *pp;
5029 int res;
5030
5031 rc_node_clear(cpp, 0);
5032
5033 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5034
5035 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT &&
5036 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) {
5037 (void) pthread_mutex_unlock(&np->rn_lock);
5038 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
5039 }
5040
5041 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
5042 if ((res = rc_node_fill_children(np,
5043 REP_PROTOCOL_ENTITY_SNAPLEVEL)) != REP_PROTOCOL_SUCCESS) {
5044 (void) pthread_mutex_unlock(&np->rn_lock);
5045 return (res);
5046 }
5047
5048 for (cp = uu_list_first(np->rn_children);
5049 cp != NULL;
5050 cp = uu_list_next(np->rn_children, cp)) {
5051 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5052 continue;
5053 rc_node_hold(cp);
5054 break;
5055 }
5056
5057 (void) pthread_mutex_unlock(&np->rn_lock);
5058 } else {
5059 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5060 (void) pthread_mutex_unlock(&np->rn_lock);
5061 rc_node_clear(npp, 1);
5062 return (REP_PROTOCOL_FAIL_DELETED);
5063 }
5064
5065 /*
5066 * mark our parent as children changing. This call drops our
5067 * lock and the RC_NODE_USING_PARENT flag, and returns with
5068 * pp's lock held
5069 */
5070 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
5071 if (pp == NULL) {
5072 /* our parent is gone, we're going next... */
5073
5074 rc_node_clear(npp, 1);
5075 return (REP_PROTOCOL_FAIL_DELETED);
5076 }
5077
5078 /*
5079 * find the next snaplevel
5080 */
5081 cp = np;
5082 while ((cp = uu_list_next(pp->rn_children, cp)) != NULL &&
5083 cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5084 ;
5085
5086 /* it must match the snaplevel list */
5087 assert((cp == NULL && np->rn_snaplevel->rsl_next == NULL) ||
5088 (cp != NULL && np->rn_snaplevel->rsl_next ==
5089 cp->rn_snaplevel));
5090
5091 if (cp != NULL)
5092 rc_node_hold(cp);
5093
5094 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5095
5096 (void) pthread_mutex_unlock(&pp->rn_lock);
5097 }
5098
5099 rc_node_assign(cpp, cp);
5100 if (cp != NULL) {
5101 rc_node_rele(cp);
5102
5103 return (REP_PROTOCOL_SUCCESS);
5104 }
5105 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5106 }
5107
5108 /*
5109 * This call takes a snapshot (np) and either:
5110 * an existing snapid (to be associated with np), or
5111 * a non-NULL parentp (from which a new snapshot is taken, and associated
5112 * with np)
5113 *
5114 * To do the association, np is duplicated, the duplicate is made to
5115 * represent the new snapid, and np is replaced with the new rc_node_t on
5116 * np's parent's child list. np is placed on the new node's rn_former list,
5117 * and replaces np in cache_hash (so rc_node_update() will find the new one).
5118 *
5119 * old_fmri and old_name point to the original snapshot's FMRI and name.
5120 * These values are used when generating audit events.
5121 *
5122 * Fails with
5123 * _BAD_REQUEST
5124 * _BACKEND_READONLY
5125 * _DELETED
5126 * _NO_RESOURCES
5127 * _TRUNCATED
5128 * _TYPE_MISMATCH
5129 */
5130 static int
5131 rc_attach_snapshot(
5132 rc_node_t *np,
5133 uint32_t snapid,
5134 rc_node_t *parentp,
5135 char *old_fmri,
5136 char *old_name)
5137 {
5138 rc_node_t *np_orig;
5139 rc_node_t *nnp, *prev;
5140 rc_node_t *pp;
5141 int rc;
5142 size_t sz_out;
5143 perm_status_t granted;
5144 au_event_t event_id;
5145 audit_event_data_t audit_data;
5146
5147 if (parentp == NULL) {
5148 assert(old_fmri != NULL);
5149 } else {
5150 assert(snapid == 0);
5151 }
5152 assert(MUTEX_HELD(&np->rn_lock));
5153
5154 /* Gather the audit data. */
5155 /*
5156 * ADT_smf_* symbols may not be defined in the /usr/include header
5157 * files on the build machine. Thus, the following if-else will
5158 * not be compiled when doing native builds.
5159 */
5160 #ifndef NATIVE_BUILD
5161 if (parentp == NULL) {
5162 event_id = ADT_smf_attach_snap;
5163 } else {
5164 event_id = ADT_smf_create_snap;
5165 }
5166 #endif /* NATIVE_BUILD */
5167 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5168 audit_data.ed_snapname = malloc(REP_PROTOCOL_NAME_LEN);
5169 if ((audit_data.ed_fmri == NULL) || (audit_data.ed_snapname == NULL)) {
5170 (void) pthread_mutex_unlock(&np->rn_lock);
5171 free(audit_data.ed_fmri);
5172 free(audit_data.ed_snapname);
5173 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5174 }
5175 audit_data.ed_auth = NULL;
5176 if (strlcpy(audit_data.ed_snapname, np->rn_name,
5177 REP_PROTOCOL_NAME_LEN) >= REP_PROTOCOL_NAME_LEN) {
5178 abort();
5179 }
5180 audit_data.ed_old_fmri = old_fmri;
5181 audit_data.ed_old_name = old_name ? old_name : "NO NAME";
5182
5183 if (parentp == NULL) {
5184 /*
5185 * In the attach case, get the instance FMRIs of the
5186 * snapshots.
5187 */
5188 if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5189 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
5190 (void) pthread_mutex_unlock(&np->rn_lock);
5191 free(audit_data.ed_fmri);
5192 free(audit_data.ed_snapname);
5193 return (rc);
5194 }
5195 } else {
5196 /*
5197 * Capture the FMRI of the parent if we're actually going
5198 * to take the snapshot.
5199 */
5200 if ((rc = rc_node_get_fmri_or_fragment(parentp,
5201 audit_data.ed_fmri, REP_PROTOCOL_FMRI_LEN, &sz_out)) !=
5202 REP_PROTOCOL_SUCCESS) {
5203 (void) pthread_mutex_unlock(&np->rn_lock);
5204 free(audit_data.ed_fmri);
5205 free(audit_data.ed_snapname);
5206 return (rc);
5207 }
5208 }
5209
5210 np_orig = np;
5211 rc_node_hold_locked(np); /* simplifies the remainder */
5212
5213 (void) pthread_mutex_unlock(&np->rn_lock);
5214 granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5215 switch (granted) {
5216 case PERM_DENIED:
5217 smf_audit_event(event_id, ADT_FAILURE, ADT_FAIL_VALUE_AUTH,
5218 &audit_data);
5219 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5220 rc_node_rele(np);
5221 goto cleanout;
5222 case PERM_GRANTED:
5223 break;
5224 case PERM_GONE:
5225 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5226 rc_node_rele(np);
5227 goto cleanout;
5228 case PERM_FAIL:
5229 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5230 rc_node_rele(np);
5231 goto cleanout;
5232 default:
5233 bad_error(rc_node_modify_permission_check, granted);
5234 }
5235 (void) pthread_mutex_lock(&np->rn_lock);
5236
5237 /*
5238 * get the latest node, holding RC_NODE_IN_TX to keep the rn_former
5239 * list from changing.
5240 */
5241 for (;;) {
5242 if (!(np->rn_flags & RC_NODE_OLD)) {
5243 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5244 goto again;
5245 }
5246 pp = rc_node_hold_parent_flag(np,
5247 RC_NODE_CHILDREN_CHANGING);
5248
5249 (void) pthread_mutex_lock(&np->rn_lock);
5250 if (pp == NULL) {
5251 goto again;
5252 }
5253 if (np->rn_flags & RC_NODE_OLD) {
5254 rc_node_rele_flag(pp,
5255 RC_NODE_CHILDREN_CHANGING);
5256 (void) pthread_mutex_unlock(&pp->rn_lock);
5257 goto again;
5258 }
5259 (void) pthread_mutex_unlock(&pp->rn_lock);
5260
5261 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
5262 /*
5263 * Can't happen, since we're holding our
5264 * parent's CHILDREN_CHANGING flag...
5265 */
5266 abort();
5267 }
5268 break; /* everything's ready */
5269 }
5270 again:
5271 rc_node_rele_locked(np);
5272 np = cache_lookup(&np_orig->rn_id);
5273
5274 if (np == NULL) {
5275 rc = REP_PROTOCOL_FAIL_DELETED;
5276 goto cleanout;
5277 }
5278
5279 (void) pthread_mutex_lock(&np->rn_lock);
5280 }
5281
5282 if (parentp != NULL) {
5283 if (pp != parentp) {
5284 rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
5285 goto fail;
5286 }
5287 nnp = NULL;
5288 } else {
5289 /*
5290 * look for a former node with the snapid we need.
5291 */
5292 if (np->rn_snapshot_id == snapid) {
5293 rc_node_rele_flag(np, RC_NODE_IN_TX);
5294 rc_node_rele_locked(np);
5295
5296 (void) pthread_mutex_lock(&pp->rn_lock);
5297 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5298 (void) pthread_mutex_unlock(&pp->rn_lock);
5299 rc = REP_PROTOCOL_SUCCESS; /* nothing to do */
5300 goto cleanout;
5301 }
5302
5303 prev = np;
5304 while ((nnp = prev->rn_former) != NULL) {
5305 if (nnp->rn_snapshot_id == snapid) {
5306 rc_node_hold(nnp);
5307 break; /* existing node with that id */
5308 }
5309 prev = nnp;
5310 }
5311 }
5312
5313 if (nnp == NULL) {
5314 prev = NULL;
5315 nnp = rc_node_alloc();
5316 if (nnp == NULL) {
5317 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5318 goto fail;
5319 }
5320
5321 nnp->rn_id = np->rn_id; /* structure assignment */
5322 nnp->rn_hash = np->rn_hash;
5323 nnp->rn_name = strdup(np->rn_name);
5324 nnp->rn_snapshot_id = snapid;
5325 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
5326
5327 if (nnp->rn_name == NULL) {
5328 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5329 goto fail;
5330 }
5331 }
5332
5333 (void) pthread_mutex_unlock(&np->rn_lock);
5334
5335 rc = object_snapshot_attach(&np->rn_id, &snapid, (parentp != NULL));
5336
5337 if (parentp != NULL)
5338 nnp->rn_snapshot_id = snapid; /* fill in new snapid */
5339 else
5340 assert(nnp->rn_snapshot_id == snapid);
5341
5342 (void) pthread_mutex_lock(&np->rn_lock);
5343 if (rc != REP_PROTOCOL_SUCCESS)
5344 goto fail;
5345
5346 /*
5347 * fix up the former chain
5348 */
5349 if (prev != NULL) {
5350 prev->rn_former = nnp->rn_former;
5351 (void) pthread_mutex_lock(&nnp->rn_lock);
5352 nnp->rn_flags &= ~RC_NODE_ON_FORMER;
5353 nnp->rn_former = NULL;
5354 (void) pthread_mutex_unlock(&nnp->rn_lock);
5355 }
5356 np->rn_flags |= RC_NODE_OLD;
5357 (void) pthread_mutex_unlock(&np->rn_lock);
5358
5359 /*
5360 * replace np with nnp
5361 */
5362 rc_node_relink_child(pp, np, nnp);
5363
5364 rc_node_rele(np);
5365 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS, &audit_data);
5366 rc = REP_PROTOCOL_SUCCESS;
5367
5368 cleanout:
5369 free(audit_data.ed_auth);
5370 free(audit_data.ed_fmri);
5371 free(audit_data.ed_snapname);
5372 return (rc);
5373
5374 fail:
5375 rc_node_rele_flag(np, RC_NODE_IN_TX);
5376 rc_node_rele_locked(np);
5377 (void) pthread_mutex_lock(&pp->rn_lock);
5378 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5379 (void) pthread_mutex_unlock(&pp->rn_lock);
5380
5381 if (nnp != NULL) {
5382 if (prev == NULL)
5383 rc_node_destroy(nnp);
5384 else
5385 rc_node_rele(nnp);
5386 }
5387
5388 free(audit_data.ed_auth);
5389 free(audit_data.ed_fmri);
5390 free(audit_data.ed_snapname);
5391 return (rc);
5392 }
5393
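/*
 * Take a new snapshot, named "name", of the instance referenced by npp.
 * The client must be authorized to modify the repository; an
 * ADT_smf_create_snap audit event is generated on success or on an
 * authorization failure.  On success the new snapshot node is assigned
 * to *outpp.
 */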
5394 int
5395 rc_snapshot_take_new(rc_node_ptr_t *npp, const char *svcname,
5396 const char *instname, const char *name, rc_node_ptr_t *outpp)
5397 {
5398 perm_status_t granted;
5399 rc_node_t *np;
5400 rc_node_t *outp = NULL;
5401 int rc, perm_rc;
5402 char fmri[REP_PROTOCOL_FMRI_LEN];
5403 audit_event_data_t audit_data;
5404 size_t sz_out;
5405
5406 rc_node_clear(outpp, 0);
5407
5408 /*
5409 * rc_node_modify_permission_check() must be called before the node
5410 * is locked. This is because the library functions that check
5411 * authorizations can trigger calls back into configd.
5412 */
5413 granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5414 switch (granted) {
5415 case PERM_DENIED:
5416 /*
5417 * We continue in this case, so that we can generate an
5418 * audit event later in this function.
5419 */
5420 perm_rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5421 break;
5422 case PERM_GRANTED:
5423 perm_rc = REP_PROTOCOL_SUCCESS;
5424 break;
5425 case PERM_GONE:
5426 /* No need to produce audit event if client is gone. */
5427 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5428 case PERM_FAIL:
5429 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5430 default:
5431 bad_error("rc_node_modify_permission_check", granted);
5432 break;
5433 }
5434
5435 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
5436 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5437 (void) pthread_mutex_unlock(&np->rn_lock);
5438 free(audit_data.ed_auth);
5439 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5440 }
5441
5442 rc = rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT, name);
5443 if (rc != REP_PROTOCOL_SUCCESS) {
5444 (void) pthread_mutex_unlock(&np->rn_lock);
5445 free(audit_data.ed_auth);
5446 return (rc);
5447 }
5448
5449 if (svcname != NULL && (rc =
5450 rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE, svcname)) !=
5451 REP_PROTOCOL_SUCCESS) {
5452 (void) pthread_mutex_unlock(&np->rn_lock);
5453 free(audit_data.ed_auth);
5454 return (rc);
5455 }
5456
5457 if (instname != NULL && (rc =
5458 rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE, instname)) !=
5459 REP_PROTOCOL_SUCCESS) {
5460 (void) pthread_mutex_unlock(&np->rn_lock);
5461 free(audit_data.ed_auth);
5462 return (rc);
5463 }
5464
5465 audit_data.ed_fmri = fmri;
5466 audit_data.ed_snapname = (char *)name;
5467
5468 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, sizeof (fmri),
5469 &sz_out)) != REP_PROTOCOL_SUCCESS) {
5470 (void) pthread_mutex_unlock(&np->rn_lock);
5471 free(audit_data.ed_auth);
5472 return (rc);
5473 }
5474 if (perm_rc != REP_PROTOCOL_SUCCESS) {
5475 (void) pthread_mutex_unlock(&np->rn_lock);
5476 smf_audit_event(ADT_smf_create_snap, ADT_FAILURE,
5477 ADT_FAIL_VALUE_AUTH, &audit_data);
5478 free(audit_data.ed_auth);
5479 return (perm_rc);
5480 }
5481
5482 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
5483 audit_data.ed_auth);
5484 (void) pthread_mutex_unlock(&np->rn_lock);
5485
5486 rc = object_snapshot_take_new(np, svcname, instname, name, &outp);
5487
5488 if (rc == REP_PROTOCOL_SUCCESS) {
5489 rc_node_assign(outpp, outp);
5490 rc_node_rele(outp);
5491 }
5492
5493 (void) pthread_mutex_lock(&np->rn_lock);
5494 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
5495 (void) pthread_mutex_unlock(&np->rn_lock);
5496
5497 if (rc == REP_PROTOCOL_SUCCESS) {
5498 smf_audit_event(ADT_smf_create_snap, ADT_SUCCESS, ADT_SUCCESS,
5499 &audit_data);
5500 }
5501 if (audit_data.ed_auth != NULL)
5502 free(audit_data.ed_auth);
5503 return (rc);
5504 }
5505
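/*
 * Take a fresh snapshot of the instance referenced by npp and attach it to
 * the existing snapshot referenced by outpp.  rc_attach_snapshot() does the
 * real work and drops outp's node lock.  Fails with _TYPE_MISMATCH if npp
 * is not an instance, _BAD_REQUEST if outpp is not a snapshot, or with
 * whatever rc_attach_snapshot() fails with.
 */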
5506 int
5507 rc_snapshot_take_attach(rc_node_ptr_t *npp, rc_node_ptr_t *outpp)
5508 {
5509 rc_node_t *np, *outp;
5510
5511 RC_NODE_PTR_GET_CHECK(np, npp);
5512 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5513 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5514 }
5515
5516 RC_NODE_PTR_GET_CHECK_AND_LOCK(outp, outpp);
5517 if (outp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5518 (void) pthread_mutex_unlock(&outp->rn_lock);
5519 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5520 }
5521
5522 return (rc_attach_snapshot(outp, 0, np, NULL,
5523 NULL)); /* drops outp's lock */
5524 }
5525
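/*
 * Attach the contents of the snapshot referenced by npp to the snapshot
 * referenced by cpp, so that cpp's snapshot refers to npp's snaplevels.
 * np's name and FMRI are captured before its lock is dropped and passed
 * to rc_attach_snapshot(), which does the real work and drops cp's lock.
 */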
5526 int
5527 rc_snapshot_attach(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5528 {
5529 rc_node_t *np;
5530 rc_node_t *cp;
5531 uint32_t snapid;
5532 char old_name[REP_PROTOCOL_NAME_LEN];
5533 int rc;
5534 size_t sz_out;
5535 char old_fmri[REP_PROTOCOL_FMRI_LEN];
5536
5537 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5538 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5539 (void) pthread_mutex_unlock(&np->rn_lock);
5540 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5541 }
5542 snapid = np->rn_snapshot_id;
5543 rc = rc_node_get_fmri_or_fragment(np, old_fmri, sizeof (old_fmri),
5544 &sz_out);
5545 (void) pthread_mutex_unlock(&np->rn_lock);
5546 if (rc != REP_PROTOCOL_SUCCESS)
5547 return (rc);
5548 if (np->rn_name != NULL) {
5549 if (strlcpy(old_name, np->rn_name, sizeof (old_name)) >=
5550 sizeof (old_name)) {
5551 return (REP_PROTOCOL_FAIL_TRUNCATED);
5552 }
5553 }
5554
5555 RC_NODE_PTR_GET_CHECK_AND_LOCK(cp, cpp);
5556 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5557 (void) pthread_mutex_unlock(&cp->rn_lock);
5558 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5559 }
5560
5561 rc = rc_attach_snapshot(cp, snapid, NULL,
5562 old_fmri, old_name); /* drops cp's lock */
5563 return (rc);
5564 }
5565
5566 /*
5567 * If the pgname property group under ent has type pgtype, and it has a
5568 * propname property with type ptype, return _SUCCESS. If pgtype is NULL,
5569 * it is not checked. If ent is not a service node, we will return _SUCCESS if
5570 * a property meeting the requirements exists in either the instance or its
5571 * parent.
5572 *
5573 * Returns
5574 * _SUCCESS - see above
5575 * _DELETED - ent or one of its ancestors was deleted
5576 * _NO_RESOURCES - no resources
5577 * _NOT_FOUND - no matching property was found
5578 */
5579 static int
5580 rc_svc_prop_exists(rc_node_t *ent, const char *pgname, const char *pgtype,
5581 const char *propname, rep_protocol_value_type_t ptype)
5582 {
5583 int ret;
5584 rc_node_t *pg = NULL, *spg = NULL, *svc, *prop;
5585
5586 assert(!MUTEX_HELD(&ent->rn_lock));
5587
5588 (void) pthread_mutex_lock(&ent->rn_lock);
5589 ret = rc_node_find_named_child(ent, pgname,
5590 REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
5591 (void) pthread_mutex_unlock(&ent->rn_lock);
5592
5593 switch (ret) {
5594 case REP_PROTOCOL_SUCCESS:
5595 break;
5596
5597 case REP_PROTOCOL_FAIL_DELETED:
5598 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5599 return (ret);
5600
5601 default:
5602 bad_error("rc_node_find_named_child", ret);
5603 }
5604
5605 if (ent->rn_id.rl_type != REP_PROTOCOL_ENTITY_SERVICE) {
5606 ret = rc_node_find_ancestor(ent, REP_PROTOCOL_ENTITY_SERVICE,
5607 &svc);
5608 if (ret != REP_PROTOCOL_SUCCESS) {
5609 assert(ret == REP_PROTOCOL_FAIL_DELETED);
5610 if (pg != NULL)
5611 rc_node_rele(pg);
5612 return (ret);
5613 }
5614 assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
5615
5616 (void) pthread_mutex_lock(&svc->rn_lock);
5617 ret = rc_node_find_named_child(svc, pgname,
5618 REP_PROTOCOL_ENTITY_PROPERTYGRP, &spg);
5619 (void) pthread_mutex_unlock(&svc->rn_lock);
5620
5621 rc_node_rele(svc);
5622
5623 switch (ret) {
5624 case REP_PROTOCOL_SUCCESS:
5625 break;
5626
5627 case REP_PROTOCOL_FAIL_DELETED:
5628 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5629 if (pg != NULL)
5630 rc_node_rele(pg);
5631 return (ret);
5632
5633 default:
5634 bad_error("rc_node_find_named_child", ret);
5635 }
5636 }
5637
5638 if (pg != NULL &&
5639 pgtype != NULL && strcmp(pg->rn_type, pgtype) != 0) {
5640 rc_node_rele(pg);
5641 pg = NULL;
5642 }
5643
5644 if (spg != NULL &&
5645 pgtype != NULL && strcmp(spg->rn_type, pgtype) != 0) {
5646 rc_node_rele(spg);
5647 spg = NULL;
5648 }
5649
5650 if (pg == NULL) {
5651 if (spg == NULL)
5652 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5653 pg = spg;
5654 spg = NULL;
5655 }
5656
5657 /*
5658 * At this point, pg is non-NULL, and is a property group node of the
5659 * correct type. spg, if non-NULL, is also a property group node of
5660 * the correct type. Check for the property in pg first, then spg
5661 * (if applicable).
5662 */
5663 (void) pthread_mutex_lock(&pg->rn_lock);
5664 ret = rc_node_find_named_child(pg, propname,
5665 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5666 (void) pthread_mutex_unlock(&pg->rn_lock);
5667 rc_node_rele(pg);
5668 switch (ret) {
5669 case REP_PROTOCOL_SUCCESS:
5670 if (prop != NULL) {
5671 if (prop->rn_valtype == ptype) {
5672 rc_node_rele(prop);
5673 if (spg != NULL)
5674 rc_node_rele(spg);
5675 return (REP_PROTOCOL_SUCCESS);
5676 }
5677 rc_node_rele(prop);
5678 }
5679 break;
5680
5681 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5682 if (spg != NULL)
5683 rc_node_rele(spg);
5684 return (ret);
5685
5686 case REP_PROTOCOL_FAIL_DELETED:
5687 break;
5688
5689 default:
5690 bad_error("rc_node_find_named_child", ret);
5691 }
5692
5693 if (spg == NULL)
5694 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5695
5696 pg = spg;
5697
5698 (void) pthread_mutex_lock(&pg->rn_lock);
5699 ret = rc_node_find_named_child(pg, propname,
5700 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5701 (void) pthread_mutex_unlock(&pg->rn_lock);
5702 rc_node_rele(pg);
5703 switch (ret) {
5704 case REP_PROTOCOL_SUCCESS:
5705 if (prop != NULL) {
5706 if (prop->rn_valtype == ptype) {
5707 rc_node_rele(prop);
5708 return (REP_PROTOCOL_SUCCESS);
5709 }
5710 rc_node_rele(prop);
5711 }
5712 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5713
5714 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5715 return (ret);
5716
5717 case REP_PROTOCOL_FAIL_DELETED:
5718 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5719
5720 default:
5721 bad_error("rc_node_find_named_child", ret);
5722 }
5723
5724 return (REP_PROTOCOL_SUCCESS);
5725 }
5726
5727 /*
5728 * Given a property group node, returns _SUCCESS if the property group may
5729 * be read without any special authorization.
5730 *
5731 * Fails with:
5732 * _DELETED - np or an ancestor node was deleted
5733 * _TYPE_MISMATCH - np does not refer to a property group
5734 * _NO_RESOURCES - no resources
5735 * _PERMISSION_DENIED - authorization is required
5736 */
5737 static int
5738 rc_node_pg_check_read_protect(rc_node_t *np)
5739 {
5740 int ret;
5741 rc_node_t *ent;
5742
5743 assert(!MUTEX_HELD(&np->rn_lock));
5744
5745 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
5746 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5747
5748 if (strcmp(np->rn_type, SCF_GROUP_FRAMEWORK) == 0 ||
5749 strcmp(np->rn_type, SCF_GROUP_DEPENDENCY) == 0 ||
5750 strcmp(np->rn_type, SCF_GROUP_METHOD) == 0)
5751 return (REP_PROTOCOL_SUCCESS);
5752
5753 ret = rc_node_parent(np, &ent);
5754
5755 if (ret != REP_PROTOCOL_SUCCESS)
5756 return (ret);
5757
5758 ret = rc_svc_prop_exists(ent, np->rn_name, np->rn_type,
5759 AUTH_PROP_READ, REP_PROTOCOL_TYPE_STRING);
5760
5761 rc_node_rele(ent);
5762
5763 switch (ret) {
5764 case REP_PROTOCOL_FAIL_NOT_FOUND:
5765 return (REP_PROTOCOL_SUCCESS);
5766 case REP_PROTOCOL_SUCCESS:
5767 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5768 case REP_PROTOCOL_FAIL_DELETED:
5769 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5770 return (ret);
5771 default:
5772 bad_error("rc_svc_prop_exists", ret);
5773 }
5774
5775 return (REP_PROTOCOL_SUCCESS);
5776 }
5777
5778 /*
5779 * Fails with
5780 * _DELETED - np's node or parent has been deleted
5781 * _TYPE_MISMATCH - np's node is not a property
5782 * _NO_RESOURCES - out of memory
5783 * _PERMISSION_DENIED - no authorization to read this property's value(s)
5784 * _BAD_REQUEST - np's parent is not a property group
5785 */
5786 static int
5787 rc_node_property_may_read(rc_node_t *np)
5788 {
5789 int ret;
5790 perm_status_t granted = PERM_DENIED;
5791 rc_node_t *pgp;
5792 permcheck_t *pcp;
5793 audit_event_data_t audit_data;
5794 size_t sz_out;
5795
5796 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
5797 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5798
5799 if (client_is_privileged())
5800 return (REP_PROTOCOL_SUCCESS);
5801
5802 #ifdef NATIVE_BUILD
5803 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5804 #else
5805 ret = rc_node_parent(np, &pgp);
5806
5807 if (ret != REP_PROTOCOL_SUCCESS)
5808 return (ret);
5809
5810 if (pgp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
5811 rc_node_rele(pgp);
5812 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5813 }
5814
5815 ret = rc_node_pg_check_read_protect(pgp);
5816
5817 if (ret != REP_PROTOCOL_FAIL_PERMISSION_DENIED) {
5818 rc_node_rele(pgp);
5819 return (ret);
5820 }
5821
5822 pcp = pc_create();
5823
5824 if (pcp == NULL) {
5825 rc_node_rele(pgp);
5826 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5827 }
5828
5829 ret = perm_add_enabling(pcp, AUTH_MODIFY);
5830
5831 if (ret == REP_PROTOCOL_SUCCESS) {
5832 const char * const auth =
5833 perm_auth_for_pgtype(pgp->rn_type);
5834
5835 if (auth != NULL)
5836 ret = perm_add_enabling(pcp, auth);
5837 }
5838
5839 /*
5840 * If you are permitted to modify the value, you may also
5841 * read it. This means that both the MODIFY and VALUE
5842 * authorizations are acceptable. We don't allow requests
5843 * for AUTH_PROP_MODIFY if all you have is $AUTH_PROP_VALUE,
5844 * however, to avoid leaking possibly valuable information
5845 * since such a user can't change the property anyway.
5846 */
5847 if (ret == REP_PROTOCOL_SUCCESS)
5848 ret = perm_add_enabling_values(pcp, pgp,
5849 AUTH_PROP_MODIFY);
5850
5851 if (ret == REP_PROTOCOL_SUCCESS &&
5852 strcmp(np->rn_name, AUTH_PROP_MODIFY) != 0)
5853 ret = perm_add_enabling_values(pcp, pgp,
5854 AUTH_PROP_VALUE);
5855
5856 if (ret == REP_PROTOCOL_SUCCESS)
5857 ret = perm_add_enabling_values(pcp, pgp,
5858 AUTH_PROP_READ);
5859
5860 rc_node_rele(pgp);
5861
5862 if (ret == REP_PROTOCOL_SUCCESS) {
5863 granted = perm_granted(pcp);
5864 if (granted == PERM_FAIL)
5865 ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5866 if (granted == PERM_GONE)
5867 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5868 }
5869
5870 if (ret == REP_PROTOCOL_SUCCESS) {
5871 /* Generate a read_prop audit event. */
5872 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5873 if (audit_data.ed_fmri == NULL)
5874 ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5875 }
5876 if (ret == REP_PROTOCOL_SUCCESS) {
5877 ret = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5878 REP_PROTOCOL_FMRI_LEN, &sz_out);
5879 }
5880 if (ret == REP_PROTOCOL_SUCCESS) {
5881 int status;
5882 int ret_value;
5883
5884 if (granted == PERM_DENIED) {
5885 status = ADT_FAILURE;
5886 ret_value = ADT_FAIL_VALUE_AUTH;
5887 } else {
5888 status = ADT_SUCCESS;
5889 ret_value = ADT_SUCCESS;
5890 }
5891 audit_data.ed_auth = pcp->pc_auth_string;
5892 smf_audit_event(ADT_smf_read_prop,
5893 status, ret_value, &audit_data);
5894 }
5895 free(audit_data.ed_fmri);
5896
5897 pc_free(pcp);
5898
5899 if ((ret == REP_PROTOCOL_SUCCESS) && (granted == PERM_DENIED))
5900 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5901
5902 return (ret);
5903 #endif /* NATIVE_BUILD */
5904 }
5905
5906 /*
5907 * Iteration
5908 */
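/*
 * Child filters used with rc_iter_create().  rc_node_setup_iter() selects
 * rc_iter_filter_name for RP_ITER_START_EXACT (the pattern is an exact
 * name) and rc_iter_filter_type for RP_ITER_START_PGTYPE (the pattern is
 * a property group type).  When no filter is given, rc_iter_create()
 * substitutes rc_iter_null_filter, which accepts every child.
 */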
5909 static int
5910 rc_iter_filter_name(rc_node_t *np, void *s)
5911 {
5912 const char *name = s;
5913
5914 return (strcmp(np->rn_name, name) == 0);
5915 }
5916
5917 static int
5918 rc_iter_filter_type(rc_node_t *np, void *s)
5919 {
5920 const char *type = s;
5921
5922 return (np->rn_type != NULL && strcmp(np->rn_type, type) == 0);
5923 }
5924
5925 /*ARGSUSED*/
5926 static int
5927 rc_iter_null_filter(rc_node_t *np, void *s)
5928 {
5929 return (1);
5930 }
5931
5932 /*
5933 * Allocate & initialize an rc_node_iter_t structure. Essentially, ensure
5934 * np->rn_children is populated and call uu_list_walk_start(np->rn_children).
5935 * If successful, leaves a hold on np & increments np->rn_other_refs
5936 *
5937 * If composed is true, then set up for iteration across the top level of np's
5938 * composition chain. If successful, leaves a hold on np and increments
5939 * rn_other_refs for the top level of np's composition chain.
5940 *
5941 * Fails with
5942 * _NO_RESOURCES
5943 * _INVALID_TYPE
5944 * _TYPE_MISMATCH - np cannot carry type children
5945 * _DELETED
5946 */
5947 static int
5948 rc_iter_create(rc_node_iter_t **resp, rc_node_t *np, uint32_t type,
5949 rc_iter_filter_func *filter, void *arg, boolean_t composed)
5950 {
5951 rc_node_iter_t *nip;
5952 int res;
5953
5954 assert(*resp == NULL);
5955
5956 nip = uu_zalloc(sizeof (*nip));
5957 if (nip == NULL)
5958 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5959
5960 /* np is held by the client's rc_node_ptr_t */
5961 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
5962 composed = 1;
5963
5964 if (!composed) {
5965 (void) pthread_mutex_lock(&np->rn_lock);
5966
5967 if ((res = rc_node_fill_children(np, type)) !=
5968 REP_PROTOCOL_SUCCESS) {
5969 (void) pthread_mutex_unlock(&np->rn_lock);
5970 uu_free(nip);
5971 return (res);
5972 }
5973
5974 nip->rni_clevel = -1;
5975
5976 nip->rni_iter = uu_list_walk_start(np->rn_children,
5977 UU_WALK_ROBUST);
5978 if (nip->rni_iter != NULL) {
5979 nip->rni_iter_node = np;
5980 rc_node_hold_other(np);
5981 } else {
5982 (void) pthread_mutex_unlock(&np->rn_lock);
5983 uu_free(nip);
5984 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5985 }
5986 (void) pthread_mutex_unlock(&np->rn_lock);
5987 } else {
5988 rc_node_t *ent;
5989
5990 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
5991 /* rn_cchain isn't valid until children are loaded. */
5992 (void) pthread_mutex_lock(&np->rn_lock);
5993 res = rc_node_fill_children(np,
5994 REP_PROTOCOL_ENTITY_SNAPLEVEL);
5995 (void) pthread_mutex_unlock(&np->rn_lock);
5996 if (res != REP_PROTOCOL_SUCCESS) {
5997 uu_free(nip);
5998 return (res);
5999 }
6000
6001 /* Check for an empty snapshot. */
6002 if (np->rn_cchain[0] == NULL)
6003 goto empty;
6004 }
6005
6006 /* Start at the top of the composition chain. */
6007 for (nip->rni_clevel = 0; ; ++nip->rni_clevel) {
6008 if (nip->rni_clevel >= COMPOSITION_DEPTH) {
6009 /* Empty composition chain. */
6010 empty:
6011 nip->rni_clevel = -1;
6012 nip->rni_iter = NULL;
6013 /* It's ok, iter_next() will return _DONE. */
6014 goto out;
6015 }
6016
6017 ent = np->rn_cchain[nip->rni_clevel];
6018 assert(ent != NULL);
6019
6020 if (rc_node_check_and_lock(ent) == REP_PROTOCOL_SUCCESS)
6021 break;
6022
6023 /* Someone deleted it, so try the next one. */
6024 }
6025
6026 res = rc_node_fill_children(ent, type);
6027
6028 if (res == REP_PROTOCOL_SUCCESS) {
6029 nip->rni_iter = uu_list_walk_start(ent->rn_children,
6030 UU_WALK_ROBUST);
6031
6032 if (nip->rni_iter == NULL)
6033 res = REP_PROTOCOL_FAIL_NO_RESOURCES;
6034 else {
6035 nip->rni_iter_node = ent;
6036 rc_node_hold_other(ent);
6037 }
6038 }
6039
6040 if (res != REP_PROTOCOL_SUCCESS) {
6041 (void) pthread_mutex_unlock(&ent->rn_lock);
6042 uu_free(nip);
6043 return (res);
6044 }
6045
6046 (void) pthread_mutex_unlock(&ent->rn_lock);
6047 }
6048
6049 out:
6050 rc_node_hold(np); /* released by rc_iter_end() */
6051 nip->rni_parent = np;
6052 nip->rni_type = type;
6053 nip->rni_filter = (filter != NULL)? filter : rc_iter_null_filter;
6054 nip->rni_filter_arg = arg;
6055 *resp = nip;
6056 return (REP_PROTOCOL_SUCCESS);
6057 }
6058
6059 static void
6060 rc_iter_end(rc_node_iter_t *iter)
6061 {
6062 rc_node_t *np = iter->rni_parent;
6063
6064 if (iter->rni_clevel >= 0)
6065 np = np->rn_cchain[iter->rni_clevel];
6066
6067 assert(MUTEX_HELD(&np->rn_lock));
6068 if (iter->rni_iter != NULL)
6069 uu_list_walk_end(iter->rni_iter);
6070 iter->rni_iter = NULL;
6071
6072 (void) pthread_mutex_unlock(&np->rn_lock);
6073 rc_node_rele(iter->rni_parent);
6074 if (iter->rni_iter_node != NULL)
6075 rc_node_rele_other(iter->rni_iter_node);
6076 }
6077
6078 /*
6079 * Fails with
6080 * _NOT_SET - npp is reset
6081 * _DELETED - npp's node has been deleted
6082 * _NOT_APPLICABLE - npp's node is not a property
6083 * _NO_RESOURCES - out of memory
6084 */
6085 static int
6086 rc_node_setup_value_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp)
6087 {
6088 rc_node_t *np;
6089
6090 rc_node_iter_t *nip;
6091
6092 assert(*iterp == NULL);
6093
6094 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6095
6096 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6097 (void) pthread_mutex_unlock(&np->rn_lock);
6098 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6099 }
6100
6101 nip = uu_zalloc(sizeof (*nip));
6102 if (nip == NULL) {
6103 (void) pthread_mutex_unlock(&np->rn_lock);
6104 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6105 }
6106
6107 nip->rni_parent = np;
6108 nip->rni_iter = NULL;
6109 nip->rni_clevel = -1;
6110 nip->rni_type = REP_PROTOCOL_ENTITY_VALUE;
6111 nip->rni_offset = 0;
6112 nip->rni_last_offset = 0;
6113
6114 rc_node_hold_locked(np);
6115
6116 *iterp = nip;
6117 (void) pthread_mutex_unlock(&np->rn_lock);
6118
6119 return (REP_PROTOCOL_SUCCESS);
6120 }
6121
6122 /*
6123 * Returns:
6124 * _NO_RESOURCES - out of memory
6125 * _NOT_SET - npp is reset
6126 * _DELETED - npp's node has been deleted
6127 * _TYPE_MISMATCH - npp's node is not a property
6128 * _NOT_FOUND - property has no values
6129  *	_TRUNCATED - property has multiple values (the first is written into out)
6130 * _SUCCESS - property has 1 value (which is written into out)
6131 * _PERMISSION_DENIED - no authorization to read property value(s)
6132 *
6133 * We shorten *sz_out to not include anything after the final '\0'.
6134 */
6135 int
6136 rc_node_get_property_value(rc_node_ptr_t *npp,
6137 struct rep_protocol_value_response *out, size_t *sz_out)
6138 {
6139 rc_node_t *np;
6140 size_t w;
6141 int ret;
6142
6143 assert(*sz_out == sizeof (*out));
6144
6145 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6146 ret = rc_node_property_may_read(np);
6147 rc_node_rele(np);
6148
6149 if (ret != REP_PROTOCOL_SUCCESS)
6150 return (ret);
6151
6152 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6153
6154 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6155 (void) pthread_mutex_unlock(&np->rn_lock);
6156 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6157 }
6158
6159 if (np->rn_values_size == 0) {
6160 (void) pthread_mutex_unlock(&np->rn_lock);
6161 return (REP_PROTOCOL_FAIL_NOT_FOUND);
6162 }
6163 out->rpr_type = np->rn_valtype;
6164 w = strlcpy(out->rpr_value, &np->rn_values[0],
6165 sizeof (out->rpr_value));
6166
6167 if (w >= sizeof (out->rpr_value))
6168 backend_panic("value too large");
6169
6170 *sz_out = offsetof(struct rep_protocol_value_response,
6171 rpr_value[w + 1]);
6172
6173 ret = (np->rn_values_count != 1)? REP_PROTOCOL_FAIL_TRUNCATED :
6174 REP_PROTOCOL_SUCCESS;
6175 (void) pthread_mutex_unlock(&np->rn_lock);
6176 return (ret);
6177 }
6178
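/*
 * Copy out the next value of the property being iterated by "iter".  The
 * values are stored as consecutive '\0'-terminated strings in rn_values,
 * and rni_offset tracks our position.  If "repeat" is nonzero, the most
 * recently returned value is copied out again instead of advancing.
 *
 * Returns
 *	_BAD_REQUEST - iter is not a value iterator
 *	_DONE - no more values
 *	_SUCCESS - the next value was written into *out
 * and can also fail with whatever rc_node_property_may_read() fails with
 * (e.g. _PERMISSION_DENIED).
 */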
6179 int
6180 rc_iter_next_value(rc_node_iter_t *iter,
6181 struct rep_protocol_value_response *out, size_t *sz_out, int repeat)
6182 {
6183 rc_node_t *np = iter->rni_parent;
6184 const char *vals;
6185 size_t len;
6186
6187 size_t start;
6188 size_t w;
6189 int ret;
6190
6191 rep_protocol_responseid_t result;
6192
6193 assert(*sz_out == sizeof (*out));
6194
6195 (void) memset(out, '\0', *sz_out);
6196
6197 if (iter->rni_type != REP_PROTOCOL_ENTITY_VALUE)
6198 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6199
6200 RC_NODE_CHECK(np);
6201 ret = rc_node_property_may_read(np);
6202
6203 if (ret != REP_PROTOCOL_SUCCESS)
6204 return (ret);
6205
6206 RC_NODE_CHECK_AND_LOCK(np);
6207
6208 vals = np->rn_values;
6209 len = np->rn_values_size;
6210
6211 out->rpr_type = np->rn_valtype;
6212
6213 start = (repeat)? iter->rni_last_offset : iter->rni_offset;
6214
6215 if (len == 0 || start >= len) {
6216 result = REP_PROTOCOL_DONE;
6217 *sz_out -= sizeof (out->rpr_value);
6218 } else {
6219 w = strlcpy(out->rpr_value, &vals[start],
6220 sizeof (out->rpr_value));
6221
6222 if (w >= sizeof (out->rpr_value))
6223 backend_panic("value too large");
6224
6225 *sz_out = offsetof(struct rep_protocol_value_response,
6226 rpr_value[w + 1]);
6227
6228 /*
6229 * update the offsets if we're not repeating
6230 */
6231 if (!repeat) {
6232 iter->rni_last_offset = iter->rni_offset;
6233 iter->rni_offset += (w + 1);
6234 }
6235
6236 result = REP_PROTOCOL_SUCCESS;
6237 }
6238
6239 (void) pthread_mutex_unlock(&np->rn_lock);
6240 return (result);
6241 }
6242
6243 /*
6244 * Entry point for ITER_START from client.c. Validate the arguments & call
6245 * rc_iter_create().
6246 *
6247 * Fails with
6248 * _NOT_SET
6249 * _DELETED
6250 * _TYPE_MISMATCH - np cannot carry type children
6251 * _BAD_REQUEST - flags is invalid
6252 * pattern is invalid
6253 * _NO_RESOURCES
6254 * _INVALID_TYPE
6255 * _TYPE_MISMATCH - *npp cannot have children of type
6256 * _BACKEND_ACCESS
6257 */
6258 int
6259 rc_node_setup_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp,
6260 uint32_t type, uint32_t flags, const char *pattern)
6261 {
6262 rc_node_t *np;
6263 rc_iter_filter_func *f = NULL;
6264 int rc;
6265
6266 RC_NODE_PTR_GET_CHECK(np, npp);
6267
6268 if (pattern != NULL && pattern[0] == '\0')
6269 pattern = NULL;
6270
6271 if (type == REP_PROTOCOL_ENTITY_VALUE) {
6272 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
6273 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6274 if (flags != RP_ITER_START_ALL || pattern != NULL)
6275 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6276
6277 rc = rc_node_setup_value_iter(npp, iterp);
6278 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6279 return (rc);
6280 }
6281
6282 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
6283 REP_PROTOCOL_SUCCESS)
6284 return (rc);
6285
6286 if (((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^
6287 (pattern == NULL))
6288 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6289
6290 /* Composition only works for instances & snapshots. */
6291 if ((flags & RP_ITER_START_COMPOSED) &&
6292 (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE &&
6293 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT))
6294 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6295
6296 if (pattern != NULL) {
6297 if ((rc = rc_check_type_name(type, pattern)) !=
6298 REP_PROTOCOL_SUCCESS)
6299 return (rc);
6300 pattern = strdup(pattern);
6301 if (pattern == NULL)
6302 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6303 }
6304
6305 switch (flags & RP_ITER_START_FILT_MASK) {
6306 case RP_ITER_START_ALL:
6307 f = NULL;
6308 break;
6309 case RP_ITER_START_EXACT:
6310 f = rc_iter_filter_name;
6311 break;
6312 case RP_ITER_START_PGTYPE:
6313 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6314 free((void *)pattern);
6315 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6316 }
6317 f = rc_iter_filter_type;
6318 break;
6319 default:
6320 free((void *)pattern);
6321 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6322 }
6323
6324 rc = rc_iter_create(iterp, np, type, f, (void *)pattern,
6325 flags & RP_ITER_START_COMPOSED);
6326 if (rc != REP_PROTOCOL_SUCCESS && pattern != NULL)
6327 free((void *)pattern);
6328
6329 return (rc);
6330 }
6331
6332 /*
6333 * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches
6334 * the filter.
6335  * For composed iterators, we then check whether there is an overlapping
6336  * entity (see embedded comments).  If we reach the end of the list, we start
6337  * over at the next level.
6338 *
6339 * Returns
6340 * _BAD_REQUEST - iter walks values
6341 * _TYPE_MISMATCH - iter does not walk type entities
6342 * _DELETED - parent was deleted
6343 * _NO_RESOURCES
6344 * _INVALID_TYPE - type is invalid
6345 * _DONE
6346 * _SUCCESS
6347 *
6348 * For composed property group iterators, can also return
6349 * _TYPE_MISMATCH - parent cannot have type children
6350 */
6351 int
6352 rc_iter_next(rc_node_iter_t *iter, rc_node_ptr_t *out, uint32_t type)
6353 {
6354 rc_node_t *np = iter->rni_parent;
6355 rc_node_t *res;
6356 int rc;
6357
6358 if (iter->rni_type == REP_PROTOCOL_ENTITY_VALUE)
6359 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6360
6361 if (iter->rni_iter == NULL) {
6362 rc_node_clear(out, 0);
6363 return (REP_PROTOCOL_DONE);
6364 }
6365
6366 if (iter->rni_type != type) {
6367 rc_node_clear(out, 0);
6368 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6369 }
6370
6371 (void) pthread_mutex_lock(&np->rn_lock); /* held by _iter_create() */
6372
6373 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6374 (void) pthread_mutex_unlock(&np->rn_lock);
6375 rc_node_clear(out, 1);
6376 return (REP_PROTOCOL_FAIL_DELETED);
6377 }
6378
6379 if (iter->rni_clevel >= 0) {
6380 /* Composed iterator. Iterate over appropriate level. */
6381 (void) pthread_mutex_unlock(&np->rn_lock);
6382 np = np->rn_cchain[iter->rni_clevel];
6383 /*
6384 * If iter->rni_parent is an instance or a snapshot, np must
6385 * be valid since iter holds iter->rni_parent & possible
6386 * levels (service, instance, snaplevel) cannot be destroyed
6387 * while rni_parent is held. If iter->rni_parent is
6388 * a composed property group then rc_node_setup_cpg() put
6389 * a hold on np.
6390 */
6391
6392 (void) pthread_mutex_lock(&np->rn_lock);
6393
6394 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6395 (void) pthread_mutex_unlock(&np->rn_lock);
6396 rc_node_clear(out, 1);
6397 return (REP_PROTOCOL_FAIL_DELETED);
6398 }
6399 }
6400
6401 assert(np->rn_flags & RC_NODE_HAS_CHILDREN);
6402
6403 for (;;) {
6404 res = uu_list_walk_next(iter->rni_iter);
6405 if (res == NULL) {
6406 rc_node_t *parent = iter->rni_parent;
6407
6408 #if COMPOSITION_DEPTH == 2
6409 if (iter->rni_clevel < 0 || iter->rni_clevel == 1) {
6410 /* release walker and lock */
6411 rc_iter_end(iter);
6412 break;
6413 }
6414
6415 /* Stop walking current level. */
6416 uu_list_walk_end(iter->rni_iter);
6417 iter->rni_iter = NULL;
6418 (void) pthread_mutex_unlock(&np->rn_lock);
6419 rc_node_rele_other(iter->rni_iter_node);
6420 iter->rni_iter_node = NULL;
6421
6422 /* Start walking next level. */
6423 ++iter->rni_clevel;
6424 np = parent->rn_cchain[iter->rni_clevel];
6425 assert(np != NULL);
6426 #else
6427 #error This code must be updated.
6428 #endif
6429
6430 (void) pthread_mutex_lock(&np->rn_lock);
6431
6432 rc = rc_node_fill_children(np, iter->rni_type);
6433
6434 if (rc == REP_PROTOCOL_SUCCESS) {
6435 iter->rni_iter =
6436 uu_list_walk_start(np->rn_children,
6437 UU_WALK_ROBUST);
6438
6439 if (iter->rni_iter == NULL)
6440 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
6441 else {
6442 iter->rni_iter_node = np;
6443 rc_node_hold_other(np);
6444 }
6445 }
6446
6447 if (rc != REP_PROTOCOL_SUCCESS) {
6448 (void) pthread_mutex_unlock(&np->rn_lock);
6449 rc_node_clear(out, 0);
6450 return (rc);
6451 }
6452
6453 continue;
6454 }
6455
6456 if (res->rn_id.rl_type != type ||
6457 !iter->rni_filter(res, iter->rni_filter_arg))
6458 continue;
6459
6460 /*
6461 * If we're composed and not at the top level, check to see if
6462 * there's an entity at a higher level with the same name. If
6463 * so, skip this one.
6464 */
6465 if (iter->rni_clevel > 0) {
6466 rc_node_t *ent = iter->rni_parent->rn_cchain[0];
6467 rc_node_t *pg;
6468
6469 #if COMPOSITION_DEPTH == 2
6470 assert(iter->rni_clevel == 1);
6471
6472 (void) pthread_mutex_unlock(&np->rn_lock);
6473 (void) pthread_mutex_lock(&ent->rn_lock);
6474 rc = rc_node_find_named_child(ent, res->rn_name, type,
6475 &pg);
6476 if (rc == REP_PROTOCOL_SUCCESS && pg != NULL)
6477 rc_node_rele(pg);
6478 (void) pthread_mutex_unlock(&ent->rn_lock);
6479 if (rc != REP_PROTOCOL_SUCCESS) {
6480 rc_node_clear(out, 0);
6481 return (rc);
6482 }
6483 (void) pthread_mutex_lock(&np->rn_lock);
6484
6485 /* Make sure np isn't being deleted all of a sudden. */
6486 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6487 (void) pthread_mutex_unlock(&np->rn_lock);
6488 rc_node_clear(out, 1);
6489 return (REP_PROTOCOL_FAIL_DELETED);
6490 }
6491
6492 if (pg != NULL)
6493 /* Keep going. */
6494 continue;
6495 #else
6496 #error This code must be updated.
6497 #endif
6498 }
6499
6500 /*
6501 * If we're composed, iterating over property groups, and not
6502 * at the bottom level, check to see if there's a pg at lower
6503 * level with the same name. If so, return a cpg.
6504 */
6505 if (iter->rni_clevel >= 0 &&
6506 type == REP_PROTOCOL_ENTITY_PROPERTYGRP &&
6507 iter->rni_clevel < COMPOSITION_DEPTH - 1) {
6508 #if COMPOSITION_DEPTH == 2
6509 rc_node_t *pg;
6510 rc_node_t *ent = iter->rni_parent->rn_cchain[1];
6511
6512 rc_node_hold(res); /* While we drop np->rn_lock */
6513
6514 (void) pthread_mutex_unlock(&np->rn_lock);
6515 (void) pthread_mutex_lock(&ent->rn_lock);
6516 rc = rc_node_find_named_child(ent, res->rn_name, type,
6517 &pg);
6518 /* holds pg if not NULL */
6519 (void) pthread_mutex_unlock(&ent->rn_lock);
6520 if (rc != REP_PROTOCOL_SUCCESS) {
6521 rc_node_rele(res);
6522 rc_node_clear(out, 0);
6523 return (rc);
6524 }
6525
6526 (void) pthread_mutex_lock(&np->rn_lock);
6527 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6528 (void) pthread_mutex_unlock(&np->rn_lock);
6529 rc_node_rele(res);
6530 if (pg != NULL)
6531 rc_node_rele(pg);
6532 rc_node_clear(out, 1);
6533 return (REP_PROTOCOL_FAIL_DELETED);
6534 }
6535
6536 if (pg == NULL) {
6537 rc_node_rele(res);
6538 } else {
6539 rc_node_t *cpg;
6540
6541 /* Keep res held for rc_node_setup_cpg(). */
6542
6543 cpg = rc_node_alloc();
6544 if (cpg == NULL) {
6545 (void) pthread_mutex_unlock(
6546 &np->rn_lock);
6547 rc_node_rele(res);
6548 rc_node_rele(pg);
6549 rc_node_clear(out, 0);
6550 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6551 }
6552
6553 switch (rc_node_setup_cpg(cpg, res, pg)) {
6554 case REP_PROTOCOL_SUCCESS:
6555 res = cpg;
6556 break;
6557
6558 case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
6559 /* Nevermind. */
6560 rc_node_destroy(cpg);
6561 rc_node_rele(pg);
6562 rc_node_rele(res);
6563 break;
6564
6565 case REP_PROTOCOL_FAIL_NO_RESOURCES:
6566 rc_node_destroy(cpg);
6567 (void) pthread_mutex_unlock(
6568 &np->rn_lock);
6569 rc_node_rele(res);
6570 rc_node_rele(pg);
6571 rc_node_clear(out, 0);
6572 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6573
6574 default:
6575 assert(0);
6576 abort();
6577 }
6578 }
6579 #else
6580 #error This code must be updated.
6581 #endif
6582 }
6583
6584 rc_node_hold(res);
6585 (void) pthread_mutex_unlock(&np->rn_lock);
6586 break;
6587 }
6588 rc_node_assign(out, res);
6589
6590 if (res == NULL)
6591 return (REP_PROTOCOL_DONE);
6592 rc_node_rele(res);
6593 return (REP_PROTOCOL_SUCCESS);
6594 }
6595
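/*
 * Tear down an iterator created by rc_iter_create() or
 * rc_node_setup_value_iter():  free the filter argument, end any
 * in-progress walk via rc_iter_end() (which requires the appropriate
 * node lock), free the iterator, and clear *nipp.
 */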
6596 void
6597 rc_iter_destroy(rc_node_iter_t **nipp)
6598 {
6599 rc_node_iter_t *nip = *nipp;
6600 rc_node_t *np;
6601
6602 if (nip == NULL)
6603 return; /* already freed */
6604
6605 np = nip->rni_parent;
6606
6607 if (nip->rni_filter_arg != NULL)
6608 free(nip->rni_filter_arg);
6609 nip->rni_filter_arg = NULL;
6610
6611 if (nip->rni_type == REP_PROTOCOL_ENTITY_VALUE ||
6612 nip->rni_iter != NULL) {
6613 if (nip->rni_clevel < 0)
6614 (void) pthread_mutex_lock(&np->rn_lock);
6615 else
6616 (void) pthread_mutex_lock(
6617 &np->rn_cchain[nip->rni_clevel]->rn_lock);
6618 rc_iter_end(nip); /* release walker and lock */
6619 }
6620 nip->rni_parent = NULL;
6621
6622 uu_free(nip);
6623 *nipp = NULL;
6624 }
6625
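/*
 * Set up txp for a transaction on the property group referenced by npp.
 * A composed property group is redirected to the top of its composition
 * chain, and property groups in snapshots may not be modified at all.
 * The authorization checks are performed here when possible; a denial is
 * recorded in txp as RC_AUTH_FAILED so that rc_tx_commit() can generate
 * the audit events later.
 */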
6626 int
6627 rc_node_setup_tx(rc_node_ptr_t *npp, rc_node_ptr_t *txp)
6628 {
6629 rc_node_t *np;
6630 permcheck_t *pcp;
6631 int ret;
6632 perm_status_t granted;
6633 rc_auth_state_t authorized = RC_AUTH_UNKNOWN;
6634 char *auth_string = NULL;
6635
6636 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6637
6638 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
6639 rc_node_rele(np);
6640 np = np->rn_cchain[0];
6641 RC_NODE_CHECK_AND_HOLD(np);
6642 }
6643
6644 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6645 rc_node_rele(np);
6646 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6647 }
6648
6649 if (np->rn_id.rl_ids[ID_SNAPSHOT] != 0) {
6650 rc_node_rele(np);
6651 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6652 }
6653
6654 #ifdef NATIVE_BUILD
6655 if (client_is_privileged())
6656 goto skip_checks;
6657 rc_node_rele(np);
6658 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6659 #else
6660 if (is_main_repository == 0)
6661 goto skip_checks;
6662
6663 /* permission check */
6664 pcp = pc_create();
6665 if (pcp == NULL) {
6666 rc_node_rele(np);
6667 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6668 }
6669
6670 if (np->rn_id.rl_ids[ID_INSTANCE] != 0 && /* instance pg */
6671 ((strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0 &&
6672 strcmp(np->rn_type, AUTH_PG_ACTIONS_TYPE) == 0) ||
6673 (strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
6674 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
6675 rc_node_t *instn;
6676
6677 /* solaris.smf.modify can be used */
6678 ret = perm_add_enabling(pcp, AUTH_MODIFY);
6679 if (ret != REP_PROTOCOL_SUCCESS) {
6680 pc_free(pcp);
6681 rc_node_rele(np);
6682 return (ret);
6683 }
6684
6685 /* solaris.smf.manage can be used. */
6686 ret = perm_add_enabling(pcp, AUTH_MANAGE);
6687
6688 if (ret != REP_PROTOCOL_SUCCESS) {
6689 pc_free(pcp);
6690 rc_node_rele(np);
6691 return (ret);
6692 }
6693
6694 /* general/action_authorization values can be used. */
6695 ret = rc_node_parent(np, &instn);
6696 if (ret != REP_PROTOCOL_SUCCESS) {
6697 assert(ret == REP_PROTOCOL_FAIL_DELETED);
6698 rc_node_rele(np);
6699 pc_free(pcp);
6700 return (REP_PROTOCOL_FAIL_DELETED);
6701 }
6702
6703 assert(instn->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
6704
6705 ret = perm_add_inst_action_auth(pcp, instn);
6706 rc_node_rele(instn);
6707 switch (ret) {
6708 case REP_PROTOCOL_SUCCESS:
6709 break;
6710
6711 case REP_PROTOCOL_FAIL_DELETED:
6712 case REP_PROTOCOL_FAIL_NO_RESOURCES:
6713 rc_node_rele(np);
6714 pc_free(pcp);
6715 return (ret);
6716
6717 default:
6718 bad_error("perm_add_inst_action_auth", ret);
6719 }
6720
6721 if (strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0)
6722 authorized = RC_AUTH_PASSED; /* No check on commit. */
6723 } else {
6724 ret = perm_add_enabling(pcp, AUTH_MODIFY);
6725
6726 if (ret == REP_PROTOCOL_SUCCESS) {
6727 /* propertygroup-type-specific authorization */
6728 /* no locking because rn_type won't change anyway */
6729 const char * const auth =
6730 perm_auth_for_pgtype(np->rn_type);
6731
6732 if (auth != NULL)
6733 ret = perm_add_enabling(pcp, auth);
6734 }
6735
6736 if (ret == REP_PROTOCOL_SUCCESS)
6737 /* propertygroup/transaction-type-specific auths */
6738 ret =
6739 perm_add_enabling_values(pcp, np, AUTH_PROP_VALUE);
6740
6741 if (ret == REP_PROTOCOL_SUCCESS)
6742 ret =
6743 perm_add_enabling_values(pcp, np, AUTH_PROP_MODIFY);
6744
6745 /* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */
6746 if (ret == REP_PROTOCOL_SUCCESS &&
6747 strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
6748 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0)
6749 ret = perm_add_enabling(pcp, AUTH_MANAGE);
6750
6751 if (ret != REP_PROTOCOL_SUCCESS) {
6752 pc_free(pcp);
6753 rc_node_rele(np);
6754 return (ret);
6755 }
6756 }
6757
6758 granted = perm_granted(pcp);
6759 ret = map_granted_status(granted, pcp, &auth_string);
6760 pc_free(pcp);
6761
6762 if ((granted == PERM_GONE) || (granted == PERM_FAIL) ||
6763 (ret == REP_PROTOCOL_FAIL_NO_RESOURCES)) {
6764 free(auth_string);
6765 rc_node_rele(np);
6766 return (ret);
6767 }
6768
6769 if (granted == PERM_DENIED) {
6770 /*
6771 * If we get here, the authorization failed.
6772 * Unfortunately, we don't have enough information at this
6773 * point to generate the security audit events. We'll only
6774 * get that information when the client tries to commit the
6775 * event. Thus, we'll remember the failed authorization,
6776 * so that we can generate the audit events later.
6777 */
6778 authorized = RC_AUTH_FAILED;
6779 }
6780 #endif /* NATIVE_BUILD */
6781
6782 skip_checks:
6783 rc_node_assign(txp, np);
6784 txp->rnp_authorized = authorized;
6785 if (authorized != RC_AUTH_UNKNOWN) {
6786 /* Save the authorization string. */
6787 if (txp->rnp_auth_string != NULL)
6788 free((void *)txp->rnp_auth_string);
6789 txp->rnp_auth_string = auth_string;
6790 auth_string = NULL; /* Don't free until done with txp. */
6791 }
6792
6793 rc_node_rele(np);
6794 if (auth_string != NULL)
6795 free(auth_string);
6796 return (REP_PROTOCOL_SUCCESS);
6797 }
6798
6799 /*
6800 * Return 1 if the given transaction commands only modify the values of
6801 * properties other than "modify_authorization". Return -1 if any of the
6802 * commands are invalid, and 0 otherwise.
6803 */
6804 static int
6805 tx_allow_value(const void *cmds_arg, size_t cmds_sz, rc_node_t *pg)
6806 {
6807 const struct rep_protocol_transaction_cmd *cmds;
6808 uintptr_t loc;
6809 uint32_t sz;
6810 rc_node_t *prop;
6811 boolean_t ok;
6812
6813 assert(!MUTEX_HELD(&pg->rn_lock));
6814
6815 loc = (uintptr_t)cmds_arg;
6816
6817 while (cmds_sz > 0) {
6818 cmds = (struct rep_protocol_transaction_cmd *)loc;
6819
6820 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6821 return (-1);
6822
6823 sz = cmds->rptc_size;
6824 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6825 return (-1);
6826
6827 sz = TX_SIZE(sz);
6828 if (sz > cmds_sz)
6829 return (-1);
6830
6831 switch (cmds[0].rptc_action) {
6832 case REP_PROTOCOL_TX_ENTRY_CLEAR:
6833 break;
6834
6835 case REP_PROTOCOL_TX_ENTRY_REPLACE:
6836 /* Check type */
6837 (void) pthread_mutex_lock(&pg->rn_lock);
6838 ok = B_FALSE;
6839 if (rc_node_find_named_child(pg,
6840 (const char *)cmds[0].rptc_data,
6841 REP_PROTOCOL_ENTITY_PROPERTY, &prop) ==
6842 REP_PROTOCOL_SUCCESS) {
6843 if (prop != NULL) {
6844 ok = prop->rn_valtype ==
6845 cmds[0].rptc_type;
6846 /*
6847 * rc_node_find_named_child()
6848 * places a hold on prop which we
6849 * do not need to hang on to.
6850 */
6851 rc_node_rele(prop);
6852 }
6853 }
6854 (void) pthread_mutex_unlock(&pg->rn_lock);
6855 if (ok)
6856 break;
6857 return (0);
6858
6859 default:
6860 return (0);
6861 }
6862
6863 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_MODIFY)
6864 == 0)
6865 return (0);
6866
6867 loc += sz;
6868 cmds_sz -= sz;
6869 }
6870
6871 return (1);
6872 }
6873
6874 /*
6875 * Return 1 if any of the given transaction commands affect
6876 * "action_authorization". Return -1 if any of the commands are invalid and
6877 * 0 in all other cases.
6878 */
6879 static int
6880 tx_modifies_action(const void *cmds_arg, size_t cmds_sz)
6881 {
6882 const struct rep_protocol_transaction_cmd *cmds;
6883 uintptr_t loc;
6884 uint32_t sz;
6885
6886 loc = (uintptr_t)cmds_arg;
6887
6888 while (cmds_sz > 0) {
6889 cmds = (struct rep_protocol_transaction_cmd *)loc;
6890
6891 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6892 return (-1);
6893
6894 sz = cmds->rptc_size;
6895 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6896 return (-1);
6897
6898 sz = TX_SIZE(sz);
6899 if (sz > cmds_sz)
6900 return (-1);
6901
6902 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_ACTION)
6903 == 0)
6904 return (1);
6905
6906 loc += sz;
6907 cmds_sz -= sz;
6908 }
6909
6910 return (0);
6911 }
6912
6913 /*
6914 * Returns 1 if the transaction commands only modify properties named
6915  * 'enabled'.  Returns -1 if any command is invalid, and 0 otherwise.
6916  */
6917 static int
6918 tx_only_enabled(const void *cmds_arg, size_t cmds_sz)
6919 {
6920 const struct rep_protocol_transaction_cmd *cmd;
6921 uintptr_t loc;
6922 uint32_t sz;
6923
6924 loc = (uintptr_t)cmds_arg;
6925
6926 while (cmds_sz > 0) {
6927 cmd = (struct rep_protocol_transaction_cmd *)loc;
6928
6929 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6930 return (-1);
6931
6932 sz = cmd->rptc_size;
6933 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6934 return (-1);
6935
6936 sz = TX_SIZE(sz);
6937 if (sz > cmds_sz)
6938 return (-1);
6939
6940 if (strcmp((const char *)cmd->rptc_data, AUTH_PROP_ENABLED)
6941 != 0)
6942 return (0);
6943
6944 loc += sz;
6945 cmds_sz -= sz;
6946 }
6947
6948 return (1);
6949 }
6950
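/*
 * Commit the transaction commands in "cmds" against the property group
 * referenced by txp.  If the authorization decision was not already made
 * in rc_node_setup_tx(), the required authorizations are derived from the
 * contents of the transaction and checked here.  Audit events are
 * generated for the affected properties.  On success the old property
 * group node is marked RC_NODE_OLD and replaced in the tree by a new node
 * carrying the new generation id, and txp is cleared.
 */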
6951 int
6952 rc_tx_commit(rc_node_ptr_t *txp, const void *cmds, size_t cmds_sz)
6953 {
6954 rc_node_t *np = txp->rnp_node;
6955 rc_node_t *pp;
6956 rc_node_t *nnp;
6957 rc_node_pg_notify_t *pnp;
6958 int rc;
6959 permcheck_t *pcp;
6960 perm_status_t granted;
6961 int normal;
6962 char *pg_fmri = NULL;
6963 char *auth_string = NULL;
6964 int auth_status = ADT_SUCCESS;
6965 int auth_ret_value = ADT_SUCCESS;
6966 size_t sz_out;
6967 int tx_flag = 1;
6968 tx_commit_data_t *tx_data = NULL;
6969
6970 RC_NODE_CHECK(np);
6971
6972 if ((txp->rnp_authorized != RC_AUTH_UNKNOWN) &&
6973 (txp->rnp_auth_string != NULL)) {
6974 auth_string = strdup(txp->rnp_auth_string);
6975 if (auth_string == NULL)
6976 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6977 }
6978
6979 if ((txp->rnp_authorized == RC_AUTH_UNKNOWN) &&
6980 is_main_repository) {
6981 #ifdef NATIVE_BUILD
6982 if (!client_is_privileged()) {
6983 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6984 }
6985 #else
6986 /* permission check: depends on contents of transaction */
6987 pcp = pc_create();
6988 if (pcp == NULL)
6989 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6990
6991 /* If normal is cleared, we won't do the normal checks. */
6992 normal = 1;
6993 rc = REP_PROTOCOL_SUCCESS;
6994
6995 if (strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
6996 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) {
6997 /* Touching general[framework]/action_authorization? */
6998 rc = tx_modifies_action(cmds, cmds_sz);
6999 if (rc == -1) {
7000 pc_free(pcp);
7001 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7002 }
7003
7004 if (rc) {
7005 /*
7006 * Yes: only AUTH_MODIFY and AUTH_MANAGE
7007 * can be used.
7008 */
7009 rc = perm_add_enabling(pcp, AUTH_MODIFY);
7010
7011 if (rc == REP_PROTOCOL_SUCCESS)
7012 rc = perm_add_enabling(pcp,
7013 AUTH_MANAGE);
7014
7015 normal = 0;
7016 } else {
7017 rc = REP_PROTOCOL_SUCCESS;
7018 }
7019 } else if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&
7020 strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
7021 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0) {
7022 rc_node_t *instn;
7023
7024 rc = tx_only_enabled(cmds, cmds_sz);
7025 if (rc == -1) {
7026 pc_free(pcp);
7027 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7028 }
7029
7030 if (rc) {
7031 rc = rc_node_parent(np, &instn);
7032 if (rc != REP_PROTOCOL_SUCCESS) {
7033 assert(rc == REP_PROTOCOL_FAIL_DELETED);
7034 pc_free(pcp);
7035 return (rc);
7036 }
7037
7038 assert(instn->rn_id.rl_type ==
7039 REP_PROTOCOL_ENTITY_INSTANCE);
7040
7041 rc = perm_add_inst_action_auth(pcp, instn);
7042 rc_node_rele(instn);
7043 switch (rc) {
7044 case REP_PROTOCOL_SUCCESS:
7045 break;
7046
7047 case REP_PROTOCOL_FAIL_DELETED:
7048 case REP_PROTOCOL_FAIL_NO_RESOURCES:
7049 pc_free(pcp);
7050 return (rc);
7051
7052 default:
7053 bad_error("perm_add_inst_action_auth",
7054 rc);
7055 }
7056 } else {
7057 rc = REP_PROTOCOL_SUCCESS;
7058 }
7059 }
7060
7061 if (rc == REP_PROTOCOL_SUCCESS && normal) {
7062 rc = perm_add_enabling(pcp, AUTH_MODIFY);
7063
7064 if (rc == REP_PROTOCOL_SUCCESS) {
7065 /* Add pgtype-specific authorization. */
7066 const char * const auth =
7067 perm_auth_for_pgtype(np->rn_type);
7068
7069 if (auth != NULL)
7070 rc = perm_add_enabling(pcp, auth);
7071 }
7072
7073 /* Add pg-specific modify_authorization auths. */
7074 if (rc == REP_PROTOCOL_SUCCESS)
7075 rc = perm_add_enabling_values(pcp, np,
7076 AUTH_PROP_MODIFY);
7077
7078 /* If value_authorization values are ok, add them. */
7079 if (rc == REP_PROTOCOL_SUCCESS) {
7080 rc = tx_allow_value(cmds, cmds_sz, np);
7081 if (rc == -1)
7082 rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
7083 else if (rc)
7084 rc = perm_add_enabling_values(pcp, np,
7085 AUTH_PROP_VALUE);
7086 }
7087 }
7088
7089 if (rc == REP_PROTOCOL_SUCCESS) {
7090 granted = perm_granted(pcp);
7091 rc = map_granted_status(granted, pcp, &auth_string);
7092 if ((granted == PERM_DENIED) && auth_string) {
7093 /*
7094 * _PERMISSION_DENIED should not cause us
7095 * to exit at this point, because we still
7096 * want to generate an audit event.
7097 */
7098 rc = REP_PROTOCOL_SUCCESS;
7099 }
7100 }
7101
7102 pc_free(pcp);
7103
7104 if (rc != REP_PROTOCOL_SUCCESS)
7105 goto cleanout;
7106
7107 if (granted == PERM_DENIED) {
7108 auth_status = ADT_FAILURE;
7109 auth_ret_value = ADT_FAIL_VALUE_AUTH;
7110 tx_flag = 0;
7111 }
7112 #endif /* NATIVE_BUILD */
7113 } else if (txp->rnp_authorized == RC_AUTH_FAILED) {
7114 auth_status = ADT_FAILURE;
7115 auth_ret_value = ADT_FAIL_VALUE_AUTH;
7116 tx_flag = 0;
7117 }
7118
7119 pg_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
7120 if (pg_fmri == NULL) {
7121 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7122 goto cleanout;
7123 }
7124 if ((rc = rc_node_get_fmri_or_fragment(np, pg_fmri,
7125 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
7126 goto cleanout;
7127 }
7128
7129 /*
7130 * Parse the transaction commands into a useful form.
7131 */
7132 if ((rc = tx_commit_data_new(cmds, cmds_sz, &tx_data)) !=
7133 REP_PROTOCOL_SUCCESS) {
7134 goto cleanout;
7135 }
7136
7137 if (tx_flag == 0) {
7138 /* Authorization failed. Generate audit events. */
7139 generate_property_events(tx_data, pg_fmri, auth_string,
7140 auth_status, auth_ret_value);
7141 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
7142 goto cleanout;
7143 }
7144
7145 nnp = rc_node_alloc();
7146 if (nnp == NULL) {
7147 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7148 goto cleanout;
7149 }
7150
7151 nnp->rn_id = np->rn_id; /* structure assignment */
7152 nnp->rn_hash = np->rn_hash;
7153 nnp->rn_name = strdup(np->rn_name);
7154 nnp->rn_type = strdup(np->rn_type);
7155 nnp->rn_pgflags = np->rn_pgflags;
7156
7157 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
7158
7159 if (nnp->rn_name == NULL || nnp->rn_type == NULL) {
7160 rc_node_destroy(nnp);
7161 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7162 goto cleanout;
7163 }
7164
7165 (void) pthread_mutex_lock(&np->rn_lock);
7166
7167 /*
7168 * We must have all of the old properties in the cache, or the
7169 * database deletions could cause inconsistencies.
7170 */
7171 if ((rc = rc_node_fill_children(np, REP_PROTOCOL_ENTITY_PROPERTY)) !=
7172 REP_PROTOCOL_SUCCESS) {
7173 (void) pthread_mutex_unlock(&np->rn_lock);
7174 rc_node_destroy(nnp);
7175 goto cleanout;
7176 }
7177
7178 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
7179 (void) pthread_mutex_unlock(&np->rn_lock);
7180 rc_node_destroy(nnp);
7181 rc = REP_PROTOCOL_FAIL_DELETED;
7182 goto cleanout;
7183 }
7184
7185 if (np->rn_flags & RC_NODE_OLD) {
7186 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
7187 (void) pthread_mutex_unlock(&np->rn_lock);
7188 rc_node_destroy(nnp);
7189 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7190 goto cleanout;
7191 }
7192
7193 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
7194 if (pp == NULL) {
7195 /* our parent is gone, we're going next... */
7196 rc_node_destroy(nnp);
7197 (void) pthread_mutex_lock(&np->rn_lock);
7198 if (np->rn_flags & RC_NODE_OLD) {
7199 (void) pthread_mutex_unlock(&np->rn_lock);
7200 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7201 goto cleanout;
7202 }
7203 (void) pthread_mutex_unlock(&np->rn_lock);
7204 rc = REP_PROTOCOL_FAIL_DELETED;
7205 goto cleanout;
7206 }
7207 (void) pthread_mutex_unlock(&pp->rn_lock);
7208
7209 /*
7210 * prepare for the transaction
7211 */
7212 (void) pthread_mutex_lock(&np->rn_lock);
7213 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
7214 (void) pthread_mutex_unlock(&np->rn_lock);
7215 (void) pthread_mutex_lock(&pp->rn_lock);
7216 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7217 (void) pthread_mutex_unlock(&pp->rn_lock);
7218 rc_node_destroy(nnp);
7219 rc = REP_PROTOCOL_FAIL_DELETED;
7220 goto cleanout;
7221 }
7222 nnp->rn_gen_id = np->rn_gen_id;
7223 (void) pthread_mutex_unlock(&np->rn_lock);
7224
7225 /* Sets nnp->rn_gen_id on success. */
7226 rc = object_tx_commit(&np->rn_id, tx_data, &nnp->rn_gen_id);
7227
7228 (void) pthread_mutex_lock(&np->rn_lock);
7229 if (rc != REP_PROTOCOL_SUCCESS) {
7230 rc_node_rele_flag(np, RC_NODE_IN_TX);
7231 (void) pthread_mutex_unlock(&np->rn_lock);
7232 (void) pthread_mutex_lock(&pp->rn_lock);
7233 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7234 (void) pthread_mutex_unlock(&pp->rn_lock);
7235 rc_node_destroy(nnp);
7236 rc_node_clear(txp, 0);
7237 if (rc == REP_PROTOCOL_DONE)
7238 rc = REP_PROTOCOL_SUCCESS; /* successful empty tx */
7239 goto cleanout;
7240 }
7241
7242 /*
7243 * Notify waiters
7244 */
7245 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7246 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
7247 rc_pg_notify_fire(pnp);
7248 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7249
7250 np->rn_flags |= RC_NODE_OLD;
7251 (void) pthread_mutex_unlock(&np->rn_lock);
7252
7253 rc_notify_remove_node(np);
7254
7255 /*
7256 * replace np with nnp
7257 */
7258 rc_node_relink_child(pp, np, nnp);
7259
7260 /*
7261 * all done -- clear the transaction.
7262 */
7263 rc_node_clear(txp, 0);
7264 generate_property_events(tx_data, pg_fmri, auth_string,
7265 auth_status, auth_ret_value);
7266
7267 rc = REP_PROTOCOL_SUCCESS;
7268
7269 cleanout:
7270 free(auth_string);
7271 free(pg_fmri);
7272 tx_commit_data_free(tx_data);
7273 return (rc);
7274 }
7275
7276 void
7277 rc_pg_notify_init(rc_node_pg_notify_t *pnp)
7278 {
7279 uu_list_node_init(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7280 pnp->rnpn_pg = NULL;
7281 pnp->rnpn_fd = -1;
7282 }
7283
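/*
 * Arrange for "fd" to be notified when the property group referenced by
 * npp next changes.  Any earlier registration on pnp is fired first via
 * rc_pg_notify_fire().  Fails with _BAD_REQUEST if npp is not a property
 * group, _DELETED if it has been deleted, and _NOT_LATEST if npp refers
 * to an old version of the property group.
 */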
7284 int
7285 rc_pg_notify_setup(rc_node_pg_notify_t *pnp, rc_node_ptr_t *npp, int fd)
7286 {
7287 rc_node_t *np;
7288
7289 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
7290
7291 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
7292 (void) pthread_mutex_unlock(&np->rn_lock);
7293 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7294 }
7295
7296 /*
7297 * wait for any transaction in progress to complete
7298 */
7299 if (!rc_node_wait_flag(np, RC_NODE_IN_TX)) {
7300 (void) pthread_mutex_unlock(&np->rn_lock);
7301 return (REP_PROTOCOL_FAIL_DELETED);
7302 }
7303
7304 if (np->rn_flags & RC_NODE_OLD) {
7305 (void) pthread_mutex_unlock(&np->rn_lock);
7306 return (REP_PROTOCOL_FAIL_NOT_LATEST);
7307 }
7308
7309 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7310 rc_pg_notify_fire(pnp);
7311 pnp->rnpn_pg = np;
7312 pnp->rnpn_fd = fd;
7313 (void) uu_list_insert_after(np->rn_pg_notify_list, NULL, pnp);
7314 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7315
7316 (void) pthread_mutex_unlock(&np->rn_lock);
7317 return (REP_PROTOCOL_SUCCESS);
7318 }
7319
7320 void
7321 rc_pg_notify_fini(rc_node_pg_notify_t *pnp)
7322 {
7323 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7324 rc_pg_notify_fire(pnp);
7325 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7326
7327 uu_list_node_fini(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7328 }
7329
7330 void
7331 rc_notify_info_init(rc_notify_info_t *rnip)
7332 {
7333 int i;
7334
7335 uu_list_node_init(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7336 uu_list_node_init(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7337 rc_notify_pool);
7338
7339 rnip->rni_notify.rcn_node = NULL;
7340 rnip->rni_notify.rcn_info = rnip;
7341
7342 bzero(rnip->rni_namelist, sizeof (rnip->rni_namelist));
7343 bzero(rnip->rni_typelist, sizeof (rnip->rni_typelist));
7344
7345 (void) pthread_cond_init(&rnip->rni_cv, NULL);
7346
7347 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7348 rnip->rni_namelist[i] = NULL;
7349 rnip->rni_typelist[i] = NULL;
7350 }
7351 }
7352
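/*
 * Activate rnip: add it to rc_notify_info_list and append its marker node
 * (rni_notify) to the tail of the global rc_notify_list, so the client
 * only sees notifications queued after it registered.  Caller must hold
 * rc_pg_notify_lock.
 */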
7353 static void
7354 rc_notify_info_insert_locked(rc_notify_info_t *rnip)
7355 {
7356 assert(MUTEX_HELD(&rc_pg_notify_lock));
7357
7358 assert(!(rnip->rni_flags & RC_NOTIFY_ACTIVE));
7359
7360 rnip->rni_flags |= RC_NOTIFY_ACTIVE;
7361 (void) uu_list_insert_after(rc_notify_info_list, NULL, rnip);
7362 (void) uu_list_insert_before(rc_notify_list, NULL, &rnip->rni_notify);
7363 }
7364
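/*
 * Deactivate rnip.  Setting RC_NOTIFY_DRAIN tells rc_notify_info_wait()
 * callers to return, and the broadcasts below kick them awake.  If rnip's
 * marker sits at the head of rc_notify_list, this thread is responsible
 * for the notification entries between its marker and the next client's,
 * so it waits for rc_notify_in_use to drop to zero and reclaims them
 * before unlinking the marker.  Finally it waits for all rni_waiters to
 * leave before clearing the DRAIN and ACTIVE flags.  Caller must hold
 * rc_pg_notify_lock.
 */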
7365 static void
7366 rc_notify_info_remove_locked(rc_notify_info_t *rnip)
7367 {
7368 rc_notify_t *me = &rnip->rni_notify;
7369 rc_notify_t *np;
7370
7371 assert(MUTEX_HELD(&rc_pg_notify_lock));
7372
7373 assert(rnip->rni_flags & RC_NOTIFY_ACTIVE);
7374
7375 assert(!(rnip->rni_flags & RC_NOTIFY_DRAIN));
7376 rnip->rni_flags |= RC_NOTIFY_DRAIN;
7377 (void) pthread_cond_broadcast(&rnip->rni_cv);
7378
7379 (void) uu_list_remove(rc_notify_info_list, rnip);
7380
7381 /*
7382 * clean up any notifications at the beginning of the list
7383 */
7384 if (uu_list_first(rc_notify_list) == me) {
7385 /*
7386 * We can't call rc_notify_remove_locked() unless
7387 * rc_notify_in_use is 0.
7388 */
7389 while (rc_notify_in_use) {
7390 (void) pthread_cond_wait(&rc_pg_notify_cv,
7391 &rc_pg_notify_lock);
7392 }
7393 while ((np = uu_list_next(rc_notify_list, me)) != NULL &&
7394 np->rcn_info == NULL)
7395 rc_notify_remove_locked(np);
7396 }
7397 (void) uu_list_remove(rc_notify_list, me);
7398
7399 while (rnip->rni_waiters) {
7400 (void) pthread_cond_broadcast(&rc_pg_notify_cv);
7401 (void) pthread_cond_broadcast(&rnip->rni_cv);
7402 (void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
7403 }
7404
7405 rnip->rni_flags &= ~(RC_NOTIFY_DRAIN | RC_NOTIFY_ACTIVE);
7406 }
7407
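/*
 * Common helper for rc_notify_info_add_name() and rc_notify_info_add_type()
 * below: validate name, duplicate it, and store it in arr (either
 * rni_namelist or rni_typelist), which holds at most RC_NOTIFY_MAX_NAMES
 * entries.  It waits out a concurrent rc_notify_info_reset()
 * (RC_NOTIFY_EMPTYING) before touching the array; a duplicate name is
 * silently accepted, and a full array yields _NO_RESOURCES.  Adding a
 * watch also activates rnip on the global notification list if it is not
 * already active.
 */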
7408 static int
7409 rc_notify_info_add_watch(rc_notify_info_t *rnip, const char **arr,
7410 const char *name)
7411 {
7412 int i;
7413 int rc;
7414 char *f;
7415
7416 rc = rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP, name);
7417 if (rc != REP_PROTOCOL_SUCCESS)
7418 return (rc);
7419
7420 f = strdup(name);
7421 if (f == NULL)
7422 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7423
7424 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7425
7426 while (rnip->rni_flags & RC_NOTIFY_EMPTYING)
7427 (void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
7428
7429 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7430 if (arr[i] == NULL)
7431 break;
7432
7433 /*
7434 * Don't add name if it's already being tracked.
7435 */
7436 if (strcmp(arr[i], f) == 0) {
7437 free(f);
7438 goto out;
7439 }
7440 }
7441
7442 if (i == RC_NOTIFY_MAX_NAMES) {
7443 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7444 free(f);
7445 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7446 }
7447
7448 arr[i] = f;
7449
7450 out:
7451 if (!(rnip->rni_flags & RC_NOTIFY_ACTIVE))
7452 rc_notify_info_insert_locked(rnip);
7453
7454 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7455 return (REP_PROTOCOL_SUCCESS);
7456 }
7457
7458 int
7459 rc_notify_info_add_name(rc_notify_info_t *rnip, const char *name)
7460 {
7461 return (rc_notify_info_add_watch(rnip, rnip->rni_namelist, name));
7462 }
7463
7464 int
7465 rc_notify_info_add_type(rc_notify_info_t *rnip, const char *type)
7466 {
7467 return (rc_notify_info_add_watch(rnip, rnip->rni_typelist, type));
7468 }
7469
7470 /*
7471 * Wait for and report an event of interest to rnip, a notification client
7472 */
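/*
 * Returns REP_PROTOCOL_SUCCESS with *out set to the node that changed, or,
 * for a deletion event, with *out cleared and the deleted entity's FMRI
 * copied into outp (up to sz bytes).  Returns REP_PROTOCOL_DONE once the
 * info has been deactivated (rc_notify_info_remove_locked()), which is how
 * a draining client falls out of its wait loop.
 *
 * A rough consumer loop as a sketch only; "info" is assumed to be an
 * initialized rc_notify_info_t, "ptr" an rc_node_ptr_t the caller owns,
 * and the buffer size and pg name are illustrative:
 *
 *	char fmri[1024];
 *
 *	(void) rc_notify_info_add_name(&info, "general");
 *	while (rc_notify_info_wait(&info, &ptr, fmri, sizeof (fmri)) ==
 *	    REP_PROTOCOL_SUCCESS) {
 *		... report a change to ptr, or a deletion of fmri ...
 *	}
 */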
7473 int
7474 rc_notify_info_wait(rc_notify_info_t *rnip, rc_node_ptr_t *out,
7475 char *outp, size_t sz)
7476 {
7477 rc_notify_t *np;
7478 rc_notify_t *me = &rnip->rni_notify;
7479 rc_node_t *nnp;
7480 rc_notify_delete_t *ndp;
7481
7482 int am_first_info;
7483
7484 if (sz > 0)
7485 outp[0] = 0;
7486
7487 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7488
7489 while ((rnip->rni_flags & (RC_NOTIFY_ACTIVE | RC_NOTIFY_DRAIN)) ==
7490 RC_NOTIFY_ACTIVE) {
7491 /*
7492 * If I'm first on the notify list, it is my job to
7493 * clean up any notifications I pass by. I can't do that
7494 * if someone is blocking the list from removals, so I
7495 * have to wait until they have all drained.
7496 */
7497 am_first_info = (uu_list_first(rc_notify_list) == me);
7498 if (am_first_info && rc_notify_in_use) {
7499 rnip->rni_waiters++;
7500 (void) pthread_cond_wait(&rc_pg_notify_cv,
7501 &rc_pg_notify_lock);
7502 rnip->rni_waiters--;
7503 continue;
7504 }
7505
7506 /*
7507 * Search the list for a node of interest.
7508 */
7509 np = uu_list_next(rc_notify_list, me);
7510 while (np != NULL && !rc_notify_info_interested(rnip, np)) {
7511 rc_notify_t *next = uu_list_next(rc_notify_list, np);
7512
7513 if (am_first_info) {
7514 if (np->rcn_info) {
7515 /*
7516 * Passing another client -- stop
7517 * cleaning up notifications
7518 */
7519 am_first_info = 0;
7520 } else {
7521 rc_notify_remove_locked(np);
7522 }
7523 }
7524 np = next;
7525 }
7526
7527 /*
7528 * Nothing of interest -- wait for notification
7529 */
7530 if (np == NULL) {
7531 rnip->rni_waiters++;
7532 (void) pthread_cond_wait(&rnip->rni_cv,
7533 &rc_pg_notify_lock);
7534 rnip->rni_waiters--;
7535 continue;
7536 }
7537
7538 /*
7539 * found something to report -- move myself after the
7540 * notification and process it.
7541 */
7542 (void) uu_list_remove(rc_notify_list, me);
7543 (void) uu_list_insert_after(rc_notify_list, np, me);
7544
7545 if ((ndp = np->rcn_delete) != NULL) {
7546 (void) strlcpy(outp, ndp->rnd_fmri, sz);
7547 if (am_first_info)
7548 rc_notify_remove_locked(np);
7549 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7550 rc_node_clear(out, 0);
7551 return (REP_PROTOCOL_SUCCESS);
7552 }
7553
7554 nnp = np->rcn_node;
7555 assert(nnp != NULL);
7556
7557 /*
7558 * We can't bump nnp's reference count without grabbing its
7559 * lock, and rc_pg_notify_lock is a leaf lock. So we
7560 * temporarily block all removals to keep nnp from
7561 * disappearing.
7562 */
7563 rc_notify_in_use++;
7564 assert(rc_notify_in_use > 0);
7565 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7566
7567 rc_node_assign(out, nnp);
7568
7569 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7570 assert(rc_notify_in_use > 0);
7571 rc_notify_in_use--;
7572
7573 if (am_first_info) {
7574 /*
7575 * While we had the lock dropped, another thread
7576 * may have also incremented rc_notify_in_use. We
7577 * need to make sure that we're back to 0 before
7578 * removing the node.
7579 */
7580 while (rc_notify_in_use) {
7581 (void) pthread_cond_wait(&rc_pg_notify_cv,
7582 &rc_pg_notify_lock);
7583 }
7584 rc_notify_remove_locked(np);
7585 }
7586 if (rc_notify_in_use == 0)
7587 (void) pthread_cond_broadcast(&rc_pg_notify_cv);
7588 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7589
7590 return (REP_PROTOCOL_SUCCESS);
7591 }
7592 /*
7593 * If we're the last one out, let people know it's clear.
7594 */
7595 if (rnip->rni_waiters == 0)
7596 (void) pthread_cond_broadcast(&rnip->rni_cv);
7597 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7598 return (REP_PROTOCOL_DONE);
7599 }
7600
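/*
 * Return rnip to its just-initialized state: deactivate it if necessary,
 * then free every name and type watch.  RC_NOTIFY_EMPTYING is held across
 * the frees (with rc_pg_notify_lock dropped) so that a concurrent
 * rc_notify_info_add_watch() call waits rather than racing with them.
 */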
7601 static void
7602 rc_notify_info_reset(rc_notify_info_t *rnip)
7603 {
7604 int i;
7605
7606 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7607 if (rnip->rni_flags & RC_NOTIFY_ACTIVE)
7608 rc_notify_info_remove_locked(rnip);
7609 assert(!(rnip->rni_flags & (RC_NOTIFY_DRAIN | RC_NOTIFY_EMPTYING)));
7610 rnip->rni_flags |= RC_NOTIFY_EMPTYING;
7611 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7612
7613 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7614 if (rnip->rni_namelist[i] != NULL) {
7615 free((void *)rnip->rni_namelist[i]);
7616 rnip->rni_namelist[i] = NULL;
7617 }
7618 if (rnip->rni_typelist[i] != NULL) {
7619 free((void *)rnip->rni_typelist[i]);
7620 rnip->rni_typelist[i] = NULL;
7621 }
7622 }
7623
7624 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7625 rnip->rni_flags &= ~RC_NOTIFY_EMPTYING;
7626 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7627 }
7628
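/*
 * Final teardown of a notification client: reset it (deactivating it and
 * freeing its watch lists), then release its list nodes.  The structure
 * must not be reused without another rc_notify_info_init().
 */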
7629 void
7630 rc_notify_info_fini(rc_notify_info_t *rnip)
7631 {
7632 rc_notify_info_reset(rnip);
7633
7634 uu_list_node_fini(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7635 uu_list_node_fini(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7636 rc_notify_pool);
7637 }
7638