/*	$NetBSD: heimbase.c,v 1.3 2023/06/19 21:41:42 christos Exp $	*/

/*
 * Copyright (c) 2010 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Portions Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "baselocl.h"
#include <syslog.h>

static heim_base_atomic_type tidglobal = HEIM_TID_USER;

struct heim_base {
    heim_type_t isa;
    heim_base_atomic_type ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    uintptr_t isaextra[3];
};

/* specialized version of base */
struct heim_base_mem {
    heim_type_t isa;
    heim_base_atomic_type ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    const char *name;
    void (*dealloc)(void *);
    uintptr_t isaextra[1];
};

#define PTR2BASE(ptr) (((struct heim_base *)ptr) - 1)
#define BASE2PTR(ptr) ((void *)(((struct heim_base *)ptr) + 1))
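
/*
 * Note: the pointer handed to callers points just past the hidden
 * struct heim_base header, so PTR2BASE() steps back over that header
 * and BASE2PTR() steps forward past it.
 */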

#ifdef HEIM_BASE_NEED_ATOMIC_MUTEX
HEIMDAL_MUTEX _heim_base_mutex = HEIMDAL_MUTEX_INITIALIZER;
#endif

/*
 * Auto release structure
 */

struct heim_auto_release {
    HEIM_TAILQ_HEAD(, heim_base) pool;
    HEIMDAL_MUTEX pool_mutex;
    struct heim_auto_release *parent;
};


/**
 * Retain object (i.e., take a reference)
 *
 * @param ptr object to be retained, NULL is ok
 *
 * @return the same object as passed in
 */

void *
heim_retain(void *ptr)
{
    struct heim_base *p;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    p = PTR2BASE(ptr);
    if (p->ref_cnt == heim_base_atomic_max)
        return ptr;

    if ((heim_base_atomic_inc(&p->ref_cnt) - 1) == 0)
        heim_abort("resurrection");
    return ptr;
}

/**
 * Release object, free if reference count reaches zero
 *
 * @param ptr object to be released
 */

void
heim_release(void *ptr)
{
    heim_base_atomic_type old;
    struct heim_base *p;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return;

    p = PTR2BASE(ptr);
    if (p->ref_cnt == heim_base_atomic_max)
        return;

    old = heim_base_atomic_dec(&p->ref_cnt) + 1;

    if (old > 1)
        return;

    if (old == 1) {
        heim_auto_release_t ar = p->autorelpool;
        /* remove from autorel pool list */
        if (ar) {
            p->autorelpool = NULL;
            HEIMDAL_MUTEX_lock(&ar->pool_mutex);
            HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
            HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
        }
        if (p->isa->dealloc)
            p->isa->dealloc(ptr);
        free(p);
    } else
        heim_abort("over release");
}
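
/*
 * A minimal usage sketch (not compiled in), showing the reference-count
 * life cycle implemented by heim_retain()/heim_release().  It assumes
 * the heim_string_create() constructor from this library.
 */
#if 0
{
    heim_string_t s = heim_string_create("example");   /* ref_cnt == 1 */

    heim_retain(s);                                     /* ref_cnt == 2 */
    heim_release(s);                                    /* ref_cnt == 1 */
    heim_release(s);           /* ref_cnt drops to 0: dealloc + free() */
}
#endif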

/**
 * Get a description of the object.  The result is placed on the
 * autorelease pool, so callers must run inside one.
 */

heim_string_t
heim_description(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    if (p->isa->desc == NULL)
        return heim_auto_release(heim_string_ref_create(p->isa->name, NULL));
    return heim_auto_release(p->isa->desc(ptr));
}


void
_heim_make_permanent(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    p->ref_cnt = heim_base_atomic_max;
}


static heim_type_t tagged_isa[9] = {
    &_heim_number_object,
    &_heim_null_object,
    &_heim_bool_object,

    NULL,
    NULL,
    NULL,

    NULL,
    NULL,
    NULL
};

heim_type_t
_heim_get_isa(heim_object_t ptr)
{
    struct heim_base *p;
    if (heim_base_is_tagged(ptr)) {
        if (heim_base_is_tagged_object(ptr))
            return tagged_isa[heim_base_tagged_object_tid(ptr)];
        heim_abort("not a supported tagged type");
    }
    p = PTR2BASE(ptr);
    return p->isa;
}

/**
 * Get type ID of object
 *
 * @param ptr object to get the type id of
 *
 * @return type id of object
 */

heim_tid_t
heim_get_tid(heim_object_t ptr)
{
    heim_type_t isa = _heim_get_isa(ptr);
    return isa->tid;
}

/**
 * Get hash value of object
 *
 * @param ptr object to get the hash value for
 *
 * @return a hash value
 */

unsigned long
heim_get_hash(heim_object_t ptr)
{
    heim_type_t isa = _heim_get_isa(ptr);
    if (isa->hash)
        return isa->hash(ptr);
    return (unsigned long)ptr;
}

/**
 * Compare two objects; returns 0 if they are equal.  Can be used for
 * qsort() and friends.
 *
 * @param a first object to compare
 * @param b second object to compare
 *
 * @return 0 if objects are equal
 */

int
heim_cmp(heim_object_t a, heim_object_t b)
{
    heim_tid_t ta, tb;
    heim_type_t isa;

    ta = heim_get_tid(a);
    tb = heim_get_tid(b);

    if (ta != tb)
        return ta - tb;

    isa = _heim_get_isa(a);

    if (isa->cmp)
        return isa->cmp(a, b);

    return (uintptr_t)a - (uintptr_t)b;
}
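
/*
 * Sketch (not compiled in): since heim_cmp() takes the objects
 * themselves, a qsort() comparator needs a small wrapper that
 * dereferences the array elements.  The wrapper name is illustrative.
 */
#if 0
static int
obj_qsort_cmp(const void *a, const void *b)
{
    heim_object_t const *oa = a;
    heim_object_t const *ob = b;

    return heim_cmp(*oa, *ob);
}

/* qsort(objs, nobjs, sizeof(objs[0]), obj_qsort_cmp); */
#endif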

/*
 * Private - allocates a memory object
 */

static void
memory_dealloc(void *ptr)
{
    if (ptr) {
        struct heim_base_mem *p = (struct heim_base_mem *)PTR2BASE(ptr);

        if (p->dealloc)
            p->dealloc(ptr);
    }
}

struct heim_type_data memory_object = {
    HEIM_TID_MEMORY,
    "memory-object",
    NULL,
    memory_dealloc,
    NULL,
    NULL,
    NULL,
    NULL
};

/**
 * Allocate memory for an object of anonymous type
 *
 * @param size size of object to be allocated
 * @param name name of ad-hoc type
 * @param dealloc destructor function
 *
 * Objects allocated with this interface do not serialize.
 *
 * @return allocated object
 */

void *
heim_alloc(size_t size, const char *name, heim_type_dealloc dealloc)
{
    /* XXX use posix_memalign */

    struct heim_base_mem *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = &memory_object;
    p->ref_cnt = 1;
    p->name = name;
    p->dealloc = dealloc;
    return BASE2PTR(p);
}
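
/*
 * Sketch (not compiled in): an ad-hoc type allocated with heim_alloc().
 * The struct and destructor below are illustrative only; the destructor
 * releases members, while heim_release() frees the object itself.
 */
#if 0
struct my_ctx {
    char *buf;
};

static void
my_ctx_dealloc(void *ptr)
{
    struct my_ctx *c = ptr;

    free(c->buf);        /* members only; the base layer frees the object */
}

/*
 * struct my_ctx *c = heim_alloc(sizeof(*c), "my-ctx", my_ctx_dealloc);
 * ...
 * heim_release(c);      (calls my_ctx_dealloc(c), then frees it)
 */
#endif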

heim_type_t
_heim_create_type(const char *name,
                  heim_type_init init,
                  heim_type_dealloc dealloc,
                  heim_type_copy copy,
                  heim_type_cmp cmp,
                  heim_type_hash hash,
                  heim_type_description desc)
{
    heim_type_t type;

    type = calloc(1, sizeof(*type));
    if (type == NULL)
        return NULL;

    type->tid = heim_base_atomic_inc(&tidglobal);
    type->name = name;
    type->init = init;
    type->dealloc = dealloc;
    type->copy = copy;
    type->cmp = cmp;
    type->hash = hash;
    type->desc = desc;

    return type;
}

heim_object_t
_heim_alloc_object(heim_type_t type, size_t size)
{
    /* XXX should use posix_memalign */
    struct heim_base *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = type;
    p->ref_cnt = 1;

    return BASE2PTR(p);
}

void *
_heim_get_isaextra(heim_object_t ptr, size_t idx)
{
    struct heim_base *p = (struct heim_base *)PTR2BASE(ptr);

    heim_assert(ptr != NULL, "internal error");
    if (p->isa == &memory_object)
        return NULL;
    heim_assert(idx < 3, "invalid private heim_base extra data index");
    return &p->isaextra[idx];
}

heim_tid_t
_heim_type_get_tid(heim_type_t type)
{
    return type->tid;
}

#if !defined(WIN32) && !defined(HAVE_DISPATCH_DISPATCH_H) && defined(ENABLE_PTHREAD_SUPPORT)
static pthread_once_t once_arg_key_once = PTHREAD_ONCE_INIT;
static pthread_key_t once_arg_key;

static void
once_arg_key_once_init(void)
{
    errno = pthread_key_create(&once_arg_key, NULL);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_key_create() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
}

struct once_callback {
    void (*fn)(void *);
    void *data;
};

static void
once_callback_caller(void)
{
    struct once_callback *once_callback = pthread_getspecific(once_arg_key);

    if (once_callback == NULL) {
        fprintf(stderr, "Error: pthread_once() calls callback on "
                "different thread?!  Cannot continue.\n");
        abort();
    }
    once_callback->fn(once_callback->data);
}
#endif

/**
 * Call func once and only once
 *
 * @param once pointer to a heim_base_once_t
 * @param ctx context passed to func
 * @param func function to be called
 */

void
heim_base_once_f(heim_base_once_t *once, void *ctx, void (*func)(void *))
{
#if defined(WIN32)
    /*
     * With a libroken wrapper for some CAS function and a libroken yield()
     * wrapper we could make this the default implementation when we have
     * neither Grand Central nor POSIX threads.
     *
     * We could also adapt the double-checked lock pattern with CAS
     * providing the necessary memory barriers in the absence of
     * portable explicit memory barrier APIs.
     */
    /*
     * We use CAS operations in large part to provide implied memory
     * barriers.
     *
     * State 0 means that func() has never executed.
     * State 1 means that func() is executing.
     * State 2 means that func() has completed execution.
     */
    if (InterlockedCompareExchange(once, 1L, 0L) == 0L) {
        /* State is now 1 */
        (*func)(ctx);
        (void)InterlockedExchange(once, 2L);
        /* State is now 2 */
    } else {
        /*
         * The InterlockedCompareExchange is being used to fetch
         * the current state under a full memory barrier.  As long
         * as the current state is 1 continue to spin.
         */
        while (InterlockedCompareExchange(once, 2L, 0L) == 1L)
            SwitchToThread();
    }
#elif defined(HAVE_DISPATCH_DISPATCH_H)
    dispatch_once_f(once, ctx, func);
#elif defined(ENABLE_PTHREAD_SUPPORT)
    struct once_callback once_callback;

    once_callback.fn = func;
    once_callback.data = ctx;

    errno = pthread_once(&once_arg_key_once, once_arg_key_once_init);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_setspecific(once_arg_key, &once_callback);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_setspecific() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_once(once, once_callback_caller);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
#else
    static HEIMDAL_MUTEX mutex = HEIMDAL_MUTEX_INITIALIZER;
    HEIMDAL_MUTEX_lock(&mutex);
    if (*once == 0) {
        *once = 1;
        HEIMDAL_MUTEX_unlock(&mutex);
        func(ctx);
        HEIMDAL_MUTEX_lock(&mutex);
        *once = 2;
        HEIMDAL_MUTEX_unlock(&mutex);
    } else if (*once == 2) {
        HEIMDAL_MUTEX_unlock(&mutex);
    } else {
        HEIMDAL_MUTEX_unlock(&mutex);
        while (1) {
            struct timeval tv = { 0, 1000 };
            select(0, NULL, NULL, NULL, &tv);
            HEIMDAL_MUTEX_lock(&mutex);
            if (*once == 2)
                break;
            HEIMDAL_MUTEX_unlock(&mutex);
        }
        HEIMDAL_MUTEX_unlock(&mutex);
    }
#endif
}
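
/*
 * Sketch (not compiled in): typical one-time initialization through
 * heim_base_once_f().  The init function and once-control variable
 * are illustrative only.
 */
#if 0
static heim_base_once_t my_once = HEIM_BASE_ONCE_INIT;

static void
my_module_init(void *ctx)
{
    /* runs exactly once, even if several threads race to get here */
}

/* at every entry point: */
/* heim_base_once_f(&my_once, NULL, my_module_init); */
#endif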

/**
 * Abort and log the failure (using syslog)
 */

void
heim_abort(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    heim_abortv(fmt, ap);
    va_end(ap);
}

/**
 * Abort and log the failure (using syslog)
 */

void
heim_abortv(const char *fmt, va_list ap)
{
    static char str[1024];

    vsnprintf(str, sizeof(str), fmt, ap);
    syslog(LOG_ERR, "heim_abort: %s", str);
    abort();
}

/*
 *
 */

static int ar_created = 0;
static HEIMDAL_thread_key ar_key;

struct ar_tls {
    struct heim_auto_release *head;
    struct heim_auto_release *current;
    HEIMDAL_MUTEX tls_mutex;
};

static void
ar_tls_delete(void *ptr)
{
    struct ar_tls *tls = ptr;
    heim_auto_release_t next = NULL;

    if (tls == NULL)
        return;
    for (; tls->current != NULL; tls->current = next) {
        next = tls->current->parent;
        heim_release(tls->current);
    }
    free(tls);
}

static void
init_ar_tls(void *ptr)
{
    int ret;
    HEIMDAL_key_create(&ar_key, ar_tls_delete, ret);
    if (ret == 0)
        ar_created = 1;
}

static struct ar_tls *
autorel_tls(void)
{
    static heim_base_once_t once = HEIM_BASE_ONCE_INIT;
    struct ar_tls *arp;
    int ret;

    heim_base_once_f(&once, NULL, init_ar_tls);
    if (!ar_created)
        return NULL;

    arp = HEIMDAL_getspecific(ar_key);
    if (arp == NULL) {

        arp = calloc(1, sizeof(*arp));
        if (arp == NULL)
            return NULL;
        HEIMDAL_setspecific(ar_key, arp, ret);
        if (ret) {
            free(arp);
            return NULL;
        }
    }
    return arp;

}

static void
autorel_dealloc(void *ptr)
{
    heim_auto_release_t ar = ptr;
    struct ar_tls *tls;

    tls = autorel_tls();
    if (tls == NULL)
        heim_abort("autorelease pool released on thread w/o autorelease inited");

    heim_auto_release_drain(ar);

    if (!HEIM_TAILQ_EMPTY(&ar->pool))
        heim_abort("pool not empty after draining");

    HEIMDAL_MUTEX_lock(&tls->tls_mutex);
    if (tls->current != ptr)
        heim_abort("autorelease not releasing top pool");

    tls->current = ar->parent;
    HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
}

static int
autorel_cmp(void *a, void *b)
{
    return (a == b);
}

static unsigned long
autorel_hash(void *ptr)
{
    return (unsigned long)ptr;
}


static struct heim_type_data _heim_autorel_object = {
    HEIM_TID_AUTORELEASE,
    "autorelease-pool",
    NULL,
    autorel_dealloc,
    NULL,
    autorel_cmp,
    autorel_hash,
    NULL
};

/**
 * Create thread-specific object auto-release pool
 *
 * Objects placed on the per-thread auto-release pool (with
 * heim_auto_release()) can be released in one fell swoop by calling
 * heim_auto_release_drain().
 */

heim_auto_release_t
heim_auto_release_create(void)
{
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;

    if (tls == NULL)
        heim_abort("Failed to create/get autorelease head");

    ar = _heim_alloc_object(&_heim_autorel_object, sizeof(struct heim_auto_release));
    if (ar) {
        HEIMDAL_MUTEX_lock(&tls->tls_mutex);
        if (tls->head == NULL)
            tls->head = ar;
        ar->parent = tls->current;
        tls->current = ar;
        HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
    }

    return ar;
}

/**
 * Place the current object on the thread's auto-release pool
 *
 * @param ptr object
 */

heim_object_t
heim_auto_release(heim_object_t ptr)
{
    struct heim_base *p;
    struct ar_tls *tls;
    heim_auto_release_t ar;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    p = PTR2BASE(ptr);
    tls = autorel_tls();

    /* drop from old pool */
    if ((ar = p->autorelpool) != NULL) {
        HEIMDAL_MUTEX_lock(&ar->pool_mutex);
        HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
        p->autorelpool = NULL;
        HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
    }

    if (tls == NULL || (ar = tls->current) == NULL)
        heim_abort("no auto release pool in place, would leak");

    HEIMDAL_MUTEX_lock(&ar->pool_mutex);
    HEIM_TAILQ_INSERT_HEAD(&ar->pool, p, autorel);
    p->autorelpool = ar;
    HEIMDAL_MUTEX_unlock(&ar->pool_mutex);

    return ptr;
}

/**
 * Release all objects on the given auto-release pool
 */

void
heim_auto_release_drain(heim_auto_release_t autorel)
{
    heim_object_t obj;

    /* release all elements on the tail queue */

    HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    while (!HEIM_TAILQ_EMPTY(&autorel->pool)) {
        obj = HEIM_TAILQ_FIRST(&autorel->pool);
        HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
        heim_release(BASE2PTR(obj));
        HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    }
    HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
}
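
/*
 * Sketch (not compiled in): per-thread autorelease pool usage.  Objects
 * put on the pool with heim_auto_release() are released when the pool
 * itself is released (which drains it via autorel_dealloc() above).
 * heim_string_create() is assumed from this library.
 */
#if 0
{
    heim_auto_release_t pool = heim_auto_release_create();
    heim_string_t s;

    s = heim_auto_release(heim_string_create("temporary"));
    /* ... use s without an explicit heim_release() ... */

    heim_release(pool);     /* drains the pool, releasing s */
}
#endif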

/*
 * Helper for heim_path_vget() and heim_path_delete().  On success
 * outputs the node named by the path and the parent node and key
 * (useful for heim_path_delete()).
 */

static heim_object_t
heim_path_vget2(heim_object_t ptr, heim_object_t *parent, heim_object_t *key,
                heim_error_t *error, va_list ap)
{
    heim_object_t path_element;
    heim_object_t node, next_node;
    heim_tid_t node_type;

    *parent = NULL;
    *key = NULL;
    if (ptr == NULL)
        return NULL;

    for (node = ptr; node != NULL; ) {
        path_element = va_arg(ap, heim_object_t);
        if (path_element == NULL) {
            *parent = node;
            *key = path_element;
            return node;
        }

        node_type = heim_get_tid(node);
        switch (node_type) {
        case HEIM_TID_ARRAY:
        case HEIM_TID_DICT:
        case HEIM_TID_DB:
            break;
        default:
            if (node == ptr)
                heim_abort("heim_path_get() only operates on container types");
            return NULL;
        }

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_DB) {
            next_node = _heim_db_get_value(node, NULL, path_element, NULL);
        } else if (node_type == HEIM_TID_ARRAY) {
            int idx = -1;

            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path_get() path elements "
                                               "for array nodes must be "
                                               "numeric and positive");
                return NULL;
            }
            next_node = heim_array_get_value(node, idx);
        } else {
            if (error)
                *error = heim_error_create(EINVAL,
                                           "heim_path_get() node in path "
                                           "not a container type");
            return NULL;
        }
        node = next_node;
    }
    return NULL;
}

/**
 * Get a node in a heim_object_t tree by path
 *
 * @param ptr tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_vget(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_path_vget2(ptr, &p, &k, error, ap);
}

/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_vcopy(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
}

/**
 * Get a node in a tree by path
 *
 * @param ptr tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_get(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_path_vget2(ptr, &p, &k, error, ap);
    va_end(ap);
    return o;
}

/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_copy(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
    va_end(ap);
    return o;
}

/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ap NULL-terminated va_list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
 * then the leaf is not deleted).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */

int
heim_path_vcreate(heim_object_t ptr, size_t size, heim_object_t leaf,
                  heim_error_t *error, va_list ap)
{
    heim_object_t path_element = va_arg(ap, heim_object_t);
    heim_object_t next_path_element = NULL;
    heim_object_t node = ptr;
    heim_object_t next_node = NULL;
    heim_tid_t node_type;
    int ret = 0;

    if (ptr == NULL)
        heim_abort("heim_path_vcreate() does not create root nodes");

    while (path_element != NULL) {
        next_path_element = va_arg(ap, heim_object_t);
        node_type = heim_get_tid(node);

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_ARRAY) {
            int idx = -1;

            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path() path elements for "
                                               "array nodes must be numeric "
                                               "and positive");
                return EINVAL;
            }
            if (idx < heim_array_get_length(node))
                next_node = heim_array_get_value(node, idx);
            else
                next_node = NULL;
        } else if (node_type == HEIM_TID_DB && next_path_element != NULL) {
            if (error)
                *error = heim_error_create(EINVAL, "Interior node is a DB");
            return EINVAL;
        }

        if (next_path_element == NULL)
            break;

        /* Create missing interior node */
        if (next_node == NULL) {
            next_node = heim_dict_create(size); /* no arrays or DBs, just dicts */
            if (next_node == NULL) {
                ret = ENOMEM;
                goto err;
            }

            if (node_type == HEIM_TID_DICT) {
                ret = heim_dict_set_value(node, path_element, next_node);
            } else if (node_type == HEIM_TID_ARRAY &&
                heim_number_get_int(path_element) <= heim_array_get_length(node)) {
                ret = heim_array_insert_value(node,
                                              heim_number_get_int(path_element),
                                              next_node);
            } else {
                ret = EINVAL;
                if (error)
                    *error = heim_error_create(ret, "Node in path not a "
                                               "container");
            }
            heim_release(next_node);
            if (ret)
                goto err;
        }

        path_element = next_path_element;
        node = next_node;
        next_node = NULL;
    }

    if (path_element == NULL)
        goto err;

    /* Add the leaf */
    if (leaf != NULL) {
        if (node_type == HEIM_TID_DICT)
            ret = heim_dict_set_value(node, path_element, leaf);
        else
            ret = heim_array_insert_value(node,
                                          heim_number_get_int(path_element),
                                          leaf);
    }
    return ret;

err:
    if (error && !*error) {
        if (ret == ENOMEM)
            *error = heim_error_create_enomem();
        else
            *error = heim_error_create(ret, "Could not set "
                                       "dict value");
    }
    return ret;
}

/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
 * then the leaf is not deleted).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */

int
heim_path_create(heim_object_t ptr, size_t size, heim_object_t leaf,
                 heim_error_t *error, ...)
{
    va_list ap;
    int ret;

    va_start(ap, error);
    ret = heim_path_vcreate(ptr, size, leaf, error, ap);
    va_end(ap);
    return ret;
}
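
/*
 * Sketch (not compiled in): building and reading a path in a dict tree
 * with the functions above.  The key/value names are illustrative only;
 * heim_dict_create(), heim_string_create() and heim_number_create() are
 * assumed from this library.
 */
#if 0
{
    heim_dict_t tree = heim_dict_create(11);
    heim_string_t k1 = heim_string_create("outer");
    heim_string_t k2 = heim_string_create("inner");
    heim_number_t leaf = heim_number_create(42);
    heim_error_t error = NULL;
    heim_object_t v;

    /* creates tree["outer"]["inner"] = 42, making the interior dict as needed */
    heim_path_create(tree, 11, leaf, &error, k1, k2, NULL);

    v = heim_path_get(tree, &error, k1, k2, NULL);   /* not retained */
}
#endif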

/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of path component objects
 *
 * @addtogroup heimbase
 */

void
heim_path_vdelete(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t parent, key, child;

    child = heim_path_vget2(ptr, &parent, &key, error, ap);
    if (child != NULL) {
        if (heim_get_tid(parent) == HEIM_TID_DICT)
            heim_dict_delete_key(parent, key);
        else if (heim_get_tid(parent) == HEIM_TID_DB)
            heim_db_delete_key(parent, NULL, key, error);
        else if (heim_get_tid(parent) == HEIM_TID_ARRAY)
            heim_array_delete_value(parent, heim_number_get_int(key));
        heim_release(child);
    }
}

/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * @addtogroup heimbase
 */

void
heim_path_delete(heim_object_t ptr, heim_error_t *error, ...)
{
    va_list ap;

    va_start(ap, error);
    heim_path_vdelete(ptr, error, ap);
    va_end(ap);
    return;
}