/* $NetBSD: cache.c,v 1.3 2021/08/14 16:15:00 christos Exp $ */

/* cache.c - routines to maintain an in-core cache of entries */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 2001-2021 The OpenLDAP Foundation.
 * Portions Copyright 2001-2003 Pierangelo Masarati.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */
/* ACKNOWLEDGEMENTS:
 * This work was initially developed by Pierangelo Masarati for inclusion
 * in OpenLDAP Software.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: cache.c,v 1.3 2021/08/14 16:15:00 christos Exp $");

#include "portable.h"

#include <stdio.h>
#include "ac/string.h"

#include "slap.h"

#include "back-monitor.h"
/*
 * The cache maps DNs to Entries.
 * Each entry, in turn, holds the list of its children in the e_private field.
 * The search operation uses this to perform onelevel and subtree candidate
 * selection.
 */
typedef struct monitor_cache_t {
	struct berval		mc_ndn;
	Entry			*mc_e;
} monitor_cache_t;

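/*
 * Typical usage, as an illustrative sketch only (mi, ndn and e are
 * placeholders, not taken verbatim from the callers elsewhere in
 * back-monitor):
 *
 *	Entry *e = ...;
 *	monitor_cache_add( mi, e );		// register a newly built entry
 *	if ( monitor_cache_get( mi, &ndn, &e ) == 0 ) {
 *		// e comes back with its mutex held
 *		monitor_cache_release( mi, e );
 *	}
 */
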
/*
 * compares entries based on the dn
 */
int
monitor_cache_cmp(
	const void	*c1,
	const void	*c2 )
{
	monitor_cache_t	*cc1 = ( monitor_cache_t * )c1;
	monitor_cache_t	*cc2 = ( monitor_cache_t * )c2;

	/*
	 * case sensitive, because the dn MUST be normalized
	 */
	return ber_bvcmp( &cc1->mc_ndn, &cc2->mc_ndn );
}

/*
 * checks for duplicate entries
 */
int
monitor_cache_dup(
	void	*c1,
	void	*c2 )
{
	monitor_cache_t	*cc1 = ( monitor_cache_t * )c1;
	monitor_cache_t	*cc2 = ( monitor_cache_t * )c2;

	/*
	 * case sensitive, because the dn MUST be normalized
	 */
	return ber_bvcmp( &cc1->mc_ndn, &cc2->mc_ndn ) == 0 ? -1 : 0;
}

/*
 * adds an entry to the cache
 */
int
monitor_cache_add(
	monitor_info_t	*mi,
	Entry		*e )
{
	monitor_cache_t	*mc;
	int		rc;

	assert( mi != NULL );
	assert( e != NULL );

	mc = ( monitor_cache_t * )ch_malloc( sizeof( monitor_cache_t ) );
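	/* mc_ndn aliases the entry's normalized DN (a struct copy, not a
	 * duplicated buffer), so it stays valid for as long as the entry
	 * remains in the cache */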
	mc->mc_ndn = e->e_nname;
	mc->mc_e = e;
	ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
	rc = ldap_avl_insert( &mi->mi_cache, ( caddr_t )mc,
		monitor_cache_cmp, monitor_cache_dup );
	ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );

	return rc;
}

/*
 * locks the entry (no r/w)
 */
int
monitor_cache_lock(
	Entry		*e )
{
	monitor_entry_t	*mp;

	assert( e != NULL );
	assert( e->e_private != NULL );

	mp = ( monitor_entry_t * )e->e_private;
	ldap_pvt_thread_mutex_lock( &mp->mp_mutex );

	return( 0 );
}

/*
 * tries to lock the entry (no r/w)
 */
int
monitor_cache_trylock(
	Entry		*e )
{
	monitor_entry_t	*mp;

	assert( e != NULL );
	assert( e->e_private != NULL );

	mp = ( monitor_entry_t * )e->e_private;
	return ldap_pvt_thread_mutex_trylock( &mp->mp_mutex );
}

/*
 * gets an entry from the cache based on the normalized dn
 * with mutex locked
 */
int
monitor_cache_get(
	monitor_info_t	*mi,
	struct berval	*ndn,
	Entry		**ep )
{
	monitor_cache_t	tmp_mc, *mc;

	assert( mi != NULL );
	assert( ndn != NULL );
	assert( ep != NULL );

	*ep = NULL;

	tmp_mc.mc_ndn = *ndn;
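	/* the entry mutex is only tried while the cache mutex is held;
	 * if the trylock fails, both locks are dropped and the lookup is
	 * retried after a yield, so a thread that holds the entry mutex
	 * and is waiting for the cache mutex cannot deadlock us */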
retry:;
	ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
	mc = ( monitor_cache_t * )ldap_avl_find( mi->mi_cache,
			( caddr_t )&tmp_mc, monitor_cache_cmp );

	if ( mc != NULL ) {
		/* entry is returned with mutex locked */
		if ( monitor_cache_trylock( mc->mc_e ) ) {
			ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
			ldap_pvt_thread_yield();
			goto retry;
		}
		*ep = mc->mc_e;
	}

	ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );

	return ( *ep == NULL ? -1 : 0 );
}

/*
 * removes an entry from the cache based on the normalized dn;
 * on success the entry is returned in *ep, detached from the cache
 * and from its parent's list of children, with its mutex destroyed
 */
int
monitor_cache_remove(
	monitor_info_t	*mi,
	struct berval	*ndn,
	Entry		**ep )
{
	monitor_cache_t	tmp_mc, *mc;
	struct berval	pndn;

	assert( mi != NULL );
	assert( ndn != NULL );
	assert( ep != NULL );

	*ep = NULL;

	dnParent( ndn, &pndn );

retry:;
	ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );

	tmp_mc.mc_ndn = *ndn;
	mc = ( monitor_cache_t * )ldap_avl_find( mi->mi_cache,
		( caddr_t )&tmp_mc, monitor_cache_cmp );

	if ( mc != NULL ) {
		monitor_cache_t	*pmc;

		if ( monitor_cache_trylock( mc->mc_e ) ) {
			ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
			goto retry;
		}

		tmp_mc.mc_ndn = pndn;
		pmc = ( monitor_cache_t * )ldap_avl_find( mi->mi_cache,
			( caddr_t )&tmp_mc, monitor_cache_cmp );
		if ( pmc != NULL ) {
			monitor_entry_t	*mp = (monitor_entry_t *)mc->mc_e->e_private,
					*pmp = (monitor_entry_t *)pmc->mc_e->e_private;
			Entry		**entryp;

			if ( monitor_cache_trylock( pmc->mc_e ) ) {
				monitor_cache_release( mi, mc->mc_e );
				ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
				goto retry;
			}

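			/* unlink the entry from the parent's list of children;
			 * the list is chained through each child's private
			 * monitor_entry_t via the mp_next pointer */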
			for ( entryp = &pmp->mp_children; *entryp != NULL; ) {
				monitor_entry_t	*next = (monitor_entry_t *)(*entryp)->e_private;
				if ( next == mp ) {
					*entryp = next->mp_next;
					entryp = NULL;
					break;
				}

				entryp = &next->mp_next;
			}

			if ( entryp != NULL ) {
				Debug( LDAP_DEBUG_ANY,
					"monitor_cache_remove(\"%s\"): "
					"not in parent's list\n",
					ndn->bv_val );
			}

			/* either succeeded, and the entry is no longer
			 * in its parent's list, or failed, and the
			 * entry is neither mucked with nor returned */
			monitor_cache_release( mi, pmc->mc_e );

			if ( entryp == NULL ) {
				monitor_cache_t	*tmpmc;

				tmp_mc.mc_ndn = *ndn;
				tmpmc = ldap_avl_delete( &mi->mi_cache,
					( caddr_t )&tmp_mc, monitor_cache_cmp );
				assert( tmpmc == mc );

				*ep = mc->mc_e;
				ch_free( mc );
				mc = NULL;

				/* NOTE: we destroy the mutex, but otherwise
				 * leave the private data around; specifically,
				 * callbacks need to be freed by someone else */

				ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );
				mp->mp_next = NULL;
				mp->mp_children = NULL;
			}

		}

		if ( mc ) {
			monitor_cache_release( mi, mc->mc_e );
		}
	}

	ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );

	return ( *ep == NULL ? -1 : 0 );
}

/*
 * If the entry exists in the cache, it is returned with its mutex locked;
 * otherwise the closest cached ancestor is located and, if it may generate
 * volatile descendants, an attempt is made to generate the required entry;
 * if that succeeds, the entry is returned locked as well.  On failure,
 * *matched is set to the closest existing ancestor.
 */
int
monitor_cache_dn2entry(
	Operation		*op,
	SlapReply		*rs,
	struct berval		*ndn,
	Entry			**ep,
	Entry			**matched )
{
	monitor_info_t	*mi = (monitor_info_t *)op->o_bd->be_private;
	int		rc;
	struct berval	p_ndn = BER_BVNULL;
	Entry		*e_parent;
	monitor_entry_t	*mp;

	assert( mi != NULL );
	assert( ndn != NULL );
	assert( ep != NULL );
	assert( matched != NULL );

	*matched = NULL;

	if ( !dnIsSuffix( ndn, &op->o_bd->be_nsuffix[ 0 ] ) ) {
		return( -1 );
	}

	rc = monitor_cache_get( mi, ndn, ep );
	if ( !rc && *ep != NULL ) {
		return( 0 );
	}

	/* try with parent/ancestors */
	if ( BER_BVISNULL( ndn ) ) {
		BER_BVSTR( &p_ndn, "" );

	} else {
		dnParent( ndn, &p_ndn );
	}

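	/* recurse towards the suffix: on success, e_parent is the closest
	 * cached (or freshly generated) ancestor, returned with its mutex
	 * held */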
	rc = monitor_cache_dn2entry( op, rs, &p_ndn, &e_parent, matched );
	if ( rc || e_parent == NULL ) {
		return( -1 );
	}

	mp = ( monitor_entry_t * )e_parent->e_private;
	rc = -1;
	if ( mp->mp_flags & MONITOR_F_VOLATILE_CH ) {
		/* parent entry generates volatile children */
		rc = monitor_entry_create( op, rs, ndn, e_parent, ep );
	}

	if ( !rc ) {
		monitor_cache_lock( *ep );
		monitor_cache_release( mi, e_parent );

	} else {
		*matched = e_parent;
	}

	return( rc );
}

/*
 * releases the lock of the entry; if it is marked as volatile, it is
 * destroyed.
 */
int
monitor_cache_release(
	monitor_info_t	*mi,
	Entry		*e )
{
	monitor_entry_t	*mp;

	assert( mi != NULL );
	assert( e != NULL );
	assert( e->e_private != NULL );

	mp = ( monitor_entry_t * )e->e_private;

	if ( mp->mp_flags & MONITOR_F_VOLATILE ) {
		monitor_cache_t	*mc, tmp_mc;

		/* volatile entries do not return to cache */
		ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
		tmp_mc.mc_ndn = e->e_nname;
		mc = ldap_avl_delete( &mi->mi_cache,
				( caddr_t )&tmp_mc, monitor_cache_cmp );
		ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
		if ( mc != NULL ) {
			ch_free( mc );
		}

		ldap_pvt_thread_mutex_unlock( &mp->mp_mutex );
		ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );
		ch_free( mp );
		e->e_private = NULL;
		entry_free( e );

		return( 0 );
	}

	ldap_pvt_thread_mutex_unlock( &mp->mp_mutex );

	return( 0 );
}

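/*
 * frees a single cache slot (used when the whole cache is destroyed):
 * runs and frees the entry's callbacks, destroys its mutex and frees
 * both the private data and the entry itself
 */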
static void
monitor_entry_destroy( void *v_mc )
{
	monitor_cache_t	*mc = (monitor_cache_t *)v_mc;

	if ( mc->mc_e != NULL ) {
		monitor_entry_t	*mp;

		assert( mc->mc_e->e_private != NULL );

		mp = ( monitor_entry_t * )mc->mc_e->e_private;

		if ( mp->mp_cb ) {
			monitor_callback_t	*cb;

			for ( cb = mp->mp_cb; cb != NULL; ) {
				monitor_callback_t	*next = cb->mc_next;

				if ( cb->mc_free ) {
					(void)cb->mc_free( mc->mc_e, &cb->mc_private );
				}
				ch_free( cb );

				cb = next;
			}
		}

		ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );

		ch_free( mp );
		mc->mc_e->e_private = NULL;
		entry_free( mc->mc_e );
	}

	ch_free( mc );
}

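/*
 * empties the cache, freeing every slot through monitor_entry_destroy()
 */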
int
monitor_cache_destroy(
	monitor_info_t	*mi )
{
	if ( mi->mi_cache ) {
		ldap_avl_free( mi->mi_cache, monitor_entry_destroy );
	}

	return 0;
}

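/*
 * frontend hook: releases an entry obtained through the monitor backend;
 * the rw flag is ignored, since monitor entry locks are not read/write
 */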
int monitor_back_release(
	Operation	*op,
	Entry		*e,
	int		rw )
{
	monitor_info_t	*mi = ( monitor_info_t * )op->o_bd->be_private;
	return monitor_cache_release( mi, e );
}