/*
 * Copyright (c) 2005-2012 The DragonFly Project.
 * Copyright (c) 2013 François Tigeot
 * All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/idr.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/spinlock2.h>
#include <sys/limits.h>

#define IDR_DEFAULT_SIZE	256

MALLOC_DEFINE(M_IDR, "idr", "Integer ID management");

static void	idr_grow(struct idr *idp, int want);
static void	idr_reserve(struct idr *idp, int id, int incr);
static void	idr_set(struct idr *idp, int id, void *ptr);
static int	idr_find_free(struct idr *idp, int want, int lim);
static int	idr_pre_get1(struct idr *idp, int want, int lim);

/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

/*
 * Bigger ancestor.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

/*
 * Smaller ancestor.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}
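
/*
 * Illustrative worked example: for n = 5 (binary 101),
 * right_subtree_size(5) == 5 ^ (5 | 6) == 2, right_ancestor(5) == 7 and
 * left_ancestor(5) == (5 & 6) - 1 == 3.  These helpers index an implicit
 * binary tree laid out directly over the idr_nodes array, which is what
 * lets idr_find_free() and idr_reserve() below walk the tree without
 * storing any pointers.
 */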
static __inline void
idrfixup(struct idr *idp, int id)
{
	if (id < idp->idr_freeindex) {
		idp->idr_freeindex = id;
	}
	while (idp->idr_lastindex >= 0 &&
	    idp->idr_nodes[idp->idr_lastindex].data == NULL &&
	    idp->idr_nodes[idp->idr_lastindex].reserved == 0
	) {
		--idp->idr_lastindex;
	}
}

static __inline struct idr_node *
idr_get_node(struct idr *idp, int id)
{
	struct idr_node *idrnp;

	if (id >= idp->idr_count)
		return (NULL);
	idrnp = &idp->idr_nodes[id];
	if (idrnp->allocated == 0)
		return (NULL);
	return (idrnp);
}

static void
idr_reserve(struct idr *idp, int id, int incr)
{
	while (id >= 0) {
		idp->idr_nodes[id].allocated += incr;
		KKASSERT(idp->idr_nodes[id].allocated >= 0);
		id = left_ancestor(id);
	}
}

static int
idr_find_free(struct idr *idp, int want, int lim)
{
	int id, rsum, rsize, node;

	/*
	 * Search for a free id starting at the higher of want or
	 * idr_freeindex.  If that fails, the caller may consider
	 * expanding the node array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 * count.  If we happen to see a value of 0 then we can shortcut
	 * our search.  Otherwise we run through the tree going down
	 * branches we know have free id(s) until we hit a leaf node.
	 * The leaf node will be free but will not necessarily have an
	 * allocated field of 0.
	 */

	/* move up the tree looking for a subtree with a free node */
	for (id = max(want, idp->idr_freeindex); id < min(idp->idr_count, lim);
	     id = right_ancestor(id)) {
		if (idp->idr_nodes[id].allocated == 0)
			return (id);

		rsize = right_subtree_size(id);
		if (idp->idr_nodes[id].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * The free id is in the right subtree of the tree rooted
		 * at id.  Call that subtree R.  Look for the smallest
		 * (leftmost) subtree of R with an unallocated id: continue
		 * moving down the left branch until encountering a full
		 * left subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = id + rsize;
			rsum += idp->idr_nodes[node].allocated;
			if (idp->idr_nodes[id].allocated == rsum + rsize) {
				id = node;	/* move to the right */
				if (idp->idr_nodes[node].allocated == 0)
					return (id);
				rsum = 0;
			}
		}
		return (id);
	}
	return (-1);
}
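
/*
 * Illustrative example of how the cumulative counts propagate: reserving
 * id 5 with idr_reserve(idp, 5, 1) increments idr_nodes[5].allocated and
 * then idr_nodes[3].allocated (left_ancestor(5) == 3, and left_ancestor(3)
 * is negative, which ends the walk).  idr_find_free() above relies on
 * exactly these counts to decide which subtrees still contain a free id
 * without scanning every slot.
 */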
static int
idr_pre_get1(struct idr *idp, int want, int lim)
{
	int id;

	if (want >= idp->idr_count)
		idr_grow(idp, want);

retry:
	id = idr_find_free(idp, want, lim);
	if (id > -1)
		goto found;

	/*
	 * No space in the current array.  Expand?
	 */
	if (idp->idr_count >= lim) {
		return (ENOSPC);
	}
	idr_grow(idp, want);
	goto retry;

found:
	return (0);
}

int
idr_pre_get(struct idr *idp, __unused unsigned gfp_mask)
{
	int error;

	lwkt_gettoken(&idp->idr_token);
	error = idr_pre_get1(idp, idp->idr_maxwant, INT_MAX);
	lwkt_reltoken(&idp->idr_token);
	return (error == 0);
}

int
idr_get_new_above(struct idr *idp, void *ptr, int sid, int *id)
{
	int resid;

	KKASSERT(ptr != NULL);

	lwkt_gettoken(&idp->idr_token);
	if (sid >= idp->idr_count) {
		idp->idr_maxwant = max(idp->idr_maxwant, sid);
		lwkt_reltoken(&idp->idr_token);
		return (-EAGAIN);
	}

	resid = idr_find_free(idp, sid, INT_MAX);
	if (resid == -1) {
		lwkt_reltoken(&idp->idr_token);
		return (-EAGAIN);
	}

	if (resid >= idp->idr_count)
		idr_grow(idp, resid);
	if (resid > idp->idr_lastindex)
		idp->idr_lastindex = resid;
	if (sid <= idp->idr_freeindex)
		idp->idr_freeindex = resid;
	*id = resid;
	KKASSERT(idp->idr_nodes[resid].reserved == 0);
	idp->idr_nodes[resid].reserved = 1;
	idr_reserve(idp, resid, 1);
	idr_set(idp, resid, ptr);

	lwkt_reltoken(&idp->idr_token);
	return (0);
}

int
idr_get_new(struct idr *idp, void *ptr, int *id)
{
	return (idr_get_new_above(idp, ptr, 0, id));
}
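
/*
 * Typical caller pattern (illustrative sketch only, mirroring the Linux
 * idr API which these functions emulate; my_idr, my_ptr and my_id are
 * hypothetical caller-side names, and the gfp mask is ignored by this
 * implementation):
 *
 *	int ret, my_id;
 *
 *	do {
 *		if (idr_pre_get(&my_idr, GFP_KERNEL) == 0)
 *			return (-ENOMEM);
 *		ret = idr_get_new(&my_idr, my_ptr, &my_id);
 *	} while (ret == -EAGAIN);
 *
 * idr_pre_get() grows the node array as needed and returns 0 on failure,
 * while idr_get_new() returns -EAGAIN when another pre-get pass is
 * required.
 */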
/*
 * Grow the node array so it can hold id (want).
 */
static void
idr_grow(struct idr *idp, int want)
{
	struct idr_node *oldnodes, *newnodes;
	int nf;

	/* We want 2^n entries */
	nf = idp->idr_count;
	do {
		nf *= 2;
	} while (nf <= want);

	/* Allocate a new zero'ed node array */
	newnodes = kmalloc(nf * sizeof(struct idr_node), M_IDR,
			   M_ZERO | M_WAITOK);

	/* Copy the existing nodes to the beginning of the new array */
	if (idp->idr_nodes != NULL)
		bcopy(idp->idr_nodes, newnodes,
		      idp->idr_count * sizeof(struct idr_node));

	oldnodes = idp->idr_nodes;
	idp->idr_nodes = newnodes;
	idp->idr_count = nf;

	if (oldnodes != NULL) {
		kfree(oldnodes, M_IDR);
	}

	idp->idr_nexpands++;
}

void
idr_remove(struct idr *idp, int id)
{
	void *ptr;

	lwkt_gettoken(&idp->idr_token);

	if (id >= idp->idr_count)
		goto out;
	if ((ptr = idp->idr_nodes[id].data) == NULL)
		goto out;
	idp->idr_nodes[id].data = NULL;

	idr_reserve(idp, id, -1);
	idrfixup(idp, id);

out:
	lwkt_reltoken(&idp->idr_token);
}

void
idr_remove_all(struct idr *idp)
{
	kfree(idp->idr_nodes, M_IDR);
	idp->idr_nodes = kmalloc(idp->idr_count * sizeof(struct idr_node),
				 M_IDR, M_WAITOK | M_ZERO);
	idp->idr_lastindex = -1;
	idp->idr_freeindex = 0;
	idp->idr_nexpands = 0;
	idp->idr_maxwant = 0;
}

void
idr_destroy(struct idr *idp)
{
	lwkt_token_uninit(&idp->idr_token);
	kfree(idp->idr_nodes, M_IDR);
	memset(idp, 0, sizeof(struct idr));
}

void *
idr_find(struct idr *idp, int id)
{
	void *ret = NULL;

	lwkt_gettoken(&idp->idr_token);

	if (id >= idp->idr_count) {
		goto out;
	} else if (idp->idr_nodes[id].allocated == 0) {
		goto out;
	}
	ret = idp->idr_nodes[id].data;

out:
	lwkt_reltoken(&idp->idr_token);
	return (ret);
}

static void
idr_set(struct idr *idp, int id, void *ptr)
{
	KKASSERT(id < idp->idr_count);
	KKASSERT(idp->idr_nodes[id].reserved != 0);
	if (ptr) {
		idp->idr_nodes[id].data = ptr;
		idp->idr_nodes[id].reserved = 0;
	} else {
		idp->idr_nodes[id].reserved = 0;
		idr_reserve(idp, id, -1);
		idrfixup(idp, id);
	}
}

int
idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data),
	     void *data)
{
	int i, error = 0;
	struct idr_node *nodes = idp->idr_nodes;

	lwkt_gettoken(&idp->idr_token);
	for (i = 0; i < idp->idr_count; i++) {
		if (nodes[i].data != NULL && nodes[i].allocated > 0) {
			error = fn(i, nodes[i].data, data);
			if (error != 0)
				goto out;
		}
	}
out:
	lwkt_reltoken(&idp->idr_token);
	return (error);
}

void *
idr_replace(struct idr *idp, void *ptr, int id)
{
	struct idr_node *idrnp;
	void *ret = NULL;

	lwkt_gettoken(&idp->idr_token);

	idrnp = idr_get_node(idp, id);
	if (idrnp == NULL || ptr == NULL)
		goto out;

	ret = idrnp->data;
	idrnp->data = ptr;

out:
	lwkt_reltoken(&idp->idr_token);
	return (ret);
}

void
idr_init(struct idr *idp)
{
	bzero(idp, sizeof(struct idr));
	idp->idr_nodes = kmalloc(IDR_DEFAULT_SIZE * sizeof(struct idr_node),
				 M_IDR, M_WAITOK | M_ZERO);
	idp->idr_count = IDR_DEFAULT_SIZE;
	idp->idr_lastindex = -1;
	idp->idr_maxwant = 0;
	lwkt_token_init(&idp->idr_token, "idr token");
}
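
/*
 * Illustrative iteration sketch (hypothetical caller-side code, not part
 * of this API): count the live entries in an idr.
 *
 *	static int
 *	count_cb(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return (0);
 *	}
 *
 *	int count = 0;
 *	idr_for_each(&my_idr, count_cb, &count);
 *
 * Returning non-zero from the callback stops the walk early, and that
 * value is passed back to the idr_for_each() caller.
 */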