/*	$NetBSD: sysv_shm.c,v 1.110 2008/05/31 13:11:14 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

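/*
 * Implementation of the System V shared memory interface:
 * shmget(2), shmat(2), shmdt(2) and shmctl(2).
 */
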
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.110 2008/05/31 13:11:14 ad Exp $");

#define	SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/shm.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/pool.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

int shm_nused;
struct shmid_ds *shmsegs;

struct shmmap_entry {
	SLIST_ENTRY(shmmap_entry) next;
	vaddr_t va;
	int shmid;
};

static kmutex_t		shm_lock;
static kcondvar_t *	shm_cv;
static struct pool	shmmap_entry_pool;
static int		shm_last_free, shm_use_phys;
static size_t		shm_committed;

static kcondvar_t	shm_realloc_cv;
static bool		shm_realloc_state;
static u_int		shm_realloc_disable;

struct shmmap_state {
	unsigned int nitems;
	unsigned int nrefs;
	SLIST_HEAD(, shmmap_entry) entries;
};

#ifdef SHMDEBUG
#define SHMPRINTF(a) printf a
#else
#define SHMPRINTF(a)
#endif

static int shmrealloc(int);

/*
 * Find the shared memory segment by its identifier.
 * => must be called with shm_lock held;
 */
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	KASSERT(mutex_owned(&shm_lock));

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
		return NULL;
	if ((shmseg->shm_perm.mode &
	    (SHMSEG_REMOVED|SHMSEG_RMLINGER)) == SHMSEG_REMOVED)
		return NULL;
	if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
		return NULL;

	return shmseg;
}

/*
 * Free memory segment.
 * => must be called with shm_lock held;
 */
static void
shm_free_segment(int segnum)
{
	struct shmid_ds *shmseg;
	size_t size;
	bool wanted;

	KASSERT(mutex_owned(&shm_lock));

	shmseg = &shmsegs[segnum];
	SHMPRINTF(("shm freeing key 0x%lx seq 0x%x\n",
	    shmseg->shm_perm._key, shmseg->shm_perm._seq));

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	wanted = (shmseg->shm_perm.mode & SHMSEG_WANTED);

	shmseg->_shm_internal = NULL;
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_last_free = segnum;
	if (wanted == true)
		cv_broadcast(&shm_cv[segnum]);
}

/*
 * Delete entry from the shm map.
 * => must be called with shm_lock held;
 */
static struct uvm_object *
shm_delete_mapping(struct shmmap_state *shmmap_s,
    struct shmmap_entry *shmmap_se)
{
	struct uvm_object *uobj = NULL;
	struct shmid_ds *shmseg;
	int segnum;

	KASSERT(mutex_owned(&shm_lock));

	segnum = IPCID_TO_IX(shmmap_se->shmid);
	shmseg = &shmsegs[segnum];
	SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
	shmmap_s->nitems--;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		uobj = shmseg->_shm_internal;
		shm_free_segment(segnum);
	}

	return uobj;
}

/*
 * Get a non-shared shm map for that vmspace.  Note that memory
 * allocation might be performed with the lock held.
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
	struct shmmap_state *oshmmap_s, *shmmap_s;
	struct shmmap_entry *oshmmap_se, *shmmap_se;

	KASSERT(mutex_owned(&shm_lock));

	/* 1. A shm map with refcnt = 1 is used only by us - return it */
	oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (oshmmap_s && oshmmap_s->nrefs == 1)
		return oshmmap_s;

	/* 2. No shm map present - create a fresh one */
	shmmap_s = kmem_zalloc(sizeof(struct shmmap_state), KM_SLEEP);
	shmmap_s->nrefs = 1;
	SLIST_INIT(&shmmap_s->entries);
	p->p_vmspace->vm_shm = (void *)shmmap_s;

	if (oshmmap_s == NULL)
		return shmmap_s;

	SHMPRINTF(("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
	    p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs));

	/* 3. A shared shm map - copy it to a fresh one and adjust refcounts */
	SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
		shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
		shmmap_se->va = oshmmap_se->va;
		shmmap_se->shmid = oshmmap_se->shmid;
		SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	}
	shmmap_s->nitems = oshmmap_s->nitems;
	oshmmap_s->nrefs--;

	return shmmap_s;
}

/*
 * Lock/unlock the memory.
 * => must be called with shm_lock held;
 * => called from one place, hence inline;
 */
static inline int
shm_memlock(struct lwp *l, struct shmid_ds *shmseg, int shmid, int cmd)
{
	struct proc *p = l->l_proc;
	struct shmmap_entry *shmmap_se;
	struct shmmap_state *shmmap_s;
	size_t size;
	int error;

	KASSERT(mutex_owned(&shm_lock));
	shmmap_s = shmmap_getprivate(p);

	/* Find our shared memory address by shmid */
	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
		if (shmmap_se->shmid != shmid)
			continue;

		size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;

		if (cmd == SHM_LOCK &&
		    (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
			/* Wire the object and map, then tag it */
			error = uobj_wirepages(shmseg->_shm_internal, 0, size);
			if (error)
				return EIO;
			error = uvm_map_pageable(&p->p_vmspace->vm_map,
			    shmmap_se->va, shmmap_se->va + size, false, 0);
			if (error) {
				uobj_unwirepages(shmseg->_shm_internal, 0, size);
				if (error == EFAULT)
					error = ENOMEM;
				return error;
			}
			shmseg->shm_perm.mode |= SHMSEG_WIRED;

		} else if (cmd == SHM_UNLOCK &&
		    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
			/* Unwire the object and map, then untag it */
			uobj_unwirepages(shmseg->_shm_internal, 0, size);
			error = uvm_map_pageable(&p->p_vmspace->vm_map,
			    shmmap_se->va, shmmap_se->va + size, true, 0);
			if (error)
				return EIO;
			shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
		}
	}

	return 0;
}

/*
 * Unmap shared memory.
 */
int
sys_shmdt(struct lwp *l, const struct sys_shmdt_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) shmaddr;
	} */
	struct proc *p = l->l_proc;
	struct shmmap_state *shmmap_s1, *shmmap_s;
	struct shmmap_entry *shmmap_se;
	struct uvm_object *uobj;
	struct shmid_ds *shmseg;
	size_t size;

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	shmmap_s1 = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s1 == NULL) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}

	/* Find the map entry */
	SLIST_FOREACH(shmmap_se, &shmmap_s1->entries, next)
		if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (shmmap_se == NULL) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}

	shmmap_s = shmmap_getprivate(p);
	if (shmmap_s != shmmap_s1) {
		/* Map has been copied, look up the entry in the new map */
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
				break;
		if (shmmap_se == NULL) {
			mutex_exit(&shm_lock);
			return EINVAL;
		}
	}

	SHMPRINTF(("shmdt: vm %p: remove %d @%lx\n",
	    p->p_vmspace, shmmap_se->shmid, shmmap_se->va));

	/* Delete the entry from shm map */
	uobj = shm_delete_mapping(shmmap_s, shmmap_se);
	shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	mutex_exit(&shm_lock);

	uvm_deallocate(&p->p_vmspace->vm_map, shmmap_se->va, size);
	if (uobj != NULL)
		uao_detach(uobj);
	pool_put(&shmmap_entry_pool, shmmap_se);

	return 0;
}

/*
 * Map shared memory.
 */
int
sys_shmat(struct lwp *l, const struct sys_shmat_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */
	int error, flags = 0;
	struct proc *p = l->l_proc;
	kauth_cred_t cred = l->l_cred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;
	struct uvm_object *uobj;
	struct vmspace *vm;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	/* Allocate a new map entry and set it */
	shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL) {
		error = EINVAL;
		goto err;
	}
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto err;

	vm = p->p_vmspace;
	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg) {
		error = EMFILE;
		goto err;
	}

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	if (SCARG(uap, shmaddr)) {
		flags |= UVM_FLAG_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else {
			error = EINVAL;
			goto err;
		}
	} else {
		/* This is just a hint to uvm_map() about where to put it. */
		attach_va = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)vm->vm_daddr, size);
	}

	/*
	 * Create a map entry, add it to the list and increase the counters.
	 * The lock will be dropped before the mapping, so disable reallocation.
	 */
	shmmap_s = shmmap_getprivate(p);
	SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	shmmap_s->nitems++;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_nattch++;
	shm_realloc_disable++;
	mutex_exit(&shm_lock);

	/*
	 * Add a reference to the memory object, map it to the
	 * address space, and lock the memory, if needed.
	 */
	uobj = shmseg->_shm_internal;
	uao_reference(uobj);
	error = uvm_map(&vm->vm_map, &attach_va, size, uobj, 0, 0,
	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
	if (error)
		goto err_detach;
	if (shm_use_phys || (shmseg->shm_perm.mode & SHMSEG_WIRED)) {
		error = uvm_map_pageable(&vm->vm_map, attach_va,
		    attach_va + size, false, 0);
		if (error) {
			if (error == EFAULT)
				error = ENOMEM;
			uvm_deallocate(&vm->vm_map, attach_va, size);
			goto err_detach;
		}
	}

	/* Set the new address, and update the time */
	mutex_enter(&shm_lock);
	shmmap_se->va = attach_va;
	shmmap_se->shmid = SCARG(uap, shmid);
	shmseg->shm_atime = time_second;
	shm_realloc_disable--;
	retval[0] = attach_va;
	SHMPRINTF(("shmat: vm %p: add %d @%lx\n",
	    p->p_vmspace, shmmap_se->shmid, attach_va));
err:
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);
	if (error && shmmap_se)
		pool_put(&shmmap_entry_pool, shmmap_se);
	return error;

err_detach:
	uao_detach(uobj);
	mutex_enter(&shm_lock);
	uobj = shm_delete_mapping(shmmap_s, shmmap_se);
	shm_realloc_disable--;
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);
	if (uobj != NULL)
		uao_detach(uobj);
	pool_put(&shmmap_entry_pool, shmmap_se);
	return error;
}

/*
 * Shared memory control operations.
 */
int
sys___shmctl13(struct lwp *l, const struct sys___shmctl13_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */
	struct shmid_ds shmbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);
	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
		if (error)
			return error;
	}

	error = shmctl1(l, SCARG(uap, shmid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

	return error;
}

int
shmctl1(struct lwp *l, int shmid, int cmd, struct shmid_ds *shmbuf)
{
	struct uvm_object *uobj = NULL;
	kauth_cred_t cred = l->l_cred;
	struct shmid_ds *shmseg;
	int error = 0;

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}

	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			break;
		memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			break;
		shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
		shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmbuf->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			break;
		shmseg->shm_perm._key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			uobj = shmseg->_shm_internal;
			shm_free_segment(IPCID_TO_IX(shmid));
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
		if ((error = kauth_authorize_generic(cred,
		    KAUTH_GENERIC_ISSUSER, NULL)) != 0)
			break;
		error = shm_memlock(l, shmseg, shmid, cmd);
		break;
	default:
		error = EINVAL;
	}

	mutex_exit(&shm_lock);
	if (uobj != NULL)
		uao_detach(uobj);
	return error;
}

/*
 * Try to take an already existing segment.
 * => must be called with shm_lock held;
 * => called from one place, hence inline;
 */
static inline int
shmget_existing(struct lwp *l, const struct sys_shmget_args *uap, int mode,
    register_t *retval)
{
	struct shmid_ds *shmseg;
	kauth_cred_t cred = l->l_cred;
	int segnum, error;
again:
	KASSERT(mutex_owned(&shm_lock));

	/* Find segment by key */
	for (segnum = 0; segnum < shminfo.shmmni; segnum++)
		if ((shmsegs[segnum].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[segnum].shm_perm._key == SCARG(uap, key))
			break;
	if (segnum == shminfo.shmmni) {
		/* Not found */
		return -1;
	}

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = cv_wait_sig(&shm_cv[segnum], &shm_lock);
		if (error)
			return error;
		goto again;
	}

	/* Check the permissions, segment size and appropriate flags */
	error = ipcperm(cred, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

int
sys_shmget(struct lwp *l, const struct sys_shmget_args *uap, register_t *retval)
{
	/* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */
	struct shmid_ds *shmseg;
	kauth_cred_t cred = l->l_cred;
	key_t key = SCARG(uap, key);
	size_t size;
	int error, mode, segnum;
	bool lockmem;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, shmflg) & _SHM_RMLINGER)
		mode |= SHMSEG_RMLINGER;

	SHMPRINTF(("shmget: key 0x%lx size 0x%x shmflg 0x%x mode 0x%x\n",
	    SCARG(uap, key), SCARG(uap, size), SCARG(uap, shmflg), mode));

	mutex_enter(&shm_lock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(shm_realloc_state))
		cv_wait(&shm_realloc_cv, &shm_lock);

	if (key != IPC_PRIVATE) {
		error = shmget_existing(l, uap, mode, retval);
		if (error != -1) {
			mutex_exit(&shm_lock);
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0) {
			mutex_exit(&shm_lock);
			return ENOENT;
		}
	}
	error = 0;

	/*
	 * Check the limits.
	 */
	size = SCARG(uap, size);
	if (size < shminfo.shmmin || size > shminfo.shmmax) {
		mutex_exit(&shm_lock);
		return EINVAL;
	}
	if (shm_nused >= shminfo.shmmni) {
		mutex_exit(&shm_lock);
		return ENOSPC;
	}
	size = (size + PGOFSET) & ~PGOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall) {
		mutex_exit(&shm_lock);
		return ENOMEM;
	}

	/* Find the first available segment */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni; segnum++)
			if (shmsegs[segnum].shm_perm.mode & SHMSEG_FREE)
				break;
		KASSERT(segnum < shminfo.shmmni);
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}

	/*
	 * Initialize the segment.
	 * We will drop the lock while allocating the memory, thus mark the
	 * segment as present, but removed, so that no other thread could
	 * take it.  Also, disable reallocation while the lock is dropped.
	 */
	shmseg = &shmsegs[segnum];
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shm_committed += btoc(size);
	shm_nused++;
	lockmem = shm_use_phys;
	shm_realloc_disable++;
	mutex_exit(&shm_lock);

	/* Allocate the memory object and lock it if needed */
	shmseg->_shm_internal = uao_create(size, 0);
	if (lockmem) {
		/* Wire the pages and tag it */
		error = uobj_wirepages(shmseg->_shm_internal, 0, size);
		if (error) {
			uao_detach(shmseg->_shm_internal);
			mutex_enter(&shm_lock);
			shm_free_segment(segnum);
			shm_realloc_disable--;
			mutex_exit(&shm_lock);
			return error;
		}
	}

	/*
	 * Please note: while the segment is marked, there is no need to
	 * hold the lock while setting it (except shm_perm.mode).
	 */
	shmseg->shm_perm._key = SCARG(uap, key);
	shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = kauth_cred_geteuid(cred);
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = kauth_cred_getegid(cred);
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = l->l_proc->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;

	/*
	 * Segment is initialized.
	 * Take the lock, mark it as allocated, and notify waiters (if any).
	 * Also, drop the reallocation-disable count.
	 */
	mutex_enter(&shm_lock);
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & (ACCESSPERMS | SHMSEG_RMLINGER)) |
	    SHMSEG_ALLOCATED | (lockmem ? SHMSEG_WIRED : 0);
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		cv_broadcast(&shm_cv[segnum]);
	}
	shm_realloc_disable--;
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);

	return error;
}

/*
 * Fork: the child shares the parent's shm map; bump the reference
 * counters on the map and on each attached segment.
 */
void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	SHMPRINTF(("shmfork %p->%p\n", vm1, vm2));
	mutex_enter(&shm_lock);
	vm2->vm_shm = vm1->vm_shm;
	if (vm1->vm_shm) {
		shmmap_s = (struct shmmap_state *)vm1->vm_shm;
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
		shmmap_s->nrefs++;
	}
	mutex_exit(&shm_lock);
}

/*
 * Detach all shared memory segments from the given vmspace, and unmap
 * them if this was the last reference to the shm map.
 */
void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;
	struct uvm_object **uobj;
	size_t *size;
	u_int i, n;

	SLIST_HEAD(, shmmap_entry) tmp_entries;

	mutex_enter(&shm_lock);
	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL) {
		mutex_exit(&shm_lock);
		return;
	}

	vm->vm_shm = NULL;

	if (--shmmap_s->nrefs > 0) {
		SHMPRINTF(("shmexit: vm %p drop ref (%d entries), refs = %d\n",
		    vm, shmmap_s->nitems, shmmap_s->nrefs));
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
		mutex_exit(&shm_lock);
		return;
	}

	KASSERT(shmmap_s->nrefs == 0);
	n = shmmap_s->nitems;
	SHMPRINTF(("shmexit: vm %p cleanup (%d entries)\n", vm, n));
	mutex_exit(&shm_lock);
	if (n == 0) {
		kmem_free(shmmap_s, sizeof(struct shmmap_state));
		return;
	}

	/* Allocate the arrays */
	SLIST_INIT(&tmp_entries);
	uobj = kmem_zalloc(n * sizeof(void *), KM_SLEEP);
	size = kmem_zalloc(n * sizeof(size_t), KM_SLEEP);

	/* Delete the entries from shm map */
	i = 0;
	mutex_enter(&shm_lock);
	while (!SLIST_EMPTY(&shmmap_s->entries)) {
		struct shmid_ds *shmseg;

		shmmap_se = SLIST_FIRST(&shmmap_s->entries);
		shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
		size[i] = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
		uobj[i] = shm_delete_mapping(shmmap_s, shmmap_se);
		SLIST_INSERT_HEAD(&tmp_entries, shmmap_se, next);
		i++;
	}
	mutex_exit(&shm_lock);

	/* Unmap all segments, free the entries */
	i = 0;
	while (!SLIST_EMPTY(&tmp_entries)) {
		KASSERT(i < n);
		shmmap_se = SLIST_FIRST(&tmp_entries);
		SLIST_REMOVE(&tmp_entries, shmmap_se, shmmap_entry, next);
		uvm_deallocate(&vm->vm_map, shmmap_se->va, size[i]);
		if (uobj[i] != NULL)
			uao_detach(uobj[i]);
		pool_put(&shmmap_entry_pool, shmmap_se);
		i++;
	}

	kmem_free(uobj, n * sizeof(void *));
	kmem_free(size, n * sizeof(size_t));
	kmem_free(shmmap_s, sizeof(struct shmmap_state));
}

/*
 * Reallocate the global segment array to hold newshmni segments.
 */
static int
shmrealloc(int newshmni)
{
	vaddr_t v;
	struct shmid_ds *oldshmsegs, *newshmsegs;
	kcondvar_t *newshm_cv, *oldshm_cv;
	size_t sz;
	int i, lsegid, oldshmni;

	if (newshmni < 1)
		return EINVAL;

	/* Allocate new memory area */
	sz = ALIGN(newshmni * sizeof(struct shmid_ds)) +
	    ALIGN(newshmni * sizeof(kcondvar_t));
	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		return ENOMEM;

	mutex_enter(&shm_lock);
	while (shm_realloc_state || shm_realloc_disable)
		cv_wait(&shm_realloc_cv, &shm_lock);

	/*
	 * Get the number of the last used segment.  Fail if we are trying
	 * to reallocate to fewer segments than are currently in use.
	 */
	lsegid = 0;
	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_FREE) == 0)
			lsegid = i;
	if (lsegid >= newshmni) {
		mutex_exit(&shm_lock);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}
	shm_realloc_state = true;

	newshmsegs = (void *)v;
	newshm_cv = (void *)(ALIGN(newshmsegs) +
	    newshmni * sizeof(struct shmid_ds));

	/* Copy all memory to the new area */
	for (i = 0; i < shm_nused; i++)
		(void)memcpy(&newshmsegs[i], &shmsegs[i],
		    sizeof(newshmsegs[0]));

	/* Mark all new segments as free, if there are any */
	for (; i < newshmni; i++) {
		cv_init(&newshm_cv[i], "shmwait");
		newshmsegs[i].shm_perm.mode = SHMSEG_FREE;
		newshmsegs[i].shm_perm._seq = 0;
	}

	oldshmsegs = shmsegs;
	oldshmni = shminfo.shmmni;
	shminfo.shmmni = newshmni;
	shmsegs = newshmsegs;
	shm_cv = newshm_cv;

	/* Reallocation completed - notify all waiters, if any */
	shm_realloc_state = false;
	cv_broadcast(&shm_realloc_cv);
	mutex_exit(&shm_lock);

	/* Release the now unused resources. */
	oldshm_cv = (void *)(ALIGN(oldshmsegs) +
	    oldshmni * sizeof(struct shmid_ds));
	for (i = 0; i < oldshmni; i++)
		cv_destroy(&oldshm_cv[i]);

	sz = ALIGN(oldshmni * sizeof(struct shmid_ds)) +
	    ALIGN(oldshmni * sizeof(kcondvar_t));
	uvm_km_free(kernel_map, (vaddr_t)oldshmsegs, sz, UVM_KMF_WIRED);

	return 0;
}

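/*
 * Initialize the SysV shared memory subsystem: locks, the map entry
 * pool, and the initial array of segment structures.
 */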
void
shminit(void)
{
	vaddr_t v;
	size_t sz;
	int i;

	mutex_init(&shm_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
	    "shmmp", &pool_allocator_nointr, IPL_NONE);
	cv_init(&shm_realloc_cv, "shmrealc");

	/* Allocate the wired memory for our structures */
	sz = ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)) +
	    ALIGN(shminfo.shmmni * sizeof(kcondvar_t));
	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		panic("sysv_shm: cannot allocate memory");
	shmsegs = (void *)v;
	shm_cv = (void *)(ALIGN(shmsegs) +
	    shminfo.shmmni * sizeof(struct shmid_ds));

	shminfo.shmmax *= PAGE_SIZE;

	for (i = 0; i < shminfo.shmmni; i++) {
		cv_init(&shm_cv[i], "shmwait");
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm._seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shm_realloc_disable = 0;
	shm_realloc_state = false;
}

static int
sysctl_ipc_shmmni(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = shminfo.shmmni;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	sysctl_unlock();
	error = shmrealloc(newsize);
	sysctl_relock();
	return error;
}

static int
sysctl_ipc_shmmaxpgs(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = shminfo.shmall;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (newsize < 1)
		return EINVAL;

	shminfo.shmall = newsize;
	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;

	return 0;
}

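/*
 * Attach the kern.ipc sysctl subtree carrying the SysV shared memory
 * tunables: shmmax, shmmni, shmseg, shmmaxpgs and shm_use_phys.
 */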
SYSCTL_SETUP(sysctl_ipc_shm_setup, "sysctl kern.ipc subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ipc",
		SYSCTL_DESCR("SysV IPC options"),
		NULL, 0, NULL, 0,
		CTL_KERN, KERN_SYSVIPC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READONLY,
		CTLTYPE_INT, "shmmax",
		SYSCTL_DESCR("Max shared memory segment size in bytes"),
		NULL, 0, &shminfo.shmmax, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAX, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shmmni",
		SYSCTL_DESCR("Max number of shared memory identifiers"),
		sysctl_ipc_shmmni, 0, &shminfo.shmmni, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMNI, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shmseg",
		SYSCTL_DESCR("Max shared memory segments per process"),
		NULL, 0, &shminfo.shmseg, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMSEG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shmmaxpgs",
		SYSCTL_DESCR("Max amount of shared memory in pages"),
		sysctl_ipc_shmmaxpgs, 0, &shminfo.shmall, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAXPGS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shm_use_phys",
		SYSCTL_DESCR("Enable/disable locking of shared memory in "
		    "physical memory"), NULL, 0, &shm_use_phys, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMUSEPHYS, CTL_EOL);
}