Lines Matching defs:shmfd
(cross-reference of FreeBSD's POSIX shared memory implementation, sys/kern/uipc_shm.c; the number at the start of each entry is the matched source line)

108 	struct shmfd	*sm_shmfd;
112 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
123 static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
124 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
127 static int shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
129 static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
133 static int shm_deallocate(struct shmfd *shmfd, off_t *offset,
186 #define shm_rangelock_unlock(shmfd, cookie) \
187 rangelock_unlock(&(shmfd)->shm_rl, (cookie))
188 #define shm_rangelock_rlock(shmfd, start, end) \
189 rangelock_rlock(&(shmfd)->shm_rl, (start), (end))
190 #define shm_rangelock_tryrlock(shmfd, start, end) \
191 rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end))
192 #define shm_rangelock_wlock(shmfd, start, end) \
193 rangelock_wlock(&(shmfd)->shm_rl, (start), (end))
353 shm_largepage(struct shmfd *shmfd)
355 return (shmfd->shm_object->type == OBJT_PHYS);
361 struct shmfd *shm;
380 struct shmfd *shm;
392 struct shmfd *shm;
416 struct shmfd *shmfd;
420 shmfd = fp->f_data;
433 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
437 offset += shmfd->shm_size;
445 if (offset < 0 || offset > shmfd->shm_size)
458 struct shmfd *shmfd;
462 shmfd = fp->f_data;
464 error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
469 rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
471 error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
472 shm_rangelock_unlock(shmfd, rl_cookie);
481 struct shmfd *shmfd;
486 shmfd = fp->f_data;
488 error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
492 if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
501 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
506 size = shmfd->shm_size;
511 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
513 rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset, size);
514 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
518 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
519 size > shmfd->shm_size) {
520 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
523 error = uiomove_object(shmfd->shm_object,
524 shmfd->shm_size, uio);
526 shm_rangelock_unlock(shmfd, rl_cookie);
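
The write path at lines 501-526 shows the two resize-related checks: an object opened with SHM_GROW_ON_WRITE is extended via shm_dotruncate_cookie() when a write lands past the current size, and any write fails once F_SEAL_WRITE is set. A minimal userland sketch of the grow-on-write behavior, assuming FreeBSD's memfd_create(2), whose descriptors are (as an assumption here) created with grow-on-write semantics:

    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <err.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            struct stat sb;
            int fd;

            /* Assumption: memfd descriptors behave as SHM_GROW_ON_WRITE. */
            fd = memfd_create("grow-demo", 0);
            if (fd == -1)
                    err(1, "memfd_create");
            /* No ftruncate(): the object starts empty and grows as written. */
            if (write(fd, "hello", 5) != 5)
                    err(1, "write");
            if (fstat(fd, &sb) == -1)
                    err(1, "fstat");
            printf("size after write: %jd\n", (intmax_t)sb.st_size);
            close(fd);
            return (0);
    }
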
535 struct shmfd *shmfd;
540 shmfd = fp->f_data;
542 error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
546 return (shm_dotruncate(shmfd, length));
553 struct shmfd *shmfd;
557 shmfd = fp->f_data;
567 if (!shm_largepage(shmfd))
570 if (shmfd->shm_lp_psind != 0 &&
571 conf->psind != shmfd->shm_lp_psind)
581 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
582 shmfd->shm_lp_psind = conf->psind;
583 shmfd->shm_lp_alloc_policy = conf->alloc_policy;
584 shmfd->shm_object->un_pager.phys.data_val = conf->psind;
585 shm_rangelock_unlock(shmfd, rl_cookie);
588 if (!shm_largepage(shmfd))
591 rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
592 conf->psind = shmfd->shm_lp_psind;
593 conf->alloc_policy = shmfd->shm_lp_alloc_policy;
594 shm_rangelock_unlock(shmfd, rl_cookie);
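
Lines 553-594 implement the ioctls that set and query a large-page object's page-size index (shm_lp_psind) and allocation policy (FIOSSHMLPGCNF/FIOGSHMLPGCNF in recent FreeBSD); shm_mmap_large() later forces mappings to that granularity. A hedged userland sketch using shm_create_largepage(3), which wraps the create-then-configure sequence; treat its exact signature and availability (FreeBSD 13+) as assumptions:

    #include <sys/param.h>
    #include <sys/mman.h>
    #include <err.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            size_t ps[MAXPAGESIZES];
            void *p;
            int fd, n;

            n = getpagesizes(ps, MAXPAGESIZES);
            if (n < 2)
                    errx(1, "no superpage sizes available");
            /* psind 1 is the first superpage size (e.g. 2M on amd64). */
            fd = shm_create_largepage("/lp-demo", O_CREAT | O_RDWR, 1,
                SHM_LARGEPAGE_ALLOC_DEFAULT, 0600);
            if (fd == -1)
                    err(1, "shm_create_largepage");
            /* Size and offset must be multiples of pagesizes[psind]. */
            if (ftruncate(fd, ps[1]) == -1)
                    err(1, "ftruncate");
            p = mmap(NULL, ps[1], PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    err(1, "mmap");
            munmap(p, ps[1]);
            shm_unlink("/lp-demo");
            return (0);
    }
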
604 struct shmfd *shmfd;
609 shmfd = fp->f_data;
612 error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
623 sb->st_size = shmfd->shm_size;
625 sb->st_atim = shmfd->shm_atime;
626 sb->st_ctim = shmfd->shm_ctime;
627 sb->st_mtim = shmfd->shm_mtime;
628 sb->st_birthtim = shmfd->shm_birthtime;
629 sb->st_mode = S_IFREG | shmfd->shm_mode; /* XXX */
630 sb->st_uid = shmfd->shm_uid;
631 sb->st_gid = shmfd->shm_gid;
634 sb->st_ino = shmfd->shm_ino;
635 sb->st_nlink = shmfd->shm_object->ref_count;
636 if (shm_largepage(shmfd)) {
637 sb->st_blocks = shmfd->shm_object->size /
638 (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
640 sb->st_blocks = shmfd->shm_pages;
649 struct shmfd *shmfd;
651 shmfd = fp->f_data;
653 shm_drop(shmfd);
748 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
756 object = shmfd->shm_object;
759 if (length == shmfd->shm_size)
764 if (length < shmfd->shm_size) {
765 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
772 if (shmfd->shm_kmappings > 0)
795 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
804 shmfd->shm_size = length;
806 vfs_timestamp(&shmfd->shm_ctime);
807 shmfd->shm_mtime = shmfd->shm_ctime;
814 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
823 object = shmfd->shm_object;
829 if (length == shmfd->shm_size)
831 psind = shmfd->shm_lp_psind;
837 if (length < shmfd->shm_size) {
838 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
840 if (shmfd->shm_kmappings > 0)
846 shmfd->shm_size = length;
851 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
855 if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
860 * Extend shmfd and object, keeping all already fully
871 if (shmfd->shm_lp_alloc_policy ==
873 (shmfd->shm_lp_alloc_policy ==
900 shmfd->shm_size += pagesizes[psind];
908 shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
912 VM_OBJECT_WLOCK(shmfd->shm_object);
913 error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
914 length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
916 VM_OBJECT_WUNLOCK(shmfd->shm_object);
921 shm_dotruncate(struct shmfd *shmfd, off_t length)
926 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
927 error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
928 shm_rangelock_unlock(shmfd, rl_cookie);
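
shm_dotruncate() (lines 921-928) takes the write rangelock over the whole range and dispatches to the locked small-page or large-page resize; shrinking is refused while kernel mappings exist or F_SEAL_SHRINK is set, and growing is refused under F_SEAL_GROW (lines 764-795). The stat path (lines 604-640) reports the resulting size and timestamps. A minimal sketch of the userland view:

    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <err.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            struct stat sb;
            int fd;

            fd = shm_open(SHM_ANON, O_RDWR, 0600);
            if (fd == -1)
                    err(1, "shm_open");
            if (ftruncate(fd, 65536) == -1)   /* reaches shm_dotruncate() */
                    err(1, "ftruncate");
            if (fstat(fd, &sb) == -1)         /* reaches the shm stat path */
                    err(1, "fstat");
            printf("size %jd mode %o\n", (intmax_t)sb.st_size,
                (unsigned)(sb.st_mode & ACCESSPERMS));
            close(fd);
            return (0);
    }
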
933 * shmfd object management including creation and reference counting
936 struct shmfd *
939 struct shmfd *shmfd;
957 shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
958 shmfd->shm_uid = ucred->cr_uid;
959 shmfd->shm_gid = ucred->cr_gid;
960 shmfd->shm_mode = mode;
962 obj->un_pager.phys.phys_priv = shmfd;
963 shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
965 obj->un_pager.swp.swp_priv = shmfd;
971 shmfd->shm_object = obj;
972 vfs_timestamp(&shmfd->shm_birthtime);
973 shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
974 shmfd->shm_birthtime;
975 shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
976 refcount_init(&shmfd->shm_refs, 1);
977 mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
978 rangelock_init(&shmfd->shm_rl);
980 mac_posixshm_init(shmfd);
981 mac_posixshm_create(ucred, shmfd);
984 return (shmfd);
987 struct shmfd *
988 shm_hold(struct shmfd *shmfd)
991 refcount_acquire(&shmfd->shm_refs);
992 return (shmfd);
996 shm_drop(struct shmfd *shmfd)
1000 if (refcount_release(&shmfd->shm_refs)) {
1002 mac_posixshm_destroy(shmfd);
1004 rangelock_destroy(&shmfd->shm_rl);
1005 mtx_destroy(&shmfd->shm_mtx);
1006 obj = shmfd->shm_object;
1008 if (shm_largepage(shmfd))
1014 free(shmfd, M_SHMFD);
1023 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
1034 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1098 * paths to shmfd objects. We use the FNV hash on the path to store
1101 static struct shmfd *
1117 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
1124 map->sm_shmfd = shm_hold(shmfd);
1125 shmfd->shm_path = path;
1171 struct shmfd *shmfd;
1202 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
1204 * taken below to ensure that the seals are properly set if the shmfd
1255 shmfd = shm_alloc(td->td_ucred, cmode, largepage);
1256 if (shmfd == NULL) {
1260 shmfd->shm_seals = initial_seals;
1261 shmfd->shm_flags = shmflags;
1265 shmfd = shm_lookup(path, fnv);
1266 if (shmfd == NULL) {
1274 shmfd = shm_alloc(td->td_ucred, cmode,
1276 if (shmfd == NULL) {
1279 shmfd->shm_seals =
1281 shmfd->shm_flags = shmflags;
1282 shm_insert(path, fnv, shmfd);
1296 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1306 initial_seals &= ~shmfd->shm_seals;
1317 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
1323 else if (shmflags != 0 && shmflags != shmfd->shm_flags)
1328 shmfd, FFLAGS(flags & O_ACCMODE));
1331 error = shm_access(shmfd, td->td_ucred,
1343 VM_OBJECT_WLOCK(shmfd->shm_object);
1346 td->td_ucred, fp->f_cred, shmfd);
1349 error = shm_dotruncate_locked(shmfd, 0,
1351 VM_OBJECT_WUNLOCK(shmfd->shm_object);
1359 shmfd->shm_seals |= initial_seals;
1360 shm_hold(shmfd);
1362 shm_rangelock_unlock(shmfd, rl_cookie);
1370 finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
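
The open path (lines 1171-1370) looks the path up in the FNV dictionary, allocates a fresh shmfd on an O_CREAT miss, enforces F_SEAL_SEAL and shmflags compatibility on a hit, truncates to zero for O_TRUNC, and finally installs the shmfd in the file via finit(). From userland this is plain shm_open(2); a minimal sketch with a named object:

    #include <sys/mman.h>
    #include <err.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd, fd2;

            /* Dictionary miss + O_CREAT: a new shmfd is allocated. */
            fd = shm_open("/demo-obj", O_CREAT | O_EXCL | O_RDWR, 0600);
            if (fd == -1)
                    err(1, "shm_open(create)");
            /* Hit: the existing shmfd is found and its refcount bumped. */
            fd2 = shm_open("/demo-obj", O_RDWR, 0);
            if (fd2 == -1)
                    err(1, "shm_open(reopen)");
            close(fd);
            close(fd2);
            shm_unlink("/demo-obj");  /* drops the dictionary's reference */
            return (0);
    }
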
1424 struct shmfd *fd_from;
1425 struct shmfd *fd_to;
1568 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1578 if (shmfd->shm_lp_psind == 0)
1594 docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1599 mask = pagesizes[shmfd->shm_lp_psind] - 1;
1612 align = pagesizes[shmfd->shm_lp_psind];
1616 * i.e., [1, VM_NRESERVLEVEL]. shmfd->shm_lp_psind < 1 is
1621 shmfd->shm_lp_psind > VM_NRESERVLEVEL
1623 shmfd->shm_lp_psind > 1
1627 align = pagesizes[shmfd->shm_lp_psind];
1632 if (align < pagesizes[shmfd->shm_lp_psind])
1645 pagesizes[shmfd->shm_lp_psind]);
1672 rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1686 struct shmfd *shmfd;
1692 shmfd = fp->f_data;
1695 rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
1711 (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1740 error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1746 vfs_timestamp(&shmfd->shm_atime);
1748 vm_object_reference(shmfd->shm_object);
1750 if (shm_largepage(shmfd)) {
1752 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1756 vm_pager_update_writecount(shmfd->shm_object, 0,
1760 shmfd->shm_object, foff, writecnt, td);
1764 vm_pager_release_writecount(shmfd->shm_object, 0,
1766 vm_object_deallocate(shmfd->shm_object);
1769 shm_rangelock_unlock(shmfd, rl_cookie);
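
shm_mmap() (lines 1686-1769) read-locks the requested range, refuses writable shared mappings of write-sealed objects (line 1711), updates the access time, references the VM object, and either routes large-page objects through shm_mmap_large() or sets up write-count tracking for ordinary ones. Userland sketch:

    #include <sys/mman.h>
    #include <err.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
            char *p;
            int fd;

            fd = shm_open(SHM_ANON, O_RDWR, 0600);
            if (fd == -1)
                    err(1, "shm_open");
            if (ftruncate(fd, 4096) == -1)
                    err(1, "ftruncate");
            p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    err(1, "mmap");
            /* Stores are visible through read(2) on fd as well. */
            memcpy(p, "via mapping", 12);
            munmap(p, 4096);
            close(fd);
            return (0);
    }
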
1777 struct shmfd *shmfd;
1781 shmfd = fp->f_data;
1788 error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1792 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1796 shmfd->shm_mode = mode & ACCESSPERMS;
1806 struct shmfd *shmfd;
1810 shmfd = fp->f_data;
1813 error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1818 uid = shmfd->shm_uid;
1820 gid = shmfd->shm_gid;
1821 if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1822 (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1825 shmfd->shm_uid = uid;
1826 shmfd->shm_gid = gid;
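
shm_chmod()/shm_chown() (lines 1777-1826) gate changes with MAC and vaccess()/privilege checks against the stored shm_mode, shm_uid, and shm_gid. A tiny sketch:

    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <err.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd;

            fd = shm_open(SHM_ANON, O_RDWR, 0600);
            if (fd == -1)
                    err(1, "shm_open");
            if (fchmod(fd, 0640) == -1)  /* updates shmfd->shm_mode */
                    err(1, "fchmod");
            close(fd);
            return (0);
    }
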
1839 struct shmfd *shmfd;
1846 shmfd = fp->f_data;
1847 obj = shmfd->shm_object;
1853 if (offset >= shmfd->shm_size ||
1854 offset + size > round_page(shmfd->shm_size)) {
1859 shmfd->shm_kmappings++;
1884 shmfd->shm_kmappings--;
1897 struct shmfd *shmfd;
1909 shmfd = fp->f_data;
1924 if (obj != shmfd->shm_object)
1928 KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1929 shmfd->shm_kmappings--;
1935 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1943 kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1944 kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1945 if (shmfd->shm_path != NULL) {
1946 path = shmfd->shm_path;
1978 struct shmfd *shmfd;
1984 shmfd = fp->f_data;
1985 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1988 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1992 nseals = seals & ~shmfd->shm_seals;
1994 if (shm_largepage(shmfd)) {
2005 VM_OBJECT_RLOCK(shmfd->shm_object);
2006 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
2007 VM_OBJECT_RUNLOCK(shmfd->shm_object);
2014 shmfd->shm_seals |= nseals;
2016 shm_rangelock_unlock(shmfd, rl_cookie);
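
shm_add_seals() (lines 1978-2016) refuses any change once F_SEAL_SEAL is set, rejects F_SEAL_WRITE on large-page objects, and otherwise allows F_SEAL_WRITE only when the pager reports no writable mappings (lines 2005-2007). A hedged userland sketch, assuming memfd_create(2) with MFD_ALLOW_SEALING and the fcntl F_ADD_SEALS/F_GET_SEALS commands as on FreeBSD 13+:

    #include <sys/mman.h>
    #include <err.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd, seals;

            fd = memfd_create("seal-demo", MFD_ALLOW_SEALING);
            if (fd == -1)
                    err(1, "memfd_create");
            if (ftruncate(fd, 4096) == -1)
                    err(1, "ftruncate");
            /* Freeze the size, then forbid further seal changes. */
            if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) == -1)
                    err(1, "F_ADD_SEALS");
            if (fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL) == -1)
                    err(1, "F_ADD_SEALS(F_SEAL_SEAL)");
            seals = fcntl(fd, F_GET_SEALS);
            printf("seals: %#x\n", seals);
            /* ftruncate(fd, 8192) would now fail: F_SEAL_GROW is set. */
            close(fd);
            return (0);
    }
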
2023 struct shmfd *shmfd;
2025 shmfd = fp->f_data;
2026 *seals = shmfd->shm_seals;
2031 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
2042 if (off + len > shmfd->shm_size)
2043 len = shmfd->shm_size - off;
2044 object = shmfd->shm_object;
2085 VM_OBJECT_WUNLOCK(shmfd->shm_object);
2096 struct shmfd *shmfd;
2106 shmfd = fp->f_data;
2110 rl_cookie = shm_rangelock_wlock(shmfd, off, off + len);
2113 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
2117 error = shm_deallocate(shmfd, &off, &len, flags);
2124 shm_rangelock_unlock(shmfd, rl_cookie);
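
shm_fspacectl() (lines 2096-2124) write-locks the affected range, refuses under F_SEAL_WRITE, and calls shm_deallocate() to release the backing pages; lines 2042-2043 clamp the request to the current size. The userland entry point is fspacectl(2) with SPACECTL_DEALLOC (FreeBSD 14+; treat the exact interface as per fspacectl(2)):

    #include <sys/mman.h>
    #include <err.h>
    #include <fcntl.h>   /* struct spacectl_range, SPACECTL_DEALLOC */
    #include <unistd.h>

    int
    main(void)
    {
            struct spacectl_range rq = { .r_offset = 0, .r_len = 65536 };
            int fd;

            fd = shm_open(SHM_ANON, O_RDWR, 0600);
            if (fd == -1)
                    err(1, "shm_open");
            if (ftruncate(fd, 65536) == -1)
                    err(1, "ftruncate");
            /* Release the backing pages; the object size is unchanged. */
            if (fspacectl(fd, SPACECTL_DEALLOC, &rq, 0, NULL) == -1)
                    err(1, "fspacectl");
            close(fd);
            return (0);
    }
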
2133 struct shmfd *shmfd;
2139 shmfd = fp->f_data;
2146 * attempts to resize the shmfd will have to take a write lock from 0 to
2148 * the shmfd is not necessarily a concern. If other mechanisms are
2149 * added to grow a shmfd, this may need to be re-evaluated.
2151 rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
2152 if (size > shmfd->shm_size)
2153 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2154 shm_rangelock_unlock(shmfd, rl_cookie);
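
The fallocate path (lines 2133-2154) only ever grows the object: it write-locks [offset, offset + size) and calls shm_dotruncate_cookie() when the end of the request exceeds the current size; the comment at lines 2146-2149 notes this is safe because any resizer must take the write lock from 0 to OFF_MAX. From userland this is posix_fallocate(2):

    #include <sys/mman.h>
    #include <err.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            int error, fd;

            fd = shm_open(SHM_ANON, O_RDWR, 0600);
            if (fd == -1)
                    err(1, "shm_open");
            /* Object is size 0; this extends it via shm_dotruncate_cookie. */
            error = posix_fallocate(fd, 0, 131072);
            if (error != 0)
                    errc(1, error, "posix_fallocate");
            close(fd);
            return (0);
    }
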
2231 struct shmfd *shmfd;
2235 shmfd = NULL;
2242 shmfd = obj->un_pager.swp.swp_priv;
2244 shmfd = obj->un_pager.phys.phys_priv;
2245 if (shmfd == NULL) {
2248 strlcpy(path, shmfd->shm_path == NULL ? "anon" :
2249 shmfd->shm_path, sz);