Lines matching full-text query "enforce-video-mode" in sys/vm/vm_mmap.c (FreeBSD)
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
114 td->td_retval[0] = PAGE_SIZE;
124 * page-aligned, the actual mapping starts at trunc_page(addr)
128 * memory-based, such as a video framebuffer, can be mmap'd. Otherwise
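
The fragments above are from the old getpagesize() compatibility handler and from the mmap(2) comment block: userland learns PAGE_SIZE, hint addresses are truncated to a page boundary, and only memory-based devices (such as a video framebuffer) are mmap-able. A minimal userland sketch, not part of vm_mmap.c, showing the first two points:

#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	void *p;

	/* Anonymous mapping; the kernel returns a page-aligned address. */
	p = mmap(NULL, 1, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	printf("page size %d, mapping %p, aligned: %s\n",
	    getpagesize(), p,
	    ((uintptr_t)p & (getpagesize() - 1)) == 0 ? "yes" : "no");
	(void)munmap(p, 1);
	return (0);
}
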
149 .mr_hint = (uintptr_t)uap->addr,
150 .mr_len = uap->len,
151 .mr_prot = uap->prot,
152 .mr_flags = uap->flags,
153 .mr_fd = uap->fd,
154 .mr_pos = uap->pos,
162 if ((p->p_flag2 & P2_PROTMAX_DISABLE) != 0 ||
163 (p->p_fctl0 & NT_FREEBSD_FCTL_PROTMAX_DISABLE) != 0)
165 if (((p->p_flag2 & P2_PROTMAX_ENABLE) != 0 || imply_prot_max) &&
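
These lines are from the maximum-protection policy check: per-process flags and the ELF feature-control note can disable the implied PROT_MAX cap. As a sketch, assuming FreeBSD 13 or newer where PROT_MAX() is defined in <sys/mman.h>, a caller can also set the cap explicitly:

#include <sys/mman.h>
#include <err.h>
#include <unistd.h>

int
main(void)
{
	void *p;

	/* Map read-only now, but allow upgrading only as far as RW. */
	p = mmap(NULL, getpagesize(),
	    PROT_READ | PROT_MAX(PROT_READ | PROT_WRITE),
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	/* PROT_EXEC exceeds the declared maximum, so this fails. */
	if (mprotect(p, getpagesize(), PROT_READ | PROT_EXEC) == -1)
		warn("mprotect beyond PROT_MAX (expected)");
	return (0);
}
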
185 orig_addr = addr = mrp->mr_hint;
186 len = mrp->mr_len;
187 prot = mrp->mr_prot;
188 flags = mrp->mr_flags;
189 fd = mrp->mr_fd;
190 pos = mrp->mr_pos;
191 check_fp_fn = mrp->mr_check_fp_fn;
200 p = td->td_proc;
209 vms = p->p_vmspace;
219 * Enforce the constraints.
221 * Anonymous mapping shall specify -1 as file descriptor and
224 * ld.so sometimes issues anonymous map requests with non-zero
228 if ((len == 0 && p->p_osrel >= P_OSREL_MAP_ANON) ||
229 ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
237 if ((fd != -1) ||
254 if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 ||
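
The constraint block above requires fd == -1 and pos == 0 for anonymous mappings (with a carve-out for old binaries whose ld.so passed a non-zero pos) and PROT_NONE with fd == -1 for MAP_GUARD regions. A sketch of both conforming call shapes, assuming FreeBSD's MAP_GUARD:

#include <sys/mman.h>
#include <err.h>
#include <unistd.h>

int
main(void)
{
	size_t len = getpagesize();
	void *p, *g;

	/* Anonymous memory: fd must be -1 and pos must be 0. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap(MAP_ANON)");

	/* A guard region: PROT_NONE, fd == -1, offset 0. */
	g = mmap(NULL, len, PROT_NONE, MAP_GUARD, -1, 0);
	if (g == MAP_FAILED)
		err(1, "mmap(MAP_GUARD)");
	return (0);
}
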
264 pos -= pageoff;
290 addr -= pageoff;
295 if (!vm_map_range_valid(&vms->vm_map, addr, addr + size))
309 * XXX for non-fixed mappings where no hint is provided or
319 (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
320 addr < round_page((vm_offset_t)vms->vm_daddr +
322 addr = round_page((vm_offset_t)vms->vm_daddr +
328 * binaries that request a page-aligned mapping of
334 error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE,
342 error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
364 p->p_osrel >= P_OSREL_MAP_FSTRICT) {
374 if (fp->f_ops == &shm_ops && shm_largepage(fp->f_data))
377 error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
382 td->td_retval[0] = addr + pageoff;
395 .mr_hint = (uintptr_t)uap->addr,
396 .mr_len = uap->len,
397 .mr_prot = uap->prot,
398 .mr_flags = uap->flags,
399 .mr_fd = uap->fd,
400 .mr_pos = uap->pos,
419 return (kern_ommap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
420 uap->flags, uap->fd, uap->pos));
449 if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
486 return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
499 addr -= pageoff;
508 map = &td->td_proc->p_vmspace->vm_map;
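
kern_msync() page-aligns the range and operates on the current process's map. A userland sketch of the matching call, flushing a dirty file-backed page synchronously (the path is purely illustrative):

#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	char *p;
	int fd;

	fd = open("/tmp/msync.demo", O_RDWR | O_CREAT, 0644);
	if (fd == -1 || ftruncate(fd, getpagesize()) == -1)
		err(1, "open/ftruncate");
	p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	memcpy(p, "dirty", 5);
	/* Push the modified page to stable storage before returning. */
	if (msync(p, getpagesize(), MS_SYNC) == -1)
		err(1, "msync");
	return (0);
}
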
539 return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
560 addr -= pageoff;
564 map = &td->td_proc->p_vmspace->vm_map;
579 for (; entry->start < end;
581 if (vm_map_check_protection(map, entry->start,
582 entry->end, VM_PROT_EXECUTE) == TRUE) {
595 /* downgrade the lock to prevent a LOR with the pmc-sx lock */
618 return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len,
619 uap->prot, 0));
636 addr -= pageoff;
640 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
651 vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
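
kern_mprotect() also page-aligns the range before calling vm_map_protect(); the ILP32 branch handles 32-bit compat protections. A userland sketch of the common pattern, write-protecting a region after initializing it:

#include <sys/mman.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t len = getpagesize();
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	strcpy(p, "immutable from here on");
	/* Any later write to this page faults with SIGSEGV. */
	if (mprotect(p, len, PROT_READ) == -1)
		err(1, "mprotect");
	return (0);
}
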
678 return (kern_minherit(td, (uintptr_t)uap->addr, uap->len,
679 uap->inherit));
694 addr -= pageoff;
700 switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
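
vm_map_inherit() applies the inheritance mode whose status the switch translates into an errno. A userland sketch, assuming FreeBSD's INHERIT_ZERO:

#include <sys/mman.h>
#include <err.h>
#include <unistd.h>

int
main(void)
{
	size_t len = getpagesize();
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	/* A forked child sees this region as fresh zero-filled pages. */
	if (minherit(p, len, INHERIT_ZERO) == -1)
		err(1, "minherit");
	return (0);
}
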
722 return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
738 return (kern_procctl(td, P_PID, td->td_proc->p_pid,
746 map = &td->td_proc->p_vmspace->vm_map;
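
kern_madvise() special-cases MADV_PROTECT via kern_procctl() and passes other advice down to the map. A userland sketch using MADV_FREE, FreeBSD's "contents are disposable" hint:

#include <sys/mman.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t len = getpagesize();
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	memset(p, 0xff, len);
	/* Tell the VM the contents may be discarded under pressure. */
	if (madvise(p, len, MADV_FREE) == -1)
		err(1, "madvise");
	return (0);
}
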
776 return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec));
795 * mode.
799 map = &td->td_proc->p_vmspace->vm_map;
803 pmap = vmspace_pmap(td->td_proc->p_vmspace);
807 timestamp = map->timestamp;
819 lastvecindex = -1;
820 while (entry->start < end) {
826 if (current->end < end &&
827 entry->start > current->end) {
835 if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
836 current->object.vm_object == NULL)
843 if (addr < current->start)
844 addr = current->start;
845 cend = current->end;
873 * object lock is acquired, so re-validate if
877 while (object == NULL || m->object != object) {
880 object = atomic_load_ptr(&m->object);
897 if (current->object.vm_object != object) {
900 object = current->object.vm_object;
903 if ((object->flags & OBJ_SWAP) != 0 ||
904 object->type == OBJT_VNODE) {
905 pindex = OFF_TO_IDX(current->offset +
906 (addr - current->start));
915 VM_OBJECT_ASSERT_WLOCKED(m->object);
918 if (m->dirty == 0 && pmap_is_modified(m))
920 if (m->dirty != 0)
931 if ((m->a.flags & PGA_REFERENCED) != 0 ||
933 (m->a.flags & PGA_REFERENCED) != 0)
948 vecindex = atop(addr - first_addr);
977 if (timestamp != map->timestamp)
993 vecindex = atop(end - first_addr);
1008 if (timestamp != map->timestamp)
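
The loop above emits one status byte per page, built from flags such as MINCORE_INCORE, MINCORE_MODIFIED, and MINCORE_REFERENCED, and restarts if the map timestamp changes under it. A userland sketch reading that vector back:

#include <sys/mman.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t pgsz = getpagesize(), len = 4 * pgsz;
	char vec[4];
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	p[0] = 1;	/* Touch only the first page. */
	if (mincore(p, len, vec) == -1)
		err(1, "mincore");
	for (size_t i = 0; i < 4; i++)
		printf("page %zu: %s\n", i,
		    (vec[i] & MINCORE_INCORE) ? "resident" : "not resident");
	return (0);
}
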
1025 return (kern_mlock(td->td_proc, td->td_ucred,
1026 __DECONST(uintptr_t, uap->addr), uap->len));
1048 npages = atop(end - start);
1051 map = &proc->p_vmspace->vm_map;
1053 nsize = ptoa(npages + pmap_wired_count(map->pmap));
1074 ptoa(pmap_wired_count(map->pmap)));
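
kern_mlock() converts the range to a page count and checks it, together with what the pmap already has wired, against RLIMIT_MEMLOCK. A userland sketch:

#include <sys/mman.h>
#include <err.h>
#include <unistd.h>

int
main(void)
{
	size_t len = getpagesize();
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	/* Wire the page; charged against RLIMIT_MEMLOCK. */
	if (mlock(p, len) == -1)
		err(1, "mlock");
	/* ... use the wired buffer ... */
	if (munlock(p, len) == -1)
		err(1, "munlock");
	return (0);
}
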
1100 map = &td->td_proc->p_vmspace->vm_map;
1105 if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
1112 if (!old_mlock && uap->how & MCL_CURRENT) {
1113 if (map->size > lim_cur(td, RLIMIT_MEMLOCK))
1118 PROC_LOCK(td->td_proc);
1119 error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
1120 PROC_UNLOCK(td->td_proc);
1126 if (uap->how & MCL_FUTURE) {
1133 if (uap->how & MCL_CURRENT) {
1135 * P1003.1-2001 mandates that all currently mapped pages
1151 PROC_LOCK(td->td_proc);
1152 racct_set(td->td_proc, RACCT_MEMLOCK,
1153 ptoa(pmap_wired_count(map->pmap)));
1154 PROC_UNLOCK(td->td_proc);
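
Per the P1003.1-2001 reference above, MCL_CURRENT wires every existing mapping and MCL_FUTURE (via the MAP_WIREFUTURE map flag) extends that to mappings created later, with the wired total charged to the RACCT_MEMLOCK accounting shown here. A userland sketch:

#include <sys/mman.h>
#include <err.h>

int
main(void)
{
	/* Wire all current mappings and everything mapped later. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
		err(1, "mlockall");
	/* ... latency-sensitive work with no major page faults ... */
	if (munlockall() == -1)
		err(1, "munlockall");
	return (0);
}
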
1173 map = &td->td_proc->p_vmspace->vm_map;
1188 PROC_LOCK(td->td_proc);
1189 racct_set(td->td_proc, RACCT_MEMLOCK, 0);
1190 PROC_UNLOCK(td->td_proc);
1207 return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
1228 error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
1232 PROC_LOCK(td->td_proc);
1233 map = &td->td_proc->p_vmspace->vm_map;
1234 racct_set(td->td_proc, RACCT_MEMLOCK,
1235 ptoa(pmap_wired_count(map->pmap)));
1236 PROC_UNLOCK(td->td_proc);
1261 cred = td->td_ucred;
1269 obj = vp->v_object;
1270 if (vp->v_type == VREG) {
1278 if (obj->type == OBJT_VNODE && obj->handle != vp) {
1280 vp = (struct vnode *)obj->handle;
1322 if (obj->type == OBJT_VNODE) {
1330 KASSERT((obj->flags & OBJ_SWAP) != 0, ("wrong object type"));
1333 if ((obj->flags & OBJ_COLORED) == 0) {
1370 if (dsw->d_flags & D_MMAP_ANON) {
1390 error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
1403 error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
1407 td->td_ucred);
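
These fragments are from the vnode and character-device mapping paths: a cdev marked D_MMAP_ANON simply maps anonymous memory, and otherwise the driver's d_mmap_single()/d_mmap entry points decide what backs the pages. A hedged driver-side sketch of the memory-based case the earlier framebuffer comment alludes to; MYFB_BASE, MYFB_SIZE, and myfb_mmap are hypothetical:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <vm/vm.h>

#define	MYFB_BASE	0xd0000000UL	/* hypothetical physical base */
#define	MYFB_SIZE	(8UL * 1024 * 1024)

/* Called by the device pager to turn an offset into a physical frame. */
static int
myfb_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{

	if (offset < 0 || offset >= (vm_ooffset_t)MYFB_SIZE)
		return (EINVAL);
	*paddr = MYFB_BASE + offset;
	*memattr = VM_MEMATTR_WRITE_COMBINING;
	return (0);
}
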
1481 RACCT_PROC_LOCK(td->td_proc);
1482 if (map->size + size > lim_cur(td, RLIMIT_VMEM)) {
1483 RACCT_PROC_UNLOCK(td->td_proc);
1486 if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
1487 RACCT_PROC_UNLOCK(td->td_proc);
1490 if (!old_mlock && map->flags & MAP_WIREFUTURE) {
1491 if (ptoa(pmap_wired_count(map->pmap)) + size >
1493 racct_set_force(td->td_proc, RACCT_VMEM, map->size);
1494 RACCT_PROC_UNLOCK(td->td_proc);
1497 error = racct_set(td->td_proc, RACCT_MEMLOCK,
1498 ptoa(pmap_wired_count(map->pmap)) + size);
1500 racct_set_force(td->td_proc, RACCT_VMEM, map->size);
1501 RACCT_PROC_UNLOCK(td->td_proc);
1505 RACCT_PROC_UNLOCK(td->td_proc);
1522 curmap = map == &td->td_proc->p_vmspace->vm_map;
1592 round_page((vm_offset_t)td->td_proc->p_vmspace->
1613 if ((map->flags & MAP_WIREFUTURE) != 0) {
1615 if ((map->flags & MAP_WIREFUTURE) != 0)