| /openbsd-src/sys/uvm/ |
| H A D | uvm_object.h | 81 #define UVM_OBJ_IS_KERN_OBJECT(uobj) ((uobj)->uo_refs == UVM_OBJ_KERN); 95 #define UVM_OBJ_IS_VNODE(uobj) ((uobj)->pgops == &uvm_vnodeops); 98 #define UVM_OBJ_IS_DEVICE(uobj) ((uobj)->pgops == &uvm_deviceops); 101 #define UVM_OBJ_IS_VTEXT(uobj) ((uobj)->pgops == &uvm_vnodeops && ((struct vnode *)uobj)->v_flag & VTEXT); 105 #define UVM_OBJ_IS_AOBJ(uobj) … |
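The macros above classify a struct uvm_object by its pager operations. A minimal sketch of how such a check is typically used in kernel code; the helper name is made up and the include set is an assumption (the macros themselves live in uvm_object.h and are kernel-only):

```c
#include <sys/param.h>
#include <sys/vnode.h>
#include <uvm/uvm.h>

/*
 * Hypothetical helper: return the vnode backing a uobj, or NULL if the
 * object is not vnode-backed.  The cast relies on the same layout
 * assumption UVM_OBJ_IS_VTEXT makes above: a vnode-backed uvm_object
 * is embedded at the start of struct vnode.
 */
static struct vnode *
example_uobj_to_vnode(struct uvm_object *uobj)
{
	if (uobj == NULL || !UVM_OBJ_IS_VNODE(uobj))
		return NULL;
	return (struct vnode *)uobj;
}
```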
|
| H A D | uvm_aobj.c | 244 uao_find_swslot(struct uvm_object *uobj, int pageidx): 246 struct uvm_aobj *aobj = (struct uvm_aobj *)uobj; 248 KASSERT(UVM_OBJ_IS_AOBJ(uobj)); 284 uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot): 289 KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0); 290 KASSERT(UVM_OBJ_IS_AOBJ(uobj)); 300 printf("uao_set_swslot: uobj = %p\n", uobj); 358 uao_free(): struct uvm_object *uobj = &aobj->u_obj; 394 uao_shrink_flush(), 404 uao_shrink_hash(), 453 uao_shrink_convert(), 489 uao_shrink_array(), 513 uao_shrink(), 542 uao_grow_array(), 566 uao_grow_hash(), 610 uao_grow_convert(), 643 uao_grow(), 788 uao_reference(), 802 uao_detach(), 867 uao_flush(), 992 uao_get(), 1243 uao_dropswap(), 1454 uao_dropswap_range(): all take struct uvm_object *uobj; 1399 uao_pagein_page(): struct uvm_object *uobj = &aobj->u_obj. |
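uao_reference() and uao_detach() listed here manage the reference count of an anonymous object created with uao_create(). A hedged sketch of that lifecycle (kernel context; the function name and sizing are illustrative):

```c
#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_aobj.h>

/*
 * Sketch: create an anonymous UVM object covering `npages` pages,
 * then take and drop an extra reference.  With UAO_FLAG_CANFAIL the
 * allocation may return NULL instead of sleeping for memory.
 */
static struct uvm_object *
example_aobj_create(int npages)
{
	struct uvm_object *uobj;

	uobj = uao_create((vsize_t)npages << PAGE_SHIFT, UAO_FLAG_CANFAIL);
	if (uobj == NULL)
		return NULL;		/* allocation failed */

	uao_reference(uobj);		/* uo_refs: 1 -> 2 */
	uao_detach(uobj);		/* uo_refs: 2 -> 1 */
	return uobj;			/* caller drops the last ref with uao_detach() */
}
```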
| H A D | uvm_object.c | 62 uvm_obj_init(struct uvm_object *uobj, const struct uvm_pagerops *pgops, int refs): 71 rw_obj_alloc(&uobj->vmobjlock, "uobjlk"); 74 uobj->vmobjlock = NULL; 76 uobj->pgops = pgops; 77 RBT_INIT(uvm_objtree, &uobj->memt); 78 uobj->uo_npages = 0; 79 uobj->uo_refs = refs; 127 uvm_obj_wire(struct uvm_object *uobj, voff_t start, voff_t end, ...): 136 rw_enter(uobj->vmobjlock, RW_WRITE | RW_DUPOK); 143 error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0, … |
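uvm_obj_init() is how a pager wires up a freshly allocated uvm_object, as the GEM code further down does with obj->base.uobj. A minimal sketch; the softc layout, pager ops and function name are assumptions:

```c
#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

/* Hypothetical pager operations; a real pager provides pgo_get and friends. */
extern const struct uvm_pagerops example_pagerops;

struct example_softc {
	struct uvm_object sc_uobj;	/* must outlive any mapping of it */
	/* ... driver state ... */
};

/*
 * Sketch: initialise the embedded object with the driver's pager ops
 * and a single reference, mirroring the uvm_obj_init() call above.
 */
static void
example_obj_attach(struct example_softc *sc)
{
	uvm_obj_init(&sc->sc_uobj, &example_pagerops, 1);
}
```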
|
| H A D | uvm_vnode.c | 275 uvn_reference(struct uvm_object *uobj): 278 struct uvm_vnode *uvn = (struct uvm_vnode *)uobj; 281 rw_enter(uobj->vmobjlock, RW_WRITE); 285 … uobj->uo_refs, uvn->u_flags); 289 uobj->uo_refs++; 290 rw_exit(uobj->vmobjlock); 303 uvn_detach(struct uvm_object *uobj): 309 rw_enter(uobj->vmobjlock, RW_WRITE); 310 uobj->uo_refs--; /* drop ref! */ 311 if (uobj …; 423 uvm_vnp_terminate(): struct uvm_object *uobj = &uvn->u_obj; 599 uvn_flush(), 871 uvn_cluster(), 900 uvn_put(), 944 uvn_get(), 1166 uvn_io(), 1368 uvm_vnp_uncache(), 1447 uvm_vnp_setsize(): all take or derive a struct uvm_object *uobj. |
| H A D | uvm_device.c | 231 udv_reference(struct uvm_object *uobj): 233 rw_enter(uobj->vmobjlock, RW_WRITE); 234 uobj->uo_refs++; 235 rw_exit(uobj->vmobjlock); 244 udv_detach(struct uvm_object *uobj): 246 struct uvm_device *udv = (struct uvm_device *)uobj; 252 rw_enter(uobj->vmobjlock, RW_WRITE); 253 if (uobj->uo_refs > 1) { 254 uobj->uo_refs--; 255 rw_exit(uobj …; 294 udv_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags); 318 udv_fault(): struct uvm_object *uobj = entry->object.uvm_obj. |
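Both uvn_reference()/uvn_detach() and udv_reference()/udv_detach() above follow the same discipline: uo_refs is only touched with vmobjlock held for writing, and the final detach triggers the pager-specific teardown. A stripped-down sketch of that pattern with hypothetical names (the real pagers do considerably more work on the last drop):

```c
#include <sys/param.h>
#include <sys/rwlock.h>
#include <uvm/uvm.h>

static void
example_obj_reference(struct uvm_object *uobj)
{
	rw_enter(uobj->vmobjlock, RW_WRITE);
	uobj->uo_refs++;
	rw_exit(uobj->vmobjlock);
}

static int
example_obj_detach(struct uvm_object *uobj)
{
	int last;

	rw_enter(uobj->vmobjlock, RW_WRITE);
	last = (--uobj->uo_refs == 0);
	rw_exit(uobj->vmobjlock);
	return last;		/* non-zero: caller tears down the object */
}
```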
| H A D | uvm_pager.c | 329 uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages, struct vm_page *center, int flags, voff_t mlo, voff_t mhi): 344 uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi); 379 pclust = uvm_pagelookup(uobj, curoff); /* lookup page */ 448-463 comment ahead of uvm_pager_put(): if the page is not swap-backed, "uobj" points to the object owning the pages; if it is swap-backed, "uobj" should be NULL; PGO_ALLPAGES makes all pages in uobj valid targets; start/stop limit the targets if (uobj && !PGO_ALLPAGES), otherwise start is the (daddr_t) of the starting swapblk; 475/495 uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg, struct vm_page ***ppsp_ptr, int *npages, int flags, voff_t start, voff_t stop); 659 uvm_pager_dropcluster(struct uvm_object *uobj, struct vm_page *pg, struct vm_page **ppsp, int *npages, int flags). |
| H A D | uvm_fault.c | 62-110 fault-handling comment (ASCII layer diagram elided): having no amap or uobj present is an error; in the upper-layer anon case, I/O takes place in the upper level anon and uobj is not touched; case [2], lower layer fault [uobj]: 2A is [read on non-NULL uobj] or [write to non-copy_on_write area], 2B is [write to copy_on_write] or [read on NULL uobj], where data is "promoted" from uobj to a new anon, or zero filled if uobj is null; the promote path first ensures the source page is resident (if uobj). 610 uvm_fault(): struct uvm_object *uobj = ufi.entry->object.uvm_obj; 666 uvm_fault_check(): struct uvm_object *uobj; 1119 uvm_fault_lower_lookup() and 1209 uvm_fault_lower(): struct uvm_object *uobj = ufi->entry->object.uvm_obj; 1733 uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap, struct uvm_object *uobj). |
| H A D | uvm_pdaemon.c | 368 uvmpd_trylockowner(): struct uvm_object *uobj = pg->uobject; 395 uvmpd_dropswap(): struct uvm_object *uobj = pg->uobject; 398 if (uobj != NULL) 399 slock = uobj->vmobjlock; 427/471 uvmpd_scan_inactive(): struct uvm_object *uobj; 509 uobj = NULL; 546 uobj = p->uobject; 579 if (uobj != NULL) …; 694 uao_set_swslot(uobj, …; 759 result = uvm_pager_put(swap_backed ? NULL : uobj, p, …; 997 uvmpd_drop(): struct uvm_object *uobj = p->uobject; 1055 struct uvm_object *uobj … |
| H A D | uvm_km.c | 247 uvm_km_pgremove(struct uvm_object *uobj, vaddr_t startva, vaddr_t endva): 256 KASSERT(UVM_OBJ_IS_AOBJ(uobj)); 257 KASSERT(rw_write_held(uobj->vmobjlock)); 261 pp = uvm_pagelookup(uobj, curoff); 263 uvm_pagewait(pp, uobj->vmobjlock, "km_pgrm"); 264 rw_enter(uobj->vmobjlock, RW_WRITE); 270 slot = uao_dropswap(uobj, curoff >> PAGE_SHIFT); 738/817 km_alloc(): struct uvm_object *uobj = NULL; 744 uobj = *kp->kp_object; 748 if (uvm_map(map, &va, sz, uobj, … |
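The uvm_km_pgremove() snippet shows the standard way to wait on a busy page: uvm_pagewait() sleeps and drops vmobjlock, so the caller re-takes the lock and retries the lookup. A hedged sketch of that loop; the helper name, offset parameter and wait message are made up:

```c
#include <sys/param.h>
#include <sys/rwlock.h>
#include <uvm/uvm.h>

/*
 * Look up the page at `off` in uobj and wait until it is no longer
 * busy.  Caller holds vmobjlock for writing; uvm_pagewait() releases
 * it, so the lookup restarts from scratch after re-locking.
 */
static struct vm_page *
example_lookup_unbusy(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	rw_assert_wrlock(uobj->vmobjlock);
	while ((pg = uvm_pagelookup(uobj, off)) != NULL &&
	    (pg->pg_flags & PG_BUSY)) {
		uvm_pagewait(pg, uobj->vmobjlock, "exwait");	/* drops the lock */
		rw_enter(uobj->vmobjlock, RW_WRITE);		/* re-take and retry */
	}
	return pg;
}
```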
| H A D | uvm_mmap.c | 1038/1042 uvm_mmapfile(): struct uvm_object *uobj; 1067 uobj = uvn_attach(vp, (flags & MAP_SHARED) ? …; 1100 uobj = udv_attach(vp->v_rdev, …; 1108 if (uobj == NULL && (prot & PROT_EXEC) == 0) 1110 uobj = udv_attach(vp->v_rdev, …; 1117 if (uobj == NULL) …; 1134 error = uvm_map(map, addr, size, uobj, foff, align, uvmflag); 1139 /* errors: first detach from the uobj, if any. */ 1140 if (uobj) 1141 uobj … |
| H A D | uvm_map.c | 920-924 comment on uvm_map(): the <uobj,uoffset> pair has four meanings, among them [3] <uobj,uoffset> == normal mapping and [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA; 935 uvm_map(..., struct uvm_object *uobj, voff_t uoffset, ...): 990 KASSERT(uobj == NULL && (flags & UVM_FLAG_FIXED) && …; 1117 if (uobj == NULL) … 1120 KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj)); 1136 entry->object.uvm_obj = uobj; 1148 if (uobj) …; 1171 if ((map->flags & VM_MAP_ISVMSPACE) && uobj …; 2935 uvm_object_printit(struct uvm_object *uobj, boolean_t full, int (*pr)(const char *, ...)); 2977 uvm_page_printit(): struct uvm_object *uobj; 4462 uvm_map_clean(): struct uvm_object *uobj. |
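Case [3] from that comment, an object mapped at an explicit offset, is what callers such as uvm_mmapfile() and km_alloc() above set up. A hedged sketch of such a call; the wrapper name is invented and the UVM_MAPFLAG() protection/inherit/advice choices are just one plausible combination:

```c
#include <sys/param.h>
#include <sys/mman.h>
#include <uvm/uvm_extern.h>

/*
 * Map `size` bytes of `uobj` starting at object offset 0 into `map`.
 * On entry *vap is the address hint; on success it holds the address
 * uvm_map() chose.
 */
static int
example_map_object(struct vm_map *map, struct uvm_object *uobj,
    vaddr_t *vap, vsize_t size)
{
	return uvm_map(map, vap, round_page(size), uobj, 0, 0,
	    UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
	    MAP_INHERIT_NONE, MADV_NORMAL, 0));
}
```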
| /openbsd-src/sys/dev/pci/drm/ttm/ |
| H A D | ttm_bo_vm.c | 484 ttm_bo_vm_fault_reserved(): struct uvm_object *uobj = ufi->entry->object.uvm_obj; 485 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; 595/596 the same pair in ttm_bo_vm_fault(); 618 uvmfault_unlockall(ufi, NULL, uobj); 729/743 ttm_bo_vm_reference(struct uvm_object *uobj): 732 (struct ttm_buffer_object *)uobj; 738/752 ttm_bo_vm_detach(struct uvm_object *uobj): 740 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; 801 if (bo->base.uobj … |
| /openbsd-src/sys/dev/pci/drm/ |
| H A D | drm_gem.c | 72 drm_ref(struct uvm_object *uobj): 75 container_of(uobj, struct drm_gem_object, uobj); 81 drm_unref(struct uvm_object *uobj): 84 container_of(uobj, struct drm_gem_object, uobj); 95 drm_fault(): struct uvm_object *uobj = entry->object.uvm_obj; 97 container_of(uobj, struct drm_gem_object, uobj); 106 uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj); 148 drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags). |
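The drm_ref()/drm_unref()/drm_fault() snippets all recover the GEM object from the uvm_object embedded in it (the uobj member declared in drm_gem.h at the end of this listing). A sketch of that embedding pattern with hypothetical names; the pointer arithmetic is what a container_of()-style macro expands to:

```c
#include <sys/param.h>
#include <sys/tree.h>
#include <uvm/uvm_object.h>

struct example_gem_object {
	/* ... driver state ... */
	struct uvm_object uobj;		/* handed to UVM when the object is mapped */
};

/*
 * Recover the outer object from the uvm_object pointer that the pager
 * hooks (reference, detach, fault) are called with.
 */
static struct example_gem_object *
example_from_uobj(struct uvm_object *uobj)
{
	return (struct example_gem_object *)((char *)uobj -
	    __builtin_offsetof(struct example_gem_object, uobj));
}
```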
| H A D | drm_gem_dma_helper.c | 191 drm_gem_dma_fault(): struct uvm_object *uobj = &obj->base.uobj; 219 … uobj); 226 uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj); |
|
| /openbsd-src/sys/kern/ |
| H A D | exec_subr.c | 165/169 vmcmd_map_pagedvn(): struct uvm_object *uobj; 186 uobj = uvn_attach(cmd->ev_vp, PROT_READ | PROT_EXEC); 187 if (uobj == NULL) …; 194 uvm_map(..., uobj, cmd->ev_offset, 0, …; 206 uobj->pgops->pgo_detach(uobj); |
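vmcmd_map_pagedvn() shows the vnode-mapping pattern also used by uvm_mmapfile(): uvn_attach() hands back a referenced object, uvm_map() takes ownership of that reference on success, and on failure the caller drops it through the object's detach hook. A reduced sketch under those assumptions; the function name, include set and error value are illustrative:

```c
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_vnode.h>

static int
example_map_vnode(struct vm_map *map, struct vnode *vp, vaddr_t *addrp,
    vsize_t len, voff_t off, unsigned int uvmflag)
{
	struct uvm_object *uobj;
	int error;

	uobj = uvn_attach(vp, PROT_READ);	/* returns a referenced uobj or NULL */
	if (uobj == NULL)
		return ENOMEM;

	error = uvm_map(map, addrp, round_page(len), uobj, off, 0, uvmflag);
	if (error)
		uobj->pgops->pgo_detach(uobj);	/* undo the attach reference */
	return error;
}
```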
|
| H A D | vfs_biomem.c | 295 buf_free_pages(): struct uvm_object *uobj = bp->b_pobj; 300 KASSERT(uobj != NULL); 308 pg = uvm_pagelookup(uobj, off + ptoa(i)); 319 uvm_obj_free(uobj); |
|
| H A D | exec_elf.c | 432/434 elf_load_file(): struct uvm_object *uobj; 440 uobj = &vp->v_uvm.u_obj; 444 uobj = NULL; |
|
| /openbsd-src/sys/tmpfs/ |
| H A D | tmpfs_subr.c | 108 tmpfs_alloc_node(): struct uvm_object *uobj; 188 uobj = uao_create(0, UAO_FLAG_CANFAIL); 189 if (uobj == NULL) …; 193 nnode->tn_spec.tn_reg.tn_aobj = uobj; 304 tmpfs_vnode_get(): struct uvm_object *uobj = node->tn_spec.tn_reg.tn_aobj; 305 slock = uobj->vmobjlock; 868/873 tmpfs_reg_resize(): struct uvm_object *uobj = node->tn_spec.tn_reg.tn_aobj; 892 rw_enter(uobj->vmobjlock, RW_WRITE); 893 error = uao_grow(uobj, newpages); 894 rw_exit(uobj … |
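tmpfs backs each regular file with an anonymous object and resizes it with uao_grow() while holding vmobjlock for writing, as the snippet shows. A hedged sketch of that resize step, pairing uao_grow() with uao_shrink() from uvm_aobj.c above; the helper name is made up:

```c
#include <sys/param.h>
#include <sys/rwlock.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_aobj.h>

/* Resize an anonymous object from `oldpages` to `newpages` pages. */
static int
example_aobj_resize(struct uvm_object *uobj, int oldpages, int newpages)
{
	int error = 0;

	rw_enter(uobj->vmobjlock, RW_WRITE);
	if (newpages > oldpages)
		error = uao_grow(uobj, newpages);
	else if (newpages < oldpages)
		error = uao_shrink(uobj, newpages);
	rw_exit(uobj->vmobjlock);
	return error;
}
```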
| /openbsd-src/sys/dev/pci/drm/i915/gem/ |
| H A D | i915_gem_ttm.c | 1231 vm_fault_ttm(): struct uvm_object *uobj = ufi->entry->object.uvm_obj; 1232 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; 1242, 1259, 1265, 1287, 1375 uvmfault_unlockall(ufi, NULL, &obj->base.uobj); 1381 ttm_vm_reference(struct uvm_object *uobj): 1384 i915_ttm_to_gem((struct ttm_buffer_object *)uobj); 1390 ttm_vm_detach(struct uvm_object *uobj). |
| H A D | i915_gem_mman.c | 631 vm_fault_cpu(), 678 vm_fault_gtt(), 900 i915_gem_object_release_mmap_gtt(): uvmfault_unlockall(ufi, NULL, &obj->base.uobj); 923 i915_gem_object_runtime_pm_release_mmap_offset(): uvmfault_unlockall(ufi, NULL, &gem_obj->uobj); 1562 uvm_obj_init(&obj->base.uobj, obj->ops->mmap_ops, 1); 1564 return &obj->base.uobj; |
|
| /openbsd-src/sys/dev/pci/drm/amd/amdgpu/ |
| H A D | amdgpu_gem.c | 94 amdgpu_gem_fault(): struct uvm_object *uobj = ufi->entry->object.uvm_obj; 95 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; 141 uvmfault_unlockall(ufi, NULL, uobj); 146/157 amdgpu_gem_vm_reference(struct uvm_object *uobj): 148 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; 154/165 amdgpu_gem_vm_detach(struct uvm_object *uobj): 156 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; |
|
| /openbsd-src/sys/dev/pci/drm/radeon/ |
| H A D | radeon_gem.c | 90 radeon_gem_fault(): struct uvm_object *uobj = ufi->entry->object.uvm_obj; 91 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; 128 uvmfault_unlockall(ufi, NULL, uobj); 133 radeon_gem_vm_reference(struct uvm_object *uobj): 135 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; 141 radeon_gem_vm_detach(struct uvm_object *uobj): 143 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; |
|
| /openbsd-src/usr.sbin/procmap/ |
| H A D | procmap.c | 79 #define UVM_OBJ_IS_VNODE(uobj) ((uobj)->pgops == uvm_vnodeops); 80 #define UVM_OBJ_IS_AOBJ(uobj) ((uobj)->pgops == aobj_pager); 81 #define UVM_OBJ_IS_DEVICE(uobj) ((uobj)->pgops == uvm_deviceops) |
|
| /openbsd-src/sys/dev/pci/drm/include/drm/ |
| H A D | drm_gem.h | 277 struct uvm_object uobj; (member of struct drm_gem_object) |
|