Searched refs:uobj (Results 1 – 25 of 62) sorted by relevance

/netbsd-src/sys/uvm/
uvm_object.c
131 uvm_obj_wirepages(struct uvm_object *uobj, off_t start, off_t end, in uvm_obj_wirepages() argument
140 rw_enter(uobj->vmobjlock, RW_WRITER); in uvm_obj_wirepages()
147 error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0, in uvm_obj_wirepages()
154 rw_enter(uobj->vmobjlock, RW_WRITER); in uvm_obj_wirepages()
167 rw_exit(uobj->vmobjlock); in uvm_obj_wirepages()
169 rw_enter(uobj->vmobjlock, RW_WRITER); in uvm_obj_wirepages()
179 uao_dropswap(uobj, i); in uvm_obj_wirepages()
198 rw_exit(uobj->vmobjlock); in uvm_obj_wirepages()
204 uvm_obj_unwirepages(uobj, start, offset); in uvm_obj_wirepages()
217 uvm_obj_unwirepages(struct uvm_object *uobj, off_t start, off_t end) in uvm_obj_unwirepages() argument
[all …]
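
Note: the uvm_obj_wirepages() excerpt shows the wiring contract — take vmobjlock as writer, fault the range in via pgo_get (which drops the lock for I/O, hence the repeated rw_enter calls), and unwind with uvm_obj_unwirepages() on error. A minimal caller sketch, assuming an anonymous object and a page-aligned range (illustrative, not from the tree):

    struct uvm_object *uobj = uao_create(2 * PAGE_SIZE, 0);
    int error;

    /* Wire both pages; NULL means we don't need them collected on a pglist. */
    error = uvm_obj_wirepages(uobj, 0, 2 * PAGE_SIZE, NULL);
    if (error == 0) {
            /* ... pages are resident and wired here ... */
            uvm_obj_unwirepages(uobj, 0, 2 * PAGE_SIZE);
    }
    uao_detach(uobj);
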
uvm_aobj.c
248 uao_find_swslot(struct uvm_object *uobj, int pageidx) in uao_find_swslot() argument
250 struct uvm_aobj *aobj = (struct uvm_aobj *)uobj; in uao_find_swslot()
253 KASSERT(UVM_OBJ_IS_AOBJ(uobj)); in uao_find_swslot()
288 uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot) in uao_set_swslot() argument
290 struct uvm_aobj *aobj = (struct uvm_aobj *)uobj; in uao_set_swslot()
297 KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0); in uao_set_swslot()
298 KASSERT(UVM_OBJ_IS_AOBJ(uobj)); in uao_set_swslot()
369 struct uvm_object *uobj = &aobj->u_obj; in uao_free() local
371 KASSERT(UVM_OBJ_IS_AOBJ(uobj)); in uao_free()
372 KASSERT(rw_write_held(uobj->vmobjlock)); in uao_free()
[all …]
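
Note: the KASSERT in uao_set_swslot() spells out its lock contract — vmobjlock held as writer, unless the object is already unreferenced and being torn down (uo_refs == 0). A hypothetical caller in the style of swapcluster_add() further down this listing (the slot number would come from the swap allocator, omitted here):

    int oldslot;

    rw_enter(uobj->vmobjlock, RW_WRITER);
    /* Bind swap slot 'slot' to the aobj page at pg->offset. */
    oldslot = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
    rw_exit(uobj->vmobjlock);
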
uvm_object.h
87 #define UVM_OBJ_IS_KERN_OBJECT(uobj) \ argument
88 ((uobj)->uo_refs == UVM_OBJ_KERN)
97 #define UVM_OBJ_IS_VNODE(uobj) \ argument
98 ((uobj)->pgops == &uvm_vnodeops)
100 #define UVM_OBJ_IS_DEVICE(uobj) \ argument
101 ((uobj)->pgops == &uvm_deviceops)
103 #define UVM_OBJ_IS_VTEXT(uobj) \ argument
104 (UVM_OBJ_IS_VNODE(uobj) && uvn_text_p(uobj))
106 #define UVM_OBJ_IS_CLEAN(uobj) \ argument
107 (UVM_OBJ_IS_VNODE(uobj) && uvm_obj_clean_p(uobj))
[all …]
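
Note: all of these predicates work by comparing uo_refs or pgops against a known pager, so classifying an object is a chain of cheap pointer tests. A hypothetical helper built only from the macros above:

    static const char *
    uobj_kind(struct uvm_object *uobj)
    {
            if (UVM_OBJ_IS_KERN_OBJECT(uobj))
                    return "kernel object";
            if (UVM_OBJ_IS_VNODE(uobj))
                    return UVM_OBJ_IS_VTEXT(uobj) ? "vnode (text)" : "vnode";
            if (UVM_OBJ_IS_DEVICE(uobj))
                    return "device";
            return "aobj or other";
    }
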
uvm_bio.c
79 #define UBC_HASH(uobj, offset) \ argument
80 (((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
95 struct uvm_object * uobj; /* mapped object */ member
110 struct uvm_object uobj; /* glue for uvm_map() */ member
181 uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN); in UBC_EVCNT_DEFINE()
215 ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va, in UBC_EVCNT_DEFINE()
311 struct uvm_object *uobj; in ubc_fault() local
327 uvmfault_unlockall(ufi, NULL, &ubc_object.uobj); in ubc_fault()
366 uobj = umap->uobj; in ubc_fault()
379 rw_enter(uobj->vmobjlock, RW_WRITER); in ubc_fault()
[all …]
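
Note: UBC_HASH buckets a (uobj, offset) pair by mixing the object pointer's bits above the low 8 with the page number of the offset; the mask on the elided continuation line reduces the sum to the table size. Expanded as a standalone expression (the mask parameter here is a stand-in for the elided one):

    #define SKETCH_UBC_HASH(uobj, offset, mask) \
            (((((u_long)(uobj)) >> 8) + \
              (((u_long)(offset)) >> PAGE_SHIFT)) & (mask))
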
uvm_vnode.c
120 uvn_reference(struct uvm_object *uobj) in uvn_reference() argument
122 vref((struct vnode *)uobj); in uvn_reference()
135 uvn_detach(struct uvm_object *uobj) in uvn_detach() argument
137 vrele((struct vnode *)uobj); in uvn_detach()
148 uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags) in uvn_put() argument
150 struct vnode *vp = (struct vnode *)uobj; in uvn_put()
153 KASSERT(rw_write_held(uobj->vmobjlock)); in uvn_put()
170 uvn_get(struct uvm_object *uobj, voff_t offset, in uvn_get() argument
175 struct vnode *vp = (struct vnode *)uobj; in uvn_get()
184 uvn_alloc_ractx(uobj); in uvn_get()
[all …]
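
Note: for vnodes the uvm_object is embedded in struct vnode, so uvn_reference()/uvn_detach() just cast and defer to vref()/vrele(). Code that must work for any object type goes through pgops instead, as vmcmd_linux32_kuser_helper_map() does further down this listing. Sketch of the type-independent form:

    /* Dispatches to uvn_reference()/udv_reference()/etc. as appropriate. */
    (*uobj->pgops->pgo_reference)(uobj);
    /* ... use the object ... */
    (*uobj->pgops->pgo_detach)(uobj);
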
uvm_device.c
294 udv_reference(struct uvm_object *uobj) in udv_reference() argument
298 rw_enter(uobj->vmobjlock, RW_WRITER); in udv_reference()
299 uobj->uo_refs++; in udv_reference()
301 (uintptr_t)uobj, uobj->uo_refs,0,0); in udv_reference()
302 rw_exit(uobj->vmobjlock); in udv_reference()
314 udv_detach(struct uvm_object *uobj) in udv_detach() argument
316 struct uvm_device *udv = (struct uvm_device *)uobj; in udv_detach()
323 rw_enter(uobj->vmobjlock, RW_WRITER); in udv_detach()
324 if (uobj->uo_refs > 1) { in udv_detach()
325 uobj->uo_refs--; in udv_detach()
[all …]
uvm_fault.c
553 struct uvm_object *uobj; in uvmfault_promote() local
574 uobj = opg->uobject; in uvmfault_promote()
576 uobj = NULL; in uvmfault_promote()
583 KASSERT(uobj == NULL || rw_lock_held(uobj->vmobjlock)); in uvmfault_promote()
622 uvmfault_unlockall(ufi, amap, uobj); in uvmfault_promote()
912 struct uvm_object * const uobj = in uvm_fault_internal() local
915 if (uobj && uobj->pgops->pgo_fault != NULL) { in uvm_fault_internal()
919 rw_enter(uobj->vmobjlock, RW_WRITER); in uvm_fault_internal()
921 error = uobj->pgops->pgo_fault(&ufi, in uvm_fault_internal()
982 struct uvm_object *uobj; in uvm_fault_check() local
[all …]
uvm_loan.c
142 struct uvm_object *uobj = ufi->entry->object.uvm_obj; in uvm_loanentry() local
173 } else if (uobj) { in uvm_loanentry()
178 uvmfault_unlockall(ufi, aref->ar_amap, uobj); in uvm_loanentry()
184 KASSERT(rv > 0 || uobj == NULL || in uvm_loanentry()
185 !rw_write_held(uobj->vmobjlock)); in uvm_loanentry()
511 uvm_loanuobjchunk(struct uvm_object *uobj, voff_t pgoff, int orignpages, in uvm_loanuobjchunk() argument
516 rw_enter(uobj->vmobjlock, RW_WRITER); in uvm_loanuobjchunk()
519 error = (*uobj->pgops->pgo_get)(uobj, pgoff, pgpp, &npages, 0, in uvm_loanuobjchunk()
526 rw_enter(uobj->vmobjlock, RW_WRITER); in uvm_loanuobjchunk()
528 KASSERT(pgpp[i]->uobject->vmobjlock == uobj->vmobjlock); in uvm_loanuobjchunk()
[all …]
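
Note: uvm_loanuobjchunk() shows the canonical synchronous pgo_get sequence — enter vmobjlock as writer, call the pager with PGO_SYNCIO (it returns with the lock dropped after I/O), then re-enter the lock before touching the returned pages. A reduced sketch, with the trailing pgo_get arguments assumed from the standard pager signature:

    struct vm_page *pg = NULL;
    int npages = 1, error;

    rw_enter(uobj->vmobjlock, RW_WRITER);
    error = (*uobj->pgops->pgo_get)(uobj, off, &pg, &npages, 0,
        VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
    if (error == 0) {
            rw_enter(uobj->vmobjlock, RW_WRITER);
            /* ... pg is resident and busy; do work, then unbusy it ... */
    }
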
uvm_readahead.c
118 ra_startio(struct uvm_object *uobj, off_t off, size_t sz) in ra_startio() argument
123 __func__, uobj, off, endoff)); in ra_startio()
125 KASSERT(rw_write_held(uobj->vmobjlock)); in ra_startio()
134 struct vm_page *pg = uvm_pagelookup(uobj, trunc_page(endoff - 1)); in ra_startio()
161 error = (*uobj->pgops->pgo_get)(uobj, off, NULL, in ra_startio()
163 rw_enter(uobj->vmobjlock, RW_WRITER); in ra_startio()
219 uvm_ra_request(struct uvm_ractx *ra, int advice, struct uvm_object *uobj, in uvm_ra_request() argument
223 KASSERT(rw_write_held(uobj->vmobjlock)); in uvm_ra_request()
331 next = ra_startio(uobj, raoff, rasize); in uvm_ra_request()
350 uvm_readahead(struct uvm_object *uobj, off_t off, off_t size) in uvm_readahead() argument
[all …]
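
Note: uvm_ra_request()'s KASSERT shows it expects the caller to already hold vmobjlock as writer; ra_startio() then drops and re-takes the lock around the asynchronous pgo_get. A hypothetical read-path call (the argument order after 'uobj' — request offset, then size — is assumed):

    rw_enter(uobj->vmobjlock, RW_WRITER);
    uvm_ra_request(ractx, UVM_ADV_SEQUENTIAL, uobj, off, len);
    rw_exit(uobj->vmobjlock);
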
uvm_page_status.c
62 struct uvm_object * const uobj __diagused = pg->uobject; in uvm_pagegetdirty()
66 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) == in uvm_pagegetdirty()
86 struct uvm_object * const uobj = pg->uobject; in uvm_pagemarkdirty() local
93 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) == in uvm_pagemarkdirty()
105 if (uobj != NULL) { in uvm_pagemarkdirty()
113 if (uvm_obj_clean_p(uobj) && in uvm_pagemarkdirty()
114 uobj->pgops->pgo_markdirty != NULL) { in uvm_pagemarkdirty()
115 (*uobj->pgops->pgo_markdirty)(uobj); in uvm_pagemarkdirty()
128 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) == in uvm_pagemarkdirty()
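
Note: uvm_pagemarkdirty() keeps PG_CLEAN, the per-object dirty accounting, and the pager's pgo_markdirty notification consistent, so writers never flip pg->flags directly. A hypothetical write path (status constant as defined in uvm_page.h):

    rw_enter(uobj->vmobjlock, RW_WRITER);
    pg = uvm_pagelookup(uobj, off);
    if (pg != NULL)
            uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
    rw_exit(uobj->vmobjlock);
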
uvm_mmap.c
129 struct uvm_object *uobj; in sys_mincore() local
201 uobj = entry->object.uvm_obj; /* lower layer */ in sys_mincore()
205 if (uobj != NULL) in sys_mincore()
206 rw_enter(uobj->vmobjlock, RW_READER); in sys_mincore()
225 if (uobj != NULL && pgi == 0) { in sys_mincore()
227 pg = uvm_pagelookup(uobj, in sys_mincore()
241 if (uobj != NULL) in sys_mincore()
242 rw_exit(uobj->vmobjlock); in sys_mincore()
283 struct uvm_object *uobj; in sys_mmap() local
377 &advice, &uobj, &maxprot); in sys_mmap()
[all …]
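
Note: sys_mincore() only looks pages up, so a reader hold on vmobjlock suffices — compare the RW_WRITER enters everywhere else in this listing. A residency test in the same style:

    bool resident;

    rw_enter(uobj->vmobjlock, RW_READER);
    resident = (uvm_pagelookup(uobj, offset) != NULL);
    rw_exit(uobj->vmobjlock);
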
uvm_pdaemon.c
385 struct uvm_object *uobj = pg->uobject; in uvmpd_page_owner_lock() local
392 if (uobj == (void *)0xdeadbeef || anon == (void *)0xdeadbeef) { in uvmpd_page_owner_lock()
396 if (uobj != NULL) { in uvmpd_page_owner_lock()
397 slock = uobj->vmobjlock; in uvmpd_page_owner_lock()
398 KASSERTMSG(slock != NULL, "pg %p uobj %p, NULL lock", pg, uobj); in uvmpd_page_owner_lock()
519 struct uvm_object *uobj; in swapcluster_add() local
526 uobj = pg->uobject; in swapcluster_add()
527 if (uobj == NULL) { in swapcluster_add()
533 KASSERT(rw_write_held(uobj->vmobjlock)); in swapcluster_add()
534 result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot); in swapcluster_add()
[all …]
uvm_page_array.c
45 uvm_page_array_init(struct uvm_page_array *ar, struct uvm_object *uobj, in uvm_page_array_init() argument
51 ar->ar_uobj = uobj; in uvm_page_array_init()
139 struct uvm_object *uobj = ar->ar_uobj; in uvm_page_array_fill() local
149 KASSERT(rw_lock_held(uobj->vmobjlock)); in uvm_page_array_fill()
161 &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages, in uvm_page_array_fill()
167 &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages, in uvm_page_array_fill()
200 KDASSERT(pg->uobject == uobj); in uvm_page_array_fill()
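
Note: uvm_page_array batches radix-tree lookups over uobj->uo_pages so iterating an object's pages doesn't pay a full tree descent per page. A hypothetical loop over the init/fill/advance API from this file (the fill_and_peek parameters are assumed):

    struct uvm_page_array a;
    struct vm_page *pg;

    uvm_page_array_init(&a, uobj, 0);
    while ((pg = uvm_page_array_fill_and_peek(&a, off, 0)) != NULL) {
            /* ... visit pg ... */
            off = pg->offset + PAGE_SIZE;
            uvm_page_array_advance(&a);
    }
    uvm_page_array_fini(&a);
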
uvm_page.c
207 uvm_pageinsert_object(struct uvm_object *uobj, struct vm_page *pg) in uvm_pageinsert_object() argument
210 KASSERT(uobj == pg->uobject); in uvm_pageinsert_object()
211 KASSERT(rw_write_held(uobj->vmobjlock)); in uvm_pageinsert_object()
219 if (uobj->uo_npages == 0) { in uvm_pageinsert_object()
220 struct vnode *vp = (struct vnode *)uobj; in uvm_pageinsert_object()
227 if (UVM_OBJ_IS_VTEXT(uobj)) { in uvm_pageinsert_object()
236 uobj->uo_npages++; in uvm_pageinsert_object()
240 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg) in uvm_pageinsert_tree() argument
245 KASSERT(rw_write_held(uobj->vmobjlock)); in uvm_pageinsert_tree()
247 error = radix_tree_insert_node(&uobj->uo_pages, idx, pg); in uvm_pageinsert_tree()
[all …]
uvm_map.c
200 #define UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \ argument
204 (ent)->object.uvm_obj == (uobj) && \
1051 * => <uobj,uoffset> value meanings (4 cases):
1054 * [3] <uobj,uoffset> == normal mapping
1055 * [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA
1070 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags) in uvm_map()
1093 error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align, in uvm_map()
1120 struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags, in uvm_map_prepare()
1130 UVMHIST_LOG(maphist, " uobj/offset %#jx/%jd", (uintptr_t)uobj, in uvm_map_prepare()
1058 uvm_map(struct vm_map *map, vaddr_t *startp, vsize_t size, struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags) in uvm_map() argument
1108 uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size, struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags, struct uvm_map_args *args) in uvm_map_prepare() argument
1278 struct uvm_object *uobj = args->uma_uobj; in uvm_map_enter() local
1775 uvm_findspace_invariants(struct vm_map *map, vaddr_t orig_hint, vaddr_t length, struct uvm_object *uobj, voff_t uoffset, vsize_t align, int flags, vaddr_t hint, struct vm_map_entry *entry, int line) in uvm_findspace_invariants() argument
1820 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length, vaddr_t *result, struct uvm_object *uobj, voff_t uoffset, vsize_t align, int flags) in uvm_map_findspace() argument
3179 struct uvm_object *uobj = in uvm_map_protect() local
3360 struct uvm_object * const uobj = entry->object.uvm_obj; in uvm_map_willneed() local
3898 struct uvm_object *uobj; in uvm_map_clean() local
4663 struct uvm_object *uobj; in uvm_mapent_trymerge() local
4890 UVM_VOADDR_SET_UOBJ(voa, uobj) argument
5062 struct uvm_object *uobj = entry->object.uvm_obj; in uvm_voaddr_acquire() local
5096 struct uvm_object * const uobj = UVM_VOADDR_GET_UOBJ(voaddr); in uvm_voaddr_release() local
5258 struct uvm_object *uobj = e->object.uvm_obj; in fill_vmentry() local
[all …]
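
Note: the <uobj,uoffset> comment above enumerates how uvm_map() interprets its object arguments; in case [4], passing UVM_UNKNOWN_OFFSET lets uvm_map() derive the object offset from wherever the virtual address lands. A hypothetical case-[4] call (the UVM_MAPFLAG arguments are typical values, not from the listing):

    error = uvm_map(map, &va, size, uobj, UVM_UNKNOWN_OFFSET, 0,
        UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
            UVM_ADV_NORMAL, 0));
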
/netbsd-src/sys/external/bsd/drm2/include/linux/
shmem_fs.h
50 shmem_read_mapping_page_gfp(struct uvm_object *uobj, voff_t i, gfp_t gfp) in shmem_read_mapping_page_gfp() argument
55 error = uvm_obj_wirepages(uobj, i*PAGE_SIZE, (i + 1)*PAGE_SIZE, NULL); in shmem_read_mapping_page_gfp()
59 rw_enter(uobj->vmobjlock, RW_READER); in shmem_read_mapping_page_gfp()
60 vm_page = uvm_pagelookup(uobj, i*PAGE_SIZE); in shmem_read_mapping_page_gfp()
61 rw_exit(uobj->vmobjlock); in shmem_read_mapping_page_gfp()
68 shmem_read_mapping_page(struct uvm_object *uobj, voff_t i) in shmem_read_mapping_page() argument
70 return shmem_read_mapping_page_gfp(uobj, i, GFP_KERNEL); in shmem_read_mapping_page()
74 shmem_truncate_range(struct uvm_object *uobj, voff_t start, voff_t end) in shmem_truncate_range() argument
85 rw_enter(uobj->vmobjlock, RW_WRITER); in shmem_truncate_range()
86 (*uobj->pgops->pgo_put)(uobj, start, end, flags); in shmem_truncate_range()
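
Note: shmem_truncate_range() shows the pgo_put convention — the caller enters vmobjlock as writer and the pager consumes (releases) it on return. Dropping every page of an object works the same way (flag names from uvm_pager.h; 0/0 with PGO_ALLPAGES assumed to cover the whole object):

    rw_enter(uobj->vmobjlock, RW_WRITER);
    (void)(*uobj->pgops->pgo_put)(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
    /* no rw_exit(): pgo_put released the lock for us. */
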
/netbsd-src/sys/rump/librump/rumpkern/
vm.c
174 uvm_pagealloc_strat(struct uvm_object *uobj, voff_t off, struct vm_anon *anon, in uvm_pagealloc_strat() argument
179 KASSERT(uobj && rw_write_held(uobj->vmobjlock)); in uvm_pagealloc_strat()
189 pg->uobject = uobj; in uvm_pagealloc_strat()
191 if (radix_tree_insert_node(&uobj->uo_pages, off >> PAGE_SHIFT, in uvm_pagealloc_strat()
197 if (UVM_OBJ_IS_VNODE(uobj)) { in uvm_pagealloc_strat()
198 if (uobj->uo_npages == 0) { in uvm_pagealloc_strat()
199 struct vnode *vp = (struct vnode *)uobj; in uvm_pagealloc_strat()
206 uobj->uo_npages++; in uvm_pagealloc_strat()
218 if (!UVM_OBJ_IS_AOBJ(uobj)) { in uvm_pagealloc_strat()
238 struct uvm_object *uobj = pg->uobject; in uvm_pagefree() local
[all …]
/netbsd-src/tests/rump/kernspace/
busypage.c
45 static struct uvm_object *uobj; variable
60 rw_enter(uobj->vmobjlock, RW_READER); in thread()
61 uvm_pagewait(testpg, uobj->vmobjlock, "tw"); in thread()
74 uobj = uao_create(1, 0); in rumptest_busypage()
75 rw_enter(uobj->vmobjlock, RW_WRITER); in rumptest_busypage()
76 testpg = uvm_pagealloc(uobj, 0, NULL, 0); in rumptest_busypage()
77 rw_exit(uobj->vmobjlock); in rumptest_busypage()
93 rw_enter(uobj->vmobjlock, RW_WRITER); in rumptest_busypage()
95 rw_exit(uobj->vmobjlock); in rumptest_busypage()
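
Note: this test exercises the busy-page protocol — a thread that finds PG_BUSY set parks in uvm_pagewait(), which releases the object lock while sleeping. The waiting side, reduced to a sketch:

    rw_enter(uobj->vmobjlock, RW_READER);
    pg = uvm_pagelookup(uobj, 0);
    if (pg != NULL && (pg->flags & PG_BUSY) != 0)
            uvm_pagewait(pg, uobj->vmobjlock, "sketch");  /* drops the lock */
    else
            rw_exit(uobj->vmobjlock);
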
/netbsd-src/sys/rump/librump/rumpvfs/
vm_vfs.c
42 struct uvm_object *uobj = pgs[0]->uobject; in uvm_aio_aiodone_pages() local
46 rw_enter(uobj->vmobjlock, RW_WRITER); in uvm_aio_aiodone_pages()
63 rw_exit(uobj->vmobjlock); in uvm_aio_aiodone_pages()
72 struct uvm_object *uobj = NULL; in uvm_aio_aiodone() local
88 if (uobj == NULL) { in uvm_aio_aiodone()
89 uobj = pgs[i]->uobject; in uvm_aio_aiodone()
90 KASSERT(uobj != NULL); in uvm_aio_aiodone()
92 KASSERT(uobj == pgs[i]->uobject); in uvm_aio_aiodone()
/netbsd-src/sys/compat/linux32/arch/aarch64/
linux32_exec_machdep.c
56 struct uvm_object *uobj; in vmcmd_linux32_kuser_helper_map() local
67 uobj = *e->e_sigobject; in vmcmd_linux32_kuser_helper_map()
68 if (uobj == NULL) in vmcmd_linux32_kuser_helper_map()
74 (*uobj->pgops->pgo_reference)(uobj); in vmcmd_linux32_kuser_helper_map()
75 error = uvm_map(&p->p_vmspace->vm_map, &va, round_page(sz), uobj, 0, 0, in vmcmd_linux32_kuser_helper_map()
79 (*uobj->pgops->pgo_detach)(uobj); in vmcmd_linux32_kuser_helper_map()
/netbsd-src/usr.bin/pmap/
pmap.h
78 #define UVM_OBJ_IS_VNODE(uobj) ((uobj)->pgops == uvm_vnodeops) argument
79 #define UVM_OBJ_IS_AOBJ(uobj) ((uobj)->pgops == aobj_pager) argument
80 #define UVM_OBJ_IS_DEVICE(uobj) ((uobj)->pgops == uvm_deviceops) argument
81 #define UVM_OBJ_IS_UBCPAGER(uobj) ((uobj)->pgops == ubc_pager) argument
/netbsd-src/sys/miscfs/genfs/
genfs_io.c
110 struct uvm_object * const uobj = &vp->v_uobj; in genfs_getpages() local
134 KASSERT(rw_lock_held(uobj->vmobjlock)); in genfs_getpages()
135 KASSERT(rw_write_held(uobj->vmobjlock) || in genfs_getpages()
155 rw_exit(uobj->vmobjlock); in genfs_getpages()
197 rw_exit(uobj->vmobjlock); in genfs_getpages()
239 nfound = uvn_findpages(uobj, origoffset, &npages, in genfs_getpages()
285 rw_exit(uobj->vmobjlock); in genfs_getpages()
364 rw_enter(uobj->vmobjlock, RW_WRITER); in genfs_getpages()
374 if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], NULL, in genfs_getpages()
381 rw_exit(uobj->vmobjlock); in genfs_getpages()
[all …]
/netbsd-src/sys/modules/examples/fopsmapper/
fopsmapper.c
89 struct uvm_object *uobj; member
126 fo->uobj = uao_create(size, 0); in fopsmapper_mmap()
134 error = uvm_map(kernel_map, &va, fo->bufsize, fo->uobj, 0, 0, in fopsmapper_mmap()
138 uao_detach(fo->uobj); in fopsmapper_mmap()
145 uao_reference(fo->uobj); in fopsmapper_mmap()
146 *uobjp = fo->uobj; in fopsmapper_mmap()
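
Note: fopsmapper_mmap() is a compact template for exporting kernel memory to userland — uao_create() a backing object, uvm_map() it into kernel_map for the driver side (uvm_map() consumes that reference on success, hence the uao_detach() only on failure), then uao_reference() before handing it to the mmap machinery. The allocation half, reduced (UVM_MAPFLAG values assumed typical):

    struct uvm_object *uobj = uao_create(size, 0);
    vaddr_t va = 0;
    int error;

    error = uvm_map(kernel_map, &va, size, uobj, 0, 0,
        UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
            UVM_ADV_RANDOM, 0));
    if (error) {
            uao_detach(uobj);       /* uvm_map() did not take our reference */
            return error;
    }
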
/netbsd-src/sys/kern/
sysv_shm.c
207 struct uvm_object *uobj = NULL; in shm_delete_mapping() local
220 uobj = shmseg->_shm_internal; in shm_delete_mapping()
224 return uobj; in shm_delete_mapping()
313 struct uvm_object *uobj; in sys_shmdt() local
353 uobj = shm_delete_mapping(shmmap_s, shmmap_se); in sys_shmdt()
359 if (uobj != NULL) { in sys_shmdt()
360 uao_detach(uobj); in sys_shmdt()
384 struct uvm_object *uobj; in sys_shmat() local
450 uobj = shmseg->_shm_internal; in sys_shmat()
451 uao_reference(uobj); in sys_shmat()
[all …]
/netbsd-src/sys/external/bsd/drm2/ttm/
ttm_bus_dma.c
99 struct uvm_object *const uobj = ttm_dma->ttm.swap_storage; in ttm_bus_dma_put() local
120 KASSERT(uobj->pgops->pgo_put); in ttm_bus_dma_put()
123 rw_enter(uobj->vmobjlock, RW_WRITER); in ttm_bus_dma_put()
124 (void)(*uobj->pgops->pgo_put)(uobj, 0, size, flags); in ttm_bus_dma_put()
