/netbsd-src/sys/uvm/
uvm_page.c
  in uvm_pageinsert_object():
    207: uvm_pageinsert_object(struct uvm_object *uobj, struct vm_page *pg)
    210: KASSERT(uobj == pg->uobject);
    212: KASSERT((pg->flags & PG_TABLED) == 0);
    214: if ((pg->flags & PG_STAT) != 0) {
    216: const unsigned int status = pg->flags & (PG_CLEAN | PG_DIRTY);
    218: if ((pg->flags & PG_FILE) != 0) {
    235: pg->flags |= PG_TABLED;
  in uvm_pageinsert_tree():
    240: uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
    242: const uint64_t idx = pg->offset >> PAGE_SHIFT;
    247: error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
  [all …]
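The uvm_page.c matches show a page being entered into its owning object's radix tree keyed by pg->offset >> PAGE_SHIFT, one tree slot per page-sized chunk of the object. A minimal sketch of that index computation follows; the toy struct and the PAGE_SHIFT value are assumptions for illustration, not the kernel's definitions.

    /* Sketch only: offset-to-index mapping, assumed 4 KiB pages. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assumed */

    struct toy_page {
        uint64_t offset;                    /* byte offset within the object */
    };

    static uint64_t
    page_tree_index(const struct toy_page *pg)
    {
        /* one radix-tree slot per page-sized chunk of the object */
        return pg->offset >> PAGE_SHIFT;
    }

    int
    main(void)
    {
        struct toy_page pg = { .offset = 3 * 4096 };
        printf("index %llu\n", (unsigned long long)page_tree_index(&pg));
        return 0;
    }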
|
uvm_pdpolicy_clockpro.c
  in clockpro_setq():
    147: clockpro_setq(struct vm_page *pg, int qidx)
    152: pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
  in clockpro_getq():
    156: clockpro_getq(struct vm_page *pg)
    160: qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
  in pageq_insert_tail():
    236: pageq_insert_tail(pageq_t *q, struct vm_page *pg)
    239: TAILQ_INSERT_TAIL(&q->q_q, pg, pdqueue);
  in pageq_insert_head():
    245: pageq_insert_head(pageq_t *q, struct vm_page *pg)
    248: TAILQ_INSERT_HEAD(&q->q_q, pg, pdqueue);
  in pageq_remove():
    254: pageq_remove(pageq_t *q, struct vm_page *pg)
    258: KASSERT(clockpro_queue(&clockpro, clockpro_getq(pg)) == q);
  [all …]
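clockpro_setq()/clockpro_getq() above store a small queue index inside the page's pqflags word by multiplying and dividing with PQ_QFACTOR. A self-contained sketch of that encoding; the mask and factor values here are assumptions (the real ones live in the clockpro source):

    #include <assert.h>
    #include <stdio.h>

    #define PQ_QFACTOR 0x100   /* assumed: lowest bit of the queue field */
    #define PQ_QMASK   0x700   /* assumed: three-bit queue field */

    static unsigned int pqflags;

    static void
    setq(int qidx)
    {
        /* clear the queue field, then deposit the scaled index */
        pqflags = (pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
    }

    static int
    getq(void)
    {
        return (pqflags & PQ_QMASK) / PQ_QFACTOR;
    }

    int
    main(void)
    {
        for (int q = 0; q < 8; q++) {
            setq(q);
            assert(getq() == q);    /* the index round-trips */
        }
        printf("queue index round-trips through pqflags\n");
        return 0;
    }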
|
uvm_loan.c
  in uvm_loananon():
    346: struct vm_page *pg;
    359: pg = anon->an_page;
    360: if (pg && (pg->flags & PG_ANON) != 0 && anon->an_ref == 1) {
    361: if (pg->wire_count > 0) {
    363:     (uintptr_t)pg, 0, 0, 0);
    369: pmap_page_protect(pg, VM_PROT_READ);
    415: pg = anon->an_page;
    416: if (pg->wire_count > 0) {
    417: UVMHIST_LOG(loanhist, "->K wired %#jx", (uintptr_t)pg, 0, 0, 0);
    418: KASSERT(pg->uobject == NULL);
  [all …]
|
uvm_anon.c
  in uvm_anfree():
    107: struct vm_page *pg = anon->an_page, *pg2 __diagused;
    119: if (__predict_true(pg != NULL)) {
    128: if (__predict_false(pg->loan_count != 0)) {
    130: KASSERT(pg2 == pg);
    139: if (__predict_false(pg->uobject != NULL)) {
    140: mutex_enter(&pg->interlock);
    141: KASSERT(pg->loan_count > 0);
    142: pg->loan_count--;
    143: pg->uanon = NULL;
    144: mutex_exit(&pg->interlock);
  [all …]
|
uvm_pdpolicy_clock.c
  in uvmpdpol_selectvictim():
    248: struct vm_page *pg;
    256: pg = TAILQ_NEXT(&ss->ss_marker, pdqueue);
    257: if (pg == NULL) {
    260: KASSERT((pg->flags & PG_MARKER) == 0);
    268: mutex_enter(&pg->interlock);
    269: if (uvmpdpol_pagerealize_locked(pg)) {
    270: mutex_exit(&pg->interlock);
    279: TAILQ_INSERT_AFTER(&pdpol_state.s_inactiveq, pg,
    289: anon = pg->uanon;
    290: uobj = pg->uobject;
  [all …]
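The selectvictim matches show the classic marker-page scan: a dummy entry flagged PG_MARKER sits in the inactive queue, each step inspects TAILQ_NEXT(marker), and the marker is then reinserted after the page just examined so the scan position survives dropping locks. A userland sketch of the pattern, with invented types, assuming <sys/queue.h>:

    #include <assert.h>
    #include <stdio.h>
    #include <sys/queue.h>

    struct toy_page {
        int is_marker;
        int id;
        TAILQ_ENTRY(toy_page) pdqueue;
    };

    TAILQ_HEAD(toy_queue, toy_page);

    int
    main(void)
    {
        struct toy_queue q = TAILQ_HEAD_INITIALIZER(q);
        struct toy_page pages[4] = { { 0, 0 }, { 0, 1 }, { 0, 2 }, { 0, 3 } };
        struct toy_page marker = { .is_marker = 1 };
        struct toy_page *pg;

        for (int i = 0; i < 4; i++)
            TAILQ_INSERT_TAIL(&q, &pages[i], pdqueue);

        /* start the scan: the marker goes in front of the queue */
        TAILQ_INSERT_HEAD(&q, &marker, pdqueue);
        while ((pg = TAILQ_NEXT(&marker, pdqueue)) != NULL) {
            assert(!pg->is_marker);     /* mirrors the PG_MARKER KASSERT */
            /* advance the marker past the page we are inspecting */
            TAILQ_REMOVE(&q, &marker, pdqueue);
            TAILQ_INSERT_AFTER(&q, pg, &marker, pdqueue);
            printf("visit page %d\n", pg->id);
        }
        TAILQ_REMOVE(&q, &marker, pdqueue);
        return 0;
    }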
|
uvm_page_status.c
  in uvm_pagegetdirty():
    60: uvm_pagegetdirty(struct vm_page *pg)
    62: struct uvm_object * const uobj __diagused = pg->uobject;
    64: KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0);
    65: KASSERT(uvm_page_owner_locked_p(pg, false));
    66: KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
    67:     uvm_obj_page_dirty_p(pg));
    68: return pg->flags & (PG_CLEAN|PG_DIRTY);
  in uvm_pagemarkdirty():
    84: uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
    86: struct uvm_object * const uobj = pg->uobject;
    87: const unsigned int oldstatus = uvm_pagegetdirty(pg);
  [all …]
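uvm_pagegetdirty() reads a three-state dirtiness value out of two flag bits; the KASSERT at line 64 forbids the fourth combination (both bits set). A sketch of that encoding with assumed bit values:

    #include <assert.h>
    #include <stdio.h>

    #define PG_CLEAN 0x1    /* known clean (assumed value) */
    #define PG_DIRTY 0x2    /* known dirty (assumed value) */
    /* neither bit set: state unknown, must be treated as possibly dirty */

    static const char *
    dirty_state(unsigned int flags)
    {
        /* both bits set is an illegal state, as in the excerpt's KASSERT */
        assert((~flags & (PG_CLEAN | PG_DIRTY)) != 0);
        switch (flags & (PG_CLEAN | PG_DIRTY)) {
        case PG_CLEAN: return "clean";
        case PG_DIRTY: return "dirty";
        default:       return "unknown (treat as dirty)";
        }
    }

    int
    main(void)
    {
        printf("%s\n", dirty_state(PG_CLEAN));
        printf("%s\n", dirty_state(PG_DIRTY));
        printf("%s\n", dirty_state(0));
        return 0;
    }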
|
uvm_fault.c
  in uvmfault_anonflush():
    188: struct vm_page *pg;
    194: pg = anons[lcv]->an_page;
    195: if (pg && (pg->flags & PG_BUSY) == 0) {
    196: uvm_pagelock(pg);
    197: uvm_pagedeactivate(pg);
    198: uvm_pageunlock(pg);
  in uvmfault_anonget():
    274: struct vm_page *pg;
    301: pg = anon->an_page;
    309: if (pg && pg->loan_count)
    310: pg = uvm_anon_lockloanpg(anon);
  [all …]
|
uvm_pager.c
  in uvm_aio_aiodone_pages():
    330: struct vm_page *pg;
    342: pg = pgs[0];
    343: swap = (pg->uanon != NULL && pg->uobject == NULL) ||
    344:     (pg->flags & PG_AOBJ) != 0;
    346: uobj = pg->uobject;
    352: if (pg->uobject != NULL) {
    353: swslot = uao_find_swslot(pg->uobject,
    354:     pg->offset >> PAGE_SHIFT);
    356: KASSERT(pg->uanon != NULL);
    357: swslot = pg->uanon->an_swslot;
  [all …]
|
uvm_object.c
  in uvm_obj_wirepages():
    135: struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
    165: pg = uvm_loanbreak(pgs[i]);
    166: if (!pg) {
    173: pgs[i] = pg;
  in uvm_obj_unwirepages():
    219: struct vm_page *pg;
    224: pg = uvm_pagelookup(uobj, offset);
    226: KASSERT(pg != NULL);
    227: KASSERT(!(pg->flags & PG_RELEASED));
    229: uvm_pagelock(pg);
    230: uvm_pageunwire(pg);
  [all …]
|
uvm_bio.c
  in ubc_fault_page():
    240:     struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
    246: KASSERT(rw_write_held(pg->uobject->vmobjlock));
    248: KASSERT((pg->flags & PG_FAKE) == 0);
    249: if (pg->flags & PG_RELEASED) {
    250: uvm_pagefree(pg);
    253: if (pg->loan_count != 0) {
    265: newpg = uvm_loanbreak(pg);
    267: uvm_page_unbusy(&pg, 1);
    270: pg = newpg;
    282: KASSERT((pg->flags & PG_RDONLY) == 0 ||
  [all …]
|
uvm_pgflcache.c
  in uvm_pgflcache_fill():
    119: struct vm_page *pg;
    140: pg = LIST_FIRST(head);
    141: while (__predict_true(pg != NULL && count < FILLPGS)) {
    142: KASSERT(pg->flags & PG_FREE);
    143: KASSERT(uvm_page_get_bucket(pg) == b);
    144: pcc->pages[count++] = pg;
    145: pg = LIST_NEXT(pg, pageq.list);
    149: head->lh_first = pg;
    150: if (__predict_true(pg != NULL)) {
    151: pg->pageq.list.le_prev = &head->lh_first;
  [all …]
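uvm_pgflcache_fill() detaches a prefix of a free-page LIST in bulk: it walks up to FILLPGS entries, then points lh_first at the first page it did not take and repairs that page's le_prev back-pointer. A userland sketch of the same splice, with invented types:

    #include <stdio.h>
    #include <sys/queue.h>

    #define FILLPGS 3

    struct toy_page {
        int id;
        LIST_ENTRY(toy_page) list;
    };

    LIST_HEAD(toy_head, toy_page);

    int
    main(void)
    {
        struct toy_head head = LIST_HEAD_INITIALIZER(head);
        struct toy_page pages[5];
        struct toy_page *taken[FILLPGS], *pg;
        int count = 0;

        for (int i = 4; i >= 0; i--) {
            pages[i].id = i;
            LIST_INSERT_HEAD(&head, &pages[i], list);
        }

        /* grab up to FILLPGS pages off the front of the list */
        pg = LIST_FIRST(&head);
        while (pg != NULL && count < FILLPGS) {
            taken[count++] = pg;
            pg = LIST_NEXT(pg, list);
        }
        /* splice: the list now begins at the first page we did not take */
        head.lh_first = pg;
        if (pg != NULL)
            pg->list.le_prev = &head.lh_first;

        for (int i = 0; i < count; i++)
            printf("took page %d\n", taken[i]->id);
        LIST_FOREACH(pg, &head, list)
            printf("still listed: page %d\n", pg->id);
        return 0;
    }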
|
uvm_pglist.c
  in uvm_pglist_add():
    84: uvm_pglist_add(struct vm_page *pg, struct pglist *rlist)
    89: pgfl = &uvm.page_free[uvm_page_get_freelist(pg)];
    90: pgb = pgfl->pgfl_buckets[uvm_page_get_bucket(pg)];
    94: LIST_FOREACH(tp, &pgb->pgb_colors[VM_PGCOLOR(pg)], pageq.list) {
    95: if (tp == pg)
    101: LIST_REMOVE(pg, pageq.list);
    104: pg->flags = PG_CLEAN;
    105: pg->uobject = NULL;
    106: pg->uanon = NULL;
    107: TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
  [all …]
|
uvm_vnode.c
  in uvn_findpage():
    291: struct vm_page *pg;
    314: pg = uvm_page_array_fill_and_peek(a, offset, nleft);
    315: if (pg != NULL && pg->offset != offset) {
    319:     == (pg->offset < offset));
    323: pg = NULL;
    331: if (pg == NULL) {
    336: pg = uvm_pagealloc(uobj, offset, NULL,
    338: if (pg == NULL) {
    350:     (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
    351: KASSERTMSG(uvm_pagegetdirty(pg) ==
  [all …]
|
uvm_pdaemon.c
  in uvmpd_page_owner_lock():
    383: uvmpd_page_owner_lock(struct vm_page *pg)
    385: struct uvm_object *uobj = pg->uobject;
    386: struct vm_anon *anon = pg->uanon;
    389: KASSERT(mutex_owned(&pg->interlock));
    398: KASSERTMSG(slock != NULL, "pg %p uobj %p, NULL lock", pg, uobj);
    401: KASSERTMSG(slock != NULL, "pg %p anon %p, NULL lock", pg, anon);
  in uvmpd_trylockowner():
    417: uvmpd_trylockowner(struct vm_page *pg)
    421: KASSERT(mutex_owned(&pg->interlock));
    423: slock = uvmpd_page_owner_lock(pg);
    426: mutex_exit(&pg->interlock);
  [all …]
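uvmpd_trylockowner() shows the pagedaemon's lock-ordering escape hatch: with only the page's interlock held, it try-locks the owner's (object's or anon's) lock and backs off instead of sleeping. A sketch of the pattern with pthread mutexes standing in for the kernel locks:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t interlock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t owner_lock = PTHREAD_MUTEX_INITIALIZER;

    /* returns 1 with both locks held, 0 with neither */
    static int
    trylock_owner(void)
    {
        pthread_mutex_lock(&interlock);
        if (pthread_mutex_trylock(&owner_lock) != 0) {
            /* owner busy: back off rather than sleep holding the interlock */
            pthread_mutex_unlock(&interlock);
            return 0;
        }
        return 1;
    }

    int
    main(void)
    {
        if (trylock_owner()) {
            printf("got both locks\n");
            pthread_mutex_unlock(&owner_lock);
            pthread_mutex_unlock(&interlock);
        }
        return 0;
    }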
|
/netbsd-src/regress/sys/uvm/pdsim/
pdsim.c
  in pdsim_pagealloc():
    61: struct vm_page *pg;
    63: pg = TAILQ_FIRST(&freeq);
    64: if (pg == NULL) {
    67: TAILQ_REMOVE(&freeq, pg, pageq);
    68: pg->offset = idx << PAGE_SHIFT;
    69: pg->uanon = NULL;
    70: pg->uobject = obj;
    71: pg->pqflags = 0;
    72: obj->pages[idx] = pg;
    76: return pg;
  [all …]
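pdsim_pagealloc() is a plain free-list allocator: pop the first page off a TAILQ of free pages and stamp it with its new identity. A simplified, runnable sketch with stand-in structures:

    #include <stdio.h>
    #include <sys/queue.h>

    #define PAGE_SHIFT 12   /* assumed */

    struct toy_page {
        unsigned long offset;
        void *uobject;
        TAILQ_ENTRY(toy_page) pageq;
    };

    static TAILQ_HEAD(, toy_page) freeq = TAILQ_HEAD_INITIALIZER(freeq);

    static struct toy_page *
    toy_pagealloc(void *obj, unsigned long idx)
    {
        struct toy_page *pg = TAILQ_FIRST(&freeq);

        if (pg == NULL)
            return NULL;                /* out of free pages */
        TAILQ_REMOVE(&freeq, pg, pageq);
        pg->offset = idx << PAGE_SHIFT; /* identity within the object */
        pg->uobject = obj;
        return pg;
    }

    int
    main(void)
    {
        struct toy_page pages[2];

        TAILQ_INSERT_TAIL(&freeq, &pages[0], pageq);
        TAILQ_INSERT_TAIL(&freeq, &pages[1], pageq);
        printf("allocated %p\n", (void *)toy_pagealloc(NULL, 7));
        return 0;
    }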
|
/netbsd-src/sys/rump/librump/rumpkern/
vm.c
  in pgctor():
    148: struct vm_page *pg = obj;
    150: memset(pg, 0, sizeof(*pg));
    151: pg->uanon = rump_hypermalloc(PAGE_SIZE, PAGE_SIZE,
    153: return pg->uanon == NULL;
  in pgdtor():
    159: struct vm_page *pg = obj;
    161: rump_hyperfree(pg->uanon, PAGE_SIZE);
  in uvm_pagealloc_strat():
    177: struct vm_page *pg;
    182: pg = pool_cache_get(&pagecache, PR_NOWAIT);
    183: if (__predict_false(pg == NULL)) {
    186: mutex_init(&pg->interlock, MUTEX_DEFAULT, IPL_NONE);
  [all …]
|
/netbsd-src/lib/libc/db/btree/
bt_delete.c
  in __bt_delete():
    99: if ((h = mpool_get(t->bt_mp, c->pg.pgno, 0)) == NULL)
    110: status = __bt_dleaf(t, NULL, h, (u_int)c->pg.index);
  in __bt_stkacq():
    166: if (h->pgno == c->pg.pgno)
    175: while (h->pgno != c->pg.pgno) {
    215: if (h->pgno == c->pg.pgno)
    230: while (h->pgno != c->pg.pgno) {
    273: return ((*hp = mpool_get(t->bt_mp, c->pg.pgno, 0)) == NULL);
  in __bt_pdelete():
    376: PAGE *pg;
    396: if ((pg = mpool_get(t->bt_mp, parent->pgno, 0)) == NULL)
    400: bi = GETBINTERNAL(pg, idx);
  [all …]
|
bt_seq.c
  in __bt_seqset():
    159: pgno_t pg;
    183: for (pg = P_ROOT;;) {
    184: if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
    195: pg = GETBINTERNAL(h, 0)->pgno;
    207: for (pg = P_ROOT;;) {
    208: if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
    219: pg = GETBINTERNAL(h, NEXTINDEX(h) - 1)->pgno;
  in __bt_seqadv():
    251: pgno_t pg;
    296: c->pg.pgno = ep->page->pgno;
    297: c->pg.index = ep->index;
  [all …]
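The two loops in __bt_seqset() descend from the root page, following the first child for a forward cursor and the last child (index NEXTINDEX(h) - 1) for a backward one, until a leaf is reached. A sketch of that descent over an invented in-memory node layout (not db(3)'s page format):

    #include <stdio.h>

    struct toy_node {
        int is_leaf;
        int nchildren;
        struct toy_node *children[4];
        int first_key;              /* stand-in for leaf contents */
    };

    static struct toy_node *
    descend(struct toy_node *root, int leftmost)
    {
        struct toy_node *h = root;

        /* follow first (leftmost) or last (rightmost) child to a leaf */
        while (!h->is_leaf)
            h = leftmost ? h->children[0] : h->children[h->nchildren - 1];
        return h;
    }

    int
    main(void)
    {
        struct toy_node leaf_a = { 1, 0, { 0 }, 10 };
        struct toy_node leaf_b = { 1, 0, { 0 }, 99 };
        struct toy_node root = { 0, 2, { &leaf_a, &leaf_b }, 0 };

        printf("first leaf key %d, last leaf key %d\n",
            descend(&root, 1)->first_key, descend(&root, 0)->first_key);
        return 0;
    }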
|
/netbsd-src/sys/arch/xen/x86/
xen_bus_dma.c
  in _xen_alloc_contig():
    83: struct vm_page *pg, *pgnext;
    104: for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.queue.tqe_next) {
    105: pa = VM_PAGE_TO_PHYS(pg);
    140: pg = NULL;
    145: for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
    146: pgnext = pg->pageq.queue.tqe_next;
    147: pa = VM_PAGE_TO_PHYS(pg);
    155: for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
    156: pgnext = pg->pageq.queue.tqe_next;
    158: TAILQ_REMOVE(mlistp, pg, pageq.queue);
  [all …]
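The first loop in _xen_alloc_contig() walks the page list returned by the allocator and checks the pages' machine addresses; if they do not form one contiguous ascending run, later loops exchange pages with the hypervisor. A sketch of just the contiguity check, with an assumed PAGE_SIZE:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL    /* assumed */

    static bool
    pages_contiguous(const unsigned long *pa, int npages)
    {
        for (int i = 1; i < npages; i++) {
            /* hole or reordering: pages would need to be exchanged */
            if (pa[i] != pa[i - 1] + PAGE_SIZE)
                return false;
        }
        return true;
    }

    int
    main(void)
    {
        unsigned long run[3] = { 0x10000, 0x11000, 0x12000 };
        printf("contiguous: %d\n", pages_contiguous(run, 3));
        return 0;
    }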
|
/netbsd-src/sys/arch/powerpc/ibm4xx/
pmap.c
  in pa_to_pv():
    206: psize_t pg;
    208: bank = uvm_physseg_find(atop(pa), &pg);
    211: return &uvm_physseg_get_pmseg(bank)->pvent[pg];
  in pa_to_attr():
    218: psize_t pg;
    220: bank = uvm_physseg_find(atop(pa), &pg);
    223: return &uvm_physseg_get_pmseg(bank)->attrs[pg];
  in pmap_growkernel():
    534: paddr_t pg;
    547: pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
    548: else if (!uvm_page_physget(&pg))
    550: if (!pg)
  [all …]
|
/netbsd-src/sys/ufs/lfs/
lfs_pages.c
  in wait_for_page():
    149: wait_for_page(struct vnode *vp, struct vm_page *pg, const char *label)
    152: if ((pg->flags & PG_BUSY) == 0)
    158: if (label != NULL && pg != lastpg) {
    159: if (pg->owner_tag) {
    162:     pg, pg->owner, pg->lowner, pg->owner_tag);
    165:     curproc->p_pid, curlwp->l_lid, label, pg);
    168: lastpg = pg;
    171: uvm_pagewait(pg, vp->v_uobj.vmobjlock, "lfsput");
  in write_and_wait():
    188: write_and_wait(struct lfs *fs, struct vnode *vp, struct vm_page *pg,
    197: if (pg == NULL)
  [all …]
|
/netbsd-src/external/bsd/openldap/dist/contrib/slapd-modules/acl/
posixgroup.c
  in pg_dynacl_parse():
    50: pg_t *pg;
    57: pg = ch_calloc( 1, sizeof( pg_t ) );
    59: pg->pg_style = style;
    61: switch ( pg->pg_style ) {
    63: rc = dnNormalize( 0, NULL, NULL, &pat, &pg->pg_pat, NULL );
    73: ber_dupbv( &pg->pg_pat, &pat );
    79:     fname, lineno, style_strings[ pg->pg_style ] );
    123: *privp = (void *)pg;
    127: (void)pg_dynacl_destroy( (void *)pg );
  in pg_dynacl_unparse():
    137: pg_t *pg = (pg_t *)priv;
  [all …]
|
/netbsd-src/sys/miscfs/genfs/
genfs_io.c
  in genfs_rel_pages():
    74: struct vm_page *pg = pgs[i];
    76: if (pg == NULL || pg == PGO_DONTCARE)
    78: KASSERT(uvm_page_owner_locked_p(pg, true));
    79: if (pg->flags & PG_FAKE) {
    80: pg->flags |= PG_RELEASED;
  in genfs_getpages():
    229: struct vm_page *pg;
    235: pg = ap->a_m[i];
    236: KASSERT(pg == NULL || pg == PGO_DONTCARE);
    258: pg = ap->a_m[i];
    260: if (pg != NULL && pg != PGO_DONTCARE) {
  [all …]
|
/netbsd-src/sys/compat/common/
tty_60.c
  in ptmget_to_ptmget60():
    55: ptmget_to_ptmget60(struct ptmget *pg, struct compat_60_ptmget *pg60)
    58: pg60->cfd = pg->cfd;
    59: pg60->sfd = pg->sfd;
    60: strlcpy(pg60->cn, pg->cn, sizeof(pg60->cn));
    61: strlcpy(pg60->sn, pg->sn, sizeof(pg60->sn));
    62: if (strlen(pg->cn) >= sizeof(pg60->cn)
    63:     || strlen(pg->sn) >= sizeof(pg60->sn))
  in compat_60_ptmget_ioctl():
    75: struct ptmget *pg;
    87: pg = kmem_alloc(sizeof(*pg), KM_SLEEP);
    89: ret = (cd->d_ioctl)(dev, newcmd, pg, flag, l);
  [all …]
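ptmget_to_ptmget60() copies pty names into the smaller compat-struct fields with strlcpy() and then detects truncation by comparing the source strlen() against the destination size, since strlcpy() always NUL-terminates but may cut the string short. A sketch of that idiom (strlcpy is a BSD extension; the sizes here are illustrative):

    #include <stdio.h>
    #include <string.h>

    static int
    copy_name(char *dst, size_t dstsize, const char *src)
    {
        strlcpy(dst, src, dstsize);
        /* source longer than the field: the copy was silently truncated */
        if (strlen(src) >= dstsize)
            return -1;
        return 0;
    }

    int
    main(void)
    {
        char small[8];

        printf("short: %d\n", copy_name(small, sizeof(small), "pts/1"));
        printf("long:  %d\n", copy_name(small, sizeof(small), "pts/123456"));
        return 0;
    }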
|
/netbsd-src/sys/arch/atari/atari/
atari_init.c
  in start_c():
    215: pt_entry_t *pg, *epg;
    441: pg = (pt_entry_t *)ptpa;
    442: *pg++ = PG_NV;
    446: *pg++ = pg_proto;
    467: *pg++ = pg_proto;
    473: *pg++ = pg_proto;
    488: *pg++ = pg_proto;
    497: while (pg < epg)
    498: *pg++ = PG_NV;
    512: pg = (pt_entry_t *)ptpa;
  [all …]
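The start_c() matches show early page-table setup by prototype: pg_proto carries the attribute bits plus the current physical address, each store maps the next page, and leftover slots are filled with the invalid entry PG_NV. A sketch with an invented 32-bit PTE layout (the real m68k constants differ):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t pt_entry_t;

    #define PG_NV     0x0       /* invalid entry, assumed */
    #define PG_RW     0x1       /* valid + writable attribute bits, assumed */
    #define PAGE_SIZE 4096U

    int
    main(void)
    {
        pt_entry_t table[8], *pg = table, *epg = table + 8;
        pt_entry_t pg_proto = 0x100000 | PG_RW;  /* first PA + attributes */

        /* map four pages, each entry pointing one page further */
        for (int i = 0; i < 4; i++) {
            *pg++ = pg_proto;
            pg_proto += PAGE_SIZE;
        }
        /* invalidate the rest of the table */
        while (pg < epg)
            *pg++ = PG_NV;

        for (int i = 0; i < 8; i++)
            printf("pte[%d] = 0x%08x\n", i, (unsigned)table[i]);
        return 0;
    }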
|