Lines Matching full:pages
52 * keeping the list of resident pages, it may also keep a list of allocated
59 * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
92 #define UAO_SWHASH_BUCKETS(pages) \
93 (min((pages) >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
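The macro above clusters pages into hash elements (UAO_SWHASH_CLUSTER_SIZE pages each) and caps the bucket count. A minimal sketch of the arithmetic; the concrete constant values are assumptions, not taken from this listing:

    #define UAO_SWHASH_CLUSTER_SHIFT 4      /* assumed: 16 pages per cluster */
    #define UAO_SWHASH_MAXBUCKETS    256    /* assumed cap */
    #define UAO_SWHASH_BUCKETS(pages) \
        (min((pages) >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))

    /* e.g. pages = 1024  -> min(1024 >> 4, 256) = 64 buckets;
     * pages = 65536 -> min(4096, 256) = 256, i.e. the cap kicks in */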
125 struct uvm_object u_obj; /* has: pgops, memt, #pages, #refs */
126 int u_pages; /* number of pages in entire object */
387 * Shrink an aobj to a given number of pages. The procedure is always the same:
389 * resources, flush pages and drop swap slots.
404 uao_shrink_hash(struct uvm_object *uobj, int pages)
418 if (UAO_SWHASH_BUCKETS(aobj->u_pages) == UAO_SWHASH_BUCKETS(pages)) {
419 uao_shrink_flush(uobj, pages, aobj->u_pages);
420 aobj->u_pages = pages;
424 new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
429 uao_shrink_flush(uobj, pages, aobj->u_pages);
446 aobj->u_pages = pages;
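Read together, the uao_shrink_hash() fragments above suggest a fast path when the bucket count is unchanged, and a slow path that allocates the smaller hash before flushing, so that an allocation failure leaves the object untouched. A reconstruction sketch; the malloc flag combination and the elided rehash step are assumptions:

    if (UAO_SWHASH_BUCKETS(aobj->u_pages) == UAO_SWHASH_BUCKETS(pages)) {
        /* bucket count unchanged: flush the truncated range and finish */
        uao_shrink_flush(uobj, pages, aobj->u_pages);
        aobj->u_pages = pages;
        return 0;
    }

    new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
        M_WAITOK | M_CANFAIL, &new_hashmask);   /* flags assumed */
    if (new_swhash == NULL)
        return ENOMEM;

    uao_shrink_flush(uobj, pages, aobj->u_pages);
    /* ... move surviving hash elements into new_swhash, free old hash ... */
    aobj->u_pages = pages;
    return 0;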
453 uao_shrink_convert(struct uvm_object *uobj, int pages)
459 new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
464 uao_shrink_flush(uobj, pages, aobj->u_pages);
467 for (i = 0; i < pages; i++) {
483 aobj->u_pages = pages;
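The conversion path allocates the flat array first, flushes the discarded range, then walks the surviving page indices pulling slots out of the hash. A sketch; uao_find_swhash_elt() and UAO_SWHASH_ELT_PAGESLOT() are helper names assumed from context, not present in this listing:

    new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
        M_WAITOK | M_CANFAIL | M_ZERO);         /* flags assumed */
    if (new_swslots == NULL)
        return ENOMEM;

    uao_shrink_flush(uobj, pages, aobj->u_pages);

    for (i = 0; i < pages; i++) {
        elt = uao_find_swhash_elt(aobj, i, FALSE);  /* assumed helper */
        if (elt != NULL)
            new_swslots[i] = UAO_SWHASH_ELT_PAGESLOT(elt, i);
    }
    /* ... free the old hash, install new_swslots ... */
    aobj->u_pages = pages;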
489 uao_shrink_array(struct uvm_object *uobj, int pages)
494 new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
499 uao_shrink_flush(uobj, pages, aobj->u_pages);
501 for (i = 0; i < pages; i++)
507 aobj->u_pages = pages;
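The pure-array case is the simplest of the three: allocate the smaller array, flush, copy the surviving slots, swap the arrays. A sketch pieced together from the matches above; the allocation flags and three-argument free() form are assumptions:

    new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
        M_WAITOK | M_CANFAIL | M_ZERO);
    if (new_swslots == NULL)
        return ENOMEM;

    uao_shrink_flush(uobj, pages, aobj->u_pages);

    for (i = 0; i < pages; i++)
        new_swslots[i] = aobj->u_swslots[i];

    free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));
    aobj->u_swslots = new_swslots;
    aobj->u_pages = pages;
    return 0;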
513 uao_shrink(struct uvm_object *uobj, int pages)
517 KASSERT(pages < aobj->u_pages);
525 if (pages > UAO_SWHASH_THRESHOLD)
526 return uao_shrink_hash(uobj, pages); /* case 3 */
528 return uao_shrink_convert(uobj, pages); /* case 1 */
530 return uao_shrink_array(uobj, pages); /* case 2 */
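The "case N" comments line up with the threshold test as follows; the middle condition is inferred, since only the first and last branches appear verbatim in the matches:

    KASSERT(pages < aobj->u_pages);

    if (pages > UAO_SWHASH_THRESHOLD)
        return uao_shrink_hash(uobj, pages);    /* case 3: hash stays hash */
    else if (aobj->u_pages > UAO_SWHASH_THRESHOLD)  /* inferred condition */
        return uao_shrink_convert(uobj, pages); /* case 1: hash -> array */
    else
        return uao_shrink_array(uobj, pages);   /* case 2: array stays array */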
534 * Grow an aobj to a given number of pages. Right now we only adjust the swap
542 uao_grow_array(struct uvm_object *uobj, int pages)
549 new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
560 aobj->u_pages = pages;
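Growing the array mirrors the shrink path but needs no flush, since no pages are discarded; the new tail is simply zeroed (no swap slots yet). A sketch, flags assumed:

    new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
        M_WAITOK | M_CANFAIL | M_ZERO);
    if (new_swslots == NULL)
        return ENOMEM;

    for (i = 0; i < aobj->u_pages; i++)     /* copy the existing slots */
        new_swslots[i] = aobj->u_swslots[i];

    free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));
    aobj->u_swslots = new_swslots;
    aobj->u_pages = pages;
    return 0;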
566 uao_grow_hash(struct uvm_object *uobj, int pages)
574 KASSERT(pages > UAO_SWHASH_THRESHOLD);
580 if (UAO_SWHASH_BUCKETS(aobj->u_pages) == UAO_SWHASH_BUCKETS(pages)) {
581 aobj->u_pages = pages;
585 KASSERT(UAO_SWHASH_BUCKETS(aobj->u_pages) < UAO_SWHASH_BUCKETS(pages));
587 new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
603 aobj->u_pages = pages;
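Per the fragments, growing within the hash is free when the bucket count does not change (only u_pages moves), and otherwise the count strictly increases, which is what the KASSERT at 585 checks. A sketch of both paths; the allocation flags and the elided rehash loop are assumptions:

    if (UAO_SWHASH_BUCKETS(aobj->u_pages) == UAO_SWHASH_BUCKETS(pages)) {
        aobj->u_pages = pages;      /* same buckets: nothing to rehash */
        return 0;
    }

    KASSERT(UAO_SWHASH_BUCKETS(aobj->u_pages) < UAO_SWHASH_BUCKETS(pages));

    new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
        M_WAITOK | M_CANFAIL, &new_hashmask);
    if (new_swhash == NULL)
        return ENOMEM;
    /* ... rehash every element into new_swhash, free the old hash ... */
    aobj->u_pages = pages;
    return 0;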
610 uao_grow_convert(struct uvm_object *uobj, int pages)
618 new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
637 aobj->u_pages = pages;
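Crossing the threshold upward converts array to hash: build the hash, insert every non-empty array slot, then drop the array. A sketch; the uao_set_swslot() insertion step is an assumption about how the migration is done:

    new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
        M_WAITOK | M_CANFAIL, &new_hashmask);
    if (new_swhash == NULL)
        return ENOMEM;

    old_swslots = aobj->u_swslots;
    aobj->u_swhash = new_swhash;
    aobj->u_swhashmask = new_hashmask;

    for (i = 0; i < aobj->u_pages; i++)
        if (old_swslots[i] != 0)
            uao_set_swslot(uobj, i, old_swslots[i]);    /* assumed step */

    free(old_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));
    aobj->u_pages = pages;
    return 0;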
643 uao_grow(struct uvm_object *uobj, int pages)
647 KASSERT(pages > aobj->u_pages);
655 if (pages <= UAO_SWHASH_THRESHOLD)
656 return uao_grow_array(uobj, pages); /* case 2 */
658 return uao_grow_hash(uobj, pages); /* case 1 */
660 return uao_grow_convert(uobj, pages);
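uao_grow() mirrors the shrink dispatch; again the middle condition is inferred rather than matched:

    KASSERT(pages > aobj->u_pages);

    if (pages <= UAO_SWHASH_THRESHOLD)
        return uao_grow_array(uobj, pages);     /* case 2: stays an array */
    else if (aobj->u_pages > UAO_SWHASH_THRESHOLD)  /* inferred condition */
        return uao_grow_hash(uobj, pages);      /* case 1: stays a hash */
    else
        return uao_grow_convert(uobj, pages);   /* array -> hash */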
678 int pages = round_page(size) >> PAGE_SHIFT;
688 aobj->u_pages = pages;
698 aobj->u_pages = pages;
716 aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(pages),
726 aobj->u_swslots = mallocarray(pages, sizeof(int),
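Lines 678, 716 and 726 show the same threshold choice being made at creation time: the size in pages decides whether the object starts out with a swap hash or a flat slot array. A sketch; the exact predicate (a UAO_USES_SWHASH()-style test) is an assumption:

    int pages = round_page(size) >> PAGE_SHIFT;

    aobj->u_pages = pages;
    if (pages > UAO_SWHASH_THRESHOLD) {         /* predicate assumed */
        aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(pages),
            M_UVMAOBJ, M_WAITOK, &aobj->u_swhashmask);
    } else {
        aobj->u_swslots = mallocarray(pages, sizeof(int),
            M_UVMAOBJ, M_WAITOK | M_ZERO);
    }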
775 * NOTE: Pages for this pool must not come from a pageable
828 * Free all the pages left in the aobj. For each page, when the
854 * uao_flush: flush pages out of a uvm object
857 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
891 * or deactivating pages.
985 * => flags: PGO_ALLPAGES: get all of the pages
988 * => NOTE: caller must check for released pages!!
1006 * get number of pages
1012 * step 1a: get pages that are already resident. only do
1017 gotpages = 0; /* # of pages we got so far */
1055 * step 2: get non-resident or busy pages.
1061 * - skip over pages we've already gotten or don't want
1062 * - skip over pages we don't _have_ to get
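The step comments at 1012-1062 describe the usual two-pass pager pattern: take what is already resident, then allocate and page in the rest. A heavily condensed sketch, with locking, busy-page handling, and the PGO_* flag logic all omitted; the uao_find_swslot() and uvm_swap_get() usage is assumed, and variable names follow the pager convention:

    for (i = 0; i < npages; i++) {
        ptmp = uvm_pagelookup(uobj, offset + ((voff_t)i << PAGE_SHIFT));
        if (ptmp != NULL) {             /* step 1: already resident */
            pps[i] = ptmp;
            continue;
        }
        /* step 2: not resident; allocate and fill from swap if needed */
        ptmp = uvm_pagealloc(uobj, offset + ((voff_t)i << PAGE_SHIFT),
            NULL, 0);
        slot = uao_find_swslot(aobj, (offset >> PAGE_SHIFT) + i);
        if (slot != 0)
            uvm_swap_get(ptmp, slot, PGO_SYNCIO);   /* assumed usage */
        pps[i] = ptmp;
    }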
1266 * Page in all pages in the swap slot range.
1293 * page in any pages from aobj in the given range.
1515 * adjust the counter of pages only in swap for all
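The counter referenced at 1515 is the system-wide count of pages whose only copy lives on swap; when an aobj's slots are dropped, it must shrink by the number of swap-only pages released. A sketch of that adjustment; the swpgonlydelta name and the atomic op are assumptions:

    if (swpgonlydelta > 0) {
        KASSERT(uvmexp.swpgonly >= swpgonlydelta);
        atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
    }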