/openbsd-src/lib/libusbhid/

usage.c
    51   } *pages;    (variable)
    61   printf("%d\t%s\n", pages[i].usage, pages[i].name);    in dump_hid_table()
    62   for (j = 0; j < pages[i].pagesize; j++) {    in dump_hid_table()
    63   printf("\t%d\t%s\n", pages[i].page_contents[j].usage,    in dump_hid_table()
    64   pages[i].page_contents[j].name);    in dump_hid_table()
    148  if (pages == NULL) {    in hid_start()
    150  pages = calloc(len,    in hid_start()
    154  new = reallocarray(pages,    in hid_start()
    160  pages = new;    in hid_start()
    161  bzero(pages + npagesmax,    in hid_start()
    [all …]
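The hid_start() hits above show the usual OpenBSD table-growing idiom: reallocarray() checks the count-times-size multiplication for overflow, the old pointer is replaced only on success, and the newly grown tail is zeroed. A minimal generic sketch of that idiom (the grow() helper and its parameter names are mine, not from usage.c):

    #include <stdlib.h>
    #include <strings.h>

    static int
    grow(void **tab, size_t *nmax, size_t want, size_t elemsz)
    {
        void *new;

        /* reallocarray() fails cleanly on overflow of want * elemsz. */
        if ((new = reallocarray(*tab, want, elemsz)) == NULL)
            return -1;                  /* *tab is still valid on failure */
        /* Zero only the newly added tail, as hid_start() does with bzero(). */
        bzero((char *)new + *nmax * elemsz, (want - *nmax) * elemsz);
        *tab = new;
        *nmax = want;
        return 0;
    }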
/openbsd-src/share/man/man9/

uvm_init.9
    244  int npages;    /* number of pages we manage */
    245  int free;    /* number of free pages */
    246  int active;    /* number of active pages */
    247  int inactive;    /* number of pages that we free'd but may want back */
    248  int paging;    /* number of pages in the process of being paged out */
    249  int wired;    /* number of wired pages */
    251  int zeropages;    /* number of zero'd pages */
    252  int reserve_pagedaemon;    /* number of pages reserved for pagedaemon */
    253  int reserve_kernel;    /* number of pages reserved for kernel */
    255  int vnodepages;    /* XXX # of pages used by vnode page cache */
    [all …]
km_alloc.9
    34   function allocates kernel virtual space optionally backed by physical pages.
    67   Allocates single pages.
    70   For physical pages the predefined modes are:
    74   Maps dirty pages into the allocation.
    76   Maps zeroed pages into the allocation.
    78   Maps dma-accessible pages into the allocation.
    80   Maps zeroed dma-accessible pages into the allocation.
    82   Pages will be demand paged.
    91   Sleeping for physical pages is allowed.
    118  A flag indicating if the allocations will always be for single pages.
    [all …]
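A minimal kernel-side sketch of the km_alloc(9) interface these hits document, assuming the predefined modes the page lists (kv_any, kp_zero, kd_waitok); this is illustrative, not a complete driver pattern:

    #include <sys/param.h>
    #include <uvm/uvm_extern.h>

    void *
    grab_zeroed_page(void)
    {
        void *va;

        /* One page of kernel virtual space backed by a zeroed physical
         * page; kd_waitok lets the allocation sleep for free pages. */
        va = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_waitok);

        /* Pair with: km_free(va, PAGE_SIZE, &kv_any, &kp_zero); */
        return va;
    }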
uvm_pagealloc.9
    65   if no pages are free.
    76   #define UVM_PGA_USERESERVE 0x0001 /* ok to use reserve pages */
    83   free pages being lower than
    91   from a pool of pre-zeroed pages or by zeroing it in-line as necessary.
    109  function allocates a list of pages for size
    135  #define UVM_PLA_ZERO 0x0004 /* zero all pages before returning */
    141  pages (this is currently ignored).
    152  function frees the list of pages pointed to by
    160  must be called at system boot time to set up physical memory management pages.
    166  addresses of pages no
    [all …]
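Correspondingly, a hedged sketch of uvm_pagealloc(9) as described above: request a pre-zeroed page for an object, and sleep for the pagedaemon when no pages are free (uobj and off are assumed to come from the caller):

    #include <sys/param.h>
    #include <uvm/uvm_extern.h>

    struct vm_page *
    get_zeroed_page(struct uvm_object *uobj, voff_t off)
    {
        struct vm_page *pg;

        /* UVM_PGA_ZERO returns the page from the pre-zeroed pool or
         * zeroes it in-line; NULL means no pages are free right now. */
        while ((pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO)) == NULL)
            uvm_wait("getzpg");   /* hypothetical wait channel name */
        return pg;
    }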
/openbsd-src/sys/dev/pci/drm/i915/gem/selftests/

huge_gem_object.c
    12   struct sg_table *pages)    in huge_free_pages() (argument)
    18   for_each_sgt_page(page, sgt_iter, pages) {    in huge_free_pages()
    24   sg_free_table(pages);    in huge_free_pages()
    25   kfree(pages);    in huge_free_pages()
    34   struct sg_table *pages;    in huge_get_pages() (local)
    41   pages = kmalloc(sizeof(*pages), GFP);    in huge_get_pages()
    42   if (!pages)    in huge_get_pages()
    45   if (sg_alloc_table(pages, npages, GFP)) {    in huge_get_pages()
    46   kfree(pages);    in huge_get_pages()
    50   sg = pages->sgl;    in huge_get_pages()
    [all …]
/openbsd-src/sys/uvm/

uvmexp.h
    14   #define VM_NKMEMPAGES 6    /* int - # kmem_map pages */
    59   int npages;    /* [I] number of pages we manage */
    60   int free;    /* [F] number of free pages */
    61   int active;    /* [L] # of active pages */
    62   int inactive;    /* [L] # of pages that we free'd but may want back */
    63   int paging;    /* [a] # of pages in the process of being paged out */
    64   int wired;    /* number of wired pages */
    66   int zeropages;    /* [F] number of zero'd pages */
    67   int reserve_pagedaemon;    /* [I] # of pages reserved for pagedaemon */
    68   int reserve_kernel;    /* [I] # of pages reserve
    [all …]
uvm_aobj.c
    52   * keeping the list of resident pages, it may also keep a list of allocated
    59   * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
    92   #define UAO_SWHASH_BUCKETS(pages) \    (argument)
    93   (min((pages) >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
    125  struct uvm_object u_obj;    /* has: pgops, memt, #pages, #refs */
    126  int u_pages;    /* number of pages in entire object */
    387  * Shrink an aobj to a given number of pages. The procedure is always the same:
    389  * resources, flush pages and drop swap slots.
    404  uao_shrink_hash(struct uvm_object *uobj, int pages)    in uao_shrink_hash() (argument)
    418  if (UAO_SWHASH_BUCKETS(aobj->u_pages) == UAO_SWHASH_BUCKETS(pages)) {    in uao_shrink_hash()
    453  uao_shrink_convert(struct uvm_object *uobj, int pages)    (argument)
    489  uao_shrink_array(struct uvm_object *uobj, int pages)    (argument)
    513  uao_shrink(struct uvm_object *uobj, int pages)    (argument)
    542  uao_grow_array(struct uvm_object *uobj, int pages)    (argument)
    566  uao_grow_hash(struct uvm_object *uobj, int pages)    (argument)
    610  uao_grow_convert(struct uvm_object *uobj, int pages)    (argument)
    643  uao_grow(struct uvm_object *uobj, int pages)    (argument)
    678  int pages = round_page(size) >> PAGE_SHIFT;    in uao_create() (local)
    [all …]
/openbsd-src/sys/dev/pci/drm/include/drm/ttm/

ttm_tt.h
    42   * struct ttm_tt - This is a structure holding the pages, caching- and aperture
    47   /** @pages: Array of pages backing the data. */
    48   struct vm_page **pages;    (member)
    56   * TTM_TT_FLAG_SWAPPED: Set by TTM when the pages have been unpopulated
    58   * pages back in, and unset the flag. Drivers should in general never
    61   * TTM_TT_FLAG_ZERO_ALLOC: Set if the pages will be zeroed on
    64   * TTM_TT_FLAG_EXTERNAL: Set if the underlying pages were allocated
    66   * TTM swapping out such pages. Also important is to prevent TTM from
    67   * ever directly mapping these pages.
    74   * still valid to use TTM to map the pages directly. This is useful when
    [all …]
/openbsd-src/usr.bin/systat/

systat.1
    398  .Ic number of pages .
    519  The first two columns report the average number of pages
    522  The third and fourth columns report the average number of pages
    528  number of pages transferred per second over the last refresh interval.
    570  min number of free pages
    572  target number of free pages
    574  target number of inactive pages
    576  wired pages
    578  pages daemon freed since boot
    580  pages daemon
    [all …]
/openbsd-src/gnu/lib/libiberty/src/

physmem.c
    87   double pages = sysconf (_SC_PHYS_PAGES);    in physmem_total() (local)
    89   if (0 <= pages && 0 <= pagesize)    in physmem_total()
    90   return pages * pagesize;    in physmem_total()
    99   double pages = pss.physical_memory;    in physmem_total() (local)
    101  if (0 <= pages && 0 <= pagesize)    in physmem_total()
    102  return pages * pagesize;    in physmem_total()
    113  double pages = realmem.physmem;    in physmem_total() (local)
    114  if (0 <= pages && 0 <= pagesize)    in physmem_total()
    115  return pages * pagesize;    in physmem_total()
    191  double pages = sysconf (_SC_AVPHYS_PAGES);    in physmem_available() (local)
    [all …]
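The sysconf() branch shown above is easy to exercise standalone. A user-space sketch, assuming the platform defines _SC_PHYS_PAGES (it is a common extension, not required by POSIX); the double arithmetic mirrors physmem.c and avoids overflowing a 32-bit long:

    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        double pages = sysconf(_SC_PHYS_PAGES);
        double pagesize = sysconf(_SC_PAGESIZE);

        if (0 <= pages && 0 <= pagesize)
            printf("total physical memory: %.0f bytes\n", pages * pagesize);
        return 0;
    }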
/openbsd-src/sys/dev/pci/drm/i915/gem/

i915_gem_pages.c
    19   struct sg_table *pages)    in __i915_gem_object_set_pages() (argument)
    31   /* Make the pages coherent with the GPU (flushing any swapin). */    in __i915_gem_object_set_pages()
    36   drm_clflush_sg(pages);    in __i915_gem_object_set_pages()
    40   obj->mm.get_page.sg_pos = pages->sgl;    in __i915_gem_object_set_pages()
    42   obj->mm.get_dma_page.sg_pos = pages->sgl;    in __i915_gem_object_set_pages()
    45   obj->mm.pages = pages;    in __i915_gem_object_set_pages()
    47   obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);    in __i915_gem_object_set_pages()
    55   * 64K or 4K pages, although in practice this will depend on a number of    in __i915_gem_object_set_pages()
    116  /* Ensure that the associated pages are gathered from the backing storage
    119  * i915_gem_object_unpin_pages() - once the pages are no longer referenced
    [all …]
i915_gem_phys.c
    140  struct sg_table *pages)    in i915_gem_object_put_pages_phys() (argument)
    142  dma_addr_t dma = sg_dma_address(pages->sgl);    in i915_gem_object_put_pages_phys()
    144  void *vaddr = sg_page(pages->sgl);    in i915_gem_object_put_pages_phys()
    146  struct drm_dmamem *dmah = (void *)sg_page(pages->sgl);    in i915_gem_object_put_pages_phys()
    151  __i915_gem_object_release_shmem(obj, pages, false);    in i915_gem_object_put_pages_phys()
    197  sg_free_table(pages);    in i915_gem_object_put_pages_phys()
    198  kfree(pages);    in i915_gem_object_put_pages_phys()
    213  void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;    in i915_gem_object_pwrite_phys()
    215  struct drm_dmamem *dmah = (void *)sg_page(obj->mm.pages->sgl);    in i915_gem_object_pwrite_phys()
    249  void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;    in i915_gem_object_pread_phys()
    [all …]
i915_gem_object_types.h
    48   * of pages before to binding them into the GTT, and put_pages() is
    50   * associated cost with migrating pages between the backing storage
    52   * onto the pages after they are no longer referenced by the GPU
    54   * pages to a different memory domain within the GTT). put_pages()
    57   * reap pages for the shrinker).
    61   struct sg_table *pages);
    72   * backing pages, if supported.
    127  * pages.
    129  * These translate to some special GTT PTE bits when binding pages into some
    130  * address space. It also determines whether an object, or rather its pages are
    [all …]
i915_gem_shrinker.c
    32   * We can only return physical pages to the system if we can either    in can_release_pages()
    79   * @target: amount of memory to make available, in pages
    80   * @nr_scanned: optional output for number of pages scanned (incremental)
    84   * up to @target pages of main memory backing storage from buffer objects.
    89   * free system memory - the pages might still be in-used to due to other reasons
    99   * The number of pages of backing storage actually released.
    143  * so can not be simply unbound to retire and unpin their pages. To    in i915_gem_shrink()
    187  * to be able to shrink their pages, so they remain on    in i915_gem_shrink()
    268  * The number of pages of backing storage actually released.
    298  * available GEM objects worth of pages. That is we don't want    in i915_gem_shrinker_count()
    [all …]
/openbsd-src/sys/dev/pci/drm/radeon/

radeon_gart.c
    43   * in the GPU's address space. System pages can be mapped into
    44   * the aperture and look like contiguous pages from the GPU's
    45   * perspective. A page table maps the pages in the aperture
    46   * to the actual backing pages in system memory.
    278  * radeon_gart_unbind - unbind pages from the gart page table
    282  * @pages: number of pages to unbind
    284  * Unbinds the requested pages from the gart page table and
    288  int pages)    in radeon_gart_unbind() (argument)
    299  for (i = 0; i < pages; i++, p++) {    in radeon_gart_unbind()
    300  if (rdev->gart.pages[p]) {    in radeon_gart_unbind()
    [all …]
/openbsd-src/usr.bin/mandoc/

dba.c
    37   struct dba_array *pages;    (member)
    63   dba->pages = dba_array_new(npages, DBA_GROW);    in dba_new()
    85   dba_array_free(entry->pages);    in dba_free()
    93   dba_array_undel(dba->pages);    in dba_free()
    94   dba_array_FOREACH(dba->pages, page) {    in dba_free()
    102  dba_array_free(dba->pages);    in dba_free()
    111  * - The pages table.
    126  dba_pages_write(dba->pages);    in dba_write()
    144  /*** functions for handling pages *************************************/
    147  * Create a new page and append it to the pages table.
    [all …]
dbm.c
    36   int32_t pages;    (member)
    58   static struct page *pages;    (variable)
    72   * Map the pages and macros[] arrays.
    85   warnx("dbm_open(%s): Invalid number of pages: %d",    in dbm_open()
    89   pages = (struct page *)dbm_getint(5);    in dbm_open()
    124  /*** functions for handling pages *************************************/
    142  res.name = dbm_get(pages[ip].name);    in dbm_page_get()
    145  res.sect = dbm_get(pages[ip].sect);    in dbm_page_get()
    148  res.arch = pages[ip].arch ? dbm_get(pages[ip].arch) : NULL;    in dbm_page_get()
    149  res.desc = dbm_get(pages[ip].desc);    in dbm_page_get()
    [all …]
man.1
    39   .Nd display manual pages
    56   Pages may be selected according to
    78   Display all matching manual pages.
    109  in manual page names and displays the header lines from all matching pages.
    112  Display only the SYNOPSIS lines of the requested manual pages.
    125  By default, it displays the header lines of all matching pages.
    143  Override the list of directories to search for manual pages.
    155  Augment the list of directories to search for manual pages.
    169  Only show pages for the specified
    175  By default manual pages for
    [all …]
/openbsd-src/sys/dev/pci/drm/amd/amdgpu/

amdgpu_gart.c
    44   * in the GPU's address space. System pages can be mapped into
    45   * the aperture and look like contiguous pages from the GPU's
    46   * perspective. A page table maps the pages in the aperture
    47   * to the actual backing pages in system memory.
    72   * when pages are taken out of the GART
    138  * then set_memory_wc() could be used as a workaround to mark the pages    in amdgpu_gart_table_ram_alloc()
    285  * amdgpu_gart_unbind - unbind pages from the gart page table
    289  * @pages: number of pages to unbind
    291  * Unbinds the requested pages from
    295  amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, int pages)    (argument)
    346  amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, int pages, dma_addr_t *dma_addr, uint64_t flags, void *dst)    (argument)
    382  amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, int pages, dma_addr_t *dma_addr, uint64_t flags)    (argument)
    [all …]
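As the comment block at lines 44-47 describes for both radeon and amdgpu, a GART is just a page table translating aperture pages to backing system pages. A schematic of the mapping step only, assuming one 64-bit entry per page (gart_map_sketch and gart_table are illustrative names, not the driver API):

    #include <stdint.h>

    #define GART_PAGE_SIZE 4096u

    /* Write one page-table entry per page: the entry is the page's DMA
     * (bus) address combined with hardware-specific flag bits. */
    static void
    gart_map_sketch(uint64_t *gart_table, uint64_t offset, int pages,
        const uint64_t *dma_addr, uint64_t flags)
    {
        uint64_t t = offset / GART_PAGE_SIZE;   /* first aperture page */
        int i;

        for (i = 0; i < pages; i++)
            gart_table[t + i] = dma_addr[i] | flags;
    }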
/openbsd-src/lib/libc/sys/

mlock.2
    39   .Nd lock (unlock) physical pages in memory
    50   locks into memory the physical pages associated with the virtual address
    58   call unlocks pages previously locked by one or more
    72   call, the indicated pages will cause neither a non-resident page
    76   The physical pages remain in memory until all locked mappings for the pages
    78   Multiple processes may have the same physical pages locked via their own
    80   A single process may likewise have pages multiply locked via different virtual
    81   mappings of the same pages or via nested
    98   .Dq wired pages
    151  calls to actually unlock the pages, i.e.,
    [all …]
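A small user-space sketch of the mlock(2)/munlock(2) pair described above; note that locking may fail where resource limits or privileges apply:

    #include <sys/mman.h>
    #include <stdlib.h>
    #include <unistd.h>

    int
    main(void)
    {
        long pagesize = sysconf(_SC_PAGESIZE);
        char *buf;

        if ((buf = malloc(pagesize)) == NULL)
            return 1;
        if (mlock(buf, pagesize) == -1)   /* wire the backing page */
            return 1;
        buf[0] = 1;   /* no page fault: the page is resident and locked */
        munlock(buf, pagesize);           /* allow paging again */
        free(buf);
        return 0;
    }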
/openbsd-src/sys/dev/pci/drm/ttm/

ttm_pool.c
    26   /* Pooling of allocated pages is necessary because changing the caching
    68   MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
    85   /* Allocate pages of size 1 << order with the given gfp_flags */
    95   * Mapping pages directly into an userspace process and calling    in ttm_pool_alloc_page()
    139  /* Reset the caching and pages of size 1 << order */
    254  /* Apply a new caching to an array of pages */
    278  /* Map pages of 1 << order size and fill the DMA address array */
    305  /* Unmap pages of 1 << order size */
    344  /* Give pages into a specific pool_type */
    367  /* Take pages from a specific pool_type, return NULL when nothing available */
    [all …]
ttm_tt.c
    49   MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
    54   MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
    90   * mapped TT pages need to be decrypted or otherwise the drivers    in ttm_tt_create()
    109  * Allocates storage for pointers to the pages that back the ttm.
    113  ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);    in ttm_tt_alloc_page_directory()
    114  if (!ttm->pages)    in ttm_tt_alloc_page_directory()
    125  ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +    in ttm_dma_tt_alloc_page_directory()
    127  if (!ttm->pages)    in ttm_dma_tt_alloc_page_directory()
    130  ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);    in ttm_dma_tt_alloc_page_directory()
    194  if (ttm->pages)    in ttm_tt_fini()
    [all …]
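The ttm_dma_tt_alloc_page_directory() hits above (lines 125-130) use a single allocation to hold two parallel arrays, with the DMA-address array aliasing the tail of the block. The same trick in generic C (struct page_dir is mine; only the sizing and aliasing scheme comes from ttm_tt.c):

    #include <stdint.h>
    #include <stdlib.h>

    struct page_dir {
        void **pages;     /* num_pages page pointers, start of the block */
        uint64_t *dma;    /* num_pages DMA addresses, tail of the block */
    };

    static int
    page_dir_alloc(struct page_dir *d, size_t num_pages)
    {
        /* One zeroed block sized for both arrays... */
        d->pages = calloc(num_pages, sizeof(*d->pages) + sizeof(*d->dma));
        if (d->pages == NULL)
            return -1;
        /* ...with the second array starting right after the first. */
        d->dma = (uint64_t *)(d->pages + num_pages);
        return 0;
    }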
/openbsd-src/sys/dev/pci/drm/

drm_scatter.c
    51   for (i = 0; i < entry->pages; i++) {    in drm_sg_cleanup()
    83   unsigned long pages, i, j;    in drm_legacy_sg_alloc() (local)
    103  pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;    in drm_legacy_sg_alloc()
    104  DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);    in drm_legacy_sg_alloc()
    106  entry->pages = pages;    in drm_legacy_sg_alloc()
    107  entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);    in drm_legacy_sg_alloc()
    113  entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);    in drm_legacy_sg_alloc()
    120  entry->virtual = vmalloc_32(pages << PAGE_SHIFT);    in drm_legacy_sg_alloc()
    128  /* This also forces the mapping of COW pages, so our page list    in drm_legacy_sg_alloc()
    131  memset(entry->virtual, 0, pages << PAGE_SHIFT);    in drm_legacy_sg_alloc()
    [all …]
/openbsd-src/sys/sys/

pool.h
    54   unsigned int pr_maxpages;    /* maximum # of idle pages to keep */
    58   unsigned int pr_npages;    /* # of pages allocated */
    65   unsigned long pr_npagealloc;    /* # of pages allocated */
    66   unsigned long pr_npagefree;    /* # of pages released */
    67   unsigned int pr_hiwat;    /* max # of pages in pool */
    68   unsigned long pr_nidle;    /* # of idle pages */
    115  * The pa_pagesz member encodes the sizes of pages that can be
    121  * allocator can provide 16k and 32k pages you initialise pa_pagesz
    124  * If the allocator can provide aligned pages the low bit in pa_pagesz
    159  pr_emptypages;    /* Empty pages */
    [all …]
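Lines 115-124 describe how pa_pagesz packs capabilities into one word: each supported power-of-two page size is OR'd in, and the low bit advertises aligned pages. A sketch of just that encoding (my_allocator and its alloc/free hooks are hypothetical; only the pa_pagesz arithmetic comes from the header):

    #include <sys/pool.h>

    struct pool_allocator my_allocator;   /* pa_alloc/pa_free set elsewhere */

    void
    my_allocator_init(void)
    {
        /* This backend can hand out 16k and 32k pages... */
        my_allocator.pa_pagesz = 16384 | 32768;
        /* ...and they come back aligned, so set the low bit too. */
        my_allocator.pa_pagesz |= 1;
    }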
/openbsd-src/sys/dev/pci/drm/ttm/tests/

ttm_pool_test.c
    165  fst_page = tt->pages[0];    in ttm_pool_alloc_basic()
    166  last_page = tt->pages[tt->num_pages - 1];    in ttm_pool_alloc_basic()
    252  KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));    in ttm_pool_alloc_order_caching_match()
    260  KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));    in ttm_pool_alloc_order_caching_match()
    286  KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));    in ttm_pool_alloc_caching_mismatch()
    287  KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));    in ttm_pool_alloc_caching_mismatch()
    295  KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));    in ttm_pool_alloc_caching_mismatch()
    296  KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));    in ttm_pool_alloc_caching_mismatch()
    320  KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));    in ttm_pool_alloc_order_mismatch()
    321  KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));    in ttm_pool_alloc_order_mismatch()
    [all …]