Lines Matching defs:page (cross-reference hits for the symbol "page"; the number at the start of each entry is the line in the indexed source file)

86 linux_page_address(struct page *page)
89 if (page->object != kernel_object) {
91 ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
95 IDX_TO_OFF(page->pindex)));
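
The hits at source lines 86-95 are the body of linux_page_address(). Read together they say: a page that does not belong to kernel_object is resolved through the physical direct map, while a kernel_object page has its kernel virtual address recomputed from its pindex. A minimal sketch of that shape, assuming the indexed file's include environment; the PMAP_HAS_DMAP guard and the VM_MIN_KERNEL_ADDRESS base do not appear in the matched lines and are assumptions here.

    /* Sketch only, not the verbatim source. */
    void *
    linux_page_address(struct page *page)
    {
            if (page->object != kernel_object) {
                    /* Not a kernel_object page: translate its physical
                     * address into the direct map, if the platform has one. */
                    return (PMAP_HAS_DMAP ?
                        ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
                        NULL);
            }
            /* kernel_object page: recover the KVA from the page index
             * (base address assumed). */
            return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
                IDX_TO_OFF(page->pindex)));
    }
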
98 struct page *
101 struct page *page;
110 page = vm_page_alloc_noobj(req);
111 if (page == NULL)
117 page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
119 if (page == NULL) {
140 page = virt_to_page((void *)vaddr);
142 KASSERT(vaddr == (vm_offset_t)page_address(page),
146 return (page);
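
Lines 98-146 are linux_alloc_pages(). The matched calls outline three allocation paths: a single unconstrained page from vm_page_alloc_noobj(), a contiguous run (higher order, or a GFP_DMA32 bound) from vm_page_alloc_noobj_contig(), and, on platforms without a direct map, mapped kernel memory converted back to a vm_page with virt_to_page(). A condensed sketch of those paths; the flag translation (req, pmax, npages) and the linux_alloc_kmem() fallback come from the LinuxKPI headers rather than from the matched lines, and the reclaim/retry logic is omitted.

    /* Sketch: condensed allocation paths of linux_alloc_pages(). */
    struct page *
    linux_alloc_pages(gfp_t flags, unsigned int order)
    {
            struct page *page;

            if (PMAP_HAS_DMAP) {
                    u_long npages = 1UL << order;
                    int req = VM_ALLOC_WIRED;       /* flag translation assumed */

                    if ((flags & M_ZERO) != 0)
                            req |= VM_ALLOC_ZERO;

                    if (order == 0 && (flags & GFP_DMA32) == 0) {
                            /* Fast path: one page, no physical constraint. */
                            page = vm_page_alloc_noobj(req);
                            if (page == NULL)
                                    return (NULL);
                    } else {
                            /* Contiguous run, optionally below 4 GB for DMA32. */
                            vm_paddr_t pmax = (flags & GFP_DMA32) != 0 ?
                                BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;

                            page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
                                PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
                            if (page == NULL) {
                                    /* The real code may reclaim and retry here. */
                                    return (NULL);
                            }
                    }
            } else {
                    /* No direct map: allocate mapped kernel memory and recover
                     * the vm_page from the virtual address. */
                    vm_offset_t vaddr;

                    vaddr = linux_alloc_kmem(flags, order); /* assumed helper */
                    if (vaddr == 0)
                            return (NULL);
                    page = virt_to_page((void *)vaddr);
                    KASSERT(vaddr == (vm_offset_t)page_address(page),
                        ("Page address mismatch"));
            }
            return (page);
    }
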
158 linux_free_pages(struct page *page, unsigned int order)
165 vm_page_t pgo = page + x;
173 vaddr = (vm_offset_t)page_address(page);
198 ("%s: addr %p is not page aligned", __func__, (void *)addr));
203 vm_page_t page;
205 page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
206 linux_free_pages(page, order);
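
Lines 158-173 are linux_free_pages(): it walks the 1 << order pages of the allocation (pgo = page + x) and, where there is no direct map, looks up the kernel mapping via page_address() so it can be torn down. Lines 198-206 are the address-based companion (linux_free_kmem() in the LinuxKPI sources): assert page alignment, convert the direct-map address back to its vm_page, and reuse linux_free_pages(). A hedged sketch of that shorter routine; the function name and the treatment of non-direct-map addresses are assumptions.

    /* Sketch: freeing an order-sized allocation by its direct-map address. */
    void
    linux_free_kmem(vm_offset_t addr, unsigned int order)
    {
            vm_page_t page;

            KASSERT((addr & PAGE_MASK) == 0,
                ("%s: addr %p is not page aligned", __func__, (void *)addr));

            /* Direct-map case only; the real routine also copes with
             * addresses that live in the kernel map. */
            page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
            linux_free_pages(page, order);
    }
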
212 int write, struct page **pages)
226 struct page **pages)
253 * Explicitly dirty the physical page. Otherwise, the
270 struct page **pages, struct vm_area_struct **vmas)
281 unsigned int gup_flags, struct page **pages)
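
Lines 212-281 are the user-page pinning entry points: a fast path that still takes an int write argument, an internal helper whose comment at 253 notes that pages pinned for writing are dirtied explicitly, and the gup_flags-based get_user_pages() variants. From a driver's point of view these back the usual Linux pinning interface. A consumer-side sketch, assuming the conventional Linux prototypes that LinuxKPI mirrors (exact parameter lists differ between branches); pin_user_buffer() is an illustrative name, not from the listing.

    /* Consumer-side sketch: pin a user buffer for device writes, then
     * release the references. */
    static int
    pin_user_buffer(unsigned long uaddr, int nr_pages, struct page **pages)
    {
            int got, i;

            /* FOLL_WRITE requests write access; per the comment at line 253,
             * such pages are dirtied explicitly by the implementation. */
            got = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
            if (got <= 0)
                    return (got < 0 ? got : -EFAULT);

            /* ... program the device to DMA into the pinned pages ... */

            for (i = 0; i < got; i++)
                    put_page(pages[i]);
            return (got == nr_pages ? 0 : -EFAULT);
    }
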
302 vm_page_t page;
312 page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NOCREAT);
313 if (page == NULL) {
314 page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
315 if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
317 if (page->object != NULL) {
318 tmp_obj = page->object;
319 vm_page_xunbusy(page);
322 if (page->object == tmp_obj &&
323 vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
324 KASSERT(page->object == tmp_obj,
325 ("page has changed identity"));
326 KASSERT((page->oflags & VPO_UNMANAGED) == 0,
327 ("page does not belong to shmem"));
328 vm_pager_page_unswapped(page);
329 if (pmap_page_is_mapped(page)) {
330 vm_page_xunbusy(page);
332 printf("%s: page rename failed: page "
337 vm_page_remove(page);
343 if (vm_page_insert(page, vm_obj, pindex)) {
344 vm_page_xunbusy(page);
347 vm_page_valid(page);
349 pmap_page_set_memattr(page, pgprot2cachemode(prot));
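
Lines 302-349 implement the pfn-insertion path behind the Linux vmf_insert_pfn_prot()-style interface: grab any page already present at pindex with VM_ALLOC_NOCREAT, otherwise busy the vm_page backing the pfn, detach it from a previous object if needed (unswap it, bail out if it is still mapped, vm_page_remove()), insert it into vm_obj, mark it valid, and apply the cache attribute derived from the requested pgprot. From the Linux side all of that hides behind a single call in a fault handler. A consumer-side sketch; my_fault() is a hypothetical handler, the vm_private_data layout is illustrative, and the write-combining attribute is only an example.

    /* Consumer-side sketch: a vm_operations_struct fault handler that maps
     * one physical page of a device object into the faulting VMA. */
    static vm_fault_t
    my_fault(struct vm_fault *vmf)
    {
            struct vm_area_struct *vma = vmf->vma;
            unsigned long off = vmf->address - vma->vm_start;
            unsigned long base_pfn, pfn;

            /* vm_private_data is assumed to hold the object's base physical
             * address (illustrative layout, not from the listing). */
            base_pfn = (uintptr_t)vma->vm_private_data >> PAGE_SHIFT;
            pfn = base_pfn + (off >> PAGE_SHIFT);

            return (vmf_insert_pfn_prot(vma, vmf->address, pfn,
                pgprot_writecombine(vma->vm_page_prot)));
    }
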
496 * We only support up to a single page as fragment size and we will
497 * always return a full page. This may be wasteful on small objects
498 * but the only known consumer (mt76) is either asking for a half-page
499 * or a full page. If this was to become a problem we can implement
528 vm_page_t page;
530 page = virt_to_page(addr);
531 linux_free_pages(page, 0);
535 linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
538 linux_free_pages(page, 0);
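
Lines 496-538 back the Linux page-fragment allocator. The comment at 496-499 states the policy: whatever fragment size is requested, a full page is handed out, which keeps the code simple for the one known consumer (mt76). page_frag_free() (lines 528-531) and the cache drain (535-538) both reduce to linux_free_pages() on a single page. A consumer-side sketch using the standard Linux prototypes; the cache is assumed to have been zero-initialised before first use, and grab_rx_buffer() is an illustrative name.

    /* Consumer-side sketch: carve a receive buffer out of a page_frag_cache.
     * With this backend the 2 KB request still consumes a full page. */
    static void *
    grab_rx_buffer(struct page_frag_cache *cache)
    {
            void *buf;

            buf = page_frag_alloc(cache, 2048, GFP_ATOMIC);
            if (buf == NULL)
                    return (NULL);

            /* ... hand buf to the hardware / network stack ... */
            return (buf);
    }

    /* The buffer owner later releases it with page_frag_free(buf); at
     * teardown the remaining cache page goes through
     * __page_frag_cache_drain(), which line 535 shows is backed by
     * linuxkpi__page_frag_cache_drain(). */
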