1 /* $NetBSD: mm.h,v 1.24 2021/12/19 12:21:30 riastradh Exp $ */ 2 3 /*- 4 * Copyright (c) 2013 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Taylor R. Campbell. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #ifndef _LINUX_MM_H_ 33 #define _LINUX_MM_H_ 34 35 #include <uvm/uvm_extern.h> 36 #include <uvm/uvm_object.h> 37 38 #include <asm/page.h> 39 40 #include <linux/pfn.h> 41 #include <linux/shrinker.h> 42 #include <linux/slab.h> 43 #include <linux/sizes.h> 44 45 struct file; 46 47 /* XXX Ugh bletch! Whattakludge! Linux's sense is reversed... 
*/ 48 #undef PAGE_MASK 49 #define PAGE_MASK (~(PAGE_SIZE-1)) 50 51 #define PAGE_ALIGN(x) (((x) + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1)) 52 #define offset_in_page(x) ((uintptr_t)(x) & (PAGE_SIZE-1)) 53 54 #define untagged_addr(x) (x) 55 56 struct sysinfo { 57 unsigned long totalram; 58 unsigned long totalhigh; 59 uint32_t mem_unit; 60 }; 61 62 static inline void 63 si_meminfo(struct sysinfo *si) 64 { 65 66 si->totalram = uvmexp.npages; 67 si->totalhigh = kernel_map->size >> PAGE_SHIFT; 68 si->mem_unit = PAGE_SIZE; 69 /* XXX Fill in more as needed. */ 70 } 71 72 static inline size_t 73 si_mem_available(void) 74 { 75 76 /* XXX ? */ 77 return uvmexp.free; 78 } 79 80 static inline unsigned long 81 vm_mmap(struct file *file __unused, unsigned long base __unused, 82 unsigned long size __unused, unsigned long prot __unused, 83 unsigned long flags __unused, unsigned long token __unused) 84 { 85 86 return -ENODEV; 87 } 88 89 static inline unsigned long 90 totalram_pages(void) 91 { 92 93 return uvmexp.npages; 94 } 95 96 static inline unsigned long 97 get_num_physpages(void) 98 { 99 100 return uvmexp.npages; 101 } 102 103 static inline void * 104 kvmalloc(size_t size, gfp_t gfp) 105 { 106 107 return kmalloc(size, gfp); 108 } 109 110 static inline void * 111 kvzalloc(size_t size, gfp_t gfp) 112 { 113 114 return kmalloc(size, gfp | __GFP_ZERO); 115 } 116 117 static inline void * 118 kvcalloc(size_t nelem, size_t elemsize, gfp_t gfp) 119 { 120 121 KASSERT(elemsize > 0); 122 if (SIZE_MAX/elemsize < nelem) 123 return NULL; 124 return kvzalloc(nelem * elemsize, gfp); 125 } 126 127 static inline void * 128 kvmalloc_array(size_t nelem, size_t elemsize, gfp_t gfp) 129 { 130 131 KASSERT(elemsize != 0); 132 if (nelem > SIZE_MAX/elemsize) 133 return NULL; 134 return kmalloc(nelem * elemsize, gfp); 135 } 136 137 /* 138 * XXX kvfree must additionally work on kmalloc (linux/slab.h) and 139 * vmalloc (linux/vmalloc.h). If you change either of those, be sure 140 * to change this too. 
 */

/*
 * kvfree(ptr)
 *
 *	Free memory obtained from the kvmalloc family.  In this compat
 *	layer those allocations come from kmalloc, so kfree suffices.
 */
static inline void
kvfree(void *ptr)
{
	kfree(ptr);
}

/*
 * set_page_dirty(page)
 *
 *	Mark the vm_page underlying the Linux page structure dirty.
 *	If the page belongs to a uvm_object, its vmobjlock is taken as
 *	a writer around the status change; otherwise the page is
 *	marked dirty without locking.
 *
 *	NOTE(review): the unlocked else-path presumably covers pages
 *	with no owning object -- confirm whether pages owned by an
 *	anon need their own lock here.
 */
static inline void
set_page_dirty(struct page *page)
{
	struct vm_page *pg = &page->p_vmp;

	/* XXX */
	if (pg->uobject != NULL) {
		rw_enter(pg->uobject->vmobjlock, RW_WRITER);
		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		rw_exit(pg->uobject->vmobjlock);
	} else {
		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
	}
}

#endif	/* _LINUX_MM_H_ */