/*	$NetBSD: tmpfs_mem.c,v 1.14 2023/04/29 06:29:55 riastradh Exp $	*/

/*
 * Copyright (c) 2010, 2011, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * tmpfs memory allocation routines.
 * Implements memory usage accounting and limiting.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tmpfs_mem.c,v 1.14 2023/04/29 06:29:55 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/namei.h>
#include <sys/pool.h>

#include <fs/tmpfs/tmpfs.h>

extern struct pool	tmpfs_dirent_pool;
extern struct pool	tmpfs_node_pool;

/*
 * tmpfs_mntmem_init: initialise the memory accounting state for a mount.
 */
void
tmpfs_mntmem_init(struct tmpfs_mount *mp, uint64_t memlimit)
{

	mutex_init(&mp->tm_acc_lock, MUTEX_DEFAULT, IPL_NONE);
	mp->tm_mem_limit = memlimit;
	mp->tm_bytes_used = 0;
}

/*
 * tmpfs_mntmem_destroy: tear down the memory accounting state; no bytes
 * may still be accounted at this point.
 */
void
tmpfs_mntmem_destroy(struct tmpfs_mount *mp)
{

	KASSERT(mp->tm_bytes_used == 0);
	mutex_destroy(&mp->tm_acc_lock);
}

/*
 * tmpfs_mntmem_set: set a new memory limit for the mount.
 *
 * => Returns EBUSY if the new limit does not exceed the current usage
 *    (rounded up to a page boundary).
 */
int
tmpfs_mntmem_set(struct tmpfs_mount *mp, uint64_t memlimit)
{
	int error;

	mutex_enter(&mp->tm_acc_lock);
	if (round_page(mp->tm_bytes_used) >= memlimit)
		error = EBUSY;
	else {
		error = 0;
		mp->tm_mem_limit = memlimit;
	}
	mutex_exit(&mp->tm_acc_lock);
	return error;
}
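/*
 * Illustrative sketch only (hypothetical caller, kept under #if 0 and not
 * compiled): how a mount-update path might apply a user-supplied limit via
 * tmpfs_mntmem_set() and surface the EBUSY case described above.  The
 * function name and calling context are assumptions for illustration.
 */
#if 0
static int
example_resize_memlimit(struct tmpfs_mount *mp, uint64_t newlimit)
{
	int error;

	error = tmpfs_mntmem_set(mp, newlimit);
	if (error == EBUSY) {
		/* The new limit is below the page-rounded current usage. */
		return error;
	}
	return error;
}
#endif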
/*
 * tmpfs_mem_info: return the number of available memory pages.
 *
 * => If 'total' is true, then return the _total_ amount of pages.
 * => If false, then return the amount of _free_ memory pages.
 *
 * Remember to remove uvmexp.freetarg from the returned value to avoid
 * excessive memory usage.
 */
size_t
tmpfs_mem_info(bool total)
{
	size_t size = 0;

	/* Note: unlocked reads of the uvmexp counters; an estimate is OK. */
	size += uvmexp.swpgavail;
	if (!total) {
		size -= uvmexp.swpgonly;
	}
	size += uvm_availmem(true);
	size += uvmexp.filepages;
	if (size > uvmexp.wired) {
		size -= uvmexp.wired;
	} else {
		size = 0;
	}
	return size;
}

/*
 * tmpfs_bytes_max: return the maximum number of bytes this mount may use,
 * i.e. the lesser of the configured memory limit and the bytes already
 * used plus the memory still available in the system.
 */
uint64_t
tmpfs_bytes_max(struct tmpfs_mount *mp)
{
	psize_t freepages = tmpfs_mem_info(false);
	int freetarg = uvmexp.freetarg;	// XXX unlocked
	uint64_t avail_mem;

	/* Keep the page daemon's free target in reserve. */
	if (freepages < freetarg) {
		freepages = 0;
	} else {
		freepages -= freetarg;
	}
	avail_mem = round_page(mp->tm_bytes_used) + (freepages << PAGE_SHIFT);
	return MIN(mp->tm_mem_limit, avail_mem);
}

/*
 * tmpfs_pages_avail: return an estimate of the number of pages this mount
 * may still allocate.
 */
size_t
tmpfs_pages_avail(struct tmpfs_mount *mp)
{

	return (tmpfs_bytes_max(mp) - mp->tm_bytes_used) >> PAGE_SHIFT;
}

/*
 * tmpfs_mem_incr: account for 'sz' more bytes of usage.
 *
 * => Returns true on success, false if the limit would be exceeded.
 */
bool
tmpfs_mem_incr(struct tmpfs_mount *mp, size_t sz)
{
	uint64_t lim;

	mutex_enter(&mp->tm_acc_lock);
	lim = tmpfs_bytes_max(mp);
	if (mp->tm_bytes_used + sz >= lim) {
		mutex_exit(&mp->tm_acc_lock);
		return false;
	}
	mp->tm_bytes_used += sz;
	mutex_exit(&mp->tm_acc_lock);
	return true;
}

/*
 * tmpfs_mem_decr: release 'sz' bytes of accounted usage.
 */
void
tmpfs_mem_decr(struct tmpfs_mount *mp, size_t sz)
{

	mutex_enter(&mp->tm_acc_lock);
	KASSERT(mp->tm_bytes_used >= sz);
	mp->tm_bytes_used -= sz;
	mutex_exit(&mp->tm_acc_lock);
}

struct tmpfs_dirent *
tmpfs_dirent_get(struct tmpfs_mount *mp)
{

	if (!tmpfs_mem_incr(mp, sizeof(struct tmpfs_dirent))) {
		return NULL;
	}
	return pool_get(&tmpfs_dirent_pool, PR_WAITOK);
}

void
tmpfs_dirent_put(struct tmpfs_mount *mp, struct tmpfs_dirent *de)
{

	tmpfs_mem_decr(mp, sizeof(struct tmpfs_dirent));
	pool_put(&tmpfs_dirent_pool, de);
}

struct tmpfs_node *
tmpfs_node_get(struct tmpfs_mount *mp)
{

	/* Enforce the inode count limit first, then the memory limit. */
	if (atomic_inc_uint_nv(&mp->tm_nodes_cnt) >= mp->tm_nodes_max) {
		atomic_dec_uint(&mp->tm_nodes_cnt);
		return NULL;
	}
	if (!tmpfs_mem_incr(mp, sizeof(struct tmpfs_node))) {
		atomic_dec_uint(&mp->tm_nodes_cnt);
		return NULL;
	}
	return pool_get(&tmpfs_node_pool, PR_WAITOK);
}

void
tmpfs_node_put(struct tmpfs_mount *mp, struct tmpfs_node *tn)
{

	atomic_dec_uint(&mp->tm_nodes_cnt);
	tmpfs_mem_decr(mp, sizeof(struct tmpfs_node));
	pool_put(&tmpfs_node_pool, tn);
}

/*
 * Quantum size to round up the tmpfs names in order to reduce re-allocations.
 */
#define	TMPFS_NAME_QUANTUM	(32)

char *
tmpfs_strname_alloc(struct tmpfs_mount *mp, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	KASSERT(sz > 0 && sz <= 1024);
	if (!tmpfs_mem_incr(mp, sz)) {
		return NULL;
	}
	return kmem_alloc(sz, KM_SLEEP);
}

void
tmpfs_strname_free(struct tmpfs_mount *mp, char *str, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	KASSERT(sz > 0 && sz <= 1024);
	tmpfs_mem_decr(mp, sz);
	kmem_free(str, sz);
}

/*
 * tmpfs_strname_neqlen: return true if the component names differ in
 * length or content.
 */
bool
tmpfs_strname_neqlen(struct componentname *fcnp, struct componentname *tcnp)
{
	const size_t fln = fcnp->cn_namelen;
	const size_t tln = tcnp->cn_namelen;

	return (fln != tln) || memcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fln);
}
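/*
 * Illustrative sketch only (hypothetical callers, kept under #if 0 and not
 * compiled): the strname helpers must be paired with the same 'len' that
 * was used for the allocation, since both sides recompute the rounded
 * allocation size from the length.  Names and the ENOSPC choice below are
 * assumptions for illustration.
 */
#if 0
static int
example_store_name(struct tmpfs_mount *mp, const char *name, size_t len,
    char **strp)
{
	char *str;

	str = tmpfs_strname_alloc(mp, len);
	if (str == NULL) {
		/* Memory limit reached. */
		return ENOSPC;
	}
	memcpy(str, name, len);
	*strp = str;
	return 0;
}

static void
example_drop_name(struct tmpfs_mount *mp, char *str, size_t len)
{

	/* Must match the length that was passed to tmpfs_strname_alloc(). */
	tmpfs_strname_free(mp, str, len);
}
#endif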