1 /* $NetBSD: random.c,v 1.8 2020/08/14 00:53:16 riastradh Exp $ */ 2 3 /*- 4 * Copyright (c) 2019 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Taylor R. Campbell. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * /dev/random, /dev/urandom -- stateless version 34 * 35 * For short reads from /dev/urandom, up to 256 bytes, read from a 36 * per-CPU NIST Hash_DRBG instance that is reseeded as soon as the 37 * system has enough entropy. 38 * 39 * For all other reads, instantiate a fresh NIST Hash_DRBG from 40 * the global entropy pool, and draw from it. 
 *
 * Each read is independent; there is no per-open state.
 * Concurrent reads from the same open run in parallel.
 *
 * Reading from /dev/random may block until entropy is available.
 * Either device may return short reads if interrupted.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.8 2020/08/14 00:53:16 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/cprng.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/random.h>
#include <sys/rnd.h>
#include <sys/rndsource.h>
#include <sys/signalvar.h>
#include <sys/systm.h>

#include "ioconf.h"

/* Forward declarations of the cdevsw entry points, via dev_type_*. */
static dev_type_open(random_open);
static dev_type_close(random_close);
static dev_type_ioctl(random_ioctl);
static dev_type_poll(random_poll);
static dev_type_kqfilter(random_kqfilter);
static dev_type_read(random_read);
static dev_type_write(random_write);

/*
 * Character-device switch shared by /dev/random and /dev/urandom;
 * the minor number (RND_DEV_RANDOM/RND_DEV_URANDOM) selects which
 * semantics each entry point provides.  D_MPSAFE: no entry point
 * requires the kernel lock.
 */
const struct cdevsw rnd_cdevsw = {
	.d_open = random_open,
	.d_close = random_close,
	.d_read = random_read,
	.d_write = random_write,
	.d_ioctl = random_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = random_poll,
	.d_mmap = nommap,
	.d_kqfilter = random_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER|D_MPSAFE,
};

/* Size of the bounce buffer used by random_write. */
#define	RANDOM_BUFSIZE	512	/* XXX pulled from arse */

/* Entropy source for writes to /dev/random and /dev/urandom */
static krndsource_t user_rndsource;

/*
 * rndattach(num)
 *
 *	Pseudo-device attach routine.  Registers the rndsource through
 *	which writes to /dev/random and /dev/urandom are entered into
 *	the entropy pool.
 */
void
rndattach(int num)
{

	rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
}

/*
 * random_open(dev, flags, fmt, l)
 *
 *	Open routine.  There is no per-open state to set up, so this
 *	only rejects minors other than /dev/random and /dev/urandom.
 */
static int
random_open(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Validate minor. */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
	case RND_DEV_URANDOM:
		break;
	default:
		return ENXIO;
	}

	return 0;
}

/*
 * random_close(dev, flags, fmt, l)
 *
 *	Close routine.  Nothing to tear down -- there is no per-open
 *	state (see the file comment above).
 */
static int
random_close(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Success! */
	return 0;
}

/*
 * random_ioctl(dev, cmd, data, flag, l)
 *
 *	Accept (and ignore) the generic non-blocking/async fcntls;
 *	everything else is handled by entropy_ioctl, identically for
 *	both minors.
 */
static int
random_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l)
{

	/*
	 * No non-blocking/async options; otherwise defer to
	 * entropy_ioctl.
	 */
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;
	default:
		return entropy_ioctl(cmd, data);
	}
}

/*
 * random_poll(dev, events, l)
 *
 *	Poll routine.  An invalid minor reports no events ready.
 */
static int
random_poll(dev_t dev, int events, struct lwp *l)
{

	/* /dev/random may block; /dev/urandom is always ready. */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		return entropy_poll(events);
	case RND_DEV_URANDOM:
		return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM);
	default:
		return 0;
	}
}

/*
 * random_kqfilter(dev, kn)
 *
 *	Kqueue filter attach routine.  Only read availability on
 *	/dev/random is interesting; every other valid case uses the
 *	always-true seltrue filterops.
 */
static int
random_kqfilter(dev_t dev, struct knote *kn)
{

	/* Validate the event filter. */
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	default:
		return EINVAL;
	}

	/* /dev/random may block; /dev/urandom never does. */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		if (kn->kn_filter == EVFILT_READ)
			return entropy_kqfilter(kn);
		/* FALLTHROUGH */
	case RND_DEV_URANDOM:
		kn->kn_fop = &seltrue_filtops;
		return 0;
	default:
		return ENXIO;
	}
}

/*
 * random_read(dev, uio, flags)
 *
 *	Generate data from a PRNG seeded from the entropy pool.
 *
 *	- If /dev/random, block until we have full entropy, or fail
 *	  with EWOULDBLOCK, and if `depleting' entropy, return at most
 *	  the entropy pool's capacity at once.
 *
 *	- If /dev/urandom, generate data from whatever is in the
 *	  entropy pool now.
 *
 *	On interrupt, return a short read, but not shorter than 256
 *	bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which is
 *	512 for hysterical raisins).
 */
static int
random_read(dev_t dev, struct uio *uio, int flags)
{
	int gflags;

	/*
	 * Set the appropriate GRND_* mode.  The minor number selects
	 * the semantics; the actual transfer is done by dogetrandom.
	 */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		gflags = GRND_RANDOM;
		break;
	case RND_DEV_URANDOM:
		gflags = GRND_INSECURE;
		break;
	default:
		return ENXIO;
	}

	/* Set GRND_NONBLOCK if the user requested FNONBLOCK. */
	if (flags & FNONBLOCK)
		gflags |= GRND_NONBLOCK;

	/* Defer to getrandom. */
	return dogetrandom(uio, gflags);
}

/*
 * random_write(dev, uio, flags)
 *
 *	Enter data from uio into the entropy pool.
 *
 *	Assume privileged users provide full entropy, and unprivileged
 *	users provide no entropy.  If you have a nonuniform source of
 *	data with n bytes of min-entropy, hash it with an XOF like
 *	SHAKE128 into exactly n bytes first.
 */
static int
random_write(dev_t dev, struct uio *uio, int flags)
{
	kauth_cred_t cred = kauth_cred_get();
	uint8_t *buf;
	bool privileged = false, any = false;
	int error = 0;

	/* Verify user's authorization to affect the entropy pool. */
	error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	/*
	 * Check whether user is privileged.  If so, assume user
	 * furnishes full-entropy data; if not, accept user's data but
	 * assume it has zero entropy when we do accounting.  If you
	 * want to specify less entropy, use ioctl(RNDADDDATA).
	 */
	if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
	    NULL, NULL, NULL, NULL) == 0)
		privileged = true;

	/* Get a buffer for transfers. */
	buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP);

	/* Consume data, RANDOM_BUFSIZE bytes at a time. */
	while (uio->uio_resid) {
		size_t n = MIN(uio->uio_resid, RANDOM_BUFSIZE);

		/* Transfer n bytes in and enter them into the pool. */
		error = uiomove(buf, n, uio);
		if (error)
			break;
		/* Credit n*NBBY bits -- full entropy -- only if privileged. */
		rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0);
		any = true;

		/* Yield if requested. */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();

		/*
		 * Check for interruption.  Data already entered into
		 * the pool stays there; we just stop early with EINTR.
		 */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    sigispending(curlwp, 0)) {
			error = EINTR;
			break;
		}
	}

	/*
	 * Zero the buffer and free it.  explicit_memset so the
	 * compiler cannot elide the wipe of potential entropy.
	 */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	kmem_free(buf, RANDOM_BUFSIZE);

	/* If we added anything, consolidate entropy now. */
	if (any)
		entropy_consolidate();

	return error;
}