/*	$NetBSD: drm_cdevsw.c,v 1.31 2024/04/21 03:02:39 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_cdevsw.c,v 1.31 2024/04/21 03:02:39 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/ioccom.h>
#include <sys/kauth.h>
#ifndef _MODULE
/* XXX Mega-kludge because modules are broken.  */
#include <sys/once.h>
#endif
#include <sys/pmf.h>
#include <sys/poll.h>
#ifndef _MODULE
#include <sys/reboot.h>		/* XXX drm_init kludge */
#endif
#include <sys/select.h>

#include <uvm/uvm_extern.h>

#include <linux/err.h>

#include <linux/pm.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_irq.h>
#include <drm/drm_legacy.h>

#include "../dist/drm/drm_internal.h"
#include "../dist/drm/drm_legacy.h"

static dev_type_open(drm_open);

static int	drm_close(struct file *);
static int	drm_read(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	drm_dequeue_event(struct drm_file *, size_t,
		    struct drm_pending_event **, int);
static int	drm_ioctl_shim(struct file *, unsigned long, void *);
static int	drm_poll(struct file *, int);
static int	drm_kqfilter(struct file *, struct knote *);
static int	drm_stat(struct file *, struct stat *);
static int	drm_fop_mmap(struct file *, off_t *, size_t, int, int *, int *,
		    struct uvm_object **, int *);
static void	drm_requeue_event(struct drm_file *, struct drm_pending_event *);

static paddr_t	drm_legacy_mmap(dev_t, off_t, int);

const struct cdevsw drm_cdevsw = {
	.d_open = drm_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = drm_legacy_mmap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	/* XXX was D_TTY | D_NEGOFFSAFE */
	/* XXX Add D_MPSAFE some day... */
	.d_flag = D_NEGOFFSAFE,
};

const struct fileops drm_fileops = {
	.fo_name = "drm",
	.fo_read = drm_read,
	.fo_write = fbadop_write,
	.fo_ioctl = drm_ioctl_shim,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = drm_poll,
	.fo_stat = drm_stat,
	.fo_close = drm_close,
	.fo_kqfilter = drm_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = drm_fop_mmap,
};

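/*
 * drm_open is the only real cdevsw entry point besides the legacy
 * d_mmap hook: it clones a struct file that uses drm_fileops above,
 * so all further I/O on the descriptor goes through those fileops
 * rather than through the cdevsw.
 */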
static int
drm_open(dev_t d, int flags, int fmt, struct lwp *l)
{
	struct drm_minor *dminor;
	struct drm_device *dev;
	bool lastclose;
	int fd;
	struct file *fp;
	struct drm_file *priv;
	int need_setup = 0;
	int error;

	error = drm_guarantee_initialized();
	if (error)
		goto fail0;

	/* Synchronize with drm_file.c, drm_open and drm_open_helper.  */

	if (flags & O_EXCL) {
		error = EBUSY;
		goto fail0;
	}

	dminor = drm_minor_acquire(minor(d));
	if (IS_ERR(dminor)) {
		/* XXX errno Linux->NetBSD */
		error = -PTR_ERR(dminor);
		goto fail0;
	}
	dev = dminor->dev;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON) {
		error = EINVAL;
		goto fail1;
	}

	mutex_lock(&drm_global_mutex);
	if (dev->open_count == INT_MAX) {
		mutex_unlock(&drm_global_mutex);
		error = EBUSY;
		goto fail1;
	}
	if (dev->open_count++ == 0)
		need_setup = 1;
	mutex_unlock(&drm_global_mutex);

	error = fd_allocfile(&fp, &fd);
	if (error)
		goto fail2;

	priv = drm_file_alloc(dminor);
	if (IS_ERR(priv)) {
		/* XXX errno Linux->NetBSD */
		error = -PTR_ERR(priv);
		goto fail3;
	}

	if (drm_is_primary_client(priv)) {
		/* XXX errno Linux->NetBSD */
		error = -drm_master_open(priv);
		if (error)
			goto fail4;
	}
	priv->filp = fp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);
	/* XXX Alpha hose? */

	if (need_setup) {
		/* XXX errno Linux->NetBSD */
		error = -drm_legacy_setup(dev);
		if (error)
			goto fail5;
	}

	error = fd_clone(fp, fd, flags, &drm_fileops, priv);
	KASSERT(error == EMOVEFD); /* XXX */

	/* Success!  (But error has to be EMOVEFD, not 0.)  */
	return error;

fail5:	mutex_lock(&dev->filelist_mutex);
	list_del(&priv->lhead);
	mutex_unlock(&dev->filelist_mutex);
fail4:	drm_file_free(priv);
fail3:	fd_abort(curproc, fp, fd);
fail2:	mutex_lock(&drm_global_mutex);
	KASSERT(0 < dev->open_count);
	--dev->open_count;
	lastclose = (dev->open_count == 0);
	mutex_unlock(&drm_global_mutex);
	if (lastclose)
		drm_lastclose(dev);
fail1:	drm_minor_release(dminor);
fail0:	KASSERT(error);
	if (error == ERESTARTSYS)
		error = ERESTART;
	return error;
}

static int
drm_close(struct file *fp)
{
	struct drm_file *const priv = fp->f_data;
	struct drm_minor *const dminor = priv->minor;
	struct drm_device *const dev = dminor->dev;
	bool lastclose;

	/* Synchronize with drm_file.c, drm_release.  */

	mutex_lock(&dev->filelist_mutex);
	list_del(&priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	drm_file_free(priv);

	mutex_lock(&drm_global_mutex);
	KASSERT(0 < dev->open_count);
	--dev->open_count;
	lastclose = (dev->open_count == 0);
	mutex_unlock(&drm_global_mutex);

	if (lastclose)
		drm_lastclose(dev);

	drm_minor_release(dminor);

	return 0;
}

static int
drm_read(struct file *fp, off_t *off, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct drm_file *const file = fp->f_data;
	struct drm_device *const dev = file->minor->dev;
	struct drm_pending_event *event;
	bool first;
	int ret = 0;

	/*
	 * Only one event reader at a time, so that if copyout faults
	 * after dequeueing one event and we have to put the event
	 * back, another reader won't see out-of-order events.
	 */
	spin_lock(&dev->event_lock);
	DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &file->event_read_wq, &dev->event_lock,
	    file->event_read_lock == NULL);
	if (ret) {
		spin_unlock(&dev->event_lock);
		/* XXX errno Linux->NetBSD */
		return -ret;
	}
	file->event_read_lock = curlwp;
	spin_unlock(&dev->event_lock);

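	/*
	 * Transfer events one at a time.  Only the first dequeue may
	 * block, and only if the file is not in non-blocking mode;
	 * after the first event we switch to FNONBLOCK so that we
	 * return what has already been copied out rather than sleep
	 * for more events.
	 */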
	for (first = true; ; first = false) {
		int f = 0;
		off_t offset;
		size_t resid;

		if (!first || ISSET(fp->f_flag, FNONBLOCK))
			f |= FNONBLOCK;

		ret = drm_dequeue_event(file, uio->uio_resid, &event, f);
		if (ret) {
			if ((ret == -EWOULDBLOCK) && !first)
				ret = 0;
			break;
		}
		if (event == NULL)
			break;

		offset = uio->uio_offset;
		resid = uio->uio_resid;
		/* XXX errno NetBSD->Linux */
		ret = -uiomove(event->event, event->event->length, uio);
		if (ret) {
			/*
			 * Faulted on copyout.  Put the event back and
			 * stop here.
			 */
			if (!first) {
				/*
				 * Already transferred some events.
				 * Rather than back them all out, just
				 * say we succeeded at returning those.
				 */
				ret = 0;
			}
			uio->uio_offset = offset;
			uio->uio_resid = resid;
			drm_requeue_event(file, event);
			break;
		}
		kfree(event);
	}

	/* Release the event read lock.  */
	spin_lock(&dev->event_lock);
	KASSERT(file->event_read_lock == curlwp);
	file->event_read_lock = NULL;
	DRM_SPIN_WAKEUP_ONE(&file->event_read_wq, &dev->event_lock);
	spin_unlock(&dev->event_lock);

	/* XXX errno Linux->NetBSD */

	/* Success!  */
	if (ret == ERESTARTSYS)
		ret = ERESTART;
	return -ret;
}

static int
drm_dequeue_event(struct drm_file *file, size_t max_length,
    struct drm_pending_event **eventp, int flags)
{
	struct drm_device *const dev = file->minor->dev;
	struct drm_pending_event *event = NULL;
	unsigned long irqflags;
	int ret = 0;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	if (ISSET(flags, FNONBLOCK)) {
		if (list_empty(&file->event_list))
			ret = -EWOULDBLOCK;
	} else {
		DRM_SPIN_WAIT_UNTIL(ret, &file->event_wait, &dev->event_lock,
		    !list_empty(&file->event_list));
	}
	if (ret)
		goto out;

	event = list_first_entry(&file->event_list, struct drm_pending_event,
	    link);
	if (event->event->length > max_length) {
		/* Event is too large, can't return it.  */
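		/*
		 * Leave it on the queue; hand back a null event and
		 * ret = 0 so that drm_read stops without error.
		 */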
		event = NULL;
		ret = 0;
		goto out;
	}

	file->event_space += event->event->length;
	list_del(&event->link);

out:	spin_unlock_irqrestore(&dev->event_lock, irqflags);
	*eventp = event;
	return ret;
}

static void
drm_requeue_event(struct drm_file *file, struct drm_pending_event *event)
{
	struct drm_device *const dev = file->minor->dev;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	list_add(&event->link, &file->event_list);
	KASSERT(file->event_space >= event->event->length);
	file->event_space -= event->event->length;
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static int
drm_ioctl_shim(struct file *fp, unsigned long cmd, void *data)
{
	struct drm_file *file = fp->f_data;
	struct drm_driver *driver = file->minor->dev->driver;
	int error;

	if (driver->ioctl_override)
		error = driver->ioctl_override(fp, cmd, data);
	else
		error = drm_ioctl(fp, cmd, data);
	if (error == ERESTARTSYS)
		error = ERESTART;

	return error;
}

static int
drm_poll(struct file *fp, int events)
{
	struct drm_file *const file = fp->f_data;
	struct drm_device *const dev = file->minor->dev;
	int revents = 0;
	unsigned long irqflags;

	if (!ISSET(events, (POLLIN | POLLRDNORM)))
		return 0;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	if (list_empty(&file->event_list))
		selrecord(curlwp, &file->event_selq);
	else
		revents |= (events & (POLLIN | POLLRDNORM));
	spin_unlock_irqrestore(&dev->event_lock, irqflags);

	return revents;
}

static void	filt_drm_detach(struct knote *);
static int	filt_drm_event(struct knote *, long);

static const struct filterops drm_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_drm_detach,
	.f_event = filt_drm_event,
};

static int
drm_kqfilter(struct file *fp, struct knote *kn)
{
	struct drm_file *const file = fp->f_data;
	struct drm_device *const dev = file->minor->dev;
	unsigned long irqflags;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &drm_filtops;
		kn->kn_hook = file;
		spin_lock_irqsave(&dev->event_lock, irqflags);
		selrecord_knote(&file->event_selq, kn);
		spin_unlock_irqrestore(&dev->event_lock, irqflags);
		return 0;
	case EVFILT_WRITE:
	default:
		return EINVAL;
	}
}

static void
filt_drm_detach(struct knote *kn)
{
	struct drm_file *const file = kn->kn_hook;
	struct drm_device *const dev = file->minor->dev;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	selremove_knote(&file->event_selq, kn);
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

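/*
 * A NOTE_SUBMIT hint means the caller already holds dev->event_lock,
 * so only assert that it is held rather than take it again.
 */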
static int
filt_drm_event(struct knote *kn, long hint)
{
	struct drm_file *const file = kn->kn_hook;
	struct drm_device *const dev = file->minor->dev;
	unsigned long irqflags;
	int ret;

	if (hint == NOTE_SUBMIT)
		KASSERT(spin_is_locked(&dev->event_lock));
	else
		spin_lock_irqsave(&dev->event_lock, irqflags);
	if (list_empty(&file->event_list)) {
		ret = 0;
	} else {
		struct drm_pending_event *const event =
		    list_first_entry(&file->event_list,
			struct drm_pending_event, link);
		kn->kn_data = event->event->length;
		ret = 1;
	}
	if (hint == NOTE_SUBMIT)
		KASSERT(spin_is_locked(&dev->event_lock));
	else
		spin_unlock_irqrestore(&dev->event_lock, irqflags);

	return ret;
}

static int
drm_stat(struct file *fp, struct stat *st)
{
	struct drm_file *const file = fp->f_data;
	struct drm_minor *const dminor = file->minor;
	const dev_t devno = makedev(cdevsw_lookup_major(&drm_cdevsw),
	    dminor->index);

	(void)memset(st, 0, sizeof(*st));

	st->st_dev = devno;
	st->st_ino = 0;		/* XXX (dev,ino) uniqueness bleh */
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
	st->st_mode = S_IFCHR;	/* XXX what? */
	st->st_rdev = devno;
	/* XXX what else? */

	return 0;
}

static int
drm_fop_mmap(struct file *fp, off_t *offp, size_t len, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	struct drm_file *const file = fp->f_data;
	struct drm_device *const dev = file->minor->dev;
	int error;

	KASSERT(fp == file->filp);
	KASSERT(len > 0);

	/* XXX errno Linux->NetBSD */
	error = -(*dev->driver->mmap_object)(dev, *offp, len, prot, uobjp,
	    offp, file->filp);
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;
	if (error == ERESTARTSYS)
		error = ERESTART;
	return error;
}

static paddr_t
drm_legacy_mmap(dev_t d, off_t offset, int prot)
{
	struct drm_minor *dminor;
	paddr_t paddr;

	dminor = drm_minor_acquire(minor(d));
	if (IS_ERR(dminor))
		return (paddr_t)-1;

	paddr = drm_legacy_mmap_paddr(dminor->dev, offset, prot);

	drm_minor_release(dminor);
	return paddr;
}