/**
 * \file drm_drv.c
 * Generic driver template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * To use this template, you must at least define the following (samples
 * given for the MGA driver):
 *
 * \code
 * #define DRIVER_AUTHOR	"VA Linux Systems, Inc."
 *
 * #define DRIVER_NAME		"mga"
 * #define DRIVER_DESC		"Matrox G200/G400"
 * #define DRIVER_DATE		"20001127"
 *
 * #define drm_x		mga_##x
 * \endcode
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/devfs.h>

#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>

#if DRM_DEBUG_DEFAULT_ON == 1
#define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
    DRM_DEBUGBITS_FAILED_IOCTL)
#elif DRM_DEBUG_DEFAULT_ON == 2
#define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
    DRM_DEBUGBITS_FAILED_IOCTL | DRM_DEBUGBITS_VERBOSE)
#else
#define DRM_DEBUGBITS_ON (0x0)
#endif

int drm_notyet_flag = 0;

static int drm_load(struct drm_device *dev);
drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);

#define DRIVER_SOFTC(unit) \
	((struct drm_device *)devclass_get_softc(drm_devclass, unit))

static int
drm_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
		break;
	}
	return (0);
}

static moduledata_t drm_mod = {
	"drm",
	drm_modevent,
	0
};
DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);
MODULE_DEPEND(drm, iicbus, 1, 1, 1);
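
/*
 * Usage note: "drm.debug" and "drm.notyet" above are loader tunables fetched
 * from the kernel environment at module load, so they are normally set from
 * /boot/loader.conf, for example:
 *
 *	drm.debug=1
 *
 * drm_debug can also be changed at runtime through the hw.drm.debug sysctl
 * declared further below.
 */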

#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}

/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),

	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),

	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),

#if 0
	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

#define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE(drm_ioctls)

static struct dev_ops drm_cdevsw = {
	{ "drm", 0, D_TRACKCLOSE | D_MPSAFE },
	.d_open =	drm_open,
	.d_close =	drm_close,
	.d_read =	drm_read,
	.d_ioctl =	drm_ioctl,
	.d_kqfilter =	drm_kqfilter,
	.d_mmap =	drm_mmap,
	.d_mmap_single = drm_mmap_single,
};

/* Disabled by default: MSI has been seen to cause freezes with i915. */
static int drm_msi = 0;
TUNABLE_INT("hw.drm.msi.enable", &drm_msi);
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_NODE(_hw_drm, OID_AUTO, msi, CTLFLAG_RW, NULL, "DRM device msi");
SYSCTL_INT(_hw_drm_msi, OID_AUTO, enable, CTLFLAG_RD, &drm_msi, 0,
    "Enable MSI interrupts for drm devices");
SYSCTL_INT(_hw_drm, OID_AUTO, debug, CTLFLAG_RW, &drm_debug, 0,
    "DRM debugging");

static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
	{0x8086, 0x2772}, /* Intel i945G */
	{0x8086, 0x27A2}, /* Intel i945GM */
	{0x8086, 0x27AE}, /* Intel i945GME */
	{0, 0}
};

static int drm_msi_is_blacklisted(struct drm_device *dev, unsigned long flags)
{
	int i = 0;

	if (dev->driver->use_msi != NULL) {
		int use_msi;

		use_msi = dev->driver->use_msi(dev, flags);

		return (!use_msi);
	}

	/* TODO: Maybe move this to a callback in i915? */
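	/*
	 * No driver use_msi() callback was provided, so fall back to the
	 * static vendor/device blacklist above (terminated by {0, 0}).
	 */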
	for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
		if ((drm_msi_blacklist[i].vendor == dev->pci_vendor) &&
		    (drm_msi_blacklist[i].device == dev->pci_device)) {
			return 1;
		}
	}

	return 0;
}

int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
	drm_pci_id_list_t *id_entry;
	int vendor, device;

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);

	if (pci_get_class(kdev) != PCIC_DISPLAY)
		return ENXIO;

	id_entry = drm_find_description(vendor, device, idlist);
	if (id_entry != NULL) {
		if (!device_get_desc(kdev)) {
			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
			device_set_desc(kdev, id_entry->name);
		}
		return 0;
	}

	return ENXIO;
}

int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
	struct drm_device *dev;
	drm_pci_id_list_t *id_entry;
	int unit, error;
	u_int irq_flags;
	int msi_enable;

	unit = device_get_unit(kdev);
	dev = device_get_softc(kdev);

	if (!strcmp(device_get_name(kdev), "drmsub"))
		dev->dev = device_get_parent(kdev);
	else
		dev->dev = kdev;

	dev->pci_domain = pci_get_domain(dev->dev);
	dev->pci_bus = pci_get_bus(dev->dev);
	dev->pci_slot = pci_get_slot(dev->dev);
	dev->pci_func = pci_get_function(dev->dev);

	dev->pci_vendor = pci_get_vendor(dev->dev);
	dev->pci_device = pci_get_device(dev->dev);
	dev->pci_subvendor = pci_get_subvendor(dev->dev);
	dev->pci_subdevice = pci_get_subdevice(dev->dev);

	id_entry = drm_find_description(dev->pci_vendor,
	    dev->pci_device, idlist);
	dev->id_entry = id_entry;

	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
		msi_enable = drm_msi;

		if (drm_msi_is_blacklisted(dev, dev->id_entry->driver_private)) {
			msi_enable = 0;
		}

		dev->irq_type = pci_alloc_1intr(dev->dev, msi_enable,
		    &dev->irqrid, &irq_flags);

		dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
		    &dev->irqrid, irq_flags);

		if (!dev->irqr) {
			return (ENOENT);
		}

		dev->irq = (int) rman_get_start(dev->irqr);
	}

	lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
	lwkt_serialize_init(&dev->irq_lock);
	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
	lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);

	error = drm_load(dev);
	if (error)
		goto error;

	error = drm_create_cdevs(kdev);
	if (error)
		goto error;

	return (error);
error:
	if (dev->irqr) {
		bus_release_resource(dev->dev, SYS_RES_IRQ,
		    dev->irqrid, dev->irqr);
	}
	if (dev->irq_type == PCI_INTR_TYPE_MSI) {
		pci_release_msi(dev->dev);
	}
	return (error);
}

int
drm_create_cdevs(device_t kdev)
{
	struct drm_device *dev;
	int error, unit;

	unit = device_get_unit(kdev);
	dev = device_get_softc(kdev);

	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
	    DRM_DEV_MODE, "dri/card%d", unit);
	error = 0;
	if (error == 0)
		dev->devnode->si_drv1 = dev;
	return (error);
}

#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif

devclass_t drm_devclass;

drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
	int i = 0;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    ((idlist[i].device == device) ||
		    (idlist[i].device == 0))) {
			return &idlist[i];
		}
	}
	return NULL;
}
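
/*
 * A driver front-end is expected to supply its own PCI id list and hand it
 * to drm_probe()/drm_attach() from its device methods; drm_find_description()
 * above scans that list. Roughly (names are illustrative only):
 *
 *	static int foo_probe(device_t kdev)
 *	{
 *		return drm_probe(kdev, foo_pciidlist);
 *	}
 */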

/**
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 *
 * \sa drm_device
 */
int drm_lastclose(struct drm_device *dev)
{
	drm_magic_entry_t *pt, *next;

	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
		drm_irq_uninstall(dev);

	DRM_LOCK(dev);
	if (dev->unique) {
		drm_free(dev->unique, M_DRM);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	/* Clear pid list */
	if (dev->magicfree.next) {
		list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
			list_del(&pt->head);
			drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
			kfree(pt);
		}
		drm_ht_remove(&dev->magiclist);
	}

	/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp intact until
		 * drm_unload is called.
		 */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound)
				drm_agp_unbind_memory(entry->handle);
			drm_agp_free_memory(entry->handle);
			drm_free(entry, M_DRM);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
	if (dev->sg != NULL) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}

	drm_dma_takedown(dev);
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.file_priv = NULL;
		DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
	}
	DRM_UNLOCK(dev);

	return 0;
}

static int drm_load(struct drm_device *dev)
{
	int i, retcode;

	DRM_DEBUG("\n");

	INIT_LIST_HEAD(&dev->maplist);

	drm_mem_init();
	drm_sysctl_init(dev);
	INIT_LIST_HEAD(&dev->filelist);

	dev->counters = 6;
	dev->types[0] = _DRM_STAT_LOCK;
	dev->types[1] = _DRM_STAT_OPENS;
	dev->types[2] = _DRM_STAT_CLOSES;
	dev->types[3] = _DRM_STAT_IOCTLS;
	dev->types[4] = _DRM_STAT_LOCKS;
	dev->types[5] = _DRM_STAT_UNLOCKS;

	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init();
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
		    dev->agp == NULL) {
			DRM_ERROR("Card isn't AGP, or couldn't initialize "
			    "AGP.\n");
			retcode = ENOMEM;
			goto error;
		}
		if (dev->agp != NULL && dev->agp->agp_info.ai_aperture_base != 0) {
			if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
			    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->agp_mtrr = 1;
		}
	}

	retcode = drm_ctxbitmap_init(dev);
	if (retcode != 0) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto error;
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		retcode = drm_gem_init(dev);
		if (retcode != 0) {
			DRM_ERROR("Cannot initialize graphics execution "
			    "manager (GEM)\n");
			goto error1;
		}
	}

	if (dev->driver->load != NULL) {
		DRM_LOCK(dev);
		/* Shared code returns -errno. */
		retcode = -dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (pci_enable_busmaster(dev->dev))
			DRM_ERROR("Request to enable bus-master failed.\n");
		DRM_UNLOCK(dev);
		if (retcode != 0)
			goto error1;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    dev->driver->name,
	    dev->driver->major,
	    dev->driver->minor,
	    dev->driver->patchlevel,
	    dev->driver->date);

	return 0;

error1:
	drm_gem_destroy(dev);
error:
	drm_ctxbitmap_cleanup(dev);
	drm_sysctl_cleanup(dev);
	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	lockuninit(&dev->vbl_lock);
	lockuninit(&dev->dev_lock);
	lockuninit(&dev->event_lock);
	lockuninit(&dev->struct_mutex);

	return retcode;
}

/**
 * Get version information.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_version structure.
 * \param file_priv DRM file private.
 * \return zero on success or errno on failure.
 *
 * Fills in the version information in \p data.
 */
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_version *version = data;
	int len;

#define DRM_COPY(name, value)						\
	len = strlen(value);						\
	if (len > name##_len)						\
		len = name##_len;					\
	name##_len = strlen(value);					\
	if (len && name) {						\
		if (DRM_COPY_TO_USER(name, value, len))			\
			return EFAULT;					\
	}

	version->version_major = dev->driver->major;
	version->version_minor = dev->driver->minor;
	version->version_patchlevel = dev->driver->patchlevel;

	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}

/*
 * Stub is needed for devfs
 */
int drm_close(struct dev_close_args *ap)
{
	return 0;
}

void drm_cdevpriv_dtor(void *cd)
{
	struct drm_file *file_priv = cd;
	struct drm_device *dev = file_priv->dev;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK(dev);

	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
		    DRM_CURRENTPID,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);

		drm_lock_free(&dev->lock,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		/* FIXME: may require heavy-handed reset of hardware at this
		 * point, possibly processed via a callback to the X server.
		 */
	} else if (dev->driver->reclaim_buffers_locked != NULL &&
	    dev->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers */
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
				dev->lock.file_priv = file_priv;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
				break;	/* Got lock */
			}
			/* Contention */
			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
			    PCATCH, "drmlk2", 0);
			if (retcode)
				break;
		}
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
			drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked)
		drm_reclaim_buffers(dev, file_priv);

	funsetown(&dev->buf_sigio);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	list_del(&file_priv->lhead);

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	device_unbusy(dev->dev);
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK(dev);
}

/**
 * Called whenever a process performs an ioctl on /dev/drm.
 *
 * \param ap ioctl method arguments (device, command, data, file pointer).
 * \return zero on success or errno on failure.
 *
 * Looks up the ioctl function in the ::ioctls table, checking for root
 * privileges if so required, and dispatches to the respective function.
 */
int drm_ioctl(struct dev_ioctl_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	struct drm_device *dev;
	struct drm_ioctl_desc *ioctl = NULL;
	u_long cmd = ap->a_cmd;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	int retcode = 0;
	caddr_t data = ap->a_data;
	struct thread *p = curthread;
	int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
	int is_driver_ioctl = 0;
	struct drm_file *file_priv;

	dev = drm_get_device_from_kdev(kdev);

	retcode = devfs_get_cdevpriv(ap->a_fp, (void **)&file_priv);
	if (retcode != 0) {
		DRM_ERROR("can't find authenticator\n");
		return EINVAL;
	}

	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
		DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
		return EINVAL;
	}

	ioctl = &drm_ioctls[nr];
	/* It's not a core DRM ioctl, try driver-specific. */
	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
		nr -= DRM_COMMAND_BASE;
		if (nr >= dev->driver->num_ioctls) {
			return EINVAL;
		}
		ioctl = &dev->driver->ioctls[nr];
		is_driver_ioctl = 1;
	}

	DRM_DEBUG_VERBOSE("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
	    DRM_CURRENTPID, cmd, nr, (long)dev->dev,
	    file_priv->authenticated);

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (func == NULL) {
		DRM_DEBUG("no function\n");
		return EINVAL;
	}

	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
	    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !file_priv->master))
		return EACCES;

	if (is_driver_ioctl) {
		if ((ioctl->flags & DRM_UNLOCKED) == 0)
			mutex_lock(&drm_global_mutex);
		/* shared code returns -errno */
		retcode = -func(dev, data, file_priv);
		if ((ioctl->flags & DRM_UNLOCKED) == 0)
			mutex_unlock(&drm_global_mutex);
	} else {
		retcode = func(dev, data, file_priv);
	}

	if (!ioctl)
		DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02lx, nr=0x%02x\n",
		    DRM_CURRENTPID,
		    (long)dev->dev,
		    file_priv->authenticated, cmd, nr);

	if (retcode)
		DRM_DEBUG("ret = %d\n", retcode);
	return retcode;
}

drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}

	return NULL;
}

int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
	struct sysctl_oid *oid;

	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
	    "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
	    dev->pci_slot, dev->pci_func);
	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
	    CTLFLAG_RD, dev->busid_str, 0, NULL);
	if (oid == NULL)
		return (ENOMEM);
	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
	if (oid == NULL)
		return (ENOMEM);

	return (0);
}

int
drm_mmap_single(struct dev_mmap_single_args *ap)
{
	struct drm_device *dev;
	struct cdev *kdev = ap->a_head.a_dev;
	vm_ooffset_t *offset = ap->a_offset;
	vm_size_t size = ap->a_size;
	struct vm_object **obj_res = ap->a_object;
	int nprot = ap->a_nprot;

	dev = drm_get_device_from_kdev(kdev);
	if (dev->drm_ttm_bdev != NULL) {
		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
		    obj_res, nprot));
	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
	} else {
		return (ENODEV);
	}
}

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN	0x6400
#define LINUX_IOCTL_DRM_MAX	0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args *args)
{
	int error;
	int cmd = args->cmd;

	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return error;
}
#endif /* DRM_LINUX */

static int
drm_core_init(void *arg)
{

	drm_global_init();

#if DRM_LINUX
	linux_ioctl_register_handler(&drm_handler);
#endif /* DRM_LINUX */

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
	return 0;
}

static void
drm_core_exit(void *arg)
{

#if DRM_LINUX
	linux_ioctl_unregister_handler(&drm_handler);
#endif /* DRM_LINUX */

	drm_global_release();
}

SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_init, NULL);
SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_exit, NULL);

/*
 * Check if dmi_system_id structure matches system DMI data
 */
static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;
	bool found = false;
	char *sys_vendor, *board_vendor, *product_name, *board_name;

	sys_vendor = kgetenv("smbios.system.maker");
	board_vendor = kgetenv("smbios.planar.maker");
	product_name = kgetenv("smbios.system.product");
	board_name = kgetenv("smbios.planar.product");

	for (i = 0; i < NELEM(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		switch (slot) {
		case DMI_NONE:
			break;
		case DMI_SYS_VENDOR:
			if (sys_vendor != NULL &&
			    !strcmp(sys_vendor, dsi->matches[i].substr))
				break;
			else
				goto done;
		case DMI_BOARD_VENDOR:
			if (board_vendor != NULL &&
			    !strcmp(board_vendor, dsi->matches[i].substr))
				break;
			else
				goto done;
		case DMI_PRODUCT_NAME:
			if (product_name != NULL &&
			    !strcmp(product_name, dsi->matches[i].substr))
				break;
			else
				goto done;
		case DMI_BOARD_NAME:
			if (board_name != NULL &&
			    !strcmp(board_name, dsi->matches[i].substr))
				break;
			else
				goto done;
		default:
			goto done;
		}
	}
	found = true;

done:
	if (sys_vendor != NULL)
		kfreeenv(sys_vendor);
	if (board_vendor != NULL)
		kfreeenv(board_vendor);
	if (product_name != NULL)
		kfreeenv(product_name);
	if (board_name != NULL)
		kfreeenv(board_name);

	return found;
}

bool
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	int num = 0;

	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			num++;
			if (dsi->callback && dsi->callback(dsi))
				break;
		}
	}
	return (num);
}