/**
 * \file drm_drv.c
 * Generic driver template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * To use this template, you must at least define the following (samples
 * given for the MGA driver):
 *
 * \code
 * #define DRIVER_AUTHOR	"VA Linux Systems, Inc."
 *
 * #define DRIVER_NAME		"mga"
 * #define DRIVER_DESC		"Matrox G200/G400"
 * #define DRIVER_DATE		"20001127"
 *
 * #define drm_x		mga_##x
 * \endcode
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/devfs.h>

#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>

#if DRM_DEBUG_DEFAULT_ON == 1
#define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
    DRM_DEBUGBITS_FAILED_IOCTL)
#elif DRM_DEBUG_DEFAULT_ON == 2
#define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
    DRM_DEBUGBITS_FAILED_IOCTL | DRM_DEBUGBITS_VERBOSE)
#else
#define DRM_DEBUGBITS_ON (0x0)
#endif

int drm_debug = DRM_DEBUGBITS_ON;
int drm_notyet_flag = 0;

unsigned int drm_vblank_offdelay = 5000;	/* Default to 5000 msecs. */
unsigned int drm_timestamp_precision = 20;	/* Default to 20 usecs. */
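
/*
 * drm_debug and drm_notyet_flag can be overridden at boot time via the
 * "drm.debug" and "drm.notyet" tunables, fetched in drm_modevent() below.
 * MSI usage is controlled separately by the "hw.drm.msi.enable" tunable.
 */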

static int drm_load(struct drm_device *dev);
drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);

#define DRIVER_SOFTC(unit) \
        ((struct drm_device *)devclass_get_softc(drm_devclass, unit))

static int
drm_modevent(module_t mod, int type, void *data)
{

        switch (type) {
        case MOD_LOAD:
                TUNABLE_INT_FETCH("drm.debug", &drm_debug);
                TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
                break;
        }
        return (0);
}

static moduledata_t drm_mod = {
        "drm",
        drm_modevent,
        0
};
DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);
MODULE_DEPEND(drm, iicbus, 1, 1, 1);

#define DRM_IOCTL_DEF(ioctl, func, flags) \
        [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
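
/*
 * Ioctl permission flags, enforced in drm_ioctl():
 *
 *  DRM_AUTH          - caller must be authenticated against the master
 *  DRM_MASTER        - caller must be the DRM master
 *  DRM_ROOT_ONLY     - caller must pass the DRM_SUSER() check
 *  DRM_UNLOCKED      - for driver ioctls, dispatch without taking
 *                      drm_global_mutex
 *  DRM_CONTROL_ALLOW - carried over from the shared Linux code (control-node
 *                      permission); not checked in this file
 */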

/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),

        DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),

        DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),

        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),

#if 0
        DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
#endif

        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

#define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE(drm_ioctls)

static struct dev_ops drm_cdevsw = {
        { "drm", 0, D_TRACKCLOSE | D_MPSAFE },
        .d_open =	drm_open,
        .d_close =	drm_close,
        .d_read =	drm_read,
        .d_ioctl =	drm_ioctl,
        .d_kqfilter =	drm_kqfilter,
        .d_mmap =	drm_mmap,
        .d_mmap_single = drm_mmap_single,
};
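
/*
 * The entry points above are attached to the per-unit /dev/dri/card%d node
 * created in drm_create_cdevs() below.
 */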

static int drm_msi = 0;	/* Disabled by default: MSI has been seen to cause
			   freezes when used with i915. */
TUNABLE_INT("hw.drm.msi.enable", &drm_msi);
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_NODE(_hw_drm, OID_AUTO, msi, CTLFLAG_RW, NULL, "DRM device msi");
SYSCTL_INT(_hw_drm_msi, OID_AUTO, enable, CTLFLAG_RD, &drm_msi, 0,
    "Enable MSI interrupts for drm devices");

static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
        {0x8086, 0x2772}, /* Intel i945G */
        {0x8086, 0x27A2}, /* Intel i945GM */
        {0x8086, 0x27AE}, /* Intel i945GME */
        {0, 0}
};

static int drm_msi_is_blacklisted(struct drm_device *dev, unsigned long flags)
{
        int i = 0;

        if (dev->driver->use_msi != NULL) {
                int use_msi;

                use_msi = dev->driver->use_msi(dev, flags);

                return (!use_msi);
        }

        /* TODO: Maybe move this to a callback in i915? */
        for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
                if ((drm_msi_blacklist[i].vendor == dev->pci_vendor) &&
                    (drm_msi_blacklist[i].device == dev->pci_device)) {
                        return 1;
                }
        }

        return 0;
}
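
/*
 * Illustrative sketch only (assumes the callback signature inferred from the
 * call above): a driver can take over the MSI decision by providing its own
 * use_msi callback, in which case the PCI-id blacklist is skipped entirely:
 *
 *	static int mydrv_use_msi(struct drm_device *dev, unsigned long flags)
 *	{
 *		return (dev->pci_device != 0x1234);	(0x1234 is a made-up id)
 *	}
 *
 * drm_msi_is_blacklisted() then simply returns !use_msi(dev, flags).
 */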

int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
        drm_pci_id_list_t *id_entry;
        int vendor, device;

        vendor = pci_get_vendor(kdev);
        device = pci_get_device(kdev);

        if (pci_get_class(kdev) != PCIC_DISPLAY)
                return ENXIO;

        id_entry = drm_find_description(vendor, device, idlist);
        if (id_entry != NULL) {
                if (!device_get_desc(kdev)) {
                        DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
                        device_set_desc(kdev, id_entry->name);
                }
                return 0;
        }

        return ENXIO;
}

int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
        struct drm_device *dev;
        drm_pci_id_list_t *id_entry;
        int unit, error;
        u_int irq_flags;
        int msi_enable;

        unit = device_get_unit(kdev);
        dev = device_get_softc(kdev);

        if (!strcmp(device_get_name(kdev), "drmsub"))
                dev->dev = device_get_parent(kdev);
        else
                dev->dev = kdev;

        dev->pci_domain = pci_get_domain(dev->dev);
        dev->pci_bus = pci_get_bus(dev->dev);
        dev->pci_slot = pci_get_slot(dev->dev);
        dev->pci_func = pci_get_function(dev->dev);

        dev->pci_vendor = pci_get_vendor(dev->dev);
        dev->pci_device = pci_get_device(dev->dev);
        dev->pci_subvendor = pci_get_subvendor(dev->dev);
        dev->pci_subdevice = pci_get_subdevice(dev->dev);

        id_entry = drm_find_description(dev->pci_vendor,
            dev->pci_device, idlist);
        dev->id_entry = id_entry;

        if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
                msi_enable = drm_msi;

                if (drm_msi_is_blacklisted(dev, dev->id_entry->driver_private)) {
                        msi_enable = 0;
                }

                dev->irq_type = pci_alloc_1intr(dev->dev, msi_enable,
                    &dev->irqrid, &irq_flags);

                dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
                    &dev->irqrid, irq_flags);

                if (!dev->irqr) {
                        return (ENOENT);
                }

                dev->irq = (int) rman_get_start(dev->irqr);
        }

        lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
        lwkt_serialize_init(&dev->irq_lock);
        lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
        lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);

        error = drm_load(dev);
        if (error)
                goto error;

        error = drm_create_cdevs(kdev);
        if (error)
                goto error;

        return (error);
error:
        if (dev->irqr) {
                bus_release_resource(dev->dev, SYS_RES_IRQ,
                    dev->irqrid, dev->irqr);
        }
        if (dev->irq_type == PCI_INTR_TYPE_MSI) {
                pci_release_msi(dev->dev);
        }
        return (error);
}
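
/*
 * Illustrative only: a hardware driver's newbus glue typically forwards its
 * probe/attach methods to drm_probe()/drm_attach() with its own PCI id list,
 * along the lines of
 *
 *	static int mydrv_probe(device_t kdev)
 *	{
 *		return drm_probe(kdev, mydrv_pciidlist);
 *	}
 *
 * where "mydrv" and "mydrv_pciidlist" are placeholder names.
 */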

int
drm_create_cdevs(device_t kdev)
{
        struct drm_device *dev;
        int error, unit;

        unit = device_get_unit(kdev);
        dev = device_get_softc(kdev);

        dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
            DRM_DEV_MODE, "dri/card%d", unit);
        error = 0;
        if (error == 0)
                dev->devnode->si_drv1 = dev;
        return (error);
}

#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif

devclass_t drm_devclass;

drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
        int i = 0;

        for (i = 0; idlist[i].vendor != 0; i++) {
                if ((idlist[i].vendor == vendor) &&
                    ((idlist[i].device == device) ||
                    (idlist[i].device == 0))) {
                        return &idlist[i];
                }
        }
        return NULL;
}

/**
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees the per-open and DMA resources held in \p dev; the AGP state itself
 * is kept until unload.
 *
 * \sa drm_device
 */
int drm_lastclose(struct drm_device *dev)
{
        drm_magic_entry_t *pt, *next;

        DRM_DEBUG("\n");

        if (dev->driver->lastclose != NULL)
                dev->driver->lastclose(dev);

        if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
                drm_irq_uninstall(dev);

        DRM_LOCK(dev);
        if (dev->unique) {
                drm_free(dev->unique, M_DRM);
                dev->unique = NULL;
                dev->unique_len = 0;
        }

        /* Clear pid list */
        if (dev->magicfree.next) {
                list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
                        list_del(&pt->head);
                        drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
                        kfree(pt);
                }
                drm_ht_remove(&dev->magiclist);
        }

        /* Clear AGP information */
        if (dev->agp) {
                drm_agp_mem_t *entry;
                drm_agp_mem_t *nexte;

                /* Remove AGP resources, but leave dev->agp intact until
                 * drm_unload is called.
                 */
                for (entry = dev->agp->memory; entry; entry = nexte) {
                        nexte = entry->next;
                        if (entry->bound)
                                drm_agp_unbind_memory(entry->handle);
                        drm_agp_free_memory(entry->handle);
                        drm_free(entry, M_DRM);
                }
                dev->agp->memory = NULL;

                if (dev->agp->acquired)
                        drm_agp_release(dev);

                dev->agp->acquired = 0;
                dev->agp->enabled = 0;
        }
        if (dev->sg != NULL) {
                drm_sg_cleanup(dev->sg);
                dev->sg = NULL;
        }

        drm_dma_takedown(dev);
        if (dev->lock.hw_lock) {
                dev->lock.hw_lock = NULL; /* SHM removed */
                dev->lock.file_priv = NULL;
                DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
        }
        DRM_UNLOCK(dev);

        return 0;
}
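
/*
 * One-time per-device initialization, called from drm_attach(): sysctl and
 * counter setup, optional AGP and MTRR setup, context bitmap and GEM
 * initialization, then the driver's own load() hook.
 */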
static int drm_load(struct drm_device *dev)
{
        int i, retcode;

        DRM_DEBUG("\n");

        INIT_LIST_HEAD(&dev->maplist);

        drm_mem_init();
        drm_sysctl_init(dev);
        INIT_LIST_HEAD(&dev->filelist);

        dev->counters = 6;
        dev->types[0] = _DRM_STAT_LOCK;
        dev->types[1] = _DRM_STAT_OPENS;
        dev->types[2] = _DRM_STAT_CLOSES;
        dev->types[3] = _DRM_STAT_IOCTLS;
        dev->types[4] = _DRM_STAT_LOCKS;
        dev->types[5] = _DRM_STAT_UNLOCKS;

        for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);

        INIT_LIST_HEAD(&dev->vblank_event_list);

        if (drm_core_has_AGP(dev)) {
                if (drm_device_is_agp(dev))
                        dev->agp = drm_agp_init();
                if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
                    dev->agp == NULL) {
                        DRM_ERROR("Card isn't AGP, or couldn't initialize "
                            "AGP.\n");
                        retcode = ENOMEM;
                        goto error;
                }
                if (dev->agp != NULL && dev->agp->agp_info.ai_aperture_base != 0) {
                        if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
                            dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
                                dev->agp->agp_mtrr = 1;
                }
        }

        retcode = drm_ctxbitmap_init(dev);
        if (retcode != 0) {
                DRM_ERROR("Cannot allocate memory for context bitmap.\n");
                goto error;
        }

        if (dev->driver->driver_features & DRIVER_GEM) {
                retcode = drm_gem_init(dev);
                if (retcode != 0) {
                        DRM_ERROR("Cannot initialize graphics execution "
                            "manager (GEM)\n");
                        goto error1;
                }
        }

        if (dev->driver->load != NULL) {
                DRM_LOCK(dev);
                /* Shared code returns -errno. */
                retcode = -dev->driver->load(dev,
                    dev->id_entry->driver_private);
                if (pci_enable_busmaster(dev->dev))
                        DRM_ERROR("Request to enable bus-master failed.\n");
                DRM_UNLOCK(dev);
                if (retcode != 0)
                        goto error1;
        }

        DRM_INFO("Initialized %s %d.%d.%d %s\n",
            dev->driver->name,
            dev->driver->major,
            dev->driver->minor,
            dev->driver->patchlevel,
            dev->driver->date);

        return 0;

error1:
        drm_gem_destroy(dev);
error:
        drm_ctxbitmap_cleanup(dev);
        drm_sysctl_cleanup(dev);
        DRM_LOCK(dev);
        drm_lastclose(dev);
        DRM_UNLOCK(dev);
        if (dev->devnode != NULL)
                destroy_dev(dev->devnode);

        lockuninit(&dev->vbl_lock);
        lockuninit(&dev->dev_lock);
        lockuninit(&dev->event_lock);
        lockuninit(&dev->struct_mutex);

        return retcode;
}

/**
 * Get version information.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_version structure.
 * \param file_priv DRM file private.
 * \return zero on success or EFAULT if the copy to userspace fails.
 *
 * Fills in the version information in \p data.
 */
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_version *version = data;
        int len;

#define DRM_COPY(name, value)						\
        len = strlen(value);						\
        if (len > name##_len)						\
                len = name##_len;					\
        name##_len = strlen(value);					\
        if (len && name) {						\
                if (DRM_COPY_TO_USER(name, value, len))			\
                        return EFAULT;					\
        }

        version->version_major = dev->driver->major;
        version->version_minor = dev->driver->minor;
        version->version_patchlevel = dev->driver->patchlevel;

        DRM_COPY(version->name, dev->driver->name);
        DRM_COPY(version->date, dev->driver->date);
        DRM_COPY(version->desc, dev->driver->desc);

        return 0;
}

/*
 * Stub is needed for devfs
 */
int drm_close(struct dev_close_args *ap)
{
        return 0;
}
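
/*
 * Per-open destructor, run when the cdev private data attached to a file
 * descriptor is torn down.  This is where the traditional drm_release()
 * work happens: GEM and lock cleanup, buffer reclamation, and the final
 * drm_lastclose() once the last open reference goes away.
 */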
void drm_cdevpriv_dtor(void *cd)
{
        struct drm_file *file_priv = cd;
        struct drm_device *dev = file_priv->dev;
        int retcode = 0;

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        DRM_LOCK(dev);

        if (dev->driver->preclose != NULL)
                dev->driver->preclose(dev, file_priv);

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
            DRM_CURRENTPID, (long)dev->dev, dev->open_count);

        if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_release(dev, file_priv);

        if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
            && dev->lock.file_priv == file_priv) {
                DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
                    DRM_CURRENTPID,
                    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
                if (dev->driver->reclaim_buffers_locked != NULL)
                        dev->driver->reclaim_buffers_locked(dev, file_priv);

                drm_lock_free(&dev->lock,
                    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                /* FIXME: may require heavy-handed reset of hardware at this
                 * point, possibly processed via a callback to the X server.
                 */
        } else if (dev->driver->reclaim_buffers_locked != NULL &&
            dev->lock.hw_lock != NULL) {
                /* The lock is required to reclaim buffers */
                for (;;) {
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                retcode = EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
                                dev->lock.file_priv = file_priv;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                                break;	/* Got lock */
                        }
                        /* Contention */
                        retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
                            PCATCH, "drmlk2", 0);
                        if (retcode)
                                break;
                }
                if (retcode == 0) {
                        dev->driver->reclaim_buffers_locked(dev, file_priv);
                        drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
                }
        }

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
            !dev->driver->reclaim_buffers_locked)
                drm_reclaim_buffers(dev, file_priv);

        funsetown(&dev->buf_sigio);

        if (dev->driver->postclose != NULL)
                dev->driver->postclose(dev, file_priv);
        list_del(&file_priv->lhead);

        /* ========================================================
         * End inline drm_release
         */

        atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        device_unbusy(dev->dev);
        if (--dev->open_count == 0) {
                retcode = drm_lastclose(dev);
        }

        DRM_UNLOCK(dev);
}

/**
 * Called whenever a process performs an ioctl on /dev/drm.
 *
 * \param ap cdev ioctl arguments (command, data and file pointer).
 * \return zero on success or an errno on failure.
 *
 * Looks up the ioctl function in the ::ioctls table, checking for root
 * privileges if so required, and dispatches to the respective function.
 */
int drm_ioctl(struct dev_ioctl_args *ap)
{
        struct cdev *kdev = ap->a_head.a_dev;
        struct drm_device *dev;
        struct drm_ioctl_desc *ioctl = NULL;
        u_long cmd = ap->a_cmd;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        int retcode = 0;
        caddr_t data = ap->a_data;
        struct thread *p = curthread;
        int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
        int is_driver_ioctl = 0;
        struct drm_file *file_priv;

        dev = drm_get_device_from_kdev(kdev);

        retcode = devfs_get_cdevpriv(ap->a_fp, (void **)&file_priv);
        if (retcode != 0) {
                DRM_ERROR("can't find authenticator\n");
                return EINVAL;
        }

        atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
                DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
                return EINVAL;
        }

        ioctl = &drm_ioctls[nr];
        /* It's not a core DRM ioctl, try driver-specific. */
        if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
                /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
                nr -= DRM_COMMAND_BASE;
                if (nr >= dev->driver->num_ioctls) {
                        return EINVAL;
                }
                ioctl = &dev->driver->ioctls[nr];
                is_driver_ioctl = 1;
        }

        DRM_DEBUG_VERBOSE("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
            DRM_CURRENTPID, cmd, nr, (long)dev->dev,
            file_priv->authenticated);

        /* Do not trust userspace, use our own definition */
        func = ioctl->func;

        if (func == NULL) {
                DRM_DEBUG("no function\n");
                return EINVAL;
        }

        if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
            ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
            ((ioctl->flags & DRM_MASTER) && !file_priv->master))
                return EACCES;

        if (is_driver_ioctl) {
                if ((ioctl->flags & DRM_UNLOCKED) == 0)
                        mutex_lock(&drm_global_mutex);
                /* shared code returns -errno */
                retcode = -func(dev, data, file_priv);
                if ((ioctl->flags & DRM_UNLOCKED) == 0)
                        mutex_unlock(&drm_global_mutex);
        } else {
                retcode = func(dev, data, file_priv);
        }

        if (!ioctl)
                DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02lx, nr=0x%02x\n",
                    DRM_CURRENTPID,
                    (long)dev->dev,
                    file_priv->authenticated, cmd, nr);

        if (retcode)
                DRM_DEBUG("ret = %d\n", retcode);
        return retcode;
}

drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
        struct drm_map_list *entry;

        list_for_each_entry(entry, &dev->maplist, head) {
                if (entry->map && entry->map->type == _DRM_SHM &&
                    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
                        return entry->map;
                }
        }

        return NULL;
}

int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
        struct sysctl_oid *oid;

        ksnprintf(dev->busid_str, sizeof(dev->busid_str),
            "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
            dev->pci_slot, dev->pci_func);
        oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
            CTLFLAG_RD, dev->busid_str, 0, NULL);
        if (oid == NULL)
                return (ENOMEM);
        dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
        oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
            "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
        if (oid == NULL)
                return (ENOMEM);

        return (0);
}
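
/*
 * Single-object mmap entry point: TTM-managed devices are handled by
 * ttm_bo_mmap_single(), GEM-capable drivers fall back to
 * drm_gem_mmap_single(), and everything else gets ENODEV.
 */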
int
drm_mmap_single(struct dev_mmap_single_args *ap)
{
        struct drm_device *dev;
        struct cdev *kdev = ap->a_head.a_dev;
        vm_ooffset_t *offset = ap->a_offset;
        vm_size_t size = ap->a_size;
        struct vm_object **obj_res = ap->a_object;
        int nprot = ap->a_nprot;

        dev = drm_get_device_from_kdev(kdev);
        if (dev->drm_ttm_bdev != NULL) {
                return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
                    obj_res, nprot));
        } else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
                return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
        } else {
                return (ENODEV);
        }
}

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN	0x6400
#define LINUX_IOCTL_DRM_MAX	0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args *args)
{
        int error;
        int cmd = args->cmd;

        args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
        if (cmd & LINUX_IOC_IN)
                args->cmd |= IOC_IN;
        if (cmd & LINUX_IOC_OUT)
                args->cmd |= IOC_OUT;

        error = ioctl(p, (struct ioctl_args *)args);

        return error;
}
#endif /* DRM_LINUX */

static int
drm_core_init(void *arg)
{

        drm_global_init();

#if DRM_LINUX
        linux_ioctl_register_handler(&drm_handler);
#endif /* DRM_LINUX */

        DRM_INFO("Initialized %s %d.%d.%d %s\n",
            CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
        return 0;
}

static void
drm_core_exit(void *arg)
{

#if DRM_LINUX
        linux_ioctl_unregister_handler(&drm_handler);
#endif /* DRM_LINUX */

        drm_global_release();
}

SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_init, NULL);
SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
    drm_core_exit, NULL);

/*
 * Check if dmi_system_id structure matches system DMI data
 */
static bool
dmi_found(const struct dmi_system_id *dsi)
{
        int i, slot;
        bool found = false;
        char *sys_vendor, *board_vendor, *product_name, *board_name;

        sys_vendor = kgetenv("smbios.system.maker");
        board_vendor = kgetenv("smbios.planar.maker");
        product_name = kgetenv("smbios.system.product");
        board_name = kgetenv("smbios.planar.product");

        for (i = 0; i < NELEM(dsi->matches); i++) {
                slot = dsi->matches[i].slot;
                switch (slot) {
                case DMI_NONE:
                        break;
                case DMI_SYS_VENDOR:
                        if (sys_vendor != NULL &&
                            !strcmp(sys_vendor, dsi->matches[i].substr))
                                break;
                        else
                                goto done;
                case DMI_BOARD_VENDOR:
                        if (board_vendor != NULL &&
                            !strcmp(board_vendor, dsi->matches[i].substr))
                                break;
                        else
                                goto done;
                case DMI_PRODUCT_NAME:
                        if (product_name != NULL &&
                            !strcmp(product_name, dsi->matches[i].substr))
                                break;
                        else
                                goto done;
                case DMI_BOARD_NAME:
                        if (board_name != NULL &&
                            !strcmp(board_name, dsi->matches[i].substr))
                                break;
                        else
                                goto done;
                default:
                        goto done;
                }
        }
        found = true;

done:
        if (sys_vendor != NULL)
                kfreeenv(sys_vendor);
        if (board_vendor != NULL)
                kfreeenv(board_vendor);
        if (product_name != NULL)
                kfreeenv(product_name);
        if (board_name != NULL)
                kfreeenv(board_name);

        return found;
}
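
/**
 * Walk a table of dmi_system_id entries, running the callback of each entry
 * that matches the system DMI data (stopping once a callback returns
 * nonzero) and returning the number of entries that matched.
 *
 * Illustrative example only (names are made up; only the .callback,
 * .matches[].slot and .matches[].substr fields used in this file are
 * assumed):
 *
 *	static const struct dmi_system_id mydrv_quirks[] = {
 *		{
 *			.callback = mydrv_apply_quirk,
 *			.matches = {
 *				{ .slot = DMI_SYS_VENDOR, .substr = "Example Vendor" },
 *				{ .slot = DMI_BOARD_NAME, .substr = "Example Board" },
 *			},
 *		},
 *		{ }	// terminator: matches[0].slot == 0
 *	};
 *
 *	dmi_check_system(mydrv_quirks);
 */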
bool
dmi_check_system(const struct dmi_system_id *sysid)
{
        const struct dmi_system_id *dsi;
        int num = 0;

        for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
                if (dmi_found(dsi)) {
                        num++;
                        if (dsi->callback && dsi->callback(dsi))
                                break;
                }
        }
        return (num);
}