/**
 * \file drm.h
 * Header for the Direct Rendering Manager
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * \par Acknowledgments:
 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * \mainpage
 *
 * The Direct Rendering Manager (DRM) is a device-independent kernel-level
 * device driver that provides support for the XFree86 Direct Rendering
 * Infrastructure (DRI).
 *
 * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
 * ways:
 *     -# The DRM provides synchronized access to the graphics hardware via
 *        the use of an optimized two-tiered lock.
 *     -# The DRM enforces the DRI security policy for access to the graphics
 *        hardware by only allowing authenticated X11 clients access to
 *        restricted regions of memory.
 *     -# The DRM provides a generic DMA engine, complete with multiple
 *        queues and the ability to detect the need for an OpenGL context
 *        switch.
 *     -# The DRM is extensible via the use of small device-specific modules
 *        that rely extensively on the API exported by the DRM module.
 *
 */

#ifndef _DRM_H_
#define _DRM_H_

#ifndef __user
#define __user
#endif
#ifndef __iomem
#define __iomem
#endif

#ifdef __GNUC__
# define DEPRECATED  __attribute__ ((deprecated))
#else
# define DEPRECATED
#endif

#if defined(__linux__)
#include <asm/ioctl.h>		/* For _IO* macros */
#define DRM_IOCTL_NR(n)		_IOC_NR(n)
#define DRM_IOC_VOID		_IOC_NONE
#define DRM_IOC_READ		_IOC_READ
#define DRM_IOC_WRITE		_IOC_WRITE
#define DRM_IOC_READWRITE	_IOC_READ|_IOC_WRITE
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
#include <sys/ioccom.h>
#define DRM_IOCTL_NR(n)		((n) & 0xff)
#define DRM_IOC_VOID		IOC_VOID
#define DRM_IOC_READ		IOC_OUT
#define DRM_IOC_WRITE		IOC_IN
#define DRM_IOC_READWRITE	IOC_INOUT
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#endif

#ifdef __OpenBSD__
#define DRM_MAJOR       81
#endif
#if defined(__linux__) || defined(__NetBSD__)
#define DRM_MAJOR       226
#endif
#define DRM_MAX_MINOR   15

#define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10	  /**< How much system ram can we lock? */

#define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
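/*
 * Usage sketch (illustrative only): the lock word in the SAREA's drm_hw_lock
 * combines a context number with the _DRM_LOCK_HELD and _DRM_LOCK_CONT bits,
 * and the macros above pick those pieces apart.  Real clients normally take
 * the lock with an atomic compare-and-swap on the shared word and only fall
 * back to DRM_IOCTL_LOCK on contention; the snippet below shows the encoding
 * only.  `sarea_lock' and `my_context' are assumed to exist in the caller.
 *
 * \code
 * unsigned int lw = sarea_lock->lock;                // shared lock word in the SAREA
 * int held        = _DRM_LOCK_IS_HELD(lw) != 0;      // is anyone holding it?
 * int contended   = _DRM_LOCK_IS_CONT(lw) != 0;      // has someone else tried to take it?
 * unsigned int owner = _DRM_LOCKING_CONTEXT(lw);     // context number of the holder
 * unsigned int mine  = my_context | _DRM_LOCK_HELD;  // value a holder would store
 * \endcode
 */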

#if defined(__linux__)
typedef unsigned int drm_handle_t;
#else
#include <sys/types.h>
typedef unsigned long drm_handle_t;	/**< To mapped regions */
#endif
typedef unsigned int drm_context_t;	/**< GLXContext handle */
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;	/**< Magic for authentication */

/**
 * Cliprect.
 *
 * \warning If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
};

/**
 * Texture region.
 */
struct drm_tex_region {
	unsigned char next;
	unsigned char prev;
	unsigned char in_use;
	unsigned char padding;
	unsigned int age;
};

/**
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer.  To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
	__volatile__ unsigned int lock;		/**< lock variable */
	char padding[60];			/**< Pad to cache line */
};

/* This is beyond ugly, and only works on GCC.  However, it allows me to use
 * drm.h in places (i.e., in the X-server) where I can't use size_t.  The real
 * fix is to use uint32_t instead of size_t, but that fix will break existing
 * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems.  That *will*
 * eventually happen, though.  I chose 'unsigned long' to be the fallback type
 * because that works on all the platforms I know about.  Hopefully, the
 * real fix will happen before that bites us.
 */

#ifdef __SIZE_TYPE__
# define DRM_SIZE_T __SIZE_TYPE__
#else
# warning "__SIZE_TYPE__ not defined.  Assuming sizeof(size_t) == sizeof(unsigned long)!"
# define DRM_SIZE_T unsigned long
#endif

/**
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
	int version_major;	  /**< Major version */
	int version_minor;	  /**< Minor version */
	int version_patchlevel;	  /**< Patch level */
	DRM_SIZE_T name_len;	  /**< Length of name buffer */
	char __user *name;		  /**< Name of driver */
	DRM_SIZE_T date_len;	  /**< Length of date buffer */
	char __user *date;		  /**< User-space buffer to hold date */
	DRM_SIZE_T desc_len;	  /**< Length of desc buffer */
	char __user *desc;		  /**< User-space buffer to hold desc */
};
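/*
 * Usage sketch (illustrative only) of the two-pass pattern that libdrm's
 * drmGetVersion() uses with this structure: the first ioctl with NULL buffers
 * returns the string lengths, the caller then allocates and calls again.
 * `fd' is assumed to be an open DRM device file descriptor; malloc error
 * handling is trimmed for brevity.
 *
 * \code
 * #include <sys/ioctl.h>
 * #include <stdlib.h>
 * #include <string.h>
 *
 * static int query_version(int fd, struct drm_version *v)
 * {
 *         memset(v, 0, sizeof(*v));
 *         if (ioctl(fd, DRM_IOCTL_VERSION, v) != 0)    // first pass: lengths only
 *                 return -1;
 *         v->name = malloc(v->name_len + 1);
 *         v->date = malloc(v->date_len + 1);
 *         v->desc = malloc(v->desc_len + 1);
 *         if (ioctl(fd, DRM_IOCTL_VERSION, v) != 0)    // second pass: fill the buffers
 *                 return -1;
 *         v->name[v->name_len] = '\0';
 *         v->date[v->date_len] = '\0';
 *         v->desc[v->desc_len] = '\0';
 *         return 0;
 * }
 * \endcode
 */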

/**
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
	DRM_SIZE_T unique_len;	  /**< Length of unique */
	char __user *unique;		  /**< Unique name for driver instantiation */
};

#undef DRM_SIZE_T

struct drm_list {
	int count;		  /**< Length of user-space structures */
	struct drm_version __user *version;
};

struct drm_block {
	int unused;
};

/**
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER,
		DRM_UNINST_HANDLER
	} func;
	int irq;
};

/**
 * Type of memory to map.
 */
enum drm_map_type {
	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
	_DRM_SHM = 2,		  /**< shared, cached */
	_DRM_AGP = 3,		  /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
	_DRM_TTM = 6
};

/**
 * Memory mapping flags.
 */
enum drm_map_flags {
	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
	_DRM_READ_ONLY = 0x02,
	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
	_DRM_DRIVER = 0x80	     /**< Managed by driver */
};

struct drm_ctx_priv_map {
	unsigned int ctx_id;	 /**< Context requesting private mapping */
	void *handle;		 /**< Handle of map */
};

/**
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
	unsigned long size;	 /**< Requested physical size (bytes) */
	enum drm_map_type type;	 /**< Type of memory to map */
	enum drm_map_flags flags;	 /**< Flags */
	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
	int mtrr;		 /**< MTRR slot used */
	/*   Private data */
};
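/*
 * Usage sketch (illustrative only): the handle returned by DRM_IOCTL_ADD_MAP
 * is passed to mmap() as the offset, which is how drmAddMap()/drmMap() expose
 * a map to user space.  Creating maps normally requires the DRM master.
 * `fd' and the SHM/SAREA parameters here are assumptions for the example.
 *
 * \code
 * #include <sys/types.h>
 * #include <sys/ioctl.h>
 * #include <sys/mman.h>
 * #include <string.h>
 *
 * static void *map_sarea(int fd, unsigned long size)
 * {
 *         struct drm_map map;
 *
 *         memset(&map, 0, sizeof(map));
 *         map.offset = 0;                      // 0: let the kernel pick (SAREA)
 *         map.size   = size;
 *         map.type   = _DRM_SHM;
 *         map.flags  = _DRM_CONTAINS_LOCK;     // SAREA page holds the hardware lock
 *         if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) != 0)
 *                 return NULL;
 *
 *         // The returned handle is used as the mmap() offset.
 *         return mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                     fd, (off_t)(unsigned long)map.handle);
 * }
 * \endcode
 */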

/**
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
	int idx;		/**< Which client desired? */
	int auth;		/**< Is client authenticated? */
	unsigned long pid;	/**< Process ID */
	unsigned long uid;	/**< User ID */
	unsigned long magic;	/**< Magic */
	unsigned long iocs;	/**< Ioctl count */
};

enum drm_stat_type {
	_DRM_STAT_LOCK,
	_DRM_STAT_OPENS,
	_DRM_STAT_CLOSES,
	_DRM_STAT_IOCTLS,
	_DRM_STAT_LOCKS,
	_DRM_STAT_UNLOCKS,
	_DRM_STAT_VALUE,	/**< Generic value */
	_DRM_STAT_BYTE,		/**< Generic byte counter (1024bytes/K) */
	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */

	_DRM_STAT_IRQ,		/**< IRQ */
	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
	_DRM_STAT_DMA,		/**< DMA */
	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
	    /* Add to the *END* of the list */
};

/**
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
	unsigned long count;
	struct {
		unsigned long value;
		enum drm_stat_type type;
	} data[15];
};

/**
 * Hardware locking flags.
 */
enum drm_lock_flags {
	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
	/* These *HALT* flags aren't supported yet
	   -- they will be used to support the
	   full-screen DGA-like mode. */
	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
};

/**
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
	int context;
	enum drm_lock_flags flags;
};
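/*
 * Usage sketch (illustrative only) of the ioctl path of the two-tiered lock.
 * Real clients first try the shared-memory fast path and only issue
 * DRM_IOCTL_LOCK on contention; here the ioctl is used unconditionally for
 * brevity.  `fd' is an open DRM descriptor and `ctx' a context handle
 * obtained from DRM_IOCTL_ADD_CTX -- both are assumptions of the example.
 *
 * \code
 * #include <sys/ioctl.h>
 *
 * static int with_hw_lock(int fd, drm_context_t ctx)
 * {
 *         struct drm_lock lk;
 *
 *         lk.context = ctx;
 *         lk.flags   = _DRM_LOCK_READY;   // wait until the hardware can take DMA
 *         if (ioctl(fd, DRM_IOCTL_LOCK, &lk) != 0)
 *                 return -1;
 *
 *         // ... touch the hardware or dispatch buffers here ...
 *
 *         lk.flags = 0;
 *         return ioctl(fd, DRM_IOCTL_UNLOCK, &lk);
 * }
 * \endcode
 */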

/**
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
	/* Flags for DMA buffer dispatch */
	_DRM_DMA_BLOCK = 0x01,	      /**<
				       * Block until buffer dispatched.
				       *
				       * \note The buffer may not yet have
				       * been processed by the hardware --
				       * getting a hardware lock with the
				       * hardware quiescent will ensure
				       * that the buffer has been
				       * processed.
				       */
	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */

	/* Flags for DMA buffer request */
	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
};

/**
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
	int count;		 /**< Number of buffers of this size */
	int size;		 /**< Size in bytes */
	int low_mark;		 /**< Low water mark */
	int high_mark;		 /**< High water mark */
	enum {
		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
		_DRM_SG_BUFFER  = 0x04,	/**< Scatter/gather memory buffer */
		_DRM_FB_BUFFER  = 0x08, /**< Buffer is in frame buffer */
		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
				  * Start address of where the AGP buffers are
				  * in the AGP aperture
				  */
};

/**
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
	int count;		  /**< Number of buffers described in list */
	struct drm_buf_desc __user *list; /**< List of buffer descriptions */
};

/**
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
	int count;
	int __user *list;
};

/**
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
	int idx;		       /**< Index into the master buffer list */
	int total;		       /**< Buffer size */
	int used;		       /**< Amount of buffer in use (for DMA) */
	void __user *address;	       /**< Address of buffer */
};

/**
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
	int count;		/**< Length of the buffer list */
#if defined(__cplusplus)
	void __user *c_virtual;
#else
	void __user *virtual;		/**< Mmap'd area in user-virtual */
#endif
	struct drm_buf_pub __user *list;	/**< Buffer information */
};

/**
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
	int context;			  /**< Context handle */
	int send_count;			  /**< Number of buffers to send */
	int __user *send_indices;	  /**< List of handles to buffers */
	int __user *send_sizes;		  /**< Lengths of data to send */
	enum drm_dma_flags flags;	  /**< Flags */
	int request_count;		  /**< Number of buffers requested */
	int request_size;		  /**< Desired size for buffers */
	int __user *request_indices;	 /**< Buffer information */
	int __user *request_sizes;
	int granted_count;		  /**< Number of buffers granted */
};
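/*
 * Usage sketch (illustrative only): dispatch one DMA buffer that was
 * previously mapped with DRM_IOCTL_MAP_BUFS.  `idx' is the buffer's index
 * from the drm_buf_pub list and `used' the byte count the client actually
 * filled in; both, as well as `fd' and `ctx', are assumptions of the example,
 * and all buffer management and error handling is omitted.
 *
 * \code
 * #include <sys/ioctl.h>
 * #include <string.h>
 *
 * static int send_one_buffer(int fd, int ctx, int idx, int used)
 * {
 *         struct drm_dma d;
 *
 *         memset(&d, 0, sizeof(d));
 *         d.context      = ctx;
 *         d.send_count   = 1;
 *         d.send_indices = &idx;
 *         d.send_sizes   = &used;
 *         d.flags        = _DRM_DMA_BLOCK;   // block until the buffer is dispatched
 *         d.request_count = 0;               // not asking for fresh buffers here
 *         return ioctl(fd, DRM_IOCTL_DMA, &d);
 * }
 * \endcode
 */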

enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01,
	_DRM_CONTEXT_2DONLY = 0x02
};

/**
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
	drm_context_t handle;
	enum drm_ctx_flags flags;
};

/**
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
	int count;
	struct drm_ctx __user *contexts;
};

/**
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
	drm_drawable_t handle;
};

/**
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;

struct drm_update_draw {
	drm_drawable_t handle;
	unsigned int type;
	unsigned int num;
	unsigned long long data;
};

/**
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
	drm_magic_t magic;
};
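/*
 * Usage sketch (illustrative only) of the authentication handshake this
 * structure carries: an unprivileged client fetches a magic cookie with
 * DRM_IOCTL_GET_MAGIC and hands it to the master (e.g. the X server) out of
 * band, which validates it with DRM_IOCTL_AUTH_MAGIC.  The file descriptors
 * are assumptions of the example.
 *
 * \code
 * #include <sys/ioctl.h>
 *
 * // Client side: obtain a magic cookie for this file descriptor.
 * static int get_magic(int client_fd, drm_magic_t *out)
 * {
 *         struct drm_auth auth;
 *
 *         if (ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth) != 0)
 *                 return -1;
 *         *out = auth.magic;          // handed to the master out of band
 *         return 0;
 * }
 *
 * // Master side: authenticate the client that presented 'magic'.
 * static int auth_magic(int master_fd, drm_magic_t magic)
 * {
 *         struct drm_auth auth;
 *
 *         auth.magic = magic;
 *         return ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
 * }
 * \endcode
 */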

/**
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
	int irq;	/**< IRQ number */
	int busnum;	/**< bus number */
	int devnum;	/**< device number */
	int funcnum;	/**< function number */
};

enum drm_vblank_seq_type {
	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
	_DRM_VBLANK_FLIP = 0x8000000,	/**< Scheduled buffer swap should flip */
	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
};

#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
				_DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	unsigned long signal;
};

struct drm_wait_vblank_reply {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	long tval_sec;
	long tval_usec;
};

/**
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
	struct drm_wait_vblank_request request;
	struct drm_wait_vblank_reply reply;
};
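/*
 * Usage sketch (illustrative only): the request and reply share storage, so
 * the same union is filled in before the ioctl and read back afterwards.
 * The snippet waits for one vblank relative to the current count; `fd' is
 * assumed to be an open DRM descriptor.
 *
 * \code
 * #include <sys/ioctl.h>
 * #include <string.h>
 *
 * static int wait_one_vblank(int fd, unsigned int *seq_out)
 * {
 *         union drm_wait_vblank vbl;
 *
 *         memset(&vbl, 0, sizeof(vbl));
 *         vbl.request.type     = _DRM_VBLANK_RELATIVE;  // relative to the current count
 *         vbl.request.sequence = 1;                     // one vblank from now
 *         if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) != 0)
 *                 return -1;
 *         *seq_out = vbl.reply.sequence;                // sequence number reached
 *         return 0;
 * }
 * \endcode
 */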


#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/**
 * DRM_IOCTL_MODESET_CTL ioctl argument type.
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
	uint32_t crtc;
	uint32_t cmd;
};
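/*
 * Usage sketch (illustrative only): the _DRM_PRE_MODESET/_DRM_POST_MODESET
 * commands above bracket a mode set so the kernel can keep vblank accounting
 * consistent while the interrupt may be off.  CRTC 0 and the bare ioctl calls
 * are assumptions of the example; the actual mode programming is driver
 * specific and not shown.
 *
 * \code
 * #include <sys/ioctl.h>
 *
 * static void bracket_modeset(int fd)
 * {
 *         struct drm_modeset_ctl ctl;
 *
 *         ctl.crtc = 0;
 *         ctl.cmd  = _DRM_PRE_MODESET;
 *         ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 *
 *         // ... program the new mode here ...
 *
 *         ctl.cmd = _DRM_POST_MODESET;
 *         ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 * }
 * \endcode
 */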

/**
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
	unsigned long mode;	/**< AGP mode */
};

/**
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for binding / unbinding */
	unsigned long type;	/**< Type of memory to allocate */
	unsigned long physical;	/**< Physical used by i810 */
};

/**
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
	unsigned long handle;	/**< From drm_agp_buffer */
	unsigned long offset;	/**< In bytes -- will round to page boundary */
};

/**
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
	int agp_version_major;
	int agp_version_minor;
	unsigned long mode;
	unsigned long aperture_base;   /**< physical address */
	unsigned long aperture_size;   /**< bytes */
	unsigned long memory_allowed;  /**< bytes */
	unsigned long memory_used;

	/** \name PCI information */
	/*@{ */
	unsigned short id_vendor;
	unsigned short id_device;
	/*@} */
};

/**
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for mapping / unmapping */
};

/**
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
	int drm_di_major;
	int drm_di_minor;
	int drm_dd_major;
	int drm_dd_minor;
};


#define DRM_FENCE_FLAG_EMIT                0x00000001
#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
/**
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
#define DRM_FENCE_FLAG_NO_USER             0x00000010

/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER              0xFF000000

#define DRM_FENCE_TYPE_EXE                 0x00000001

struct drm_fence_arg {
	unsigned int handle;
	unsigned int fence_class;
	unsigned int type;
	unsigned int flags;
	unsigned int signaled;
	unsigned int error;
	unsigned int sequence;
	unsigned int pad64;
	uint64_t expand_pad[2]; /*Future expansion */
};

/* Buffer permissions, referring to how the GPU uses the buffers.
 * These translate to fence types used for the buffers.
 * Typically a texture buffer is read, a destination buffer is write, and
 * a command (batch) buffer is exe. The flags can be or'ed together.
 */

#define DRM_BO_FLAG_READ        (1ULL << 0)
#define DRM_BO_FLAG_WRITE       (1ULL << 1)
#define DRM_BO_FLAG_EXE         (1ULL << 2)

/*
 * All of the bits related to access mode
 */
#define DRM_BO_MASK_ACCESS	(DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
/*
 * Status flags. Can be read to determine the actual state of a buffer.
 * Can also be set in the buffer mask before validation.
 */

/*
 * Mask: Never evict this buffer. Not even with force. This type of buffer is only
 * available to root and must be manually removed before buffer manager shutdown
 * or lock.
 * Flags: Acknowledge
 */
#define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)

/*
 * Mask: Require that the buffer is placed in mappable memory when validated.
 *       If not set the buffer may or may not be in mappable memory when validated.
 * Flags: If set, the buffer is in mappable memory.
 */
#define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)

/* Mask: The buffer should be shareable with other processes.
 * Flags: The buffer is shareable with other processes.
 */
#define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)

/* Mask: If set, place the buffer in cache-coherent memory if available.
 *       If clear, never place the buffer in cache-coherent memory if validated.
 * Flags: The buffer is currently in cache-coherent memory.
 */
#define DRM_BO_FLAG_CACHED      (1ULL << 7)

/* Mask: Make sure that every time this buffer is validated,
 *       it ends up at the same location provided that the memory mask is the same.
 *       The buffer will also not be evicted when claiming space for
 *       other buffers. Basically a pinned buffer but it may be thrown out as
 *       part of buffer manager shutdown or locking.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)

/* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
 * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
 * with unsnooped PTEs instead of snooped, by using chipset-specific cache
 * flushing at bind time.  A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
 * as the eviction to local memory (TTM unbind) on map is just a side effect
 * to prevent aggressive cache prefetch from the GPU disturbing the cache
 * management that the DRM is doing.
 *
 * Flags: Acknowledge.
 * Buffers allocated with this flag should not be used for suballocators.
 * This type may have issues on CPUs with over-aggressive caching:
 * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
 */
#define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)


/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)

/*
 * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
#define DRM_BO_FLAG_TILE           (1ULL << 15)

/*
 * Memory type flags that can be or'ed together in the mask, but only
 * one appears in flags.
 */

/* System memory */
#define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
/* Up to the driver to define. */
#define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
#define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
#define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
#define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
#define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
/* We can add more of these now with a 64-bit flag type */

/*
 * This is a mask covering all of the memory type flags; easier to just
 * use a single constant than a bunch of | values. It covers
 * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
 */
#define DRM_BO_MASK_MEM         0x00000000FF000000ULL
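/*
 * Illustration (assumed usage, not a prescribed API call sequence) of the
 * mask/flags convention described above: the mask or's together every
 * acceptable option, including several memory types, while the flags returned
 * by the kernel carry the single placement actually chosen.  `flags' below is
 * assumed to have been returned by a validation ioctl.
 *
 * \code
 * // Proposed mask: the GPU will read and execute from the buffer, it may be
 * // placed in either TT or VRAM, and it must stay CPU-mappable.
 * uint64_t mask = DRM_BO_FLAG_READ | DRM_BO_FLAG_EXE |
 *                 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
 *                 DRM_BO_FLAG_MAPPABLE;
 *
 * // After validation, exactly one memory-type bit is set in the returned
 * // flags; DRM_BO_MASK_MEM extracts it.
 * uint64_t placement = flags & DRM_BO_MASK_MEM;
 * \endcode
 */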
/*
 * This adds all of the CPU-mapping options in with the memory
 * type to label all bits which change how the page gets mapped
 */
#define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
				 DRM_BO_FLAG_CACHED_MAPPED | \
				 DRM_BO_FLAG_CACHED | \
				 DRM_BO_FLAG_MAPPABLE)

/* Driver-private flags */
#define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL

/*
 * Don't block on validate and map. Instead, return EBUSY.
 */
#define DRM_BO_HINT_DONT_BLOCK  0x00000002
/*
 * Don't place this buffer on the unfenced list. This means
 * that the buffer will not end up having a fence associated
 * with it as a result of this operation.
 */
#define DRM_BO_HINT_DONT_FENCE  0x00000004
/**
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_BO_HINT_WAIT_LAZY   0x00000008
/*
 * The client has computed relocations referring to this buffer using the
 * offset in the presumed_offset field. If that offset ends up matching
 * where this buffer lands, the kernel is free to skip executing those
 * relocations.
 */
#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010

#define DRM_BO_INIT_MAGIC 0xfe769812
#define DRM_BO_INIT_MAJOR 1
#define DRM_BO_INIT_MINOR 0
#define DRM_BO_INIT_PATCH 0


struct drm_bo_info_req {
	uint64_t mask;
	uint64_t flags;
	unsigned int handle;
	unsigned int hint;
	unsigned int fence_class;
	unsigned int desired_tile_stride;
	unsigned int tile_info;
	unsigned int pad64;
	uint64_t presumed_offset;
};

struct drm_bo_create_req {
	uint64_t flags;
	uint64_t size;
	uint64_t buffer_start;
	unsigned int hint;
	unsigned int page_alignment;
};


/*
 * Reply flags
 */

#define DRM_BO_REP_BUSY 0x00000001

struct drm_bo_info_rep {
	uint64_t flags;
	uint64_t proposed_flags;
	uint64_t size;
	uint64_t offset;
	uint64_t arg_handle;
	uint64_t buffer_start;
	unsigned int handle;
	unsigned int fence_flags;
	unsigned int rep_flags;
	unsigned int page_alignment;
	unsigned int desired_tile_stride;
	unsigned int hw_tile_stride;
	unsigned int tile_info;
	unsigned int pad64;
	uint64_t expand_pad[4]; /*Future expansion */
};

struct drm_bo_arg_rep {
	struct drm_bo_info_rep bo_info;
	int ret;
	unsigned int pad64;
};

struct drm_bo_create_arg {
	union {
		struct drm_bo_create_req req;
		struct drm_bo_info_rep rep;
	} d;
};
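/*
 * Usage sketch (illustrative only) of creating a buffer object through this
 * legacy TTM-style interface: the request and reply overlay each other in the
 * union, so the same argument is filled in before the ioctl and read back
 * afterwards.  The size, flags, and hint here are assumptions, as is the
 * precondition that the driver has already initialized its memory manager
 * (see DRM_IOCTL_MM_INIT below).
 *
 * \code
 * #include <sys/ioctl.h>
 * #include <string.h>
 *
 * // Create a 64 KiB, CPU-mappable buffer object in TT memory; returns the
 * // handle, or 0 on failure.
 * static unsigned int create_bo(int fd)
 * {
 *         struct drm_bo_create_arg arg;
 *
 *         memset(&arg, 0, sizeof(arg));
 *         arg.d.req.size  = 64 * 1024;
 *         arg.d.req.flags = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *                           DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE;
 *         arg.d.req.hint  = DRM_BO_HINT_DONT_FENCE;
 *         arg.d.req.page_alignment = 0;          // no special alignment
 *         if (ioctl(fd, DRM_IOCTL_BO_CREATE, &arg) != 0)
 *                 return 0;
 *         return arg.d.rep.handle;               // reply overlays the request
 * }
 * \endcode
 */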

struct drm_bo_handle_arg {
	unsigned int handle;
};

struct drm_bo_reference_info_arg {
	union {
		struct drm_bo_handle_arg req;
		struct drm_bo_info_rep rep;
	} d;
};

struct drm_bo_map_wait_idle_arg {
	union {
		struct drm_bo_info_req req;
		struct drm_bo_info_rep rep;
	} d;
};

struct drm_bo_op_req {
	enum {
		drm_bo_validate,
		drm_bo_fence,
		drm_bo_ref_fence,
	} op;
	unsigned int arg_handle;
	struct drm_bo_info_req bo_req;
};


struct drm_bo_op_arg {
	uint64_t next;
	union {
		struct drm_bo_op_req req;
		struct drm_bo_arg_rep rep;
	} d;
	int handled;
	unsigned int pad64;
};


#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_PRIV0 3
#define DRM_BO_MEM_PRIV1 4
#define DRM_BO_MEM_PRIV2 5
#define DRM_BO_MEM_PRIV3 6
#define DRM_BO_MEM_PRIV4 7

#define DRM_BO_MEM_TYPES 8 /* For now. */

#define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)

struct drm_bo_version_arg {
	uint32_t major;
	uint32_t minor;
	uint32_t patchlevel;
};

struct drm_mm_type_arg {
	unsigned int mem_type;
	unsigned int lock_flags;
};

struct drm_mm_init_arg {
	unsigned int magic;
	unsigned int major;
	unsigned int minor;
	unsigned int mem_type;
	uint64_t p_offset;
	uint64_t p_size;
};

struct drm_mm_info_arg {
	unsigned int mem_type;
	uint64_t p_size;
};

/**
 * \name Ioctls Definitions
 */
/*@{*/

#define DRM_IOCTL_BASE			'd'
#define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)

#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX		DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)

#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
#define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_UPDATE_DRAW           DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MM_INIT               DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN           DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK               DRM_IOWR(0xc2, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_UNLOCK             DRM_IOWR(0xc3, struct drm_mm_type_arg)

#define DRM_IOCTL_FENCE_CREATE          DRM_IOWR(0xc4, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_REFERENCE       DRM_IOWR(0xc6, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_UNREFERENCE     DRM_IOWR(0xc7, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_SIGNALED        DRM_IOWR(0xc8, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_FLUSH           DRM_IOWR(0xc9, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_WAIT            DRM_IOWR(0xca, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_EMIT            DRM_IOWR(0xcb, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_BUFFERS         DRM_IOWR(0xcc, struct drm_fence_arg)

#define DRM_IOCTL_BO_CREATE             DRM_IOWR(0xcd, struct drm_bo_create_arg)
#define DRM_IOCTL_BO_MAP                DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_UNMAP              DRM_IOWR(0xd0, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_REFERENCE          DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_UNREFERENCE        DRM_IOWR(0xd2, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_SETSTATUS          DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_INFO               DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_WAIT_IDLE          DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_VERSION          DRM_IOR(0xd6, struct drm_bo_version_arg)
#define DRM_IOCTL_MM_INFO               DRM_IOWR(0xd7, struct drm_mm_info_arg)

/*@}*/

/**
 * Device-specific ioctls should only be in their respective headers.
 * The device-specific ioctl range is from 0x40 to 0x99.
 * Generic ioctls restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE                0x40
#define DRM_COMMAND_END                 0xA0
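/*
 * Illustration only: a hypothetical driver-private header would build its
 * ioctl numbers on top of DRM_COMMAND_BASE with the DRM_IO* helpers above.
 * The struct and macro names below are made up for the example and do not
 * correspond to any real driver.
 *
 * \code
 * struct mydrv_getparam {
 *         int param;
 *         int value;
 * };
 *
 * #define MYDRV_GETPARAM          0x00    // driver-relative command number
 * #define MYDRV_IOCTL_GETPARAM    DRM_IOWR(DRM_COMMAND_BASE + MYDRV_GETPARAM, \
 *                                          struct mydrv_getparam)
 * \endcode
 */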

/* typedef area */
#if !defined(__KERNEL__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;

typedef struct drm_fence_arg drm_fence_arg_t;
typedef struct drm_mm_type_arg drm_mm_type_arg_t;
typedef struct drm_mm_init_arg drm_mm_init_arg_t;
typedef enum drm_bo_type drm_bo_type_t;
#endif

#endif