/* BEGIN CSTYLED */

/*
 * Copyright (c) 2009, Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/x86_archext.h>
#include <sys/vfs_opreg.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#ifndef roundup
#define	roundup(x, y)   ((((x)+((y)-1))/(y))*(y))
#endif /* !roundup */

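/*
 * Every domain except the CPU and GTT domains, i.e. all GPU-side caches
 * (render, sampler, command, instruction, vertex; see i915_gem_flush).
 */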
#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

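/*
 * timeout(9F) id for the deferred retire-work callback; this stands in for
 * the Linux delayed workqueue (see i915_add_request and
 * i915_gem_retire_work_handler).  NULL means no retire work is scheduled.
 */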
static timeout_id_t worktimer_id = NULL;

extern int drm_mm_init(struct drm_mm *mm,
		    unsigned long start, unsigned long size);
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern int choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
    int vacalign, uint_t flags);

static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
				  uint32_t read_domains,
				  uint32_t write_domain);
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						     uint64_t offset,
						     uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);

static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev);

/*ARGSUSED*/
int
i915_gem_init_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_init args;

	if (dev->driver->use_gem != 1)
		return ENODEV;

	DRM_COPYFROM_WITH_RETURN(&args,
	    (struct drm_i915_gem_init *) data, sizeof(args));

	spin_lock(&dev->struct_mutex);

	if ((args.gtt_start >= args.gtt_end) ||
	    ((args.gtt_start & (PAGE_SIZE - 1)) != 0) ||
	    ((args.gtt_end & (PAGE_SIZE - 1)) != 0)) {
		spin_unlock(&dev->struct_mutex);
		DRM_ERROR("i915_gem_init_ioctl invalid arg 0x%lx args.start 0x%lx end 0x%lx", &args, args.gtt_start, args.gtt_end);
		return EINVAL;
	}

	dev->gtt_total = (uint32_t) (args.gtt_end - args.gtt_start);

	(void) drm_mm_init(&dev_priv->mm.gtt_space,
	    (unsigned long) args.gtt_start, dev->gtt_total);
	DRM_DEBUG("i915_gem_init_ioctl dev->gtt_total %x, dev_priv->mm.gtt_space 0x%x gtt_start 0x%lx", dev->gtt_total, dev_priv->mm.gtt_space, args.gtt_start);
	ASSERT(dev->gtt_total != 0);

	spin_unlock(&dev->struct_mutex);


	return 0;
}

/*ARGSUSED*/
int
i915_gem_get_aperture_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_i915_gem_get_aperture args;
	int ret;

	if (dev->driver->use_gem != 1)
		return ENODEV;

	args.aper_size = (uint64_t)dev->gtt_total;
	args.aper_available_size = (args.aper_size -
				     atomic_read(&dev->pin_memory));

	ret = DRM_COPY_TO_USER((struct drm_i915_gem_get_aperture __user *) data, &args, sizeof(args));

	if (ret != 0)
		DRM_ERROR("i915_gem_get_aperture_ioctl error! %d", ret);

	DRM_DEBUG("i915_gem_get_aperture_ioctl called sizeof %d, aper_size 0x%x, aper_available_size 0x%x\n", sizeof(args), dev->gtt_total, args.aper_available_size);

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
/*ARGSUSED*/
int
i915_gem_create_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_i915_gem_create args;
	struct drm_gem_object *obj;
	int handlep;
	int ret;

	if (dev->driver->use_gem != 1)
		return ENODEV;

	DRM_COPYFROM_WITH_RETURN(&args,
	    (struct drm_i915_gem_create *) data, sizeof(args));


	args.size = (uint64_t) roundup(args.size, PAGE_SIZE);

	if (args.size == 0) {
		DRM_ERROR("Invalid obj size %d", args.size);
		return EINVAL;
	}
	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args.size);
	if (obj == NULL) {
		DRM_ERROR("Failed to alloc obj");
		return ENOMEM;
	}

	ret = drm_gem_handle_create(fpriv, obj, &handlep);
	spin_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	spin_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	args.handle = handlep;

	ret = DRM_COPY_TO_USER((struct drm_i915_gem_create *) data, &args, sizeof(args));

	if (ret != 0)
		DRM_ERROR("gem create error! %d", ret);

	DRM_DEBUG("i915_gem_create_ioctl object name %d, size 0x%lx, list 0x%lx, obj 0x%lx", handlep, args.size, &fpriv->object_idr, obj);

	return 0;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
/*ARGSUSED*/
int
i915_gem_pread_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_i915_gem_pread args;
	struct drm_gem_object *obj;
	int ret;

	if (dev->driver->use_gem != 1)
		return ENODEV;

	DRM_COPYFROM_WITH_RETURN(&args,
	    (struct drm_i915_gem_pread __user *) data, sizeof(args));

	obj = drm_gem_object_lookup(fpriv, args.handle);
	if (obj == NULL)
		return EBADF;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args.offset > obj->size || args.size > obj->size ||
	    args.offset + args.size > obj->size) {
		drm_gem_object_unreference(obj);
		DRM_ERROR("i915_gem_pread_ioctl invalid args");
		return EINVAL;
	}

	spin_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args.offset, args.size);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		spin_unlock(&dev->struct_mutex);
		DRM_ERROR("pread failed to read domain range ret %d!!!", ret);
		return EFAULT;
	}

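	/*
	 * The range is now coherent for CPU reads, so copy straight from the
	 * object's kernel mapping (obj->kaddr) to the user buffer.
	 */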
	unsigned long unwritten = 0;
	uint32_t *user_data;
	user_data = (uint32_t *) (uintptr_t) args.data_ptr;

	unwritten = DRM_COPY_TO_USER(user_data, obj->kaddr + args.offset, args.size);
	if (unwritten) {
		ret = EFAULT;
		DRM_ERROR("i915_gem_pread error!!! unwritten %d", unwritten);
	}

	drm_gem_object_unreference(obj);
	spin_unlock(&dev->struct_mutex);

	return ret;
}

/*ARGSUSED*/
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		    struct drm_i915_gem_pwrite *args,
		    struct drm_file *file_priv)
{
	uint32_t *user_data;
	int ret = 0;
	unsigned long unwritten = 0;

	user_data = (uint32_t *) (uintptr_t) args->data_ptr;
	spin_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		spin_unlock(&dev->struct_mutex);
		DRM_ERROR("i915_gem_gtt_pwrite failed to pin ret %d", ret);
		return ret;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto err;

	DRM_DEBUG("obj %d write domain 0x%x read domain 0x%x", obj->name, obj->write_domain, obj->read_domains);

	unwritten = DRM_COPY_FROM_USER(obj->kaddr + args->offset, user_data, args->size);
	if (unwritten) {
		ret = EFAULT;
		DRM_ERROR("i915_gem_gtt_pwrite error!!! unwritten %d", unwritten);
		goto err;
	}

err:
	i915_gem_object_unpin(obj);
	spin_unlock(&dev->struct_mutex);
	if (ret)
		DRM_ERROR("i915_gem_gtt_pwrite error %d", ret);
	return ret;
}

/*ARGSUSED*/
int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file_priv)
{
	DRM_ERROR("i915_gem_shmem_pwrite not supported");
	return -1;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
/*ARGSUSED*/
int
i915_gem_pwrite_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_i915_gem_pwrite args;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (dev->driver->use_gem != 1)
		return ENODEV;

	ret = DRM_COPY_FROM_USER(&args,
	    (struct drm_i915_gem_pwrite __user *) data, sizeof(args));
	if (ret)
		DRM_ERROR("i915_gem_pwrite_ioctl failed to copy from user");
	obj = drm_gem_object_lookup(fpriv, args.handle);
	if (obj == NULL)
		return EBADF;
	obj_priv = obj->driver_private;
	DRM_DEBUG("i915_gem_pwrite_ioctl, obj->name %d", obj->name);

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args.offset > obj->size || args.size > obj->size ||
	    args.offset + args.size > obj->size) {
		drm_gem_object_unreference(obj);
		DRM_ERROR("i915_gem_pwrite_ioctl invalid arg");
		return EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->tiling_mode == I915_TILING_NONE &&
	    dev->gtt_total != 0)
		ret = i915_gem_gtt_pwrite(dev, obj, &args, fpriv);
	else
		ret = i915_gem_shmem_pwrite(dev, obj, &args, fpriv);

	if (ret)
		DRM_ERROR("pwrite failed %d\n", ret);

	drm_gem_object_unreference(obj);

	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
/*ARGSUSED*/
int
i915_gem_set_domain_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_i915_gem_set_domain args;
	struct drm_gem_object *obj;
	int ret = 0;

	if (dev->driver->use_gem != 1)
		return ENODEV;

	DRM_COPYFROM_WITH_RETURN(&args,
	    (struct drm_i915_gem_set_domain __user *) data, sizeof(args));

	uint32_t read_domains = args.read_domains;
	uint32_t write_domain = args.write_domain;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		ret = EINVAL;

	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		ret = EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		ret = EINVAL;
	if (ret) {
		DRM_ERROR("set_domain invalid read or write");
		return EINVAL;
	}

	obj = drm_gem_object_lookup(fpriv, args.handle);
	if (obj == NULL)
		return EBADF;

	spin_lock(&dev->struct_mutex);
	DRM_DEBUG("set_domain_ioctl %p(name %d size 0x%x), %08x %08x\n",
	    obj, obj->name, obj->size, args.read_domains, args.write_domain);

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	spin_unlock(&dev->struct_mutex);
	if (ret)
		DRM_ERROR("i915_set_domain_ioctl ret %d", ret);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
/*ARGSUSED*/
int
i915_gem_sw_finish_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_i915_gem_sw_finish args;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (dev->driver->use_gem != 1)
		return ENODEV;

	DRM_COPYFROM_WITH_RETURN(&args,
	    (struct drm_i915_gem_sw_finish __user *) data, sizeof(args));

	spin_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(fpriv, args.handle);
	if (obj == NULL) {
		spin_unlock(&dev->struct_mutex);
		return EBADF;
	}

	DRM_DEBUG("%s: sw_finish %d (%p name %d size 0x%x)\n",
	    __func__, args.handle, obj, obj->name, obj->size);

	obj_priv = obj->driver_private;
	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
	{
		i915_gem_object_flush_cpu_write_domain(obj);
	}

	drm_gem_object_unreference(obj);
	spin_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
/*ARGSUSED*/
int
i915_gem_mmap_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_i915_gem_mmap args;
	struct drm_gem_object *obj;
	caddr_t vvaddr = NULL;
	int ret;

	if (dev->driver->use_gem != 1)
		return ENODEV;

	DRM_COPYFROM_WITH_RETURN(
	    &args, (struct drm_i915_gem_mmap __user *)data,
	    sizeof (struct drm_i915_gem_mmap));

	obj = drm_gem_object_lookup(fpriv, args.handle);
	if (obj == NULL)
		return EBADF;

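	/*
	 * Map the object's devmap handle into the calling process's address
	 * space; the resulting address is handed back to user space below.
	 */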
	ret = ddi_devmap_segmap(fpriv->dev, (off_t)obj->map->handle,
	    ttoproc(curthread)->p_as, &vvaddr, obj->map->size,
	    PROT_ALL, PROT_ALL, MAP_SHARED, fpriv->credp);
	if (ret)
		return ret;

	spin_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	spin_unlock(&dev->struct_mutex);

	args.addr_ptr = (uint64_t)(uintptr_t)vvaddr;

	DRM_COPYTO_WITH_RETURN(
	    (struct drm_i915_gem_mmap __user *)data,
	    &args, sizeof (struct drm_i915_gem_mmap));

	return 0;
}

static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	if (obj_priv->page_list == NULL)
		return;

	kmem_free(obj_priv->page_list,
	    btop(obj->size) * sizeof(caddr_t));

	obj_priv->page_list = NULL;
}

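/*
 * An object moves between three lists as the GPU uses it: active (still
 * referenced by commands in the ring), flushing (rendering done but a GPU
 * write domain still dirty), and inactive (idle, a candidate for eviction).
 * The helpers below handle those transitions and the reference that the
 * active list holds on the object.
 */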
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list, (caddr_t)obj_priv);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list, (caddr_t)obj_priv);
	obj_priv->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (obj_priv->pin_count != 0)
	{
		list_del_init(&obj_priv->list);
	} else {
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list, (caddr_t)obj_priv);
	}
	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
}

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL) {
		DRM_ERROR("Failed to alloc request");
		return 0;
	}
	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	DRM_DEBUG("add_request seqno = %d dev 0x%lx", seqno, dev);

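	/*
	 * Emit the seqno write to the hardware status page, then a user
	 * interrupt so anyone sleeping in i915_wait_request is woken once
	 * the GPU passes this point in the ring.
	 */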
	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(0);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list, (caddr_t)request);

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		obj_priv = list_entry(dev_priv->mm.flushing_list.next, struct drm_i915_gem_object, list),
		next = list_entry(obj_priv->list.next, struct drm_i915_gem_object, list);
		for (; &obj_priv->list != &dev_priv->mm.flushing_list;
		    obj_priv = next,
		    next = list_entry(next->list.next, struct drm_i915_gem_object, list)) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);
			}
		}

	}

	if (was_empty && !dev_priv->mm.suspended)
	{
		/* change to delay HZ and then run work (not insert to workqueue of Linux) */
		worktimer_id = timeout(i915_gem_retire_work_handler, (void *) dev, DRM_HZ);
		DRM_DEBUG("i915_gem: schedule_delayed_work");
	}
	return seqno;
}

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();

	return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_entry(dev_priv->mm.active_list.next,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this seqno.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			return;

		DRM_DEBUG("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);

		if (obj->write_domain != 0) {
			i915_gem_object_move_to_flushing(obj);
		} else {
			i915_gem_object_move_to_inactive(obj);
		}
	}
}

/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
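	/*
	 * Sequence numbers wrap; the signed 32-bit difference gives the right
	 * answer as long as the two values are within 2^31 of each other.
	 */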
	return (int32_t)(seq1 - seq2) >= 0;
}

uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	seqno = i915_get_gem_seqno(dev);

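	/*
	 * Requests sit on the list in submission order, so stop at the first
	 * one whose seqno the hardware has not yet passed.
	 */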
	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;
		request = (struct drm_i915_gem_request *)(uintptr_t)(dev_priv->mm.request_list.next->contain_ptr);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}

void
i915_gem_retire_work_handler(void *device)
{
	struct drm_device *dev = (struct drm_device *)device;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&dev->struct_mutex);

	/* Return if gem idle */
	if (worktimer_id == NULL) {
		spin_unlock(&dev->struct_mutex);
		return;
	}

	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended && !list_empty(&dev_priv->mm.request_list))
	{
		DRM_DEBUG("i915_gem: schedule_delayed_work");
		worktimer_id = timeout(i915_gem_retire_work_handler, (void *) dev, DRM_HZ);
	}
	spin_unlock(&dev->struct_mutex);
}

/**
 * i965_reset - reset chip after a hang
 * @dev: drm device to reset
 * @flags: reset domains
 *
 * Reset the chip.  Useful if a hang is detected.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void i965_reset(struct drm_device *dev, u8 flags)
{
	ddi_acc_handle_t conf_hdl;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int timeout = 0;
	uint8_t gdrst;

	if (flags & GDRST_FULL)
		i915_save_display(dev);

	if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
		DRM_ERROR(("i915_reset: pci_config_setup fail"));
		return;
	}

	/*
	 * Set the reset bit, wait for reset, then clear it.  Hardware
	 * will clear the status bit (bit 1) when it's actually ready
	 * for action again.
	 */
	gdrst = pci_config_get8(conf_hdl, GDRST);
	pci_config_put8(conf_hdl, GDRST, gdrst | flags);
	drv_usecwait(50);
	pci_config_put8(conf_hdl, GDRST, gdrst | 0xfe);

	/* ...we don't want to loop forever though, 500ms should be plenty */
	do {
		drv_usecwait(100);
		gdrst = pci_config_get8(conf_hdl, GDRST);
	} while ((gdrst & 2) && (timeout++ < 5));

	/* Ok now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (!dev_priv->mm.suspended) {
		drm_i915_ring_buffer_t *ring = &dev_priv->ring;
		struct drm_gem_object *obj = ring->ring_obj;
		struct drm_i915_gem_object *obj_priv = obj->driver_private;
		dev_priv->mm.suspended = 0;

		/* Stop the ring if it's running. */
		I915_WRITE(PRB0_CTL, 0);
		I915_WRITE(PRB0_TAIL, 0);
		I915_WRITE(PRB0_HEAD, 0);

		/* Initialize the ring. */
		I915_WRITE(PRB0_START, obj_priv->gtt_offset);
		I915_WRITE(PRB0_CTL,
			   ((obj->size - 4096) & RING_NR_PAGES) |
			   RING_NO_REPORT |
			   RING_VALID);
		i915_kernel_lost_context(dev);

		(void) drm_irq_install(dev);
	}

	/*
	 * Display needs restore too...
	 */
	if (flags & GDRST_FULL)
		i915_restore_display(dev);
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	ASSERT(seqno != 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		if (IS_IGDNG(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			(void) i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

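		/*
		 * Record the seqno being waited on and enable the user
		 * interrupt, then sleep on irq_queue until the hardware
		 * status page shows the seqno has passed (or the GPU is
		 * marked wedged).
		 */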
		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_on(dev);
		DRM_WAIT(ret, &dev_priv->irq_queue,
		    (i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
				dev_priv->mm.wedged));
		i915_user_irq_off(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged) {
		ret = EIO;
	}

	/* The GPU may have hung; a reset is needed. */
91911260SMiao.Chen@Sun.COM 	if (ret == -2 && (seqno > i915_get_gem_seqno(dev))) {
92011260SMiao.Chen@Sun.COM 		if (IS_I965G(dev)) {
92111260SMiao.Chen@Sun.COM 			DRM_ERROR("GPU hang detected try to reset ... wait for irq_queue seqno %d, now seqno %d", seqno, i915_get_gem_seqno(dev));
92211260SMiao.Chen@Sun.COM 			dev_priv->mm.wedged = 1;
92311260SMiao.Chen@Sun.COM 			i965_reset(dev, GDRST_RENDER);
92411260SMiao.Chen@Sun.COM 			i915_gem_retire_requests(dev);
92511260SMiao.Chen@Sun.COM 			dev_priv->mm.wedged = 0;
92611260SMiao.Chen@Sun.COM 		}
92711260SMiao.Chen@Sun.COM 		else
92811260SMiao.Chen@Sun.COM 			DRM_ERROR("GPU hang detected.... reboot required");
92911260SMiao.Chen@Sun.COM 		return 0;
93011260SMiao.Chen@Sun.COM 	}
93111260SMiao.Chen@Sun.COM 	/* Directly dispatch request retiring.  While we have the work queue
93211260SMiao.Chen@Sun.COM 	 * to handle this, the waiter on a request often wants an associated
93311260SMiao.Chen@Sun.COM 	 * buffer to have made it to the inactive list, and we would need
93411260SMiao.Chen@Sun.COM 	 * a separate wait queue to handle that.
93511260SMiao.Chen@Sun.COM 	 */
93611260SMiao.Chen@Sun.COM 	if (ret == 0)
93711260SMiao.Chen@Sun.COM 		i915_gem_retire_requests(dev);
93811260SMiao.Chen@Sun.COM 
93911260SMiao.Chen@Sun.COM 	return ret;
94011260SMiao.Chen@Sun.COM }
94111260SMiao.Chen@Sun.COM 
94211260SMiao.Chen@Sun.COM static void
i915_gem_flush(struct drm_device * dev,uint32_t invalidate_domains,uint32_t flush_domains)94311260SMiao.Chen@Sun.COM i915_gem_flush(struct drm_device *dev,
94411260SMiao.Chen@Sun.COM 	       uint32_t invalidate_domains,
94511260SMiao.Chen@Sun.COM 	       uint32_t flush_domains)
94611260SMiao.Chen@Sun.COM {
94711260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
94811260SMiao.Chen@Sun.COM 	uint32_t cmd;
94911260SMiao.Chen@Sun.COM 	RING_LOCALS;
95011260SMiao.Chen@Sun.COM 
95111260SMiao.Chen@Sun.COM 	DRM_DEBUG("%s: invalidate %08x flush %08x\n", __func__,
95211260SMiao.Chen@Sun.COM 		  invalidate_domains, flush_domains);
95311260SMiao.Chen@Sun.COM 
95411260SMiao.Chen@Sun.COM 	if (flush_domains & I915_GEM_DOMAIN_CPU)
95511260SMiao.Chen@Sun.COM 		drm_agp_chipset_flush(dev);
95611260SMiao.Chen@Sun.COM 
95711260SMiao.Chen@Sun.COM 	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
95811260SMiao.Chen@Sun.COM 						     I915_GEM_DOMAIN_GTT)) {
95911260SMiao.Chen@Sun.COM 		/*
96011260SMiao.Chen@Sun.COM 		 * read/write caches:
96111260SMiao.Chen@Sun.COM 		 *
96211260SMiao.Chen@Sun.COM 		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
96311260SMiao.Chen@Sun.COM 		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
96411260SMiao.Chen@Sun.COM 		 * also flushed at 2d versus 3d pipeline switches.
96511260SMiao.Chen@Sun.COM 		 *
96611260SMiao.Chen@Sun.COM 		 * read-only caches:
96711260SMiao.Chen@Sun.COM 		 *
96811260SMiao.Chen@Sun.COM 		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
96911260SMiao.Chen@Sun.COM 		 * MI_READ_FLUSH is set, and is always flushed on 965.
97011260SMiao.Chen@Sun.COM 		 *
97111260SMiao.Chen@Sun.COM 		 * I915_GEM_DOMAIN_COMMAND may not exist?
97211260SMiao.Chen@Sun.COM 		 *
97311260SMiao.Chen@Sun.COM 		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
97411260SMiao.Chen@Sun.COM 		 * invalidated when MI_EXE_FLUSH is set.
97511260SMiao.Chen@Sun.COM 		 *
97611260SMiao.Chen@Sun.COM 		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
97711260SMiao.Chen@Sun.COM 		 * invalidated with every MI_FLUSH.
97811260SMiao.Chen@Sun.COM 		 *
97911260SMiao.Chen@Sun.COM 		 * TLBs:
98011260SMiao.Chen@Sun.COM 		 *
98111260SMiao.Chen@Sun.COM 		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
98211260SMiao.Chen@Sun.COM 		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
98311260SMiao.Chen@Sun.COM 		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
98411260SMiao.Chen@Sun.COM 		 * are flushed at any MI_FLUSH.
98511260SMiao.Chen@Sun.COM 		 */
98611260SMiao.Chen@Sun.COM 
98711260SMiao.Chen@Sun.COM 		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
98811260SMiao.Chen@Sun.COM 		if ((invalidate_domains|flush_domains) &
98911260SMiao.Chen@Sun.COM 		    I915_GEM_DOMAIN_RENDER)
99011260SMiao.Chen@Sun.COM 			cmd &= ~MI_NO_WRITE_FLUSH;
99111260SMiao.Chen@Sun.COM 		if (!IS_I965G(dev)) {
99211260SMiao.Chen@Sun.COM 			/*
99311260SMiao.Chen@Sun.COM 			 * On the 965, the sampler cache always gets flushed
99411260SMiao.Chen@Sun.COM 			 * and this bit is reserved.
99511260SMiao.Chen@Sun.COM 			 */
99611260SMiao.Chen@Sun.COM 			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
99711260SMiao.Chen@Sun.COM 				cmd |= MI_READ_FLUSH;
99811260SMiao.Chen@Sun.COM 		}
99911260SMiao.Chen@Sun.COM 		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
100011260SMiao.Chen@Sun.COM 			cmd |= MI_EXE_FLUSH;
100111260SMiao.Chen@Sun.COM 
100211260SMiao.Chen@Sun.COM 		DRM_DEBUG("%s: queue flush %08x to ring\n", __func__, cmd);
100311260SMiao.Chen@Sun.COM 
100411260SMiao.Chen@Sun.COM 		BEGIN_LP_RING(2);
100511260SMiao.Chen@Sun.COM 		OUT_RING(cmd);
100611260SMiao.Chen@Sun.COM 		OUT_RING(0); /* noop */
100711260SMiao.Chen@Sun.COM 		ADVANCE_LP_RING();
100811260SMiao.Chen@Sun.COM 	}
100911260SMiao.Chen@Sun.COM }
101011260SMiao.Chen@Sun.COM 
101111260SMiao.Chen@Sun.COM /**
101211260SMiao.Chen@Sun.COM  * Ensures that all rendering to the object has completed and the object is
101311260SMiao.Chen@Sun.COM  * safe to unbind from the GTT or access from the CPU.
101411260SMiao.Chen@Sun.COM  */
101511260SMiao.Chen@Sun.COM static int
i915_gem_object_wait_rendering(struct drm_gem_object * obj)101611260SMiao.Chen@Sun.COM i915_gem_object_wait_rendering(struct drm_gem_object *obj)
101711260SMiao.Chen@Sun.COM {
101811260SMiao.Chen@Sun.COM 	struct drm_device *dev = obj->dev;
101911260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
102011260SMiao.Chen@Sun.COM 	int ret, seqno;
102111260SMiao.Chen@Sun.COM 
102211260SMiao.Chen@Sun.COM 	/* This function only exists to support waiting for existing rendering,
102311260SMiao.Chen@Sun.COM 	 * not for emitting required flushes.
102411260SMiao.Chen@Sun.COM 	 */
102511260SMiao.Chen@Sun.COM 
102611260SMiao.Chen@Sun.COM 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0) {
102711260SMiao.Chen@Sun.COM 		DRM_ERROR("%s: object still has GPU write domain %08x (active %d)", __func__, obj->write_domain, obj_priv->active);
102811260SMiao.Chen@Sun.COM 		return 0;
102911260SMiao.Chen@Sun.COM 	}
103011260SMiao.Chen@Sun.COM 
103111260SMiao.Chen@Sun.COM 	/* If there is rendering queued on the buffer being evicted, wait for
103211260SMiao.Chen@Sun.COM 	 * it.
103311260SMiao.Chen@Sun.COM 	 */
103411260SMiao.Chen@Sun.COM 	if (obj_priv->active) {
103511260SMiao.Chen@Sun.COM 		DRM_DEBUG("%s: object %d %p wait for seqno %08x\n",
103611260SMiao.Chen@Sun.COM 			  __func__, obj->name, obj, obj_priv->last_rendering_seqno);
103711260SMiao.Chen@Sun.COM 
103811260SMiao.Chen@Sun.COM 		seqno = obj_priv->last_rendering_seqno;
103911260SMiao.Chen@Sun.COM 		if (seqno == 0) {
104011260SMiao.Chen@Sun.COM 			DRM_DEBUG("last rendering may already have finished");
104111260SMiao.Chen@Sun.COM 			return 0;
104211260SMiao.Chen@Sun.COM 		}
104311260SMiao.Chen@Sun.COM 		ret = i915_wait_request(dev, seqno);
104411260SMiao.Chen@Sun.COM 		if (ret != 0) {
104511260SMiao.Chen@Sun.COM 			DRM_ERROR("%s: i915_wait_request request->seqno %d now %d\n", __func__, seqno, i915_get_gem_seqno(dev));
104611260SMiao.Chen@Sun.COM 			return ret;
104711260SMiao.Chen@Sun.COM 		}
104811260SMiao.Chen@Sun.COM 	}
104911260SMiao.Chen@Sun.COM 
105011260SMiao.Chen@Sun.COM 	return 0;
105111260SMiao.Chen@Sun.COM }
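
/*
 * A minimal usage sketch, assuming dev->struct_mutex is held: before the
 * CPU touches an object's backing pages, flush any pending GPU writes
 * (which queues a request) and then wait for that rendering to retire.
 * This mirrors what the set_to_*_domain() helpers below do internally.
 */
#if 0
static int
i915_gem_object_quiesce_sketch(struct drm_gem_object *obj)
{
	int ret;

	/* Turn outstanding GPU writes into a request we can wait on. */
	i915_gem_object_flush_gpu_write_domain(obj);

	/* Wait for obj_priv->last_rendering_seqno to pass. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		DRM_ERROR("wait_rendering failed: %d\n", ret);
	return ret;
}
#endif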
105211260SMiao.Chen@Sun.COM 
105311260SMiao.Chen@Sun.COM /**
105411260SMiao.Chen@Sun.COM  * Unbinds an object from the GTT aperture.
105511260SMiao.Chen@Sun.COM  */
105611260SMiao.Chen@Sun.COM int
105711260SMiao.Chen@Sun.COM i915_gem_object_unbind(struct drm_gem_object *obj, uint32_t type)
105811260SMiao.Chen@Sun.COM {
105911260SMiao.Chen@Sun.COM 	struct drm_device *dev = obj->dev;
106011260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
106111260SMiao.Chen@Sun.COM 	int ret = 0;
106211260SMiao.Chen@Sun.COM 
106311260SMiao.Chen@Sun.COM 	if (obj_priv->gtt_space == NULL)
106411260SMiao.Chen@Sun.COM 		return 0;
106511260SMiao.Chen@Sun.COM 
106611260SMiao.Chen@Sun.COM 	if (obj_priv->pin_count != 0) {
106711260SMiao.Chen@Sun.COM 		DRM_ERROR("Attempting to unbind pinned buffer\n");
106811260SMiao.Chen@Sun.COM 		return EINVAL;
106911260SMiao.Chen@Sun.COM 	}
107011260SMiao.Chen@Sun.COM 
107111260SMiao.Chen@Sun.COM 	/* Wait for any rendering to complete
107211260SMiao.Chen@Sun.COM 	 */
107311260SMiao.Chen@Sun.COM 	ret = i915_gem_object_wait_rendering(obj);
107411260SMiao.Chen@Sun.COM 	if (ret) {
107511260SMiao.Chen@Sun.COM 		DRM_ERROR("wait_rendering failed: %d\n", ret);
107611260SMiao.Chen@Sun.COM 		return ret;
107711260SMiao.Chen@Sun.COM 	}
107811260SMiao.Chen@Sun.COM 
107911260SMiao.Chen@Sun.COM 	/* Move the object to the CPU domain to ensure that
108011260SMiao.Chen@Sun.COM 	 * any possible CPU writes while it's not in the GTT
108111260SMiao.Chen@Sun.COM 	 * are flushed when we go to remap it. This will
108211260SMiao.Chen@Sun.COM 	 * also ensure that all pending GPU writes are finished
108311260SMiao.Chen@Sun.COM 	 * before we unbind.
108411260SMiao.Chen@Sun.COM 	 */
108511260SMiao.Chen@Sun.COM 	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
108611260SMiao.Chen@Sun.COM 	if (ret) {
108711260SMiao.Chen@Sun.COM 		DRM_ERROR("set_domain failed: %d\n", ret);
108811260SMiao.Chen@Sun.COM 		return ret;
108911260SMiao.Chen@Sun.COM 	}
109011260SMiao.Chen@Sun.COM 
109111260SMiao.Chen@Sun.COM 	if (!obj_priv->agp_mem) {
1092*11387SSurya.Prakki@Sun.COM 		(void) drm_agp_unbind_pages(dev, obj->size / PAGE_SIZE,
1093*11387SSurya.Prakki@Sun.COM 		    obj_priv->gtt_offset, type);
109411260SMiao.Chen@Sun.COM 		obj_priv->agp_mem = -1;
109511260SMiao.Chen@Sun.COM 	}
109611260SMiao.Chen@Sun.COM 
109711260SMiao.Chen@Sun.COM 	ASSERT(!obj_priv->active);
109811260SMiao.Chen@Sun.COM 
109911260SMiao.Chen@Sun.COM 	i915_gem_object_free_page_list(obj);
110011260SMiao.Chen@Sun.COM 
110111260SMiao.Chen@Sun.COM 	if (obj_priv->gtt_space) {
110211260SMiao.Chen@Sun.COM 		atomic_dec(&dev->gtt_count);
110311260SMiao.Chen@Sun.COM 		atomic_sub(obj->size, &dev->gtt_memory);
110411260SMiao.Chen@Sun.COM 		drm_mm_put_block(obj_priv->gtt_space);
110511260SMiao.Chen@Sun.COM 		obj_priv->gtt_space = NULL;
110611260SMiao.Chen@Sun.COM 	}
110711260SMiao.Chen@Sun.COM 
110811260SMiao.Chen@Sun.COM 	/* Remove ourselves from the LRU list if present. */
110911260SMiao.Chen@Sun.COM 	if (!list_empty(&obj_priv->list))
111011260SMiao.Chen@Sun.COM 		list_del_init(&obj_priv->list);
111111260SMiao.Chen@Sun.COM 
111211260SMiao.Chen@Sun.COM 	return 0;
111311260SMiao.Chen@Sun.COM }
111411260SMiao.Chen@Sun.COM 
111511260SMiao.Chen@Sun.COM static int
111611260SMiao.Chen@Sun.COM i915_gem_evict_something(struct drm_device *dev)
111711260SMiao.Chen@Sun.COM {
111811260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
111911260SMiao.Chen@Sun.COM 	struct drm_gem_object *obj;
112011260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv;
112111260SMiao.Chen@Sun.COM 	int ret = 0;
112211260SMiao.Chen@Sun.COM 
112311260SMiao.Chen@Sun.COM 	for (;;) {
112411260SMiao.Chen@Sun.COM 		/* If there's an inactive buffer available now, grab it
112511260SMiao.Chen@Sun.COM 		 * and be done.
112611260SMiao.Chen@Sun.COM 		 */
112711260SMiao.Chen@Sun.COM 		if (!list_empty(&dev_priv->mm.inactive_list)) {
112811260SMiao.Chen@Sun.COM 			obj_priv = list_entry(dev_priv->mm.inactive_list.next,
112911260SMiao.Chen@Sun.COM 						    struct drm_i915_gem_object,
113011260SMiao.Chen@Sun.COM 						    list);
113111260SMiao.Chen@Sun.COM 			obj = obj_priv->obj;
113211260SMiao.Chen@Sun.COM 			ASSERT(obj_priv->pin_count == 0);
113311260SMiao.Chen@Sun.COM 			DRM_DEBUG("%s: evicting %d\n", __func__, obj->name);
113411260SMiao.Chen@Sun.COM 			ASSERT(!obj_priv->active);
113511260SMiao.Chen@Sun.COM 			/* Wait on the rendering and unbind the buffer. */
113611260SMiao.Chen@Sun.COM 			ret = i915_gem_object_unbind(obj, 1);
113711260SMiao.Chen@Sun.COM 			break;
113811260SMiao.Chen@Sun.COM 		}
113911260SMiao.Chen@Sun.COM 		/* If we didn't get anything, but the ring is still processing
114011260SMiao.Chen@Sun.COM 		 * things, wait for one of those things to finish and hopefully
114111260SMiao.Chen@Sun.COM 		 * leave us a buffer to evict.
114211260SMiao.Chen@Sun.COM 		 */
114311260SMiao.Chen@Sun.COM 		if (!list_empty(&dev_priv->mm.request_list)) {
114411260SMiao.Chen@Sun.COM 			struct drm_i915_gem_request *request;
114511260SMiao.Chen@Sun.COM 
114611260SMiao.Chen@Sun.COM 			request = list_entry(dev_priv->mm.request_list.next,
114711260SMiao.Chen@Sun.COM 						   struct drm_i915_gem_request,
114811260SMiao.Chen@Sun.COM 						   list);
114911260SMiao.Chen@Sun.COM 
115011260SMiao.Chen@Sun.COM 			ret = i915_wait_request(dev, request->seqno);
115111260SMiao.Chen@Sun.COM 			if (ret) {
115211260SMiao.Chen@Sun.COM 				break;
115311260SMiao.Chen@Sun.COM 			}
115411260SMiao.Chen@Sun.COM 			/* if waiting caused an object to become inactive,
115511260SMiao.Chen@Sun.COM 			 * then loop around and wait for it. Otherwise, we
115611260SMiao.Chen@Sun.COM 			 * assume that waiting freed and unbound something,
115711260SMiao.Chen@Sun.COM 			 * so there should now be some space in the GTT
115811260SMiao.Chen@Sun.COM 			 */
115911260SMiao.Chen@Sun.COM 			if (!list_empty(&dev_priv->mm.inactive_list))
116011260SMiao.Chen@Sun.COM 				continue;
116111260SMiao.Chen@Sun.COM 			break;
116211260SMiao.Chen@Sun.COM 		}
116311260SMiao.Chen@Sun.COM 
116411260SMiao.Chen@Sun.COM 		/* If we didn't have anything on the request list but there
116511260SMiao.Chen@Sun.COM 		 * are buffers awaiting a flush, emit one and try again.
116611260SMiao.Chen@Sun.COM 		 * When we wait on it, those buffers waiting for that flush
116711260SMiao.Chen@Sun.COM 		 * will get moved to inactive.
116811260SMiao.Chen@Sun.COM 		 */
116911260SMiao.Chen@Sun.COM 		if (!list_empty(&dev_priv->mm.flushing_list)) {
117011260SMiao.Chen@Sun.COM 			obj_priv = list_entry(dev_priv->mm.flushing_list.next,
117111260SMiao.Chen@Sun.COM 						    struct drm_i915_gem_object,
117211260SMiao.Chen@Sun.COM 						    list);
117311260SMiao.Chen@Sun.COM 			obj = obj_priv->obj;
117411260SMiao.Chen@Sun.COM 
117511260SMiao.Chen@Sun.COM 			i915_gem_flush(dev,
117611260SMiao.Chen@Sun.COM 				       obj->write_domain,
117711260SMiao.Chen@Sun.COM 				       obj->write_domain);
117811260SMiao.Chen@Sun.COM 			(void) i915_add_request(dev, obj->write_domain);
117911260SMiao.Chen@Sun.COM 
118011260SMiao.Chen@Sun.COM 			obj = NULL;
118111260SMiao.Chen@Sun.COM 			continue;
118211260SMiao.Chen@Sun.COM 		}
118311260SMiao.Chen@Sun.COM 
118411260SMiao.Chen@Sun.COM 		DRM_ERROR("inactive empty %d request empty %d "
118511260SMiao.Chen@Sun.COM 			  "flushing empty %d\n",
118611260SMiao.Chen@Sun.COM 			  list_empty(&dev_priv->mm.inactive_list),
118711260SMiao.Chen@Sun.COM 			  list_empty(&dev_priv->mm.request_list),
118811260SMiao.Chen@Sun.COM 			  list_empty(&dev_priv->mm.flushing_list));
118911260SMiao.Chen@Sun.COM 		/* If we didn't do any of the above, there's nothing to be done
119011260SMiao.Chen@Sun.COM 		 * and we just can't fit it in.
119111260SMiao.Chen@Sun.COM 		 */
119211260SMiao.Chen@Sun.COM 		return ENOMEM;
119311260SMiao.Chen@Sun.COM 	}
119411260SMiao.Chen@Sun.COM 	return ret;
119511260SMiao.Chen@Sun.COM }
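
/*
 * A condensed sketch of the allocate-or-evict loop this routine exists to
 * serve (i915_gem_object_bind_to_gtt() below is the full version): keep
 * evicting one buffer at a time until a GTT hole of the right size and
 * alignment opens up, or eviction runs out of candidates.
 */
#if 0
static struct drm_mm_node *
i915_gem_find_gtt_hole_sketch(struct drm_device *dev, unsigned long size,
    unsigned alignment)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *free_space;

	for (;;) {
		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
		    size, alignment, 0);
		if (free_space != NULL)
			return free_space;	/* hole found */

		/* No hole yet; push out the least recently used buffer. */
		if (i915_gem_evict_something(dev) != 0)
			return NULL;
	}
}
#endif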
119611260SMiao.Chen@Sun.COM 
119711260SMiao.Chen@Sun.COM static int
119811260SMiao.Chen@Sun.COM i915_gem_evict_everything(struct drm_device *dev)
119911260SMiao.Chen@Sun.COM {
120011260SMiao.Chen@Sun.COM 	int ret;
120111260SMiao.Chen@Sun.COM 
120211260SMiao.Chen@Sun.COM 	for (;;) {
120311260SMiao.Chen@Sun.COM 		ret = i915_gem_evict_something(dev);
120411260SMiao.Chen@Sun.COM 		if (ret != 0)
120511260SMiao.Chen@Sun.COM 			break;
120611260SMiao.Chen@Sun.COM 	}
120711260SMiao.Chen@Sun.COM 	if (ret == ENOMEM)
120811260SMiao.Chen@Sun.COM 		return 0;
120911260SMiao.Chen@Sun.COM 	else
121011260SMiao.Chen@Sun.COM 		DRM_ERROR("evict_everything ret %d", ret);
121111260SMiao.Chen@Sun.COM 	return ret;
121211260SMiao.Chen@Sun.COM }
121311260SMiao.Chen@Sun.COM 
121411260SMiao.Chen@Sun.COM /**
121511260SMiao.Chen@Sun.COM  * Finds free space in the GTT aperture and binds the object there.
121611260SMiao.Chen@Sun.COM  */
121711260SMiao.Chen@Sun.COM static int
121811260SMiao.Chen@Sun.COM i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, uint32_t alignment)
121911260SMiao.Chen@Sun.COM {
122011260SMiao.Chen@Sun.COM 	struct drm_device *dev = obj->dev;
122111260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
122211260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
122311260SMiao.Chen@Sun.COM 	struct drm_mm_node *free_space;
122411260SMiao.Chen@Sun.COM 	int page_count, ret;
122511260SMiao.Chen@Sun.COM 
122611260SMiao.Chen@Sun.COM 	if (dev_priv->mm.suspended)
122711260SMiao.Chen@Sun.COM 		return EBUSY;
122811260SMiao.Chen@Sun.COM 	if (alignment == 0)
122911260SMiao.Chen@Sun.COM 		alignment = PAGE_SIZE;
123011260SMiao.Chen@Sun.COM 	if (alignment & (PAGE_SIZE - 1)) {
123111260SMiao.Chen@Sun.COM 		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
123211260SMiao.Chen@Sun.COM 		return EINVAL;
123311260SMiao.Chen@Sun.COM 	}
123411260SMiao.Chen@Sun.COM 
123511260SMiao.Chen@Sun.COM 	if (obj_priv->gtt_space) {
123611260SMiao.Chen@Sun.COM 		DRM_ERROR("Object already bound to the GTT");
123711260SMiao.Chen@Sun.COM 		return 0;
123811260SMiao.Chen@Sun.COM 	}
123911260SMiao.Chen@Sun.COM search_free:
124011260SMiao.Chen@Sun.COM 	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
124111260SMiao.Chen@Sun.COM 					(unsigned long) obj->size, alignment, 0);
124211260SMiao.Chen@Sun.COM 	if (free_space != NULL) {
124311260SMiao.Chen@Sun.COM 		obj_priv->gtt_space = drm_mm_get_block(free_space, (unsigned long) obj->size,
124411260SMiao.Chen@Sun.COM 						       alignment);
124511260SMiao.Chen@Sun.COM 		if (obj_priv->gtt_space != NULL) {
124611260SMiao.Chen@Sun.COM 			obj_priv->gtt_space->private = obj;
124711260SMiao.Chen@Sun.COM 			obj_priv->gtt_offset = obj_priv->gtt_space->start;
124811260SMiao.Chen@Sun.COM 		}
124911260SMiao.Chen@Sun.COM 	}
125011260SMiao.Chen@Sun.COM 	if (obj_priv->gtt_space == NULL) {
125111260SMiao.Chen@Sun.COM 		/* If the gtt is empty and we're still having trouble
125211260SMiao.Chen@Sun.COM 		 * fitting our object in, we're out of memory.
125311260SMiao.Chen@Sun.COM 		 */
125411260SMiao.Chen@Sun.COM 		if (list_empty(&dev_priv->mm.inactive_list) &&
125511260SMiao.Chen@Sun.COM 		    list_empty(&dev_priv->mm.flushing_list) &&
125611260SMiao.Chen@Sun.COM 		    list_empty(&dev_priv->mm.active_list)) {
125711260SMiao.Chen@Sun.COM 			DRM_ERROR("GTT full, but LRU list empty\n");
125811260SMiao.Chen@Sun.COM 			return ENOMEM;
125911260SMiao.Chen@Sun.COM 		}
126011260SMiao.Chen@Sun.COM 
126111260SMiao.Chen@Sun.COM 		ret = i915_gem_evict_something(dev);
126211260SMiao.Chen@Sun.COM 		if (ret != 0) {
126311260SMiao.Chen@Sun.COM 			DRM_ERROR("Failed to evict a buffer %d\n", ret);
126411260SMiao.Chen@Sun.COM 			return ret;
126511260SMiao.Chen@Sun.COM 		}
126611260SMiao.Chen@Sun.COM 		goto search_free;
126711260SMiao.Chen@Sun.COM 	}
126811260SMiao.Chen@Sun.COM 
126911260SMiao.Chen@Sun.COM 	ret = i915_gem_object_get_page_list(obj);
127011260SMiao.Chen@Sun.COM 	if (ret) {
127111260SMiao.Chen@Sun.COM 		drm_mm_put_block(obj_priv->gtt_space);
127211260SMiao.Chen@Sun.COM 		obj_priv->gtt_space = NULL;
127311260SMiao.Chen@Sun.COM 		DRM_ERROR("bind to gtt failed to get page list");
127411260SMiao.Chen@Sun.COM 		return ret;
127511260SMiao.Chen@Sun.COM 	}
127611260SMiao.Chen@Sun.COM 
127711260SMiao.Chen@Sun.COM 	page_count = obj->size / PAGE_SIZE;
127811260SMiao.Chen@Sun.COM 	/* Create an AGP memory structure pointing at our pages, and bind it
127911260SMiao.Chen@Sun.COM 	 * into the GTT.
128011260SMiao.Chen@Sun.COM 	 */
128111260SMiao.Chen@Sun.COM 	DRM_DEBUG("Binding object %d of page_count %d at gtt_offset 0x%x obj->pfnarray = 0x%lx",
128211260SMiao.Chen@Sun.COM 		 obj->name, page_count, obj_priv->gtt_offset, obj->pfnarray);
128311260SMiao.Chen@Sun.COM 
128411260SMiao.Chen@Sun.COM 	obj_priv->agp_mem = drm_agp_bind_pages(dev,
128511260SMiao.Chen@Sun.COM 					       obj->pfnarray,
128611260SMiao.Chen@Sun.COM 					       page_count,
128711260SMiao.Chen@Sun.COM 					       obj_priv->gtt_offset);
128811260SMiao.Chen@Sun.COM 	if (obj_priv->agp_mem) {
128911260SMiao.Chen@Sun.COM 		i915_gem_object_free_page_list(obj);
129011260SMiao.Chen@Sun.COM 		drm_mm_put_block(obj_priv->gtt_space);
129111260SMiao.Chen@Sun.COM 		obj_priv->gtt_space = NULL;
129211260SMiao.Chen@Sun.COM 		DRM_ERROR("Failed to bind pages obj %d, obj 0x%lx", obj->name, obj);
129311260SMiao.Chen@Sun.COM 		return ENOMEM;
129411260SMiao.Chen@Sun.COM 	}
129511260SMiao.Chen@Sun.COM 	atomic_inc(&dev->gtt_count);
129611260SMiao.Chen@Sun.COM 	atomic_add(obj->size, &dev->gtt_memory);
129711260SMiao.Chen@Sun.COM 
129811260SMiao.Chen@Sun.COM 	/* Assert that the object is not currently in any GPU domain. As it
129911260SMiao.Chen@Sun.COM 	 * wasn't in the GTT, there shouldn't be any way it could have been in
130011260SMiao.Chen@Sun.COM 	 * a GPU cache
130111260SMiao.Chen@Sun.COM 	 */
130211260SMiao.Chen@Sun.COM 	ASSERT(!(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)));
130311260SMiao.Chen@Sun.COM 	ASSERT(!(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)));
130411260SMiao.Chen@Sun.COM 
130511260SMiao.Chen@Sun.COM 	return 0;
130611260SMiao.Chen@Sun.COM }
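
/*
 * A minimal sketch of how callers normally reach this point, assuming
 * dev->struct_mutex is held: i915_gem_object_pin() (defined later in this
 * file) binds the object on demand, after which obj_priv->gtt_offset is
 * the GPU-visible address for as long as the pin is held.  The PAGE_SIZE
 * alignment here is illustrative.
 */
#if 0
static int
i915_gem_bind_for_gpu_sketch(struct drm_gem_object *obj, uint32_t *gtt_offset)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	ret = i915_gem_object_pin(obj, PAGE_SIZE);
	if (ret != 0)
		return ret;

	/* Valid only while the object stays pinned/bound. */
	*gtt_offset = obj_priv->gtt_offset;
	return 0;
}
#endif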
130711260SMiao.Chen@Sun.COM 
130811260SMiao.Chen@Sun.COM void
130911260SMiao.Chen@Sun.COM i915_gem_clflush_object(struct drm_gem_object *obj)
131011260SMiao.Chen@Sun.COM {
131111260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
131211260SMiao.Chen@Sun.COM 
131311260SMiao.Chen@Sun.COM 	/* If we don't have a page list set up, then we're not pinned
131411260SMiao.Chen@Sun.COM 	 * to GPU, and we can ignore the cache flush because it'll happen
131511260SMiao.Chen@Sun.COM 	 * again at bind time.
131611260SMiao.Chen@Sun.COM 	 */
131711260SMiao.Chen@Sun.COM 
131811260SMiao.Chen@Sun.COM 	if (obj_priv->page_list == NULL)
131911260SMiao.Chen@Sun.COM 		return;
132011260SMiao.Chen@Sun.COM 	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
132111260SMiao.Chen@Sun.COM }
132211260SMiao.Chen@Sun.COM 
132311260SMiao.Chen@Sun.COM /** Flushes any GPU write domain for the object if it's dirty. */
132411260SMiao.Chen@Sun.COM static void
132511260SMiao.Chen@Sun.COM i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
132611260SMiao.Chen@Sun.COM {
132711260SMiao.Chen@Sun.COM 	struct drm_device *dev = obj->dev;
132811260SMiao.Chen@Sun.COM 	uint32_t seqno;
132911260SMiao.Chen@Sun.COM 
133011260SMiao.Chen@Sun.COM 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
133111260SMiao.Chen@Sun.COM 		return;
133211260SMiao.Chen@Sun.COM 
133311260SMiao.Chen@Sun.COM 	/* Queue the GPU write cache flushing we need. */
133411260SMiao.Chen@Sun.COM 	i915_gem_flush(dev, 0, obj->write_domain);
133511260SMiao.Chen@Sun.COM 	seqno = i915_add_request(dev, obj->write_domain);
133611260SMiao.Chen@Sun.COM 	DRM_DEBUG("flush_gpu_write_domain seqno = %d", seqno);
133711260SMiao.Chen@Sun.COM 	obj->write_domain = 0;
133811260SMiao.Chen@Sun.COM 	i915_gem_object_move_to_active(obj, seqno);
133911260SMiao.Chen@Sun.COM }
134011260SMiao.Chen@Sun.COM 
134111260SMiao.Chen@Sun.COM /** Flushes the GTT write domain for the object if it's dirty. */
134211260SMiao.Chen@Sun.COM static void
134311260SMiao.Chen@Sun.COM i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
134411260SMiao.Chen@Sun.COM {
134511260SMiao.Chen@Sun.COM 	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
134611260SMiao.Chen@Sun.COM 		return;
134711260SMiao.Chen@Sun.COM 
134811260SMiao.Chen@Sun.COM 	/* No actual flushing is required for the GTT write domain.   Writes
134911260SMiao.Chen@Sun.COM 	 * to it immediately go to main memory as far as we know, so there's
135011260SMiao.Chen@Sun.COM 	 * no chipset flush.  It also doesn't land in render cache.
135111260SMiao.Chen@Sun.COM 	 */
135211260SMiao.Chen@Sun.COM 	obj->write_domain = 0;
135311260SMiao.Chen@Sun.COM }
135411260SMiao.Chen@Sun.COM 
135511260SMiao.Chen@Sun.COM /** Flushes the CPU write domain for the object if it's dirty. */
135611260SMiao.Chen@Sun.COM static void
135711260SMiao.Chen@Sun.COM i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
135811260SMiao.Chen@Sun.COM {
135911260SMiao.Chen@Sun.COM 	struct drm_device *dev = obj->dev;
136011260SMiao.Chen@Sun.COM 
136111260SMiao.Chen@Sun.COM 	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
136211260SMiao.Chen@Sun.COM 		return;
136311260SMiao.Chen@Sun.COM 
136411260SMiao.Chen@Sun.COM 	i915_gem_clflush_object(obj);
136511260SMiao.Chen@Sun.COM 	drm_agp_chipset_flush(dev);
136611260SMiao.Chen@Sun.COM 	obj->write_domain = 0;
136711260SMiao.Chen@Sun.COM }
136811260SMiao.Chen@Sun.COM 
136911260SMiao.Chen@Sun.COM /**
137011260SMiao.Chen@Sun.COM  * Moves a single object to the GTT read, and possibly write domain.
137111260SMiao.Chen@Sun.COM  *
137211260SMiao.Chen@Sun.COM  * This function returns when the move is complete, including waiting on
137311260SMiao.Chen@Sun.COM  * flushes to occur.
137411260SMiao.Chen@Sun.COM  */
137511260SMiao.Chen@Sun.COM static int
137611260SMiao.Chen@Sun.COM i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
137711260SMiao.Chen@Sun.COM {
137811260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
137911260SMiao.Chen@Sun.COM 	int ret;
138011260SMiao.Chen@Sun.COM 
138111260SMiao.Chen@Sun.COM 	/* Not valid to be called on unbound objects. */
138211260SMiao.Chen@Sun.COM 	if (obj_priv->gtt_space == NULL)
138311260SMiao.Chen@Sun.COM 		return EINVAL;
138411260SMiao.Chen@Sun.COM 
138511260SMiao.Chen@Sun.COM 	i915_gem_object_flush_gpu_write_domain(obj);
138611260SMiao.Chen@Sun.COM 	/* Wait on any GPU rendering and flushing to occur. */
138711260SMiao.Chen@Sun.COM 	ret = i915_gem_object_wait_rendering(obj);
138811260SMiao.Chen@Sun.COM 	if (ret != 0) {
138911260SMiao.Chen@Sun.COM 		DRM_ERROR("set_to_gtt_domain wait_rendering ret %d", ret);
139011260SMiao.Chen@Sun.COM 		return ret;
139111260SMiao.Chen@Sun.COM 	}
139211260SMiao.Chen@Sun.COM 	/* If we're writing through the GTT domain, then CPU and GPU caches
139311260SMiao.Chen@Sun.COM 	 * will need to be invalidated at next use.
139411260SMiao.Chen@Sun.COM 	 */
139511260SMiao.Chen@Sun.COM 	if (write)
139611260SMiao.Chen@Sun.COM 		obj->read_domains &= I915_GEM_DOMAIN_GTT;
139711260SMiao.Chen@Sun.COM 	i915_gem_object_flush_cpu_write_domain(obj);
139811260SMiao.Chen@Sun.COM 
139911260SMiao.Chen@Sun.COM 	DRM_DEBUG("i915_gem_object_set_to_gtt_domain obj->read_domains %x ", obj->read_domains);
140011260SMiao.Chen@Sun.COM 	/* It should now be out of any other write domains, and we can update
140111260SMiao.Chen@Sun.COM 	 * the domain values for our changes.
140211260SMiao.Chen@Sun.COM 	 */
140311260SMiao.Chen@Sun.COM 	ASSERT((obj->write_domain & ~I915_GEM_DOMAIN_GTT) == 0);
140411260SMiao.Chen@Sun.COM 	obj->read_domains |= I915_GEM_DOMAIN_GTT;
140511260SMiao.Chen@Sun.COM 	if (write) {
140611260SMiao.Chen@Sun.COM 		obj->write_domain = I915_GEM_DOMAIN_GTT;
140711260SMiao.Chen@Sun.COM 		obj_priv->dirty = 1;
140811260SMiao.Chen@Sun.COM 	}
140911260SMiao.Chen@Sun.COM 
141011260SMiao.Chen@Sun.COM 	return 0;
141111260SMiao.Chen@Sun.COM }
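
/*
 * A minimal sketch of a CPU write done through the GTT domain, patterned
 * on the relocation fixup in i915_gem_object_pin_and_relocate() below:
 * move the object to the GTT write domain first (flushing stale CPU
 * cache lines), then store the dword through the kernel mapping of the
 * backing page.  page_index/offset_in_page are illustrative parameters.
 */
#if 0
static int
i915_gem_poke_dword_sketch(struct drm_gem_object *obj, uint32_t page_index,
    uint32_t offset_in_page, uint32_t value)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t *slot;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret != 0)
		return ret;

	/* page_list[] holds the kernel address of each backing page. */
	slot = (uint32_t *)(uintptr_t)
	    (obj_priv->page_list[page_index] + offset_in_page);
	*slot = value;
	return 0;
}
#endif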
141211260SMiao.Chen@Sun.COM 
141311260SMiao.Chen@Sun.COM /**
141411260SMiao.Chen@Sun.COM  * Moves a single object to the CPU read, and possibly write domain.
141511260SMiao.Chen@Sun.COM  *
141611260SMiao.Chen@Sun.COM  * This function returns when the move is complete, including waiting on
141711260SMiao.Chen@Sun.COM  * flushes to occur.
141811260SMiao.Chen@Sun.COM  */
141911260SMiao.Chen@Sun.COM static int
142011260SMiao.Chen@Sun.COM i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
142111260SMiao.Chen@Sun.COM {
142211260SMiao.Chen@Sun.COM 	struct drm_device *dev = obj->dev;
142311260SMiao.Chen@Sun.COM 	int ret;
142411260SMiao.Chen@Sun.COM 
142511260SMiao.Chen@Sun.COM 
142611260SMiao.Chen@Sun.COM 	i915_gem_object_flush_gpu_write_domain(obj);
142711260SMiao.Chen@Sun.COM 	/* Wait on any GPU rendering and flushing to occur. */
142811260SMiao.Chen@Sun.COM 
142911260SMiao.Chen@Sun.COM 	ret = i915_gem_object_wait_rendering(obj);
143011260SMiao.Chen@Sun.COM 	if (ret != 0)
143111260SMiao.Chen@Sun.COM 		return ret;
143211260SMiao.Chen@Sun.COM 
143311260SMiao.Chen@Sun.COM 	i915_gem_object_flush_gtt_write_domain(obj);
143411260SMiao.Chen@Sun.COM 
143511260SMiao.Chen@Sun.COM 	/* If we have a partially-valid cache of the object in the CPU,
143611260SMiao.Chen@Sun.COM 	 * finish invalidating it and free the per-page flags.
143711260SMiao.Chen@Sun.COM 	 */
143811260SMiao.Chen@Sun.COM 	i915_gem_object_set_to_full_cpu_read_domain(obj);
143911260SMiao.Chen@Sun.COM 
144011260SMiao.Chen@Sun.COM 	/* Flush the CPU cache if it's still invalid. */
144111260SMiao.Chen@Sun.COM 	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
144211260SMiao.Chen@Sun.COM 		i915_gem_clflush_object(obj);
144311260SMiao.Chen@Sun.COM 		drm_agp_chipset_flush(dev);
144411260SMiao.Chen@Sun.COM 		obj->read_domains |= I915_GEM_DOMAIN_CPU;
144511260SMiao.Chen@Sun.COM 	}
144611260SMiao.Chen@Sun.COM 
144711260SMiao.Chen@Sun.COM 	/* It should now be out of any other write domains, and we can update
144811260SMiao.Chen@Sun.COM 	 * the domain values for our changes.
144911260SMiao.Chen@Sun.COM 	 */
145011260SMiao.Chen@Sun.COM 	ASSERT((obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0);
145111260SMiao.Chen@Sun.COM 
145211260SMiao.Chen@Sun.COM 	/* If we're writing through the CPU, then the GPU read domains will
145311260SMiao.Chen@Sun.COM 	 * need to be invalidated at next use.
145411260SMiao.Chen@Sun.COM 	 */
145511260SMiao.Chen@Sun.COM 	if (write) {
145611260SMiao.Chen@Sun.COM 		obj->read_domains &= I915_GEM_DOMAIN_CPU;
145711260SMiao.Chen@Sun.COM 		obj->write_domain = I915_GEM_DOMAIN_CPU;
145811260SMiao.Chen@Sun.COM 	}
145911260SMiao.Chen@Sun.COM 
146011260SMiao.Chen@Sun.COM 	return 0;
146111260SMiao.Chen@Sun.COM }
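
/*
 * A minimal pwrite-style sketch using the helper above, assuming
 * dev->struct_mutex is held, the source data is already in the kernel,
 * and the write stays within one backing page: move the object to the
 * CPU write domain, then copy through the page_list mapping.  Later GPU
 * use will invalidate/flush as described in the comment below.
 */
#if 0
static int
i915_gem_cpu_write_sketch(struct drm_gem_object *obj, uint32_t page_index,
    const void *src, size_t len)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		return ret;

	bcopy(src, obj_priv->page_list[page_index], len);
	return 0;
}
#endif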
146211260SMiao.Chen@Sun.COM 
146311260SMiao.Chen@Sun.COM /*
146411260SMiao.Chen@Sun.COM  * Set the next domain for the specified object. This
146511260SMiao.Chen@Sun.COM  * may not actually perform the necessary flushing/invalidating though,
146611260SMiao.Chen@Sun.COM  * as that may want to be batched with other set_domain operations
146711260SMiao.Chen@Sun.COM  *
146811260SMiao.Chen@Sun.COM  * This is (we hope) the only really tricky part of gem. The goal
146911260SMiao.Chen@Sun.COM  * is fairly simple -- track which caches hold bits of the object
147011260SMiao.Chen@Sun.COM  * and make sure they remain coherent. A few concrete examples may
147111260SMiao.Chen@Sun.COM  * help to explain how it works. For shorthand, we use the notation
147211260SMiao.Chen@Sun.COM  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
147311260SMiao.Chen@Sun.COM  * a pair of read and write domain masks.
147411260SMiao.Chen@Sun.COM  *
147511260SMiao.Chen@Sun.COM  * Case 1: the batch buffer
147611260SMiao.Chen@Sun.COM  *
147711260SMiao.Chen@Sun.COM  *	1. Allocated
147811260SMiao.Chen@Sun.COM  *	2. Written by CPU
147911260SMiao.Chen@Sun.COM  *	3. Mapped to GTT
148011260SMiao.Chen@Sun.COM  *	4. Read by GPU
148111260SMiao.Chen@Sun.COM  *	5. Unmapped from GTT
148211260SMiao.Chen@Sun.COM  *	6. Freed
148311260SMiao.Chen@Sun.COM  *
148411260SMiao.Chen@Sun.COM  *	Let's take these a step at a time
148511260SMiao.Chen@Sun.COM  *
148611260SMiao.Chen@Sun.COM  *	1. Allocated
148711260SMiao.Chen@Sun.COM  *		Pages allocated from the kernel may still have
148811260SMiao.Chen@Sun.COM  *		cache contents, so we set them to (CPU, CPU) always.
148911260SMiao.Chen@Sun.COM  *	2. Written by CPU (using pwrite)
149011260SMiao.Chen@Sun.COM  *		The pwrite function calls set_domain (CPU, CPU) and
149111260SMiao.Chen@Sun.COM  *		this function does nothing (as nothing changes)
149211260SMiao.Chen@Sun.COM  *	3. Mapped to GTT
149311260SMiao.Chen@Sun.COM  *		This function asserts that the object is not
149411260SMiao.Chen@Sun.COM  *		currently in any GPU-based read or write domains
149511260SMiao.Chen@Sun.COM  *	4. Read by GPU
149611260SMiao.Chen@Sun.COM  *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
149711260SMiao.Chen@Sun.COM  *		As write_domain is zero, this function adds in the
149811260SMiao.Chen@Sun.COM  *		current read domains (CPU+COMMAND, 0).
149911260SMiao.Chen@Sun.COM  *		flush_domains is set to CPU.
150011260SMiao.Chen@Sun.COM  *		invalidate_domains is set to COMMAND
150111260SMiao.Chen@Sun.COM  *		clflush is run to get data out of the CPU caches
150211260SMiao.Chen@Sun.COM  *		then i915_dev_set_domain calls i915_gem_flush to
150311260SMiao.Chen@Sun.COM  *		emit an MI_FLUSH and drm_agp_chipset_flush
150411260SMiao.Chen@Sun.COM  *	5. Unmapped from GTT
150511260SMiao.Chen@Sun.COM  *		i915_gem_object_unbind calls set_domain (CPU, CPU)
150611260SMiao.Chen@Sun.COM  *		flush_domains and invalidate_domains end up both zero
150711260SMiao.Chen@Sun.COM  *		so no flushing/invalidating happens
150811260SMiao.Chen@Sun.COM  *	6. Freed
150911260SMiao.Chen@Sun.COM  *		yay, done
151011260SMiao.Chen@Sun.COM  *
151111260SMiao.Chen@Sun.COM  * Case 2: The shared render buffer
151211260SMiao.Chen@Sun.COM  *
151311260SMiao.Chen@Sun.COM  *	1. Allocated
151411260SMiao.Chen@Sun.COM  *	2. Mapped to GTT
151511260SMiao.Chen@Sun.COM  *	3. Read/written by GPU
151611260SMiao.Chen@Sun.COM  *	4. set_domain to (CPU,CPU)
151711260SMiao.Chen@Sun.COM  *	5. Read/written by CPU
151811260SMiao.Chen@Sun.COM  *	6. Read/written by GPU
151911260SMiao.Chen@Sun.COM  *
152011260SMiao.Chen@Sun.COM  *	1. Allocated
152111260SMiao.Chen@Sun.COM  *		Same as last example, (CPU, CPU)
152211260SMiao.Chen@Sun.COM  *	2. Mapped to GTT
152311260SMiao.Chen@Sun.COM  *		Nothing changes (assertions find that it is not in the GPU)
152411260SMiao.Chen@Sun.COM  *	3. Read/written by GPU
152511260SMiao.Chen@Sun.COM  *		execbuffer calls set_domain (RENDER, RENDER)
152611260SMiao.Chen@Sun.COM  *		flush_domains gets CPU
152711260SMiao.Chen@Sun.COM  *		invalidate_domains gets GPU
152811260SMiao.Chen@Sun.COM  *		clflush (obj)
152911260SMiao.Chen@Sun.COM  *		MI_FLUSH and drm_agp_chipset_flush
153011260SMiao.Chen@Sun.COM  *	4. set_domain (CPU, CPU)
153111260SMiao.Chen@Sun.COM  *		flush_domains gets GPU
153211260SMiao.Chen@Sun.COM  *		invalidate_domains gets CPU
153311260SMiao.Chen@Sun.COM  *		wait_rendering (obj) to make sure all drawing is complete.
153411260SMiao.Chen@Sun.COM  *		This will include an MI_FLUSH to get the data from GPU
153511260SMiao.Chen@Sun.COM  *		to memory
153611260SMiao.Chen@Sun.COM  *		clflush (obj) to invalidate the CPU cache
153711260SMiao.Chen@Sun.COM  *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
153811260SMiao.Chen@Sun.COM  *	5. Read/written by CPU
153911260SMiao.Chen@Sun.COM  *		cache lines are loaded and dirtied
154011260SMiao.Chen@Sun.COM  *	6. Read/written by GPU
154111260SMiao.Chen@Sun.COM  *		Same as last GPU access
154211260SMiao.Chen@Sun.COM  *
154311260SMiao.Chen@Sun.COM  * Case 3: The constant buffer
154411260SMiao.Chen@Sun.COM  *
154511260SMiao.Chen@Sun.COM  *	1. Allocated
154611260SMiao.Chen@Sun.COM  *	2. Written by CPU
154711260SMiao.Chen@Sun.COM  *	3. Read by GPU
154811260SMiao.Chen@Sun.COM  *	4. Updated (written) by CPU again
154911260SMiao.Chen@Sun.COM  *	5. Read by GPU
155011260SMiao.Chen@Sun.COM  *
155111260SMiao.Chen@Sun.COM  *	1. Allocated
155211260SMiao.Chen@Sun.COM  *		(CPU, CPU)
155311260SMiao.Chen@Sun.COM  *	2. Written by CPU
155411260SMiao.Chen@Sun.COM  *		(CPU, CPU)
155511260SMiao.Chen@Sun.COM  *	3. Read by GPU
155611260SMiao.Chen@Sun.COM  *		(CPU+RENDER, 0)
155711260SMiao.Chen@Sun.COM  *		flush_domains = CPU
155811260SMiao.Chen@Sun.COM  *		invalidate_domains = RENDER
155911260SMiao.Chen@Sun.COM  *		clflush (obj)
156011260SMiao.Chen@Sun.COM  *		MI_FLUSH
156111260SMiao.Chen@Sun.COM  *		drm_agp_chipset_flush
156211260SMiao.Chen@Sun.COM  *	4. Updated (written) by CPU again
156311260SMiao.Chen@Sun.COM  *		(CPU, CPU)
156411260SMiao.Chen@Sun.COM  *		flush_domains = 0 (no previous write domain)
156511260SMiao.Chen@Sun.COM  *		invalidate_domains = 0 (no new read domains)
156611260SMiao.Chen@Sun.COM  *	5. Read by GPU
156711260SMiao.Chen@Sun.COM  *		(CPU+RENDER, 0)
156811260SMiao.Chen@Sun.COM  *		flush_domains = CPU
156911260SMiao.Chen@Sun.COM  *		invalidate_domains = RENDER
157011260SMiao.Chen@Sun.COM  *		clflush (obj)
157111260SMiao.Chen@Sun.COM  *		MI_FLUSH
157211260SMiao.Chen@Sun.COM  *		drm_agp_chipset_flush
157311260SMiao.Chen@Sun.COM  */
157411260SMiao.Chen@Sun.COM static void
157511260SMiao.Chen@Sun.COM i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
157611260SMiao.Chen@Sun.COM 			    uint32_t read_domains,
157711260SMiao.Chen@Sun.COM 			    uint32_t write_domain)
157811260SMiao.Chen@Sun.COM {
157911260SMiao.Chen@Sun.COM 	struct drm_device		*dev = obj->dev;
158011260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
158111260SMiao.Chen@Sun.COM 	uint32_t			invalidate_domains = 0;
158211260SMiao.Chen@Sun.COM 	uint32_t			flush_domains = 0;
158311260SMiao.Chen@Sun.COM 
158411260SMiao.Chen@Sun.COM 	DRM_DEBUG("%s: object %p read %08x -> %08x write %08x -> %08x\n",
158511260SMiao.Chen@Sun.COM 		 __func__, obj,
158611260SMiao.Chen@Sun.COM 		 obj->read_domains, read_domains,
158711260SMiao.Chen@Sun.COM 		 obj->write_domain, write_domain);
158811260SMiao.Chen@Sun.COM 	/*
158911260SMiao.Chen@Sun.COM 	 * If the object isn't moving to a new write domain,
159011260SMiao.Chen@Sun.COM 	 * let the object stay in multiple read domains
159111260SMiao.Chen@Sun.COM 	 */
159211260SMiao.Chen@Sun.COM 	if (write_domain == 0)
159311260SMiao.Chen@Sun.COM 		read_domains |= obj->read_domains;
159411260SMiao.Chen@Sun.COM 	else
159511260SMiao.Chen@Sun.COM 		obj_priv->dirty = 1;
159611260SMiao.Chen@Sun.COM 
159711260SMiao.Chen@Sun.COM 	/*
159811260SMiao.Chen@Sun.COM 	 * Flush the current write domain if
159911260SMiao.Chen@Sun.COM 	 * the new read domains don't match. Invalidate
160011260SMiao.Chen@Sun.COM 	 * any read domains which differ from the old
160111260SMiao.Chen@Sun.COM 	 * write domain
160211260SMiao.Chen@Sun.COM 	 */
160311260SMiao.Chen@Sun.COM 	if (obj->write_domain && obj->write_domain != read_domains) {
160411260SMiao.Chen@Sun.COM 		flush_domains |= obj->write_domain;
160511260SMiao.Chen@Sun.COM 		invalidate_domains |= read_domains & ~obj->write_domain;
160611260SMiao.Chen@Sun.COM 	}
160711260SMiao.Chen@Sun.COM 	/*
160811260SMiao.Chen@Sun.COM 	 * Invalidate any read caches which may have
160911260SMiao.Chen@Sun.COM 	 * stale data. That is, any new read domains.
161011260SMiao.Chen@Sun.COM 	 */
161111260SMiao.Chen@Sun.COM 	invalidate_domains |= read_domains & ~obj->read_domains;
161211260SMiao.Chen@Sun.COM 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
161311260SMiao.Chen@Sun.COM 		DRM_DEBUG("%s: CPU domain flush %08x invalidate %08x\n",
161411260SMiao.Chen@Sun.COM 			 __func__, flush_domains, invalidate_domains);
161511260SMiao.Chen@Sun.COM 		i915_gem_clflush_object(obj);
161611260SMiao.Chen@Sun.COM 	}
161711260SMiao.Chen@Sun.COM 
161811260SMiao.Chen@Sun.COM 	if ((write_domain | flush_domains) != 0)
161911260SMiao.Chen@Sun.COM 		obj->write_domain = write_domain;
162011260SMiao.Chen@Sun.COM 	obj->read_domains = read_domains;
162111260SMiao.Chen@Sun.COM 
162211260SMiao.Chen@Sun.COM 	dev->invalidate_domains |= invalidate_domains;
162311260SMiao.Chen@Sun.COM 	dev->flush_domains |= flush_domains;
162411260SMiao.Chen@Sun.COM 
162511260SMiao.Chen@Sun.COM 	DRM_DEBUG("%s: read %08x write %08x invalidate %08x flush %08x\n",
162611260SMiao.Chen@Sun.COM 		 __func__,
162711260SMiao.Chen@Sun.COM 		 obj->read_domains, obj->write_domain,
162811260SMiao.Chen@Sun.COM 		 dev->invalidate_domains, dev->flush_domains);
162911260SMiao.Chen@Sun.COM 
163011260SMiao.Chen@Sun.COM }
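
/*
 * A condensed sketch of how the execbuffer path is expected to drive the
 * helper above, following the "batch buffer" case in the comment:
 * accumulate invalidate/flush domains across every object in the exec
 * list first, then emit a single combined flush for the batch.  The
 * object array and count are illustrative.
 */
#if 0
static void
i915_gem_flush_for_exec_sketch(struct drm_device *dev,
    struct drm_gem_object **object_list, int count)
{
	int i;

	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	/* Pass 1: fold each object's pending domains into the totals. */
	for (i = 0; i < count; i++)
		i915_gem_object_set_to_gpu_domain(object_list[i],
		    object_list[i]->pending_read_domains,
		    object_list[i]->pending_write_domain);

	/* Pass 2: one flush/invalidate covering the whole batch. */
	if (dev->invalidate_domains | dev->flush_domains)
		i915_gem_flush(dev, dev->invalidate_domains,
		    dev->flush_domains);
}
#endif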
163111260SMiao.Chen@Sun.COM 
163211260SMiao.Chen@Sun.COM /**
163311260SMiao.Chen@Sun.COM  * Moves the object from a partially CPU read to a full one.
163411260SMiao.Chen@Sun.COM  *
163511260SMiao.Chen@Sun.COM  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
163611260SMiao.Chen@Sun.COM  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
163711260SMiao.Chen@Sun.COM  */
163811260SMiao.Chen@Sun.COM static void
163911260SMiao.Chen@Sun.COM i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
164011260SMiao.Chen@Sun.COM {
164111260SMiao.Chen@Sun.COM 	struct drm_device *dev = obj->dev;
164211260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
164311260SMiao.Chen@Sun.COM 
164411260SMiao.Chen@Sun.COM 	if (!obj_priv->page_cpu_valid)
164511260SMiao.Chen@Sun.COM 		return;
164611260SMiao.Chen@Sun.COM 
164711260SMiao.Chen@Sun.COM 	/* If we're partially in the CPU read domain, finish moving it in.
164811260SMiao.Chen@Sun.COM 	 */
164911260SMiao.Chen@Sun.COM 	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
165011260SMiao.Chen@Sun.COM 		int i;
165111260SMiao.Chen@Sun.COM 
165211260SMiao.Chen@Sun.COM 		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
165311260SMiao.Chen@Sun.COM 			if (obj_priv->page_cpu_valid[i])
165411260SMiao.Chen@Sun.COM 				continue;
165511260SMiao.Chen@Sun.COM 			drm_clflush_pages(obj_priv->page_list + i, 1);
165611260SMiao.Chen@Sun.COM 		}
165711260SMiao.Chen@Sun.COM 		drm_agp_chipset_flush(dev);
165811260SMiao.Chen@Sun.COM 	}
165911260SMiao.Chen@Sun.COM 
166011260SMiao.Chen@Sun.COM 	/* Free the page_cpu_valid mappings which are now stale, whether
166111260SMiao.Chen@Sun.COM 	 * or not we've got I915_GEM_DOMAIN_CPU.
166211260SMiao.Chen@Sun.COM 	 */
166311260SMiao.Chen@Sun.COM 	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
166411260SMiao.Chen@Sun.COM 		 DRM_MEM_DRIVER);
166511260SMiao.Chen@Sun.COM 	obj_priv->page_cpu_valid = NULL;
166611260SMiao.Chen@Sun.COM }
166711260SMiao.Chen@Sun.COM 
166811260SMiao.Chen@Sun.COM /**
166911260SMiao.Chen@Sun.COM  * Set the CPU read domain on a range of the object.
167011260SMiao.Chen@Sun.COM  *
167111260SMiao.Chen@Sun.COM  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
167211260SMiao.Chen@Sun.COM  * not entirely valid.  The page_cpu_valid member of the object flags which
167311260SMiao.Chen@Sun.COM  * pages have been flushed, and will be respected by
167411260SMiao.Chen@Sun.COM  * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
167511260SMiao.Chen@Sun.COM  * of the whole object.
167611260SMiao.Chen@Sun.COM  *
167711260SMiao.Chen@Sun.COM  * This function returns when the move is complete, including waiting on
167811260SMiao.Chen@Sun.COM  * flushes to occur.
167911260SMiao.Chen@Sun.COM  */
168011260SMiao.Chen@Sun.COM static int
168111260SMiao.Chen@Sun.COM i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
168211260SMiao.Chen@Sun.COM 					  uint64_t offset, uint64_t size)
168311260SMiao.Chen@Sun.COM {
168411260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
168511260SMiao.Chen@Sun.COM 	int i, ret;
168611260SMiao.Chen@Sun.COM 
168711260SMiao.Chen@Sun.COM 	if (offset == 0 && size == obj->size)
168811260SMiao.Chen@Sun.COM 		return i915_gem_object_set_to_cpu_domain(obj, 0);
168911260SMiao.Chen@Sun.COM 
169011260SMiao.Chen@Sun.COM 	i915_gem_object_flush_gpu_write_domain(obj);
169111260SMiao.Chen@Sun.COM 	/* Wait on any GPU rendering and flushing to occur. */
169211260SMiao.Chen@Sun.COM 	ret = i915_gem_object_wait_rendering(obj);
169311260SMiao.Chen@Sun.COM 	if (ret != 0)
169411260SMiao.Chen@Sun.COM 		return ret;
169511260SMiao.Chen@Sun.COM 	i915_gem_object_flush_gtt_write_domain(obj);
169611260SMiao.Chen@Sun.COM 
169711260SMiao.Chen@Sun.COM 	/* If we're already fully in the CPU read domain, we're done. */
169811260SMiao.Chen@Sun.COM 	if (obj_priv->page_cpu_valid == NULL &&
169911260SMiao.Chen@Sun.COM 	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
170011260SMiao.Chen@Sun.COM 		return 0;
170111260SMiao.Chen@Sun.COM 
170211260SMiao.Chen@Sun.COM 	/* Otherwise, create/clear the per-page CPU read domain flag if we're
170311260SMiao.Chen@Sun.COM 	 * newly adding I915_GEM_DOMAIN_CPU
170411260SMiao.Chen@Sun.COM 	 */
170511260SMiao.Chen@Sun.COM 	if (obj_priv->page_cpu_valid == NULL) {
170611260SMiao.Chen@Sun.COM 		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
170711260SMiao.Chen@Sun.COM 						      DRM_MEM_DRIVER);
170811260SMiao.Chen@Sun.COM 		if (obj_priv->page_cpu_valid == NULL)
170911260SMiao.Chen@Sun.COM 			return ENOMEM;
171011260SMiao.Chen@Sun.COM 	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
171111260SMiao.Chen@Sun.COM 		(void) memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
171211260SMiao.Chen@Sun.COM 
171311260SMiao.Chen@Sun.COM 	/* Flush the cache on any pages that are still invalid from the CPU's
171411260SMiao.Chen@Sun.COM 	 * perspective.
171511260SMiao.Chen@Sun.COM 	 */
171611260SMiao.Chen@Sun.COM 	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
171711260SMiao.Chen@Sun.COM 	     i++) {
171811260SMiao.Chen@Sun.COM 		if (obj_priv->page_cpu_valid[i])
171911260SMiao.Chen@Sun.COM 			continue;
172011260SMiao.Chen@Sun.COM 
172111260SMiao.Chen@Sun.COM 		drm_clflush_pages(obj_priv->page_list + i, 1);
172211260SMiao.Chen@Sun.COM 		obj_priv->page_cpu_valid[i] = 1;
172311260SMiao.Chen@Sun.COM 	}
172411260SMiao.Chen@Sun.COM 
172511260SMiao.Chen@Sun.COM 	/* It should now be out of any other write domains, and we can update
172611260SMiao.Chen@Sun.COM 	 * the domain values for our changes.
172711260SMiao.Chen@Sun.COM 	 */
172811260SMiao.Chen@Sun.COM 	ASSERT((obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0);
172911260SMiao.Chen@Sun.COM 
173011260SMiao.Chen@Sun.COM 	obj->read_domains |= I915_GEM_DOMAIN_CPU;
173111260SMiao.Chen@Sun.COM 
173211260SMiao.Chen@Sun.COM 	return 0;
173311260SMiao.Chen@Sun.COM }
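
/*
 * A minimal pread-style sketch, assuming dev->struct_mutex is held and
 * the requested range lies within a single backing page: only the pages
 * covering [offset, offset + size) are clflushed and marked CPU-valid,
 * which is the point of this range variant over
 * i915_gem_object_set_to_cpu_domain().
 */
#if 0
static int
i915_gem_cpu_read_range_sketch(struct drm_gem_object *obj, uint64_t offset,
    uint64_t size, void *dst)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, offset, size);
	if (ret != 0)
		return ret;

	/* Copy out of the kernel mapping of the page holding the range. */
	bcopy(obj_priv->page_list[offset / PAGE_SIZE] +
	    (offset & (PAGE_SIZE - 1)), dst, (size_t)size);
	return 0;
}
#endif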
173411260SMiao.Chen@Sun.COM 
173511260SMiao.Chen@Sun.COM /**
173611260SMiao.Chen@Sun.COM  * Pin an object to the GTT and evaluate the relocations landing in it.
173711260SMiao.Chen@Sun.COM  */
173811260SMiao.Chen@Sun.COM static int
173911260SMiao.Chen@Sun.COM i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
174011260SMiao.Chen@Sun.COM 				 struct drm_file *file_priv,
174111260SMiao.Chen@Sun.COM 				 struct drm_i915_gem_exec_object *entry)
174211260SMiao.Chen@Sun.COM {
174311260SMiao.Chen@Sun.COM 	struct drm_i915_gem_relocation_entry reloc;
174411260SMiao.Chen@Sun.COM 	struct drm_i915_gem_relocation_entry __user *relocs;
174511260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
174611260SMiao.Chen@Sun.COM 	int i, ret;
174711260SMiao.Chen@Sun.COM 
174811260SMiao.Chen@Sun.COM 	/* Choose the GTT offset for our buffer and put it there. */
174911260SMiao.Chen@Sun.COM 	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
175011260SMiao.Chen@Sun.COM 	if (ret) {
175111260SMiao.Chen@Sun.COM 		DRM_ERROR("failed to pin");
175211260SMiao.Chen@Sun.COM 		return ret;
175311260SMiao.Chen@Sun.COM 	}
175411260SMiao.Chen@Sun.COM 	entry->offset = obj_priv->gtt_offset;
175511260SMiao.Chen@Sun.COM 
175611260SMiao.Chen@Sun.COM 	relocs = (struct drm_i915_gem_relocation_entry __user *)
175711260SMiao.Chen@Sun.COM 		 (uintptr_t) entry->relocs_ptr;
175811260SMiao.Chen@Sun.COM 	/* Apply the relocations, using the GTT aperture to avoid cache
175911260SMiao.Chen@Sun.COM 	 * flushing requirements.
176011260SMiao.Chen@Sun.COM 	 */
176111260SMiao.Chen@Sun.COM 	for (i = 0; i < entry->relocation_count; i++) {
176211260SMiao.Chen@Sun.COM 		struct drm_gem_object *target_obj;
176311260SMiao.Chen@Sun.COM 		struct drm_i915_gem_object *target_obj_priv;
176411260SMiao.Chen@Sun.COM 		uint32_t reloc_val, reloc_offset, *reloc_entry;
176511260SMiao.Chen@Sun.COM 
176611260SMiao.Chen@Sun.COM 		ret = DRM_COPY_FROM_USER(&reloc, relocs + i, sizeof(reloc));
176711260SMiao.Chen@Sun.COM 		if (ret != 0) {
176811260SMiao.Chen@Sun.COM 			i915_gem_object_unpin(obj);
176911260SMiao.Chen@Sun.COM 			DRM_ERROR("failed to copy from user");
177011260SMiao.Chen@Sun.COM 			return ret;
177111260SMiao.Chen@Sun.COM 		}
177211260SMiao.Chen@Sun.COM 
177311260SMiao.Chen@Sun.COM 		target_obj = drm_gem_object_lookup(file_priv,
177411260SMiao.Chen@Sun.COM 						   reloc.target_handle);
177511260SMiao.Chen@Sun.COM 		if (target_obj == NULL) {
177611260SMiao.Chen@Sun.COM 			i915_gem_object_unpin(obj);
177711260SMiao.Chen@Sun.COM 			return EBADF;
177811260SMiao.Chen@Sun.COM 		}
177911260SMiao.Chen@Sun.COM 		target_obj_priv = target_obj->driver_private;
178011260SMiao.Chen@Sun.COM 
178111260SMiao.Chen@Sun.COM 		/* The target buffer should have appeared before us in the
178211260SMiao.Chen@Sun.COM 		 * exec_object list, so it should have a GTT space bound by now.
178311260SMiao.Chen@Sun.COM 		 */
178411260SMiao.Chen@Sun.COM 		if (target_obj_priv->gtt_space == NULL) {
178511260SMiao.Chen@Sun.COM 			DRM_ERROR("No GTT space found for object %d\n",
178611260SMiao.Chen@Sun.COM 				  reloc.target_handle);
178711260SMiao.Chen@Sun.COM 			drm_gem_object_unreference(target_obj);
178811260SMiao.Chen@Sun.COM 			i915_gem_object_unpin(obj);
178911260SMiao.Chen@Sun.COM 			return EINVAL;
179011260SMiao.Chen@Sun.COM 		}
179111260SMiao.Chen@Sun.COM 
179211260SMiao.Chen@Sun.COM 		if (reloc.offset > obj->size - 4) {
179311260SMiao.Chen@Sun.COM 			DRM_ERROR("Relocation beyond object bounds: "
179411260SMiao.Chen@Sun.COM 				  "obj %p target %d offset %d size %d.\n",
179511260SMiao.Chen@Sun.COM 				  obj, reloc.target_handle,
179611260SMiao.Chen@Sun.COM 				  (int) reloc.offset, (int) obj->size);
179711260SMiao.Chen@Sun.COM 			drm_gem_object_unreference(target_obj);
179811260SMiao.Chen@Sun.COM 			i915_gem_object_unpin(obj);
179911260SMiao.Chen@Sun.COM 			return EINVAL;
180011260SMiao.Chen@Sun.COM 		}
180111260SMiao.Chen@Sun.COM 		if (reloc.offset & 3) {
180211260SMiao.Chen@Sun.COM 			DRM_ERROR("Relocation not 4-byte aligned: "
180311260SMiao.Chen@Sun.COM 				  "obj %p target %d offset %d.\n",
180411260SMiao.Chen@Sun.COM 				  obj, reloc.target_handle,
180511260SMiao.Chen@Sun.COM 				  (int) reloc.offset);
180611260SMiao.Chen@Sun.COM 			drm_gem_object_unreference(target_obj);
180711260SMiao.Chen@Sun.COM 			i915_gem_object_unpin(obj);
180811260SMiao.Chen@Sun.COM 			return EINVAL;
180911260SMiao.Chen@Sun.COM 		}
181011260SMiao.Chen@Sun.COM 
181111260SMiao.Chen@Sun.COM 		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
181211260SMiao.Chen@Sun.COM 		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
181311260SMiao.Chen@Sun.COM 			DRM_ERROR("reloc with read/write CPU domains: "
181411260SMiao.Chen@Sun.COM 				  "obj %p target %d offset %d "
181511260SMiao.Chen@Sun.COM 				  "read %08x write %08x",
181611260SMiao.Chen@Sun.COM 				  obj, reloc.target_handle,
181711260SMiao.Chen@Sun.COM 				  (int) reloc.offset,
181811260SMiao.Chen@Sun.COM 				  reloc.read_domains,
181911260SMiao.Chen@Sun.COM 				  reloc.write_domain);
182011260SMiao.Chen@Sun.COM 			drm_gem_object_unreference(target_obj);
182111260SMiao.Chen@Sun.COM 			i915_gem_object_unpin(obj);
182211260SMiao.Chen@Sun.COM 			return EINVAL;
182311260SMiao.Chen@Sun.COM 		}
182411260SMiao.Chen@Sun.COM 
182511260SMiao.Chen@Sun.COM 		if (reloc.write_domain && target_obj->pending_write_domain &&
182611260SMiao.Chen@Sun.COM 		    reloc.write_domain != target_obj->pending_write_domain) {
182711260SMiao.Chen@Sun.COM 			DRM_ERROR("Write domain conflict: "
182811260SMiao.Chen@Sun.COM 				  "obj %p target %d offset %d "
182911260SMiao.Chen@Sun.COM 				  "new %08x old %08x\n",
183011260SMiao.Chen@Sun.COM 				  obj, reloc.target_handle,
183111260SMiao.Chen@Sun.COM 				  (int) reloc.offset,
183211260SMiao.Chen@Sun.COM 				  reloc.write_domain,
183311260SMiao.Chen@Sun.COM 				  target_obj->pending_write_domain);
183411260SMiao.Chen@Sun.COM 			drm_gem_object_unreference(target_obj);
183511260SMiao.Chen@Sun.COM 			i915_gem_object_unpin(obj);
183611260SMiao.Chen@Sun.COM 			return EINVAL;
183711260SMiao.Chen@Sun.COM 		}
183811260SMiao.Chen@Sun.COM 		DRM_DEBUG("%s: obj %p offset %08x target %d "
183911260SMiao.Chen@Sun.COM 			 "read %08x write %08x gtt %08x "
184011260SMiao.Chen@Sun.COM 			 "presumed %08x delta %08x\n",
184111260SMiao.Chen@Sun.COM 			 __func__,
184211260SMiao.Chen@Sun.COM 			 obj,
184311260SMiao.Chen@Sun.COM 			 (int) reloc.offset,
184411260SMiao.Chen@Sun.COM 			 (int) reloc.target_handle,
184511260SMiao.Chen@Sun.COM 			 (int) reloc.read_domains,
184611260SMiao.Chen@Sun.COM 			 (int) reloc.write_domain,
184711260SMiao.Chen@Sun.COM 			 (int) target_obj_priv->gtt_offset,
184811260SMiao.Chen@Sun.COM 			 (int) reloc.presumed_offset,
184911260SMiao.Chen@Sun.COM 			 reloc.delta);
185011260SMiao.Chen@Sun.COM 
185111260SMiao.Chen@Sun.COM 		target_obj->pending_read_domains |= reloc.read_domains;
185211260SMiao.Chen@Sun.COM 		target_obj->pending_write_domain |= reloc.write_domain;
185311260SMiao.Chen@Sun.COM 
185411260SMiao.Chen@Sun.COM 		/* If the relocation already has the right value in it, no
185511260SMiao.Chen@Sun.COM 		 * more work needs to be done.
185611260SMiao.Chen@Sun.COM 		 */
185711260SMiao.Chen@Sun.COM 		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
185811260SMiao.Chen@Sun.COM 			drm_gem_object_unreference(target_obj);
185911260SMiao.Chen@Sun.COM 			continue;
186011260SMiao.Chen@Sun.COM 		}
186111260SMiao.Chen@Sun.COM 
186211260SMiao.Chen@Sun.COM 		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
186311260SMiao.Chen@Sun.COM 		if (ret != 0) {
186411260SMiao.Chen@Sun.COM 			drm_gem_object_unreference(target_obj);
186511260SMiao.Chen@Sun.COM 			i915_gem_object_unpin(obj);
186611260SMiao.Chen@Sun.COM 			return EINVAL;
186711260SMiao.Chen@Sun.COM 		}
186811260SMiao.Chen@Sun.COM 
186911260SMiao.Chen@Sun.COM 		/* Map the page containing the relocation we're going to
187011260SMiao.Chen@Sun.COM 		 * perform.
187111260SMiao.Chen@Sun.COM 		 */
187211260SMiao.Chen@Sun.COM 
187311260SMiao.Chen@Sun.COM 		int reloc_base = (reloc.offset & ~(PAGE_SIZE-1));
187411260SMiao.Chen@Sun.COM 		reloc_offset = reloc.offset & (PAGE_SIZE-1);
187511260SMiao.Chen@Sun.COM 		reloc_entry = (uint32_t *)(uintptr_t)(obj_priv->page_list[reloc_base/PAGE_SIZE] + reloc_offset);
187611260SMiao.Chen@Sun.COM 		reloc_val = target_obj_priv->gtt_offset + reloc.delta;
187711260SMiao.Chen@Sun.COM 		*reloc_entry = reloc_val;
187811260SMiao.Chen@Sun.COM 
187911260SMiao.Chen@Sun.COM 		/* Write the updated presumed offset for this entry back out
188011260SMiao.Chen@Sun.COM 		 * to the user.
188111260SMiao.Chen@Sun.COM 		 */
188211260SMiao.Chen@Sun.COM 		reloc.presumed_offset = target_obj_priv->gtt_offset;
188311260SMiao.Chen@Sun.COM 		ret = DRM_COPY_TO_USER(relocs + i, &reloc, sizeof(reloc));
188411260SMiao.Chen@Sun.COM 		if (ret != 0) {
188511260SMiao.Chen@Sun.COM 			drm_gem_object_unreference(target_obj);
188611260SMiao.Chen@Sun.COM 			i915_gem_object_unpin(obj);
188711260SMiao.Chen@Sun.COM 			DRM_ERROR("%s: Failed to copy to user ret %d", __func__, ret);
188811260SMiao.Chen@Sun.COM 			return ret;
188911260SMiao.Chen@Sun.COM 		}
189011260SMiao.Chen@Sun.COM 
189111260SMiao.Chen@Sun.COM 		drm_gem_object_unreference(target_obj);
189211260SMiao.Chen@Sun.COM 	}
189311260SMiao.Chen@Sun.COM 
189411260SMiao.Chen@Sun.COM 	return 0;
189511260SMiao.Chen@Sun.COM }
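
/*
 * A sketch of what a single userland relocation entry conveys to the loop
 * above, using only the fields consumed there.  The values are
 * illustrative: "patch the dword at byte 64 of this object with the GTT
 * address of target_handle plus 8, which the GPU will read through the
 * render cache."
 */
#if 0
static void
i915_gem_fill_reloc_sketch(struct drm_i915_gem_relocation_entry *reloc,
    uint32_t target_handle)
{
	reloc->target_handle = target_handle;	/* GEM handle of target buffer */
	reloc->offset = 64;			/* byte offset to patch in this object */
	reloc->delta = 8;			/* added to the target's gtt_offset */
	reloc->presumed_offset = 0;		/* rewritten by the copy-back above */
	reloc->read_domains = I915_GEM_DOMAIN_RENDER;
	reloc->write_domain = 0;		/* target is only read here */
}
#endif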
189611260SMiao.Chen@Sun.COM 
189711260SMiao.Chen@Sun.COM /** Dispatch a batchbuffer to the ring
189811260SMiao.Chen@Sun.COM  */
189911260SMiao.Chen@Sun.COM static int
190011260SMiao.Chen@Sun.COM i915_dispatch_gem_execbuffer(struct drm_device *dev,
190111260SMiao.Chen@Sun.COM 			      struct drm_i915_gem_execbuffer *exec,
190211260SMiao.Chen@Sun.COM 			      uint64_t exec_offset)
190311260SMiao.Chen@Sun.COM {
190411260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
190511260SMiao.Chen@Sun.COM 	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
190611260SMiao.Chen@Sun.COM 					     (uintptr_t) exec->cliprects_ptr;
190711260SMiao.Chen@Sun.COM 	int nbox = exec->num_cliprects;
190811260SMiao.Chen@Sun.COM 	int i = 0, count;
190911260SMiao.Chen@Sun.COM 	uint64_t	exec_start, exec_len;
191011260SMiao.Chen@Sun.COM 	RING_LOCALS;
191111260SMiao.Chen@Sun.COM 
191211260SMiao.Chen@Sun.COM 	exec_start = exec_offset + exec->batch_start_offset;
191311260SMiao.Chen@Sun.COM 	exec_len = exec->batch_len;
191411260SMiao.Chen@Sun.COM 
191511260SMiao.Chen@Sun.COM 	if ((exec_start | exec_len) & 0x7) {
191611260SMiao.Chen@Sun.COM 		DRM_ERROR("batch start/length not 8-byte aligned\n");
191711260SMiao.Chen@Sun.COM 		return EINVAL;
191811260SMiao.Chen@Sun.COM 	}
191911260SMiao.Chen@Sun.COM 
192011260SMiao.Chen@Sun.COM 	if (!exec_start) {
192111260SMiao.Chen@Sun.COM 		DRM_ERROR("invalid batchbuffer start address\n");
192211260SMiao.Chen@Sun.COM 		return EINVAL;
192311260SMiao.Chen@Sun.COM 	}
192411260SMiao.Chen@Sun.COM 
192511260SMiao.Chen@Sun.COM 	count = nbox ? nbox : 1;
192611260SMiao.Chen@Sun.COM 
192711260SMiao.Chen@Sun.COM 	for (i = 0; i < count; i++) {
192811260SMiao.Chen@Sun.COM 		if (i < nbox) {
192911260SMiao.Chen@Sun.COM 			int ret = i915_emit_box(dev, boxes, i,
193011260SMiao.Chen@Sun.COM 						exec->DR1, exec->DR4);
193111260SMiao.Chen@Sun.COM 			if (ret) {
193211260SMiao.Chen@Sun.COM 				DRM_ERROR("i915_emit_box %d DR1 0x%lx DR4 0x%lx", ret, exec->DR1, exec->DR4);
193311260SMiao.Chen@Sun.COM 				return ret;
193411260SMiao.Chen@Sun.COM 			}
193511260SMiao.Chen@Sun.COM 		}
193611260SMiao.Chen@Sun.COM 		if (IS_I830(dev) || IS_845G(dev)) {
193711260SMiao.Chen@Sun.COM 			BEGIN_LP_RING(4);
193811260SMiao.Chen@Sun.COM 			OUT_RING(MI_BATCH_BUFFER);
193911260SMiao.Chen@Sun.COM 			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
194011260SMiao.Chen@Sun.COM 			OUT_RING(exec_start + exec_len - 4);
194111260SMiao.Chen@Sun.COM 			OUT_RING(0);
194211260SMiao.Chen@Sun.COM 			ADVANCE_LP_RING();
194311260SMiao.Chen@Sun.COM 		} else {
194411260SMiao.Chen@Sun.COM 			BEGIN_LP_RING(2);
194511260SMiao.Chen@Sun.COM 			if (IS_I965G(dev)) {
194611260SMiao.Chen@Sun.COM 				OUT_RING(MI_BATCH_BUFFER_START |
194711260SMiao.Chen@Sun.COM 					 (2 << 6) |
194811260SMiao.Chen@Sun.COM 					 (3 << 9) |
194911260SMiao.Chen@Sun.COM 					 MI_BATCH_NON_SECURE_I965);
195011260SMiao.Chen@Sun.COM 				OUT_RING(exec_start);
195111260SMiao.Chen@Sun.COM 
195211260SMiao.Chen@Sun.COM 			} else {
195311260SMiao.Chen@Sun.COM 				OUT_RING(MI_BATCH_BUFFER_START |
195411260SMiao.Chen@Sun.COM 					 (2 << 6));
195511260SMiao.Chen@Sun.COM 				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
195611260SMiao.Chen@Sun.COM 			}
195711260SMiao.Chen@Sun.COM 			ADVANCE_LP_RING();
195811260SMiao.Chen@Sun.COM 		}
195911260SMiao.Chen@Sun.COM 	}
196011260SMiao.Chen@Sun.COM 	/* XXX breadcrumb */
196111260SMiao.Chen@Sun.COM 	return 0;
196211260SMiao.Chen@Sun.COM }
196311260SMiao.Chen@Sun.COM 
196411260SMiao.Chen@Sun.COM /* Throttle our rendering by waiting until the ring has completed our requests
196511260SMiao.Chen@Sun.COM  * emitted over 20 msec ago.
196611260SMiao.Chen@Sun.COM  *
196711260SMiao.Chen@Sun.COM  * This should get us reasonable parallelism between CPU and GPU but also
196811260SMiao.Chen@Sun.COM  * relatively low latency when blocking on a particular request to finish.
196911260SMiao.Chen@Sun.COM  */
197011260SMiao.Chen@Sun.COM static int
197111260SMiao.Chen@Sun.COM i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
197211260SMiao.Chen@Sun.COM {
197311260SMiao.Chen@Sun.COM 	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
197411260SMiao.Chen@Sun.COM 	int ret = 0;
197511260SMiao.Chen@Sun.COM 	uint32_t seqno;
197611260SMiao.Chen@Sun.COM 
197711260SMiao.Chen@Sun.COM 	spin_lock(&dev->struct_mutex);
197811260SMiao.Chen@Sun.COM 	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
197911260SMiao.Chen@Sun.COM 	i915_file_priv->mm.last_gem_throttle_seqno =
198011260SMiao.Chen@Sun.COM 		i915_file_priv->mm.last_gem_seqno;
198111260SMiao.Chen@Sun.COM 	if (seqno) {
198211260SMiao.Chen@Sun.COM 		ret = i915_wait_request(dev, seqno);
198311260SMiao.Chen@Sun.COM 		if (ret != 0)
198411260SMiao.Chen@Sun.COM 			DRM_ERROR("%s: i915_wait_request request->seqno %d now %d\n", __func__, seqno, i915_get_gem_seqno(dev));
198511260SMiao.Chen@Sun.COM 	}
198611260SMiao.Chen@Sun.COM 	spin_unlock(&dev->struct_mutex);
198711260SMiao.Chen@Sun.COM 	return ret;
198811260SMiao.Chen@Sun.COM }
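
/*
 * A sketch of the throttle behaviour from a client's point of view,
 * assuming the execbuffer path records mm.last_gem_seqno when it queues a
 * request: each call waits for the seqno sampled on the *previous* call,
 * so a client stays roughly one batch of submissions ahead of the GPU
 * instead of blocking on its newest work.
 */
#if 0
static void
i915_gem_throttle_usage_sketch(struct drm_device *dev,
    struct drm_file *file_priv)
{
	/* Frame N: waits for what frame N-1 recorded (possibly nothing). */
	(void) i915_gem_ring_throttle(dev, file_priv);

	/* ... queue frame N's batchbuffers via the execbuffer ioctl ... */

	/* Frame N+1: this call would now wait for frame N's last request. */
	(void) i915_gem_ring_throttle(dev, file_priv);
}
#endif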
198911260SMiao.Chen@Sun.COM 
199011260SMiao.Chen@Sun.COM /*ARGSUSED*/
199111260SMiao.Chen@Sun.COM int
199211260SMiao.Chen@Sun.COM i915_gem_execbuffer(DRM_IOCTL_ARGS)
199311260SMiao.Chen@Sun.COM {
199411260SMiao.Chen@Sun.COM 	DRM_DEVICE;
199511260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
199611260SMiao.Chen@Sun.COM 	struct drm_i915_file_private *i915_file_priv = fpriv->driver_priv;
199711260SMiao.Chen@Sun.COM 	struct drm_i915_gem_execbuffer args;
199811260SMiao.Chen@Sun.COM 	struct drm_i915_gem_exec_object *exec_list = NULL;
199911260SMiao.Chen@Sun.COM 	struct drm_gem_object **object_list = NULL;
200011260SMiao.Chen@Sun.COM 	struct drm_gem_object *batch_obj;
200111260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv;
200211260SMiao.Chen@Sun.COM 	int ret = 0, i, pinned = 0;
200311260SMiao.Chen@Sun.COM 	uint64_t exec_offset;
200411260SMiao.Chen@Sun.COM 	uint32_t seqno, flush_domains;
200511260SMiao.Chen@Sun.COM 	int pin_tries;
200611260SMiao.Chen@Sun.COM 
200711260SMiao.Chen@Sun.COM 	if (dev->driver->use_gem != 1)
200811260SMiao.Chen@Sun.COM 		return ENODEV;
200911260SMiao.Chen@Sun.COM 
201011260SMiao.Chen@Sun.COM 	DRM_COPYFROM_WITH_RETURN(&args,
201111260SMiao.Chen@Sun.COM 	    (struct drm_i915_gem_execbuffer __user *) data, sizeof(args));
201211260SMiao.Chen@Sun.COM 
201311260SMiao.Chen@Sun.COM 	DRM_DEBUG("buffer_count %d len %x\n", args.buffer_count, args.batch_len);
201411260SMiao.Chen@Sun.COM 
201511260SMiao.Chen@Sun.COM 	if (args.buffer_count < 1) {
201611260SMiao.Chen@Sun.COM 		DRM_ERROR("execbuf with %d buffers\n", args.buffer_count);
201711260SMiao.Chen@Sun.COM 		return EINVAL;
201811260SMiao.Chen@Sun.COM 	}
201911260SMiao.Chen@Sun.COM 	/* Copy in the exec list from userland */
202011260SMiao.Chen@Sun.COM 	exec_list = drm_calloc(sizeof(*exec_list), args.buffer_count,
202111260SMiao.Chen@Sun.COM 			       DRM_MEM_DRIVER);
202211260SMiao.Chen@Sun.COM 	object_list = drm_calloc(sizeof(*object_list), args.buffer_count,
202311260SMiao.Chen@Sun.COM 				 DRM_MEM_DRIVER);
202411260SMiao.Chen@Sun.COM 	if (exec_list == NULL || object_list == NULL) {
202511260SMiao.Chen@Sun.COM 		DRM_ERROR("Failed to allocate exec or object list "
202611260SMiao.Chen@Sun.COM 			  "for %d buffers\n",
202711260SMiao.Chen@Sun.COM 			  args.buffer_count);
202811260SMiao.Chen@Sun.COM 		ret = ENOMEM;
202911260SMiao.Chen@Sun.COM 		goto pre_mutex_err;
203011260SMiao.Chen@Sun.COM 	}
203111260SMiao.Chen@Sun.COM 
203211260SMiao.Chen@Sun.COM 	ret = DRM_COPY_FROM_USER(exec_list,
203311260SMiao.Chen@Sun.COM 			     (struct drm_i915_gem_exec_object __user *)
203411260SMiao.Chen@Sun.COM 			     (uintptr_t) args.buffers_ptr,
203511260SMiao.Chen@Sun.COM 			     sizeof(*exec_list) * args.buffer_count);
203611260SMiao.Chen@Sun.COM 	if (ret != 0) {
203711260SMiao.Chen@Sun.COM 		DRM_ERROR("copy %d exec entries failed %d\n",
203811260SMiao.Chen@Sun.COM 			  args.buffer_count, ret);
203911260SMiao.Chen@Sun.COM 		goto pre_mutex_err;
204011260SMiao.Chen@Sun.COM 	}
204111260SMiao.Chen@Sun.COM 	spin_lock(&dev->struct_mutex);
204211260SMiao.Chen@Sun.COM 
204311260SMiao.Chen@Sun.COM 	if (dev_priv->mm.wedged) {
204411260SMiao.Chen@Sun.COM 		DRM_ERROR("Execbuf while wedged\n");
204511260SMiao.Chen@Sun.COM 		spin_unlock(&dev->struct_mutex);
204611260SMiao.Chen@Sun.COM 		ret = EIO;
		goto pre_mutex_err;
204711260SMiao.Chen@Sun.COM 	}
204811260SMiao.Chen@Sun.COM 
204911260SMiao.Chen@Sun.COM 	if (dev_priv->mm.suspended) {
205011260SMiao.Chen@Sun.COM 		DRM_ERROR("Execbuf while VT-switched.\n");
205111260SMiao.Chen@Sun.COM 		spin_unlock(&dev->struct_mutex);
205211260SMiao.Chen@Sun.COM 		ret = EBUSY;
		goto pre_mutex_err;
205311260SMiao.Chen@Sun.COM 	}
205411260SMiao.Chen@Sun.COM 
205511260SMiao.Chen@Sun.COM 	/* Look up object handles */
205611260SMiao.Chen@Sun.COM 	for (i = 0; i < args.buffer_count; i++) {
205711260SMiao.Chen@Sun.COM 		object_list[i] = drm_gem_object_lookup(fpriv,
205811260SMiao.Chen@Sun.COM 						       exec_list[i].handle);
205911260SMiao.Chen@Sun.COM 		if (object_list[i] == NULL) {
206011260SMiao.Chen@Sun.COM 			DRM_ERROR("Invalid object handle %d at index %d\n",
206111260SMiao.Chen@Sun.COM 				   exec_list[i].handle, i);
206211260SMiao.Chen@Sun.COM 			ret = EBADF;
206311260SMiao.Chen@Sun.COM 			goto err;
206411260SMiao.Chen@Sun.COM 		}
206511260SMiao.Chen@Sun.COM 		obj_priv = object_list[i]->driver_private;
206611260SMiao.Chen@Sun.COM 		if (obj_priv->in_execbuffer) {
206711260SMiao.Chen@Sun.COM 			DRM_ERROR("Object[%d] (%d) %p appears more than once in object list in args.buffer_count %d\n",
206811260SMiao.Chen@Sun.COM 				   i, object_list[i]->name, object_list[i], args.buffer_count);
206911260SMiao.Chen@Sun.COM 
207011260SMiao.Chen@Sun.COM 			ret = EBADF;
207111260SMiao.Chen@Sun.COM 			goto err;
207211260SMiao.Chen@Sun.COM 		}
207311260SMiao.Chen@Sun.COM 
207411260SMiao.Chen@Sun.COM 		obj_priv->in_execbuffer = 1;
207511260SMiao.Chen@Sun.COM 	}
207611260SMiao.Chen@Sun.COM 
207711260SMiao.Chen@Sun.COM 	/* Pin and relocate */
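	/*
	 * Two-pass strategy: try to pin every object where it currently
	 * fits.  If a pin fails with ENOMEM (GTT full), unpin whatever we
	 * managed to pin, evict everything from the aperture, and retry
	 * exactly once.
	 */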
207811260SMiao.Chen@Sun.COM 	for (pin_tries = 0; ; pin_tries++) {
207911260SMiao.Chen@Sun.COM 		ret = 0;
208011260SMiao.Chen@Sun.COM 		for (i = 0; i < args.buffer_count; i++) {
208111260SMiao.Chen@Sun.COM 			object_list[i]->pending_read_domains = 0;
208211260SMiao.Chen@Sun.COM 			object_list[i]->pending_write_domain = 0;
208311260SMiao.Chen@Sun.COM 			ret = i915_gem_object_pin_and_relocate(object_list[i],
208411260SMiao.Chen@Sun.COM 							       fpriv,
208511260SMiao.Chen@Sun.COM 							       &exec_list[i]);
208611260SMiao.Chen@Sun.COM 			if (ret) {
208711260SMiao.Chen@Sun.COM 				DRM_ERROR("Not all objects pinned\n");
208811260SMiao.Chen@Sun.COM 				break;
208911260SMiao.Chen@Sun.COM 			}
209011260SMiao.Chen@Sun.COM 			pinned = i + 1;
209111260SMiao.Chen@Sun.COM 		}
209211260SMiao.Chen@Sun.COM 		/* success */
209311260SMiao.Chen@Sun.COM 		if (ret == 0) {
209511260SMiao.Chen@Sun.COM 			DRM_DEBUG("gem_execbuffer pin_relocate success");
209611260SMiao.Chen@Sun.COM 			break;
209711260SMiao.Chen@Sun.COM 		}
209811260SMiao.Chen@Sun.COM 		/* error other than GTT full, or we've already tried again */
209911260SMiao.Chen@Sun.COM 		if (ret != ENOMEM || pin_tries >= 1) {
210011260SMiao.Chen@Sun.COM 			if (ret != ERESTART)
210111260SMiao.Chen@Sun.COM 				DRM_ERROR("Failed to pin buffers %d\n", ret);
210211260SMiao.Chen@Sun.COM 			goto err;
210311260SMiao.Chen@Sun.COM 		}
210411260SMiao.Chen@Sun.COM 
210511260SMiao.Chen@Sun.COM 		/* unpin all of our buffers */
210611260SMiao.Chen@Sun.COM 		for (i = 0; i < pinned; i++)
210711260SMiao.Chen@Sun.COM 			i915_gem_object_unpin(object_list[i]);
210811260SMiao.Chen@Sun.COM 		pinned = 0;
210911260SMiao.Chen@Sun.COM 
211011260SMiao.Chen@Sun.COM 		/* evict everyone we can from the aperture */
211111260SMiao.Chen@Sun.COM 		ret = i915_gem_evict_everything(dev);
211211260SMiao.Chen@Sun.COM 		if (ret)
211311260SMiao.Chen@Sun.COM 			goto err;
211411260SMiao.Chen@Sun.COM 	}
211511260SMiao.Chen@Sun.COM 
211611260SMiao.Chen@Sun.COM 	/* Set the pending read domains for the batch buffer to COMMAND */
211711260SMiao.Chen@Sun.COM 	batch_obj = object_list[args.buffer_count-1];
211811260SMiao.Chen@Sun.COM 	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
211911260SMiao.Chen@Sun.COM 	batch_obj->pending_write_domain = 0;
212011260SMiao.Chen@Sun.COM 
212111260SMiao.Chen@Sun.COM 	/* Zero the global flush/invalidate flags. These
212211260SMiao.Chen@Sun.COM 	 * will be modified as each object is bound to the
212311260SMiao.Chen@Sun.COM 	 * gtt
212411260SMiao.Chen@Sun.COM 	 */
212511260SMiao.Chen@Sun.COM 	dev->invalidate_domains = 0;
212611260SMiao.Chen@Sun.COM 	dev->flush_domains = 0;
212711260SMiao.Chen@Sun.COM 
212811260SMiao.Chen@Sun.COM 	for (i = 0; i < args.buffer_count; i++) {
212911260SMiao.Chen@Sun.COM 		struct drm_gem_object *obj = object_list[i];
213011260SMiao.Chen@Sun.COM 
213111260SMiao.Chen@Sun.COM 		/* Compute new gpu domains and update invalidate/flush */
213211260SMiao.Chen@Sun.COM 		i915_gem_object_set_to_gpu_domain(obj,
213311260SMiao.Chen@Sun.COM 						  obj->pending_read_domains,
213411260SMiao.Chen@Sun.COM 						  obj->pending_write_domain);
213511260SMiao.Chen@Sun.COM 	}
213611260SMiao.Chen@Sun.COM 
213711260SMiao.Chen@Sun.COM 	if (dev->invalidate_domains | dev->flush_domains) {
213811260SMiao.Chen@Sun.COM 
213911260SMiao.Chen@Sun.COM 		DRM_DEBUG("%s: invalidate_domains %08x flush_domains %08x Then flush\n",
214011260SMiao.Chen@Sun.COM 			  __func__,
214111260SMiao.Chen@Sun.COM 			 dev->invalidate_domains,
214211260SMiao.Chen@Sun.COM 			 dev->flush_domains);
214311260SMiao.Chen@Sun.COM 		i915_gem_flush(dev,
214411260SMiao.Chen@Sun.COM 		    dev->invalidate_domains,
214511260SMiao.Chen@Sun.COM 		    dev->flush_domains);
214611260SMiao.Chen@Sun.COM 		if (dev->flush_domains) {
214711260SMiao.Chen@Sun.COM 			(void) i915_add_request(dev, dev->flush_domains);
214811260SMiao.Chen@Sun.COM 
214911260SMiao.Chen@Sun.COM 		}
215011260SMiao.Chen@Sun.COM 	}
215111260SMiao.Chen@Sun.COM 
215211260SMiao.Chen@Sun.COM 	for (i = 0; i < args.buffer_count; i++) {
215311260SMiao.Chen@Sun.COM 		struct drm_gem_object *obj = object_list[i];
215411260SMiao.Chen@Sun.COM 
215511260SMiao.Chen@Sun.COM 		obj->write_domain = obj->pending_write_domain;
215611260SMiao.Chen@Sun.COM 	}
215711260SMiao.Chen@Sun.COM 
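	/*
	 * The batch buffer is required to be the last entry in the exec
	 * list; its GTT offset is where execution of the batch begins.
	 */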
215811260SMiao.Chen@Sun.COM 	exec_offset = exec_list[args.buffer_count - 1].offset;
215911260SMiao.Chen@Sun.COM 
216011260SMiao.Chen@Sun.COM 	/* Exec the batchbuffer */
216111260SMiao.Chen@Sun.COM 	ret = i915_dispatch_gem_execbuffer(dev, &args, exec_offset);
216211260SMiao.Chen@Sun.COM 	if (ret) {
216311260SMiao.Chen@Sun.COM 		DRM_ERROR("dispatch failed %d\n", ret);
216411260SMiao.Chen@Sun.COM 		goto err;
216511260SMiao.Chen@Sun.COM 	}
216611260SMiao.Chen@Sun.COM 
216711260SMiao.Chen@Sun.COM 	/*
216811260SMiao.Chen@Sun.COM 	 * Ensure that the commands in the batch buffer are
216911260SMiao.Chen@Sun.COM 	 * finished before the interrupt fires
217011260SMiao.Chen@Sun.COM 	 */
217111260SMiao.Chen@Sun.COM 	flush_domains = i915_retire_commands(dev);
217211260SMiao.Chen@Sun.COM 
217311260SMiao.Chen@Sun.COM 	/*
217411260SMiao.Chen@Sun.COM 	 * Get a seqno representing the execution of the current buffer,
217511260SMiao.Chen@Sun.COM 	 * which we can wait on.  We would like to mitigate these interrupts,
217611260SMiao.Chen@Sun.COM 	 * likely by only creating seqnos occasionally (so that we have
217711260SMiao.Chen@Sun.COM 	 * *some* interrupts representing completion of buffers that we can
217811260SMiao.Chen@Sun.COM 	 * wait on when trying to clear up gtt space).
217911260SMiao.Chen@Sun.COM 	 */
218011260SMiao.Chen@Sun.COM 	seqno = i915_add_request(dev, flush_domains);
218111260SMiao.Chen@Sun.COM 	ASSERT(!(seqno == 0));
218211260SMiao.Chen@Sun.COM 	i915_file_priv->mm.last_gem_seqno = seqno;
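	/*
	 * Tag every object in this submission with the new seqno so that
	 * request retirement knows when the GPU has finished with it.
	 */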
218311260SMiao.Chen@Sun.COM 	for (i = 0; i < args.buffer_count; i++) {
218411260SMiao.Chen@Sun.COM 		struct drm_gem_object *obj = object_list[i];
218511260SMiao.Chen@Sun.COM 		i915_gem_object_move_to_active(obj, seqno);
218611260SMiao.Chen@Sun.COM 		DRM_DEBUG("%s: move to exec list %p\n", __func__, obj);
218711260SMiao.Chen@Sun.COM 	}
218811260SMiao.Chen@Sun.COM 
218911260SMiao.Chen@Sun.COM err:
219011260SMiao.Chen@Sun.COM 	if (object_list != NULL) {
219111260SMiao.Chen@Sun.COM 		for (i = 0; i < pinned; i++)
219211260SMiao.Chen@Sun.COM 			i915_gem_object_unpin(object_list[i]);
219311260SMiao.Chen@Sun.COM 
219411260SMiao.Chen@Sun.COM 		for (i = 0; i < args.buffer_count; i++) {
219511260SMiao.Chen@Sun.COM 			if (object_list[i]) {
219611260SMiao.Chen@Sun.COM 				obj_priv = object_list[i]->driver_private;
219711260SMiao.Chen@Sun.COM 				obj_priv->in_execbuffer = 0;
219811260SMiao.Chen@Sun.COM 			}
219911260SMiao.Chen@Sun.COM 			drm_gem_object_unreference(object_list[i]);
220011260SMiao.Chen@Sun.COM 		}
220111260SMiao.Chen@Sun.COM 	}
220211260SMiao.Chen@Sun.COM 	spin_unlock(&dev->struct_mutex);
220311260SMiao.Chen@Sun.COM 
220411260SMiao.Chen@Sun.COM 	if (!ret) {
220511260SMiao.Chen@Sun.COM 		/* Copy the new buffer offsets back to the user's exec list. */
220611260SMiao.Chen@Sun.COM 		ret = DRM_COPY_TO_USER((struct drm_i915_relocation_entry __user *)
220711260SMiao.Chen@Sun.COM 		    (uintptr_t) args.buffers_ptr,
220811260SMiao.Chen@Sun.COM 		    exec_list,
220911260SMiao.Chen@Sun.COM 		    sizeof(*exec_list) * args.buffer_count);
221011260SMiao.Chen@Sun.COM 		if (ret)
221111260SMiao.Chen@Sun.COM 			DRM_ERROR("failed to copy %d exec entries "
221211260SMiao.Chen@Sun.COM 			    "back to user (%d)\n",
221311260SMiao.Chen@Sun.COM 			    args.buffer_count, ret);
221411260SMiao.Chen@Sun.COM 	}
221511260SMiao.Chen@Sun.COM 
221611260SMiao.Chen@Sun.COM pre_mutex_err:
221711260SMiao.Chen@Sun.COM 	drm_free(object_list, sizeof(*object_list) * args.buffer_count,
221811260SMiao.Chen@Sun.COM 		 DRM_MEM_DRIVER);
221911260SMiao.Chen@Sun.COM 	drm_free(exec_list, sizeof(*exec_list) * args.buffer_count,
222011260SMiao.Chen@Sun.COM 		 DRM_MEM_DRIVER);
222111260SMiao.Chen@Sun.COM 
222211260SMiao.Chen@Sun.COM 	return ret;
222311260SMiao.Chen@Sun.COM }
222411260SMiao.Chen@Sun.COM 
222511260SMiao.Chen@Sun.COM int
222611260SMiao.Chen@Sun.COM i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
222711260SMiao.Chen@Sun.COM {
222811260SMiao.Chen@Sun.COM 	struct drm_device *dev = obj->dev;
222911260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
223011260SMiao.Chen@Sun.COM 	int ret;
223111260SMiao.Chen@Sun.COM 
223211260SMiao.Chen@Sun.COM 	if (obj_priv->gtt_space == NULL) {
223311260SMiao.Chen@Sun.COM 		ret = i915_gem_object_bind_to_gtt(obj, alignment);
223411260SMiao.Chen@Sun.COM 		if (ret != 0) {
223511260SMiao.Chen@Sun.COM 			DRM_ERROR("Failure to bind: %d", ret);
223611260SMiao.Chen@Sun.COM 			return ret;
223711260SMiao.Chen@Sun.COM 		}
223811260SMiao.Chen@Sun.COM 	}
223911260SMiao.Chen@Sun.COM 	obj_priv->pin_count++;
224011260SMiao.Chen@Sun.COM 
224111260SMiao.Chen@Sun.COM 	/* If the object is not active and not pending a flush,
224211260SMiao.Chen@Sun.COM 	 * remove it from the inactive list
224311260SMiao.Chen@Sun.COM 	 */
224411260SMiao.Chen@Sun.COM 	if (obj_priv->pin_count == 1) {
224511260SMiao.Chen@Sun.COM 		atomic_inc(&dev->pin_count);
224611260SMiao.Chen@Sun.COM 		atomic_add(obj->size, &dev->pin_memory);
224711260SMiao.Chen@Sun.COM 		if (!obj_priv->active &&
224811260SMiao.Chen@Sun.COM 		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
224911260SMiao.Chen@Sun.COM 					   I915_GEM_DOMAIN_GTT)) == 0 &&
225011260SMiao.Chen@Sun.COM 		    !list_empty(&obj_priv->list))
225111260SMiao.Chen@Sun.COM 			list_del_init(&obj_priv->list);
225211260SMiao.Chen@Sun.COM 	}
225311260SMiao.Chen@Sun.COM 	return 0;
225411260SMiao.Chen@Sun.COM }
225511260SMiao.Chen@Sun.COM 
225611260SMiao.Chen@Sun.COM void
225711260SMiao.Chen@Sun.COM i915_gem_object_unpin(struct drm_gem_object *obj)
225811260SMiao.Chen@Sun.COM {
225911260SMiao.Chen@Sun.COM 	struct drm_device *dev = obj->dev;
226011260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
226111260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
226211260SMiao.Chen@Sun.COM 	obj_priv->pin_count--;
226311260SMiao.Chen@Sun.COM 	ASSERT(!(obj_priv->pin_count < 0));
226411260SMiao.Chen@Sun.COM 	ASSERT(!(obj_priv->gtt_space == NULL));
226511260SMiao.Chen@Sun.COM 
226611260SMiao.Chen@Sun.COM 	/* If the object is no longer pinned, and is
226711260SMiao.Chen@Sun.COM 	 * neither active nor being flushed, then stick it on
226811260SMiao.Chen@Sun.COM 	 * the inactive list
226911260SMiao.Chen@Sun.COM 	 */
227011260SMiao.Chen@Sun.COM 	if (obj_priv->pin_count == 0) {
227111260SMiao.Chen@Sun.COM 		if (!obj_priv->active &&
227211260SMiao.Chen@Sun.COM 		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
227311260SMiao.Chen@Sun.COM 					   I915_GEM_DOMAIN_GTT)) == 0)
227411260SMiao.Chen@Sun.COM 			list_move_tail(&obj_priv->list,
227511260SMiao.Chen@Sun.COM 				       &dev_priv->mm.inactive_list, (caddr_t)obj_priv);
227611260SMiao.Chen@Sun.COM 		atomic_dec(&dev->pin_count);
227711260SMiao.Chen@Sun.COM 		atomic_sub(obj->size, &dev->pin_memory);
227811260SMiao.Chen@Sun.COM 	}
227911260SMiao.Chen@Sun.COM }
228011260SMiao.Chen@Sun.COM 
228111260SMiao.Chen@Sun.COM /*ARGSUSED*/
228211260SMiao.Chen@Sun.COM int
228311260SMiao.Chen@Sun.COM i915_gem_pin_ioctl(DRM_IOCTL_ARGS)
228411260SMiao.Chen@Sun.COM {
228511260SMiao.Chen@Sun.COM 	DRM_DEVICE;
228611260SMiao.Chen@Sun.COM 	struct drm_i915_gem_pin args;
228711260SMiao.Chen@Sun.COM 	struct drm_gem_object *obj;
228811260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv;
228911260SMiao.Chen@Sun.COM 	int ret;
229011260SMiao.Chen@Sun.COM 
229111260SMiao.Chen@Sun.COM 	if (dev->driver->use_gem != 1)
229211260SMiao.Chen@Sun.COM 		return ENODEV;
229311260SMiao.Chen@Sun.COM 
229411260SMiao.Chen@Sun.COM 	DRM_COPYFROM_WITH_RETURN(&args,
229511260SMiao.Chen@Sun.COM 	    (struct drm_i915_gem_pin __user *) data, sizeof(args));
229611260SMiao.Chen@Sun.COM 
229711260SMiao.Chen@Sun.COM 	spin_lock(&dev->struct_mutex);
229811260SMiao.Chen@Sun.COM 
229911260SMiao.Chen@Sun.COM 	obj = drm_gem_object_lookup(fpriv, args.handle);
230011260SMiao.Chen@Sun.COM 	if (obj == NULL) {
230111260SMiao.Chen@Sun.COM 		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
230211260SMiao.Chen@Sun.COM 			  args.handle);
230311260SMiao.Chen@Sun.COM 		spin_unlock(&dev->struct_mutex);
230411260SMiao.Chen@Sun.COM 		return EBADF;
230511260SMiao.Chen@Sun.COM 	}
230611260SMiao.Chen@Sun.COM 	DRM_DEBUG("i915_gem_pin_ioctl obj->name %d", obj->name);
230711260SMiao.Chen@Sun.COM 	obj_priv = obj->driver_private;
230811260SMiao.Chen@Sun.COM 
230911260SMiao.Chen@Sun.COM 	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != fpriv) {
231011260SMiao.Chen@Sun.COM 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
231111260SMiao.Chen@Sun.COM 			  args.handle);
231211260SMiao.Chen@Sun.COM 		drm_gem_object_unreference(obj);
231311260SMiao.Chen@Sun.COM 		spin_unlock(&dev->struct_mutex);
231411260SMiao.Chen@Sun.COM 		return EINVAL;
231511260SMiao.Chen@Sun.COM 	}
231611260SMiao.Chen@Sun.COM 
231711260SMiao.Chen@Sun.COM 	obj_priv->user_pin_count++;
231811260SMiao.Chen@Sun.COM 	obj_priv->pin_filp = fpriv;
231911260SMiao.Chen@Sun.COM 	if (obj_priv->user_pin_count == 1) {
232011260SMiao.Chen@Sun.COM 		ret = i915_gem_object_pin(obj, args.alignment);
232111260SMiao.Chen@Sun.COM 		if (ret != 0) {
232211260SMiao.Chen@Sun.COM 			drm_gem_object_unreference(obj);
232311260SMiao.Chen@Sun.COM 			spin_unlock(&dev->struct_mutex);
232411260SMiao.Chen@Sun.COM 			return ret;
232511260SMiao.Chen@Sun.COM 		}
232611260SMiao.Chen@Sun.COM 	}
232711260SMiao.Chen@Sun.COM 
232811260SMiao.Chen@Sun.COM 	/* XXX - flush the CPU caches for pinned objects
232911260SMiao.Chen@Sun.COM 	 * as the X server doesn't manage domains yet
233011260SMiao.Chen@Sun.COM 	 */
233111260SMiao.Chen@Sun.COM 	i915_gem_object_flush_cpu_write_domain(obj);
233211260SMiao.Chen@Sun.COM 	args.offset = obj_priv->gtt_offset;
233311260SMiao.Chen@Sun.COM 
233411260SMiao.Chen@Sun.COM 	ret = DRM_COPY_TO_USER((struct drm_i915_gem_pin __user *) data, &args, sizeof(args));
233511260SMiao.Chen@Sun.COM 	if (ret != 0)
233611260SMiao.Chen@Sun.COM 		DRM_ERROR("gem pin ioctl error %d", ret);
233711260SMiao.Chen@Sun.COM 
233811260SMiao.Chen@Sun.COM 	drm_gem_object_unreference(obj);
233911260SMiao.Chen@Sun.COM 	spin_unlock(&dev->struct_mutex);
234011260SMiao.Chen@Sun.COM 
234111260SMiao.Chen@Sun.COM 	return 0;
234211260SMiao.Chen@Sun.COM }
234311260SMiao.Chen@Sun.COM 
234411260SMiao.Chen@Sun.COM /*ARGSUSED*/
234511260SMiao.Chen@Sun.COM int
234611260SMiao.Chen@Sun.COM i915_gem_unpin_ioctl(DRM_IOCTL_ARGS)
234711260SMiao.Chen@Sun.COM {
234811260SMiao.Chen@Sun.COM 	DRM_DEVICE;
234911260SMiao.Chen@Sun.COM 	struct drm_i915_gem_pin args;
235011260SMiao.Chen@Sun.COM 	struct drm_gem_object *obj;
235111260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv;
235211260SMiao.Chen@Sun.COM 
235311260SMiao.Chen@Sun.COM 	if (dev->driver->use_gem != 1)
235411260SMiao.Chen@Sun.COM 		return ENODEV;
235511260SMiao.Chen@Sun.COM 
235611260SMiao.Chen@Sun.COM 	DRM_COPYFROM_WITH_RETURN(&args,
235711260SMiao.Chen@Sun.COM 	    (struct drm_i915_gem_pin __user *) data, sizeof(args));
235811260SMiao.Chen@Sun.COM 
235911260SMiao.Chen@Sun.COM 	spin_lock(&dev->struct_mutex);
236011260SMiao.Chen@Sun.COM 
236111260SMiao.Chen@Sun.COM 	obj = drm_gem_object_lookup(fpriv, args.handle);
236211260SMiao.Chen@Sun.COM 	if (obj == NULL) {
236311260SMiao.Chen@Sun.COM 		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
236411260SMiao.Chen@Sun.COM 			  args.handle);
236511260SMiao.Chen@Sun.COM 		spin_unlock(&dev->struct_mutex);
236611260SMiao.Chen@Sun.COM 		return EBADF;
236711260SMiao.Chen@Sun.COM 	}
236811260SMiao.Chen@Sun.COM 	obj_priv = obj->driver_private;
236911260SMiao.Chen@Sun.COM 	DRM_DEBUG("i915_gem_unpin_ioctl, obj->name %d", obj->name);
237011260SMiao.Chen@Sun.COM 	if (obj_priv->pin_filp != fpriv) {
237111260SMiao.Chen@Sun.COM 		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
237211260SMiao.Chen@Sun.COM 			  args.handle);
237311260SMiao.Chen@Sun.COM 		drm_gem_object_unreference(obj);
237411260SMiao.Chen@Sun.COM 		spin_unlock(&dev->struct_mutex);
237511260SMiao.Chen@Sun.COM 		return EINVAL;
237611260SMiao.Chen@Sun.COM 	}
237711260SMiao.Chen@Sun.COM 	obj_priv->user_pin_count--;
237811260SMiao.Chen@Sun.COM 	if (obj_priv->user_pin_count == 0) {
237911260SMiao.Chen@Sun.COM 		obj_priv->pin_filp = NULL;
238011260SMiao.Chen@Sun.COM 		i915_gem_object_unpin(obj);
238111260SMiao.Chen@Sun.COM 	}
238211260SMiao.Chen@Sun.COM 	drm_gem_object_unreference(obj);
238311260SMiao.Chen@Sun.COM 	spin_unlock(&dev->struct_mutex);
238411260SMiao.Chen@Sun.COM 	return 0;
238511260SMiao.Chen@Sun.COM }
238611260SMiao.Chen@Sun.COM 
238711260SMiao.Chen@Sun.COM /*ARGSUSED*/
238811260SMiao.Chen@Sun.COM int
238911260SMiao.Chen@Sun.COM i915_gem_busy_ioctl(DRM_IOCTL_ARGS)
239011260SMiao.Chen@Sun.COM {
239111260SMiao.Chen@Sun.COM 	DRM_DEVICE;
239211260SMiao.Chen@Sun.COM 	struct drm_i915_gem_busy args;
239311260SMiao.Chen@Sun.COM 	struct drm_gem_object *obj;
239411260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv;
239511260SMiao.Chen@Sun.COM 	int ret;
239611260SMiao.Chen@Sun.COM 
239711260SMiao.Chen@Sun.COM 	if (dev->driver->use_gem != 1)
239811260SMiao.Chen@Sun.COM 		return ENODEV;
239911260SMiao.Chen@Sun.COM 
240011260SMiao.Chen@Sun.COM 	DRM_COPYFROM_WITH_RETURN(&args,
240111260SMiao.Chen@Sun.COM 	    (struct drm_i915_gem_busy __user *) data, sizeof(args));
240211260SMiao.Chen@Sun.COM 
240311260SMiao.Chen@Sun.COM 	spin_lock(&dev->struct_mutex);
240411260SMiao.Chen@Sun.COM 	obj = drm_gem_object_lookup(fpriv, args.handle);
240511260SMiao.Chen@Sun.COM 	if (obj == NULL) {
240611260SMiao.Chen@Sun.COM 		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
240711260SMiao.Chen@Sun.COM 			  args.handle);
240811260SMiao.Chen@Sun.COM 		spin_unlock(&dev->struct_mutex);
240911260SMiao.Chen@Sun.COM 		return EBADF;
241011260SMiao.Chen@Sun.COM 	}
241111260SMiao.Chen@Sun.COM 
241211260SMiao.Chen@Sun.COM 	obj_priv = obj->driver_private;
241311260SMiao.Chen@Sun.COM 	/* Don't count being on the flushing list against the object being
241411260SMiao.Chen@Sun.COM 	 * done.  Otherwise, a buffer left on the flushing list but not getting
241511260SMiao.Chen@Sun.COM 	 * flushed (because nobody's flushing that domain) won't ever return
241611260SMiao.Chen@Sun.COM 	 * unbusy and get reused by libdrm's bo cache.  The other expected
241711260SMiao.Chen@Sun.COM 	 * consumer of this interface, OpenGL's occlusion queries, also specs
241811260SMiao.Chen@Sun.COM 	 * that the objects get unbusy "eventually" without any interference.
241911260SMiao.Chen@Sun.COM 	 */
242011260SMiao.Chen@Sun.COM 	args.busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
242111260SMiao.Chen@Sun.COM 	DRM_DEBUG("i915_gem_busy_ioctl call obj->name %d busy %d", obj->name, args.busy);
242211260SMiao.Chen@Sun.COM 
242311260SMiao.Chen@Sun.COM 	ret = DRM_COPY_TO_USER((struct drm_i915_gem_busy __user *) data, &args, sizeof(args));
242411260SMiao.Chen@Sun.COM 	if (ret != 0)
242511260SMiao.Chen@Sun.COM 		DRM_ERROR("gem busy ioctl error %d", ret);
242611260SMiao.Chen@Sun.COM 
242711260SMiao.Chen@Sun.COM 	drm_gem_object_unreference(obj);
242811260SMiao.Chen@Sun.COM 	spin_unlock(&dev->struct_mutex);
242911260SMiao.Chen@Sun.COM 	return 0;
243011260SMiao.Chen@Sun.COM }
243111260SMiao.Chen@Sun.COM 
243211260SMiao.Chen@Sun.COM /*ARGSUSED*/
243311260SMiao.Chen@Sun.COM int
243411260SMiao.Chen@Sun.COM i915_gem_throttle_ioctl(DRM_IOCTL_ARGS)
243511260SMiao.Chen@Sun.COM {
243611260SMiao.Chen@Sun.COM 	DRM_DEVICE;
243711260SMiao.Chen@Sun.COM 
243811260SMiao.Chen@Sun.COM 	if (dev->driver->use_gem != 1)
243911260SMiao.Chen@Sun.COM 		return ENODEV;
244011260SMiao.Chen@Sun.COM 
244111260SMiao.Chen@Sun.COM 	return i915_gem_ring_throttle(dev, fpriv);
244211260SMiao.Chen@Sun.COM }
244311260SMiao.Chen@Sun.COM 
244411260SMiao.Chen@Sun.COM static int
244511260SMiao.Chen@Sun.COM i915_gem_object_get_page_list(struct drm_gem_object *obj)
244611260SMiao.Chen@Sun.COM {
244711260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
244811260SMiao.Chen@Sun.COM 	caddr_t va;
244911260SMiao.Chen@Sun.COM 	long i;
245011260SMiao.Chen@Sun.COM 
245111260SMiao.Chen@Sun.COM 	if (obj_priv->page_list)
245211260SMiao.Chen@Sun.COM 		return 0;
245311260SMiao.Chen@Sun.COM 	pgcnt_t np = btop(obj->size);
245411260SMiao.Chen@Sun.COM 
245511260SMiao.Chen@Sun.COM 	obj_priv->page_list = kmem_zalloc(np * sizeof(caddr_t), KM_SLEEP);
245611260SMiao.Chen@Sun.COM 	if (obj_priv->page_list == NULL) {
245711260SMiao.Chen@Sun.COM 		DRM_ERROR("Failed to allocate page list\n");
245811260SMiao.Chen@Sun.COM 		return ENOMEM;
245911260SMiao.Chen@Sun.COM 	}
246011260SMiao.Chen@Sun.COM 
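	/*
	 * On this port the object's backing memory is a contiguous kernel
	 * mapping starting at obj->kaddr, so the page list is simply the
	 * kernel virtual address of each page in turn.
	 */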
246111260SMiao.Chen@Sun.COM 	for (i = 0, va = obj->kaddr; i < np; i++, va += PAGESIZE) {
246211260SMiao.Chen@Sun.COM 		obj_priv->page_list[i] = va;
246311260SMiao.Chen@Sun.COM 	}
246411260SMiao.Chen@Sun.COM 	return 0;
246511260SMiao.Chen@Sun.COM }
246611260SMiao.Chen@Sun.COM 
246711260SMiao.Chen@Sun.COM 
246811260SMiao.Chen@Sun.COM int i915_gem_init_object(struct drm_gem_object *obj)
246911260SMiao.Chen@Sun.COM {
247011260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv;
247111260SMiao.Chen@Sun.COM 
247211260SMiao.Chen@Sun.COM 	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
247311260SMiao.Chen@Sun.COM 	if (obj_priv == NULL)
247411260SMiao.Chen@Sun.COM 		return ENOMEM;
247511260SMiao.Chen@Sun.COM 
247611260SMiao.Chen@Sun.COM 	/*
247711260SMiao.Chen@Sun.COM 	 * We've just allocated pages from the kernel,
247811260SMiao.Chen@Sun.COM 	 * so they've just been written by the CPU with
247911260SMiao.Chen@Sun.COM 	 * zeros. They'll need to be clflushed before we
248011260SMiao.Chen@Sun.COM 	 * use them with the GPU.
248111260SMiao.Chen@Sun.COM 	 */
248211260SMiao.Chen@Sun.COM 	obj->write_domain = I915_GEM_DOMAIN_CPU;
248311260SMiao.Chen@Sun.COM 	obj->read_domains = I915_GEM_DOMAIN_CPU;
248411260SMiao.Chen@Sun.COM 
248511260SMiao.Chen@Sun.COM 	obj->driver_private = obj_priv;
248611260SMiao.Chen@Sun.COM 	obj_priv->obj = obj;
248711260SMiao.Chen@Sun.COM 	INIT_LIST_HEAD(&obj_priv->list);
248811260SMiao.Chen@Sun.COM 	return 0;
248911260SMiao.Chen@Sun.COM }
249011260SMiao.Chen@Sun.COM 
249111260SMiao.Chen@Sun.COM void i915_gem_free_object(struct drm_gem_object *obj)
249211260SMiao.Chen@Sun.COM {
249311260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
249411260SMiao.Chen@Sun.COM 
249511260SMiao.Chen@Sun.COM 	while (obj_priv->pin_count > 0)
249611260SMiao.Chen@Sun.COM 		i915_gem_object_unpin(obj);
249711260SMiao.Chen@Sun.COM 
249811260SMiao.Chen@Sun.COM 	DRM_DEBUG("%s: obj %d", __func__, obj->name);
249911260SMiao.Chen@Sun.COM 
250011260SMiao.Chen@Sun.COM 	(void) i915_gem_object_unbind(obj, 1);
250111260SMiao.Chen@Sun.COM 	if (obj_priv->page_cpu_valid != NULL)
250211260SMiao.Chen@Sun.COM 		drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, DRM_MEM_DRIVER);
250311260SMiao.Chen@Sun.COM 	drm_free(obj->driver_private, sizeof(*obj_priv), DRM_MEM_DRIVER);
250411260SMiao.Chen@Sun.COM }
250511260SMiao.Chen@Sun.COM 
250611260SMiao.Chen@Sun.COM /** Unbinds all objects that are on the given buffer list. */
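/* Called with dev->struct_mutex held; note that on error the mutex is dropped here. */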
250711260SMiao.Chen@Sun.COM static int
250811260SMiao.Chen@Sun.COM i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head, uint32_t type)
250911260SMiao.Chen@Sun.COM {
251011260SMiao.Chen@Sun.COM 	struct drm_gem_object *obj;
251111260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv;
251211260SMiao.Chen@Sun.COM 	int ret;
251311260SMiao.Chen@Sun.COM 
251411260SMiao.Chen@Sun.COM 	while (!list_empty(head)) {
251511260SMiao.Chen@Sun.COM 		obj_priv = list_entry(head->next,
251611260SMiao.Chen@Sun.COM 				struct drm_i915_gem_object,
251711260SMiao.Chen@Sun.COM 				list);
251811260SMiao.Chen@Sun.COM 		obj = obj_priv->obj;
251911260SMiao.Chen@Sun.COM 
252011260SMiao.Chen@Sun.COM 		if (obj_priv->pin_count != 0) {
252111260SMiao.Chen@Sun.COM 			DRM_ERROR("Pinned object in unbind list\n");
252211260SMiao.Chen@Sun.COM 			spin_unlock(&dev->struct_mutex);
252311260SMiao.Chen@Sun.COM 			return EINVAL;
252411260SMiao.Chen@Sun.COM 		}
252511260SMiao.Chen@Sun.COM 		DRM_DEBUG("%s: obj %d type %d", __func__, obj->name, type);
252611260SMiao.Chen@Sun.COM 		ret = i915_gem_object_unbind(obj, type);
252711260SMiao.Chen@Sun.COM 		if (ret != 0) {
252811260SMiao.Chen@Sun.COM 			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
252911260SMiao.Chen@Sun.COM 				  ret);
253011260SMiao.Chen@Sun.COM 			spin_unlock(&dev->struct_mutex);
253111260SMiao.Chen@Sun.COM 			return ret;
253211260SMiao.Chen@Sun.COM 		}
253311260SMiao.Chen@Sun.COM 	}
253411260SMiao.Chen@Sun.COM 
253511260SMiao.Chen@Sun.COM 
253611260SMiao.Chen@Sun.COM 	return 0;
253711260SMiao.Chen@Sun.COM }
253811260SMiao.Chen@Sun.COM 
253911260SMiao.Chen@Sun.COM static int
254011260SMiao.Chen@Sun.COM i915_gem_idle(struct drm_device *dev, uint32_t type)
254111260SMiao.Chen@Sun.COM {
254211260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
254311260SMiao.Chen@Sun.COM 	uint32_t seqno, cur_seqno, last_seqno;
254411260SMiao.Chen@Sun.COM 	int stuck, ret;
254511260SMiao.Chen@Sun.COM 
254611260SMiao.Chen@Sun.COM 	spin_lock(&dev->struct_mutex);
254711260SMiao.Chen@Sun.COM 
254811260SMiao.Chen@Sun.COM 	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
254911260SMiao.Chen@Sun.COM 		spin_unlock(&dev->struct_mutex);
255011260SMiao.Chen@Sun.COM 		return 0;
255111260SMiao.Chen@Sun.COM 	}
255211260SMiao.Chen@Sun.COM 
255311260SMiao.Chen@Sun.COM 	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
255411260SMiao.Chen@Sun.COM 	 * We need to replace this with a semaphore, or something.
255511260SMiao.Chen@Sun.COM 	 */
255611260SMiao.Chen@Sun.COM 	dev_priv->mm.suspended = 1;
255711260SMiao.Chen@Sun.COM 
255811260SMiao.Chen@Sun.COM 	/* Cancel the retire work handler, wait for it to finish if running
255911260SMiao.Chen@Sun.COM 	 */
256011260SMiao.Chen@Sun.COM 	if (worktimer_id != NULL) {
256111260SMiao.Chen@Sun.COM 		(void) untimeout(worktimer_id);
256211260SMiao.Chen@Sun.COM 		worktimer_id = NULL;
256311260SMiao.Chen@Sun.COM 	}
256411260SMiao.Chen@Sun.COM 
256511260SMiao.Chen@Sun.COM 	i915_kernel_lost_context(dev);
256611260SMiao.Chen@Sun.COM 
256711260SMiao.Chen@Sun.COM 	/* Flush the GPU along with all non-CPU write domains
256811260SMiao.Chen@Sun.COM 	 */
256911260SMiao.Chen@Sun.COM 	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
257011260SMiao.Chen@Sun.COM 		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
257111260SMiao.Chen@Sun.COM 	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
257211260SMiao.Chen@Sun.COM 					I915_GEM_DOMAIN_GTT));
257311260SMiao.Chen@Sun.COM 	if (seqno == 0) {
257411260SMiao.Chen@Sun.COM 		spin_unlock(&dev->struct_mutex);
257511260SMiao.Chen@Sun.COM 		return ENOMEM;
257611260SMiao.Chen@Sun.COM 	}
257711260SMiao.Chen@Sun.COM 
257811260SMiao.Chen@Sun.COM 	dev_priv->mm.waiting_gem_seqno = seqno;
257911260SMiao.Chen@Sun.COM 	last_seqno = 0;
258011260SMiao.Chen@Sun.COM 	stuck = 0;
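	/*
	 * Poll for the final seqno to pass.  If the reported seqno makes no
	 * forward progress for on the order of a hundred 10-microsecond
	 * polls, give up, mark the hardware wedged, and wake any waiters.
	 */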
258111260SMiao.Chen@Sun.COM 	for (;;) {
258211260SMiao.Chen@Sun.COM 		cur_seqno = i915_get_gem_seqno(dev);
258311260SMiao.Chen@Sun.COM 		if (i915_seqno_passed(cur_seqno, seqno))
258411260SMiao.Chen@Sun.COM 			break;
258511260SMiao.Chen@Sun.COM 		if (last_seqno == cur_seqno) {
258611260SMiao.Chen@Sun.COM 			if (stuck++ > 100) {
258711260SMiao.Chen@Sun.COM 				DRM_ERROR("hardware wedged\n");
258811260SMiao.Chen@Sun.COM 				dev_priv->mm.wedged = 1;
258911260SMiao.Chen@Sun.COM 				DRM_WAKEUP(&dev_priv->irq_queue);
259011260SMiao.Chen@Sun.COM 				break;
259111260SMiao.Chen@Sun.COM 			}
259211260SMiao.Chen@Sun.COM 		}
259311260SMiao.Chen@Sun.COM 		DRM_UDELAY(10);
259411260SMiao.Chen@Sun.COM 		last_seqno = cur_seqno;
259511260SMiao.Chen@Sun.COM 	}
259611260SMiao.Chen@Sun.COM 	dev_priv->mm.waiting_gem_seqno = 0;
259711260SMiao.Chen@Sun.COM 
259811260SMiao.Chen@Sun.COM 	i915_gem_retire_requests(dev);
259911260SMiao.Chen@Sun.COM 
260011260SMiao.Chen@Sun.COM 	/* Empty the active and flushing lists to inactive.  If there's
260111260SMiao.Chen@Sun.COM 	 * anything left at this point, it means that we're wedged and
260211260SMiao.Chen@Sun.COM 	 * nothing good's going to happen by leaving them there.  So strip
260311260SMiao.Chen@Sun.COM 	 * the GPU domains and just stuff them onto inactive.
260411260SMiao.Chen@Sun.COM 	 */
260511260SMiao.Chen@Sun.COM 	while (!list_empty(&dev_priv->mm.active_list)) {
260611260SMiao.Chen@Sun.COM 		struct drm_i915_gem_object *obj_priv;
260711260SMiao.Chen@Sun.COM 
260811260SMiao.Chen@Sun.COM 		obj_priv = list_entry(dev_priv->mm.active_list.next,
260911260SMiao.Chen@Sun.COM 					    struct drm_i915_gem_object,
261011260SMiao.Chen@Sun.COM 					    list);
261111260SMiao.Chen@Sun.COM 		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
261211260SMiao.Chen@Sun.COM 		i915_gem_object_move_to_inactive(obj_priv->obj);
261311260SMiao.Chen@Sun.COM 	}
261411260SMiao.Chen@Sun.COM 
261511260SMiao.Chen@Sun.COM 	while (!list_empty(&dev_priv->mm.flushing_list)) {
261611260SMiao.Chen@Sun.COM 		struct drm_i915_gem_object *obj_priv;
261711260SMiao.Chen@Sun.COM 
261811260SMiao.Chen@Sun.COM 		obj_priv = list_entry(dev_priv->mm.flushing_list.next,
261911260SMiao.Chen@Sun.COM 					    struct drm_i915_gem_object,
262011260SMiao.Chen@Sun.COM 					    list);
262111260SMiao.Chen@Sun.COM 		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
262211260SMiao.Chen@Sun.COM 		i915_gem_object_move_to_inactive(obj_priv->obj);
262311260SMiao.Chen@Sun.COM 	}
262411260SMiao.Chen@Sun.COM 
262511260SMiao.Chen@Sun.COM 	/* Move all inactive buffers out of the GTT. */
262611260SMiao.Chen@Sun.COM 	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list, type);
262711260SMiao.Chen@Sun.COM 	ASSERT(list_empty(&dev_priv->mm.inactive_list));
262811260SMiao.Chen@Sun.COM 	if (ret) {
262911260SMiao.Chen@Sun.COM 		spin_unlock(&dev->struct_mutex);
263011260SMiao.Chen@Sun.COM 		return ret;
263111260SMiao.Chen@Sun.COM 	}
263211260SMiao.Chen@Sun.COM 
263311260SMiao.Chen@Sun.COM 	i915_gem_cleanup_ringbuffer(dev);
263411260SMiao.Chen@Sun.COM 	spin_unlock(&dev->struct_mutex);
263511260SMiao.Chen@Sun.COM 
263611260SMiao.Chen@Sun.COM 	return 0;
263711260SMiao.Chen@Sun.COM }
263811260SMiao.Chen@Sun.COM 
263911260SMiao.Chen@Sun.COM static int
264011260SMiao.Chen@Sun.COM i915_gem_init_hws(struct drm_device *dev)
264111260SMiao.Chen@Sun.COM {
264211260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
264311260SMiao.Chen@Sun.COM 	struct drm_gem_object *obj;
264411260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv;
264511260SMiao.Chen@Sun.COM 	int ret;
264611260SMiao.Chen@Sun.COM 
264711260SMiao.Chen@Sun.COM 	/* If we need a physical address for the status page, it's already
264811260SMiao.Chen@Sun.COM 	 * initialized at driver load time.
264911260SMiao.Chen@Sun.COM 	 */
265011260SMiao.Chen@Sun.COM 	if (!I915_NEED_GFX_HWS(dev))
265111260SMiao.Chen@Sun.COM 		return 0;
265211260SMiao.Chen@Sun.COM 
265311260SMiao.Chen@Sun.COM 
265411260SMiao.Chen@Sun.COM 	obj = drm_gem_object_alloc(dev, 4096);
265511260SMiao.Chen@Sun.COM 	if (obj == NULL) {
265611260SMiao.Chen@Sun.COM 		DRM_ERROR("Failed to allocate status page\n");
265711260SMiao.Chen@Sun.COM 		return ENOMEM;
265811260SMiao.Chen@Sun.COM 	}
265911260SMiao.Chen@Sun.COM 
266011260SMiao.Chen@Sun.COM 	obj_priv = obj->driver_private;
266111260SMiao.Chen@Sun.COM 
266211260SMiao.Chen@Sun.COM 	ret = i915_gem_object_pin(obj, 4096);
266311260SMiao.Chen@Sun.COM 	if (ret != 0) {
266411260SMiao.Chen@Sun.COM 		drm_gem_object_unreference(obj);
266511260SMiao.Chen@Sun.COM 		return ret;
266611260SMiao.Chen@Sun.COM 	}
266711260SMiao.Chen@Sun.COM 
266811260SMiao.Chen@Sun.COM 	dev_priv->status_gfx_addr = obj_priv->gtt_offset;
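	/*
	 * Map the status page through the AGP aperture so the CPU can read
	 * it, while HWS_PGA (written below) points the GPU at the same page
	 * by its graphics address.
	 */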
266911260SMiao.Chen@Sun.COM 	dev_priv->hws_map.offset = dev->agp->agp_info.agpi_aperbase + obj_priv->gtt_offset;
267011260SMiao.Chen@Sun.COM 	dev_priv->hws_map.size = 4096;
267111260SMiao.Chen@Sun.COM 	dev_priv->hws_map.type = 0;
267211260SMiao.Chen@Sun.COM 	dev_priv->hws_map.flags = 0;
267311260SMiao.Chen@Sun.COM 	dev_priv->hws_map.mtrr = 0;
267411260SMiao.Chen@Sun.COM 
267511260SMiao.Chen@Sun.COM 	drm_core_ioremap(&dev_priv->hws_map, dev);
267611260SMiao.Chen@Sun.COM 	if (dev_priv->hws_map.handle == NULL) {
267711260SMiao.Chen@Sun.COM 		DRM_ERROR("Failed to map status page.\n");
267811260SMiao.Chen@Sun.COM 		(void) memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
267911260SMiao.Chen@Sun.COM 		drm_gem_object_unreference(obj);
268011260SMiao.Chen@Sun.COM 		return EINVAL;
268111260SMiao.Chen@Sun.COM 	}
268211260SMiao.Chen@Sun.COM 
268311260SMiao.Chen@Sun.COM 	dev_priv->hws_obj = obj;
268411260SMiao.Chen@Sun.COM 
268511260SMiao.Chen@Sun.COM 	dev_priv->hw_status_page = dev_priv->hws_map.handle;
268611260SMiao.Chen@Sun.COM 
268711260SMiao.Chen@Sun.COM 	(void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
268811260SMiao.Chen@Sun.COM 	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
268911260SMiao.Chen@Sun.COM 	(void) I915_READ(HWS_PGA); /* posting read */
269011260SMiao.Chen@Sun.COM 	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
269111260SMiao.Chen@Sun.COM 
269211260SMiao.Chen@Sun.COM 	return 0;
269311260SMiao.Chen@Sun.COM }
269411260SMiao.Chen@Sun.COM 
269511260SMiao.Chen@Sun.COM static void
269611260SMiao.Chen@Sun.COM i915_gem_cleanup_hws(struct drm_device *dev)
269711260SMiao.Chen@Sun.COM {
269811260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
269911260SMiao.Chen@Sun.COM 	struct drm_gem_object *obj;
270011260SMiao.Chen@Sun.COM 
270111260SMiao.Chen@Sun.COM 	if (dev_priv->hws_obj == NULL)
270211260SMiao.Chen@Sun.COM 		return;
270311260SMiao.Chen@Sun.COM 
270411260SMiao.Chen@Sun.COM 	obj = dev_priv->hws_obj;
270511260SMiao.Chen@Sun.COM 
270611260SMiao.Chen@Sun.COM 	drm_core_ioremapfree(&dev_priv->hws_map, dev);
270711260SMiao.Chen@Sun.COM 	i915_gem_object_unpin(obj);
270811260SMiao.Chen@Sun.COM 	drm_gem_object_unreference(obj);
270911260SMiao.Chen@Sun.COM 	dev_priv->hws_obj = NULL;
271011260SMiao.Chen@Sun.COM 
271111260SMiao.Chen@Sun.COM 	(void) memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
271211260SMiao.Chen@Sun.COM 	dev_priv->hw_status_page = NULL;
271311260SMiao.Chen@Sun.COM 
271411260SMiao.Chen@Sun.COM 	/* Write high address into HWS_PGA when disabling. */
271511260SMiao.Chen@Sun.COM 	I915_WRITE(HWS_PGA, 0x1ffff000);
271611260SMiao.Chen@Sun.COM }
271711260SMiao.Chen@Sun.COM 
271811260SMiao.Chen@Sun.COM int
271911260SMiao.Chen@Sun.COM i915_gem_init_ringbuffer(struct drm_device *dev)
272011260SMiao.Chen@Sun.COM {
272111260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
272211260SMiao.Chen@Sun.COM 	struct drm_gem_object *obj;
272311260SMiao.Chen@Sun.COM 	struct drm_i915_gem_object *obj_priv;
272411260SMiao.Chen@Sun.COM 	int ret;
272511260SMiao.Chen@Sun.COM 	u32 head;
272611260SMiao.Chen@Sun.COM 
272711260SMiao.Chen@Sun.COM 	ret = i915_gem_init_hws(dev);
272811260SMiao.Chen@Sun.COM 	if (ret != 0)
272911260SMiao.Chen@Sun.COM 		return ret;
273011260SMiao.Chen@Sun.COM 	obj = drm_gem_object_alloc(dev, 128 * 1024);
273111260SMiao.Chen@Sun.COM 	if (obj == NULL) {
273211260SMiao.Chen@Sun.COM 		DRM_ERROR("Failed to allocate ringbuffer\n");
273311260SMiao.Chen@Sun.COM 		i915_gem_cleanup_hws(dev);
273411260SMiao.Chen@Sun.COM 		return ENOMEM;
273511260SMiao.Chen@Sun.COM 	}
273611260SMiao.Chen@Sun.COM 
273711260SMiao.Chen@Sun.COM 	obj_priv = obj->driver_private;
273811260SMiao.Chen@Sun.COM 	ret = i915_gem_object_pin(obj, 4096);
273911260SMiao.Chen@Sun.COM 	if (ret != 0) {
274011260SMiao.Chen@Sun.COM 		drm_gem_object_unreference(obj);
274111260SMiao.Chen@Sun.COM 		i915_gem_cleanup_hws(dev);
274211260SMiao.Chen@Sun.COM 		return ret;
274311260SMiao.Chen@Sun.COM 	}
274411260SMiao.Chen@Sun.COM 
274511260SMiao.Chen@Sun.COM 	/* Set up the kernel mapping for the ring. */
274611260SMiao.Chen@Sun.COM 	dev_priv->ring.Size = obj->size;
274711260SMiao.Chen@Sun.COM 	dev_priv->ring.tail_mask = obj->size - 1;
274811260SMiao.Chen@Sun.COM 
274911260SMiao.Chen@Sun.COM 	dev_priv->ring.map.offset = dev->agp->agp_info.agpi_aperbase + obj_priv->gtt_offset;
275011260SMiao.Chen@Sun.COM 	dev_priv->ring.map.size = obj->size;
275111260SMiao.Chen@Sun.COM 	dev_priv->ring.map.type = 0;
275211260SMiao.Chen@Sun.COM 	dev_priv->ring.map.flags = 0;
275311260SMiao.Chen@Sun.COM 	dev_priv->ring.map.mtrr = 0;
275411260SMiao.Chen@Sun.COM 
275511260SMiao.Chen@Sun.COM 	drm_core_ioremap(&dev_priv->ring.map, dev);
275611260SMiao.Chen@Sun.COM 	if (dev_priv->ring.map.handle == NULL) {
275711260SMiao.Chen@Sun.COM 		DRM_ERROR("Failed to map ringbuffer.\n");
275811260SMiao.Chen@Sun.COM 		(void) memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
275911260SMiao.Chen@Sun.COM 		drm_gem_object_unreference(obj);
276011260SMiao.Chen@Sun.COM 		i915_gem_cleanup_hws(dev);
276111260SMiao.Chen@Sun.COM 		return EINVAL;
276211260SMiao.Chen@Sun.COM 	}
276311260SMiao.Chen@Sun.COM 
276411260SMiao.Chen@Sun.COM 	dev_priv->ring.ring_obj = obj;
276511260SMiao.Chen@Sun.COM 
276611260SMiao.Chen@Sun.COM 	dev_priv->ring.virtual_start = (u8 *) dev_priv->ring.map.handle;
276711260SMiao.Chen@Sun.COM 
276811260SMiao.Chen@Sun.COM 	/* Stop the ring if it's running. */
276911260SMiao.Chen@Sun.COM 	I915_WRITE(PRB0_CTL, 0);
277011260SMiao.Chen@Sun.COM 	I915_WRITE(PRB0_HEAD, 0);
277111260SMiao.Chen@Sun.COM 	I915_WRITE(PRB0_TAIL, 0);
277211260SMiao.Chen@Sun.COM 
277311260SMiao.Chen@Sun.COM 
277411260SMiao.Chen@Sun.COM 	/* Initialize the ring. */
277511260SMiao.Chen@Sun.COM 	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
277611260SMiao.Chen@Sun.COM 	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
277711260SMiao.Chen@Sun.COM 
277811260SMiao.Chen@Sun.COM 	/* G45 ring initialization fails to reset head to zero */
277911260SMiao.Chen@Sun.COM 	if (head != 0) {
278011260SMiao.Chen@Sun.COM 		DRM_ERROR("Ring head not reset to zero "
278111260SMiao.Chen@Sun.COM 			  "ctl %08x head %08x tail %08x start %08x\n",
278211260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_CTL),
278311260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_HEAD),
278411260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_TAIL),
278511260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_START));
278611260SMiao.Chen@Sun.COM 		I915_WRITE(PRB0_HEAD, 0);
278711260SMiao.Chen@Sun.COM 
278811260SMiao.Chen@Sun.COM 		DRM_ERROR("Ring head forced to zero "
278911260SMiao.Chen@Sun.COM 			  "ctl %08x head %08x tail %08x start %08x\n",
279011260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_CTL),
279111260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_HEAD),
279211260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_TAIL),
279311260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_START));
279411260SMiao.Chen@Sun.COM 	}
279511260SMiao.Chen@Sun.COM 
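	/*
	 * Enable the ring: program its length, disable automatic reporting,
	 * and set the valid bit.
	 */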
279611260SMiao.Chen@Sun.COM 	I915_WRITE(PRB0_CTL,
279711260SMiao.Chen@Sun.COM 		   ((obj->size - 4096) & RING_NR_PAGES) |
279811260SMiao.Chen@Sun.COM 		   RING_NO_REPORT |
279911260SMiao.Chen@Sun.COM 		   RING_VALID);
280011260SMiao.Chen@Sun.COM 
280111260SMiao.Chen@Sun.COM 	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
280211260SMiao.Chen@Sun.COM 
280311260SMiao.Chen@Sun.COM 	/* If the head is still not zero, the ring is dead */
280411260SMiao.Chen@Sun.COM 	if (head != 0) {
280511260SMiao.Chen@Sun.COM 		DRM_ERROR("Ring initialization failed "
280611260SMiao.Chen@Sun.COM 			  "ctl %08x head %08x tail %08x start %08x\n",
280711260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_CTL),
280811260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_HEAD),
280911260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_TAIL),
281011260SMiao.Chen@Sun.COM 			  I915_READ(PRB0_START));
281111260SMiao.Chen@Sun.COM 		return EIO;
281211260SMiao.Chen@Sun.COM 	}
281311260SMiao.Chen@Sun.COM 
281411260SMiao.Chen@Sun.COM 	/* Update our cache of the ring state */
281511260SMiao.Chen@Sun.COM 	i915_kernel_lost_context(dev);
281611260SMiao.Chen@Sun.COM 
281711260SMiao.Chen@Sun.COM 	return 0;
281811260SMiao.Chen@Sun.COM }
281911260SMiao.Chen@Sun.COM 
282011260SMiao.Chen@Sun.COM static void
282111260SMiao.Chen@Sun.COM i915_gem_cleanup_ringbuffer(struct drm_device *dev)
282211260SMiao.Chen@Sun.COM {
282311260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
282411260SMiao.Chen@Sun.COM 
282511260SMiao.Chen@Sun.COM 	if (dev_priv->ring.ring_obj == NULL)
282611260SMiao.Chen@Sun.COM 		return;
282711260SMiao.Chen@Sun.COM 
282811260SMiao.Chen@Sun.COM 	drm_core_ioremapfree(&dev_priv->ring.map, dev);
282911260SMiao.Chen@Sun.COM 
283011260SMiao.Chen@Sun.COM 	i915_gem_object_unpin(dev_priv->ring.ring_obj);
283111260SMiao.Chen@Sun.COM 	drm_gem_object_unreference(dev_priv->ring.ring_obj);
283211260SMiao.Chen@Sun.COM 	dev_priv->ring.ring_obj = NULL;
283311260SMiao.Chen@Sun.COM 	(void) memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
283411260SMiao.Chen@Sun.COM 	i915_gem_cleanup_hws(dev);
283511260SMiao.Chen@Sun.COM }
283611260SMiao.Chen@Sun.COM 
283711260SMiao.Chen@Sun.COM /*ARGSUSED*/
283811260SMiao.Chen@Sun.COM int
283911260SMiao.Chen@Sun.COM i915_gem_entervt_ioctl(DRM_IOCTL_ARGS)
284011260SMiao.Chen@Sun.COM {
284111260SMiao.Chen@Sun.COM 	DRM_DEVICE;
284211260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
284311260SMiao.Chen@Sun.COM 	int ret;
284411260SMiao.Chen@Sun.COM 
284511260SMiao.Chen@Sun.COM 	if (dev->driver->use_gem != 1)
284611260SMiao.Chen@Sun.COM 		return ENODEV;
284711260SMiao.Chen@Sun.COM 
284811260SMiao.Chen@Sun.COM 	if (dev_priv->mm.wedged) {
284911260SMiao.Chen@Sun.COM 		DRM_ERROR("Reenabling wedged hardware, good luck\n");
285011260SMiao.Chen@Sun.COM 		dev_priv->mm.wedged = 0;
285111260SMiao.Chen@Sun.COM 	}
285211260SMiao.Chen@Sun.COM 	/* Set up the kernel mapping for the whole GTT aperture. */
285311260SMiao.Chen@Sun.COM 	dev_priv->mm.gtt_mapping.offset = dev->agp->agp_info.agpi_aperbase;
285411260SMiao.Chen@Sun.COM 	dev_priv->mm.gtt_mapping.size = dev->agp->agp_info.agpi_apersize;
285511260SMiao.Chen@Sun.COM 	dev_priv->mm.gtt_mapping.type = 0;
285611260SMiao.Chen@Sun.COM 	dev_priv->mm.gtt_mapping.flags = 0;
285711260SMiao.Chen@Sun.COM 	dev_priv->mm.gtt_mapping.mtrr = 0;
285811260SMiao.Chen@Sun.COM 
285911260SMiao.Chen@Sun.COM 	drm_core_ioremap(&dev_priv->mm.gtt_mapping, dev);
286011260SMiao.Chen@Sun.COM 
286111260SMiao.Chen@Sun.COM 	spin_lock(&dev->struct_mutex);
286211260SMiao.Chen@Sun.COM 	dev_priv->mm.suspended = 0;
286311260SMiao.Chen@Sun.COM 	ret = i915_gem_init_ringbuffer(dev);
286411260SMiao.Chen@Sun.COM 	if (ret != 0) {
286511260SMiao.Chen@Sun.COM 		spin_unlock(&dev->struct_mutex);
		return ret;
	}
286611260SMiao.Chen@Sun.COM 
286711260SMiao.Chen@Sun.COM 	spin_unlock(&dev->struct_mutex);
286811260SMiao.Chen@Sun.COM 
2869*11387SSurya.Prakki@Sun.COM 	(void) drm_irq_install(dev);
287011260SMiao.Chen@Sun.COM 
287111260SMiao.Chen@Sun.COM 	return 0;
287211260SMiao.Chen@Sun.COM }
287311260SMiao.Chen@Sun.COM 
287411260SMiao.Chen@Sun.COM /*ARGSUSED*/
287511260SMiao.Chen@Sun.COM int
287611260SMiao.Chen@Sun.COM i915_gem_leavevt_ioctl(DRM_IOCTL_ARGS)
287711260SMiao.Chen@Sun.COM {
287811260SMiao.Chen@Sun.COM 	DRM_DEVICE;
287911260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
288011260SMiao.Chen@Sun.COM 	int ret;
288111260SMiao.Chen@Sun.COM 
288211260SMiao.Chen@Sun.COM 	if (dev->driver->use_gem != 1)
288311260SMiao.Chen@Sun.COM 		return ENODEV;
288411260SMiao.Chen@Sun.COM 
288511260SMiao.Chen@Sun.COM 	ret = i915_gem_idle(dev, 0);
2886*11387SSurya.Prakki@Sun.COM 	(void) drm_irq_uninstall(dev);
288711260SMiao.Chen@Sun.COM 
288811260SMiao.Chen@Sun.COM 	drm_core_ioremapfree(&dev_priv->mm.gtt_mapping, dev);
288911260SMiao.Chen@Sun.COM 	return ret;
289011260SMiao.Chen@Sun.COM }
289111260SMiao.Chen@Sun.COM 
289211260SMiao.Chen@Sun.COM void
289311260SMiao.Chen@Sun.COM i915_gem_lastclose(struct drm_device *dev)
289411260SMiao.Chen@Sun.COM {
289511260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
289611260SMiao.Chen@Sun.COM 	int ret;
289711260SMiao.Chen@Sun.COM 
289811260SMiao.Chen@Sun.COM 	ret = i915_gem_idle(dev, 1);
289911260SMiao.Chen@Sun.COM 	if (ret)
290011260SMiao.Chen@Sun.COM 		DRM_ERROR("failed to idle hardware: %d\n", ret);
290111260SMiao.Chen@Sun.COM 
290211260SMiao.Chen@Sun.COM 	drm_mm_clean_ml(&dev_priv->mm.gtt_space);
290311260SMiao.Chen@Sun.COM }
290411260SMiao.Chen@Sun.COM 
290511260SMiao.Chen@Sun.COM void
290611260SMiao.Chen@Sun.COM i915_gem_load(struct drm_device *dev)
290711260SMiao.Chen@Sun.COM {
290811260SMiao.Chen@Sun.COM 	drm_i915_private_t *dev_priv = dev->dev_private;
290911260SMiao.Chen@Sun.COM 
291011260SMiao.Chen@Sun.COM 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
291111260SMiao.Chen@Sun.COM 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
291211260SMiao.Chen@Sun.COM 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
291311260SMiao.Chen@Sun.COM 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
291411260SMiao.Chen@Sun.COM 	dev_priv->mm.next_gem_seqno = 1;
291511260SMiao.Chen@Sun.COM 
291611260SMiao.Chen@Sun.COM 	i915_gem_detect_bit_6_swizzle(dev);
291711260SMiao.Chen@Sun.COM 
291811260SMiao.Chen@Sun.COM }
291911260SMiao.Chen@Sun.COM 