/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#if WATCH_LISTS
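/*
 * i915_verify_lists - sanity check the GEM object lists
 *
 * Walks the render active, flushing, gpu-write, inactive and pinned lists
 * and checks that every object on each list still satisfies that list's
 * invariants.  Returns the number of inconsistencies found, latching the
 * result in 'warned' so that a broken state is only reported once.
 * Debug-only helper, built when WATCH_LISTS is non-zero; callers typically
 * assert with WARN_ON(i915_verify_lists(dev)) at list-transition points.
 */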
int
i915_verify_lists(struct drm_device *dev)
{
	static int warned;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int err = 0;

	if (warned)
		return 0;

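	/* Objects on the render ring's active list must still be referenced,
	 * be marked active, and be readable by the GPU.
	 */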
	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed render active %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid render active %p (a %d r %x)\n",
				  obj,
				  obj->active,
				  obj->base.read_domains);
			err++;
		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
				  obj,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

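	/* Objects on the flushing list are active with a pending GPU write,
	 * so they must also appear on the gpu_write_list.
	 */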
	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed flushing %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
			   list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

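	/* Objects awaiting a GPU write flush must be active and have a GPU
	 * write domain set.
	 */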
	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed gpu write %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

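	/* Inactive objects must be idle: not pinned, not active, and with no
	 * outstanding GPU write domain.
	 */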
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed inactive %p\n", obj);
			err++;
			break;
		} else if (obj->pin_count || obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
				  obj,
				  obj->pin_count, obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

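	/* Pinned objects are the inverse: they must hold a pin reference but
	 * must otherwise be just as idle.
	 */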
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed pinned %p\n", obj);
			err++;
			break;
		} else if (!obj->pin_count || obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
			DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
				  obj,
				  obj->pin_count, obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	/* Latch the first failure so we only warn once. */
	return warned = err;
}
#endif /* WATCH_LISTS */

#if WATCH_COHERENCY
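/*
 * i915_gem_object_check_coherency - compare CPU and GTT views of an object
 *
 * Reads every dword of the object twice, once through the CPU mapping of
 * the backing pages and once through the uncached GTT mapping, and logs
 * any mismatch, giving up after several incoherent dwords have been
 * reported.  Debug-only helper, built when WATCH_COHERENCY is non-zero.
 */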
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
	struct drm_device *dev = obj->base.dev;
	int page;
	uint32_t *gtt_mapping;
	uint32_t *backing_map = NULL;
	int bad_count = 0;

	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
		 __func__, obj, obj->gtt_offset, handle,
		 obj->base.size / 1024);

	gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
	if (gtt_mapping == NULL) {
		DRM_ERROR("failed to map GTT space\n");
		return;
	}

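	/* Compare each backing page against the same page read back through
	 * the uncached GTT mapping; a mismatch means a CPU cacheline has not
	 * reached memory.
	 */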
	for (page = 0; page < obj->base.size / PAGE_SIZE; page++) {
		int i;

		backing_map = kmap_atomic(obj->pages[page], KM_USER0);

		if (backing_map == NULL) {
			DRM_ERROR("failed to map backing page\n");
			goto out;
		}

		for (i = 0; i < PAGE_SIZE / 4; i++) {
			uint32_t cpuval = backing_map[i];
			uint32_t gttval = readl(gtt_mapping +
						page * 1024 + i);

			if (cpuval != gttval) {
				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
					 "0x%08x vs 0x%08x\n",
					 (int)(obj->gtt_offset +
					       page * PAGE_SIZE + i * 4),
					 cpuval, gttval);
				if (bad_count++ >= 8) {
					DRM_INFO("...\n");
					goto out;
				}
			}
		}
		kunmap_atomic(backing_map, KM_USER0);
		backing_map = NULL;
	}


 out:
	if (backing_map != NULL)
		kunmap_atomic(backing_map, KM_USER0);
	iounmap(gtt_mapping);

	/* give syslog time to catch up */
	msleep(1);

	/* Directly flush the object, since we just loaded values with the CPU
	 * from the backing pages and we don't want to disturb the cache
	 * management that we're trying to observe.
	 */
	i915_gem_clflush_object(obj);
}
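
/*
 * A sketch of intended use (this call site is hypothetical): after issuing
 * CPU writes and a clflush, one might call
 *
 *	i915_gem_object_check_coherency(obj, handle);
 *
 * to verify the writes actually reached memory before letting the GPU
 * sample the object.
 */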
#endif /* WATCH_COHERENCY */