/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <linux/mmu_notifier.h>

#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;
struct intel_memory_region;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
	u32 handle;
};

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
/* Skip the shrinker management in set_pages/unset_pages */
#define I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST BIT(2)
#define I915_GEM_OBJECT_IS_PROXY BIT(3)
#define I915_GEM_OBJECT_NO_MMAP BIT(4)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	int (*truncate)(struct drm_i915_gem_object *obj);
	/**
	 * shrink - Perform further backend specific actions to facilitate
	 * shrinking.
	 * @obj: The gem object
	 * @flags: Extra flags to control shrinking behaviour in the backend
	 *
	 * Possible values for @flags:
	 *
	 * I915_GEM_OBJECT_SHRINK_WRITEBACK - Try to perform writeback of the
	 * backing pages, if supported.
	 *
	 * I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT - Don't wait for the object to
	 * idle. Active objects can be considered later. The TTM backend for
	 * example might have async migrations going on, which don't use any
	 * i915_vma to track the active GTT binding, and hence having an
	 * unbound object might not be enough.
	 */
#define I915_GEM_OBJECT_SHRINK_WRITEBACK BIT(0)
#define I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT BIT(1)
	int (*shrink)(struct drm_i915_gem_object *obj, unsigned int flags);

	int (*pread)(struct drm_i915_gem_object *obj,
		     const struct drm_i915_gem_pread *arg);
	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);
	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
	void (*unmap_virtual)(struct drm_i915_gem_object *obj);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);

	/**
	 * adjust_lru - notify that the madvise value was updated
	 * @obj: The gem object
	 *
	 * The madvise value may have been updated, or the object may have
	 * been recently referenced, so act accordingly (perhaps changing an
	 * LRU list, etc).
	 */
	void (*adjust_lru)(struct drm_i915_gem_object *obj);

	/**
	 * delayed_free - Override the default delayed free implementation
	 */
	void (*delayed_free)(struct drm_i915_gem_object *obj);

	/**
	 * migrate - Migrate object to a different region either for
	 * pinning or for as long as the object lock is held.
	 */
	int (*migrate)(struct drm_i915_gem_object *obj,
		       struct intel_memory_region *mr);

	void (*release)(struct drm_i915_gem_object *obj);

	const struct vm_operations_struct *mmap_ops;
	const char *name; /* friendly name for debug, e.g. lockdep classes */
};
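
/*
 * A minimal sketch (illustrative only, not part of the driver) of how a
 * backing-store backend fills in the ops table above: .name gives the
 * lockdep/debug name, .get_pages builds the sg_table of backing pages and
 * .put_pages releases it again. Every "my_backend_*" symbol below is a
 * hypothetical placeholder; the real backends (shmem, stolen, ttm, userptr)
 * each supply their own callbacks.
 *
 *	static const struct drm_i915_gem_object_ops my_backend_ops = {
 *		.name      = "my-backend",
 *		.flags     = I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_backend_get_pages,
 *		.put_pages = my_backend_put_pages,
 *		.release   = my_backend_release,
 *	};
 */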

/**
 * enum i915_cache_level - The supported GTT caching values for system memory
 * pages.
 *
 * These translate to some special GTT PTE bits when binding pages into some
 * address space. It also determines whether an object, or rather its pages,
 * are coherent with the GPU when also reading or writing through the CPU
 * cache with those pages.
 *
 * Userspace can also control this through struct drm_i915_gem_caching.
 */
enum i915_cache_level {
	/**
	 * @I915_CACHE_NONE:
	 *
	 * GPU access is not coherent with the CPU cache. If the cache is dirty
	 * and we need the underlying pages to be coherent with some later GPU
	 * access then we need to manually flush the pages.
	 *
	 * On shared LLC platforms reads and writes through the CPU cache are
	 * still coherent even with this setting. See also
	 * &drm_i915_gem_object.cache_coherent for more details. Due to this we
	 * should only ever use uncached for scanout surfaces, otherwise we end
	 * up over-flushing in some places.
	 *
	 * This is the default on non-LLC platforms.
	 */
	I915_CACHE_NONE = 0,
	/**
	 * @I915_CACHE_LLC:
	 *
	 * GPU access is coherent with the CPU cache. If the cache is dirty,
	 * then the GPU will ensure that access remains coherent, when both
	 * reading and writing through the CPU cache. GPU writes can dirty the
	 * CPU cache.
	 *
	 * Not used for scanout surfaces.
	 *
	 * Applies to both platforms with shared LLC(HAS_LLC), and snooping
	 * based platforms(HAS_SNOOP).
	 *
	 * This is the default on shared LLC platforms. The only exception is
	 * scanout objects, where the display engine is not coherent with the
	 * CPU cache. For such objects I915_CACHE_NONE or I915_CACHE_WT is
	 * automatically applied by the kernel in pin_for_display, if userspace
	 * has not done so already.
	 */
	I915_CACHE_LLC,
	/**
	 * @I915_CACHE_L3_LLC:
	 *
	 * Explicitly enable the Gfx L3 cache, with coherent LLC.
	 *
	 * The Gfx L3 sits between the domain specific caches, e.g.
	 * sampler/render caches, and the larger LLC. LLC is coherent with the
	 * GPU, but L3 is only visible to the GPU, so likely needs to be flushed
	 * when the workload completes.
	 *
	 * Not used for scanout surfaces.
	 *
	 * Only exposed on some gen7 + GGTT. More recent hardware has dropped
	 * this explicit setting, where it should now be enabled by default.
	 */
	I915_CACHE_L3_LLC,
	/**
	 * @I915_CACHE_WT:
	 *
	 * Write-through. Used for scanout surfaces.
	 *
	 * The GPU can utilise the caches, while still having the display engine
	 * be coherent with GPU writes, as a result we don't need to flush the
	 * CPU caches when moving out of the render domain. This is the default
	 * setting chosen by the kernel, if supported by the HW, otherwise we
	 * fallback to I915_CACHE_NONE. On the CPU side writes through the CPU
	 * cache still need to be flushed, to remain coherent with the display
	 * engine.
	 */
	I915_CACHE_WT,
};
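
/*
 * Illustrative sketch of the userspace control mentioned above (not kernel
 * code, and assuming the classic set-caching uAPI is available on the
 * platform): the caching level of a buffer object can be changed with the
 * DRM_IOCTL_I915_GEM_SET_CACHING ioctl, e.g.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle  = bo_handle,
 *		.caching = I915_CACHING_NONE,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */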

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
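
/*
 * The FORCE_* variants are simply the plain mapping types with
 * I915_MAP_OVERRIDE or'ed in: the override bit requests that any existing
 * mapping of a different type be replaced, while masking the bit off again
 * recovers the underlying caching mode. A minimal sketch, where base ends
 * up as I915_MAP_WC and force as true:
 *
 *	enum i915_map_type type = I915_MAP_FORCE_WC;
 *	bool force = type & I915_MAP_OVERRIDE;
 *	enum i915_map_type base = type & ~I915_MAP_OVERRIDE;
 */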

enum i915_mmap_type {
	I915_MMAP_TYPE_GTT = 0,
	I915_MMAP_TYPE_WC,
	I915_MMAP_TYPE_WB,
	I915_MMAP_TYPE_UC,
	I915_MMAP_TYPE_FIXED,
};

struct i915_mmap_offset {
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	enum i915_mmap_type mmap_type;

	struct rb_node offset;
};

struct i915_gem_object_page_iter {
	struct scatterlist *sg_pos;
	unsigned int sg_idx; /* in pages, but 32bit eek! */

	struct radix_tree_root radix;
	struct mutex lock; /* protects this cache */
};

struct drm_i915_gem_object {
	/*
	 * We might have reason to revisit the below since it wastes
	 * a lot of space for non-ttm gem objects.
	 * In any case, always use the accessors for the ttm_buffer_object
	 * when accessing it.
	 */
	union {
		struct drm_gem_object base;
		struct ttm_buffer_object __do_not_access;
	};

	const struct drm_i915_gem_object_ops *ops;

	struct {
		/**
		 * @vma.lock: protect the list/tree of vmas
		 */
		spinlock_t lock;

		/**
		 * @vma.list: List of VMAs backed by this object
		 *
		 * The VMAs on this list are ordered by type, all GGTT vma are
		 * placed at the head and all ppGTT vma are placed at the tail.
		 * The different types of GGTT vma are unordered between
		 * themselves, use the @vma.tree (which has a defined order
		 * between all VMA) to quickly find an exact match.
		 */
		struct list_head list;

		/**
		 * @vma.tree: Ordered tree of VMAs backed by this object
		 *
		 * All VMA created for this object are placed in the @vma.tree
		 * for fast retrieval via a binary search in
		 * i915_vma_instance(). They are also added to @vma.list for
		 * easy iteration.
		 */
		struct rb_root tree;
	} vma;

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMA from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;
	spinlock_t lut_lock; /* guards lut_list */

	/**
	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
	 *
	 * When we lock this object through i915_gem_object_lock() with a
	 * context, we add it to the list to ensure we can unlock everything
	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
	 */
	struct list_head obj_link;
	/**
	 * @shares_resv_from: The object shares the resv from this vm.
	 */
	struct i915_address_space *shares_resv_from;

	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct {
		spinlock_t lock; /* Protects access to mmo offsets */
		struct rb_root offsets;
	} mmo;

	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_CPU_CLEAR BIT(2)
#define I915_BO_ALLOC_USER BIT(3)
/* Object is allowed to lose its contents on suspend / resume, even if pinned */
#define I915_BO_ALLOC_PM_VOLATILE BIT(4)
/* Object needs to be restored early using memcpy during resume */
#define I915_BO_ALLOC_PM_EARLY BIT(5)
/*
 * Object is likely never accessed by the CPU. This will prioritise the BO to be
 * allocated in the non-mappable portion of lmem. This is merely a hint, and if
 * dealing with userspace objects the CPU fault handler is free to ignore this.
 */
#define I915_BO_ALLOC_GPU_ONLY BIT(6)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
			     I915_BO_ALLOC_VOLATILE | \
			     I915_BO_ALLOC_CPU_CLEAR | \
			     I915_BO_ALLOC_USER | \
			     I915_BO_ALLOC_PM_VOLATILE | \
			     I915_BO_ALLOC_PM_EARLY | \
			     I915_BO_ALLOC_GPU_ONLY)
#define I915_BO_READONLY BIT(7)
#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(9)
	/**
	 * @mem_flags - Mutable placement-related flags
	 *
	 * These are flags that indicate specifics of the memory region
	 * the object is currently in. As such they are only stable
	 * either under the object lock or if the object is pinned.
	 */
	unsigned int mem_flags;
#define I915_BO_FLAG_STRUCT_PAGE BIT(0) /* Object backed by struct pages */
#define I915_BO_FLAG_IOMEM BIT(1) /* Object backed by IO memory */
	/**
	 * @cache_level: The desired GTT caching level.
	 *
	 * See enum i915_cache_level for possible values, along with what
	 * each does.
	 */
	unsigned int cache_level:3;
	/**
	 * @cache_coherent:
	 *
	 * Track whether the pages are coherent with the GPU if reading or
	 * writing through the CPU caches. This largely depends on the
	 * @cache_level setting.
	 *
	 * On platforms which don't have the shared LLC(HAS_SNOOP), like on Atom
	 * platforms, coherency must be explicitly requested with some special
	 * GTT caching bits(see enum i915_cache_level). When enabling coherency
	 * it does come at a performance and power cost on such platforms. On
	 * the flip side the kernel does not need to manually flush any buffers
	 * which need to be coherent with the GPU, if the object is not coherent
	 * i.e @cache_coherent is zero.
	 *
	 * On platforms that share the LLC with the CPU(HAS_LLC), all GT memory
	 * access will automatically snoop the CPU caches(even with CACHE_NONE).
	 * The one exception is when dealing with the display engine, like with
	 * scanout surfaces. To handle this the kernel will always flush the
	 * surface out of the CPU caches when preparing it for scanout. Also
	 * note that since scanout surfaces are only ever read by the display
	 * engine we only need to care about flushing any writes through the CPU
	 * cache, reads on the other hand will always be coherent.
	 *
	 * Something strange here is why @cache_coherent is not a simple
	 * boolean, i.e coherent vs non-coherent. The reasoning for this is back
	 * to the display engine not being fully coherent. As a result scanout
	 * surfaces will either be marked as I915_CACHE_NONE or I915_CACHE_WT.
	 * In the case of seeing I915_CACHE_NONE the kernel makes the assumption
	 * that this is likely a scanout surface, and will set @cache_coherent
	 * as only I915_BO_CACHE_COHERENT_FOR_READ, on platforms with the shared
	 * LLC. The kernel uses this to always flush writes through the CPU
	 * cache as early as possible, where it can, in effect keeping
	 * @cache_dirty clean, so we can potentially avoid stalling when
	 * flushing the surface just before doing the scanout. This does mean
	 * we might unnecessarily flush non-scanout objects in some places, but
	 * the default assumption is that all normal objects should be using
	 * I915_CACHE_LLC, at least on platforms with the shared LLC.
	 *
	 * Supported values:
	 *
	 * I915_BO_CACHE_COHERENT_FOR_READ:
	 *
	 * On shared LLC platforms, we use this for special scanout surfaces,
	 * where the display engine is not coherent with the CPU cache. As such
	 * we need to ensure we flush any writes before doing the scanout. As an
	 * optimisation we try to flush any writes as early as possible to avoid
	 * stalling later.
	 *
	 * Thus for scanout surfaces using I915_CACHE_NONE, on shared LLC
	 * platforms, we use:
	 *
	 * cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ
	 *
	 * While for normal objects that are fully coherent, including special
	 * scanout surfaces marked as I915_CACHE_WT, we use:
	 *
	 * cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ |
	 *		    I915_BO_CACHE_COHERENT_FOR_WRITE
	 *
	 * And then for objects that are not coherent at all we use:
	 *
	 * cache_coherent = 0
	 *
	 * I915_BO_CACHE_COHERENT_FOR_WRITE:
	 *
	 * When writing through the CPU cache, the GPU is still coherent. Note
	 * that this also implies I915_BO_CACHE_COHERENT_FOR_READ.
	 */
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_coherent:2;

	/**
	 * @cache_dirty:
	 *
	 * Track if we are dirty with writes through the CPU cache for this
	 * object. As a result reading directly from main memory might yield
	 * stale data.
	 *
	 * This also ties into whether the kernel is tracking the object as
	 * coherent with the GPU, as per @cache_coherent, as it determines if
	 * flushing might be needed at various points.
	 *
	 * Another part of @cache_dirty is managing flushing when first
	 * acquiring the pages for system memory, at this point the pages are
	 * considered foreign, so the default assumption is that the cache is
	 * dirty, for example the page zeroing done by the kernel might leave
	 * writes through the CPU cache, or swapping-in, while the actual data in
	 * main memory is potentially stale. Note that this is a potential
	 * security issue when dealing with userspace objects and zeroing. Now,
	 * whether we actually need to apply the big sledgehammer of flushing all
	 * the pages on acquire depends on if @cache_coherent is marked as
	 * I915_BO_CACHE_COHERENT_FOR_WRITE, i.e that the GPU will be coherent
	 * for both reads and writes through the CPU cache.
	 *
	 * Note that on shared LLC platforms we still apply the heavy flush for
	 * I915_CACHE_NONE objects, under the assumption that this is going to
	 * be used for scanout.
	 *
	 * Update: On some hardware there is now also the 'Bypass LLC' MOCS
	 * entry, which defeats our @cache_coherent tracking, since userspace
	 * can freely bypass the CPU cache when touching the pages with the GPU,
	 * where the kernel is completely unaware. On such platforms we need to
	 * apply the sledgehammer-on-acquire regardless of the @cache_coherent.
	 *
	 * Special care is taken on non-LLC platforms, to prevent potential
	 * information leak. The driver currently ensures:
	 *
	 *   1. All userspace objects, by default, have @cache_level set as
	 *   I915_CACHE_NONE. The only exception is userptr objects, where we
	 *   instead force I915_CACHE_LLC, but we also don't allow userspace to
	 *   ever change the @cache_level for such objects. Another special case
	 *   is dma-buf, which doesn't rely on @cache_dirty, but there we
	 *   always do a forced flush when acquiring the pages, if there is a
	 *   chance that the pages can be read directly from main memory with
	 *   the GPU.
	 *
	 *   2. All I915_CACHE_NONE objects have @cache_dirty initially true.
	 *
	 *   3. All swapped-out objects(i.e shmem) have @cache_dirty set to
	 *   true.
	 *
	 *   4. The @cache_dirty is never freely reset before the initial
	 *   flush, even if userspace adjusts the @cache_level through the
	 *   i915_gem_set_caching_ioctl.
	 *
	 *   5. All @cache_dirty objects(including swapped-in) are initially
	 *   flushed with a synchronous call to drm_clflush_sg in
	 *   __i915_gem_object_set_pages. The @cache_dirty can be freely reset
	 *   at this point. All further asynchronous clflushes are never security
	 *   critical, i.e userspace is free to race against itself.
	 */
	unsigned int cache_dirty:1;

	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	struct intel_frontbuffer __rcu *frontbuffer;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
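
/*
 * Tiling mode and fence stride share the single tiling_and_stride word:
 * the tiling mode lives in the low bits covered by TILING_MASK, while the
 * stride occupies the remaining high bits covered by STRIDE_MASK (which is
 * why the minimum fenceable stride is a power of two). A sketch of how the
 * two halves are pulled apart again:
 *
 *	unsigned int tiling = obj->tiling_and_stride & TILING_MASK;
 *	unsigned int stride = obj->tiling_and_stride & STRIDE_MASK;
 */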

	struct {
		/*
		 * Protects the pages and their use. Do not use directly, but
		 * instead go through the pin/unpin interfaces.
		 */
		atomic_t pages_pin_count;

		/**
		 * @shrink_pin: Prevents the pages from being made visible to
		 * the shrinker, while the shrink_pin is non-zero. Most users
		 * should pretty much never have to care about this, outside of
		 * some special use cases.
		 *
		 * By default most objects will start out as visible to the
		 * shrinker(if I915_GEM_OBJECT_IS_SHRINKABLE) as soon as the
		 * backing pages are attached to the object, like in
		 * __i915_gem_object_set_pages(). They will then be removed from
		 * the shrinker list once the pages are released.
		 *
		 * The @shrink_pin is incremented by calling
		 * i915_gem_object_make_unshrinkable(), which will also remove
		 * the object from the shrinker list, if the pin count was zero.
		 *
		 * Callers will then typically call
		 * i915_gem_object_make_shrinkable() or
		 * i915_gem_object_make_purgeable() to decrement the pin count,
		 * and make the pages visible again.
		 */
		atomic_t shrink_pin;

		/**
		 * @ttm_shrinkable: True when the object is using shmem pages
		 * underneath. Protected by the object lock.
		 */
		bool ttm_shrinkable;

		/**
		 * Priority list of potential placements for this object.
		 */
		struct intel_memory_region **placements;
		int n_placements;

		/**
		 * Memory region for this object.
		 */
		struct intel_memory_region *region;

		/**
		 * Memory manager resource allocated for this object. Only
		 * needed for the mock region.
		 */
		struct ttm_resource *res;

		/**
		 * Element within memory_region->objects or region->purgeable
		 * if the object is marked as DONTNEED. Access is protected by
		 * region->obj_lock.
		 */
		struct list_head region_link;

		struct i915_refct_sgt *rsgt;
		struct sg_table *pages;
		void *mapping;

		struct i915_page_sizes page_sizes;

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter get_page;
		struct i915_gem_object_page_iter get_dma_page;

		/**
		 * Element within i915->mm.shrink_list or i915->mm.purge_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		u32 tlb;
	} mm;

	struct {
		struct i915_refct_sgt *cached_io_rsgt;
		struct i915_gem_object_page_iter get_io_page;
		struct drm_i915_gem_object *backup;
		bool created:1;
	} ttm;

	/*
	 * Record which PXP key instance this object was created against (if
	 * any), so we can use it to determine if the encryption is valid by
	 * comparing against the current key instance.
	 */
	u32 pxp_key_instance;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
#ifdef CONFIG_MMU_NOTIFIER
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned long notifier_seq;

			struct mmu_interval_notifier notifier;
			struct page **pvec;
			int page_ref;
		} userptr;
#endif

		struct drm_mm_node *stolen;

		resource_size_t bo_offset;

		unsigned long scratch;
		u64 encode;

		void *gvt_info;
	};
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

#endif