/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef I915_SCATTERLIST_H
#define I915_SCATTERLIST_H

#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#include "i915_gem.h"

struct drm_mm_node;
struct ttm_resource;
/*
 * Optimised SGL iterator for GEM objects
 */
static __always_inline struct sgt_iter {
	struct scatterlist *sgp;
	union {
		unsigned long pfn;
		dma_addr_t dma;
	};
	unsigned int curr;
	unsigned int max;
} __sgt_iter(struct scatterlist *sgl, bool dma) {
	struct sgt_iter s = { .sgp = sgl };

	if (dma && s.sgp && sg_dma_len(s.sgp) == 0) {
		s.sgp = NULL;
	} else if (s.sgp) {
		s.max = s.curr = s.sgp->offset;
		if (dma) {
			s.dma = sg_dma_address(s.sgp);
			s.max += sg_dma_len(s.sgp);
		} else {
			s.pfn = page_to_pfn(sg_page(s.sgp));
			s.max += s.sgp->length;
		}
	}

	return s;
}
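
/*
 * Example (illustrative sketch, not part of the i915 API; the helper name
 * is hypothetical): __sgt_iter() primes the iterator so that [curr, max)
 * spans the current entry's payload in bytes, with dma/pfn holding the
 * entry's base. The sketch below returns the device address of the first
 * mapped byte, or 0 if the table is empty or unmapped.
 */
static inline dma_addr_t i915_example_first_daddr(struct sg_table *sgt)
{
	struct sgt_iter it = __sgt_iter(sgt->sgl, true);

	/* An empty or unmapped table leaves it.sgp == NULL. */
	return it.sgp ? it.dma + it.curr : 0;
}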

static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

static inline int __sg_dma_page_count(const struct scatterlist *sg)
{
	return sg_dma_len(sg) >> PAGE_SHIFT;
}

static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
	++sg;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);
	return sg;
}

/**
 * __sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   If the entry is the last, return NULL; otherwise, step to the next
 *   element in the array (@sg + 1). If that element is a chain pointer,
 *   follow it; otherwise return the pointer to that next element.
 **/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
	return sg_is_last(sg) ? NULL : ____sg_next(sg);
}
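
/*
 * Example (illustrative sketch; the helper name is hypothetical): walk a
 * table entry by entry with __sg_next(), here summing the CPU-side
 * lengths of all entries.
 */
static inline unsigned long i915_example_total_length(struct sg_table *sgt)
{
	struct scatterlist *sg;
	unsigned long total = 0;

	for (sg = sgt->sgl; sg; sg = __sg_next(sg))
		total += sg->length;

	return total;
}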

/**
 * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
 * @__dp: Device address (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 * @__step: step size
 */
#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step)		\
	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
	     ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;	\
	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
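
/*
 * Example (illustrative sketch; "write_pte" is a hypothetical callback,
 * not an i915 API): visit every device page address of a dma-mapped
 * sg_table in PAGE_SIZE steps, e.g. to program page-table entries.
 */
static inline void i915_example_for_each_daddr(struct sg_table *sgt,
					       void (*write_pte)(dma_addr_t daddr))
{
	struct sgt_iter iter;
	dma_addr_t daddr;

	__for_each_sgt_daddr(daddr, iter, sgt, PAGE_SIZE)
		write_pte(daddr);
}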

/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp: page pointer (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 */
#define for_each_sgt_page(__pp, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
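
/*
 * Example (illustrative sketch; the helper name is hypothetical): count
 * the struct pages backing a populated sg_table.
 */
static inline unsigned long i915_example_count_pages(struct sg_table *sgt)
{
	struct sgt_iter iter;
	struct page *page;
	unsigned long n = 0;

	for_each_sgt_page(page, iter, sgt)
		n++;

	return n;
}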

/**
 * i915_sg_dma_sizes - Record the dma segment sizes of a scatterlist
 * @sg: The scatterlist
 *
 * Return: An unsigned int with segment sizes logically or'ed together.
 * A caller can use this information to determine what hardware page table
 * entry sizes can be used to map the memory represented by the scatterlist.
 */
static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg)
{
	unsigned int page_sizes;

	page_sizes = 0;
	while (sg && sg_dma_len(sg)) {
		GEM_BUG_ON(sg->offset);
		GEM_BUG_ON(!IS_ALIGNED(sg_dma_len(sg), PAGE_SIZE));
		page_sizes |= sg_dma_len(sg);
		sg = __sg_next(sg);
	}

	return page_sizes;
}
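
/*
 * Example (illustrative sketch; the helper name is hypothetical and
 * SZ_64K comes from <linux/sizes.h>): because the segment sizes are
 * or'ed together, any set bit below 64K means at least one segment is
 * not a 64K multiple, so 64K page-table entries could not be used.
 */
static inline bool i915_example_all_segments_64k(struct scatterlist *sg)
{
	unsigned int sizes = i915_sg_dma_sizes(sg);

	/* Non-empty, and no segment length contributes a bit below 64K. */
	return sizes && !(sizes & (SZ_64K - 1));
}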

static inline unsigned int i915_sg_segment_size(struct device *dev)
{
	size_t max = min_t(size_t, UINT_MAX, dma_max_mapping_size(dev));

	/*
	 * For Xen PV guests pages aren't contiguous in DMA (machine) address
	 * space. The DMA API takes care of that both in dma_alloc_* (by
	 * calling into the hypervisor to make the pages contiguous) and in
	 * dma_map_* (by bounce buffering). But i915 ignores the coherency
	 * aspects of the DMA API and thus can't cope with bounce buffering
	 * actually happening, so add a hack here to force small allocations
	 * and mappings when running in PV mode on Xen.
	 *
	 * Note this will still break if bounce buffering is required for other
	 * reasons, like confidential computing hypervisors or PCIe root ports
	 * with addressing limitations.
	 */
	if (xen_pv_domain())
		max = PAGE_SIZE;
	return round_down(max, PAGE_SIZE);
}
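
/*
 * Example (illustrative sketch; the helper name and page array are
 * assumptions of the example): use the returned limit to cap segment
 * lengths when building a scatter-gather table from an array of pages.
 */
static inline int i915_example_build_sgt(struct device *dev,
					 struct sg_table *sgt,
					 struct page **pages,
					 unsigned int n_pages)
{
	unsigned int max_segment = i915_sg_segment_size(dev);

	return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, 0,
						 (unsigned long)n_pages << PAGE_SHIFT,
						 max_segment, GFP_KERNEL);
}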

bool i915_sg_trim(struct sg_table *orig_st);

/**
 * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt
 */
struct i915_refct_sgt_ops {
	/**
	 * @release: Free the memory of the struct i915_refct_sgt
	 */
	void (*release)(struct kref *ref);
};

/**
 * struct i915_refct_sgt - A refcounted scatter-gather table
 * @kref: struct kref for refcounting
 * @table: struct sg_table holding the scatter-gather table itself. Note that
 * @table->sgl = NULL can be used to determine whether a scatter-gather table
 * is present or not.
 * @size: The size in bytes of the underlying memory buffer
 * @ops: The operations structure.
 */
struct i915_refct_sgt {
	struct kref kref;
	struct sg_table table;
	size_t size;
	const struct i915_refct_sgt_ops *ops;
};

/**
 * i915_refct_sgt_put - Put a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to put.
 */
static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt)
{
	if (rsgt)
		kref_put(&rsgt->kref, rsgt->ops->release);
}

/**
 * i915_refct_sgt_get - Get a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to get.
 */
static inline struct i915_refct_sgt *
i915_refct_sgt_get(struct i915_refct_sgt *rsgt)
{
	kref_get(&rsgt->kref);
	return rsgt;
}
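
/*
 * Example (illustrative sketch; the helper name is hypothetical): hold a
 * reference across a window where the sg-table must stay alive, then
 * drop it again. The last put invokes the table's release hook.
 */
static inline void i915_example_use_rsgt(struct i915_refct_sgt *rsgt)
{
	rsgt = i915_refct_sgt_get(rsgt);

	/* ... use rsgt->table while the reference pins it ... */

	i915_refct_sgt_put(rsgt);
}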

/**
 * __i915_refct_sgt_init - Initialize a refcounted sg-list with a custom
 * operations structure
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: Size in bytes of the underlying memory buffer.
 * @ops: A customized operations structure in case the refcounted sg-list
 * is embedded into another structure.
 */
static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
					 size_t size,
					 const struct i915_refct_sgt_ops *ops)
{
	kref_init(&rsgt->kref);
	rsgt->table.sgl = NULL;
	rsgt->size = size;
	rsgt->ops = ops;
}
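
/*
 * Example (illustrative sketch of the embedded use case; the containing
 * struct, its members and the release hook are all hypothetical):
 *
 *	struct example_buf {
 *		struct i915_refct_sgt rsgt;
 *		void *backing;
 *	};
 *
 *	static void example_buf_release(struct kref *ref)
 *	{
 *		struct example_buf *buf =
 *			container_of(ref, struct example_buf, rsgt.kref);
 *
 *		sg_free_table(&buf->rsgt.table);
 *		kfree(buf->backing);
 *		kfree(buf);
 *	}
 *
 *	static const struct i915_refct_sgt_ops example_buf_ops = {
 *		.release = example_buf_release,
 *	};
 *
 * A new example_buf would then call
 * __i915_refct_sgt_init(&buf->rsgt, size, &example_buf_ops), and the
 * last i915_refct_sgt_put() frees the whole containing object at once.
 */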

void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);

struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start,
					      u32 page_alignment);

struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start,
						     u32 page_alignment);

#endif