/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include <drm/drm_buddy.h>
#include <drm/drm_mm.h>

#include <linux/slab.h>

/**
 * i915_sg_trim - Repack an sg_table into the minimum number of entries
 * @orig_st: The sg_table to trim.
 *
 * If coalescing left the table with fewer used entries than were
 * originally allocated, replace it with an exact-sized copy and free
 * the original.
 *
 * Return: true if @orig_st was replaced by a trimmed copy, false if no
 * trimming was needed or the new table could not be allocated.
 */
bool i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	if (orig_st->nents == orig_st->orig_nents)
		return false;

	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
		return false;

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		sg_dma_address(new_sg) = sg_dma_address(sg);
		sg_dma_len(new_sg) = sg_dma_len(sg);

		new_sg = sg_next(new_sg);
	}
	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */

	sg_free_table(orig_st);

	*orig_st = new_st;
	return true;
}

static void i915_refct_sgt_release(struct kref *ref)
{
	struct i915_refct_sgt *rsgt =
		container_of(ref, typeof(*rsgt), kref);

	sg_free_table(&rsgt->table);
	kfree(rsgt);
}

static const struct i915_refct_sgt_ops rsgt_ops = {
	.release = i915_refct_sgt_release
};

/**
 * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: The size of the underlying memory buffer.
 */
void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
{
	__i915_refct_sgt_init(rsgt, size, &rsgt_ops);
}
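
/*
 * Usage sketch (illustrative only, not called by the driver): the default
 * ops installed by i915_refct_sgt_init() kfree() the object itself on the
 * final reference drop, so they suit a standalone, kmalloced rsgt, as in
 * the helpers below. The reference is dropped with i915_refct_sgt_put()
 * from i915_scatterlist.h:
 *
 *	struct i915_refct_sgt *rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
 *
 *	if (!rsgt)
 *		return ERR_PTR(-ENOMEM);
 *	i915_refct_sgt_init(rsgt, size);
 *	...
 *	i915_refct_sgt_put(rsgt);
 */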

/**
 * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct
 * drm_mm_node
 * @node: The drm_mm_node.
 * @region_start: An offset to add to the dma addresses of the sg list.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * Create a struct sg_table, initializing it from a struct drm_mm_node,
 * taking a maximum segment length into account, splitting into segments
 * if necessary.
 *
 * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
 * error code cast to an error pointer on failure.
 */
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start,
					      u32 page_alignment)
{
	const u32 max_segment = round_down(UINT_MAX, page_alignment);
	const u32 segment_pages = max_segment >> PAGE_SHIFT;
	u64 block_size, offset, prev_end;
	struct i915_refct_sgt *rsgt;
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(!max_segment);

	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
	if (!rsgt)
		return ERR_PTR(-ENOMEM);

	i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
	st = &rsgt->table;
	if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
			   GFP_KERNEL)) {
		i915_refct_sgt_put(rsgt);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;
	prev_end = (resource_size_t)-1;
	block_size = node->size << PAGE_SHIFT;
	offset = node->start << PAGE_SHIFT;

	while (block_size) {
		u64 len;

		/*
		 * Start a new sg entry when the chunk is not contiguous with
		 * the previous one, or when the current entry has reached the
		 * maximum segment size.
		 */
		if (offset != prev_end || sg->length >= max_segment) {
			if (st->nents)
				sg = __sg_next(sg);

			sg_dma_address(sg) = region_start + offset;
			GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
					       page_alignment));
			sg_dma_len(sg) = 0;
			sg->length = 0;
			st->nents++;
		}

		/* Extend the current entry up to the segment limit. */
		len = min_t(u64, block_size, max_segment - sg->length);
		sg->length += len;
		sg_dma_len(sg) += len;

		offset += len;
		block_size -= len;

		prev_end = offset;
	}

	sg_mark_end(sg);
	i915_sg_trim(st);

	return rsgt;
}
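
/*
 * Usage sketch (illustrative only; the node, region offset and alignment
 * below are assumptions standing in for whatever the caller's memory
 * region provides). On success, rsgt->table describes the node as
 * dma-address/length segments no larger than max_segment:
 *
 *	struct i915_refct_sgt *rsgt;
 *
 *	rsgt = i915_rsgt_from_mm_node(&node, region_start, PAGE_SIZE);
 *	if (IS_ERR(rsgt))
 *		return PTR_ERR(rsgt);
 *	...
 *	i915_refct_sgt_put(rsgt);
 */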

/**
 * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct
 * drm_buddy_block list
 * @res: The struct ttm_resource, backed by a struct i915_ttm_buddy_resource.
 * @region_start: An offset to add to the dma addresses of the sg list.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * Create a struct sg_table, initializing it from a struct drm_buddy_block list,
 * taking a maximum segment length into account, splitting into segments
 * if necessary.
 *
 * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
 * error code cast to an error pointer on failure.
 */
struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start,
						     u32 page_alignment)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	const u64 size = res->num_pages << PAGE_SHIFT;
	const u32 max_segment = round_down(UINT_MAX, page_alignment);
	struct drm_buddy *mm = bman_res->mm;
	struct list_head *blocks = &bman_res->blocks;
	struct drm_buddy_block *block;
	struct i915_refct_sgt *rsgt;
	struct scatterlist *sg;
	struct sg_table *st;
	resource_size_t prev_end;

	GEM_BUG_ON(list_empty(blocks));
	GEM_BUG_ON(!max_segment);

	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
	if (!rsgt)
		return ERR_PTR(-ENOMEM);

	i915_refct_sgt_init(rsgt, size);
	st = &rsgt->table;
	if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
		i915_refct_sgt_put(rsgt);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;
	prev_end = (resource_size_t)-1;

	list_for_each_entry(block, blocks, link) {
		u64 block_size, offset;

		block_size = min_t(u64, size, drm_buddy_block_size(mm, block));
		offset = drm_buddy_block_offset(block);

		while (block_size) {
			u64 len;

			/*
			 * Start a new sg entry when the block chunk is not
			 * contiguous with the previous one, or when the
			 * current entry has reached the maximum segment size.
			 */
			if (offset != prev_end || sg->length >= max_segment) {
				if (st->nents)
					sg = __sg_next(sg);

				sg_dma_address(sg) = region_start + offset;
				GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
						       page_alignment));
				sg_dma_len(sg) = 0;
				sg->length = 0;
				st->nents++;
			}

			/* Extend the current entry up to the segment limit. */
			len = min_t(u64, block_size, max_segment - sg->length);
			sg->length += len;
			sg_dma_len(sg) += len;

			offset += len;
			block_size -= len;

			prev_end = offset;
		}
	}

	sg_mark_end(sg);
	i915_sg_trim(st);

	return rsgt;
}
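
/*
 * Usage sketch (illustrative only; the resource, region offset and
 * alignment are assumptions standing in for what a TTM-backed memory
 * region would pass in):
 *
 *	struct i915_refct_sgt *rsgt;
 *
 *	rsgt = i915_rsgt_from_buddy_resource(res, region_start, PAGE_SIZE);
 *	if (IS_ERR(rsgt))
 *		return PTR_ERR(rsgt);
 *	...
 *	i915_refct_sgt_put(rsgt);
 */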

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#endif