/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
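
/*
 * Example (illustrative sketch; 'sgl' and 'nents' are hypothetical):
 * walking a possibly-chained list by hand with sg_next().  In most
 * code the for_each_sg() helper is the preferred idiom.
 *
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	for (sg = sgl, i = 0; sg && i < nents; sg = sg_next(sg), i++)
 *		pr_debug("sg %u: %u bytes\n", i, sg->length);
 */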

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should be used sparingly: it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);
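
/*
 * Example (illustrative sketch; 'sgl' and 'nents' are hypothetical):
 * peeking at the final entry of a mapped list.
 *
 *	struct scatterlist *last = sg_last(sgl, nents);
 *
 *	pr_debug("last entry covers %u bytes\n", last->length);
 */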

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
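
/*
 * Example (illustrative sketch; 'page' is hypothetical): preparing a
 * small on-stack array before pointing its entries at pages.
 *
 *	struct scatterlist sgl[4];
 *
 *	sg_init_table(sgl, ARRAY_SIZE(sgl));
 *	sg_set_page(&sgl[0], page, PAGE_SIZE, 0);
 */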

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
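
/*
 * Example (illustrative sketch; 'len' is hypothetical): describing a
 * single kmalloc'ed buffer to code that expects a scatterlist.
 *
 *	struct scatterlist sg;
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	if (buf)
 *		sg_init_one(&sg, buf, len);
 */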

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (i.e. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		/*
		 * Only really needed for mempool backed sg allocations (like
		 * SCSI).  A possible improvement here would be to pass the
		 * table pointer into the allocator and let that clear these
		 * flags.
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
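
/*
 * Example (illustrative sketch): plugging a custom allocator pair into
 * __sg_alloc_table(), much like SCSI does with its mempools.  The
 * 'my_sg_alloc'/'my_sg_free' helpers are hypothetical; a real pair
 * must honour the same @max_ents sizing convention as
 * sg_kmalloc()/sg_kfree().
 *
 *	static struct scatterlist *my_sg_alloc(unsigned int nents,
 *					       gfp_t gfp_mask)
 *	{
 *		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
 *	}
 *
 *	static void my_sg_free(struct scatterlist *sg, unsigned int nents)
 *	{
 *		kfree(sg);
 *	}
 *
 *	ret = __sg_alloc_table(&table, nents, SG_MAX_SINGLE_ALLOC,
 *			       GFP_KERNEL, my_sg_alloc);
 *	if (ret)
 *		__sg_free_table(&table, SG_MAX_SINGLE_ALLOC, my_sg_free);
 */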

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
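
/*
 * Example (illustrative sketch; 'nents' is hypothetical): the common
 * allocate/fill/free cycle.
 *
 *	struct sg_table table;
 *	int ret;
 *
 *	ret = sg_alloc_table(&table, nents, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	... fill table.sgl with sg_set_page() or sg_set_buf() ...
 *	sg_free_table(&table);
 */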

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	miter->__sg = sgl;
	miter->__nents = nents;
	miter->__offset = 0;
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter@ to the next mapping.  @miter@ should have been
 *   started using sg_miter_start().  On successful return,
 *   @miter@->page, @miter@->addr and @miter@->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
 *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary.  __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
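
/*
 * Example (illustrative sketch; 'sgl' and 'nents' are hypothetical):
 * zeroing every byte an sg list points at, one kmap'ed page fragment
 * at a time.  Without SG_MITER_ATOMIC this may sleep, so it must run
 * in process context.
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
 *	while (sg_miter_next(&miter))
 *		memset(miter.addr, 0, miter.length);
 *	sg_miter_stop(&miter);
 */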

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources
 *   (kmap) need to be released during iteration.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC is set.  Don't care otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON(!irqs_disabled());
			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
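
/*
 * Example (illustrative sketch; 'process', 'must_sleep' and
 * 'do_sleeping_work' are hypothetical): dropping the mapping
 * mid-iteration so a sleeping call can be made, then resuming with the
 * next sg_miter_next().
 *
 *	while (sg_miter_next(&miter)) {
 *		process(miter.addr, miter.length);
 *		if (must_sleep()) {
 *			sg_miter_stop(&miter);
 *			do_sleeping_work();
 *		}
 *	}
 *	sg_miter_stop(&miter);
 */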

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @to_buffer:		 transfer direction (non-zero == from an sg list to a
 *			 buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
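
/*
 * Example (illustrative sketch; 'bounce' and 'buflen' are
 * hypothetical): draining an sg list into a bounce buffer and
 * refilling it afterwards.
 *
 *	size_t copied;
 *
 *	copied = sg_copy_to_buffer(sgl, nents, bounce, buflen);
 *	... modify bounce ...
 *	copied = sg_copy_from_buffer(sgl, nents, bounce, buflen);
 */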