/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/spu.h>

static DEFINE_SPINLOCK(slice_convert_lock);


#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	char	*p, buf[16 + 3 + 16 + 1];
	int	i;

	if (!_slice_debug)
		return;
	p = buf;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
	*(p++) = ' ';
	*(p++) = '-';
	*(p++) = ' ';
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		*(p++) = (mask.high_slices & (1 << i)) ? '1' : '0';
	*(p++) = 0;

	printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

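/*
 * Build the mask of low and high slices covered (fully or partially)
 * by the address range [start, start + len).
 */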
static struct slice_mask slice_range_to_mask(unsigned long start,
					     unsigned long len)
{
	unsigned long end = start + len - 1;
	struct slice_mask ret = { 0, 0 };

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, SLICE_LOW_TOP);
		unsigned long mstart = min(start, SLICE_LOW_TOP);

		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(mstart));
	}

	if ((start + len) > SLICE_LOW_TOP)
		ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
			- (1u << GET_HIGH_SLICE_INDEX(start));

	return ret;
}

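/*
 * Return 1 if [addr, addr + len) fits below mm->task_size and does not
 * overlap any existing VMA, 0 otherwise.
 */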
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}

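/*
 * Helpers telling whether a given low or high slice contains at least
 * one VMA; slices that contain none are candidates for conversion.
 */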
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack: so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

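/* Build the mask of slices that contain no VMAs at all */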
static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret.low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return ret;

	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (!slice_high_has_vma(mm, i))
			ret.high_slices |= 1u << i;

	return ret;
}

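/*
 * Build the mask of slices whose page size is currently @psize. Each
 * slice's page size is kept as a 4-bit nibble of the low_slices_psize
 * or high_slices_psize word, i.e. slice i lives in bits [4*i, 4*i+3].
 */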
static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;
	u64 psizes;

	psizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((psizes >> (i * 4)) & 0xf) == psize)
			ret.low_slices |= 1u << i;

	psizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (((psizes >> (i * 4)) & 0xf) == psize)
			ret.high_slices |= 1u << i;

	return ret;
}

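/* Return 1 if every slice set in @mask is also set in @available */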
static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		(mask.high_slices & available.high_slices) == mask.high_slices;
}

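/*
 * Called on each CPU via on_each_cpu(): if this CPU is running the
 * target mm, refresh the paca copy of the context and rebolt the SLB.
 */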
static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}

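/*
 * Set the page size of every slice in @mask to @psize. SPU SLBs are
 * invalidated here; flushing the CPU SLBs is left to the caller.
 */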
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	/* Write the new slice psize bits */
	u64 lpsizes, hpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (mask.high_slices & (1u << i))
			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	mm->context.low_slices_psize = lpsizes;
	mm->context.high_slices_psize = hpsizes;

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}

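/*
 * Bottom-up search for a free range of @len bytes that lies entirely
 * within the slices set in @available, optionally using (and updating)
 * the mm's free area cache.
 */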
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize, int use_cache)
{
	struct vm_area_struct *vma;
	unsigned long start_addr, addr;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	if (use_cache) {
		if (len <= mm->cached_hole_size) {
			start_addr = addr = TASK_UNMAPPED_BASE;
			mm->cached_hole_size = 0;
		} else
			start_addr = addr = mm->free_area_cache;
	} else
		start_addr = addr = TASK_UNMAPPED_BASE;

full_search:
	for (;;) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		if ((TASK_SIZE - len) < addr)
			break;
		vma = find_vma(mm, addr);
		BUG_ON(vma && (addr >= vma->vm_end));

		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
			else
				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			if (use_cache)
				mm->free_area_cache = addr + len;
			return addr;
		}
		if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
	}

	/* Make sure we didn't miss any holes */
	if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}

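/*
 * Top-down variant of the search above, walking down from mm->mmap_base
 * and falling back to a bottom-up search if nothing fits.
 */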
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize, int use_cache)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	/* check if free_area_cache is useful for us */
	if (use_cache) {
		if (len <= mm->cached_hole_size) {
			mm->cached_hole_size = 0;
			mm->free_area_cache = mm->mmap_base;
		}

		/* either no address requested or can't fit in requested
		 * address hole
		 */
		addr = mm->free_area_cache;

		/* make sure it can fit in the remaining address space */
		if (addr > len) {
			addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
			mask = slice_range_to_mask(addr, len);
			if (slice_check_fit(mask, available) &&
			    slice_area_is_free(mm, addr, len))
				/* remember the address as a hint for
				 * next time
				 */
				return (mm->free_area_cache = addr);
		}
	}

	addr = mm->mmap_base;
	while (addr > len) {
		/* Go down by chunk size */
		addr = _ALIGN_DOWN(addr - len, 1ul << pshift);

		/* Check for hit with different page size */
		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
			else if (addr < (1ul << SLICE_HIGH_SHIFT))
				addr = SLICE_LOW_TOP;
			else
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || (addr + len) <= vma->vm_start) {
			/* remember the address as a hint for next time */
			if (use_cache)
				mm->free_area_cache = addr;
			return addr;
		}

		/* remember the largest hole we saw so far */
		if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	addr = slice_find_area_bottomup(mm, len, available, psize, 0);

	/*
	 * Restore the topdown base:
	 */
	if (use_cache) {
		mm->free_area_cache = mm->mmap_base;
		mm->cached_hole_size = ~0UL;
	}

	return addr;
}

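/* Dispatch to the top-down or bottom-up search variant */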
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown, int use_cache)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, use_cache);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
}

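/* In-place OR and AND-NOT helpers for combining slice masks */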
#define or_mask(dst, src)	do {			\
	(dst).low_slices |= (src).low_slices;		\
	(dst).high_slices |= (src).high_slices;		\
} while (0)

#define andnot_mask(dst, src)	do {			\
	(dst).low_slices &= ~(src).low_slices;		\
	(dst).high_slices &= ~(src).high_slices;	\
} while (0)

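/*
 * The kernel's base page size. slice_get_unmapped_area() only forces an
 * immediate SLB flush when converting slices to a page size above this.
 */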
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

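/*
 * Core of the slice allocator: find (and, if needed, convert) slices
 * for a mapping of @len bytes with page size @psize. The search policy
 * over the "good", "compat" and "free" masks is spelled out in the
 * comment block inside the function.
 */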
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown, int use_cache)
{
	struct slice_mask mask = {0, 0};
	struct slice_mask good_mask;
	struct slice_mask potential_mask = {0, 0} /* silence stupid warning */;
	struct slice_mask compat_mask = {0, 0};
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
		  addr, len, flags, topdown, use_cache);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -EINVAL;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	good_mask = slice_mask_for_size(mm, psize);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			or_mask(good_mask, compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		mask = slice_range_to_mask(addr, len);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown,
					  use_cache);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to set up
			 * anything, so return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	potential_mask = slice_mask_for_free(mm);
	or_mask(potential_mask, good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown,
				       use_cache);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown,
			       use_cache);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		or_mask(potential_mask, compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown, use_cache);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	andnot_mask(mask, good_mask);
	andnot_mask(mask, compat_mask);
	if (mask.low_slices || mask.high_slices) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

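/*
 * Standard mmap hooks, routing generic get_unmapped_area requests
 * through the slice allocator at the mm's current user page size.
 */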
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize,
				       0, 1);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize,
				       1, 1);
}

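/* Return the page size (MMU_PAGE_xxx index) of the slice containing @addr */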
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	u64 psizes;
	int index;

	if (addr < SLICE_LOW_TOP) {
		psizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}

	return (psizes >> (index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	unsigned long flags, lpsizes, hpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	mm->context.low_slices_psize = lpsizes;
	mm->context.high_slices_psize = hpsizes;

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}

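/*
 * Force the page size of the single slice containing @address, then
 * invalidate any SPU SLB entries for this mm.
 */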
void slice_set_psize(struct mm_struct *mm, unsigned long address,
		     unsigned int psize)
{
	unsigned long i, flags;
	u64 *p;

	spin_lock_irqsave(&slice_convert_lock, flags);
	if (address < SLICE_LOW_TOP) {
		i = GET_LOW_SLICE_INDEX(address);
		p = &mm->context.low_slices_psize;
	} else {
		i = GET_HIGH_SLICE_INDEX(address);
		p = &mm->context.high_slices_psize;
	}
	*p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4));
	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}

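/* Convert every slice touched by [start, start + len) to @psize */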
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask = slice_range_to_mask(start, len);

	slice_convert(mm, mask, psize);
}

/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		or_mask(available, compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		 mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mask, available);
}