Lines Matching refs:slab_obj_ptr

123 struct slab_obj *slab_obj_ptr = slab_pool->cache_pool_entry; in slab_destroy() local
131 while (!list_empty(&slab_obj_ptr->list)) in slab_destroy()
133 tmp_slab_obj = slab_obj_ptr; in slab_destroy()
135 slab_obj_ptr = container_of(list_next(&slab_obj_ptr->list), struct slab_obj, list); in slab_destroy()
148 kfree(slab_obj_ptr->bmp); in slab_destroy()
149 page_clean(slab_obj_ptr->page); in slab_destroy()
150 free_pages(slab_obj_ptr->page, 1); in slab_destroy()
151 kfree(slab_obj_ptr); in slab_destroy()
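
The matches in slab_destroy() trace one pattern: start at slab_pool->cache_pool_entry, hop to the next slab with container_of(list_next(...)), and release each slab's bitmap, pages, and descriptor until the ring is empty. Below is a minimal userspace sketch of that container_of-over-an-embedded-list walk; the list helpers and the obj struct are simplified stand-ins for the kernel's own types, not the kernel code itself.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal intrusive list, mimicking the kernel-style list the matches rely on. */
    struct list_head { struct list_head *prev, *next; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_init(struct list_head *l) { l->prev = l->next = l; }

    static void list_add(struct list_head *entry, struct list_head *node)
    {
        node->next = entry->next;
        node->prev = entry;
        entry->next->prev = node;
        entry->next = node;
    }

    /* Simplified stand-in for struct slab_obj: just the embedded list plus an id. */
    struct obj { int id; struct list_head list; };

    int main(void)
    {
        struct obj *entry = malloc(sizeof *entry);   /* plays the role of cache_pool_entry */
        entry->id = 0;
        list_init(&entry->list);

        for (int i = 1; i <= 3; ++i) {               /* a few extra slabs on the ring */
            struct obj *o = malloc(sizeof *o);
            o->id = i;
            list_add(&entry->list, &o->list);
        }

        /* The slab_destroy() pattern: while the ring still has other nodes,
         * remember the current node, step to the next one via container_of(),
         * then unlink the remembered node and free it. */
        struct obj *cur = entry;
        while (cur->list.next != &cur->list) {       /* i.e. !list_empty(&cur->list) */
            struct obj *tmp = cur;
            cur = container_of(cur->list.next, struct obj, list);
            tmp->list.prev->next = tmp->list.next;   /* unlink, as list_del() would */
            tmp->list.next->prev = tmp->list.prev;
            printf("freeing obj %d\n", tmp->id);
            free(tmp);
        }
        printf("freeing last obj %d\n", cur->id);    /* the final slab left on the ring */
        free(cur);
        return 0;
    }

In the real function the per-slab cleanup is the kfree(bmp) / page_clean() / free_pages() / kfree() sequence visible at 148-151; here it collapses to a single free().
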
165 struct slab_obj *slab_obj_ptr = slab_pool->cache_pool_entry; in slab_malloc() local
216 slab_obj_ptr = tmp_slab_obj; in slab_malloc()
223 if (slab_obj_ptr->count_free == 0) in slab_malloc()
225 slab_obj_ptr = container_of(list_next(&slab_obj_ptr->list), struct slab_obj, list); in slab_malloc()
229 for (int i = 0; i < slab_obj_ptr->bmp_count; ++i) in slab_malloc()
232 if (*(slab_obj_ptr->bmp + (i >> 6)) == 0xffffffffffffffffUL) in slab_malloc()
240 if ((*(slab_obj_ptr->bmp + (i >> 6)) & (1UL << tmp_md)) == 0) in slab_malloc()
243 *(slab_obj_ptr->bmp + (i >> 6)) |= (1UL << tmp_md); in slab_malloc()
246 ++(slab_obj_ptr->count_using); in slab_malloc()
247 --(slab_obj_ptr->count_free); in slab_malloc()
255 … return slab_pool->constructor((char *)slab_obj_ptr->vaddr + slab_pool->size * i, arg); in slab_malloc()
259 return (void *)((char *)slab_obj_ptr->vaddr + slab_pool->size * i); in slab_malloc()
263 } while (slab_obj_ptr != slab_pool->cache_pool_entry); in slab_malloc()
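
The slab_malloc() matches outline the allocation fast path: advance along the slab ring while count_free is 0, then scan that slab's bitmap 64 bits at a time, skipping words that already read 0xffffffffffffffff, claiming the first clear bit, and returning vaddr + size * i (through the pool's constructor when one is set). The self-contained scan below shows just the bitmap part; find_free_slot() is an invented name, and 1UL is assumed to be 64 bits wide, as the shifts in the listing itself require.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Scan a bitmap of `count` objects, 64 bits per word, and claim the first
     * free slot: the "skip full words, then test and set one bit" pattern the
     * slab_malloc() matches show.  Returns the slot index or -1. */
    static int find_free_slot(uint64_t *bmp, int count)
    {
        for (int i = 0; i < count; ++i) {
            /* A word whose 64 bits are all set holds no free object: jump past it.
             * This check only ever fires at a word boundary, so the loop's ++i
             * lands exactly on the next word. */
            if (bmp[i >> 6] == 0xffffffffffffffffUL) {
                i += 63;
                continue;
            }
            int bit = i % 64;
            if ((bmp[i >> 6] & (1UL << bit)) == 0) {   /* 1UL is 64-bit here (LP64) */
                bmp[i >> 6] |= (1UL << bit);           /* mark the object as in use */
                return i;
            }
        }
        return -1;
    }

    int main(void)
    {
        enum { OBJ_SIZE = 64, COUNT = 130 };           /* 130 objects -> 3 bitmap words */
        uint64_t bmp[3];
        memset(bmp, 0, sizeof bmp);

        unsigned char pool[OBJ_SIZE * COUNT];          /* plays the role of slab_obj_ptr->vaddr */

        bmp[0] = 0xffffffffffffffffUL;                 /* pretend the first word is fully used */

        int i = find_free_slot(bmp, COUNT);
        if (i >= 0) {
            void *obj = pool + (size_t)OBJ_SIZE * i;   /* vaddr + size * i */
            printf("allocated slot %d at offset %zu\n",
                   i, (size_t)((char *)obj - (char *)pool));
        }
        return 0;
    }
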
291 struct slab_obj *slab_obj_ptr = slab_pool->cache_pool_entry; in slab_free() local
296 if (!(slab_obj_ptr->vaddr <= addr && addr <= (slab_obj_ptr->vaddr + PAGE_2M_SIZE))) in slab_free()
298 slab_obj_ptr = container_of(list_next(&slab_obj_ptr->list), struct slab_obj, list); in slab_free()
304 int index = (addr - slab_obj_ptr->vaddr) / slab_pool->size; in slab_free()
307 *(slab_obj_ptr->bmp + (index >> 6)) ^= (1UL << index % 64); in slab_free()
309 ++(slab_obj_ptr->count_free); in slab_free()
310 --(slab_obj_ptr->count_using); in slab_free()
317 slab_pool->destructor((char *)slab_obj_ptr->vaddr + slab_pool->size * index, arg); in slab_free()
320 …if ((slab_obj_ptr->count_using == 0) && ((slab_pool->count_total_free >> 1) >= slab_obj_ptr->count… in slab_free()
323 list_del(&slab_obj_ptr->list); in slab_free()
324 slab_pool->count_total_free -= slab_obj_ptr->count_free; in slab_free()
326 kfree(slab_obj_ptr->bmp); in slab_free()
327 page_clean(slab_obj_ptr->page); in slab_free()
328 free_pages(slab_obj_ptr->page, 1); in slab_free()
330 kfree(slab_obj_ptr); in slab_free()
335 } while (slab_obj_ptr != slab_pool->cache_pool_entry); in slab_free()
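
slab_free() recovers the object index as (addr - vaddr) / size after a range check against vaddr .. vaddr + PAGE_2M_SIZE, clears the object's bit with an XOR, moves one count from count_using to count_free, and finally tears the slab down when it is completely idle and the pool already has ample free capacity elsewhere. The snippet below replays the index and bit arithmetic on plain integers; the struct fields are loose stand-ins, and the truncated comparison on the matched line at 320 is assumed to end in the slab's own count_free.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        enum { OBJ_SIZE = 64 };

        uint64_t bmp[2] = { 0 };
        uintptr_t vaddr = 0x100000;                  /* pretend slab base address */
        uintptr_t addr  = vaddr + 70 * OBJ_SIZE;     /* object being freed */

        /* Recover which object the address belongs to, then clear its bit.
         * XOR works because the bit is known to be set for a live object. */
        int index = (int)((addr - vaddr) / OBJ_SIZE);
        bmp[index >> 6] |= 1UL << (index % 64);      /* pretend it was allocated */
        bmp[index >> 6] ^= 1UL << (index % 64);      /* ...and now free it */
        printf("freed index %d, word %d now %#llx\n",
               index, index >> 6, (unsigned long long)bmp[index >> 6]);

        /* Shrink heuristic from the matches: release a slab once nothing in it is
         * used and at least twice its capacity is already free in the pool
         * (the right-hand operand is assumed to be count_free). */
        int count_using = 0, count_free = 32, count_total_free = 80;
        if (count_using == 0 && (count_total_free >> 1) >= count_free)
            printf("slab would be torn down (total_free/2 = %d >= %d)\n",
                   count_total_free >> 1, count_free);
        return 0;
    }
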
453 struct slab_obj *slab_obj_ptr; in kmalloc_create_slab_obj() local
470 slab_obj_ptr = (struct slab_obj *)((unsigned char *)vaddr + PAGE_2M_SIZE - struct_size); in kmalloc_create_slab_obj()
471 slab_obj_ptr->bmp = (void *)slab_obj_ptr + sizeof(struct slab_obj); in kmalloc_create_slab_obj()
473 slab_obj_ptr->count_free = (PAGE_2M_SIZE - struct_size) / size; in kmalloc_create_slab_obj()
474 slab_obj_ptr->count_using = 0; in kmalloc_create_slab_obj()
475 slab_obj_ptr->bmp_count = slab_obj_ptr->count_free; in kmalloc_create_slab_obj()
476 slab_obj_ptr->vaddr = vaddr; in kmalloc_create_slab_obj()
477 slab_obj_ptr->page = page; in kmalloc_create_slab_obj()
479 list_init(&slab_obj_ptr->list); in kmalloc_create_slab_obj()
481 slab_obj_ptr->bmp_len = ((slab_obj_ptr->bmp_count + sizeof(ul) * 8 - 1) >> 6) << 3; in kmalloc_create_slab_obj()
484 memset(slab_obj_ptr->bmp, 0xff, slab_obj_ptr->bmp_len); in kmalloc_create_slab_obj()
486 for (int i = 0; i < slab_obj_ptr->bmp_count; ++i) in kmalloc_create_slab_obj()
487 *(slab_obj_ptr->bmp + (i >> 6)) ^= 1UL << (i % 64); in kmalloc_create_slab_obj()
503 slab_obj_ptr = (struct slab_obj *)kmalloc(sizeof(struct slab_obj), 0); in kmalloc_create_slab_obj()
505 slab_obj_ptr->count_free = PAGE_2M_SIZE / size; in kmalloc_create_slab_obj()
506 slab_obj_ptr->count_using = 0; in kmalloc_create_slab_obj()
507 slab_obj_ptr->bmp_count = slab_obj_ptr->count_free; in kmalloc_create_slab_obj()
509 slab_obj_ptr->bmp_len = ((slab_obj_ptr->bmp_count + sizeof(ul) * 8 - 1) >> 6) << 3; in kmalloc_create_slab_obj()
511 slab_obj_ptr->bmp = (ul *)kmalloc(slab_obj_ptr->bmp_len, 0); in kmalloc_create_slab_obj()
514 memset(slab_obj_ptr->bmp, 0xff, slab_obj_ptr->bmp_len); in kmalloc_create_slab_obj()
515 for (int i = 0; i < slab_obj_ptr->bmp_count; ++i) in kmalloc_create_slab_obj()
516 *(slab_obj_ptr->bmp + (i >> 6)) ^= 1UL << (i % 64); in kmalloc_create_slab_obj()
518 slab_obj_ptr->vaddr = phys_2_virt(page->addr_phys); in kmalloc_create_slab_obj()
519 slab_obj_ptr->page = page; in kmalloc_create_slab_obj()
520 list_init(&slab_obj_ptr->list); in kmalloc_create_slab_obj()
530 return slab_obj_ptr; in kmalloc_create_slab_obj()
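
kmalloc_create_slab_obj() shows two layouts. For small object sizes the descriptor and its bitmap are carved out of the tail of the new 2 MiB page (vaddr + PAGE_2M_SIZE - struct_size), so count_free is computed from the shrunken usable area; for larger sizes both are kmalloc()ed separately and the whole page is payload. In either case bmp_len rounds the bit count up to whole 64-bit words, the bitmap is memset to 0xff, and only the first bmp_count bits are cleared back to 0, leaving the padding bits at the end permanently "in use". The sketch below reproduces just that sizing and initialization arithmetic; the 128-byte struct_size and 96-byte object size are made-up example values.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const size_t page_size = 2UL * 1024 * 1024;    /* stands in for PAGE_2M_SIZE */
        const size_t size = 96;                        /* object size of this slab */

        /* Embedded layout: descriptor + bitmap live at the end of the page, so
         * the usable area (and therefore count_free) shrinks by struct_size. */
        size_t struct_size = 128;                      /* illustrative descriptor+bitmap bytes */
        uint64_t count_free = (page_size - struct_size) / size;
        uint64_t bmp_count  = count_free;

        /* Round the bit count up to whole 64-bit words, then convert to bytes:
         * ((bmp_count + 63) >> 6) words, << 3 for bytes per word. */
        uint64_t bmp_len = ((bmp_count + sizeof(unsigned long) * 8 - 1) >> 6) << 3;

        uint64_t *bmp = malloc(bmp_len);
        if (bmp == NULL)
            return 1;
        memset(bmp, 0xff, bmp_len);                    /* everything "used" ... */
        for (uint64_t i = 0; i < bmp_count; ++i)       /* ...then clear the real slots, */
            bmp[i >> 6] ^= 1UL << (i % 64);            /* leaving padding bits set (LP64) */

        printf("objects: %llu, bitmap bytes: %llu, padding bits left set: %llu\n",
               (unsigned long long)bmp_count,
               (unsigned long long)bmp_len,
               (unsigned long long)(bmp_len * 8 - bmp_count));
        free(bmp);
        return 0;
    }

Pre-setting the padding bits this way means the allocation scan never has to special-case the last, partially used bitmap word.
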
560 struct slab_obj *slab_obj_ptr = kmalloc_cache_group[index].cache_pool_entry; in kmalloc() local
566 slab_obj_ptr = kmalloc_create_slab_obj(kmalloc_cache_group[index].size); in kmalloc()
569 if (unlikely(slab_obj_ptr == NULL)) in kmalloc()
575 kmalloc_cache_group[index].count_total_free += slab_obj_ptr->count_free; in kmalloc()
576 list_add(&kmalloc_cache_group[index].cache_pool_entry->list, &slab_obj_ptr->list); in kmalloc()
583 if (slab_obj_ptr->count_free == 0) in kmalloc()
584 slab_obj_ptr = container_of(list_next(&slab_obj_ptr->list), struct slab_obj, list); in kmalloc()
587 } while (slab_obj_ptr != kmalloc_cache_group[index].cache_pool_entry); in kmalloc()
591 for (int i = 0; i < slab_obj_ptr->bmp_count; ++i) in kmalloc()
595 if (*(slab_obj_ptr->bmp + (i >> 6)) == 0xffffffffffffffffUL) in kmalloc()
602 if ((*(slab_obj_ptr->bmp + (i >> 6)) & (1UL << md)) == 0) in kmalloc()
604 *(slab_obj_ptr->bmp + (i >> 6)) |= (1UL << md); in kmalloc()
605 ++(slab_obj_ptr->count_using); in kmalloc()
606 --(slab_obj_ptr->count_free); in kmalloc()
613 result = (void *)((char *)slab_obj_ptr->vaddr + kmalloc_cache_group[index].size * i); in kmalloc()
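
kmalloc() first looks for a slab with free objects in the size class's ring; when the current slab's count_free is 0 it advances with container_of(list_next(...)), and once the whole ring has been visited it calls kmalloc_create_slab_obj(), adds the new slab's count_free to the group's count_total_free, and links it in with list_add() before running the same bitmap scan as above. The reduced model below keeps only that search-then-grow control flow; the ring becomes a small array and create_slab()/pick_slab() are invented stand-ins for the kernel functions.

    #include <stdio.h>
    #include <stdlib.h>

    /* Reduced model of one kmalloc size class: a ring of slabs, each tracked
     * only by its free-object count. */
    struct slab  { int count_free; };
    struct group { struct slab *slabs[8]; int n; int count_total_free; };

    static struct slab *create_slab(void)      /* stand-in for kmalloc_create_slab_obj() */
    {
        struct slab *s = malloc(sizeof *s);
        if (s == NULL)
            exit(1);                           /* keep the demo simple on OOM */
        s->count_free = 64;                    /* fresh slab: every object free */
        return s;
    }

    static struct slab *pick_slab(struct group *g)
    {
        /* Walk the ring once, the way the do/while in the matches does. */
        for (int i = 0; i < g->n; ++i)
            if (g->slabs[i]->count_free > 0)
                return g->slabs[i];

        /* Every slab is full: grow the pool, account for the new capacity,
         * and link the new slab into the ring (list_add() in the original). */
        struct slab *s = create_slab();
        g->count_total_free += s->count_free;
        g->slabs[g->n++] = s;                  /* toy ring; never exceeds 8 here */
        return s;
    }

    int main(void)
    {
        struct group g = { .n = 0, .count_total_free = 0 };
        g.slabs[g.n++] = create_slab();
        g.count_total_free = g.slabs[0]->count_free;

        g.slabs[0]->count_free = 0;            /* pretend the first slab filled up */

        struct slab *s = pick_slab(&g);
        printf("slabs in ring: %d, total free objects: %d, chosen slab free: %d\n",
               g.n, g.count_total_free, s->count_free);
        return 0;
    }
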
638 struct slab_obj *slab_obj_ptr = NULL; in kfree() local
647 slab_obj_ptr = kmalloc_cache_group[i].cache_pool_entry; in kfree()
652 if (likely(slab_obj_ptr->vaddr != page_base_addr)) in kfree()
654 slab_obj_ptr = container_of(list_next(&slab_obj_ptr->list), struct slab_obj, list); in kfree()
661 index = (address - slab_obj_ptr->vaddr) / kmalloc_cache_group[i].size; in kfree()
664 *(slab_obj_ptr->bmp + (index >> 6)) ^= 1UL << (index % 64); in kfree()
666 ++(slab_obj_ptr->count_free); in kfree()
667 --(slab_obj_ptr->count_using); in kfree()
673slab_obj_ptr->count_using == 0) && (kmalloc_cache_group[i].count_total_free >= ((slab_obj_ptr->bmp… in kfree()
683 list_del(&slab_obj_ptr->list); in kfree()
685 kmalloc_cache_group[i].count_total_free -= slab_obj_ptr->bmp_count; in kfree()
686 page_clean(slab_obj_ptr->page); in kfree()
687 free_pages(slab_obj_ptr->page, 1); in kfree()
692 list_del(&slab_obj_ptr->list); in kfree()
693 kmalloc_cache_group[i].count_total_free -= slab_obj_ptr->bmp_count; in kfree()
695 kfree(slab_obj_ptr->bmp); in kfree()
697 page_clean(slab_obj_ptr->page); in kfree()
698 free_pages(slab_obj_ptr->page, 1); in kfree()
700 kfree(slab_obj_ptr); in kfree()
709 } while (slab_obj_ptr != kmalloc_cache_group[i].cache_pool_entry); in kfree()
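
kfree() works backwards from the pointer alone: it derives the 2 MiB page base of the address, walks each size class's ring until it finds the slab whose vaddr matches that base, clears the object's bit and adjusts the counters just as slab_free() does, and, if the slab has become redundant, releases it along one of two paths matching the two layouts above (an embedded descriptor vanishes with its page at 683-687; a separately allocated descriptor and bitmap get their own kfree() calls at 692-700). The fragment below shows only the page-base and index arithmetic; masking with ~(PAGE_2M_SIZE - 1) is an assumption, since the matches only show page_base_addr being compared against vaddr, not how it is computed.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_2M_SIZE (2UL * 1024 * 1024)   /* mirrors the listing's 2 MiB slab page */

    int main(void)
    {
        const uintptr_t slab_vaddr = 0x40000000;        /* pretend slab page base */
        const size_t    size       = 512;               /* size class of this slab */
        uintptr_t address = slab_vaddr + 300 * size;    /* pointer handed to kfree() */

        /* Round the address down to its 2 MiB boundary; kfree() compares this
         * against each slab's vaddr to find the owning slab. */
        uintptr_t page_base_addr = address & ~(PAGE_2M_SIZE - 1);

        /* Once the slab is found, the object index falls out of the offset. */
        size_t index = (address - page_base_addr) / size;

        printf("page base %#lx, object index %zu\n",
               (unsigned long)page_base_addr, index);
        return 0;
    }
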