Lines matching refs:alloc — every reference to the alloc argument/local variable in the binder buffer allocator (drivers/android/binder_alloc.c in the Linux kernel). Each match gives the source line number, the matched line, and the enclosing function.
60 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc, in binder_alloc_buffer_size() argument
63 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
64 return alloc->buffer + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
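binder_alloc_buffer_size() has no size field to read: a buffer runs from its own user_data to the start of the next buffer in the address-ordered alloc->buffers list, and the last buffer runs to the end of the mapping. A minimal userspace sketch of the same idea, using an array ordered by start offset (all names here are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    struct buf { size_t start; };            /* offset into the mapping */

    /* Size of bufs[i]: distance to the next buffer's start, or to the
     * end of the region for the last entry -- mirroring how the kernel
     * version uses list_is_last() on alloc->buffers. */
    static size_t buf_size(const struct buf *bufs, size_t n,
                           size_t i, size_t region_size)
    {
        if (i == n - 1)
            return region_size - bufs[i].start;
        return bufs[i + 1].start - bufs[i].start;
    }

    int main(void)
    {
        struct buf bufs[] = { { 0 }, { 128 }, { 512 } };
        /* 4096-byte region: sizes come out as 128, 384 and 3584. */
        for (size_t i = 0; i < 3; i++)
            printf("buffer %zu: %zu bytes\n", i, buf_size(bufs, 3, i, 4096));
        return 0;
    }

Storing only the start keeps adjacent buffers implicitly sized, which is what makes the split and coalesce logic further down cheap.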
68 static void binder_insert_free_buffer(struct binder_alloc *alloc, in binder_insert_free_buffer() argument
71 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer()
79 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); in binder_insert_free_buffer()
83 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer()
90 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
98 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer()
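binder_insert_free_buffer() descends alloc->free_buffers, an rb-tree keyed by buffer size, so that allocation can later run a best-fit search. A sketch of the same **p descent with a plain unbalanced BST standing in for the rb-tree (the kernel rebalances via rb_link_node()/rb_insert_color(); this sketch deliberately does not):

    #include <stddef.h>

    struct free_buf {
        size_t size;
        struct free_buf *left, *right;
    };

    /* Walk down from the root, going left for smaller sizes and right
     * otherwise, then link the new node at the empty slot we stopped
     * at -- the same pointer-to-pointer walk as the kernel function. */
    static void insert_free(struct free_buf **root, struct free_buf *new)
    {
        struct free_buf **p = root;

        while (*p) {
            if (new->size < (*p)->size)
                p = &(*p)->left;
            else
                p = &(*p)->right;
        }
        new->left = new->right = NULL;
        *p = new;   /* kernel: rb_link_node() + rb_insert_color() */
    }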
102 struct binder_alloc *alloc, struct binder_buffer *new_buffer) in binder_insert_allocated_buffer_locked() argument
104 struct rb_node **p = &alloc->allocated_buffers.rb_node; in binder_insert_allocated_buffer_locked()
123 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); in binder_insert_allocated_buffer_locked()
127 struct binder_alloc *alloc, in binder_alloc_prepare_to_free_locked() argument
130 struct rb_node *n = alloc->allocated_buffers.rb_node; in binder_alloc_prepare_to_free_locked()
170 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, in binder_alloc_prepare_to_free() argument
175 mutex_lock(&alloc->mutex); in binder_alloc_prepare_to_free()
176 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); in binder_alloc_prepare_to_free()
177 mutex_unlock(&alloc->mutex); in binder_alloc_prepare_to_free()
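binder_alloc_prepare_to_free() is the usual locked/unlocked split: the exported function only takes alloc->mutex and delegates the real lookup to the _locked variant. The same pattern with pthreads, the lookup body elided:

    #include <pthread.h>
    #include <stddef.h>

    struct alloc_state {
        pthread_mutex_t mutex;
        /* ... allocated-buffers tree ... */
    };

    static void *lookup_locked(struct alloc_state *a, unsigned long user_ptr)
    {
        /* caller holds a->mutex; search the allocated tree here */
        (void)a; (void)user_ptr;
        return NULL;
    }

    static void *lookup(struct alloc_state *a, unsigned long user_ptr)
    {
        void *buf;

        pthread_mutex_lock(&a->mutex);
        buf = lookup_locked(a, user_ptr);
        pthread_mutex_unlock(&a->mutex);
        return buf;
    }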
181 static int binder_update_page_range(struct binder_alloc *alloc, int allocate, in binder_update_page_range() argument
192 "%d: %s pages %pK-%pK\n", alloc->pid, in binder_update_page_range()
198 trace_binder_update_page_range(alloc, allocate, start, end); in binder_update_page_range()
204 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; in binder_update_page_range()
211 if (need_mm && mmget_not_zero(alloc->mm)) in binder_update_page_range()
212 mm = alloc->mm; in binder_update_page_range()
216 vma = alloc->vma; in binder_update_page_range()
222 alloc->pid); in binder_update_page_range()
231 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
232 page = &alloc->pages[index]; in binder_update_page_range()
235 trace_binder_alloc_lru_start(alloc, index); in binder_update_page_range()
240 trace_binder_alloc_lru_end(alloc, index); in binder_update_page_range()
247 trace_binder_alloc_page_start(alloc, index); in binder_update_page_range()
253 alloc->pid, page_addr); in binder_update_page_range()
256 page->alloc = alloc; in binder_update_page_range()
263 alloc->pid, user_page_addr); in binder_update_page_range()
267 if (index + 1 > alloc->pages_high) in binder_update_page_range()
268 alloc->pages_high = index + 1; in binder_update_page_range()
270 trace_binder_alloc_page_end(alloc, index); in binder_update_page_range()
283 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_update_page_range()
284 page = &alloc->pages[index]; in binder_update_page_range()
286 trace_binder_free_lru_start(alloc, index); in binder_update_page_range()
291 trace_binder_free_lru_end(alloc, index); in binder_update_page_range()
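Throughout binder_update_page_range(), a page-aligned address is turned into an index into alloc->pages with the same (addr - base) / PAGE_SIZE computation (repeated three times in the matches above), and the slot is populated or emptied on demand. A userspace sketch of that lazy backing, with calloc() standing in for alloc_page() and none of the kernel's vma or LRU handling:

    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    struct pages_state {
        char *buffer;        /* base address of the mapping */
        void **pages;        /* one slot per page, NULL if unbacked */
    };

    /* Back every page in [start, end); pages already resident are
     * skipped, so repeated calls over overlapping ranges are cheap. */
    static int back_range(struct pages_state *s, char *start, char *end)
    {
        for (char *addr = start; addr < end; addr += PAGE_SIZE) {
            size_t index = (addr - s->buffer) / PAGE_SIZE;

            if (s->pages[index])
                continue;              /* already resident */
            s->pages[index] = calloc(1, PAGE_SIZE);
            if (!s->pages[index])
                return -1;             /* kernel unwinds the partial range */
        }
        return 0;
    }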
312 static inline void binder_alloc_set_vma(struct binder_alloc *alloc, in binder_alloc_set_vma() argument
316 smp_store_release(&alloc->vma, vma); in binder_alloc_set_vma()
320 struct binder_alloc *alloc) in binder_alloc_get_vma() argument
323 return smp_load_acquire(&alloc->vma); in binder_alloc_get_vma()
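The vma pointer is published with smp_store_release() and read with smp_load_acquire(), so any reader that observes a non-NULL vma also observes every store made before it was published. The equivalent C11 pairing, on a made-up userspace struct:

    #include <stdatomic.h>
    #include <stddef.h>

    struct mapping { int ready; /* ... */ };

    static _Atomic(struct mapping *) current_map;

    /* Writer: everything stored to *m before this release is visible
     * to any reader whose acquire load observes the pointer. */
    static void publish_map(struct mapping *m)
    {
        atomic_store_explicit(&current_map, m, memory_order_release);
    }

    static struct mapping *get_map(void)
    {
        return atomic_load_explicit(&current_map, memory_order_acquire);
    }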
326 static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid) in debug_low_async_space_locked() argument
340 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in debug_low_async_space_locked()
347 total_alloc_size += binder_alloc_buffer_size(alloc, buffer); in debug_low_async_space_locked()
356 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { in debug_low_async_space_locked()
359 alloc->pid, pid, num_buffers, total_alloc_size); in debug_low_async_space_locked()
360 if (!alloc->oneway_spam_detected) { in debug_low_async_space_locked()
361 alloc->oneway_spam_detected = true; in debug_low_async_space_locked()
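debug_low_async_space_locked() walks the allocated tree and flags a sender as a oneway-spam suspect once it holds more than 50 async buffers or more than a quarter of the whole mapping. The same heuristic over a flat array, with the thresholds copied from the matches above:

    #include <stdbool.h>
    #include <stddef.h>

    struct abuf { int pid; bool async; size_t size; };

    static bool low_async_space(const struct abuf *bufs, size_t n,
                                int pid, size_t total_space)
    {
        size_t num = 0, total = 0;

        for (size_t i = 0; i < n; i++) {
            if (bufs[i].pid != pid || !bufs[i].async)
                continue;
            num++;
            total += bufs[i].size;
        }
        /* same trip points as the kernel: >50 buffers or >25% of space */
        return num > 50 || total > total_space / 4;
    }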
369 struct binder_alloc *alloc, in binder_alloc_new_buf_locked() argument
376 struct rb_node *n = alloc->free_buffers.rb_node; in binder_alloc_new_buf_locked()
386 if (!binder_alloc_get_vma(alloc)) { in binder_alloc_new_buf_locked()
389 alloc->pid); in binder_alloc_new_buf_locked()
399 alloc->pid, data_size, offsets_size); in binder_alloc_new_buf_locked()
406 alloc->pid, extra_buffers_size); in binder_alloc_new_buf_locked()
413 if (is_async && alloc->free_async_space < size) { in binder_alloc_new_buf_locked()
416 alloc->pid, size); in binder_alloc_new_buf_locked()
423 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
443 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in binder_alloc_new_buf_locked()
446 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
452 for (n = rb_first(&alloc->free_buffers); n != NULL; in binder_alloc_new_buf_locked()
455 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
463 alloc->pid, size); in binder_alloc_new_buf_locked()
473 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
478 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
487 ret = binder_update_page_range(alloc, 1, (void __user *) in binder_alloc_new_buf_locked()
498 __func__, alloc->pid); in binder_alloc_new_buf_locked()
504 binder_insert_free_buffer(alloc, new_buffer); in binder_alloc_new_buf_locked()
507 rb_erase(best_fit, &alloc->free_buffers); in binder_alloc_new_buf_locked()
510 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
513 alloc->pid, size, buffer); in binder_alloc_new_buf_locked()
521 alloc->free_async_space -= size; in binder_alloc_new_buf_locked()
524 alloc->pid, size, alloc->free_async_space); in binder_alloc_new_buf_locked()
525 if (alloc->free_async_space < alloc->buffer_size / 10) { in binder_alloc_new_buf_locked()
531 buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid); in binder_alloc_new_buf_locked()
533 alloc->oneway_spam_detected = false; in binder_alloc_new_buf_locked()
539 binder_update_page_range(alloc, 0, (void __user *) in binder_alloc_new_buf_locked()
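binder_alloc_new_buf_locked() runs a best-fit search: it remembers the smallest free buffer that still fits, carves the request out of it, and hands the tail back to the free list. A sketch of that policy over a plain chunk table, with no tree, locking or page handling:

    #include <stddef.h>

    struct chunk { size_t start, size; int is_free; };

    /* Best fit: smallest free chunk with size >= want. Returns its
     * index, splitting any remainder back into the table (capacity
     * cap, current count *used), or -1 on exhaustion. */
    static int alloc_best_fit(struct chunk *c, size_t *used, size_t cap,
                              size_t want)
    {
        int best = -1;

        for (size_t i = 0; i < *used; i++)
            if (c[i].is_free && c[i].size >= want &&
                (best < 0 || c[i].size < c[best].size))
                best = (int)i;
        if (best < 0)
            return -1;                  /* "no address space" path */

        if (c[best].size > want && *used < cap) {
            /* split: the tail stays free, like the kernel reinserting
             * new_buffer via binder_insert_free_buffer() */
            c[*used].start   = c[best].start + want;
            c[*used].size    = c[best].size - want;
            c[*used].is_free = 1;
            (*used)++;
            c[best].size = want;
        }
        c[best].is_free = 0;
        return best;
    }

Best fit keeps large free buffers intact for large transactions at the cost of a longer search, which the kernel's size-keyed rb-tree bounds at O(log n).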
561 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, in binder_alloc_new_buf() argument
570 mutex_lock(&alloc->mutex); in binder_alloc_new_buf()
571 buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, in binder_alloc_new_buf()
573 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
588 static void binder_delete_free_buffer(struct binder_alloc *alloc, in binder_delete_free_buffer() argument
594 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
601 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
605 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
611 alloc->pid, in binder_delete_free_buffer()
620 alloc->pid, buffer->user_data); in binder_delete_free_buffer()
627 alloc->pid, buffer->user_data, in binder_delete_free_buffer()
630 binder_update_page_range(alloc, 0, buffer_start_page(buffer), in binder_delete_free_buffer()
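binder_delete_free_buffer() must not blindly return pages: because buffers are packed back to back, a freed buffer's first page may still hold the tail of the previous buffer, and its last page the head of the next one. A sketch of the page-sharing test using only offset arithmetic:

    #define PAGE_SIZE 4096UL

    /* First/last page index touched by [start, start + size). */
    static unsigned long first_page(unsigned long start)
    {
        return start / PAGE_SIZE;
    }

    static unsigned long last_page(unsigned long start, unsigned long size)
    {
        return (start + size - 1) / PAGE_SIZE;
    }

    /* The freed buffer's first page can only be released if the
     * previous buffer ends on an earlier page; the same test applies
     * at the other end against the next buffer. */
    static int shares_first_page(unsigned long prev_start,
                                 unsigned long prev_size,
                                 unsigned long start)
    {
        return last_page(prev_start, prev_size) == first_page(start);
    }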
637 static void binder_free_buf_locked(struct binder_alloc *alloc, in binder_free_buf_locked() argument
642 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
650 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
655 BUG_ON(buffer->user_data < alloc->buffer); in binder_free_buf_locked()
656 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); in binder_free_buf_locked()
659 alloc->free_async_space += buffer_size; in binder_free_buf_locked()
662 alloc->pid, size, alloc->free_async_space); in binder_free_buf_locked()
665 binder_update_page_range(alloc, 0, in binder_free_buf_locked()
670 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
672 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
676 rb_erase(&next->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
677 binder_delete_free_buffer(alloc, next); in binder_free_buf_locked()
680 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
684 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
685 rb_erase(&prev->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
689 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
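binder_free_buf_locked() coalesces on free: if the next entry in the address-ordered list is already free it is absorbed, and if the previous one is free the buffer melts into it, before the (possibly grown) result is reinserted into the free tree. A doubly linked list sketch of the merging:

    #include <stddef.h>
    #include <stdlib.h>

    struct extent {
        size_t start, size;
        int is_free;
        struct extent *prev, *next;
    };

    /* Mark e free and merge with free neighbours, mirroring the
     * next/prev handling in binder_free_buf_locked(). */
    static void free_extent(struct extent *e)
    {
        e->is_free = 1;

        if (e->next && e->next->is_free) {       /* absorb next */
            struct extent *n = e->next;

            e->size += n->size;
            e->next = n->next;
            if (n->next)
                n->next->prev = e;
            free(n);
        }
        if (e->prev && e->prev->is_free) {       /* melt into prev */
            struct extent *p = e->prev;

            p->size += e->size;
            p->next = e->next;
            if (e->next)
                e->next->prev = p;
            free(e);
        }
    }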
692 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
701 void binder_alloc_free_buf(struct binder_alloc *alloc, in binder_alloc_free_buf() argument
713 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_free_buf()
716 mutex_lock(&alloc->mutex); in binder_alloc_free_buf()
717 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
718 mutex_unlock(&alloc->mutex); in binder_alloc_free_buf()
734 int binder_alloc_mmap_handler(struct binder_alloc *alloc, in binder_alloc_mmap_handler() argument
741 if (unlikely(vma->vm_mm != alloc->mm)) { in binder_alloc_mmap_handler()
748 if (alloc->buffer_size) { in binder_alloc_mmap_handler()
753 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, in binder_alloc_mmap_handler()
757 alloc->buffer = (void __user *)vma->vm_start; in binder_alloc_mmap_handler()
759 alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, in binder_alloc_mmap_handler()
760 sizeof(alloc->pages[0]), in binder_alloc_mmap_handler()
762 if (alloc->pages == NULL) { in binder_alloc_mmap_handler()
775 buffer->user_data = alloc->buffer; in binder_alloc_mmap_handler()
776 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
778 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
779 alloc->free_async_space = alloc->buffer_size / 2; in binder_alloc_mmap_handler()
782 binder_alloc_set_vma(alloc, vma); in binder_alloc_mmap_handler()
787 kfree(alloc->pages); in binder_alloc_mmap_handler()
788 alloc->pages = NULL; in binder_alloc_mmap_handler()
790 alloc->buffer = NULL; in binder_alloc_mmap_handler()
792 alloc->buffer_size = 0; in binder_alloc_mmap_handler()
798 alloc->pid, vma->vm_start, vma->vm_end, in binder_alloc_mmap_handler()
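The mmap handler derives everything from the VMA: the usable region is the VMA size capped by a constant (SZ_4M in current kernels; the cap itself is truncated out of the match at line 753), the pages array gets one slot per page, and half the region is reserved for oneway transactions. A sketch of just that arithmetic:

    #include <stdlib.h>

    #define PAGE_SIZE 4096UL
    #define SZ_4M     (4UL * 1024 * 1024)

    struct alloc_cfg {
        unsigned long buffer_size;
        unsigned long free_async_space;
        void **pages;
    };

    static int setup_alloc(struct alloc_cfg *a, unsigned long vma_size)
    {
        a->buffer_size = vma_size < SZ_4M ? vma_size : SZ_4M; /* min_t() */
        a->pages = calloc(a->buffer_size / PAGE_SIZE, sizeof(a->pages[0]));
        if (!a->pages)
            return -1;
        a->free_async_space = a->buffer_size / 2;   /* half for oneway */
        return 0;
    }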
804 void binder_alloc_deferred_release(struct binder_alloc *alloc) in binder_alloc_deferred_release() argument
811 mutex_lock(&alloc->mutex); in binder_alloc_deferred_release()
812 BUG_ON(alloc->vma); in binder_alloc_deferred_release()
814 while ((n = rb_first(&alloc->allocated_buffers))) { in binder_alloc_deferred_release()
821 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_deferred_release()
824 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
828 while (!list_empty(&alloc->buffers)) { in binder_alloc_deferred_release()
829 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
834 WARN_ON_ONCE(!list_empty(&alloc->buffers)); in binder_alloc_deferred_release()
839 if (alloc->pages) { in binder_alloc_deferred_release()
842 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_deferred_release()
846 if (!alloc->pages[i].page_ptr) in binder_alloc_deferred_release()
850 &alloc->pages[i].lru); in binder_alloc_deferred_release()
851 page_addr = alloc->buffer + i * PAGE_SIZE; in binder_alloc_deferred_release()
854 __func__, alloc->pid, i, page_addr, in binder_alloc_deferred_release()
856 __free_page(alloc->pages[i].page_ptr); in binder_alloc_deferred_release()
859 kfree(alloc->pages); in binder_alloc_deferred_release()
861 mutex_unlock(&alloc->mutex); in binder_alloc_deferred_release()
862 if (alloc->mm) in binder_alloc_deferred_release()
863 mmdrop(alloc->mm); in binder_alloc_deferred_release()
867 __func__, alloc->pid, buffers, page_count); in binder_alloc_deferred_release()
889 struct binder_alloc *alloc) in binder_alloc_print_allocated() argument
893 mutex_lock(&alloc->mutex); in binder_alloc_print_allocated()
894 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_print_allocated()
897 mutex_unlock(&alloc->mutex); in binder_alloc_print_allocated()
906 struct binder_alloc *alloc) in binder_alloc_print_pages() argument
914 mutex_lock(&alloc->mutex); in binder_alloc_print_pages()
919 if (binder_alloc_get_vma(alloc) != NULL) { in binder_alloc_print_pages()
920 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_print_pages()
921 page = &alloc->pages[i]; in binder_alloc_print_pages()
930 mutex_unlock(&alloc->mutex); in binder_alloc_print_pages()
932 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); in binder_alloc_print_pages()
941 int binder_alloc_get_allocated_count(struct binder_alloc *alloc) in binder_alloc_get_allocated_count() argument
946 mutex_lock(&alloc->mutex); in binder_alloc_get_allocated_count()
947 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_get_allocated_count()
949 mutex_unlock(&alloc->mutex); in binder_alloc_get_allocated_count()
962 void binder_alloc_vma_close(struct binder_alloc *alloc) in binder_alloc_vma_close() argument
964 binder_alloc_set_vma(alloc, NULL); in binder_alloc_vma_close()
986 struct binder_alloc *alloc; in binder_alloc_free_page() local
991 alloc = page->alloc; in binder_alloc_free_page()
992 if (!mutex_trylock(&alloc->mutex)) in binder_alloc_free_page()
998 index = page - alloc->pages; in binder_alloc_free_page()
999 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; in binder_alloc_free_page()
1001 mm = alloc->mm; in binder_alloc_free_page()
1007 if (vma && vma != binder_alloc_get_vma(alloc)) in binder_alloc_free_page()
1014 trace_binder_unmap_user_start(alloc, index); in binder_alloc_free_page()
1018 trace_binder_unmap_user_end(alloc, index); in binder_alloc_free_page()
1023 trace_binder_unmap_kernel_start(alloc, index); in binder_alloc_free_page()
1028 trace_binder_unmap_kernel_end(alloc, index); in binder_alloc_free_page()
1031 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1040 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
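binder_alloc_free_page() is a shrinker callback invoked under memory pressure, so it must not sleep on alloc->mutex; it uses mutex_trylock() and skips the page when the lock is contended. The same non-blocking pattern with pthreads:

    #include <errno.h>
    #include <pthread.h>

    static int try_reclaim(pthread_mutex_t *m)
    {
        if (pthread_mutex_trylock(m) != 0)
            return -EBUSY;   /* contended: skip this page, retry later */
        /* ... unmap and free the page here ... */
        pthread_mutex_unlock(m);
        return 0;
    }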
1071 void binder_alloc_init(struct binder_alloc *alloc) in binder_alloc_init() argument
1073 alloc->pid = current->group_leader->pid; in binder_alloc_init()
1074 alloc->mm = current->mm; in binder_alloc_init()
1075 mmgrab(alloc->mm); in binder_alloc_init()
1076 mutex_init(&alloc->mutex); in binder_alloc_init()
1077 INIT_LIST_HEAD(&alloc->buffers); in binder_alloc_init()
1117 static inline bool check_buffer(struct binder_alloc *alloc, in check_buffer() argument
1121 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); in check_buffer()
1149 static struct page *binder_alloc_get_page(struct binder_alloc *alloc, in binder_alloc_get_page() argument
1155 (buffer->user_data - alloc->buffer); in binder_alloc_get_page()
1160 lru_page = &alloc->pages[index]; in binder_alloc_get_page()
1172 static void binder_alloc_clear_buf(struct binder_alloc *alloc, in binder_alloc_clear_buf() argument
1175 size_t bytes = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_clear_buf()
1183 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_clear_buf()
1205 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, in binder_alloc_copy_user_to_buffer() argument
1211 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_copy_user_to_buffer()
1221 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
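binder_alloc_copy_user_to_buffer() cannot copy across page boundaries in one go, because the buffer's pages are mapped one at a time on the kernel side; every iteration copies at most up to the end of the current page. A sketch of the chunking math, assuming PAGE_SIZE is a power of two:

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Copy bytes from src into a paged buffer, one page-bounded chunk
     * at a time; pages[i] is the backing storage for page i. */
    static void copy_to_paged(void **pages, unsigned long offset,
                              const char *src, size_t bytes)
    {
        while (bytes) {
            /* distance to the end of the current page */
            size_t avail = PAGE_SIZE - (offset & ~PAGE_MASK);
            size_t size = bytes < avail ? bytes : avail;
            char *dst = (char *)pages[offset / PAGE_SIZE]
                        + (offset & ~PAGE_MASK);

            memcpy(dst, src, size);
            offset += size;
            src += size;
            bytes -= size;
        }
    }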
1236 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc, in binder_alloc_do_buffer_copy() argument
1244 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_do_buffer_copy()
1252 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1267 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc, in binder_alloc_copy_to_buffer() argument
1273 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, in binder_alloc_copy_to_buffer()
1277 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, in binder_alloc_copy_from_buffer() argument
1283 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, in binder_alloc_copy_from_buffer()
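The two exported copy routines share one body: binder_alloc_do_buffer_copy() takes a direction flag, and binder_alloc_copy_to_buffer()/binder_alloc_copy_from_buffer() merely pin it to true or false. A sketch of that shape, with the check_buffer() bounds checks elided:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    static int do_copy(bool to_buffer, char *buffer, size_t offset,
                       void *ptr, size_t bytes)
    {
        /* bounds checks (check_buffer()) elided */
        if (to_buffer)
            memcpy(buffer + offset, ptr, bytes);
        else
            memcpy(ptr, buffer + offset, bytes);
        return 0;
    }

    static int copy_to(char *buffer, size_t offset, void *src, size_t bytes)
    {
        return do_copy(true, buffer, offset, src, bytes);
    }

    static int copy_from(void *dst, char *buffer, size_t offset, size_t bytes)
    {
        return do_copy(false, buffer, offset, dst, bytes);
    }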