/linux-6.6.21/fs/squashfs/ |
D | decompressor_multi_percpu.c |
      31  struct squashfs_stream __percpu *percpu;  in squashfs_decompressor_create() local
      34  percpu = alloc_percpu(struct squashfs_stream);  in squashfs_decompressor_create()
      35  if (percpu == NULL)  in squashfs_decompressor_create()
      39  stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_create()
      49  return (__force void *) percpu;  in squashfs_decompressor_create()
      53  stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_create()
      57  free_percpu(percpu);  in squashfs_decompressor_create()
      63  struct squashfs_stream __percpu *percpu =  in squashfs_decompressor_destroy() local
      70  stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_destroy()
      73  free_percpu(percpu);  in squashfs_decompressor_destroy()
      [all …]
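
The squashfs hits above are a textbook per-CPU allocation lifecycle: alloc_percpu() creates one copy per possible CPU, per_cpu_ptr() reaches a given CPU's copy, and free_percpu() releases the whole set. A minimal sketch of the same pattern, with hypothetical demo_* names standing in for the squashfs types:

    /*
     * Minimal sketch of the alloc_percpu()/per_cpu_ptr()/free_percpu()
     * lifecycle; struct demo_stream and the demo_*() functions are
     * hypothetical names used only for illustration.
     */
    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct demo_stream {
            void *buf;
    };

    static struct demo_stream __percpu *demo_create(size_t bufsz)
    {
            struct demo_stream __percpu *percpu;
            struct demo_stream *stream;
            int cpu;

            percpu = alloc_percpu(struct demo_stream);      /* one copy per possible CPU */
            if (!percpu)
                    return NULL;

            for_each_possible_cpu(cpu) {
                    stream = per_cpu_ptr(percpu, cpu);      /* reach a specific CPU's copy */
                    stream->buf = kmalloc(bufsz, GFP_KERNEL);
                    if (!stream->buf)
                            goto out_free;
            }
            return percpu;

    out_free:
            /* per-CPU memory starts zeroed, so kfree(NULL) covers untouched CPUs */
            for_each_possible_cpu(cpu)
                    kfree(per_cpu_ptr(percpu, cpu)->buf);
            free_percpu(percpu);
            return NULL;
    }

    static void demo_destroy(struct demo_stream __percpu *percpu)
    {
            int cpu;

            for_each_possible_cpu(cpu)
                    kfree(per_cpu_ptr(percpu, cpu)->buf);
            free_percpu(percpu);
    }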
|
/linux-6.6.21/kernel/bpf/ |
D | memalloc.c |
      254  static void free_one(void *obj, bool percpu)  in free_one() argument
      256  if (percpu) {  in free_one()
      265  static int free_all(struct llist_node *llnode, bool percpu)  in free_all() argument
      271  free_one(pos, percpu);  in free_all()
      496  int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)  in bpf_mem_alloc_init() argument
      504  ma->percpu = percpu;  in bpf_mem_alloc_init()
      511  if (percpu)  in bpf_mem_alloc_init()
      536  if (WARN_ON_ONCE(percpu))  in bpf_mem_alloc_init()
      564  bool percpu = !!c->percpu_size;  in drain_mem_cache() local
      573  free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);  in drain_mem_cache()
      [all …]
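
These hits are from the BPF memory allocator, whose public entry point bpf_mem_alloc_init() (declared in include/linux/bpf_mem_alloc.h, listed further down) takes a percpu flag selecting per-CPU object storage. A minimal usage sketch, assuming a fixed-size, non-per-CPU cache; struct demo_elem and the error handling are illustrative only (the real users are the BPF maps, e.g. kernel/bpf/hashtab.c):

    #include <linux/bpf_mem_alloc.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_elem {
            u64 key;
            u64 val;
    };

    static struct bpf_mem_alloc demo_ma;

    static int demo_bpf_mem_alloc_usage(void)
    {
            struct demo_elem *e;
            int err;

            /* size != 0: fixed-size cache; percpu = false: plain objects */
            err = bpf_mem_alloc_init(&demo_ma, sizeof(struct demo_elem), false);
            if (err)
                    return err;

            e = bpf_mem_cache_alloc(&demo_ma);       /* served from a per-CPU free list */
            if (e) {
                    e->key = 1;
                    e->val = 2;
                    bpf_mem_cache_free(&demo_ma, e); /* back to this CPU's cache */
            }

            bpf_mem_alloc_destroy(&demo_ma);         /* drain every CPU's cache */
            return e ? 0 : -ENOMEM;
    }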
|
D | bpf_lru_list.c |
      501  if (lru->percpu)  in bpf_lru_pop_free()
      558  if (lru->percpu)  in bpf_lru_push_free()
      616  if (lru->percpu)  in bpf_lru_populate()
      651  int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,  in bpf_lru_init() argument
      656  if (percpu) {  in bpf_lru_init()
      686  lru->percpu = percpu;  in bpf_lru_init()
      696  if (lru->percpu)  in bpf_lru_destroy()
|
D | bpf_lru_list.h |
      62  bool percpu;  member
      71  int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
|
/linux-6.6.21/arch/alpha/boot/ |
D | main.c |
      65  struct percpu_struct * percpu;  in pal_init() local
      95  percpu = (struct percpu_struct *)  in pal_init()
      97  rev = percpu->pal_revision = percpu->palcode_avail[2];  in pal_init()
|
D | bootp.c |
      71  struct percpu_struct * percpu;  in pal_init() local
      101  percpu = (struct percpu_struct *)  in pal_init()
      103  rev = percpu->pal_revision = percpu->palcode_avail[2];  in pal_init()
|
D | bootpz.c |
      119  struct percpu_struct * percpu;  in pal_init() local
      149  percpu = (struct percpu_struct *)  in pal_init()
      151  rev = percpu->pal_revision = percpu->palcode_avail[2];  in pal_init()
|
/linux-6.6.21/net/rds/ |
D | ib_recv.c |
      107  cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);  in rds_ib_recv_alloc_cache()
      108  if (!cache->percpu)  in rds_ib_recv_alloc_cache()
      112  head = per_cpu_ptr(cache->percpu, cpu);  in rds_ib_recv_alloc_cache()
      130  free_percpu(ic->i_cache_incs.percpu);  in rds_ib_recv_alloc_caches()
      143  head = per_cpu_ptr(cache->percpu, cpu);  in rds_ib_cache_splice_all_lists()
      166  free_percpu(ic->i_cache_incs.percpu);  in rds_ib_recv_free_caches()
      177  free_percpu(ic->i_cache_frags.percpu);  in rds_ib_recv_free_caches()
      487  chpfirst = __this_cpu_read(cache->percpu->first);  in rds_ib_recv_cache_put()
      493  __this_cpu_write(cache->percpu->first, new_item);  in rds_ib_recv_cache_put()
      494  __this_cpu_inc(cache->percpu->count);  in rds_ib_recv_cache_put()
      [all …]
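
The last three hits show RDS pushing onto a lockless per-CPU cache with the __this_cpu_*() accessors. A minimal sketch of that freelist-push idiom with hypothetical demo_* names (the real version is rds_ib_recv_cache_put()); interrupts are disabled so the read/modify/write of the current CPU's head cannot migrate or be interleaved with another push:

    #include <linux/percpu.h>
    #include <linux/list.h>
    #include <linux/irqflags.h>

    struct demo_cache_head {
            struct list_head *first;
            unsigned long count;
    };

    struct demo_cache {
            struct demo_cache_head __percpu *percpu;
    };

    static void demo_cache_put(struct demo_cache *cache, struct list_head *new_item)
    {
            struct list_head *chpfirst;
            unsigned long flags;

            local_irq_save(flags);

            chpfirst = __this_cpu_read(cache->percpu->first);
            if (!chpfirst)
                    INIT_LIST_HEAD(new_item);          /* first entry on this CPU */
            else
                    list_add_tail(new_item, chpfirst); /* link into the existing ring */

            __this_cpu_write(cache->percpu->first, new_item);
            __this_cpu_inc(cache->percpu->count);

            local_irq_restore(flags);
    }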
|
/linux-6.6.21/include/linux/ |
D | bpf_mem_alloc.h |
      14  bool percpu;  member
      25  int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
|
D | spinlock_rt.h |
      11  struct lock_class_key *key, bool percpu);
      14  struct lock_class_key *key, bool percpu)  in __rt_spin_lock_init() argument
|
/linux-6.6.21/tools/testing/selftests/cgroup/ |
D | test_kmem.c |
      356  long current, percpu;  in test_percpu_basic() local
      381  percpu = cg_read_key_long(parent, "memory.stat", "percpu ");  in test_percpu_basic()
      383  if (current > 0 && percpu > 0 && abs(current - percpu) <  in test_percpu_basic()
      388  current, percpu);  in test_percpu_basic()
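
This selftest reads the "percpu" key out of the cgroup v2 memory.stat file and checks that it roughly tracks memory.current. A rough userspace sketch of the same read; /sys/fs/cgroup/test is a hypothetical cgroup path, and the selftest itself goes through its cg_read_key_long() helper instead:

    #include <stdio.h>
    #include <string.h>

    static long read_memstat_percpu(const char *memstat_path)
    {
            char line[256];
            long val = -1;
            FILE *f = fopen(memstat_path, "r");

            if (!f)
                    return -1;

            while (fgets(line, sizeof(line), f)) {
                    /* cgroup v2 memory.stat lines look like "percpu 12345" */
                    if (strncmp(line, "percpu ", 7) == 0) {
                            sscanf(line + 7, "%ld", &val);
                            break;
                    }
            }
            fclose(f);
            return val;
    }

    int main(void)
    {
            long percpu = read_memstat_percpu("/sys/fs/cgroup/test/memory.stat");

            if (percpu >= 0)
                    printf("percpu charged to this cgroup: %ld bytes\n", percpu);
            return 0;
    }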
|
/linux-6.6.21/include/asm-generic/ |
D | vmlinux.lds.h |
      951  *(.data..percpu..decrypted) \
      1029  *(.data..percpu..first) \
      1031  *(.data..percpu..page_aligned) \
      1033  *(.data..percpu..read_mostly) \
      1035  *(.data..percpu) \
      1036  *(.data..percpu..shared_aligned) \
      1066  .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
      1069  . = __per_cpu_load + SIZEOF(.data..percpu);
      1085  .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
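
These are the linker-script rules that gather the per-CPU data subsections into the .data..percpu output section. On the C side, the DEFINE_PER_CPU* macros from <linux/percpu-defs.h> decide which subsection a variable lands in; a short sketch with made-up variable names (only demo_counter is exercised):

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    struct demo_page {
            char buf[4096];
    };

    static DEFINE_PER_CPU(unsigned long, demo_counter);              /* .data..percpu */
    static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, demo_limit);    /* .data..percpu..read_mostly */
    static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, demo_hot);   /* ..shared_aligned on SMP */
    static DEFINE_PER_CPU_PAGE_ALIGNED(struct demo_page, demo_area); /* ..page_aligned */

    static void demo_touch(void)
    {
            this_cpu_inc(demo_counter);     /* operate on the running CPU's copy */
    }

    static unsigned long demo_total(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu(demo_counter, cpu);      /* read another CPU's copy */
            return sum;
    }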
|
/linux-6.6.21/drivers/md/ |
D | raid5.c |
      1552  static struct page **to_addr_page(struct raid5_percpu *percpu, int i)  in to_addr_page() argument
      1554  return percpu->scribble + i * percpu->scribble_obj_size;  in to_addr_page()
      1559  struct raid5_percpu *percpu, int i)  in to_addr_conv() argument
      1561  return (void *) (to_addr_page(percpu, i) + sh->disks + 2);  in to_addr_conv()
      1568  to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu)  in to_addr_offs() argument
      1570  return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2);  in to_addr_offs()
      1574  ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)  in ops_run_compute5() argument
      1577  struct page **xor_srcs = to_addr_page(percpu, 0);  in ops_run_compute5()
      1578  unsigned int *off_srcs = to_addr_offs(sh, percpu);  in ops_run_compute5()
      1604  ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));  in ops_run_compute5()
      [all …]
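
The raid5 helpers above carve each per-CPU scribble object into three consecutive arrays of (sh->disks + 2) entries. A sketch of the implied layout (illustrative only; the real buffer is one flat allocation of percpu->scribble_obj_size bytes per object, sized when the scribble buffers are allocated):

    /*
     * One scribble object, as implied by the pointer arithmetic above
     * (NDISKS stands for sh->disks; the real buffer is untyped memory):
     *
     *     struct page *page[NDISKS + 2];        to_addr_page(percpu, i) points here
     *     addr_conv_t  addr_conv[NDISKS + 2];   to_addr_conv(sh, percpu, i) points here
     *     unsigned int offs[NDISKS + 2];        to_addr_offs(sh, percpu) points here
     *
     * so to_addr_conv() is simply "just past the page-pointer array" and
     * to_addr_offs() is "just past the addr_conv array".
     */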
|
/linux-6.6.21/drivers/clocksource/ |
D | timer-qcom.c |
      153  bool percpu)  in msm_timer_init() argument
      159  msm_timer_has_ppi = percpu;  in msm_timer_init()
      167  if (percpu)  in msm_timer_init()
|
D | timer-of.c |
      28  if (of_irq->percpu)  in timer_of_irq_exit()
      72  ret = of_irq->percpu ?  in timer_of_irq_init()
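
timer-of keys the interrupt request on the percpu flag: a per-CPU timer interrupt (a PPI on ARM) goes through request_percpu_irq(), a shared one through request_irq(). A hedged sketch of that decision with hypothetical demo_* names; the real version, including devicetree-provided flags, is in timer_of_irq_init():

    #include <linux/interrupt.h>
    #include <linux/clockchips.h>
    #include <linux/percpu.h>

    static irqreturn_t demo_timer_handler(int irq, void *dev_id)
    {
            /* dev_id is the (per-CPU) clockevent this interrupt belongs to */
            return IRQ_HANDLED;
    }

    static int demo_request_timer_irq(int irq, bool percpu,
                                      struct clock_event_device __percpu *percpu_evt,
                                      struct clock_event_device *evt)
    {
            return percpu ?
                    /* per-CPU interrupt: one handler instance per CPU */
                    request_percpu_irq(irq, demo_timer_handler, "demo-timer",
                                       percpu_evt) :
                    /* ordinary interrupt routed to one CPU at a time */
                    request_irq(irq, demo_timer_handler, IRQF_TIMER,
                                "demo-timer", evt);
    }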
|
/linux-6.6.21/Documentation/trace/coresight/ |
D | coresight-trbe.rst |
      13  Trace Buffer Extension (TRBE) is a percpu hardware which captures in system
      14  memory, CPU traces generated from a corresponding percpu tracing unit. This
|
/linux-6.6.21/arch/sparc/kernel/ |
D | sun4m_irq.c |
      107  bool percpu;  member
      200  if (handler_data->percpu) {  in sun4m_mask_irq()
      219  if (handler_data->percpu) {  in sun4m_unmask_irq()
      278  handler_data->percpu = real_irq < OBP_INT_LEVEL_ONBOARD;  in sun4m_build_device_irq()
|
/linux-6.6.21/arch/x86/kernel/ |
D | vmlinux.lds.S |
      107  percpu PT_LOAD FLAGS(6); /* RW_ */
      235  PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
      236  ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
|
/linux-6.6.21/kernel/sched/ |
D | cpuacct.c |
      213  u64 percpu;  in __cpuacct_percpu_seq_show() local
      217  percpu = cpuacct_cpuusage_read(ca, i, index);  in __cpuacct_percpu_seq_show()
      218  seq_printf(m, "%llu ", (unsigned long long) percpu);  in __cpuacct_percpu_seq_show()
|
/linux-6.6.21/arch/ia64/kernel/ |
D | vmlinux.lds.S |
      20  percpu PT_LOAD;
      167  PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
|
/linux-6.6.21/arch/arm64/kvm/hyp/nvhe/ |
D | hyp.lds.S |
      25  BEGIN_HYP_SECTION(.data..percpu)
|
/linux-6.6.21/Documentation/locking/ |
D | index.rst |
      24  percpu-rw-semaphore
|
/linux-6.6.21/Documentation/RCU/ |
D | rcuref.rst |
      8  Please note that the percpu-ref feature is likely your first
      10  include/linux/percpu-refcount.h for more information. However, in
      11  those unusual cases where percpu-ref would consume too much memory,
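
The passage points readers at percpu-ref (include/linux/percpu-refcount.h) before hand-rolled RCU reference counting. A minimal sketch of that API, assuming a hypothetical demo_object that embeds the ref and is freed from the release callback:

    #include <linux/percpu-refcount.h>
    #include <linux/container_of.h>
    #include <linux/slab.h>

    struct demo_object {
            struct percpu_ref ref;
            /* ... payload ... */
    };

    static void demo_release(struct percpu_ref *ref)
    {
            struct demo_object *obj = container_of(ref, struct demo_object, ref);

            percpu_ref_exit(&obj->ref);     /* free the per-CPU counters */
            kfree(obj);                     /* last reference is gone */
    }

    static struct demo_object *demo_create(void)
    {
            struct demo_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (!obj)
                    return NULL;
            /* starts at 1, counting in cheap per-CPU mode */
            if (percpu_ref_init(&obj->ref, demo_release, 0, GFP_KERNEL)) {
                    kfree(obj);
                    return NULL;
            }
            return obj;
    }

    static void demo_use(struct demo_object *obj)
    {
            if (percpu_ref_tryget_live(&obj->ref)) {        /* fails after kill */
                    /* ... use obj ... */
                    percpu_ref_put(&obj->ref);
            }
    }

    static void demo_teardown(struct demo_object *obj)
    {
            /* switch to atomic mode and drop the initial reference */
            percpu_ref_kill(&obj->ref);
    }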
|
/linux-6.6.21/kernel/locking/ |
D | spinlock_rt.c |
      134  struct lock_class_key *key, bool percpu)  in __rt_spin_lock_init() argument
      136  u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;  in __rt_spin_lock_init()
|
/linux-6.6.21/arch/arm64/kernel/ |
D | vmlinux.lds.S |
      28  HYP_SECTION_NAME(.data..percpu) : { \
      29  *(HYP_SECTION_NAME(.data..percpu)) \
|