/linux-6.6.21/fs/ubifs/
    shrinker.c
          83  int freed;  in shrink_tnc() local
         116  freed = ubifs_destroy_tnc_subtree(c, znode);  in shrink_tnc()
         117  atomic_long_sub(freed, &ubifs_clean_zn_cnt);  in shrink_tnc()
         118  atomic_long_sub(freed, &c->clean_zn_cnt);  in shrink_tnc()
         119  total_freed += freed;  in shrink_tnc()
         149  int freed = 0;  in shrink_tnc_trees() local
         187  freed += shrink_tnc(c, nr, age, contention);  in shrink_tnc_trees()
         198  if (freed >= nr)  in shrink_tnc_trees()
         202  return freed;  in shrink_tnc_trees()
         284  unsigned long freed;  in ubifs_shrink_scan() local
         [all …]

/linux-6.6.21/drivers/staging/octeon/
    ethernet-mem.c
          26  int freed = elements;  in cvm_oct_fill_hw_skbuff() local
          28  while (freed) {  in cvm_oct_fill_hw_skbuff()
          36  freed--;  in cvm_oct_fill_hw_skbuff()
          38  return elements - freed;  in cvm_oct_fill_hw_skbuff()
          81  int freed = elements;  in cvm_oct_fill_hw_memory() local
          83  while (freed) {  in cvm_oct_fill_hw_memory()
         103  freed--;  in cvm_oct_fill_hw_memory()
         105  return elements - freed;  in cvm_oct_fill_hw_memory()
         139  int freed;  in cvm_oct_mem_fill_fpa() local
         142  freed = cvm_oct_fill_hw_skbuff(pool, size, elements);  in cvm_oct_mem_fill_fpa()
         [all …]

/linux-6.6.21/fs/f2fs/
    shrinker.c
          86  unsigned long freed = 0;  in f2fs_shrink_scan() local
         109  freed += f2fs_shrink_age_extent_tree(sbi, nr >> 2);  in f2fs_shrink_scan()
         112  freed += f2fs_shrink_read_extent_tree(sbi, nr >> 2);  in f2fs_shrink_scan()
         115  if (freed < nr)  in f2fs_shrink_scan()
         116  freed += f2fs_try_to_free_nats(sbi, nr - freed);  in f2fs_shrink_scan()
         119  if (freed < nr)  in f2fs_shrink_scan()
         120  freed += f2fs_try_to_free_nids(sbi, nr - freed);  in f2fs_shrink_scan()
         126  if (freed >= nr)  in f2fs_shrink_scan()
         130  return freed;  in f2fs_shrink_scan()

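The ubifs and f2fs shrinker.c hits above all come from .scan_objects callbacks of the kernel shrinker API: the callback reclaims up to sc->nr_to_scan objects, accumulates the count in freed, and returns it (or SHRINK_STOP when nothing could be reclaimed). Below is a minimal sketch of that pattern against the 6.6-era API; demo_cache_objects and demo_trim() are invented placeholders for the per-subsystem caches and reclaim helpers, and registration is only noted in a comment.

    #include <linux/atomic.h>
    #include <linux/minmax.h>
    #include <linux/shrinker.h>

    /* Invented stand-in for the per-subsystem object counts (clean znodes,
     * extent/NAT/NID entries, ...) that the real scans above drain. */
    static atomic_long_t demo_cache_objects;

    /* Pretend to reclaim up to @nr objects and report how many actually went. */
    static unsigned long demo_trim(unsigned long nr)
    {
            unsigned long avail = atomic_long_read(&demo_cache_objects);
            unsigned long freed = min(nr, avail);

            atomic_long_sub(freed, &demo_cache_objects);
            return freed;
    }

    static unsigned long demo_count_objects(struct shrinker *shrink,
                                            struct shrink_control *sc)
    {
            return atomic_long_read(&demo_cache_objects);
    }

    static unsigned long demo_scan_objects(struct shrinker *shrink,
                                           struct shrink_control *sc)
    {
            unsigned long nr = sc->nr_to_scan;
            unsigned long freed = 0;

            /* Reclaim piecemeal, as f2fs does, until the request is satisfied. */
            while (freed < nr) {
                    unsigned long got = demo_trim(nr - freed);

                    if (!got)
                            break;
                    freed += got;
            }

            return freed ? freed : SHRINK_STOP;
    }

    static struct shrinker demo_shrinker = {
            .count_objects = demo_count_objects,
            .scan_objects  = demo_scan_objects,
            .seeks         = DEFAULT_SEEKS,
    };
    /* Registered elsewhere, e.g. register_shrinker(&demo_shrinker, "demo"). */
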
/linux-6.6.21/drivers/gpu/drm/msm/
    msm_gem_shrinker.c
         109  unsigned long freed;  in msm_gem_shrinker_scan() member
         119  unsigned long freed = 0;  in msm_gem_shrinker_scan() local
         125  stages[i].freed =  in msm_gem_shrinker_scan()
         129  nr -= stages[i].freed;  in msm_gem_shrinker_scan()
         130  freed += stages[i].freed;  in msm_gem_shrinker_scan()
         134  if (freed) {  in msm_gem_shrinker_scan()
         135  trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,  in msm_gem_shrinker_scan()
         136  stages[1].freed, stages[2].freed,  in msm_gem_shrinker_scan()
         137  stages[3].freed);  in msm_gem_shrinker_scan()
         140  return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;  in msm_gem_shrinker_scan()

/linux-6.6.21/drivers/gpu/drm/panfrost/
    panfrost_gem_shrinker.c
          71  unsigned long freed = 0;  in panfrost_gem_shrinker_scan() local
          77  if (freed >= sc->nr_to_scan)  in panfrost_gem_shrinker_scan()
          81  freed += shmem->base.size >> PAGE_SHIFT;  in panfrost_gem_shrinker_scan()
          88  if (freed > 0)  in panfrost_gem_shrinker_scan()
          89  pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);  in panfrost_gem_shrinker_scan()
          91  return freed;  in panfrost_gem_shrinker_scan()

/linux-6.6.21/fs/erofs/
    utils.c
         155  unsigned int freed = 0;  in erofs_shrink_workstation() local
         165  ++freed;  in erofs_shrink_workstation()
         167  return freed;  in erofs_shrink_workstation()
         171  return freed;  in erofs_shrink_workstation()
         220  unsigned long freed = 0;  in erofs_shrink_scan() local
         247  freed += erofs_shrink_workstation(sbi, nr - freed);  in erofs_shrink_scan()
         260  if (freed >= nr)  in erofs_shrink_scan()
         264  return freed;  in erofs_shrink_scan()

/linux-6.6.21/fs/nfsd/
    nfscache.c
         124  unsigned long freed = 0;  in nfsd_cacherep_dispose() local
         130  freed++;  in nfsd_cacherep_dispose()
         132  return freed;  in nfsd_cacherep_dispose()
         288  unsigned int freed = 0;  in nfsd_prune_bucket_locked() local
         308  if (max && ++freed > max)  in nfsd_prune_bucket_locked()
         348  unsigned long freed = 0;  in nfsd_reply_cache_scan() local
         362  freed += nfsd_cacherep_dispose(&dispose);  in nfsd_reply_cache_scan()
         363  if (freed > sc->nr_to_scan)  in nfsd_reply_cache_scan()
         367  trace_nfsd_drc_gc(nn, freed);  in nfsd_reply_cache_scan()
         368  return freed;  in nfsd_reply_cache_scan()
         [all …]

/linux-6.6.21/sound/soc/intel/atom/sst/
    sst_ipc.c
          91  int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)  in sst_free_block() argument
          98  if (block == freed) {  in sst_free_block()
          99  pr_debug("pvt_id freed --> %d\n", freed->drv_id);  in sst_free_block()
         101  list_del(&freed->node);  in sst_free_block()
         103  kfree(freed->data);  in sst_free_block()
         104  freed->data = NULL;  in sst_free_block()
         105  kfree(freed);  in sst_free_block()

/linux-6.6.21/drivers/gpu/drm/i915/gem/
    i915_gem_shrinker.c
         273  unsigned long freed = 0;  in i915_gem_shrink_all() local
         276  freed = i915_gem_shrink(NULL, i915, -1UL, NULL,  in i915_gem_shrink_all()
         281  return freed;  in i915_gem_shrink_all()
         318  unsigned long freed;  in i915_gem_shrinker_scan() local
         322  freed = i915_gem_shrink(NULL, i915,  in i915_gem_shrinker_scan()
         331  freed += i915_gem_shrink(NULL, i915,  in i915_gem_shrinker_scan()
         341  return sc->nr_scanned ? freed : SHRINK_STOP;  in i915_gem_shrinker_scan()

    i915_gem_object.c
         401  struct llist_node *freed)  in __i915_gem_free_objects() argument
         405  llist_for_each_entry_safe(obj, on, freed, freed) {  in __i915_gem_free_objects()
         423  struct llist_node *freed = llist_del_all(&i915->mm.free_list);  in i915_gem_flush_free_objects() local
         425  if (unlikely(freed))  in i915_gem_flush_free_objects()
         426  __i915_gem_free_objects(i915, freed);  in i915_gem_flush_free_objects()
         463  if (llist_add(&obj->freed, &i915->mm.free_list))  in i915_gem_free_object()

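The i915_gem_object.c hits show the llist-based deferred-free idiom: the release path pushes each object's freed node onto a lock-free list with llist_add() (whose return value reports whether the list was previously empty, i.e. whether a flush needs scheduling), and the flush path detaches the whole list at once with llist_del_all() and walks it with llist_for_each_entry_safe(). A stripped-down sketch follows, with a hypothetical struct demo_obj standing in for the driver's object type.

    #include <linux/llist.h>
    #include <linux/slab.h>

    struct demo_obj {
            /* ... object payload ... */
            struct llist_node freed;        /* link on the deferred-free list */
    };

    static LLIST_HEAD(demo_free_list);

    /* Hot path: queue the object; returns true if a flush should be scheduled
     * because the list was empty before this add. */
    static bool demo_obj_queue_free(struct demo_obj *obj)
    {
            return llist_add(&obj->freed, &demo_free_list);
    }

    /* Flush path: detach the whole list in one atomic step, then walk it.
     * The _safe variant is needed because each entry is freed during the walk. */
    static void demo_flush_free_list(void)
    {
            struct llist_node *freed = llist_del_all(&demo_free_list);
            struct demo_obj *obj, *on;

            if (!freed)
                    return;

            llist_for_each_entry_safe(obj, on, freed, freed)
                    kfree(obj);
    }
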
/linux-6.6.21/net/sctp/
    ulpqueue.c
         961  __u16 freed = 0;  in sctp_ulpq_renege_list() local
         981  freed += skb_headlen(skb);  in sctp_ulpq_renege_list()
         985  freed += skb_headlen(last);  in sctp_ulpq_renege_list()
         999  if (freed >= needed)  in sctp_ulpq_renege_list()
        1000  return freed;  in sctp_ulpq_renege_list()
        1003  return freed;  in sctp_ulpq_renege_list()
        1073  __u32 freed = 0;  in sctp_ulpq_renege() local
        1080  freed = sctp_ulpq_renege_order(ulpq, needed);  in sctp_ulpq_renege()
        1081  if (freed < needed)  in sctp_ulpq_renege()
        1082  freed += sctp_ulpq_renege_frags(ulpq, needed - freed);  in sctp_ulpq_renege()
        [all …]

/linux-6.6.21/net/sunrpc/
    auth.c
         431  long freed = 0;  in rpcauth_prune_expired() local
         451  freed++;  in rpcauth_prune_expired()
         454  return freed ? freed : SHRINK_STOP;  in rpcauth_prune_expired()
         461  unsigned long freed;  in rpcauth_cache_do_shrink() local
         464  freed = rpcauth_prune_expired(&free, nr_to_scan);  in rpcauth_cache_do_shrink()
         468  return freed;  in rpcauth_cache_do_shrink()

/linux-6.6.21/include/trace/events/
    jbd2.h
         330  unsigned long block_nr, unsigned long freed),
         332  TP_ARGS(journal, first_tid, block_nr, freed),
         339  __field(unsigned long, freed )
         347  __entry->freed = freed;
         353  __entry->block_nr, __entry->freed)

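The jbd2.h hits are fragments of a TRACE_EVENT() definition: freed appears in TP_PROTO()/TP_ARGS(), is declared with __field() in TP_STRUCT__entry, copied in TP_fast_assign(), and printed by TP_printk(). The overall shape of such a definition is sketched below with an invented event name; the usual trace-header boilerplate (#undef TRACE_SYSTEM, TRACE_INCLUDE_FILE, the final include of define_trace.h) is omitted.

    /* Hypothetical tracepoint recording how many objects a shrink pass freed. */
    TRACE_EVENT(demo_shrink,

            TP_PROTO(unsigned long scanned, unsigned long freed),

            TP_ARGS(scanned, freed),

            TP_STRUCT__entry(
                    __field(unsigned long, scanned)
                    __field(unsigned long, freed)
            ),

            TP_fast_assign(
                    __entry->scanned = scanned;
                    __entry->freed = freed;
            ),

            TP_printk("scanned=%lu freed=%lu",
                      __entry->scanned, __entry->freed)
    );
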
/linux-6.6.21/Documentation/trace/
    events-kmem.rst
          64  When a page is freed directly by the caller, the only mm_page_free event
          68  When pages are freed in batch, the also mm_page_free_batched is triggered.
          70  freed in batch with a page list. Significant amounts of activity here could
          90  When the per-CPU list is too full, a number of pages are freed, each one
         101  can be allocated and freed on the same CPU through some algorithm change.

/linux-6.6.21/drivers/firmware/arm_scmi/
    notify.c
        1238  bool freed = false;  in scmi_put_handler_unlocked() local
        1244  freed = true;  in scmi_put_handler_unlocked()
        1247  return freed;  in scmi_put_handler_unlocked()
        1253  bool freed;  in scmi_put_handler() local
        1263  freed = scmi_put_handler_unlocked(ni, hndl);  in scmi_put_handler()
        1273  if (freed)  in scmi_put_handler()
        1282  bool freed;  in scmi_put_active_handler() local
        1287  freed = scmi_put_handler_unlocked(ni, hndl);  in scmi_put_active_handler()
        1289  if (freed)  in scmi_put_active_handler()

/linux-6.6.21/Documentation/sound/designs/
    jack-controls.rst
          30  Those kcontrols will be freed automatically when the Jack is freed.

/linux-6.6.21/net/rds/
    ib_frmr.c
         363  unsigned int freed = *nfreed;  in rds_ib_unreg_frmr() local
         382  if (freed < goal || frmr->fr_state == FRMR_IS_STALE) {  in rds_ib_unreg_frmr()
         395  freed++;  in rds_ib_unreg_frmr()
         398  *nfreed = freed;  in rds_ib_unreg_frmr()

/linux-6.6.21/sound/pci/hda/
    hda_intel.h
          30  unsigned int freed:1; /* resources already released */  member

/linux-6.6.21/drivers/char/agp/
    efficeon-agp.c
         160  int index, freed = 0;  in efficeon_free_gatt_table() local
         167  freed++;  in efficeon_free_gatt_table()
         174  printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed);  in efficeon_free_gatt_table()

/linux-6.6.21/fs/jbd2/
    checkpoint.c
         411  unsigned long freed;  in jbd2_journal_shrink_checkpoint_list() local
         441  freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,  in jbd2_journal_shrink_checkpoint_list()
         443  nr_freed += freed;  in jbd2_journal_shrink_checkpoint_list()
         444  (*nr_to_scan) -= min(*nr_to_scan, freed);  in jbd2_journal_shrink_checkpoint_list()

/linux-6.6.21/drivers/tty/
    tty_buffer.c
         125  unsigned int freed = 0;  in tty_buffer_free_all() local
         130  freed += p->size;  in tty_buffer_free_all()
         143  WARN(still_used != freed, "we still have not freed %d bytes!",  in tty_buffer_free_all()
         144  still_used - freed);  in tty_buffer_free_all()

/linux-6.6.21/fs/nfs/
    nfs42xattr.c
         861  unsigned long freed;  in nfs4_xattr_cache_scan() local
         864  freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,  in nfs4_xattr_cache_scan()
         874  return freed;  in nfs4_xattr_cache_scan()
         942  unsigned long freed;  in nfs4_xattr_entry_scan() local
         949  freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose);  in nfs4_xattr_entry_scan()
         965  return freed;  in nfs4_xattr_entry_scan()

/linux-6.6.21/Documentation/core-api/
    memory-allocation.rst
         173  When the allocated memory is no longer needed it must be freed.
         175  Objects allocated by `kmalloc` can be freed by `kfree` or `kvfree`. Objects
         176  allocated by `kmem_cache_alloc` can be freed with `kmem_cache_free`, `kfree`
         182  Memory allocated by `vmalloc` can be freed with `vfree` or `kvfree`.
         183  Memory allocated by `kvmalloc` can be freed with `kvfree`.
         184  Caches created by `kmem_cache_create` should be freed with

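The memory-allocation.rst hits spell out which release function pairs with which allocator. A compact illustration of those pairings (error handling omitted for brevity; demo_cache and struct demo are placeholder names):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    struct demo { int v; };

    static void demo_alloc_free_pairs(void)
    {
            struct kmem_cache *cache;
            struct demo *obj;
            void *kbuf, *vbuf, *kvbuf;

            kbuf = kmalloc(256, GFP_KERNEL);        /* kmalloc  -> kfree (kvfree also works) */
            kfree(kbuf);

            vbuf = vmalloc(1 << 20);                /* vmalloc  -> vfree (kvfree also works) */
            vfree(vbuf);

            kvbuf = kvmalloc(1 << 20, GFP_KERNEL);  /* kvmalloc -> kvfree */
            kvfree(kvbuf);

            cache = kmem_cache_create("demo_cache", sizeof(struct demo), 0, 0, NULL);
            obj = kmem_cache_alloc(cache, GFP_KERNEL);
            kmem_cache_free(cache, obj);            /* kmem_cache_alloc  -> kmem_cache_free  */
            kmem_cache_destroy(cache);              /* kmem_cache_create -> kmem_cache_destroy */
    }
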
/linux-6.6.21/drivers/net/wireless/intel/iwlwifi/dvm/
    tx.c
        1121  int freed;  in iwlagn_rx_reply_tx() local
        1175  freed = 0;  in iwlagn_rx_reply_tx()
        1222  freed++;  in iwlagn_rx_reply_tx()
        1232  if (!is_agg && freed != 1)  in iwlagn_rx_reply_tx()
        1233  IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);  in iwlagn_rx_reply_tx()
        1271  int freed;  in iwlagn_rx_reply_compressed_ba() local
        1351  freed = 0;  in iwlagn_rx_reply_compressed_ba()
        1358  freed++;  in iwlagn_rx_reply_compressed_ba()
        1371  if (freed == 1) {  in iwlagn_rx_reply_compressed_ba()

/linux-6.6.21/mm/
    Kconfig.debug
          22  pages are being allocated and freed, as unexpected state changes
          53  allocation as well as poisoning memory on free to catch use of freed
         128  reduce the risk of information leaks from freed data. This does
         135  If you are only interested in sanitization of freed pages without
         228  difference being that the orphan objects are not freed but
         248  freed before kmemleak is fully initialised, use a static pool