/linux-6.6.21/arch/x86/hyperv/

  mmu.c
     64  struct hv_tlb_flush *flush;   in hyperv_flush_tlb_multi() local
     76  flush = *this_cpu_ptr(hyperv_pcpu_input_arg);   in hyperv_flush_tlb_multi()
     78  if (unlikely(!flush)) {   in hyperv_flush_tlb_multi()
     88  flush->address_space = virt_to_phys(info->mm->pgd);   in hyperv_flush_tlb_multi()
     89  flush->address_space &= CR3_ADDR_MASK;   in hyperv_flush_tlb_multi()
     90  flush->flags = 0;   in hyperv_flush_tlb_multi()
     92  flush->address_space = 0;   in hyperv_flush_tlb_multi()
     93  flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;   in hyperv_flush_tlb_multi()
     96  flush->processor_mask = 0;   in hyperv_flush_tlb_multi()
     98  flush->flags |= HV_FLUSH_ALL_PROCESSORS;   in hyperv_flush_tlb_multi()
    [all …]

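The mmu.c hits above are hyperv_flush_tlb_multi() filling the per-CPU hypercall input page before issuing the Hyper-V flush hypercall: either one address space named by its CR3, or every address space, optionally on all processors. A standalone sketch of that argument-building step follows; the field layout mirrors the kernel's struct hv_tlb_flush, but the flag and mask values here are illustrative stand-ins, not the definitions from hyperv-tlfs.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; not the kernel's actual values. */
#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES  (1ULL << 0)
#define HV_FLUSH_ALL_PROCESSORS              (1ULL << 1)
#define CR3_ADDR_MASK                        (~0xfffULL)

struct hv_tlb_flush {
        uint64_t address_space;   /* guest CR3, or 0 for "all address spaces" */
        uint64_t flags;
        uint64_t processor_mask;
};

/* Mirrors the argument-building step in hyperv_flush_tlb_multi(). */
static void fill_tlb_flush(struct hv_tlb_flush *flush, uint64_t cr3,
                           int all_spaces, int all_cpus)
{
        if (!all_spaces) {
                flush->address_space = cr3 & CR3_ADDR_MASK;
                flush->flags = 0;
        } else {
                flush->address_space = 0;
                flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
        }
        flush->processor_mask = 0;
        if (all_cpus)
                flush->flags |= HV_FLUSH_ALL_PROCESSORS;
}

int main(void)
{
        struct hv_tlb_flush flush;

        fill_tlb_flush(&flush, 0x1234000, 0, 1);
        printf("address_space=%#llx flags=%#llx\n",
               (unsigned long long)flush.address_space,
               (unsigned long long)flush.flags);
        return 0;
}
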
  nested.c
     22  struct hv_guest_mapping_flush *flush;   in hyperv_flush_guest_mapping() local
     32  flush = *this_cpu_ptr(hyperv_pcpu_input_arg);   in hyperv_flush_guest_mapping()
     34  if (unlikely(!flush)) {   in hyperv_flush_guest_mapping()
     39  flush->address_space = as;   in hyperv_flush_guest_mapping()
     40  flush->flags = 0;   in hyperv_flush_guest_mapping()
     43  flush, NULL);   in hyperv_flush_guest_mapping()
     56  struct hv_guest_mapping_flush_list *flush,   in hyperv_fill_flush_guest_mapping_list() argument
     73  flush->gpa_list[gpa_n].page.additional_pages = additional_pages;   in hyperv_fill_flush_guest_mapping_list()
     74  flush->gpa_list[gpa_n].page.largepage = false;   in hyperv_fill_flush_guest_mapping_list()
     75  flush->gpa_list[gpa_n].page.basepfn = cur;   in hyperv_fill_flush_guest_mapping_list()
    [all …]

/linux-6.6.21/fs/btrfs/

  space-info.c
    346  enum btrfs_reserve_flush_enum flush)   in calc_available_free_space() argument
    373  if (flush == BTRFS_RESERVE_FLUSH_ALL)   in calc_available_free_space()
    382  enum btrfs_reserve_flush_enum flush)   in btrfs_can_overcommit() argument
    392  avail = calc_available_free_space(fs_info, space_info, flush);   in btrfs_can_overcommit()
    417  enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;   in btrfs_try_granting_tickets() local
    432  flush)) {   in btrfs_try_granting_tickets()
    447  flush = BTRFS_RESERVE_FLUSH_ALL;   in btrfs_try_granting_tickets()
   1060  space_info->flush = 0;   in btrfs_async_reclaim_metadata_space()
   1072  space_info->flush = 0;   in btrfs_async_reclaim_metadata_space()
   1115  space_info->flush = 0;   in btrfs_async_reclaim_metadata_space()
    [all …]

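The space-info.c matches centre on btrfs_can_overcommit(): a metadata reservation may be granted beyond what is currently allocated as long as enough unallocated device space could still back it, and how much of that space is counted depends on the btrfs_reserve_flush_enum level the caller passes in. A rough model of that check; the divisors are placeholders that only show the shape of the decision, not btrfs's exact factors:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum reserve_flush { RESERVE_NO_FLUSH, RESERVE_FLUSH_ALL };

/* Count only a fraction of the unallocated space as "available"; be more
 * optimistic when the caller is allowed to flush aggressively. */
static uint64_t calc_available(uint64_t unallocated, enum reserve_flush flush)
{
        return flush == RESERVE_FLUSH_ALL ? unallocated / 2 : unallocated / 8;
}

static bool can_overcommit(uint64_t used, uint64_t total, uint64_t unallocated,
                           uint64_t bytes, enum reserve_flush flush)
{
        return used + bytes < total + calc_available(unallocated, flush);
}

int main(void)
{
        /* 900 MiB used of a 1 GiB pool, 512 MiB still unallocated on disk. */
        uint64_t MiB = 1 << 20;

        printf("no-flush:  %d\n",
               can_overcommit(900 * MiB, 1024 * MiB, 512 * MiB, 200 * MiB,
                              RESERVE_NO_FLUSH));
        printf("flush-all: %d\n",
               can_overcommit(900 * MiB, 1024 * MiB, 512 * MiB, 200 * MiB,
                              RESERVE_FLUSH_ALL));
        return 0;
}

The same 200 MiB request fails under RESERVE_NO_FLUSH but succeeds under RESERVE_FLUSH_ALL, which is exactly the asymmetry the flush argument encodes.
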
  delalloc-space.c
    120  enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_DATA;   in btrfs_alloc_data_chunk_ondemand() local
    126  flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE;   in btrfs_alloc_data_chunk_ondemand()
    128  return btrfs_reserve_data_bytes(fs_info, bytes, flush);   in btrfs_alloc_data_chunk_ondemand()
    136  enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_DATA;   in btrfs_check_data_free_space() local
    145  flush = BTRFS_RESERVE_NO_FLUSH;   in btrfs_check_data_free_space()
    147  flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE;   in btrfs_check_data_free_space()
    149  ret = btrfs_reserve_data_bytes(fs_info, len, flush);   in btrfs_check_data_free_space()
    315  enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;   in btrfs_delalloc_reserve_metadata() local
    328  flush = BTRFS_RESERVE_NO_FLUSH;   in btrfs_delalloc_reserve_metadata()
    331  flush = BTRFS_RESERVE_FLUSH_LIMIT;   in btrfs_delalloc_reserve_metadata()
    [all …]

/linux-6.6.21/net/ipv4/

  tcp_offload.c
    193  int flush = 1;   in tcp_gro_receive() local
    236  flush = NAPI_GRO_CB(p)->flush;   in tcp_gro_receive()
    237  flush |= (__force int)(flags & TCP_FLAG_CWR);   in tcp_gro_receive()
    238  flush |= (__force int)((flags ^ tcp_flag_word(th2)) &   in tcp_gro_receive()
    240  flush |= (__force int)(th->ack_seq ^ th2->ack_seq);   in tcp_gro_receive()
    242  flush |= *(u32 *)((u8 *)th + i) ^   in tcp_gro_receive()
    252  flush |= NAPI_GRO_CB(p)->flush_id;   in tcp_gro_receive()
    263  flush |= (mss != skb_shinfo(skb)->gso_size);   in tcp_gro_receive()
    265  flush |= (len - 1) >= mss;   in tcp_gro_receive()
    267  flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);   in tcp_gro_receive()
    [all …]

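tcp_gro_receive() uses the common GRO idiom visible above: every reason not to coalesce is OR-ed into a single flush word, and any non-zero bit forces the held packet to be flushed instead of merged. A stripped-down model of that accumulation, using hypothetical header fields rather than the kernel's struct tcphdr and NAPI_GRO_CB:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the fields tcp_gro_receive()
 * actually compares via struct tcphdr and the GRO control block. */
struct seg {
        uint32_t seq;
        uint32_t ack_seq;
        uint16_t flags;    /* TCP flag bits */
        uint16_t mss;
        uint32_t len;      /* payload length */
};

/* Non-zero means "do not merge": every mismatch is OR-ed into one word. */
static int gro_flush_check(const struct seg *held, uint32_t held_len,
                           const struct seg *cur)
{
        int flush = 0;

        flush |= cur->ack_seq ^ held->ack_seq;        /* ACK must match       */
        flush |= (cur->flags ^ held->flags) != 0;     /* TCP flags must match */
        flush |= cur->mss != held->mss;               /* same advertised MSS  */
        flush |= (cur->len - 1) >= cur->mss;          /* must not exceed MSS  */
        flush |= (held->seq + held_len) ^ cur->seq;   /* strictly in sequence */

        return flush;
}

int main(void)
{
        struct seg held = { .seq = 1000, .ack_seq = 50, .flags = 0x10,
                            .mss = 1460, .len = 1460 };
        struct seg cur  = { .seq = 2460, .ack_seq = 50, .flags = 0x10,
                            .mss = 1460, .len = 400 };

        /* Prints 0: the second segment follows in sequence and may merge. */
        printf("flush=%d\n", gro_flush_check(&held, held.len, &cur) != 0);
        return 0;
}
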
/linux-6.6.21/lib/

  decompress_inflate.c
     44  long (*flush)(void*, unsigned long),   in __gunzip()
     53  if (flush) {   in __gunzip()
     82  strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :   in __gunzip()
    138  if (!flush) {   in __gunzip()
    159  if (flush && strm->next_out > out_buf) {   in __gunzip()
    161  if (l != flush(out_buf, l)) {   in __gunzip()
    193  if (flush)   in __gunzip()
    202  long (*flush)(void*, unsigned long),   in gunzip()
    207  return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error);   in gunzip()
    212  long (*flush)(void*, unsigned long),   in __decompress()
    [all …]

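All of the lib/decompress_*.c entries in this directory share one calling convention: an optional flush callback, long (*flush)(void *buf, unsigned long len), which the decompressor calls whenever its output buffer fills and which must return exactly the number of bytes it accepted (see the "if (l != flush(out_buf, l))" style checks above). A minimal userspace sketch of a conforming callback, driven by a stubbed-out decompressor standing in for __gunzip() and friends:

#include <stdio.h>
#include <string.h>

/* A flush callback in the style the in-kernel decompressors expect: it is
 * handed a filled output buffer and must return exactly how many bytes it
 * accepted; anything else is treated as an error by the caller. */
static long flush_to_stdout(void *buf, unsigned long len)
{
        size_t written = fwrite(buf, 1, len, stdout);

        return (long)written;   /* short write => mismatch => caller errors out */
}

/* Hypothetical stand-in for a decompressor such as __gunzip(): it pushes
 * one canned block through the callback just to show the contract. */
static int fake_decompress(long (*flush)(void *, unsigned long))
{
        char block[] = "decompressed data\n";
        unsigned long len = strlen(block);

        if (flush && flush(block, len) != (long)len)
                return -1;      /* flush consumed less than it was offered */
        return 0;
}

int main(void)
{
        return fake_decompress(flush_to_stdout) ? 1 : 0;
}
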
  decompress_unxz.c
    255  long (*flush)(void *src, unsigned long size),   in unxz()
    271  if (fill == NULL && flush == NULL)   in unxz()
    279  if (flush == NULL) {   in unxz()
    301  if (fill == NULL && flush == NULL) {   in unxz()
    327  if (flush != NULL && (b.out_pos == b.out_size   in unxz()
    334  if (flush(b.out, b.out_pos) != (long)b.out_pos)   in unxz()
    344  if (flush != NULL)   in unxz()
    384  if (flush != NULL)   in unxz()
    402  long (*flush)(void*, unsigned long),   in __decompress()
    407  return unxz(buf, len, fill, flush, out_buf, pos, error);   in __decompress()

  decompress_unzstd.c
    167  long (*flush)(void*, unsigned long),   in __unzstd()
    191  if (fill == NULL && flush == NULL)   in __unzstd()
    227  if (flush != NULL) {   in __unzstd()
    308  if (flush != NULL && out.pos > 0) {   in __unzstd()
    309  if (out.pos != flush(out.dst, out.pos)) {   in __unzstd()
    335  long (*flush)(void*, unsigned long),   in unzstd()
    340  return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error);   in unzstd()
    345  long (*flush)(void*, unsigned long),   in __decompress()
    350  return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error);   in __decompress()

  decompress_unlz4.c
     33  long (*flush)(void *, unsigned long),   in unlz4()
     52  } else if (!flush) {   in unlz4()
    174  if (flush && flush(outp, dest_len) != dest_len)   in unlz4()
    209  long (*flush)(void*, unsigned long),   in __decompress()
    215  return unlz4(buf, in_len - 4, fill, flush, output, posp, error);   in __decompress()

  decompress_unlzo.c
     99  long (*flush)(void *, unsigned long),   in unlzo()
    112  } else if (!flush) {   in unlzo()
    243  if (flush && flush(out_buf, dst_len) != dst_len)   in unlzo()
    279  long (*flush)(void*, unsigned long),   in __decompress()
    284  return unlzo(buf, len, fill, flush, out_buf, pos, error);   in __decompress()

/linux-6.6.21/lib/zlib_deflate/

  deflate.c
     60  #define DEFLATE_HOOK(strm, flush, bstate) 0   argument
     69  typedef block_state (*compress_func) (deflate_state *s, int flush);
     73  static block_state deflate_stored (deflate_state *s, int flush);
     74  static block_state deflate_fast (deflate_state *s, int flush);
     75  static block_state deflate_slow (deflate_state *s, int flush);
    331  int flush   in zlib_deflate() argument
    338  flush > Z_FINISH || flush < 0) {   in zlib_deflate()
    344  (s->status == FINISH_STATE && flush != Z_FINISH)) {   in zlib_deflate()
    351  s->last_flush = flush;   in zlib_deflate()
    393  } else if (strm->avail_in == 0 && flush <= old_flush &&   in zlib_deflate()
    [all …]

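In zlib_deflate() the flush argument is one of the Z_* flush levels rather than a boolean, and the matches show the guards built around it: values above Z_FINISH are rejected, new input after FINISH_STATE is refused unless flush is Z_FINISH, and last_flush is remembered so a repeated call with a weaker flush level can be detected. The ordinary userspace zlib API follows the same contract; a small one-shot compression that ends the stream with Z_FINISH might look like this (build with -lz; this uses <zlib.h>, not the kernel's zlib_* wrappers):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
        const char *msg = "hello, flush";
        unsigned char out[256];
        z_stream strm;

        memset(&strm, 0, sizeof(strm));
        if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
                return 1;

        strm.next_in = (unsigned char *)msg;
        strm.avail_in = strlen(msg);
        strm.next_out = out;
        strm.avail_out = sizeof(out);

        /* Z_FINISH tells deflate() there is no more input: it must emit the
         * final block and return Z_STREAM_END once everything fit into out. */
        if (deflate(&strm, Z_FINISH) != Z_STREAM_END) {
                deflateEnd(&strm);
                return 1;
        }

        printf("compressed %lu bytes into %lu\n",
               strm.total_in, strm.total_out);
        return deflateEnd(&strm);
}
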
/linux-6.6.21/tools/testing/selftests/drivers/net/netdevsim/

  nexthop.sh
    147  $IP nexthop flush &> /dev/null
    182  $IP nexthop flush &> /dev/null
    202  $IP nexthop flush &> /dev/null
    247  $IP nexthop flush &> /dev/null
    267  $IP nexthop flush &> /dev/null
    289  $IP nexthop flush &> /dev/null
    314  $IP nexthop flush &> /dev/null
    343  $IP nexthop flush &> /dev/null
    373  $IP nexthop flush &> /dev/null
    422  $IP nexthop flush &> /dev/null
    [all …]

/linux-6.6.21/Documentation/arch/x86/

  tlb.rst
     12  from areas other than the one we are trying to flush will be
     21  1. The size of the flush being performed. A flush of the entire
     25  be no collateral damage caused by doing the global flush, and
     26  all of the individual flush will have ended up being wasted
     29  damage we do with a full flush. So, the larger the TLB, the
     30  more attractive an individual flush looks. Data and
     37  especially the contents of the TLB during a given flush. The
     38  sizes of the flush will vary greatly depending on the workload as
     48  This will cause us to do the global flush for more cases.
     53  Despite the fact that a single individual flush on x86 is
    [all …]

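These tlb.rst fragments describe the trade-off behind x86's single-page-flush ceiling: up to some number of pages it is cheaper to invalidate each one individually, while beyond it a full TLB flush wins even though it throws away unrelated entries. A toy model of that decision; the ceiling value here is illustrative, the real knob being the tlb_single_page_flush_ceiling tunable this document discusses:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative analogue of tlb_single_page_flush_ceiling: flush ranges of
 * at most this many pages page-by-page, fall back to a full flush beyond. */
static unsigned long single_page_flush_ceiling = 33;

static bool use_full_flush(unsigned long nr_pages)
{
        return nr_pages > single_page_flush_ceiling;
}

int main(void)
{
        unsigned long sizes[] = { 1, 33, 34, 512 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("%4lu pages -> %s\n", sizes[i],
                       use_full_flush(sizes[i]) ? "full flush"
                                                : "per-page INVLPG");
        return 0;
}

Lowering the ceiling pushes more ranges over the threshold, which is what the fragment at line 48 ("do the global flush for more cases") refers to.
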
/linux-6.6.21/include/net/

  gro.h
     36  u16 flush;   member
    105  NAPI_GRO_CB(skb)->flush |= 1;   in call_gro_receive()
    120  NAPI_GRO_CB(skb)->flush |= 1;   in call_gro_receive_sk()
    354  static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)   in skb_gro_flush_final() argument
    357  NAPI_GRO_CB(skb)->flush |= flush;   in skb_gro_flush_final()
    361  int flush,   in skb_gro_flush_final_remcsum() argument
    365  NAPI_GRO_CB(skb)->flush |= flush;   in skb_gro_flush_final_remcsum()
    371  static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)   in skb_gro_flush_final() argument
    373  NAPI_GRO_CB(skb)->flush |= flush;   in skb_gro_flush_final()
    377  int flush,   in skb_gro_flush_final_remcsum() argument
    [all …]

/linux-6.6.21/lib/zlib_dfltcc/

  dfltcc_deflate.c
    121  int flush,   in dfltcc_deflate() argument
    136  if (flush == Z_FULL_FLUSH)   in dfltcc_deflate()
    144  no_flush = flush == Z_NO_FLUSH;   in dfltcc_deflate()
    159  if (flush == Z_FINISH)   in dfltcc_deflate()
    162  if (flush == Z_FULL_FLUSH)   in dfltcc_deflate()
    217  need_empty_block = flush == Z_FINISH && param->bcf && !param->bhf;   in dfltcc_deflate()
    225  if (flush == Z_FINISH && !param->bcf)   in dfltcc_deflate()
    292  if (flush == Z_FINISH) {   in dfltcc_deflate()
    301  if (flush == Z_FULL_FLUSH)   in dfltcc_deflate()
    303  *result = flush == Z_NO_FLUSH ? need_more : block_done;   in dfltcc_deflate()

  dfltcc_inflate.h
     16  int flush, int *ret);
     20  #define INFLATE_TYPEDO_HOOK(strm, flush) \   argument
     25  action = dfltcc_inflate((strm), (flush), &ret); \

/linux-6.6.21/tools/testing/selftests/kvm/x86_64/

  hyperv_tlb_flush.c
    204  struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva;   in sender_guest_code() local
    219  flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;   in sender_guest_code()
    220  flush->processor_mask = BIT(WORKER_VCPU_ID_1);   in sender_guest_code()
    231  flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;   in sender_guest_code()
    232  flush->processor_mask = BIT(WORKER_VCPU_ID_1);   in sender_guest_code()
    233  flush->gva_list[0] = (u64)data->test_pages;   in sender_guest_code()
    245  flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |   in sender_guest_code()
    247  flush->processor_mask = 0;   in sender_guest_code()
    258  flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |   in sender_guest_code()
    260  flush->gva_list[0] = (u64)data->test_pages;   in sender_guest_code()
    [all …]

/linux-6.6.21/drivers/md/

  dm-delay.c
     38  struct delay_class flush;   member
    132  if (dc->flush.dev)   in delay_dtr()
    133  dm_put_device(ti, dc->flush.dev);   in delay_dtr()
    206  ret = delay_class_ctr(ti, &dc->flush, argv);   in delay_ctr()
    216  ret = delay_class_ctr(ti, &dc->flush, argv + 3);   in delay_ctr()
    222  ret = delay_class_ctr(ti, &dc->flush, argv + 6);   in delay_ctr()
    299  c = &dc->flush;   in delay_map()
    323  DMEMIT("%u %u %u", dc->read.ops, dc->write.ops, dc->flush.ops);   in delay_status()
    334  DMEMIT_DELAY_CLASS(&dc->flush);   in delay_status()
    356  ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data);   in delay_iterate_devices()

/linux-6.6.21/net/ipv6/

  ip6_offload.c
     36  NAPI_GRO_CB(skb)->flush |= 1, NULL : \
    199  u16 flush = 1;   in ipv6_gro_receive() local
    212  flush += ntohs(iph->payload_len) != skb_gro_len(skb);   in ipv6_gro_receive()
    233  flush--;   in ipv6_gro_receive()
    266  NAPI_GRO_CB(p)->flush |= !!((first_word & htonl(0x0FF00000)) |   in ipv6_gro_receive()
    268  NAPI_GRO_CB(p)->flush |= flush;   in ipv6_gro_receive()
    278  NAPI_GRO_CB(skb)->flush |= flush;   in ipv6_gro_receive()
    286  skb_gro_flush_final(skb, pp, flush);   in ipv6_gro_receive()
    297  NAPI_GRO_CB(skb)->flush = 1;   in sit_ip6ip6_gro_receive()
    312  NAPI_GRO_CB(skb)->flush = 1;   in ip4ip6_gro_receive()

  udp_offload.c
    138  goto flush;   in udp6_gro_receive()
    141  if (NAPI_GRO_CB(skb)->flush)   in udp6_gro_receive()
    146  goto flush;   in udp6_gro_receive()
    160  flush:   in udp6_gro_receive()
    161  NAPI_GRO_CB(skb)->flush = 1;   in udp6_gro_receive()

/linux-6.6.21/drivers/gpu/drm/etnaviv/

  etnaviv_buffer.c
     93  u32 flush = 0;   in etnaviv_cmd_select_pipe() local
    104  flush = VIVS_GL_FLUSH_CACHE_PE2D;   in etnaviv_cmd_select_pipe()
    106  flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;   in etnaviv_cmd_select_pipe()
    108  CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);   in etnaviv_cmd_select_pipe()
    237  u32 link_target, flush = 0;   in etnaviv_buffer_end() local
    244  flush = VIVS_GL_FLUSH_CACHE_PE2D;   in etnaviv_buffer_end()
    246  flush = VIVS_GL_FLUSH_CACHE_DEPTH |   in etnaviv_buffer_end()
    252  if (flush) {   in etnaviv_buffer_end()
    268  CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);   in etnaviv_buffer_end()
    415  u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |   in etnaviv_buffer_queue() local
    [all …]

/linux-6.6.21/Documentation/block/

  stat.rst
     44  flush I/Os     requests       number of flush I/Os processed
     45  flush ticks    milliseconds   total wait time for flush requests
     53  flush I/Os
     56  These values increment when an flush I/O request completes.
     58  Block layer combines flush requests and executes at most one at a time.
     59  This counts flush requests executed by disk. Not tracked for partitions.
     75  read ticks, write ticks, discard ticks, flush ticks

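stat.rst documents the two flush columns of /sys/block/<dev>/stat: how many flush requests the disk actually executed (the block layer merges them and issues at most one at a time, and partitions do not track this) and the milliseconds spent waiting for them. A small reader that picks those two fields out of the 17-column stat line; the device path is an example, and the field positions follow the layout documented in this file:

#include <stdio.h>

int main(void)
{
        /* Example path; substitute a real block device on the system. */
        const char *path = "/sys/block/sda/stat";
        unsigned long long v[17];
        FILE *f = fopen(path, "r");
        int i, n = 0;

        if (!f) {
                perror(path);
                return 1;
        }
        for (i = 0; i < 17 && fscanf(f, "%llu", &v[i]) == 1; i++)
                n++;
        fclose(f);

        if (n < 17) {
                fprintf(stderr, "only %d fields; no flush stats on this kernel?\n", n);
                return 1;
        }
        /* Fields 16 and 17 (1-based) are "flush I/Os" and "flush ticks". */
        printf("flush I/Os:  %llu\nflush ticks: %llu ms\n", v[15], v[16]);
        return 0;
}
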
/linux-6.6.21/drivers/gpu/drm/vmwgfx/

  vmwgfx_overlay.c
     63  SVGAEscapeVideoFlush flush;   member
     77  fill_escape(&cmd->escape, sizeof(cmd->flush));   in fill_flush()
     78  cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;   in fill_flush()
     79  cmd->flush.streamId = stream_id;   in fill_flush()
     93  struct vmw_escape_video_flush *flush;   in vmw_overlay_send_put() local
    117  fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;   in vmw_overlay_send_put()
    125  flush = (struct vmw_escape_video_flush *)&items[num_items];   in vmw_overlay_send_put()
    164  fill_flush(flush, arg->stream_id);   in vmw_overlay_send_put()
    184  struct vmw_escape_video_flush flush;   in vmw_overlay_send_stop() member
    206  fill_flush(&cmds->flush, stream_id);   in vmw_overlay_send_stop()

/linux-6.6.21/arch/arm/mm/

  cache-v4.S
     40  mcr p15, 0, r0, c7, c7, 0  @ flush ID cache
     59  mcr p15, 0, ip, c7, c7, 0  @ flush ID cache
    115  mcr p15, 0, r0, c7, c7, 0  @ flush ID cache

/linux-6.6.21/block/

  blk-flush.c
    122  return 1 << ffz(rq->flush.seq);   in blk_flush_cur_seq()
    136  rq->end_io = rq->flush.saved_end_io;   in blk_flush_restore_request()
    171  BUG_ON(rq->flush.seq & seq);   in blk_flush_complete_seq()
    172  rq->flush.seq |= seq;   in blk_flush_complete_seq()
    392  rq->flush.seq = 0;   in blk_rq_init_flush()
    394  rq->flush.saved_end_io = rq->end_io; /* Usually NULL */   in blk_rq_init_flush()
    451  rq->flush.seq |= REQ_FSEQ_PREFLUSH;   in blk_insert_flush()

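blk-flush.c drives a request through a small flush state machine whose progress lives in the rq->flush.seq bitmask; the next pending step is simply the lowest bit not yet set, which is what the 1 << ffz(...) in blk_flush_cur_seq() computes, and blk_flush_complete_seq() then ORs that bit in. A self-contained illustration of the trick; the REQ_FSEQ_* values are local definitions that mirror the kernel's ordering:

#include <stdio.h>

/* Local mirrors of the flush-sequence bits used by blk-flush.c. */
#define REQ_FSEQ_PREFLUSH   (1 << 0)
#define REQ_FSEQ_DATA       (1 << 1)
#define REQ_FSEQ_POSTFLUSH  (1 << 2)
#define REQ_FSEQ_DONE       (1 << 3)

/* find-first-zero: a userspace stand-in for the kernel's ffz(). */
static unsigned int ffz(unsigned int x)
{
        unsigned int bit = 0;

        while (x & 1) {
                x >>= 1;
                bit++;
        }
        return bit;
}

/* Equivalent of blk_flush_cur_seq(): the next step is the lowest unset bit. */
static unsigned int flush_cur_seq(unsigned int seq)
{
        return 1U << ffz(seq);
}

int main(void)
{
        unsigned int seq = 0;

        while (!(seq & REQ_FSEQ_DONE)) {
                unsigned int step = flush_cur_seq(seq);

                printf("completed mask %#x -> next step %#x\n", seq, step);
                seq |= step;    /* what blk_flush_complete_seq() does */
        }
        return 0;
}
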