
Searched refs:rl (Results 1 – 25 of 74) sorted by relevance

/linux-5.19.10/fs/ntfs/
runlist.c
60 static inline runlist_element *ntfs_rl_realloc(runlist_element *rl, in ntfs_rl_realloc() argument
65 old_size = PAGE_ALIGN(old_size * sizeof(*rl)); in ntfs_rl_realloc()
66 new_size = PAGE_ALIGN(new_size * sizeof(*rl)); in ntfs_rl_realloc()
68 return rl; in ntfs_rl_realloc()
74 if (likely(rl != NULL)) { in ntfs_rl_realloc()
77 memcpy(new_rl, rl, old_size); in ntfs_rl_realloc()
78 ntfs_free(rl); in ntfs_rl_realloc()
106 static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl, in ntfs_rl_realloc_nofail() argument
111 old_size = PAGE_ALIGN(old_size * sizeof(*rl)); in ntfs_rl_realloc_nofail()
112 new_size = PAGE_ALIGN(new_size * sizeof(*rl)); in ntfs_rl_realloc_nofail()
[all …]
lcnalloc.c
37 const runlist_element *rl) in ntfs_cluster_free_from_rl_nolock() argument
43 if (!rl) in ntfs_cluster_free_from_rl_nolock()
45 for (; rl->length; rl++) { in ntfs_cluster_free_from_rl_nolock()
48 if (rl->lcn < 0) in ntfs_cluster_free_from_rl_nolock()
50 err = ntfs_bitmap_clear_run(lcnbmp_vi, rl->lcn, rl->length); in ntfs_cluster_free_from_rl_nolock()
142 runlist_element *rl = NULL; in ntfs_cluster_alloc() local
320 if ((rlpos + 2) * sizeof(*rl) > rlsize) { in ntfs_cluster_alloc()
324 if (!rl) in ntfs_cluster_alloc()
336 memcpy(rl2, rl, rlsize); in ntfs_cluster_alloc()
337 ntfs_free(rl); in ntfs_cluster_alloc()
[all …]
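ntfs_cluster_free_from_rl_nolock() above walks a runlist and clears the bit range for every real run in the volume's cluster bitmap, skipping holes (lcn < 0). A minimal userspace sketch of that idea, with a byte-array bitmap and an illustrative run struct standing in for the kernel's $Bitmap handling and runlist_element:

#include <stdint.h>

/* Illustrative run descriptor; a zero length terminates the array. */
struct run { int64_t vcn, lcn, length; };

/* Clear 'count' bits starting at bit 'start' in a byte-array bitmap; a crude
 * stand-in for ntfs_bitmap_clear_run() operating on the $Bitmap inode. */
static void bitmap_clear_run(uint8_t *bitmap, int64_t start, int64_t count)
{
	for (; count > 0; count--, start++)
		bitmap[start >> 3] &= (uint8_t)~(1u << (start & 7));
}

/* Release every allocated run of a runlist; sparse runs (lcn < 0) have no
 * clusters to free, mirroring the loop in ntfs_cluster_free_from_rl_nolock(). */
static void free_clusters_from_runs(uint8_t *bitmap, const struct run *rl)
{
	if (!rl)
		return;
	for (; rl->length; rl++) {
		if (rl->lcn < 0)
			continue;
		bitmap_clear_run(bitmap, rl->lcn, rl->length);
	}
}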
debug.c
120 void ntfs_debug_dump_runlist(const runlist_element *rl) in ntfs_debug_dump_runlist() argument
129 if (!rl) { in ntfs_debug_dump_runlist()
135 LCN lcn = (rl + i)->lcn; in ntfs_debug_dump_runlist()
143 (long long)(rl + i)->vcn, lcn_str[index], in ntfs_debug_dump_runlist()
144 (long long)(rl + i)->length, in ntfs_debug_dump_runlist()
145 (rl + i)->length ? "" : in ntfs_debug_dump_runlist()
149 (long long)(rl + i)->vcn, in ntfs_debug_dump_runlist()
150 (long long)(rl + i)->lcn, in ntfs_debug_dump_runlist()
151 (long long)(rl + i)->length, in ntfs_debug_dump_runlist()
152 (rl + i)->length ? "" : in ntfs_debug_dump_runlist()
[all …]
runlist.h
41 runlist_element *rl; member
45 static inline void ntfs_init_runlist(runlist *rl) in ntfs_init_runlist() argument
47 rl->rl = NULL; in ntfs_init_runlist()
48 init_rwsem(&rl->lock); in ntfs_init_runlist()
65 extern LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn);
69 extern runlist_element *ntfs_rl_find_vcn_nolock(runlist_element *rl,
73 const runlist_element *rl, const VCN first_vcn,
77 const int dst_len, const runlist_element *rl,
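runlist.h ties these pieces together: a runlist is an array of (vcn, lcn, length) extents, terminated by a zero-length element, that ntfs_rl_vcn_to_lcn() scans to turn a virtual cluster number into a logical one. A minimal sketch of that lookup, assuming plain int64_t cluster types and a stand-in negative code for the kernel's LCN_* markers:

#include <stdint.h>

typedef int64_t VCN;	/* virtual cluster number within the attribute */
typedef int64_t LCN;	/* logical cluster number on the volume */

typedef struct {
	VCN vcn;	/* first virtual cluster covered by this run */
	LCN lcn;	/* first logical cluster, or a negative marker (hole, ...) */
	int64_t length;	/* run length in clusters; 0 terminates the array */
} runlist_element;

#define SK_LCN_ENOENT (-3)	/* stand-in for the kernel's LCN_ENOENT */

/* Resolve vcn to an lcn the way ntfs_rl_vcn_to_lcn() does: find the run that
 * covers vcn and add the offset into it; sparse runs return their marker. */
static LCN rl_vcn_to_lcn(const runlist_element *rl, VCN vcn)
{
	if (!rl || vcn < rl->vcn)
		return SK_LCN_ENOENT;
	for (; rl->length; rl++) {
		if (vcn < rl->vcn + rl->length) {
			if (rl->lcn >= 0)
				return rl->lcn + (vcn - rl->vcn);
			return rl->lcn;	/* hole / not-mapped style marker */
		}
	}
	return SK_LCN_ENOENT;	/* vcn lies beyond the mapped runs */
}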
attrib.c
77 runlist_element *rl; in ntfs_map_runlist_nolock() local
173 rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl); in ntfs_map_runlist_nolock()
174 if (IS_ERR(rl)) in ntfs_map_runlist_nolock()
175 err = PTR_ERR(rl); in ntfs_map_runlist_nolock()
177 ni->runlist.rl = rl; in ntfs_map_runlist_nolock()
290 if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <= in ntfs_map_runlist()
340 if (!ni->runlist.rl) { in ntfs_attr_vcn_to_lcn_nolock()
350 lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn); in ntfs_attr_vcn_to_lcn_nolock()
364 if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) != in ntfs_attr_vcn_to_lcn_nolock()
454 runlist_element *rl; in ntfs_attr_find_vcn_nolock() local
[all …]
logfile.c
718 runlist_element *rl; in ntfs_empty_logfile() local
745 rl = log_ni->runlist.rl; in ntfs_empty_logfile()
746 if (unlikely(!rl || vcn < rl->vcn || !rl->length)) { in ntfs_empty_logfile()
754 rl = log_ni->runlist.rl; in ntfs_empty_logfile()
755 BUG_ON(!rl || vcn < rl->vcn || !rl->length); in ntfs_empty_logfile()
758 while (rl->length && vcn >= rl[1].vcn) in ntfs_empty_logfile()
759 rl++; in ntfs_empty_logfile()
769 lcn = rl->lcn; in ntfs_empty_logfile()
771 vcn = rl->vcn; in ntfs_empty_logfile()
775 if (unlikely(!rl->length || lcn < LCN_HOLE)) in ntfs_empty_logfile()
[all …]
mft.c
463 runlist_element *rl; in ntfs_sync_mft_mirror() local
511 rl = NULL; in ntfs_sync_mft_mirror()
535 if (!rl) { in ntfs_sync_mft_mirror()
538 rl = NTFS_I(vol->mftmirr_ino)->runlist.rl; in ntfs_sync_mft_mirror()
543 BUG_ON(!rl); in ntfs_sync_mft_mirror()
546 while (rl->length && rl[1].vcn <= vcn) in ntfs_sync_mft_mirror()
547 rl++; in ntfs_sync_mft_mirror()
548 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); in ntfs_sync_mft_mirror()
573 if (unlikely(rl)) in ntfs_sync_mft_mirror()
669 runlist_element *rl; in write_mft_record_nolock() local
[all …]
aops.c
173 runlist_element *rl; in ntfs_read_block() local
186 BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni)); in ntfs_read_block()
225 rl = NULL; in ntfs_read_block()
246 if (!rl) { in ntfs_read_block()
249 rl = ni->runlist.rl; in ntfs_read_block()
251 if (likely(rl != NULL)) { in ntfs_read_block()
253 while (rl->length && rl[1].vcn <= vcn) in ntfs_read_block()
254 rl++; in ntfs_read_block()
255 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); in ntfs_read_block()
286 rl = NULL; in ntfs_read_block()
[all …]
lcnalloc.h
100 const runlist_element *rl);
119 const runlist_element *rl) in ntfs_cluster_free_from_rl() argument
124 ret = ntfs_cluster_free_from_rl_nolock(vol, rl); in ntfs_cluster_free_from_rl()
file.c
579 runlist_element *rl, *rl2; in ntfs_prepare_pages_for_non_resident_write() local
624 rl = NULL; in ntfs_prepare_pages_for_non_resident_write()
827 if (!rl) { in ntfs_prepare_pages_for_non_resident_write()
830 rl = ni->runlist.rl; in ntfs_prepare_pages_for_non_resident_write()
832 if (likely(rl != NULL)) { in ntfs_prepare_pages_for_non_resident_write()
834 while (rl->length && rl[1].vcn <= bh_cpos) in ntfs_prepare_pages_for_non_resident_write()
835 rl++; in ntfs_prepare_pages_for_non_resident_write()
836 lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos); in ntfs_prepare_pages_for_non_resident_write()
844 vcn_len = rl[1].vcn - vcn; in ntfs_prepare_pages_for_non_resident_write()
861 rl = NULL; in ntfs_prepare_pages_for_non_resident_write()
[all …]
compress.c
470 runlist_element *rl; in ntfs_read_compressed_block() local
595 rl = NULL; in ntfs_read_compressed_block()
600 if (!rl) { in ntfs_read_compressed_block()
603 rl = ni->runlist.rl; in ntfs_read_compressed_block()
605 if (likely(rl != NULL)) { in ntfs_read_compressed_block()
607 while (rl->length && rl[1].vcn <= vcn) in ntfs_read_compressed_block()
608 rl++; in ntfs_read_compressed_block()
609 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); in ntfs_read_compressed_block()
646 if (rl) in ntfs_read_compressed_block()
/linux-5.19.10/drivers/s390/scsi/
zfcp_reqlist.h
41 struct zfcp_reqlist *rl; in zfcp_reqlist_alloc() local
43 rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL); in zfcp_reqlist_alloc()
44 if (!rl) in zfcp_reqlist_alloc()
47 spin_lock_init(&rl->lock); in zfcp_reqlist_alloc()
50 INIT_LIST_HEAD(&rl->buckets[i]); in zfcp_reqlist_alloc()
52 return rl; in zfcp_reqlist_alloc()
61 static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl) in zfcp_reqlist_isempty() argument
66 if (!list_empty(&rl->buckets[i])) in zfcp_reqlist_isempty()
75 static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl) in zfcp_reqlist_free() argument
78 BUG_ON(!zfcp_reqlist_isempty(rl)); in zfcp_reqlist_free()
[all …]
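zfcp_reqlist.h implements a small hash of list buckets behind one spinlock: allocate and zero the structure, initialise every bucket head, and only allow freeing once every bucket has drained. A rough userspace analogue of that pattern (bucket count, node type and the missing lock are simplifications, not the driver's definitions):

#include <stdlib.h>

#define REQLIST_BUCKETS 8

struct req_node {
	struct req_node *next;
	unsigned long req_id;
};

struct reqlist {
	struct req_node *buckets[REQLIST_BUCKETS];	/* NULL head == empty */
};

static struct reqlist *reqlist_alloc(void)
{
	/* a zeroed allocation leaves every bucket head NULL, i.e. empty;
	 * the kernel uses kzalloc() plus INIT_LIST_HEAD() on real list_heads */
	return calloc(1, sizeof(struct reqlist));
}

static int reqlist_isempty(const struct reqlist *rl)
{
	for (int i = 0; i < REQLIST_BUCKETS; i++)
		if (rl->buckets[i])
			return 0;
	return 1;
}

static void reqlist_free(struct reqlist *rl)
{
	/* zfcp_reqlist_free() BUG()s on a non-empty list; here the caller is
	 * simply expected to have drained it first */
	if (rl && reqlist_isempty(rl))
		free(rl);
}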
/linux-5.19.10/crypto/
vmac.c
104 #define ADD128(rh, rl, ih, il) \ argument
107 (rl) += (_il); \
108 if ((rl) < (_il)) \
115 #define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \ argument
120 rl = MUL32(_i1, _i2); \
121 ADD128(rh, rl, (m >> 32), (m << 32)); \
124 #define MUL64(rh, rl, i1, i2) \ argument
130 rl = MUL32(_i1, _i2); \
131 ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
132 ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
[all …]
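vmac.c builds its 128-bit accumulator out of 64-bit halves: MUL64() multiplies two 64-bit values via their 32-bit halves and ADD128() folds the cross terms in with explicit carry checks. A standalone sketch of that decomposition, cross-checked against unsigned __int128 (a GCC/Clang extension, used here only for verification):

#include <assert.h>
#include <stdint.h>

/* 64x64 -> 128-bit multiply via 32-bit halves, in the spirit of MUL64():
 * low and high partial products plus two cross terms, folded together with
 * the same kind of carry check ADD128() performs. */
static void mul64_to_128(uint64_t *rh, uint64_t *rl, uint64_t a, uint64_t b)
{
	uint64_t al = (uint32_t)a, ah = a >> 32;
	uint64_t bl = (uint32_t)b, bh = b >> 32;
	uint64_t lo = al * bl;
	uint64_t hi = ah * bh;
	uint64_t m1 = ah * bl;		/* cross terms, each weighted by 2^32 */
	uint64_t m2 = al * bh;
	uint64_t t;

	t = m1 << 32;
	lo += t;
	hi += (lo < t) + (m1 >> 32);	/* carry out of the low word */
	t = m2 << 32;
	lo += t;
	hi += (lo < t) + (m2 >> 32);

	*rh = hi;
	*rl = lo;
}

int main(void)
{
	uint64_t a = 0xdeadbeefcafebabeULL, b = 0x0123456789abcdefULL, rh, rl;
	unsigned __int128 ref = (unsigned __int128)a * b;	/* GCC/Clang only */

	mul64_to_128(&rh, &rl, a, b);
	assert(rh == (uint64_t)(ref >> 64) && rl == (uint64_t)ref);
	return 0;
}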
crypto_user_stat.c
198 struct crypto_stat_larval rl; in crypto_reportstat_one() local
200 memset(&rl, 0, sizeof(rl)); in crypto_reportstat_one()
201 strscpy(rl.type, "larval", sizeof(rl.type)); in crypto_reportstat_one()
202 if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl)) in crypto_reportstat_one()
/linux-5.19.10/tools/testing/selftests/kvm/
kvm_create_max_vcpus.c
49 struct rlimit rl; in main() local
58 TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!"); in main()
60 if (rl.rlim_cur < nr_fds_wanted) { in main()
61 rl.rlim_cur = nr_fds_wanted; in main()
62 if (rl.rlim_max < nr_fds_wanted) { in main()
63 int old_rlim_max = rl.rlim_max; in main()
64 rl.rlim_max = nr_fds_wanted; in main()
66 int r = setrlimit(RLIMIT_NOFILE, &rl); in main()
73 TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!"); in main()
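kvm_create_max_vcpus.c needs one file descriptor per vCPU, so it raises RLIMIT_NOFILE before creating them: bump rlim_cur, and rlim_max as well if it is too low (which normally requires privilege). A minimal standalone version of that dance; nr_fds_wanted is an arbitrary illustrative value:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	const rlim_t nr_fds_wanted = 4096;
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl)) {
		perror("getrlimit");
		return 1;
	}
	if (rl.rlim_cur < nr_fds_wanted) {
		rl.rlim_cur = nr_fds_wanted;
		if (rl.rlim_max < nr_fds_wanted)
			rl.rlim_max = nr_fds_wanted;	/* raising the hard limit
							 * may need CAP_SYS_RESOURCE */
		if (setrlimit(RLIMIT_NOFILE, &rl)) {
			perror("setrlimit");
			return 1;
		}
	}
	printf("RLIMIT_NOFILE: cur=%llu max=%llu\n",
	       (unsigned long long)rl.rlim_cur, (unsigned long long)rl.rlim_max);
	return 0;
}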
/linux-5.19.10/include/linux/
jump_label_ratelimit.h
41 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
45 #define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ argument
48 .timeout = (rl), \
54 #define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ argument
57 .timeout = (rl), \
73 #define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ argument
75 #define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ argument
91 unsigned long rl) in jump_label_rate_limit() argument
math64.h
204 } rl, rm, rn, rh, a0, b0; in mul_u64_u64_shr() local
210 rl.ll = mul_u32_u32(a0.l.low, b0.l.low); in mul_u64_u64_shr()
220 rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low; in mul_u64_u64_shr()
229 return rl.ll; in mul_u64_u64_shr()
231 return (rl.ll >> shift) | (rh.ll << (64 - shift)); in mul_u64_u64_shr()
268 } u, rl, rh; in mul_u64_u32_div() local
271 rl.ll = mul_u32_u32(u.l.low, mul); in mul_u64_u32_div()
272 rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high; in mul_u64_u32_div()
275 rl.l.high = do_div(rh.ll, divisor); in mul_u64_u32_div()
278 do_div(rl.ll, divisor); in mul_u64_u32_div()
[all …]
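math64.h's helpers exist so 64-bit scaling arithmetic does not overflow on targets without a native 128-bit type: mul_u64_u64_shr() and mul_u64_u32_div() split their operands into 32-bit halves and recombine the partial products. A reference expression for what mul_u64_u32_div(a, mul, divisor) computes, written with the compiler's 128-bit extension purely for clarity:

#include <assert.h>
#include <stdint.h>

/* Reference for mul_u64_u32_div(a, mul, divisor): the kernel helper reaches
 * the same value from 32-bit halves plus do_div(), so it also works where no
 * 128-bit type exists. The result is assumed to fit in 64 bits. */
static uint64_t mul_u64_u32_div_ref(uint64_t a, uint32_t mul, uint32_t divisor)
{
	return (uint64_t)(((unsigned __int128)a * mul) / divisor);
}

int main(void)
{
	/* scale a large count by 1000/1024 without intermediate overflow */
	assert(mul_u64_u32_div_ref(10000000000ULL, 1000, 1024) == 9765625000ULL);
	return 0;
}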
/linux-5.19.10/fs/dlm/
rcom.c
411 struct rcom_lock *rl) in pack_rcom_lock() argument
413 memset(rl, 0, sizeof(*rl)); in pack_rcom_lock()
415 rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid); in pack_rcom_lock()
416 rl->rl_lkid = cpu_to_le32(lkb->lkb_id); in pack_rcom_lock()
417 rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags); in pack_rcom_lock()
418 rl->rl_flags = cpu_to_le32(lkb->lkb_flags); in pack_rcom_lock()
419 rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); in pack_rcom_lock()
420 rl->rl_rqmode = lkb->lkb_rqmode; in pack_rcom_lock()
421 rl->rl_grmode = lkb->lkb_grmode; in pack_rcom_lock()
422 rl->rl_status = lkb->lkb_status; in pack_rcom_lock()
[all …]
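pack_rcom_lock() serialises an in-memory lock (lkb) into the on-wire rcom_lock record: zero the record, then store each multi-byte field in little-endian byte order with cpu_to_le32(). A userspace sketch of the same packing step, using htole32() from <endian.h> (glibc/musl) and illustrative structs that carry only the fields shown above, not the real dlm layouts:

#include <endian.h>	/* htole32(): userspace analogue of cpu_to_le32() */
#include <stdint.h>
#include <string.h>

struct lock_state {		/* illustrative in-memory lock */
	uint32_t ownpid, lkid, exflags;
	uint8_t  rqmode, grmode, status;
};

struct wire_lock {		/* fixed little-endian on-wire record */
	uint32_t rl_ownpid, rl_lkid, rl_exflags;
	uint8_t  rl_rqmode, rl_grmode, rl_status;
};

/* Same shape as pack_rcom_lock(): zero the wire record, convert each
 * multi-byte field to little-endian, copy single bytes as-is. */
static void pack_lock(const struct lock_state *lkb, struct wire_lock *rl)
{
	memset(rl, 0, sizeof(*rl));
	rl->rl_ownpid  = htole32(lkb->ownpid);
	rl->rl_lkid    = htole32(lkb->lkid);
	rl->rl_exflags = htole32(lkb->exflags);
	rl->rl_rqmode  = lkb->rqmode;
	rl->rl_grmode  = lkb->grmode;
	rl->rl_status  = lkb->status;
}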
/linux-5.19.10/arch/arm/mm/
proc-v7-3level.S
57 #define rl r3 macro
60 #define rl r2 macro
73 tst rl, #L_PTE_VALID
76 bicne rl, #L_PTE_VALID
82 orrne rl, #PTE_AP2
83 biceq rl, #PTE_AP2
/linux-5.19.10/drivers/net/ethernet/mellanox/mlx5/core/
rl.c
316 struct mlx5_rate_limit *rl) in mlx5_rl_add_rate() argument
320 MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate); in mlx5_rl_add_rate()
322 rl->max_burst_sz); in mlx5_rl_add_rate()
324 rl->typical_pkt_sz); in mlx5_rl_add_rate()
333 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl) in mlx5_rl_remove_rate() argument
340 if (rl->rate == 0) in mlx5_rl_remove_rate()
343 MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate); in mlx5_rl_remove_rate()
345 rl->max_burst_sz); in mlx5_rl_remove_rate()
347 rl->typical_pkt_sz); in mlx5_rl_remove_rate()
355 rl->rate, rl->max_burst_sz, rl->typical_pkt_sz); in mlx5_rl_remove_rate()
/linux-5.19.10/drivers/slimbus/
slimbus.h
128 u8 rl; member
140 #define DEFINE_SLIM_LDEST_TXN(name, mc, rl, la, msg) \ argument
141 struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_LOGICALADDR, 0,\
144 #define DEFINE_SLIM_BCAST_TXN(name, mc, rl, la, msg) \ argument
145 struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_BROADCAST, 0,\
148 #define DEFINE_SLIM_EDEST_TXN(name, mc, rl, la, msg) \ argument
149 struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_ENUMADDR, 0,\
/linux-5.19.10/drivers/net/ethernet/mellanox/mlx5/core/en/
qos.c
1043 void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl) in mlx5e_mqprio_rl_free() argument
1045 kvfree(rl); in mlx5e_mqprio_rl_free()
1048 int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc, in mlx5e_mqprio_rl_init() argument
1061 rl->mdev = mdev; in mlx5e_mqprio_rl_init()
1062 rl->num_tc = num_tc; in mlx5e_mqprio_rl_init()
1063 rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL); in mlx5e_mqprio_rl_init()
1064 if (!rl->leaves_id) in mlx5e_mqprio_rl_init()
1067 err = mlx5_qos_create_root_node(mdev, &rl->root_id); in mlx5e_mqprio_rl_init()
1071 qos_dbg(mdev, "Root created, id %#x\n", rl->root_id); in mlx5e_mqprio_rl_init()
1077 err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw, in mlx5e_mqprio_rl_init()
[all …]
qos.h
47 void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl);
48 int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
50 void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl);
51 int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id);
/linux-5.19.10/arch/riscv/net/
bpf_jit.h
270 static inline u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1, in rv_amo_insn() argument
273 u8 funct7 = (funct5 << 2) | (aq << 1) | rl; in rv_amo_insn()
533 static inline u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) in rv_amoadd_w() argument
535 return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f); in rv_amoadd_w()
538 static inline u32 rv_amoand_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) in rv_amoand_w() argument
540 return rv_amo_insn(0xc, aq, rl, rs2, rs1, 2, rd, 0x2f); in rv_amoand_w()
543 static inline u32 rv_amoor_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) in rv_amoor_w() argument
545 return rv_amo_insn(0x8, aq, rl, rs2, rs1, 2, rd, 0x2f); in rv_amoor_w()
548 static inline u32 rv_amoxor_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) in rv_amoxor_w() argument
550 return rv_amo_insn(0x4, aq, rl, rs2, rs1, 2, rd, 0x2f); in rv_amoxor_w()
[all …]
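bpf_jit.h assembles RISC-V atomics by hand: an AMO is a standard R-type instruction whose funct7 field packs funct5 together with the aq/rl memory-ordering bits, with funct3 = 2 selecting the word width and opcode 0x2f. A small standalone encoder following that layout (the register numbers and the printed example are illustrative):

#include <stdint.h>
#include <stdio.h>

/* R-type AMO encoding: funct7 = funct5:aq:rl, then the usual
 * funct7 | rs2 | rs1 | funct3 | rd | opcode field placement. */
static uint32_t rv_amo_insn(uint8_t funct5, uint8_t aq, uint8_t rl,
			    uint8_t rs2, uint8_t rs1, uint8_t funct3,
			    uint8_t rd, uint8_t opcode)
{
	uint8_t funct7 = (uint8_t)((funct5 << 2) | (aq << 1) | rl);

	return ((uint32_t)funct7 << 25) | ((uint32_t)rs2 << 20) |
	       ((uint32_t)rs1 << 15) | ((uint32_t)funct3 << 12) |
	       ((uint32_t)rd << 7) | opcode;
}

int main(void)
{
	/* amoadd.w x10, x11, (x12): funct5 = 0, no aq/rl ordering bits;
	 * rd = x10, rs2 = x11, rs1 = x12. */
	uint32_t insn = rv_amo_insn(0, 0, 0, /*rs2=*/11, /*rs1=*/12, 2,
				    /*rd=*/10, 0x2f);

	printf("amoadd.w x10, x11, (x12) -> 0x%08x\n", insn);
	return 0;
}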
/linux-5.19.10/arch/arm/vfp/
vfp.h
73 u64 rh, rma, rmb, rl; in mul64to128() local
77 rl = (u64)nl * ml; in mul64to128()
90 rl += rma; in mul64to128()
91 rh += (rl < rma); in mul64to128()
93 *resl = rl; in mul64to128()
105 u64 rh, rl; in vfp_hi64multiply64() local
106 mul64to128(&rh, &rl, n, m); in vfp_hi64multiply64()
107 return rh | (rl != 0); in vfp_hi64multiply64()
