
Searched refs:rl (Results 1 – 25 of 49) sorted by relevance


/linux-2.6.39/fs/ntfs/
runlist.c
74 static inline runlist_element *ntfs_rl_realloc(runlist_element *rl, in ntfs_rl_realloc() argument
79 old_size = PAGE_ALIGN(old_size * sizeof(*rl)); in ntfs_rl_realloc()
80 new_size = PAGE_ALIGN(new_size * sizeof(*rl)); in ntfs_rl_realloc()
82 return rl; in ntfs_rl_realloc()
88 if (likely(rl != NULL)) { in ntfs_rl_realloc()
91 memcpy(new_rl, rl, old_size); in ntfs_rl_realloc()
92 ntfs_free(rl); in ntfs_rl_realloc()
120 static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl, in ntfs_rl_realloc_nofail() argument
125 old_size = PAGE_ALIGN(old_size * sizeof(*rl)); in ntfs_rl_realloc_nofail()
126 new_size = PAGE_ALIGN(new_size * sizeof(*rl)); in ntfs_rl_realloc_nofail()
[all …]
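
ntfs_rl_realloc() rounds both the old and new byte sizes up to page granularity and skips the copy entirely when they land in the same page-sized bucket. A minimal userspace sketch of the idea (PAGE_SIZE, the element layout, and plain malloc/free stand in for the kernel's ntfs_malloc_nofs()/ntfs_free(); the real function returns ERR_PTR(-ENOMEM) on failure):

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    typedef struct { long long vcn, lcn, length; } runlist_element;

    static runlist_element *rl_realloc(runlist_element *rl,
                                       size_t old_cnt, size_t new_cnt)
    {
        size_t old_size = PAGE_ALIGN(old_cnt * sizeof(*rl));
        size_t new_size = PAGE_ALIGN(new_cnt * sizeof(*rl));
        runlist_element *new_rl;

        if (old_size == new_size)
            return rl;                  /* still fits in the same pages */

        new_rl = malloc(new_size);
        if (!new_rl)
            return NULL;
        if (rl) {
            /* copy what both buffers can hold, then drop the old one */
            memcpy(new_rl, rl, old_size < new_size ? old_size : new_size);
            free(rl);
        }
        return new_rl;
    }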
lcnalloc.c
51 const runlist_element *rl) in ntfs_cluster_free_from_rl_nolock() argument
57 if (!rl) in ntfs_cluster_free_from_rl_nolock()
59 for (; rl->length; rl++) { in ntfs_cluster_free_from_rl_nolock()
62 if (rl->lcn < 0) in ntfs_cluster_free_from_rl_nolock()
64 err = ntfs_bitmap_clear_run(lcnbmp_vi, rl->lcn, rl->length); in ntfs_cluster_free_from_rl_nolock()
156 runlist_element *rl = NULL; in ntfs_cluster_alloc() local
334 if ((rlpos + 2) * sizeof(*rl) > rlsize) { in ntfs_cluster_alloc()
338 if (!rl) in ntfs_cluster_alloc()
350 memcpy(rl2, rl, rlsize); in ntfs_cluster_alloc()
351 ntfs_free(rl); in ntfs_cluster_alloc()
[all …]
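
A runlist is an array of (vcn, lcn, length) extents terminated by a zero-length element, and negative LCNs are sentinel codes (holes and the like), which is why the free loop at lines 59–64 skips rl->lcn < 0. A sketch of the same walk, with the bitmap-clearing call replaced by a counter:

    typedef struct { long long vcn, lcn, length; } runlist_element;

    /* Count the clusters a runlist actually maps on disk. */
    static long long rl_mapped_clusters(const runlist_element *rl)
    {
        long long n = 0;

        if (!rl)
            return 0;
        for (; rl->length; rl++) {      /* zero length terminates the array */
            if (rl->lcn < 0)
                continue;               /* hole/sentinel: nothing allocated */
            n += rl->length;
        }
        return n;
    }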
runlist.h
55 runlist_element *rl; member
59 static inline void ntfs_init_runlist(runlist *rl) in ntfs_init_runlist() argument
61 rl->rl = NULL; in ntfs_init_runlist()
62 init_rwsem(&rl->lock); in ntfs_init_runlist()
79 extern LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn);
83 extern runlist_element *ntfs_rl_find_vcn_nolock(runlist_element *rl,
87 const runlist_element *rl, const VCN first_vcn,
91 const int dst_len, const runlist_element *rl,
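
runlist.h pairs the bare element array with a reader/writer lock in a small wrapper struct; ntfs_init_runlist() just NULLs the pointer and initializes the semaphore. A userspace equivalent, with a pthreads rwlock standing in for the kernel's rw_semaphore:

    #include <pthread.h>
    #include <stddef.h>

    typedef struct { long long vcn, lcn, length; } runlist_element;

    typedef struct {
        runlist_element *rl;        /* NULL until the runlist is mapped */
        pthread_rwlock_t lock;      /* kernel version: struct rw_semaphore */
    } runlist;

    static inline void init_runlist(runlist *rl)
    {
        rl->rl = NULL;
        pthread_rwlock_init(&rl->lock, NULL);
    }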
debug.c
144 void ntfs_debug_dump_runlist(const runlist_element *rl) in ntfs_debug_dump_runlist() argument
153 if (!rl) { in ntfs_debug_dump_runlist()
159 LCN lcn = (rl + i)->lcn; in ntfs_debug_dump_runlist()
167 (long long)(rl + i)->vcn, lcn_str[index], in ntfs_debug_dump_runlist()
168 (long long)(rl + i)->length, in ntfs_debug_dump_runlist()
169 (rl + i)->length ? "" : in ntfs_debug_dump_runlist()
173 (long long)(rl + i)->vcn, in ntfs_debug_dump_runlist()
174 (long long)(rl + i)->lcn, in ntfs_debug_dump_runlist()
175 (long long)(rl + i)->length, in ntfs_debug_dump_runlist()
176 (rl + i)->length ? "" : in ntfs_debug_dump_runlist()
[all …]
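
ntfs_debug_dump_runlist() prints one row per element and substitutes symbolic names for the negative sentinel LCNs via the lcn_str[] table indexed at line 159. A compact stand-in; the sentinel values follow the LCN enum in runlist.h and should be treated as assumptions here:

    #include <stdio.h>

    typedef struct { long long vcn, lcn, length; } runlist_element;

    static const char *lcn_name(long long lcn)
    {
        switch (lcn) {
        case -1: return "LCN_HOLE";
        case -2: return "LCN_RL_NOT_MAPPED";
        case -3: return "LCN_ENOENT";
        default: return NULL;           /* a real, allocated cluster */
        }
    }

    static void dump_runlist(const runlist_element *rl)
    {
        if (!rl) {
            puts("runlist: (null)");
            return;
        }
        puts("             VCN               LCN       length");
        for (int i = 0; ; i++) {
            const char *name = lcn_name(rl[i].lcn);

            if (name)
                printf("%16lld %17s %12lld\n", rl[i].vcn, name, rl[i].length);
            else
                printf("%16lld %17lld %12lld\n",
                       rl[i].vcn, rl[i].lcn, rl[i].length);
            if (!rl[i].length)          /* terminator printed last */
                break;
        }
    }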
attrib.c
91 runlist_element *rl; in ntfs_map_runlist_nolock() local
187 rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl); in ntfs_map_runlist_nolock()
188 if (IS_ERR(rl)) in ntfs_map_runlist_nolock()
189 err = PTR_ERR(rl); in ntfs_map_runlist_nolock()
191 ni->runlist.rl = rl; in ntfs_map_runlist_nolock()
304 if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <= in ntfs_map_runlist()
354 if (!ni->runlist.rl) { in ntfs_attr_vcn_to_lcn_nolock()
364 lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn); in ntfs_attr_vcn_to_lcn_nolock()
378 if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) != in ntfs_attr_vcn_to_lcn_nolock()
468 runlist_element *rl; in ntfs_attr_find_vcn_nolock() local
[all …]
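
The pattern at lines 187–191 relies on the kernel's ERR_PTR convention: ntfs_mapping_pairs_decompress() returns either a valid runlist pointer or an errno folded into the pointer value, and the caller separates the two with IS_ERR()/PTR_ERR(). A userspace imitation of the convention (MAX_ERRNO as in the kernel's err.h):

    #include <stdint.h>

    #define MAX_ERRNO 4095

    /* Negative errnos occupy the top 4095 addresses, which no valid
     * allocation can use, so one pointer return carries both cases. */
    static inline void *ERR_PTR(long err)      { return (void *)err; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    /* Caller shape, as in ntfs_map_runlist_nolock():
     *     rl = decompress(...);
     *     if (IS_ERR(rl))
     *         err = PTR_ERR(rl);
     *     else
     *         ni->runlist.rl = rl;
     */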
logfile.c
732 runlist_element *rl; in ntfs_empty_logfile() local
759 rl = log_ni->runlist.rl; in ntfs_empty_logfile()
760 if (unlikely(!rl || vcn < rl->vcn || !rl->length)) { in ntfs_empty_logfile()
768 rl = log_ni->runlist.rl; in ntfs_empty_logfile()
769 BUG_ON(!rl || vcn < rl->vcn || !rl->length); in ntfs_empty_logfile()
772 while (rl->length && vcn >= rl[1].vcn) in ntfs_empty_logfile()
773 rl++; in ntfs_empty_logfile()
783 lcn = rl->lcn; in ntfs_empty_logfile()
785 vcn = rl->vcn; in ntfs_empty_logfile()
789 if (unlikely(!rl->length || lcn < LCN_HOLE)) in ntfs_empty_logfile()
[all …]
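
The loop at lines 772–773 is the standard runlist walk: each element covers the half-open VCN range [rl->vcn, rl[1].vcn), so stepping forward while vcn >= rl[1].vcn lands on the covering extent. The same idiom recurs in mft.c, aops.c, compress.c and file.c below. As a standalone helper (assuming, as the kernel callers do, that the runlist lock is held and vcn >= rl->vcn):

    typedef struct { long long vcn, lcn, length; } runlist_element;

    /* Return the element whose range covers vcn, or NULL if the
     * terminating zero-length element is reached first. */
    static runlist_element *rl_find(runlist_element *rl, long long vcn)
    {
        if (!rl || vcn < rl->vcn)
            return NULL;
        while (rl->length && vcn >= rl[1].vcn)
            rl++;
        return rl->length ? rl : NULL;
    }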
mft.c
474 runlist_element *rl; in ntfs_sync_mft_mirror() local
520 rl = NULL; in ntfs_sync_mft_mirror()
544 if (!rl) { in ntfs_sync_mft_mirror()
547 rl = NTFS_I(vol->mftmirr_ino)->runlist.rl; in ntfs_sync_mft_mirror()
552 BUG_ON(!rl); in ntfs_sync_mft_mirror()
555 while (rl->length && rl[1].vcn <= vcn) in ntfs_sync_mft_mirror()
556 rl++; in ntfs_sync_mft_mirror()
557 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); in ntfs_sync_mft_mirror()
582 if (unlikely(rl)) in ntfs_sync_mft_mirror()
678 runlist_element *rl; in write_mft_record_nolock() local
[all …]
aops.c
194 runlist_element *rl; in ntfs_read_block() local
207 BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni)); in ntfs_read_block()
246 rl = NULL; in ntfs_read_block()
267 if (!rl) { in ntfs_read_block()
270 rl = ni->runlist.rl; in ntfs_read_block()
272 if (likely(rl != NULL)) { in ntfs_read_block()
274 while (rl->length && rl[1].vcn <= vcn) in ntfs_read_block()
275 rl++; in ntfs_read_block()
276 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); in ntfs_read_block()
307 rl = NULL; in ntfs_read_block()
[all …]
lcnalloc.h
114 const runlist_element *rl);
133 const runlist_element *rl) in ntfs_cluster_free_from_rl() argument
138 ret = ntfs_cluster_free_from_rl_nolock(vol, rl); in ntfs_cluster_free_from_rl()
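
lcnalloc.h shows a common kernel layering: the _nolock function does the work, and a small inline wrapper takes the volume's lcnbmp lock around it, so callers that already hold the lock can use the bare variant. Sketched with a pthreads mutex in place of the kernel's semaphore, and the bitmap-clearing loop stubbed out:

    #include <pthread.h>

    typedef struct { long long vcn, lcn, length; } runlist_element;
    struct volume { pthread_mutex_t lcnbmp_lock; /* ... */ };

    /* Does the work; caller must hold lcnbmp_lock. */
    static int cluster_free_from_rl_nolock(struct volume *vol,
                                           const runlist_element *rl)
    {
        (void)vol; (void)rl;            /* bitmap-clearing loop elided */
        return 0;
    }

    /* Locked wrapper, mirroring ntfs_cluster_free_from_rl(). */
    static inline int cluster_free_from_rl(struct volume *vol,
                                           const runlist_element *rl)
    {
        int ret;

        pthread_mutex_lock(&vol->lcnbmp_lock);
        ret = cluster_free_from_rl_nolock(vol, rl);
        pthread_mutex_unlock(&vol->lcnbmp_lock);
        return ret;
    }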
compress.c
490 runlist_element *rl; in ntfs_read_compressed_block() local
611 rl = NULL; in ntfs_read_compressed_block()
616 if (!rl) { in ntfs_read_compressed_block()
619 rl = ni->runlist.rl; in ntfs_read_compressed_block()
621 if (likely(rl != NULL)) { in ntfs_read_compressed_block()
623 while (rl->length && rl[1].vcn <= vcn) in ntfs_read_compressed_block()
624 rl++; in ntfs_read_compressed_block()
625 lcn = ntfs_rl_vcn_to_lcn(rl, vcn); in ntfs_read_compressed_block()
662 if (rl) in ntfs_read_compressed_block()
file.c
491 runlist_element *rl, *rl2; in ntfs_prepare_pages_for_non_resident_write() local
536 rl = NULL; in ntfs_prepare_pages_for_non_resident_write()
740 if (!rl) { in ntfs_prepare_pages_for_non_resident_write()
743 rl = ni->runlist.rl; in ntfs_prepare_pages_for_non_resident_write()
745 if (likely(rl != NULL)) { in ntfs_prepare_pages_for_non_resident_write()
747 while (rl->length && rl[1].vcn <= bh_cpos) in ntfs_prepare_pages_for_non_resident_write()
748 rl++; in ntfs_prepare_pages_for_non_resident_write()
749 lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos); in ntfs_prepare_pages_for_non_resident_write()
757 vcn_len = rl[1].vcn - vcn; in ntfs_prepare_pages_for_non_resident_write()
774 rl = NULL; in ntfs_prepare_pages_for_non_resident_write()
[all …]
debug.h
46 extern void ntfs_debug_dump_runlist(const runlist_element *rl);
51 #define ntfs_debug_dump_runlist(rl) do {} while (0) argument
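
Line 51 is the companion half of the declaration at line 46: with debugging configured out, the dump call compiles to an empty statement, so call sites need no #ifdef guards. The shape of the idiom:

    #ifdef DEBUG
    extern void debug_dump_runlist(const runlist_element *rl);
    #else
    /* do {} while (0) keeps the no-op statement-safe in if/else chains */
    #define debug_dump_runlist(rl) do {} while (0)
    #endif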
inode.c
748 ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol, in ntfs_read_locked_inode()
750 if (IS_ERR(ni->attr_list_rl.rl)) { in ntfs_read_locked_inode()
751 err = PTR_ERR(ni->attr_list_rl.rl); in ntfs_read_locked_inode()
752 ni->attr_list_rl.rl = NULL; in ntfs_read_locked_inode()
1921 ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol, in ntfs_read_inode_mount()
1923 if (IS_ERR(ni->attr_list_rl.rl)) { in ntfs_read_inode_mount()
1924 err = PTR_ERR(ni->attr_list_rl.rl); in ntfs_read_inode_mount()
1925 ni->attr_list_rl.rl = NULL; in ntfs_read_inode_mount()
2052 nrl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl); in ntfs_read_inode_mount()
2059 ni->runlist.rl = nrl; in ntfs_read_inode_mount()
[all …]
/linux-2.6.39/drivers/s390/scsi/
zfcp_reqlist.h
40 struct zfcp_reqlist *rl; in zfcp_reqlist_alloc() local
42 rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL); in zfcp_reqlist_alloc()
43 if (!rl) in zfcp_reqlist_alloc()
46 spin_lock_init(&rl->lock); in zfcp_reqlist_alloc()
49 INIT_LIST_HEAD(&rl->buckets[i]); in zfcp_reqlist_alloc()
51 return rl; in zfcp_reqlist_alloc()
60 static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl) in zfcp_reqlist_isempty() argument
65 if (!list_empty(&rl->buckets[i])) in zfcp_reqlist_isempty()
74 static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl) in zfcp_reqlist_free() argument
77 BUG_ON(!zfcp_reqlist_isempty(rl)); in zfcp_reqlist_free()
[all …]
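
zfcp_reqlist_alloc() is a textbook constructor for a hashed lookup table: zeroed allocation, lock init, then every bucket becomes an empty circular list. A userspace sketch (the bucket count is an assumption, and a pthreads spinlock stands in for spinlock_t):

    #include <pthread.h>
    #include <stdlib.h>

    #define REQLIST_BUCKETS 128

    struct list_head { struct list_head *next, *prev; };

    static inline void INIT_LIST_HEAD(struct list_head *h)
    {
        h->next = h->prev = h;          /* empty circular list */
    }

    struct reqlist {
        pthread_spinlock_t lock;
        struct list_head buckets[REQLIST_BUCKETS];
    };

    static struct reqlist *reqlist_alloc(void)
    {
        struct reqlist *rl = calloc(1, sizeof(*rl));   /* cf. kzalloc() */

        if (!rl)
            return NULL;
        pthread_spin_init(&rl->lock, PTHREAD_PROCESS_PRIVATE);
        for (int i = 0; i < REQLIST_BUCKETS; i++)
            INIT_LIST_HEAD(&rl->buckets[i]);
        return rl;
    }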
/linux-2.6.39/crypto/
vmac.c
65 #define ADD128(rh, rl, ih, il) \ argument
68 (rl) += (_il); \
69 if ((rl) < (_il)) \
76 #define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \ argument
81 rl = MUL32(_i1, _i2); \
82 ADD128(rh, rl, (m >> 32), (m << 32)); \
85 #define MUL64(rh, rl, i1, i2) \ argument
91 rl = MUL32(_i1, _i2); \
92 ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
93 ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
[all …]
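
ADD128 detects carry out of the low 64-bit limb purely in C: after rl += il, unsigned wraparound means rl < il exactly when a carry occurred, and the carry is folded into the high limb. MUL64 then builds a full 64x64->128 multiply from four 32x32->64 partial products. Both rewritten as functions:

    #include <stdint.h>

    /* 128-bit accumulate in two 64-bit limbs (vmac.c's ADD128). */
    static void add128(uint64_t *rh, uint64_t *rl, uint64_t ih, uint64_t il)
    {
        *rl += il;
        if (*rl < il)           /* wrapped: carry into the high limb */
            (*rh)++;
        *rh += ih;
    }

    /* 64x64 -> 128 from four 32x32 -> 64 partial products (cf. MUL64). */
    static void mul64(uint64_t *rh, uint64_t *rl, uint64_t i1, uint64_t i2)
    {
        uint64_t m1 = (i1 >> 32) * (uint32_t)i2;    /* hi(i1) * lo(i2) */
        uint64_t m2 = (uint32_t)i1 * (i2 >> 32);    /* lo(i1) * hi(i2) */

        *rh = (i1 >> 32) * (i2 >> 32);
        *rl = (uint64_t)(uint32_t)i1 * (uint32_t)i2;
        add128(rh, rl, m1 >> 32, m1 << 32);         /* add m1 << 32 */
        add128(rh, rl, m2 >> 32, m2 << 32);         /* add m2 << 32 */
    }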
camellia.c
340 #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \ argument
344 lr = (lr << bits) + (rl >> (32 - bits)); \
345 rl = (rl << bits) + (rr >> (32 - bits)); \
349 #define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \ argument
353 ll = (lr << (bits - 32)) + (rl >> (64 - bits)); \
354 lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \
355 rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \
854 #define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) \ argument
860 rl ^= t2; \
864 t3 &= rl; \
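
ROLDQ rotates a 128-bit quantity held in four 32-bit words left by bits (0 < bits < 32): each word shifts up and takes its low bits from its right neighbour, with the saved top word wrapping around to the bottom; ROLDQo32 covers shifts past a word boundary. The basic variant as a function:

    #include <stdint.h>

    /* 128-bit rotate-left by n (0 < n < 32) over (ll:lr:rl:rr). */
    static void roldq(uint32_t *ll, uint32_t *lr, uint32_t *rl,
                      uint32_t *rr, unsigned n)
    {
        uint32_t w0 = *ll;      /* saved: rr wraps around to the old top */

        *ll = (*ll << n) | (*lr >> (32 - n));
        *lr = (*lr << n) | (*rl >> (32 - n));
        *rl = (*rl << n) | (*rr >> (32 - n));
        *rr = (*rr << n) | (w0  >> (32 - n));
    }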
/linux-2.6.39/fs/dlm/
rcom.c
309 struct rcom_lock *rl) in pack_rcom_lock() argument
311 memset(rl, 0, sizeof(*rl)); in pack_rcom_lock()
313 rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid); in pack_rcom_lock()
314 rl->rl_lkid = cpu_to_le32(lkb->lkb_id); in pack_rcom_lock()
315 rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags); in pack_rcom_lock()
316 rl->rl_flags = cpu_to_le32(lkb->lkb_flags); in pack_rcom_lock()
317 rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); in pack_rcom_lock()
318 rl->rl_rqmode = lkb->lkb_rqmode; in pack_rcom_lock()
319 rl->rl_grmode = lkb->lkb_grmode; in pack_rcom_lock()
320 rl->rl_status = lkb->lkb_status; in pack_rcom_lock()
[all …]
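
pack_rcom_lock() first zeroes the whole wire structure, so reserved fields and padding go out as zeros, then stores each multibyte field little-endian with cpu_to_le32(); single-byte fields like rl_rqmode need no conversion. A portable sketch of the same discipline, with the struct layouts reduced to a few fields:

    #include <stdint.h>
    #include <string.h>

    struct lkb { uint32_t id, ownpid, exflags; uint8_t rqmode, grmode; };
    struct rcom_lock {
        uint32_t rl_lkid, rl_ownpid, rl_exflags;  /* LE on the wire */
        uint8_t  rl_rqmode, rl_grmode;
    };

    /* Host-order to little-endian, portably: build the byte image. */
    static uint32_t cpu_to_le32(uint32_t v)
    {
        uint8_t b[4] = { v, v >> 8, v >> 16, v >> 24 };
        uint32_t le;

        memcpy(&le, b, sizeof(le));
        return le;
    }

    /* Mirrors pack_rcom_lock(): memset first, then convert each field. */
    static void pack_lock(const struct lkb *lkb, struct rcom_lock *rl)
    {
        memset(rl, 0, sizeof(*rl));
        rl->rl_lkid    = cpu_to_le32(lkb->id);
        rl->rl_ownpid  = cpu_to_le32(lkb->ownpid);
        rl->rl_exflags = cpu_to_le32(lkb->exflags);
        rl->rl_rqmode  = lkb->rqmode;   /* single bytes: no swapping */
        rl->rl_grmode  = lkb->grmode;
    }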
lock.c
4397 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; in receive_rcom_lock_args() local
4400 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid); in receive_rcom_lock_args()
4401 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid); in receive_rcom_lock_args()
4402 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags); in receive_rcom_lock_args()
4403 lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF; in receive_rcom_lock_args()
4405 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq); in receive_rcom_lock_args()
4406 lkb->lkb_rqmode = rl->rl_rqmode; in receive_rcom_lock_args()
4407 lkb->lkb_grmode = rl->rl_grmode; in receive_rcom_lock_args()
4410 lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL; in receive_rcom_lock_args()
4411 lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL; in receive_rcom_lock_args()
[all …]
/linux-2.6.39/arch/arm/vfp/
vfp.h
76 u64 rh, rma, rmb, rl; in mul64to128() local
80 rl = (u64)nl * ml; in mul64to128()
93 rl += rma; in mul64to128()
94 rh += (rl < rma); in mul64to128()
96 *resl = rl; in mul64to128()
108 u64 rh, rl; in vfp_hi64multiply64() local
109 mul64to128(&rh, &rl, n, m); in vfp_hi64multiply64()
110 return rh | (rl != 0); in vfp_hi64multiply64()
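
mul64to128() builds the 128-bit product from 32-bit halves with the same sum-wrapped carry test seen in vmac.c above, and vfp_hi64multiply64() then compresses the discarded low half into a "sticky" bit so rounding still sees any lost precision. The sticky-bit step, using the compiler's 128-bit type (a GCC/Clang extension) in place of the manual four-partial-product expansion:

    #include <stdint.h>

    static uint64_t hi64_multiply64_sticky(uint64_t n, uint64_t m)
    {
        unsigned __int128 p = (unsigned __int128)n * m;
        uint64_t rh = (uint64_t)(p >> 64);
        uint64_t rl = (uint64_t)p;

        return rh | (rl != 0);  /* any nonzero low bit sets the sticky bit */
    }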
/linux-2.6.39/block/
blk-sysfs.c
42 struct request_list *rl = &q->rq; in queue_requests_store() local
57 if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) in queue_requests_store()
59 else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) in queue_requests_store()
62 if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) in queue_requests_store()
64 else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) in queue_requests_store()
67 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { in queue_requests_store()
71 wake_up(&rl->wait[BLK_RW_SYNC]); in queue_requests_store()
74 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { in queue_requests_store()
78 wake_up(&rl->wait[BLK_RW_ASYNC]); in queue_requests_store()
470 struct request_list *rl = &q->rq; in blk_release_queue() local
[all …]
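
queue_requests_store() applies hysteresis to the congestion flags: a queue direction is marked congested once its request count reaches the "on" threshold, but only unmarked when the count drops below a lower "off" threshold, so the state cannot flap while the count hovers near the limit. The shape of it:

    /* Hysteresis around the request-queue limit, per direction
     * (sync/async): on_thresh sits above off_thresh by design. */
    struct congestion {
        unsigned count, on_thresh, off_thresh;
        int congested;
    };

    static void update_congestion(struct congestion *c)
    {
        if (c->count >= c->on_thresh)
            c->congested = 1;
        else if (c->count < c->off_thresh)
            c->congested = 0;
        /* in between: keep the previous state */
    }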
blk-core.c
380 struct request_list *rl = &q->rq; in blk_init_free_list() local
382 if (unlikely(rl->rq_pool)) in blk_init_free_list()
385 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; in blk_init_free_list()
386 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; in blk_init_free_list()
387 rl->elvpriv = 0; in blk_init_free_list()
388 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); in blk_init_free_list()
389 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); in blk_init_free_list()
391 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, in blk_init_free_list()
394 if (!rl->rq_pool) in blk_init_free_list()
638 struct request_list *rl = &q->rq; in __freed_request() local
[all …]
/linux-2.6.39/net/ipv4/
inetpeer.c
307 struct inet_peer *rr, *rl, *rlr, *rll; in peer_avl_rebalance() local
310 rl = rcu_deref_locked(r->avl_left, base); in peer_avl_rebalance()
311 rlh = node_height(rl); in peer_avl_rebalance()
313 RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */ in peer_avl_rebalance()
321 rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */ in peer_avl_rebalance()
322 rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */ in peer_avl_rebalance()
329 RCU_INIT_POINTER(rl->avl_right, r); /* r: LH+1 */ in peer_avl_rebalance()
330 RCU_INIT_POINTER(rl->avl_left, node); /* node: LH+1 */ in peer_avl_rebalance()
331 rl->avl_height = lh + 2; in peer_avl_rebalance()
332 RCU_INIT_POINTER(*nodep, rl); in peer_avl_rebalance()
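
Lines 329–332 are the right-left double rotation of AVL rebalancing: node's right child r is left-heavy, so r's left child rl is promoted to subtree root with node and r as its new children. Stripped of the RCU pointer handling and the kernel's height arithmetic, the rotation looks like this:

    struct node { struct node *left, *right; int height; };

    static int h(const struct node *n) { return n ? n->height : 0; }
    static int max2(int a, int b)      { return a > b ? a : b; }
    static void fix_h(struct node *n)
    {
        n->height = 1 + max2(h(n->left), h(n->right));
    }

    /* Promote rl = node->right->left to subtree root (RL rotation). */
    static struct node *rotate_right_left(struct node *node)
    {
        struct node *r  = node->right;
        struct node *rl = r->left;

        node->right = rl->left;     /* rl's left subtree moves under node */
        r->left     = rl->right;    /* rl's right subtree moves under r */
        rl->left    = node;
        rl->right   = r;
        fix_h(node);
        fix_h(r);
        fix_h(rl);
        return rl;                  /* caller relinks: *nodep = rl */
    }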
/linux-2.6.39/sound/oss/
swarm_cs4297a.c
1170 unsigned char l, r, rl, rr, vidx; in mixer_ioctl() local
1331 rl = 63; in mixer_ioctl()
1334 rl = attentbl[(10 * l) / 100]; // Convert 0-100 vol to 63-0 atten. in mixer_ioctl()
1345 if ((rl > 60) && (rr > 60)) // If both l & r are 'low', in mixer_ioctl()
1350 temp1 |= (rl << 8) | rr; in mixer_ioctl()
1369 rl = 0; in mixer_ioctl()
1372 rl = (l * 2 - 5) / 13; // Convert 0-100 range to 0-15. in mixer_ioctl()
1373 l = (rl * 13 + 5) / 2; in mixer_ioctl()
1376 if (rl < 3) { in mixer_ioctl()
1378 rl = 0; in mixer_ioctl()
[all …]
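
Lines 1372–1373 quantize a 0–100 OSS volume to the hardware's 0–15 gain steps and immediately re-derive the volume from the gain, so the value reported back to userspace reflects what was actually programmed rather than what was requested. The pair of conversions, with very low volumes muted as the driver does:

    /* Quantize a 0..100 volume to a 0..15 gain step, then report the
     * volume corresponding to that step (cf. mixer_ioctl() above). */
    static unsigned vol_to_gain(unsigned l)
    {
        return l >= 3 ? (l * 2 - 5) / 13 : 0;   /* below 3: mute */
    }

    static unsigned gain_to_vol(unsigned g)
    {
        return (g * 13 + 5) / 2;                /* 15 -> 100 */
    }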
/linux-2.6.39/fs/cifs/
smbdes.c
201 char *rl; in dohash() local
216 rl = pd1 + 64; in dohash()
295 concat(rl, r, l, 32, 32); in dohash()
297 permute(out, rl, perm6, 64); in dohash()
/linux-2.6.39/net/can/
af_can.c
413 struct hlist_head *rl; in can_rx_register() local
430 rl = find_rcv_list(&can_id, &mask, d); in can_rx_register()
439 hlist_add_head_rcu(&r->list, rl); in can_rx_register()
481 struct hlist_head *rl; in can_rx_unregister() local
498 rl = find_rcv_list(&can_id, &mask, d); in can_rx_unregister()
506 hlist_for_each_entry_rcu(r, next, rl, list) { in can_rx_unregister()
