
Searched refs:nblocks (Results 1 – 25 of 79) sorted by relevance


/linux-6.1.9/init/
do_mounts_rd.c
66 int nblocks = -1; in identify_ramdisk_image() local
96 nblocks = 0; in identify_ramdisk_image()
106 nblocks = (ntohl(romfsb->size)+BLOCK_SIZE-1)>>BLOCK_SIZE_BITS; in identify_ramdisk_image()
114 nblocks = (cramfsb->size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS; in identify_ramdisk_image()
123 nblocks = (le64_to_cpu(squashfsb->bytes_used) + BLOCK_SIZE - 1) in identify_ramdisk_image()
138 nblocks = (cramfsb->size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS; in identify_ramdisk_image()
154 nblocks = minixsb->s_nzones << minixsb->s_log_zone_size; in identify_ramdisk_image()
164 nblocks = n; in identify_ramdisk_image()
174 return nblocks; in identify_ramdisk_image()
190 int nblocks, i; in rd_load_image() local
[all …]
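
Most of the hits above derive nblocks from a byte count with the same ceiling-divide idiom, (size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS. A minimal userspace sketch of that conversion (the 1 KiB block size is an assumption matching the ramdisk code, not taken from these hits):

#include <stdio.h>
#include <stdint.h>

#define BLOCK_SIZE_BITS 10                 /* assumed: 1 KiB blocks, as the ramdisk code uses */
#define BLOCK_SIZE      (1 << BLOCK_SIZE_BITS)

/* Round a byte count up to whole blocks, the idiom used throughout identify_ramdisk_image(). */
static uint64_t bytes_to_nblocks(uint64_t size)
{
	return (size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS;
}

int main(void)
{
	/* One byte past a block boundary still costs a whole extra block. */
	printf("%llu\n", (unsigned long long)bytes_to_nblocks(1));    /* 1 */
	printf("%llu\n", (unsigned long long)bytes_to_nblocks(1024)); /* 1 */
	printf("%llu\n", (unsigned long long)bytes_to_nblocks(1025)); /* 2 */
	return 0;
}
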
/linux-6.1.9/fs/jfs/
jfs_dmap.c
65 int nblocks);
72 static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
74 int nblocks);
76 int nblocks,
79 int nblocks);
80 static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
83 static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
85 static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
87 static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
92 int nblocks);
[all …]
jfs_discard.c
32 void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks) in jfs_issue_discard() argument
37 r = sb_issue_discard(sb, blkno, nblocks, GFP_NOFS, 0); in jfs_issue_discard()
41 (unsigned long long)nblocks, r); in jfs_issue_discard()
46 (unsigned long long)nblocks, r); in jfs_issue_discard()
jfs_dmap.h
148 __le32 nblocks; /* 4: num blks covered by this dmap */ member
284 extern int dbFree(struct inode *ipbmap, s64 blkno, s64 nblocks);
287 int free, s64 blkno, s64 nblocks, struct tblock * tblk);
291 extern int dbAlloc(struct inode *ipbmap, s64 hint, s64 nblocks, s64 * results);
294 s64 blkno, s64 nblocks, s64 addnblocks, s64 * results);
297 extern int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks);
298 extern int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks);
resize.c
62 s64 XAddress, XSize, nblocks, xoff, xaddr, t64; in jfs_extendfs() local
313 nblocks = min(t64 - mapSize, XSize); in jfs_extendfs()
322 if ((rc = dbExtendFS(ipbmap, XAddress, nblocks))) in jfs_extendfs()
332 XSize -= nblocks; in jfs_extendfs()
382 xlen = min(xlen, (int) nblocks) & ~(sbi->nbperpage - 1); in jfs_extendfs()
387 if ((rc = xtAppend(tid, ipbmap, 0, xoff, nblocks, &xlen, &xaddr, 0))) { in jfs_extendfs()
jfs_extent.c
299 extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno) in extBalloc() argument
315 if (*nblocks >= max && *nblocks > nbperpage) in extBalloc()
318 nb = nblks = *nblocks; in extBalloc()
336 *nblocks = nb; in extBalloc()
xattr.c
207 int nblocks; in ea_write() local
225 nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits; in ea_write()
228 rc = dquot_alloc_block(ip, nblocks); in ea_write()
232 rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno); in ea_write()
235 dquot_free_block(ip, nblocks); in ea_write()
246 for (i = 0; i < nblocks; i += sbi->nbperpage) { in ea_write()
289 DXDlength(ea, nblocks); in ea_write()
300 dquot_free_block(ip, nblocks); in ea_write()
302 dbFree(ip, blkno, nblocks); in ea_write()
356 int nblocks; in ea_read() local
[all …]
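
The ea_write() hits show a reserve-then-allocate ordering: charge quota for nblocks first, allocate the blocks with dbAlloc(), and release the quota charge again if the allocation fails. A hedged userspace sketch of that unwind pattern (reserve_quota/release_quota/alloc_blocks are hypothetical stand-ins, not the JFS or quota APIs):

#include <stdio.h>

/* Hypothetical stand-ins for dquot_alloc_block(), dquot_free_block() and dbAlloc(). */
static int reserve_quota(int nblocks) { (void)nblocks; return 0; }
static void release_quota(int nblocks) { (void)nblocks; }
static int alloc_blocks(int nblocks, long *blkno) { (void)nblocks; *blkno = 4096; return 0; }

static int write_ea_blocks(long size, int blocksize_bits)
{
	int nblocks = (size + (1L << blocksize_bits) - 1) >> blocksize_bits;
	long blkno;
	int rc;

	rc = reserve_quota(nblocks);       /* charge quota before touching the allocator */
	if (rc)
		return rc;

	rc = alloc_blocks(nblocks, &blkno);
	if (rc) {
		release_quota(nblocks);    /* unwind the quota charge on failure */
		return rc;
	}

	printf("allocated %d blocks at %ld\n", nblocks, blkno);
	return 0;
}

int main(void)
{
	return write_ea_blocks(5000, 12);
}
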
/linux-6.1.9/arch/x86/crypto/
sm4_aesni_avx_glue.c
23 const u8 *src, int nblocks);
25 const u8 *src, int nblocks);
61 unsigned int nblocks = min(nbytes >> 4, 4u); in ecb_do_crypt() local
62 sm4_aesni_avx_crypt4(rkey, dst, src, nblocks); in ecb_do_crypt()
63 dst += nblocks * SM4_BLOCK_SIZE; in ecb_do_crypt()
64 src += nblocks * SM4_BLOCK_SIZE; in ecb_do_crypt()
65 nbytes -= nblocks * SM4_BLOCK_SIZE; in ecb_do_crypt()
153 unsigned int nblocks = min(nbytes >> 4, 8u); in sm4_avx_cbc_decrypt() local
157 src, nblocks); in sm4_avx_cbc_decrypt()
159 src += ((int)nblocks - 2) * SM4_BLOCK_SIZE; in sm4_avx_cbc_decrypt()
[all …]
blake2s-glue.c
20 const u8 *block, const size_t nblocks,
23 const u8 *block, const size_t nblocks,
30 size_t nblocks, const u32 inc) in blake2s_compress() argument
36 blake2s_compress_generic(state, block, nblocks, inc); in blake2s_compress()
41 const size_t blocks = min_t(size_t, nblocks, in blake2s_compress()
52 nblocks -= blocks; in blake2s_compress()
54 } while (nblocks); in blake2s_compress()
polyval-clmulni_glue.c
49 const u8 *in, size_t nblocks, u8 *accumulator);
58 const u8 *in, size_t nblocks, u8 *accumulator) in internal_polyval_update() argument
62 clmul_polyval_update(keys, in, nblocks, accumulator); in internal_polyval_update()
66 nblocks, accumulator); in internal_polyval_update()
116 unsigned int nblocks; in polyval_x86_update() local
136 nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE; in polyval_x86_update()
137 internal_polyval_update(tctx, src, nblocks, dctx->buffer); in polyval_x86_update()
138 srclen -= nblocks * POLYVAL_BLOCK_SIZE; in polyval_x86_update()
139 src += nblocks * POLYVAL_BLOCK_SIZE; in polyval_x86_update()
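
Both polyval glue files (this one and the arm64 version below) walk the input in bounded chunks: cap one pass at 4096 bytes, convert that to whole 16-byte blocks, hand them to the SIMD routine, then advance src and srclen. A standalone sketch of that loop shape (process_blocks() is a placeholder for clmul_polyval_update(), not a real API):

#include <stdio.h>
#include <stddef.h>

#define BLOCK_SIZE 16        /* POLYVAL_BLOCK_SIZE */
#define CHUNK_MAX  4096U     /* per-pass cap, as in polyval_x86_update() */

/* Placeholder for the accelerated update routine. */
static void process_blocks(const unsigned char *in, size_t nblocks)
{
	(void)in;
	printf("processing %zu blocks\n", nblocks);
}

static void update(const unsigned char *src, unsigned int srclen)
{
	while (srclen >= BLOCK_SIZE) {
		/* Never feed more than CHUNK_MAX bytes to a single call. */
		unsigned int nblocks = (srclen < CHUNK_MAX ? srclen : CHUNK_MAX) / BLOCK_SIZE;

		process_blocks(src, nblocks);
		srclen -= nblocks * BLOCK_SIZE;
		src    += nblocks * BLOCK_SIZE;
	}
	/* Whatever is left (< BLOCK_SIZE) stays buffered for the next update or final. */
}

int main(void)
{
	static unsigned char buf[10000];

	update(buf, sizeof(buf));
	return 0;
}
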
/linux-6.1.9/arch/arm64/crypto/
polyval-ce-glue.c
45 const u8 *in, size_t nblocks, u8 *accumulator);
49 const u8 *in, size_t nblocks, u8 *accumulator) in internal_polyval_update() argument
53 pmull_polyval_update(keys, in, nblocks, accumulator); in internal_polyval_update()
57 nblocks, accumulator); in internal_polyval_update()
107 unsigned int nblocks; in polyval_arm64_update() local
127 nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE; in polyval_arm64_update()
128 internal_polyval_update(tctx, src, nblocks, dctx->buffer); in polyval_arm64_update()
129 srclen -= nblocks * POLYVAL_BLOCK_SIZE; in polyval_arm64_update()
130 src += nblocks * POLYVAL_BLOCK_SIZE; in polyval_arm64_update()
/linux-6.1.9/arch/arm/crypto/
blake2b-neon-glue.c
19 const u8 *block, size_t nblocks, u32 inc);
22 const u8 *block, size_t nblocks, u32 inc) in blake2b_compress_arch() argument
25 blake2b_compress_generic(state, block, nblocks, inc); in blake2b_compress_arch()
30 const size_t blocks = min_t(size_t, nblocks, in blake2b_compress_arch()
37 nblocks -= blocks; in blake2b_compress_arch()
39 } while (nblocks); in blake2b_compress_arch()
/linux-6.1.9/include/crypto/internal/
blake2b.h
15 const u8 *block, size_t nblocks, u32 inc);
23 const u8 *block, size_t nblocks, u32 inc);
41 const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE); in __blake2b_update() local
43 (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE); in __blake2b_update()
44 in += BLAKE2B_BLOCK_SIZE * (nblocks - 1); in __blake2b_update()
45 inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1); in __blake2b_update()
blake2s.h
14 size_t nblocks, const u32 inc);
17 size_t nblocks, const u32 inc);
/linux-6.1.9/lib/crypto/
blake2s-generic.c
41 size_t nblocks, const u32 inc)
45 size_t nblocks, const u32 inc) in blake2s_compress_generic() argument
52 (nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE)); in blake2s_compress_generic()
54 while (nblocks > 0) { in blake2s_compress_generic()
107 --nblocks; in blake2s_compress_generic()
blake2s.c
38 const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); in blake2s_update() local
39 blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); in blake2s_update()
40 in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); in blake2s_update()
41 inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); in blake2s_update()
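
blake2s_update() here (and the blake2b helper in include/crypto/internal/blake2b.h above) computes nblocks with DIV_ROUND_UP but compresses only nblocks - 1 of them, keeping the final block buffered so finalization can flag the true last block. A small sketch of that split, with compress() standing in for blake2s_compress():

#include <stdio.h>
#include <stddef.h>

#define BLOCK_SIZE 64                              /* BLAKE2S_BLOCK_SIZE */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Stand-in for blake2s_compress(); only reports what it was asked to do. */
static void compress(const unsigned char *in, size_t nblocks, unsigned int inc)
{
	(void)in;
	printf("compress %zu blocks, counter += %u each\n", nblocks, inc);
}

static void update(const unsigned char *in, size_t inlen)
{
	if (inlen > BLOCK_SIZE) {
		const size_t nblocks = DIV_ROUND_UP(inlen, BLOCK_SIZE);

		/* Compress all but the last block; the tail stays buffered so the
		 * finalization step can mark the real final block. */
		compress(in, nblocks - 1, BLOCK_SIZE);
		in    += BLOCK_SIZE * (nblocks - 1);
		inlen -= BLOCK_SIZE * (nblocks - 1);
	}
	printf("buffer %zu tail bytes\n", inlen);
}

int main(void)
{
	static unsigned char msg[200];

	update(msg, sizeof(msg));
	return 0;
}
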
/linux-6.1.9/fs/reiserfs/
xattr.h
75 size_t nblocks = JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); in reiserfs_xattr_jcreate_nblocks() local
78 nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); in reiserfs_xattr_jcreate_nblocks()
80 nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); in reiserfs_xattr_jcreate_nblocks()
83 return nblocks; in reiserfs_xattr_jcreate_nblocks()
xattr_acl.c
369 int nblocks = 0; in reiserfs_cache_default_acl() local
383 nblocks = reiserfs_xattr_jcreate_nblocks(inode); in reiserfs_cache_default_acl()
384 nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); in reiserfs_cache_default_acl()
389 nblocks += reiserfs_xattr_nblocks(inode, size) * 4; in reiserfs_cache_default_acl()
393 return nblocks; in reiserfs_cache_default_acl()
/linux-6.1.9/fs/ext4/
ext4_jbd2.h
305 #define ext4_journal_start_sb(sb, type, nblocks) \ argument
306 __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0, \
309 #define ext4_journal_start(inode, type, nblocks) \ argument
310 __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0, \
344 static inline int ext4_journal_extend(handle_t *handle, int nblocks, int revoke) in ext4_journal_extend() argument
347 return jbd2_journal_extend(handle, nblocks, revoke); in ext4_journal_extend()
351 static inline int ext4_journal_restart(handle_t *handle, int nblocks, in ext4_journal_restart() argument
355 return jbd2__journal_restart(handle, nblocks, revoke, GFP_NOFS); in ext4_journal_restart()
/linux-6.1.9/fs/nilfs2/
sufile.c
525 unsigned long nblocks, time64_t modtime) in nilfs_sufile_set_segment_usage() argument
542 su->su_nblocks = cpu_to_le32(nblocks); in nilfs_sufile_set_segment_usage()
1030 sector_t start = 0, nblocks = 0; in nilfs_sufile_trim_fs() local
1086 if (!nblocks) { in nilfs_sufile_trim_fs()
1089 nblocks = seg_end - seg_start + 1; in nilfs_sufile_trim_fs()
1093 if (start + nblocks == seg_start) { in nilfs_sufile_trim_fs()
1095 nblocks += seg_end - seg_start + 1; in nilfs_sufile_trim_fs()
1101 nblocks -= start_block - start; in nilfs_sufile_trim_fs()
1105 if (nblocks >= minlen) { in nilfs_sufile_trim_fs()
1110 nblocks * sects_per_block, in nilfs_sufile_trim_fs()
[all …]
segbuf.h
34 unsigned long nblocks; member
131 return segbuf->sb_sum.nblocks == segbuf->sb_sum.nsumblk; in nilfs_segbuf_empty()
139 segbuf->sb_sum.nblocks++; in nilfs_segbuf_add_segsum_buffer()
148 segbuf->sb_sum.nblocks++; in nilfs_segbuf_add_payload_buffer()
the_nilfs.c
713 sector_t start = 0, nblocks = 0; in nilfs_discard_segments() local
723 if (!nblocks) { in nilfs_discard_segments()
725 nblocks = seg_end - seg_start + 1; in nilfs_discard_segments()
726 } else if (start + nblocks == seg_start) { in nilfs_discard_segments()
727 nblocks += seg_end - seg_start + 1; in nilfs_discard_segments()
731 nblocks * sects_per_block, in nilfs_discard_segments()
735 nblocks = 0; in nilfs_discard_segments()
738 if (nblocks) in nilfs_discard_segments()
741 nblocks * sects_per_block, in nilfs_discard_segments()
746 int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) in nilfs_count_free_blocks() argument
[all …]
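
nilfs_discard_segments() and nilfs_sufile_trim_fs() both coalesce adjacent segments into a single (start, nblocks) run and only issue a discard once the next segment is no longer contiguous. A simplified sketch of that coalescing loop, with issue_discard() as a placeholder for blkdev_issue_discard():

#include <stdio.h>

typedef unsigned long long sector_t;

/* Placeholder for blkdev_issue_discard(); just prints the run. */
static void issue_discard(sector_t start, sector_t nblocks)
{
	printf("discard [%llu, +%llu)\n", start, nblocks);
}

static void discard_segments(const sector_t seg_start[], const sector_t seg_end[], int nsegs)
{
	sector_t start = 0, nblocks = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		if (!nblocks) {
			/* Open a new run. */
			start = seg_start[i];
			nblocks = seg_end[i] - seg_start[i] + 1;
		} else if (start + nblocks == seg_start[i]) {
			/* Contiguous with the current run: extend it. */
			nblocks += seg_end[i] - seg_start[i] + 1;
		} else {
			/* Gap: flush the accumulated run, then start over here. */
			issue_discard(start, nblocks);
			start = seg_start[i];
			nblocks = seg_end[i] - seg_start[i] + 1;
		}
	}
	if (nblocks)
		issue_discard(start, nblocks);
}

int main(void)
{
	sector_t s[] = { 0, 8, 32 }, e[] = { 7, 15, 39 };

	discard_segments(s, e, 3);   /* segments 0-7 and 8-15 merge; 32-39 is separate */
	return 0;
}
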
/linux-6.1.9/block/partitions/
sysv68.c
44 __be32 nblocks; /* slice size (in blocks) */ member
83 if (be32_to_cpu(slice->nblocks)) { in sysv68_partition()
86 be32_to_cpu(slice->nblocks)); in sysv68_partition()
/linux-6.1.9/fs/
mpage.c
151 unsigned nblocks; in do_mpage_readpage() local
176 nblocks = map_bh->b_size >> blkbits; in do_mpage_readpage()
179 block_in_file < (args->first_logical_block + nblocks)) { in do_mpage_readpage()
181 unsigned last = nblocks - map_offset; in do_mpage_readpage()
239 nblocks = map_bh->b_size >> blkbits; in do_mpage_readpage()
241 if (relative_block == nblocks) { in do_mpage_readpage()
291 nblocks = map_bh->b_size >> blkbits; in do_mpage_readpage()
292 if ((buffer_boundary(map_bh) && relative_block == nblocks) || in do_mpage_readpage()
/linux-6.1.9/fs/jbd2/
transaction.c
474 static handle_t *new_handle(int nblocks) in new_handle() argument
479 handle->h_total_credits = nblocks; in new_handle()
485 handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks, in jbd2__journal_start() argument
501 nblocks += DIV_ROUND_UP(revoke_records, in jbd2__journal_start()
503 handle = new_handle(nblocks); in jbd2__journal_start()
531 line_no, nblocks); in jbd2__journal_start()
557 handle_t *jbd2_journal_start(journal_t *journal, int nblocks) in jbd2_journal_start() argument
559 return jbd2__journal_start(journal, nblocks, 0, 0, GFP_NOFS, 0, 0); in jbd2_journal_start()
660 int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records) in jbd2_journal_extend() argument
678 "transaction not running\n", handle, nblocks); in jbd2_journal_extend()
[all …]
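
In jbd2__journal_start() above, nblocks is the handle's buffer-credit budget; revoke records are folded into it up front by converting them to descriptor blocks with DIV_ROUND_UP. A toy sketch of that credit arithmetic (REVOKES_PER_BLOCK is an assumed placeholder, not the real journal geometry):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Assumed placeholder: revoke records that fit in one journal block. */
#define REVOKES_PER_BLOCK 128

struct handle {
	int total_credits;   /* mirrors handle->h_total_credits */
};

static struct handle journal_start(int nblocks, int revoke_records)
{
	struct handle h;

	/* Revoke records consume journal space too, so they are charged
	 * against the block-credit budget before the handle is set up. */
	nblocks += DIV_ROUND_UP(revoke_records, REVOKES_PER_BLOCK);
	h.total_credits = nblocks;
	return h;
}

int main(void)
{
	struct handle h = journal_start(8, 200);

	printf("handle credits: %d\n", h.total_credits);   /* 8 + 2 = 10 */
	return 0;
}
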
