/linux-6.1.9/Documentation/filesystems/ext4/ |
D | blockmap.rst |
    6   | 0 to 11 | Direct map to file blocks 0 to 11. …
    8   … | Indirect block: (file blocks 12 to (``$block_size`` / 4) + 11, or 12 to 1035 i…
    13  … (``$block_size`` / 4) | Direct map to (``$block_size`` / 4) blocks (1024 if 4KiB blocks) | …
    16  …t block: (file blocks ``$block_size``/4 + 12 to (``$block_size`` / 4) ^ 2 + (``$block_size`` / 4) …
    21  …$block_size`` / 4) | Map to (``$block_size`` / 4) indirect blocks (1024 if 4KiB blocks) …
    26  … (``$block_size`` / 4) | Direct map to (``$block_size`` / 4) blocks (1024 if 4KiB blocks) | …
    30  …blocks (``$block_size`` / 4) ^ 2 + (``$block_size`` / 4) + 12 to (``$block_size`` / 4) ^ 3 + (``$b…
    35  …size`` / 4) | Map to (``$block_size`` / 4) double indirect blocks (1024 if 4KiB blocks) …
    40  …$block_size`` / 4) | Map to (``$block_size`` / 4) indirect blocks (1024 if 4KiB blocks) …
    45  … (``$block_size`` / 4) | Direct map to (``$block_size`` / 4) blocks (1024 if 4KiB blocks) | …
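All the ranges in this table follow from one constant: block pointers are 4 bytes, so a single block holds ``$block_size`` / 4 of them (1024 with 4 KiB blocks). A minimal standalone sketch, not kernel code, that reproduces the table's ranges for the 4 KiB case::

    #include <stdio.h>

    int main(void)
    {
        /* assumption: 4 KiB blocks, 4-byte block pointers (the "1024" case) */
        unsigned long addr = 4096 / 4;            /* pointers per block */
        unsigned long direct = 12;                /* i_block[0..11]     */
        unsigned long ind  = addr;                /* single indirect    */
        unsigned long dind = addr * addr;         /* double indirect    */
        unsigned long tind = addr * addr * addr;  /* triple indirect    */

        printf("direct:          0 to %lu\n", direct - 1);
        printf("single indirect: %lu to %lu\n", direct, direct + ind - 1);
        printf("double indirect: %lu to %lu\n", direct + ind,
               direct + ind + dind - 1);
        printf("triple indirect: %lu to %lu\n", direct + ind + dind,
               direct + ind + dind + tind - 1);
        return 0;
    }

With 4 KiB blocks this prints 0 to 11, 12 to 1035, 1036 to 1049611, and 1049612 to 1074791435, matching the formulas quoted above.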
|
/linux-6.1.9/crypto/async_tx/ |
D | async_pq.c |
    107  do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,  in do_sync_gen_syndrome() argument
    117  srcs = (void **) blocks;  in do_sync_gen_syndrome()
    120  if (blocks[i] == NULL) {  in do_sync_gen_syndrome()
    124  srcs[i] = page_address(blocks[i]) + offsets[i];  in do_sync_gen_syndrome()
    177  async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,  in async_gen_syndrome() argument
    182  &P(blocks, disks), 2,  in async_gen_syndrome()
    183  blocks, src_cnt, len);  in async_gen_syndrome()
    187  BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));  in async_gen_syndrome()
    211  if (blocks[i] == NULL)  in async_gen_syndrome()
    213  unmap->addr[j] = dma_map_page(device->dev, blocks[i],  in async_gen_syndrome()
    [all …]
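do_sync_gen_syndrome() is the CPU fallback for computing the RAID6 P/Q syndrome over the data blocks when no DMA engine is available. As an illustration of the underlying math only (the kernel dispatches to the optimized raid6 library, not this): P is the XOR of the data blocks and Q is the GF(2^8) weighted sum with generator 2, evaluated with Horner's scheme::

    #include <stdint.h>
    #include <stddef.h>

    /* multiply by x in GF(2^8) modulo x^8 + x^4 + x^3 + x^2 + 1 (0x11d) */
    static uint8_t gf2_mul2(uint8_t v)
    {
        return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
    }

    /* blocks[0..disks-3] hold data; blocks[disks-2] is P, blocks[disks-1] is Q */
    static void gen_syndrome(int disks, size_t len, uint8_t **blocks)
    {
        uint8_t *p = blocks[disks - 2];
        uint8_t *q = blocks[disks - 1];

        for (size_t off = 0; off < len; off++) {
            uint8_t wp = 0, wq = 0;

            /* Horner's scheme over the data disks, highest index first */
            for (int d = disks - 3; d >= 0; d--) {
                wp ^= blocks[d][off];
                wq  = (uint8_t)(gf2_mul2(wq) ^ blocks[d][off]);
            }
            p[off] = wp;
            q[off] = wq;
        }
    }

    int main(void)
    {
        uint8_t d0[4] = { 1, 2, 3, 4 }, d1[4] = { 5, 6, 7, 8 };
        uint8_t p[4], q[4];
        uint8_t *blocks[4] = { d0, d1, p, q };

        gen_syndrome(4, sizeof(d0), blocks);
        /* with two data disks: P = D0 ^ D1 and Q = D0 ^ g*D1 */
        return !(p[0] == (d0[0] ^ d1[0]) &&
                 q[0] == (uint8_t)(d0[0] ^ gf2_mul2(d1[0])));
    }

Walking the data disks from the highest index down means each step multiplies the running Q by the generator and adds the next block, which is why the real implementations need only an XOR and a conditional-XOR per byte.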
|
D | async_raid6_recov.c |
    154  struct page **blocks, unsigned int *offs,  in __2data_recov_4() argument
    168  p = blocks[disks-2];  in __2data_recov_4()
    170  q = blocks[disks-1];  in __2data_recov_4()
    173  a = blocks[faila];  in __2data_recov_4()
    175  b = blocks[failb];  in __2data_recov_4()
    204  struct page **blocks, unsigned int *offs,  in __2data_recov_5() argument
    222  if (blocks[i] == NULL)  in __2data_recov_5()
    231  p = blocks[disks-2];  in __2data_recov_5()
    233  q = blocks[disks-1];  in __2data_recov_5()
    235  g = blocks[good];  in __2data_recov_5()
    [all …]
|
/linux-6.1.9/crypto/ |
D | aegis128-core.c |
    32   union aegis_block blocks[AEGIS128_STATE_BLOCKS];  member
    66   tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1];  in crypto_aegis128_update()
    68   crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1],  in crypto_aegis128_update()
    69   &state->blocks[i]);  in crypto_aegis128_update()
    70   crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]);  in crypto_aegis128_update()
    83   crypto_aegis_block_xor(&state->blocks[0], msg);  in crypto_aegis128_update_a()
    95   crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE);  in crypto_aegis128_update_u()
    108  state->blocks[0] = key_iv;  in crypto_aegis128_init()
    109  state->blocks[1] = crypto_aegis_const[1];  in crypto_aegis128_init()
    110  state->blocks[2] = crypto_aegis_const[0];  in crypto_aegis128_init()
    [all …]
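The ``tmp`` at line 66 is the key to the update step: every state block is replaced by an AES round of its predecessor keyed with itself, and the last block must be saved first because block 0's update needs its *old* value. A structural sketch only; ``aes_round()`` here is a placeholder XOR standing in for a real AES encryption round::

    #include <stdint.h>

    #define AEGIS_BLOCK_SIZE 16
    #define STATE_BLOCKS 5

    struct aegis_blk { uint8_t bytes[AEGIS_BLOCK_SIZE]; };

    static void aes_round(struct aegis_blk *dst, const struct aegis_blk *src,
                          const struct aegis_blk *key)
    {
        /* placeholder: real code runs SubBytes/ShiftRows/MixColumns on src,
         * then XORs in key */
        for (int i = 0; i < AEGIS_BLOCK_SIZE; i++)
            dst->bytes[i] = src->bytes[i] ^ key->bytes[i];
    }

    static void aegis128_update(struct aegis_blk state[STATE_BLOCKS])
    {
        struct aegis_blk tmp = state[STATE_BLOCKS - 1];

        /* downward, so state[i - 1] is still the old value */
        for (int i = STATE_BLOCKS - 1; i > 0; i--)
            aes_round(&state[i], &state[i - 1], &state[i]);
        aes_round(&state[0], &tmp, &state[0]);
    }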
|
/linux-6.1.9/arch/x86/crypto/ |
D | ecb_cbc_helpers.h |
    31   #define ECB_WALK_ADVANCE(blocks) do { \  argument
    32   dst += (blocks) * __bsize; \
    33   src += (blocks) * __bsize; \
    34   nbytes -= (blocks) * __bsize; \
    37   #define ECB_BLOCK(blocks, func) do { \  argument
    38   while (nbytes >= (blocks) * __bsize) { \
    40   ECB_WALK_ADVANCE(blocks); \
    55   #define CBC_DEC_BLOCK(blocks, func) do { \  argument
    56   while (nbytes >= (blocks) * __bsize) { \
    57   const u8 *__iv = src + ((blocks) - 1) * __bsize; \
    [all …]
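These macros expand to a simple walk: consume as many ``blocks``-sized chunks as ``func`` handles at once, advancing ``src``/``dst`` and shrinking ``nbytes``. A plain-C sketch of the expanded pattern, with hypothetical ``enc8``/``enc1`` routines standing in for the wide and single-block SIMD helpers::

    #include <stdint.h>
    #include <stddef.h>

    #define BSIZE 16  /* AES block size */

    void ecb_encrypt_walk(void *ctx,
                          void (*enc8)(void *ctx, uint8_t *dst, const uint8_t *src),
                          void (*enc1)(void *ctx, uint8_t *dst, const uint8_t *src),
                          uint8_t *dst, const uint8_t *src, size_t nbytes)
    {
        while (nbytes >= 8 * BSIZE) {   /* ECB_BLOCK(8, enc8) */
            enc8(ctx, dst, src);
            dst += 8 * BSIZE;           /* ECB_WALK_ADVANCE(8) */
            src += 8 * BSIZE;
            nbytes -= 8 * BSIZE;
        }
        while (nbytes >= BSIZE) {       /* ECB_BLOCK(1, enc1) */
            enc1(ctx, dst, src);
            dst += BSIZE;
            src += BSIZE;
            nbytes -= BSIZE;
        }
    }

CBC_DEC_BLOCK adds one wrinkle, visible at line 57: before decrypting a multi-block chunk in place, it records a pointer to the chunk's last ciphertext block, since that ciphertext becomes the chaining value for the next chunk.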
|
D | blake2s-glue.c |
    41   const size_t blocks = min_t(size_t, nblocks,  in blake2s_compress() local
    47   blake2s_compress_avx512(state, block, blocks, inc);  in blake2s_compress()
    49   blake2s_compress_ssse3(state, block, blocks, inc);  in blake2s_compress()
    52   nblocks -= blocks;  in blake2s_compress()
    53   block += blocks * BLAKE2S_BLOCK_SIZE;  in blake2s_compress()
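blake2s_compress() caps how many blocks it hands to the SIMD routines per pass so that each FPU-owning section stays short, then loops until the input is gone. A sketch of that caller-side chunking, with illustrative names and a stub in place of the SIMD routine (the 4 KiB cap matches the ``min_t`` at line 41, an assumption about the elided bound)::

    #include <stdint.h>
    #include <stddef.h>

    #define BLAKE2S_BLOCK_SIZE 64
    #define MAX_BLOCKS_PER_PASS (4096 / BLAKE2S_BLOCK_SIZE)

    /* stub standing in for blake2s_compress_ssse3/avx512 */
    static void compress_simd(void *state, const uint8_t *block,
                              size_t blocks, uint32_t inc)
    {
        (void)state; (void)block; (void)blocks; (void)inc;
        /* real code would run the SIMD compression rounds here */
    }

    static void compress_chunked(void *state, const uint8_t *block,
                                 size_t nblocks, uint32_t inc)
    {
        do {
            size_t blocks = nblocks < MAX_BLOCKS_PER_PASS ?
                            nblocks : MAX_BLOCKS_PER_PASS;

            /* kernel_fpu_begin() in the kernel */
            compress_simd(state, block, blocks, inc);
            /* kernel_fpu_end() */

            nblocks -= blocks;
            block += blocks * BLAKE2S_BLOCK_SIZE;
        } while (nblocks);
    }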
|
/linux-6.1.9/arch/arm64/crypto/ |
D | aes-neonbs-glue.c |
    29   int rounds, int blocks);
    31   int rounds, int blocks);
    34   int rounds, int blocks, u8 iv[]);
    37   int rounds, int blocks, u8 iv[]);
    40   int rounds, int blocks, u8 iv[]);
    42   int rounds, int blocks, u8 iv[]);
    46   int rounds, int blocks);
    48   int rounds, int blocks, u8 iv[]);
    96   int rounds, int blocks))  in __ecb_crypt() argument
    106  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  in __ecb_crypt() local
    [all …]
|
D | sha512-ce-glue.c |
    30   int blocks);
    32   asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
    35   int blocks)  in __sha512_ce_transform() argument
    37   while (blocks) {  in __sha512_ce_transform()
    41   rem = sha512_ce_transform(sst, src, blocks);  in __sha512_ce_transform()
    43   src += (blocks - rem) * SHA512_BLOCK_SIZE;  in __sha512_ce_transform()
    44   blocks = rem;  in __sha512_ce_transform()
    49   int blocks)  in __sha512_block_data_order() argument
    51   sha512_block_data_order(sst->state, src, blocks);  in __sha512_block_data_order()
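__sha512_ce_transform() wraps a helper that may stop early: the assembly returns how many blocks it did not process, and the wrapper advances ``src`` past the completed blocks and retries. A standalone sketch of this "process what you can, loop on the remainder" pattern, with a stub in place of the Crypto Extensions assembly (the sha2-ce-glue.c entry below has the same shape)::

    #include <stdint.h>

    #define SHA512_BLOCK_SIZE 128

    /* stub for the CE assembly: pretends it can only do 4 blocks per call
     * and returns the number of blocks still unprocessed */
    static int sha512_hw_transform(uint64_t state[8], const uint8_t *src,
                                   int blocks)
    {
        int done = blocks < 4 ? blocks : 4;

        (void)state; (void)src;  /* real code would run the SHA-512 rounds */
        return blocks - done;
    }

    static void sha512_do_blocks(uint64_t state[8], const uint8_t *src,
                                 int blocks)
    {
        while (blocks) {
            /* kernel_neon_begin() in the kernel */
            int rem = sha512_hw_transform(state, src, blocks);
            /* kernel_neon_end() */

            src += (blocks - rem) * SHA512_BLOCK_SIZE;
            blocks = rem;
        }
    }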
|
D | sha2-ce-glue.c |
    34   int blocks);
    37   int blocks)  in __sha2_ce_transform() argument
    39   while (blocks) {  in __sha2_ce_transform()
    44   sst), src, blocks);  in __sha2_ce_transform()
    46   src += (blocks - rem) * SHA256_BLOCK_SIZE;  in __sha2_ce_transform()
    47   blocks = rem;  in __sha2_ce_transform()
    56   asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
    59   int blocks)  in __sha256_block_data_order() argument
    61   sha256_block_data_order(sst->state, src, blocks);  in __sha256_block_data_order()
|
D | aes-glue.c |
    78   int rounds, int blocks);
    80   int rounds, int blocks);
    83   int rounds, int blocks, u8 iv[]);
    85   int rounds, int blocks, u8 iv[]);
    106  int rounds, int blocks, u8 iv[],
    109  int rounds, int blocks, u8 iv[],
    113  int blocks, u8 dg[], int enc_before,
    185  unsigned int blocks;  in ecb_encrypt() local
    189  while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {  in ecb_encrypt()
    192  ctx->key_enc, rounds, blocks);  in ecb_encrypt()
    [all …]
|
/linux-6.1.9/drivers/mtd/ |
D | rfd_ftl.c |
    88   struct block *blocks;  member
    95   struct block *block = &part->blocks[block_no];  in build_block_map()
    188  part->blocks = kcalloc(part->total_blocks, sizeof(struct block),  in scan_header()
    190  if (!part->blocks)  in scan_header()
    235  kfree(part->blocks);  in scan_header()
    277  erase->addr = part->blocks[block].offset;  in erase_block()
    280  part->blocks[block].state = BLOCK_ERASING;  in erase_block()
    281  part->blocks[block].free_sectors = 0;  in erase_block()
    288  part->blocks[block].state = BLOCK_FAILED;  in erase_block()
    289  part->blocks[block].free_sectors = 0;  in erase_block()
    [all …]
|
/linux-6.1.9/Documentation/userspace-api/media/v4l/ |
D | vidioc-g-edid.rst |
    60   ``start_block``, ``blocks`` and ``edid`` fields, zero the ``reserved``
    62   ``start_block`` and of size ``blocks`` will be placed in the memory
    64   ``blocks`` * 128 bytes large (the size of one block is 128 bytes).
    66   If there are fewer blocks than specified, then the driver will set
    67   ``blocks`` to the actual number of blocks. If there are no EDID blocks
    70   If blocks have to be retrieved from the sink, then this call will block
    73   If ``start_block`` and ``blocks`` are both set to 0 when
    74   :ref:`VIDIOC_G_EDID <VIDIOC_G_EDID>` is called, then the driver will set ``blocks`` to the
    75   total number of available EDID blocks and it will return 0 without
    76   copying any data. This is an easy way to discover how many EDID blocks
    [all …]
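A usage sketch of the two-step pattern described above (the device path and error handling are illustrative): query the block count with ``start_block`` = ``blocks`` = 0, size the buffer at 128 bytes per block, then fetch the EDID::

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        struct v4l2_edid edid;
        int fd = open("/dev/video0", O_RDWR);  /* example node */

        if (fd < 0)
            return 1;

        /* step 1: start_block = blocks = 0 makes the driver report the
         * total number of available EDID blocks without copying data */
        memset(&edid, 0, sizeof(edid));
        if (ioctl(fd, VIDIOC_G_EDID, &edid) || !edid.blocks)
            return 1;
        printf("sink reports %u EDID block(s)\n", edid.blocks);

        /* step 2: fetch them; the buffer must be blocks * 128 bytes */
        edid.start_block = 0;
        edid.edid = malloc(edid.blocks * 128);
        if (!edid.edid || ioctl(fd, VIDIOC_G_EDID, &edid))
            return 1;

        printf("got %u block(s), first bytes %02x %02x\n",
               edid.blocks, edid.edid[0], edid.edid[1]);
        free(edid.edid);
        close(fd);
        return 0;
    }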
|
/linux-6.1.9/arch/m68k/emu/ |
D | nfblock.c |
    40   static inline s32 nfhd_get_capacity(u32 major, u32 minor, u32 *blocks,  in nfhd_get_capacity() argument
    44   virt_to_phys(blocks), virt_to_phys(blocksize));  in nfhd_get_capacity()
    55   u32 blocks, bsize;  member
    84   geo->cylinders = dev->blocks >> (6 - dev->bshift);  in nfhd_getgeo()
    97   static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)  in nfhd_init_one() argument
    104  blocks, bsize);  in nfhd_init_one()
    116  dev->blocks = blocks;  in nfhd_init_one()
    130  set_capacity(dev->disk, (sector_t)blocks * (bsize / 512));  in nfhd_init_one()
    150  u32 blocks, bsize;  in nfhd_init() local
    168  if (nfhd_get_capacity(i, 0, &blocks, &bsize))  in nfhd_init()
    [all …]
|
/linux-6.1.9/Documentation/admin-guide/device-mapper/ |
D | writecache.rst |
    27   start writeback when the number of used blocks reach this
    30   stop writeback when the number of used blocks drops below
    33   limit the number of blocks that are in flight during
    37   when the application writes this amount of blocks without
    38   issuing the FLUSH request, the blocks are automatically
    58   new writes (however, writes to already cached blocks are
    63   blocks drops to zero, userspace can unload the
    80   2. the number of blocks
    81   3. the number of free blocks
    82   4. the number of blocks under writeback
    [all …]
|
D | era.rst |
    9    addition it keeps track of which blocks were written within a user
    14   Use cases include tracking changed blocks for backup software, and
    25   origin dev	device holding data blocks that may change
    55   <metadata block size> <#used metadata blocks>/<#total metadata blocks>
    61   #used metadata blocks	Number of metadata blocks used
    62   #total metadata blocks	Total number of metadata blocks
    64   held metadata root	The location, in blocks, of the metadata root
    89   - Ascertain which blocks have been written since the snapshot was taken
    91   - Invalidate those blocks in the caching software
    99   that it uses a few 4k blocks for updating metadata::
|
/linux-6.1.9/Documentation/devicetree/bindings/sifive/ |
D | sifive-blocks-ip-versioning.txt |
    1    DT compatible string versioning for SiFive open-source IP blocks
    4    strings for open-source SiFive IP blocks. HDL for these IP blocks
    7    https://github.com/sifive/sifive-blocks
    14   https://github.com/sifive/sifive-blocks/blob/v1.0/src/main/scala/devices/uart/UART.scala#L43
    16   Until these IP blocks (or IP integration) support version
    17   auto-discovery, the maintainers of these IP blocks intend to increment
    19   interface to these IP blocks changes, or when the functionality of the
    20   underlying IP blocks changes in a way that software should be aware of.
    25   upstream sifive-blocks commits. It is expected that most drivers will
|
/linux-6.1.9/drivers/mfd/ |
D | stmpe.c |
    35   unsigned int blocks;  member
    41   static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks)  in __stmpe_enable() argument
    43   return stmpe->variant->enable(stmpe, blocks, true);  in __stmpe_enable()
    46   static int __stmpe_disable(struct stmpe *stmpe, unsigned int blocks)  in __stmpe_disable() argument
    48   return stmpe->variant->enable(stmpe, blocks, false);  in __stmpe_disable()
    126  int stmpe_enable(struct stmpe *stmpe, unsigned int blocks)  in stmpe_enable() argument
    131  ret = __stmpe_enable(stmpe, blocks);  in stmpe_enable()
    143  int stmpe_disable(struct stmpe *stmpe, unsigned int blocks)  in stmpe_disable() argument
    148  ret = __stmpe_disable(stmpe, blocks);  in stmpe_disable()
    408  static int stmpe801_enable(struct stmpe *stmpe, unsigned int blocks,  in stmpe801_enable() argument
    [all …]
|
/linux-6.1.9/arch/arm/crypto/ |
D | aes-neonbs-glue.c |
    32   int rounds, int blocks);
    34   int rounds, int blocks);
    37   int rounds, int blocks, u8 iv[]);
    40   int rounds, int blocks, u8 ctr[]);
    43   int rounds, int blocks, u8 iv[], int);
    45   int rounds, int blocks, u8 iv[], int);
    90   int rounds, int blocks))  in __ecb_crypt() argument
    100  unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;  in __ecb_crypt() local
    103  blocks = round_down(blocks,  in __ecb_crypt()
    108  ctx->rounds, blocks);  in __ecb_crypt()
    [all …]
|
/linux-6.1.9/lib/crypto/ |
D | blake2s-selftest.c |
    596  u8 blocks[BLAKE2S_BLOCK_SIZE * 2];  in blake2s_selftest() local
    599  get_random_bytes(blocks, sizeof(blocks));  in blake2s_selftest()
    606  blake2s_compress(&state1, blocks, 2, BLAKE2S_BLOCK_SIZE);  in blake2s_selftest()
    607  blake2s_compress_generic(&state2, blocks, 2, BLAKE2S_BLOCK_SIZE);  in blake2s_selftest()
    616  blake2s_compress(&state1, blocks, 1, BLAKE2S_BLOCK_SIZE);  in blake2s_selftest()
    618  memcpy(unaligned_block + l, blocks,  in blake2s_selftest()
|
/linux-6.1.9/fs/ |
D | mpage.c |
    144  sector_t blocks[MAX_BUF_PER_PAGE];  in do_mpage_readpage() local
    190  blocks[page_block] = map_bh->b_blocknr + map_offset +  in do_mpage_readpage()
    237  if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)  in do_mpage_readpage()
    246  blocks[page_block] = map_bh->b_blocknr+relative_block;  in do_mpage_readpage()
    267  if (args->bio && (args->last_block_in_bio != blocks[0] - 1))  in do_mpage_readpage()
    273  if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),  in do_mpage_readpage()
    281  args->bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);  in do_mpage_readpage()
    296  args->last_block_in_bio = blocks[blocks_per_page - 1];  in do_mpage_readpage()
    460  sector_t blocks[MAX_BUF_PER_PAGE];  in __mpage_writepage() local
    498  if (bh->b_blocknr != blocks[page_block-1] + 1)  in __mpage_writepage()
    [all …]
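Both do_mpage_readpage() and __mpage_writepage() hinge on the same test, visible at lines 237 and 498: a page can be submitted in a single bio only if its blocks are consecutive on disk. A minimal sketch of that check::

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t sector_t;  /* stand-in for the kernel type */

    /* true if the page's blocks form one contiguous on-disk extent */
    static bool blocks_are_contiguous(const sector_t *blocks, size_t n)
    {
        for (size_t i = 1; i < n; i++)
            if (blocks[i] != blocks[i - 1] + 1)
                return false;
        return true;
    }

do_mpage_readpage() additionally compares ``blocks[0]`` against ``last_block_in_bio`` (line 267) so that consecutive pages can keep extending the same bio instead of starting a new one.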
|
/linux-6.1.9/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_vram_mgr.c |
    38   struct list_head blocks;  member
    262  list_for_each_entry(block, &vres->blocks, link)  in amdgpu_vram_mgr_bo_visible_size()
    278  list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {  in amdgpu_vram_mgr_do_reserve()
    296  list_move(&rsv->blocks, &mgr->reserved_pages);  in amdgpu_vram_mgr_do_reserve()
    319  INIT_LIST_HEAD(&rsv->blocks);  in amdgpu_vram_mgr_reserve_range()
    325  list_add_tail(&rsv->blocks, &mgr->reservations_pending);  in amdgpu_vram_mgr_reserve_range()
    351  list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {  in amdgpu_vram_mgr_query_page_status()
    359  list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {  in amdgpu_vram_mgr_query_page_status()
    433  INIT_LIST_HEAD(&vres->blocks);  in amdgpu_vram_mgr_new()
    482  &vres->blocks,  in amdgpu_vram_mgr_new()
    [all …]
|
/linux-6.1.9/drivers/gpu/drm/tests/ |
D | drm_buddy_test.c |
    141  struct list_head *blocks, u64 expected_size, bool is_contiguous)  in check_blocks() argument
    152  list_for_each_entry(block, blocks, link) {  in check_blocks()
    332  LIST_HEAD(blocks);  in drm_test_buddy_alloc_pathological()
    351  block = list_first_entry_or_null(&blocks, typeof(*block), link);  in drm_test_buddy_alloc_pathological()
    368  list_move_tail(&block->link, &blocks);  in drm_test_buddy_alloc_pathological()
    400  list_splice_tail(&holes, &blocks);  in drm_test_buddy_alloc_pathological()
    401  drm_buddy_free_list(&mm, &blocks);  in drm_test_buddy_alloc_pathological()
    428  LIST_HEAD(blocks);  in drm_test_buddy_alloc_smoke()
    463  list_move_tail(&block->link, &blocks);  in drm_test_buddy_alloc_smoke()
    476  err = check_blocks(test, &mm, &blocks, total, false);  in drm_test_buddy_alloc_smoke()
    [all …]
|
/linux-6.1.9/Documentation/filesystems/ |
D | qnx6.rst |
    19   concepts of blocks, inodes and directories.
    31   The space in the device or file is split up into blocks. These are a fixed
    49   are done by copying all modified blocks during that specific write request
    57   If the level value is 0, up to 16 direct blocks can be addressed by each
    61   addressing block holds up to blocksize / 4 bytes pointers to data blocks.
    63   to 16 * 256 * 256 = 1048576 blocks that can be addressed by such a tree).
    66   indirect addressing blocks or inodes.
    75   information (total number of filesystem blocks) or by taking the highest
    86   The inode structure contains pointers to the filesystem blocks which contain
    90   size, number of blocks used, access time, change time and modification time.
    [all …]
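The capacity figures quoted at lines 57 and 63 follow from 4-byte block pointers: with 1 KiB blocks an addressing block holds 256 pointers, so each extra tree level multiplies the inode's 16 root pointers by 256. A small sketch that reproduces the 16 * 256 * 256 number::

    #include <stdio.h>

    int main(void)
    {
        /* assumption: 1 KiB blocks, matching the 16 * 256 * 256 example */
        unsigned long per_block = 1024 / 4;  /* 4-byte pointers per block */
        unsigned long reach = 16;            /* level 0: direct pointers  */

        for (int level = 0; level <= 2; level++) {
            printf("level %d: up to %lu data blocks\n", level, reach);
            reach *= per_block;
        }
        return 0;
    }

This prints 16, 4096, and 1048576 blocks for levels 0 through 2.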
|
/linux-6.1.9/include/crypto/ |
D | sha1_base.h |
    19   typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
    46   int blocks;  in sha1_base_do_update() local
    58   blocks = len / SHA1_BLOCK_SIZE;  in sha1_base_do_update()
    61   if (blocks) {  in sha1_base_do_update()
    62   block_fn(sctx, data, blocks);  in sha1_base_do_update()
    63   data += blocks * SHA1_BLOCK_SIZE;  in sha1_base_do_update()
|
D | sha256_base.h |
    20   int blocks);
    49   int blocks;  in sha256_base_do_update() local
    61   blocks = len / SHA256_BLOCK_SIZE;  in sha256_base_do_update()
    64   if (blocks) {  in sha256_base_do_update()
    65   block_fn(sctx, data, blocks);  in sha256_base_do_update()
    66   data += blocks * SHA256_BLOCK_SIZE;  in sha256_base_do_update()
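sha1_base_do_update() and sha256_base_do_update() share one buffering scheme: top up any partial block held in the state, hand every complete block to ``block_fn``, and stash the tail for the next update. A standalone sketch of that scheme, with illustrative types rather than the kernel's::

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define BLOCK_SIZE 64  /* SHA-1/SHA-256 block size */

    struct hash_state {
        uint8_t buf[BLOCK_SIZE];
        uint64_t count;  /* total bytes hashed so far */
    };

    typedef void (block_fn_t)(struct hash_state *st, const uint8_t *src,
                              int blocks);

    static void base_do_update(struct hash_state *st, const uint8_t *data,
                               size_t len, block_fn_t *block_fn)
    {
        size_t partial = st->count % BLOCK_SIZE;

        st->count += len;

        if (partial) {
            /* top up the buffered partial block first */
            size_t fill = BLOCK_SIZE - partial;

            if (fill > len)
                fill = len;
            memcpy(st->buf + partial, data, fill);
            data += fill;
            len -= fill;
            if (partial + fill == BLOCK_SIZE)
                block_fn(st, st->buf, 1);
            else
                return;  /* still only a partial block buffered */
        }

        if (len >= BLOCK_SIZE) {
            /* hand all complete blocks to the block function at once */
            int blocks = (int)(len / BLOCK_SIZE);

            block_fn(st, data, blocks);
            data += (size_t)blocks * BLOCK_SIZE;
            len -= (size_t)blocks * BLOCK_SIZE;
        }
        memcpy(st->buf, data, len);  /* stash the tail */
    }

Passing all complete blocks in a single ``block_fn`` call is the point of the design: it lets assembly implementations amortize their setup cost across the whole run instead of paying it per block.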
|