/linux-6.6.21/drivers/gpu/drm/panfrost/ |
D | panfrost_gem.c |
    36   WARN_ON_ONCE(!list_empty(&bo->mappings.list));  in panfrost_gem_free_object()
    61   mutex_lock(&bo->mappings.lock);  in panfrost_gem_mapping_get()
    62   list_for_each_entry(iter, &bo->mappings.list, node) {  in panfrost_gem_mapping_get()
    69   mutex_unlock(&bo->mappings.lock);  in panfrost_gem_mapping_get()
    110  list_for_each_entry(mapping, &bo->mappings.list, node)  in panfrost_gem_teardown_mappings_locked()
    158  mutex_lock(&bo->mappings.lock);  in panfrost_gem_open()
    160  list_add_tail(&mapping->node, &bo->mappings.list);  in panfrost_gem_open()
    161  mutex_unlock(&bo->mappings.lock);  in panfrost_gem_open()
    175  mutex_lock(&bo->mappings.lock);  in panfrost_gem_close()
    176  list_for_each_entry(iter, &bo->mappings.list, node) {  in panfrost_gem_close()
    [all …]
|
D | panfrost_drv.c |
    157  job->mappings = kvmalloc_array(job->bo_count,  in panfrost_lookup_bos()
    160  if (!job->mappings)  in panfrost_lookup_bos()
    174  job->mappings[i] = mapping;  in panfrost_lookup_bos()
    416  mutex_lock(&bo->mappings.lock);  in panfrost_ioctl_madvise()
    420  first = list_first_entry(&bo->mappings.list,  in panfrost_ioctl_madvise()
    432  if (!list_is_singular(&bo->mappings.list) ||  in panfrost_ioctl_madvise()
    450  mutex_unlock(&bo->mappings.lock);  in panfrost_ioctl_madvise()
|
D | panfrost_gem_shrinker.c |
    48  if (!mutex_trylock(&bo->mappings.lock))  in panfrost_gem_purge()
    61  mutex_unlock(&bo->mappings.lock);  in panfrost_gem_purge()
|
/linux-6.6.21/Documentation/mm/ |
D | highmem.rst |
    15   at all times. This means the kernel needs to start using temporary mappings of
    51   The kernel contains several ways of creating temporary mappings. The following
    55   short term mappings. They can be invoked from any context (including
    56   interrupts) but the mappings can only be used in the context which acquired
    64   These mappings are thread-local and CPU-local, meaning that the mapping
    89   mappings, the local mappings are only valid in the context of the caller
    94   Most code can be designed to use thread local mappings. User should
    99   Nesting kmap_local_page() and kmap_atomic() mappings is allowed to a certain
    103  mappings.
    110  effects of atomic mappings, i.e. disabling page faults or preemption, or both.
    [all …]
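The highmem.rst lines above concern the short-term, thread-local mappings created with kmap_local_page(). A minimal sketch of that usage — the helper name and the page-copy purpose are illustrative, not taken from the document — could look like::

  #include <linux/highmem.h>
  #include <linux/mm.h>
  #include <linux/string.h>

  /*
   * Illustrative sketch: copy one page into another through short-term,
   * thread-local mappings.  The mappings are only valid in this context
   * and, because they nest, are released in reverse (stack) order.
   */
  static void copy_page_local(struct page *dst_page, struct page *src_page)
  {
      void *src = kmap_local_page(src_page);
      void *dst = kmap_local_page(dst_page);    /* nested mapping */

      memcpy(dst, src, PAGE_SIZE);

      kunmap_local(dst);                        /* undo innermost first */
      kunmap_local(src);
  }

Unlike kmap_atomic(), these local mappings do not implicitly disable page faults or preemption, which is the contrast the quoted lines 99-110 draw.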
|
D | hugetlbfs_reserv.rst |
    87   of mappings. Location differences are:
    89   - For private mappings, the reservation map hangs off the VMA structure.
    92   - For shared mappings, the reservation map hangs off the inode. Specifically,
    93   inode->i_mapping->private_data. Since shared mappings are always backed
    121  One of the big differences between PRIVATE and SHARED mappings is the way
    124  - For shared mappings, an entry in the reservation map indicates a reservation
    127  - For private mappings, the lack of an entry in the reservation map indicates
    133  For private mappings, hugetlb_reserve_pages() creates the reservation map and
    138  are needed for the current mapping/segment. For private mappings, this is
    139  always the value (to - from). However, for shared mappings it is possible that
    [all …]
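The hugetlbfs_reserv.rst hits describe where the reservation map lives for each mapping type. A simplified sketch of that distinction follows; the helper name is made up here, and the private-mapping case omits the flag bits the real mm/hugetlb.c code masks off::

  #include <linux/fs.h>
  #include <linux/hugetlb.h>
  #include <linux/mm.h>

  /* Simplified illustration, not the exact mm/hugetlb.c helpers. */
  static struct resv_map *lookup_resv_map(struct vm_area_struct *vma)
  {
      if (vma->vm_flags & VM_MAYSHARE) {
          /* Shared mapping: the map hangs off the inode's address_space. */
          struct inode *inode = file_inode(vma->vm_file);

          return (struct resv_map *)inode->i_mapping->private_data;
      }
      /* Private mapping: the map hangs off the VMA (simplified). */
      return (struct resv_map *)vma->vm_private_data;
  }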
|
/linux-6.6.21/Documentation/arch/arm/ |
D | memory.rst |
    62   Machine specific static mappings are also
    72   PKMAP_BASE  PAGE_OFFSET-1  Permanent kernel mappings
    78   placed here using dynamic mappings.
    85   00001000  TASK_SIZE-1  User space mappings
    86   Per-thread mappings are placed here via
    96   Please note that mappings which collide with the above areas may result
    103  must set up their own mappings using open() and mmap().
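The last quoted line says applications that need device memory must create their own mappings with open() and mmap(). A minimal userspace sketch of that — the device node and mapping length are placeholders — is::

  #include <fcntl.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
      int fd = open("/dev/fb0", O_RDWR);   /* placeholder device node */
      void *va;

      if (fd < 0)
          return 1;

      va = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      if (va == MAP_FAILED) {
          close(fd);
          return 1;
      }

      /* ... access the device memory through va ... */

      munmap(va, 4096);
      close(fd);
      return 0;
  }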
|
/linux-6.6.21/drivers/gpu/drm/tegra/ |
D | submit.c |
    150  xa_lock(&context->mappings);  in tegra_drm_mapping_get()
    152  mapping = xa_load(&context->mappings, id);  in tegra_drm_mapping_get()
    156  xa_unlock(&context->mappings);  in tegra_drm_mapping_get()
    261  struct tegra_drm_used_mapping *mappings;  in submit_process_bufs()  local
    273  mappings = kcalloc(args->num_bufs, sizeof(*mappings), GFP_KERNEL);  in submit_process_bufs()
    274  if (!mappings) {  in submit_process_bufs()
    303  mappings[i].mapping = mapping;  in submit_process_bufs()
    304  mappings[i].flags = buf->flags;  in submit_process_bufs()
    307  job_data->used_mappings = mappings;  in submit_process_bufs()
    316  tegra_drm_mapping_put(mappings[i].mapping);  in submit_process_bufs()
    [all …]
|
D | uapi.c |
    39   xa_for_each(&context->mappings, id, mapping)  in tegra_drm_channel_context_close()
    42   xa_destroy(&context->mappings);  in tegra_drm_channel_context_close()
    141  xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);  in tegra_drm_ioctl_channel_open()
    252  err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),  in tegra_drm_ioctl_channel_map()
    287  mapping = xa_erase(&context->mappings, args->mapping);  in tegra_drm_ioctl_channel_unmap()
|
/linux-6.6.21/Documentation/arch/ia64/ |
D | aliasing.rst |
    67   Linux/ia64 identity mappings are done with large pages, currently
    68   either 16MB or 64MB, referred to as "granules." Cacheable mappings
    78   Uncacheable mappings are not speculative, so the processor will
    80   software. This allows UC identity mappings to cover granules that
    87   User mappings are typically done with 16K or 64K pages. The smaller
    94   There are several ways the kernel creates new mappings:
    99   This uses remap_pfn_range(), which creates user mappings. These
    100  mappings may be either WB or UC. If the region being mapped
    167  region safely with kernel page table mappings, we can use
    182  succeed. It may create either WB or UC user mappings, depending
    [all …]
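The aliasing.rst lines mention remap_pfn_range() creating WB or UC user mappings. A hedged sketch of a driver mmap() handler taking the uncacheable route — the function name and the MMIO base are placeholders — could be::

  #include <linux/fs.h>
  #include <linux/mm.h>

  static phys_addr_t example_mmio_base;   /* placeholder MMIO base address */

  /* Sketch only: map a device's MMIO region uncacheably into user space. */
  static int example_mmap(struct file *file, struct vm_area_struct *vma)
  {
      unsigned long size = vma->vm_end - vma->vm_start;

      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

      return remap_pfn_range(vma, vma->vm_start,
                             example_mmio_base >> PAGE_SHIFT,
                             size, vma->vm_page_prot);
  }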
|
/linux-6.6.21/Documentation/driver-api/ |
D | io-mapping.rst |
    44  used with mappings created by io_mapping_create_wc()
    46  Temporary mappings are only valid in the context of the caller. The mapping
    56  Nested mappings need to be undone in reverse order because the mapping
    65  The mappings are released with::
    83  The mappings are released with::
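A short, hedged sketch of the io-mapping API these lines refer to — create a write-combining mapping object for a BAR, take a temporary local mapping, and release everything; the BAR address and length are assumed parameters and cleanup on error is trimmed::

  #include <linux/errno.h>
  #include <linux/io.h>
  #include <linux/io-mapping.h>

  static int example_io_mapping(resource_size_t bar_phys, unsigned long bar_len)
  {
      struct io_mapping *iomap;
      void __iomem *vaddr;

      iomap = io_mapping_create_wc(bar_phys, bar_len);
      if (!iomap)
          return -ENOMEM;

      vaddr = io_mapping_map_local_wc(iomap, 0);   /* valid only in this context */
      writel(0, vaddr);                            /* ... use the mapping ... */
      io_mapping_unmap_local(vaddr);               /* nested maps: undo in reverse order */

      io_mapping_free(iomap);
      return 0;
  }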
|
/linux-6.6.21/drivers/soc/aspeed/ |
D | Kconfig |
    13  Control LPC firmware cycle mappings through ioctl()s. The driver
    43  Control ASPEED P2A VGA MMIO to BMC mappings through ioctl()s. The
    44  driver also provides an interface for userspace mappings to a
|
/linux-6.6.21/drivers/regulator/ |
D | rpi-panel-attiny-regulator.c |
    58   static const struct gpio_signal_mappings mappings[NUM_GPIO] = {  variable
    224  last_val = attiny_get_port_state(state, mappings[off].reg);  in attiny_gpio_set()
    226  last_val |= mappings[off].mask;  in attiny_gpio_set()
    228  last_val &= ~mappings[off].mask;  in attiny_gpio_set()
    230  attiny_set_port_state(state, mappings[off].reg, last_val);  in attiny_gpio_set()
|
/linux-6.6.21/Documentation/admin-guide/mm/ |
D | nommu-mmap.rst |
    29   These behave very much like private mappings, except that they're
    133  In the no-MMU case, however, anonymous mappings are backed by physical
    147  (#) A list of all the private copy and anonymous mappings on the system is
    150  (#) A list of all the mappings in use by a process is visible through
    176  mappings made by a process or if the mapping in which the address lies does not
    191  Shared mappings may not be moved. Shareable mappings may not be moved either,
    196  mappings, move parts of existing mappings or resize parts of mappings. It must
    243  mappings may still be mapped directly off the device under some
    250  Provision of shared mappings on memory backed files is similar to the provision
    253  of pages and permit mappings to be made on that.
    [all …]
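The hits at lines 147 and 150 concern the mapping lists exported through procfs; a process's own mappings can be inspected the same way on MMU and no-MMU kernels, e.g. with a trivial userspace dump like::

  #include <stdio.h>

  int main(void)
  {
      char line[512];
      FILE *f = fopen("/proc/self/maps", "r");

      if (!f)
          return 1;
      while (fgets(line, sizeof(line), f))
          fputs(line, stdout);
      fclose(f);
      return 0;
  }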
|
/linux-6.6.21/Documentation/devicetree/bindings/ |
D | .yamllint |
    36  forbid-in-block-mappings: true
    37  forbid-in-flow-mappings: true
|
/linux-6.6.21/Documentation/devicetree/bindings/iommu/ |
D | xen,grant-dma.yaml |
    13  The Xen IOMMU represents the Xen grant table interface. Grant mappings
    16  The binding is required to restrict memory access using Xen grant mappings.
|
/linux-6.6.21/drivers/cxl/ |
D | pmem.c |
    308  struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];  in cxl_pmem_region_probe()  local
    320  memset(&mappings, 0, sizeof(mappings));  in cxl_pmem_region_probe()
    379  mappings[i] = (struct nd_mapping_desc) {  in cxl_pmem_region_probe()
    389  ndr_desc.mapping = mappings;  in cxl_pmem_region_probe()
|
/linux-6.6.21/Documentation/arch/arm64/ |
D | hugetlbpage.rst |
    15  1) Block mappings at the pud/pmd level
    20  mappings reduce the depth of page table walk needed to translate hugepage
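The hugetlbpage.rst lines describe the block mappings arm64 uses for hugepages; from userspace such a mapping is typically requested as shown below, where 2MB is an assumed hugepage size::

  #include <stdio.h>
  #include <sys/mman.h>

  #define LEN (2UL * 1024 * 1024)   /* one 2MB hugepage (assumed size) */

  int main(void)
  {
      void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

      if (p == MAP_FAILED) {
          perror("mmap");
          return 1;
      }
      munmap(p, LEN);
      return 0;
  }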
|
D | memory.rst |
    24   mappings while the user pgd contains only user (non-global) mappings.
    38   fffffbfff0000000  fffffbfffdffffff  224MB  fixed mappings (top down)
    55   fffffbfff0000000  fffffbfffdffffff  224MB  fixed mappings (top down)
    101  mappings are created, since the host kernel runs directly in EL2.
|
/linux-6.6.21/Documentation/ABI/testing/ |
D | sysfs-firmware-efi-runtime-map |
    10  the same physical to virtual address mappings as the first
    11  kernel. The mappings are exported to sysfs so userspace tools
|
/linux-6.6.21/include/linux/ |
D | host1x.h |
    42   struct list_head mappings;  member
    48   INIT_LIST_HEAD(&cache->mappings);  in host1x_bo_cache_init()
    153  struct list_head mappings;  member
    160  INIT_LIST_HEAD(&bo->mappings);  in host1x_bo_init()
|
/linux-6.6.21/mm/ |
D | Kconfig.debug |
    99   bool "Check for invalid mappings in user page tables"
    163  bool "Warn on W+X mappings at boot"
    168  Generate a warning if any W+X mappings are found at boot.
    171  mappings after applying NX, as such mappings are a security risk.
    175  <arch>/mm: Checked W+X mappings: passed, no W+X pages found.
    179  <arch>/mm: Checked W+X mappings: failed, <N> W+X pages found.
    182  still fine, as W+X mappings are not a security hole in
|
/linux-6.6.21/tools/testing/nvdimm/test/ |
D | ndtest.c |
    408  struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];  in ndtest_create_region()  local
    416  memset(&mappings, 0, sizeof(mappings));  in ndtest_create_region()
    424  ndr_desc->mapping = mappings;  in ndtest_create_region()
    445  mappings[i].start = region->mapping[i].start;  in ndtest_create_region()
    446  mappings[i].size = region->mapping[i].size;  in ndtest_create_region()
    447  mappings[i].position = region->mapping[i].position;  in ndtest_create_region()
    448  mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;  in ndtest_create_region()
|
/linux-6.6.21/Documentation/driver-api/usb/ |
D | dma.rst |
    19  manage dma mappings for existing dma-ready buffers (see below).
    27  don't manage dma mappings for URBs.
    41  IOMMU to manage the DMA mappings. It can cost MUCH more to set up and
    42  tear down the IOMMU mappings with each request than perform the I/O!
    64  "streaming" DMA mappings.)
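The dma.rst lines contrast usbcore-managed mappings with dma-ready buffers. A hedged sketch of the latter — a coherent buffer plus URB_NO_TRANSFER_DMA_MAP so usbcore skips the streaming mapping; the example_* names are made up and buffer cleanup is trimmed — might be::

  #include <linux/usb.h>

  static void example_complete(struct urb *urb)
  {
      /* completion handling would go here */
  }

  /* Sketch only: submit a bulk URB backed by a pre-mapped coherent buffer. */
  static int example_submit(struct usb_device *udev, struct urb *urb,
                            unsigned int pipe, size_t len)
  {
      dma_addr_t dma;
      void *buf = usb_alloc_coherent(udev, len, GFP_KERNEL, &dma);

      if (!buf)
          return -ENOMEM;

      usb_fill_bulk_urb(urb, udev, pipe, buf, len, example_complete, NULL);
      urb->transfer_dma = dma;
      urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

      return usb_submit_urb(urb, GFP_KERNEL);
  }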
|
/linux-6.6.21/Documentation/gpu/rfc/ |
D | i915_vm_bind.rst |
    9    specified address space (VM). These mappings (also referred to as persistent
    10   mappings) will be persistent across multiple GPU submissions (execbuf calls)
    12   mappings during each submission (as required by older execbuf mode).
    27   * Multiple Virtual Address (VA) mappings can map to the same physical pages
    30   * Support capture of persistent mappings in the dump upon GPU error.
    90   path (where required mappings are already bound) submission latency is O(1)
    201  execbuf. VM_BIND allows bind/unbind of mappings required for the directly
    231  mapped objects. Page table pages are similar to persistent mappings of a
|
/linux-6.6.21/Documentation/core-api/ |
D | dma-api-howto.rst |
    35   mappings between physical and bus addresses.
    172  The setup for streaming mappings is performed via a call to
    214  coherent allocations, but supports full 64-bits for streaming mappings
    237  kernel will use this information later when you make DMA mappings.
    276  Types of DMA mappings
    279  There are two types of DMA mappings:
    281  - Consistent DMA mappings which are usually mapped at driver
    294  Good examples of what to use consistent mappings for are:
    303  versa. Consistent mappings guarantee this.
    325  - Streaming DMA mappings which are usually mapped for one DMA
    [all …]
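The dma-api-howto.rst hits distinguish consistent from streaming DMA mappings. A minimal sketch of both, against a generic struct device with error handling trimmed, could look like::

  #include <linux/dma-mapping.h>
  #include <linux/gfp.h>

  static void dma_mapping_examples(struct device *dev, void *buf, size_t len)
  {
      dma_addr_t dma;
      void *cpu;

      /* Consistent (coherent) mapping: typically lives as long as the driver. */
      cpu = dma_alloc_coherent(dev, len, &dma, GFP_KERNEL);
      if (cpu)
          dma_free_coherent(dev, len, cpu, dma);

      /* Streaming mapping: set up and torn down around a single transfer. */
      dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
      if (!dma_mapping_error(dev, dma))
          dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
  }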
|