/linux-5.19.10/tools/testing/selftests/memory-hotplug/ |
D | mem-on-off-test.sh |
    25  if ! ls $SYSFS/devices/system/memory/memory* > /dev/null 2>&1; then
    26  echo $msg memory hotplug is not supported >&2
    30  if ! grep -q 1 $SYSFS/devices/system/memory/memory*/removable; then
    31  echo $msg no hot-pluggable memory >&2
    43  for memory in $SYSFS/devices/system/memory/memory*; do
    44  if grep -q 1 $memory/removable &&
    45  grep -q $state $memory/state; then
    46  echo ${memory##/*/memory}
    63  grep -q online $SYSFS/devices/system/memory/memory$1/state
    68  grep -q offline $SYSFS/devices/system/memory/memory$1/state
    [all …]
|
/linux-5.19.10/Documentation/devicetree/bindings/memory-controllers/fsl/ |
D | fsl,ddr.yaml |
    4   $id: http://devicetree.org/schemas/memory-controllers/fsl/fsl,ddr.yaml#
    7   title: Freescale DDR memory controller
    15  pattern: "^memory-controller@[0-9a-f]+$"
    21  - fsl,qoriq-memory-controller-v4.4
    22  - fsl,qoriq-memory-controller-v4.5
    23  - fsl,qoriq-memory-controller-v4.7
    24  - fsl,qoriq-memory-controller-v5.0
    25  - const: fsl,qoriq-memory-controller
    27  - fsl,bsc9132-memory-controller
    28  - fsl,mpc8536-memory-controller
    [all …]
|
/linux-5.19.10/drivers/gpu/drm/nouveau/nvkm/core/ |
D | memory.c |
    30   nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device, in nvkm_memory_tags_put() argument
    39   kfree(memory->tags); in nvkm_memory_tags_put()
    40   memory->tags = NULL; in nvkm_memory_tags_put()
    48   nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device, in nvkm_memory_tags_get() argument
    56   if ((tags = memory->tags)) { in nvkm_memory_tags_get()
    94   *ptags = memory->tags = tags; in nvkm_memory_tags_get()
    101  struct nvkm_memory *memory) in nvkm_memory_ctor() argument
    103  memory->func = func; in nvkm_memory_ctor()
    104  kref_init(&memory->kref); in nvkm_memory_ctor()
    110  struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref); in nvkm_memory_del() local
    [all …]
|
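The memory.c excerpt above shows the nvkm_memory constructor initialising an embedded kref and the destructor recovering the object with container_of(). A minimal, self-contained sketch of that reference-counting pattern follows; the demo_memory names are illustrative only and are not the actual nouveau structures.

    /* Sketch of the kref + container_of() lifetime pattern used by nvkm_memory.
     * demo_memory and its helpers are hypothetical names for illustration.
     */
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct demo_memory {
        struct kref kref;   /* embedded refcount, as in struct nvkm_memory */
        size_t size;
    };

    static void demo_memory_del(struct kref *kref)
    {
        /* Recover the containing object from the embedded kref. */
        struct demo_memory *mem = container_of(kref, struct demo_memory, kref);

        kfree(mem);
    }

    static struct demo_memory *demo_memory_new(size_t size)
    {
        struct demo_memory *mem = kzalloc(sizeof(*mem), GFP_KERNEL);

        if (!mem)
            return NULL;
        mem->size = size;
        kref_init(&mem->kref);  /* refcount starts at 1 */
        return mem;
    }

    static void demo_memory_put(struct demo_memory *mem)
    {
        /* Last reference dropped -> demo_memory_del() frees the object. */
        kref_put(&mem->kref, demo_memory_del);
    }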
/linux-5.19.10/Documentation/admin-guide/mm/ |
D | memory-hotplug.rst |
    7   This document describes generic Linux support for memory hot(un)plug with
    16  memory available to a machine at runtime. In the simplest case, it consists of
    22  - The physical memory available to a machine can be adjusted at runtime, up- or
    23  downgrading the memory capacity. This dynamic memory resizing, sometimes
    28  example is replacing failing memory modules.
    30  - Reducing energy consumption either by physically unplugging memory modules or
    31  by logically unplugging (parts of) memory modules from Linux.
    33  Further, the basic memory hot(un)plug infrastructure in Linux is nowadays also
    34  used to expose persistent memory, other performance-differentiated memory and
    35  reserved memory regions as ordinary system RAM to Linux.
    [all …]
|
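The admin guide excerpted above describes onlining and offlining memory blocks through sysfs. As a hedged illustration, a userspace C snippet that asks the kernel to offline one block could look like the following; the block number 42 is a made-up example and requires a kernel with memory hotplug support.

    /* Illustrative only: offline one memory block via its sysfs "state" file.
     * memory42 is a hypothetical block; real systems expose
     * /sys/devices/system/memory/memoryN for each hotpluggable block.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/sys/devices/system/memory/memory42/state";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* Writing "offline" asks the kernel to migrate/free the block's pages. */
        if (write(fd, "offline", strlen("offline")) < 0)
            perror("write");
        close(fd);
        return 0;
    }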
D | numaperf.rst |
    7   Some platforms may have multiple types of memory attached to a compute
    8   node. These disparate memory ranges may share some characteristics, such
    12  A system supports such heterogeneous memory by grouping each memory type
    14  characteristics. Some memory may share the same node as a CPU, and others
    15  are provided as memory only nodes. While memory only nodes do not provide
    18  nodes with local memory and a memory only node for each of compute node::
    29  A "memory initiator" is a node containing one or more devices such as
    30  CPUs or separate memory I/O devices that can initiate memory requests.
    31  A "memory target" is a node containing one or more physical address
    32  ranges accessible from one or more memory initiators.
    [all …]
|
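numaperf.rst goes on to describe per-node performance attributes exported under sysfs when the platform provides them. A hedged sketch of reading one such attribute from userspace; the node number and the access0/initiators/read_bandwidth path are assumptions based on that document and only exist when firmware supplies the data.

    /* Illustrative sketch: read one performance attribute of a memory target node.
     * The path and node1 are assumed; units are as documented in numaperf.rst.
     */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/devices/system/node/node1/access0/initiators/read_bandwidth", "r");
        unsigned long bw;

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fscanf(f, "%lu", &bw) == 1)
            printf("node1 read bandwidth from its best initiators: %lu\n", bw);
        fclose(f);
        return 0;
    }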
D | concepts.rst |
    7   The memory management in Linux is a complex system that evolved over the
    9   systems from MMU-less microcontrollers to supercomputers. The memory
    21  The physical memory in a computer system is a limited resource and
    22  even for systems that support memory hotplug there is a hard limit on
    23  the amount of memory that can be installed. The physical memory is not
    29  All this makes dealing directly with physical memory quite complex and
    30  to avoid this complexity a concept of virtual memory was developed.
    32  The virtual memory abstracts the details of physical memory from the
    34  physical memory (demand paging) and provides a mechanism for the
    37  With virtual memory, each and every memory access uses a virtual
    [all …]
|
/linux-5.19.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | mem.c |
    22  #define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
    31  struct nvkm_memory memory; member
    43  nvkm_mem_target(struct nvkm_memory *memory) in nvkm_mem_target() argument
    45  return nvkm_mem(memory)->target; in nvkm_mem_target()
    49  nvkm_mem_page(struct nvkm_memory *memory) in nvkm_mem_page() argument
    55  nvkm_mem_addr(struct nvkm_memory *memory) in nvkm_mem_addr() argument
    57  struct nvkm_mem *mem = nvkm_mem(memory); in nvkm_mem_addr()
    64  nvkm_mem_size(struct nvkm_memory *memory) in nvkm_mem_size() argument
    66  return nvkm_mem(memory)->pages << PAGE_SHIFT; in nvkm_mem_size()
    70  nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, in nvkm_mem_map_dma() argument
    [all …]
|
/linux-5.19.10/Documentation/ABI/testing/ |
D | sysfs-devices-memory |
    1   What: /sys/devices/system/memory
    5   The /sys/devices/system/memory contains a snapshot of the
    6   internal state of the kernel memory blocks. Files could be
    9   Users: hotplug memory add/remove tools
    12  What: /sys/devices/system/memory/memoryX/removable
    16  The file /sys/devices/system/memory/memoryX/removable is a
    17  legacy interface used to indicated whether a memory block is
    19  "1" if and only if the kernel supports memory offlining.
    20  Users: hotplug memory remove tools
    24  What: /sys/devices/system/memory/memoryX/phys_device
    [all …]
|
/linux-5.19.10/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/ |
D | nv50.c |
    43   #define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory)
    56   nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data) in nv50_instobj_wr32_slow() argument
    58   struct nv50_instobj *iobj = nv50_instobj(memory); in nv50_instobj_wr32_slow()
    75   nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset) in nv50_instobj_rd32_slow() argument
    77   struct nv50_instobj *iobj = nv50_instobj(memory); in nv50_instobj_rd32_slow()
    102  nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) in nv50_instobj_wr32() argument
    104  iowrite32_native(data, nv50_instobj(memory)->map + offset); in nv50_instobj_wr32()
    108  nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset) in nv50_instobj_rd32() argument
    110  return ioread32_native(nv50_instobj(memory)->map + offset); in nv50_instobj_rd32()
    124  struct nvkm_memory *memory = &iobj->base.memory; in nv50_instobj_kmap() local
    [all …]
|
D | base.c |
    34  struct nvkm_memory *memory = &iobj->memory; in nvkm_instobj_load() local
    35  const u64 size = nvkm_memory_size(memory); in nvkm_instobj_load()
    39  if (!(map = nvkm_kmap(memory))) { in nvkm_instobj_load()
    41  nvkm_wo32(memory, i, iobj->suspend[i / 4]); in nvkm_instobj_load()
    45  nvkm_done(memory); in nvkm_instobj_load()
    54  struct nvkm_memory *memory = &iobj->memory; in nvkm_instobj_save() local
    55  const u64 size = nvkm_memory_size(memory); in nvkm_instobj_save()
    63  if (!(map = nvkm_kmap(memory))) { in nvkm_instobj_save()
    65  iobj->suspend[i / 4] = nvkm_ro32(memory, i); in nvkm_instobj_save()
    69  nvkm_done(memory); in nvkm_instobj_save()
    [all …]
|
D | gk20a.c |
    52   struct nvkm_memory memory; member
    59   #define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
    116  gk20a_instobj_target(struct nvkm_memory *memory) in gk20a_instobj_target() argument
    122  gk20a_instobj_page(struct nvkm_memory *memory) in gk20a_instobj_page() argument
    128  gk20a_instobj_addr(struct nvkm_memory *memory) in gk20a_instobj_addr() argument
    130  return (u64)gk20a_instobj(memory)->mn->offset << 12; in gk20a_instobj_addr()
    134  gk20a_instobj_size(struct nvkm_memory *memory) in gk20a_instobj_size() argument
    136  return (u64)gk20a_instobj(memory)->mn->length << 12; in gk20a_instobj_size()
    151  imem->vaddr_use -= nvkm_memory_size(&obj->base.memory); in gk20a_instobj_iommu_recycle_vaddr()
    174  gk20a_instobj_acquire_dma(struct nvkm_memory *memory) in gk20a_instobj_acquire_dma() argument
    [all …]
|
D | nv04.c |
    37  #define nv04_instobj(p) container_of((p), struct nv04_instobj, base.memory)
    46  nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) in nv04_instobj_wr32() argument
    48  struct nv04_instobj *iobj = nv04_instobj(memory); in nv04_instobj_wr32()
    54  nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset) in nv04_instobj_rd32() argument
    56  struct nv04_instobj *iobj = nv04_instobj(memory); in nv04_instobj_rd32()
    68  nv04_instobj_release(struct nvkm_memory *memory) in nv04_instobj_release() argument
    73  nv04_instobj_acquire(struct nvkm_memory *memory) in nv04_instobj_acquire() argument
    75  struct nv04_instobj *iobj = nv04_instobj(memory); in nv04_instobj_acquire()
    81  nv04_instobj_size(struct nvkm_memory *memory) in nv04_instobj_size() argument
    83  return nv04_instobj(memory)->node->length; in nv04_instobj_size()
    [all …]
|
/linux-5.19.10/Documentation/admin-guide/cgroup-v1/ |
D | memory.rst |
    13  memory controller in this document. Do not confuse memory controller
    14  used here with the memory controller that is used in hardware.
    17  When we mention a cgroup (cgroupfs's directory) with memory controller,
    18  we call it "memory cgroup". When you see git-log and source code, you'll
    22  Benefits and Purpose of the memory controller
    25  The memory controller isolates the memory behaviour of a group of tasks
    27  uses of the memory controller. The memory controller can be used to
    31  amount of memory.
    32  b. Create a cgroup with a limited amount of memory; this can be used
    34  c. Virtualization solutions can control the amount of memory they want
    [all …]
|
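The cgroup-v1 document above describes creating a cgroup with a limited amount of memory. A minimal userspace sketch of that flow follows, assuming the v1 memory controller is mounted at /sys/fs/cgroup/memory (a common but not universal layout) and that the "demo" group name is a hypothetical example.

    /* Sketch: create a v1 memory cgroup, cap it, and move this process into it.
     * Mount point and group name are assumptions; requires write permission.
     */
    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>
    #include <unistd.h>

    static int write_str(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");

        if (!f)
            return -1;
        fputs(val, f);
        return fclose(f);
    }

    int main(void)
    {
        char pid[32];

        mkdir("/sys/fs/cgroup/memory/demo", 0755);
        /* Limit the group to 256 MiB via memory.limit_in_bytes. */
        write_str("/sys/fs/cgroup/memory/demo/memory.limit_in_bytes", "268435456");
        /* Move the current process into the new group. */
        snprintf(pid, sizeof(pid), "%d", getpid());
        write_str("/sys/fs/cgroup/memory/demo/tasks", pid);
        return 0;
    }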
/linux-5.19.10/Documentation/core-api/ |
D | memory-hotplug.rst |
    12  There are six types of notification defined in ``include/linux/memory.h``:
    15  Generated before new memory becomes available in order to be able to
    16  prepare subsystems to handle memory. The page allocator is still unable
    17  to allocate from the new memory.
    23  Generated when memory has successfully brought online. The callback may
    24  allocate pages from the new memory.
    27  Generated to begin the process of offlining memory. Allocations are no
    28  longer possible from the memory but some of the memory to be offlined
    29  is still in use. The callback can be used to free memory known to a
    30  subsystem from the indicated memory block.
    [all …]
|
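The core-api document above describes the notifications a subsystem can receive around memory hotplug. A short sketch of a module that registers for them is shown below; it is a minimal illustration built on register_memory_notifier() and struct memory_notify from include/linux/memory.h, not code from the tree.

    /* Sketch: react to memory going online/offline via a memory notifier. */
    #include <linux/memory.h>
    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>

    static int demo_mem_callback(struct notifier_block *nb,
                                 unsigned long action, void *arg)
    {
        struct memory_notify *mn = arg;

        switch (action) {
        case MEM_GOING_ONLINE:
            /* New pages exist but cannot be allocated from yet. */
            pr_info("memory going online: %lu pages at PFN %lu\n",
                    mn->nr_pages, mn->start_pfn);
            break;
        case MEM_OFFLINE:
            pr_info("memory block offlined\n");
            break;
        }
        return NOTIFY_OK;
    }

    static struct notifier_block demo_mem_nb = {
        .notifier_call = demo_mem_callback,
    };

    static int __init demo_init(void)
    {
        return register_memory_notifier(&demo_mem_nb);
    }

    static void __exit demo_exit(void)
    {
        unregister_memory_notifier(&demo_mem_nb);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");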
/linux-5.19.10/arch/arm64/boot/dts/ti/ |
D | k3-j721e-som-p0.dtsi |
    11  memory@80000000 {
    12  device_type = "memory";
    18  reserved_memory: reserved-memory {
    29  mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 {
    35  mcu_r5fss0_core0_memory_region: r5f-memory@a0100000 {
    41  mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@a1000000 {
    47  mcu_r5fss0_core1_memory_region: r5f-memory@a1100000 {
    53  main_r5fss0_core0_dma_memory_region: r5f-dma-memory@a2000000 {
    59  main_r5fss0_core0_memory_region: r5f-memory@a2100000 {
    65  main_r5fss0_core1_dma_memory_region: r5f-dma-memory@a3000000 {
    [all …]
|
/linux-5.19.10/Documentation/vm/ |
D | memory-model.rst |
    9   Physical memory in a system may be addressed in different ways. The
    10  simplest case is when the physical memory starts at address 0 and
    15  different memory banks are attached to different CPUs.
    17  Linux abstracts this diversity using one of the two memory models:
    19  memory models it supports, what the default memory model is and
    22  All the memory models track the status of physical page frames using
    25  Regardless of the selected memory model, there exists one-to-one
    29  Each memory model defines :c:func:`pfn_to_page` and :c:func:`page_to_pfn`
    36  The simplest memory model is FLATMEM. This model is suitable for
    38  memory.
    [all …]
|
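memory-model.rst above states that every memory model provides a one-to-one mapping between page frame numbers and struct page, exposed as pfn_to_page() and page_to_pfn(). A tiny kernel-side sketch of that round trip (illustrative helper name, not code from the tree):

    /* Sketch: the PFN <-> struct page mapping described above. */
    #include <linux/mm.h>
    #include <linux/pfn.h>
    #include <linux/printk.h>

    static void demo_pfn_roundtrip(struct page *page)
    {
        unsigned long pfn = page_to_pfn(page);   /* page -> frame number   */
        struct page *back = pfn_to_page(pfn);    /* frame number -> page   */
        phys_addr_t phys = PFN_PHYS(pfn);        /* frame number -> address */

        WARN_ON(back != page);                   /* the mapping is 1:1     */
        pr_debug("page %px is PFN %lu (phys %pa)\n", page, pfn, &phys);
    }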
D | hmm.rst |
    7   Provide infrastructure and helpers to integrate non-conventional memory (device
    8   memory like GPU on board memory) into regular kernel path, with the cornerstone
    9   of this being specialized struct page for such memory (see sections 5 to 7 of
    20  related to using device specific memory allocators. In the second section, I
    24  fifth section deals with how device memory is represented inside the kernel.
    30  Problems of using a device specific memory allocator
    33  Devices with a large amount of on board memory (several gigabytes) like GPUs
    34  have historically managed their memory through dedicated driver specific APIs.
    35  This creates a disconnect between memory allocated and managed by a device
    36  driver and regular application memory (private anonymous, shared memory, or
    [all …]
|
D | numa.rst |
    14  or more CPUs, local memory, and/or IO buses. For brevity and to
    28  Coherent NUMA or ccNUMA systems. With ccNUMA systems, all memory is visible
    32  Memory access time and effective memory bandwidth varies depending on how far
    33  away the cell containing the CPU or IO bus making the memory access is from the
    34  cell containing the target memory. For example, access to memory by CPUs
    36  bandwidths than accesses to memory on other, remote cells. NUMA platforms
    41  memory bandwidth. However, to achieve scalable memory bandwidth, system and
    42  application software must arrange for a large majority of the memory references
    43  [cache misses] to be to "local" memory--memory on the same cell, if any--or
    44  to the closest cell with memory.
    [all …]
|
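The numa.rst excerpt above advises keeping the majority of memory references local to the node doing the accessing. On the kernel side this commonly means allocating on the caller's node; a small sketch using the standard kmalloc_node() helper (the demo function name is illustrative):

    /* Sketch: node-local allocation, following the "keep references local" advice. */
    #include <linux/slab.h>
    #include <linux/topology.h>

    static void *demo_local_buffer(size_t size)
    {
        int nid = numa_node_id();   /* node of the currently running CPU */

        /* Ask the allocator for memory backed by that node, if possible. */
        return kmalloc_node(size, GFP_KERNEL, nid);
    }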
/linux-5.19.10/drivers/staging/octeon/ |
D | ethernet-mem.c |
    49   char *memory; in cvm_oct_free_hw_skbuff() local
    52   memory = cvmx_fpa_alloc(pool); in cvm_oct_free_hw_skbuff()
    53   if (memory) { in cvm_oct_free_hw_skbuff()
    55   *(struct sk_buff **)(memory - sizeof(void *)); in cvm_oct_free_hw_skbuff()
    59   } while (memory); in cvm_oct_free_hw_skbuff()
    79   char *memory; in cvm_oct_fill_hw_memory() local
    94   memory = kmalloc(size + 256, GFP_ATOMIC); in cvm_oct_fill_hw_memory()
    95   if (unlikely(!memory)) { in cvm_oct_fill_hw_memory()
    100  fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL); in cvm_oct_fill_hw_memory()
    101  *((char **)fpa - 1) = memory; in cvm_oct_fill_hw_memory()
    [all …]
|
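The cvm_oct_fill_hw_memory() excerpt above over-allocates, rounds the pointer up to a 128-byte boundary, and stashes the original allocation just below the aligned address so it can be recovered when the buffer is freed. A standalone sketch of that technique, using plain malloc()/free() instead of the kernel and FPA helpers:

    /* Align-and-stash-pointer technique, rewritten to run standalone. */
    #include <stdint.h>
    #include <stdlib.h>

    #define ALIGN_BYTES 128UL

    static void *aligned_alloc_with_backptr(size_t size)
    {
        /* Over-allocate so there is room for alignment plus the saved pointer. */
        char *raw = malloc(size + 2 * ALIGN_BYTES);
        char *aligned;

        if (!raw)
            return NULL;
        /* Round up past the stash slot, then down to the alignment boundary. */
        aligned = (char *)(((uintptr_t)raw + 2 * ALIGN_BYTES) & ~(ALIGN_BYTES - 1));
        ((char **)aligned)[-1] = raw;   /* remember the original allocation */
        return aligned;
    }

    static void aligned_free_with_backptr(void *ptr)
    {
        if (ptr)
            free(((char **)ptr)[-1]);
    }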
/linux-5.19.10/Documentation/userspace-api/media/v4l/ |
D | dev-mem2mem.rst |
    9   A V4L2 memory-to-memory device can compress, decompress, transform, or
    10  otherwise convert video data from one format into another format, in memory.
    11  Such memory-to-memory devices set the ``V4L2_CAP_VIDEO_M2M`` or
    12  ``V4L2_CAP_VIDEO_M2M_MPLANE`` capability. Examples of memory-to-memory
    16  A memory-to-memory video node acts just like a normal video node, but it
    17  supports both output (sending frames from memory to the hardware)
    19  memory) stream I/O. An application will have to setup the stream I/O for
    23  Memory-to-memory devices function as a shared resource: you can
    32  One of the most common memory-to-memory device is the codec. Codecs
    35  See :ref:`codec-controls`. More details on how to use codec memory-to-memory
|
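As the text above explains, an m2m node carries both an OUTPUT queue (frames the application sends to the hardware) and a CAPTURE queue (converted frames coming back), and the application must set up stream I/O for both. A hedged sketch of requesting buffers on the two queues; /dev/video0 is an assumed device path and error handling is abbreviated.

    /* Sketch: allocate buffers on both queues of a memory-to-memory video node. */
    #include <fcntl.h>
    #include <linux/videodev2.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
        struct v4l2_requestbuffers out = { 0 }, cap = { 0 };
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0)
            return 1;

        /* OUTPUT queue: buffers the application fills and sends to the device. */
        out.count = 4;
        out.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
        out.memory = V4L2_MEMORY_MMAP;
        ioctl(fd, VIDIOC_REQBUFS, &out);

        /* CAPTURE queue: buffers the device fills with converted frames. */
        cap.count = 4;
        cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        cap.memory = V4L2_MEMORY_MMAP;
        ioctl(fd, VIDIOC_REQBUFS, &cap);

        close(fd);
        return 0;
    }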
/linux-5.19.10/drivers/dax/ |
D | Kconfig |
    3   tristate "DAX: direct access to differentiated memory"
    14  latency...) memory via an mmap(2) capable character
    16  platform memory resource that is differentiated from the
    17  baseline memory pool. Mappings of a /dev/daxX.Y device impose
    21  tristate "PMEM DAX: direct access to persistent memory"
    25  Support raw access to persistent memory. Note that this
    26  driver consumes memory ranges allocated and exported by the
    32  tristate "HMEM DAX: direct access to 'specific purpose' memory"
    38  memory. For example, a high bandwidth memory pool. The
    40  memory from typical usage by default. This driver creates
    [all …]
|
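The Kconfig help above says device DAX exposes differentiated memory through an mmap(2)-capable character device, /dev/daxX.Y. A hedged sketch of mapping such a device follows; the /dev/dax0.0 name and the 2 MiB mapping length are assumptions, and real devices constrain the allowed size and alignment.

    /* Sketch: map a device-DAX character device and touch the memory directly.
     * Device path and mapping length are illustrative assumptions.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 2UL << 20;
        int fd = open("/dev/dax0.0", O_RDWR);
        char *p;

        if (fd < 0) {
            perror("open");
            return 1;
        }
        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        p[0] = 1;   /* loads/stores go straight to the backing memory */
        munmap(p, len);
        close(fd);
        return 0;
    }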
/linux-5.19.10/Documentation/translations/zh_CN/vm/ |
D | frontswap.rst |
    13  Frontswap provides a "transcendent memory" interface for swap pages. In some environments, the
    17  .. _Transcendent memory in a nutshell: https://lwn.net/Articles/454795/
    20  memory is regarded as a synchronous, concurrency-safe, page-oriented "pseudo-RAM device" conforming to the transcendent memory
    27  swap pages. A "store" copies the page into transcendent memory and associates it with the page's type and offset
    28  . A "load" copies the page, if found, from transcendent memory into kernel
    29  memory, but does not remove the page from transcendent memory. An "invalidate_page" removes the page from
    30  transcendent memory, and an "invalidate_area" removes all pages associated with the swap type
    35  has been successfully stored in transcendent memory and a disk write has been avoided; if the data is later read back,
    36  a disk read is avoided as well. If the store returns failure, transcendent memory has rejected the data, and the page
    39  Note that if a page is stored and the page already exists in transcendent memory (a "duplicate"
    [all …]
|
/linux-5.19.10/Documentation/powerpc/ |
D | firmware-assisted-dump.rst |
    14  - Fadump uses the same firmware interfaces and memory reservation model
    16  - Unlike phyp dump, FADump exports the memory dump through /proc/vmcore
    21  - Unlike phyp dump, FADump allows user to release all the memory reserved
    35  - Once the dump is copied out, the memory that held the dump
    44  - The first kernel registers the sections of memory with the
    46  These registered sections of memory are reserved by the first
    50  low memory regions (boot memory) from source to destination area.
    54  The term 'boot memory' means size of the low memory chunk
    56  booted with restricted memory. By default, the boot memory
    58  Alternatively, user can also specify boot memory size
    [all …]
|
/linux-5.19.10/Documentation/devicetree/bindings/memory-controllers/ |
D | nvidia,tegra210-emc.yaml |
    4   $id: http://devicetree.org/schemas/memory-controllers/nvidia,tegra210-emc.yaml#
    15  sent from the memory controller.
    26  - description: external memory clock
    36  memory-region:
    39  phandle to a reserved memory region describing the table of EMC
    42  nvidia,memory-controller:
    45  phandle of the memory controller node
    52  - nvidia,memory-controller
    61  reserved-memory {
    72  external-memory-controller@7001b000 {
    [all …]
|
/linux-5.19.10/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ |
D | ram.c |
    24  #define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
    31  struct nvkm_memory memory; member
    38  nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, in nvkm_vram_map() argument
    41  struct nvkm_vram *vram = nvkm_vram(memory); in nvkm_vram_map()
    43  .memory = &vram->memory, in nvkm_vram_map()
    52  nvkm_vram_size(struct nvkm_memory *memory) in nvkm_vram_size() argument
    54  return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT; in nvkm_vram_size()
    58  nvkm_vram_addr(struct nvkm_memory *memory) in nvkm_vram_addr() argument
    60  struct nvkm_vram *vram = nvkm_vram(memory); in nvkm_vram_addr()
    67  nvkm_vram_page(struct nvkm_memory *memory) in nvkm_vram_page() argument
    [all …]
|