
Searched refs: L1 (Results 1 – 25 of 150), sorted by relevance


/linux-6.1.9/Documentation/virt/kvm/x86/
running-nested-guests.rst 19 | L1 (Guest Hypervisor) |
33 - L1 – level-1 guest; a VM running on L0; also called the "guest
36 - L2 – level-2 guest; a VM running on L1, this is the "nested guest"
45 metal, running the LPAR hypervisor), L1 (host hypervisor), L2
49 L1, and L2) for all architectures; and will largely focus on
148 able to start an L1 guest with::
175 2. The guest hypervisor (L1) must be provided with the ``sie`` CPU
179 3. Now the KVM module can be loaded in the L1 (guest hypervisor)::
187 Migrating an L1 guest, with a *live* nested guest in it, to another
191 On AMD systems, once an L1 guest has started an L2 guest, the L1 guest
[all …]
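
The excerpt above defines the L0/L1/L2 terminology and shows how nested KVM is switched on. As a minimal userspace sketch of the same check the document describes (the paths are the kvm_intel/kvm_amd "nested" module parameters; whichever module is not loaded is simply skipped):

    /* Report whether the L0 host allows L1 guests to run their own L2 guests. */
    #include <stdio.h>

    static int read_nested(const char *path)
    {
        FILE *f = fopen(path, "r");
        int c;

        if (!f)
            return -1;              /* module not loaded or no such parameter */
        c = fgetc(f);
        fclose(f);
        return c == 'Y' || c == '1';
    }

    int main(void)
    {
        const char *paths[] = {
            "/sys/module/kvm_intel/parameters/nested",
            "/sys/module/kvm_amd/parameters/nested",
        };

        for (int i = 0; i < 2; i++) {
            int v = read_nested(paths[i]);

            if (v >= 0)
                printf("%s: nested %s\n", paths[i], v ? "enabled" : "disabled");
        }
        return 0;
    }
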
/linux-6.1.9/arch/arc/kernel/
entry-compact.S 152 ; if L2 IRQ interrupted a L1 ISR, disable preemption
154 ; This is to avoid a potential L1-L2-L1 scenario
155 ; -L1 IRQ taken
156 ; -L2 interrupts L1 (before L1 ISR could run)
160 ; But both L1 and L2 re-enabled, so another L1 can be taken
161 ; while prev L1 is still unserviced
165 ; L2 interrupting L1 implies both L2 and L1 active
167 ; need to check STATUS32_L2 to determine if L1 was active
170 bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
335 ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
[all …]
/linux-6.1.9/security/apparmor/include/
perms.h 122 #define xcheck_ns_labels(L1, L2, FN, args...) \ argument
125 fn_for_each((L1), __p1, FN(__p1, (L2), args)); \
129 #define xcheck_labels_profiles(L1, L2, FN, args...) \ argument
130 xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args)
132 #define xcheck_labels(L1, L2, P, FN1, FN2) \ argument
133 xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2)))
label.h 163 #define next_comb(I, L1, L2) \ argument
174 #define label_for_each_comb(I, L1, L2, P1, P2) \ argument
176 ((P1) = (L1)->vec[(I).i]) && ((P2) = (L2)->vec[(I).j]); \
177 (I) = next_comb(I, L1, L2))
179 #define fn_for_each_comb(L1, L2, P1, P2, FN) \ argument
183 label_for_each_comb(i, (L1), (L2), (P1), (P2)) { \
243 #define fn_for_each2_XXX(L1, L2, P, FN, ...) \ argument
247 label_for_each ## __VA_ARGS__(i, (L1), (L2), (P)) { \
253 #define fn_for_each_in_merge(L1, L2, P, FN) \ argument
254 fn_for_each2_XXX((L1), (L2), P, FN, _in_merge)
[all …]
/linux-6.1.9/arch/arm/mm/
proc-xsc3.S 68 1: mcr p15, 0, \rd, c7, c14, 2 @ clean/invalidate L1 D line
113 mcr p15, 0, ip, c7, c7, 0 @ invalidate L1 caches and BTB
173 mcrne p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB
196 mcrne p15, 0, r0, c7, c5, 1 @ invalidate L1 I line
197 mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
224 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
229 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB
245 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
250 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB
269 mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line
[all …]
/linux-6.1.9/arch/powerpc/perf/
power8-pmu.c 133 CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
134 CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
136 CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
137 CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
138 CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
139 CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
140 CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
power9-pmu.c 177 CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1_FIN);
178 CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
179 CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
180 CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
181 CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
182 CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
183 CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
power10-pmu.c 133 CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
134 CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
135 CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_LD_PREFETCH_CACHE_LINE_MISS);
136 CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
137 CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
138 CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
139 CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_REQ);
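
The CACHE_EVENT_ATTR lines above are the Power PMU drivers wiring generic cache events such as L1-dcache-load-misses to hardware PMU events. A hedged sketch of counting that same generic event from userspace with perf_event_open(2) (not specific to these drivers; any architecture that maps the event will do):

    /* Count L1 data-cache read misses for the calling process. */
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        struct perf_event_attr attr;
        long long count = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HW_CACHE;
        attr.config = PERF_COUNT_HW_CACHE_L1D |
                      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload being measured would run here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("L1-dcache-load-misses: %lld\n", count);
        close(fd);
        return 0;
    }
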
/linux-6.1.9/arch/hexagon/lib/
memset.S 159 if (r2==#0) jump:nt .L1
186 if (p1) jump .L1
197 if (p0.new) jump:nt .L1
208 if (p0.new) jump:nt .L1
284 .L1: label
/linux-6.1.9/Documentation/driver-api/
edac.rst 145 - CPU caches (L1 and L2)
155 For example, a cache could be composed of L1, L2 and L3 levels of cache.
156 Each CPU core would have its own L1 cache, while sharing L2 and maybe L3
164 cpu/cpu0/.. <L1 and L2 block directory>
165 /L1-cache/ce_count
169 cpu/cpu1/.. <L1 and L2 block directory>
170 /L1-cache/ce_count
176 the L1 and L2 directories would be "edac_device_block's"
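
The edac.rst excerpt sketches an edac_device whose per-CPU instances expose L1-cache and L2-cache blocks, each with a ce_count attribute. A small sketch of reading one such counter; the exact sysfs location and the "cpu"/"cpu0" names are assumptions taken from the quoted example and will vary with the platform driver:

    /* Read the correctable-error count for cpu0's L1 cache block (path assumed). */
    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/devices/system/edac/cpu/cpu0/L1-cache/ce_count";
        unsigned long ce = 0;
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fscanf(f, "%lu", &ce) == 1)
            printf("cpu0 L1 correctable errors: %lu\n", ce);
        fclose(f);
        return 0;
    }
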
/linux-6.1.9/Documentation/locking/
lockdep-design.rst 22 dependency can be understood as lock order, where L1 -> L2 suggests that
23 a task is attempting to acquire L2 while holding L1. From lockdep's
24 perspective, the two locks (L1 and L2) are not necessarily related; that
145 <L1> -> <L2>
146 <L2> -> <L1>
521 L1 -> L2
523 , which means lockdep has seen L1 held before L2 held in the same context at runtime.
524 And in deadlock detection, we care whether we could get blocked on L2 with L1 held,
525 IOW, whether there is a locker L3 that L1 blocks L3 and L2 gets blocked by L3. So
526 we only care about 1) what L1 blocks and 2) what blocks L2. As a result, we can combine
[all …]
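
The lockdep-design.rst lines above describe an L1 -> L2 dependency plus the inverted L2 -> L1 order that closes a cycle. A minimal kernel-module-style fragment (a sketch, not taken from the document) that records exactly those two dependencies; with CONFIG_PROVE_LOCKING, lockdep reports the circular dependency as soon as both orders have been seen, with no deadlock needing to happen:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(L1);
    static DEFINE_MUTEX(L2);

    static void path_one(void)
    {
        mutex_lock(&L1);
        mutex_lock(&L2);        /* records dependency L1 -> L2 */
        mutex_unlock(&L2);
        mutex_unlock(&L1);
    }

    static void path_two(void)
    {
        mutex_lock(&L2);
        mutex_lock(&L1);        /* records L2 -> L1: the cycle lockdep flags */
        mutex_unlock(&L1);
        mutex_unlock(&L2);
    }
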
rt-mutex-design.rst 47 grab lock L1 (owned by C)
139 Mutexes: L1, L2, L3, L4
141 A owns: L1
142 B blocked on L1
152 E->L4->D->L3->C->L2->B->L1->A
159 F->L5->B->L1->A
168 +->B->L1->A
180 G->L2->B->L1->A
188 G-+ +->B->L1->A
230 L1, L2, and L3, and four separate functions func1, func2, func3 and func4.
[all …]
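
The rt-mutex-design.rst excerpt walks priority-inheritance chains such as E->L4->D->L3->C->L2->B->L1->A. The same machinery is reachable from userspace through PI futexes; a hedged sketch of creating a PTHREAD_PRIO_INHERIT mutex (the boost only has an effect when the contending threads run under real-time scheduling policies):

    #include <pthread.h>

    int main(void)
    {
        pthread_mutexattr_t attr;
        pthread_mutex_t L1;     /* named to mirror the document's example */

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
        pthread_mutex_init(&L1, &attr);

        pthread_mutex_lock(&L1);    /* owner can now be priority-boosted */
        /* ... critical section ... */
        pthread_mutex_unlock(&L1);

        pthread_mutex_destroy(&L1);
        pthread_mutexattr_destroy(&attr);
        return 0;
    }
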
/linux-6.1.9/drivers/pci/pcie/
Kconfig 69 state L0/L0s/L1.
95 Enable PCI Express ASPM L0s and L1 where possible, even if the
102 Same as PCIEASPM_POWERSAVE, except it also enables L1 substates where
103 possible. This would result in higher power savings while staying in L1
110 Disable PCI Express ASPM L0s and L1, even if the BIOS enabled them.
/linux-6.1.9/arch/m68k/fpsp040/
setox.S 104 | 3.1 R := X + N*L1, where L1 := single-precision(-log2/64).
105 | 3.2 R := R + N*L2, L2 := extended-precision(-log2/64 - L1).
106 | Notes: a) The way L1 and L2 are chosen ensures L1+L2 approximate
108 | b) N*L1 is exact because N is no longer than 22 bits and
109 | L1 is no longer than 24 bits.
110 | c) The calculation X+N*L1 is also exact due to cancellation.
111 | Thus, R is practically X+N(L1+L2) to full 64 bits.
505 fmuls #0xBC317218,%fp0 | ...N * L1, L1 = lead(-log2/64)
506 fmulx L2,%fp2 | ...N * L2, L1+L2 = -log2/64
507 faddx %fp1,%fp0 | ...X + N*L1
[all …]
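
Steps 3.1/3.2 quoted above are a two-constant (Cody-Waite style) argument reduction: L1 is -log2/64 rounded to single precision, L2 holds the bits that rounding dropped, so X + N*L1 + N*L2 is computed almost exactly. A C sketch of the same idea (an illustration, not the 68040 FPSP code itself):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double x = 3.7;                         /* arbitrary test input */
        double ln2 = log(2.0);
        float  L1 = (float)(-ln2 / 64.0);       /* leading part of -log2/64 */
        double L2 = -ln2 / 64.0 - (double)L1;   /* tail: -log2/64 - L1 */

        double n = nearbyint(x * 64.0 / ln2);   /* x ~= (n/64)*log2 + r */
        double r = x + n * (double)L1 + n * L2; /* reduced argument, |r| small */

        /* exp(x) = 2^(n/64) * exp(r); reconstruct and compare with libm. */
        printf("reduced: %.17g  libm: %.17g\n", exp2(n / 64.0) * exp(r), exp(x));
        return 0;
    }
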
/linux-6.1.9/arch/m68k/lib/
divsi3.S 95 jpl L1
102 L1: movel sp@(8), d0 /* d0 = dividend */ label
/linux-6.1.9/Documentation/devicetree/bindings/media/
st-rc.txt 10 - rx-mode: can be "infrared" or "uhf". This property specifies the L1
13 - tx-mode: should be "infrared". This property specifies the L1
/linux-6.1.9/tools/perf/Documentation/
perf-c2c.txt 207 L1Hit - store accesses that hit L1
208 L1Miss - store accesses that missed L1
211 Core Load Hit - FB, L1, L2
212 - count of load hits in FB (Fill Buffer), L1 and L2 cache
233 Store Refs - L1 Hit, L1 Miss, N/A
234 - % of store accesses that hit L1, missed L1 and N/A (no available) memory
/linux-6.1.9/arch/alpha/boot/
main.c 59 #define L1 ((unsigned long *) 0x200802000) macro
71 pcb_va->ptbr = L1[1] >> 32; in pal_init()
bootp.c 65 #define L1 ((unsigned long *) 0x200802000) macro
77 pcb_va->ptbr = L1[1] >> 32; in pal_init()
/linux-6.1.9/Documentation/translations/zh_CN/arm64/
memory.txt 90 | | +---------------------> [38:30] L1 index
105 | +-------------------------------> [47:42] L1 index
/linux-6.1.9/arch/riscv/lib/
tishift.S 10 beqz a2, .L1
21 .L1: label
/linux-6.1.9/lib/
test_dynamic_debug.c 92 enum cat_level_names { L0 = 22, L1, L2, L3, L4, L5, L6, L7 }; enumerator
133 prdbg(L1); in do_levels()
/linux-6.1.9/Documentation/translations/zh_TW/arm64/
memory.txt 94 | | +---------------------> [38:30] L1 index
109 | +-------------------------------> [47:42] L1 index
/linux-6.1.9/arch/arm/mach-omap2/
sram242x.S 39 str r3, [r2] @ go to L1-freq operation
42 mov r9, #0x1 @ set up for L1 voltage call
101 orr r5, r5, r9 @ bulld value for L0/L1-volt operation.
105 str r5, [r4] @ Force transition to L1
196 orr r8, r8, r9 @ bulld value for L0/L1-volt operation.
200 str r8, [r10] @ Force transition to L1
sram243x.S 39 str r3, [r2] @ go to L1-freq operation
42 mov r9, #0x1 @ set up for L1 voltage call
101 orr r5, r5, r9 @ bulld value for L0/L1-volt operation.
105 str r5, [r4] @ Force transition to L1
196 orr r8, r8, r9 @ bulld value for L0/L1-volt operation.
200 str r8, [r10] @ Force transition to L1
