/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 ARM Ltd.
 */
#ifndef __ASM_DAIFFLAGS_H
#define __ASM_DAIFFLAGS_H

#include <linux/irqflags.h>

#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>

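/*
 * DAIF masks for the various kernel contexts: DAIF_PROCCTX leaves all
 * exceptions unmasked, DAIF_PROCCTX_NOIRQ masks IRQ and FIQ, DAIF_ERRCTX
 * additionally masks SError, and DAIF_MASK masks everything, including
 * debug exceptions.
 */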
#define DAIF_PROCCTX		0
#define DAIF_PROCCTX_NOIRQ	(PSR_I_BIT | PSR_F_BIT)
#define DAIF_ERRCTX		(PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
#define DAIF_MASK		(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)


/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
{
        WARN_ON(system_has_prio_mask_debugging() &&
                (read_sysreg_s(SYS_ICC_PMR_EL1) == (GIC_PRIO_IRQOFF |
                                                    GIC_PRIO_PSR_I_SET)));

        asm volatile(
                "msr    daifset, #0xf           // local_daif_mask\n"
                :
                :
                : "memory");

        /* Don't really care for a dsb here, we don't intend to enable IRQs */
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

        trace_hardirqs_off();
}

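/*
 * Return the current DAIF flags; if IRQs are masked via the GIC PMR, fold
 * that into the returned flags as PSR_I_BIT | PSR_F_BIT.
 */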
static inline unsigned long local_daif_save_flags(void)
{
        unsigned long flags;

        flags = read_sysreg(daif);

        if (system_uses_irq_prio_masking()) {
                /* If IRQs are masked with PMR, reflect it in the flags */
                if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
                        flags |= PSR_I_BIT | PSR_F_BIT;
        }

        return flags;
}

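/* Save the current DAIF flags, then mask all exceptions. */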
static inline unsigned long local_daif_save(void)
{
        unsigned long flags;

        flags = local_daif_save_flags();

        local_daif_mask();

        return flags;
}

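/*
 * Restore flags previously returned by local_daif_save(). With IRQ priority
 * masking, IRQs are masked through the PMR rather than PSTATE.I where
 * possible, so that pseudo-NMIs stay deliverable.
 */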
static inline void local_daif_restore(unsigned long flags)
{
        bool irq_disabled = flags & PSR_I_BIT;

        WARN_ON(system_has_prio_mask_debugging() &&
                (read_sysreg(daif) & (PSR_I_BIT | PSR_F_BIT)) != (PSR_I_BIT | PSR_F_BIT));

        if (!irq_disabled) {
                trace_hardirqs_on();

                if (system_uses_irq_prio_masking()) {
                        gic_write_pmr(GIC_PRIO_IRQON);
                        pmr_sync();
                }
        } else if (system_uses_irq_prio_masking()) {
                u64 pmr;

                if (!(flags & PSR_A_BIT)) {
                        /*
                         * If interrupts are disabled but we can take
                         * asynchronous errors, we can take NMIs
                         */
                        flags &= ~(PSR_I_BIT | PSR_F_BIT);
                        pmr = GIC_PRIO_IRQOFF;
                } else {
                        pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
                }

                /*
                 * There has been concern that the write to daif
                 * might be reordered before this write to PMR.
                 * From the ARM ARM DDI 0487D.a, section D1.7.1
                 * "Accessing PSTATE fields":
                 *   Writes to the PSTATE fields have side-effects on
                 *   various aspects of the PE operation. All of these
                 *   side-effects are guaranteed:
                 *   - Not to be visible to earlier instructions in
                 *     the execution stream.
                 *   - To be visible to later instructions in the
                 *     execution stream.
                 *
                 * Also, writes to PMR are self-synchronizing, so no
                 * interrupts with a lower priority than PMR are signaled
                 * to the PE after the write.
                 *
                 * So we don't need additional synchronization here.
                 */
                gic_write_pmr(pmr);
        }

        write_sysreg(flags, daif);

        if (irq_disabled)
                trace_hardirqs_off();
}
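
/*
 * Illustrative usage: callers that must not take any exception across a
 * critical section typically pair local_daif_save() with
 * local_daif_restore():
 *
 *        unsigned long flags;
 *
 *        flags = local_daif_save();
 *        ... section that must run with all of DAIF masked ...
 *        local_daif_restore(flags);
 */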

/*
 * Called by synchronous exception handlers to restore the DAIF bits that were
 * modified by taking an exception.
 */
static inline void local_daif_inherit(struct pt_regs *regs)
{
        unsigned long flags = regs->pstate & DAIF_MASK;

        if (interrupts_enabled(regs))
                trace_hardirqs_on();

        if (system_uses_irq_prio_masking())
                gic_write_pmr(regs->pmr_save);

        /*
         * We can't use local_daif_restore(regs->pstate) here as
         * system_has_prio_mask_debugging() won't restore the I bit if it can
         * use the pmr instead.
         */
        write_sysreg(flags, daif);
}
#endif