/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		/* free (0) or busy (2) -> claimed (3), per the state table above */
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
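
/*
 * Illustrative sketch, not part of this file: an architecture that can
 * raise a self-interrupt overrides the __weak stub above from its arch
 * code, so queued work runs promptly instead of waiting for the next
 * timer tick. The helper name arch_send_self_ipi() below is hypothetical;
 * the real mechanism is architecture specific (x86, for instance, sends
 * itself a dedicated IRQ_WORK_VECTOR IPI).
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		arch_send_self_ipi(IRQ_WORK_VECTOR);
 *	}
 */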

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @entry; returns true on success, false when the
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
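
/*
 * Illustrative usage sketch, not part of this file: a caller typically
 * initializes the work item once with init_irq_work() and then queues it
 * from NMI or other atomic context; the callback later runs in hardirq
 * context. The names example_work and example_func are hypothetical.
 *
 *	static struct irq_work example_work;
 *
 *	static void example_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback ran in hardirq context\n");
 *	}
 *
 *	Setup (e.g. in module init code):
 *
 *		init_irq_work(&example_work, example_func);
 *
 *	From NMI or hardirq context (a false return means it was already
 *	pending and the callback will still run once):
 *
 *		irq_work_queue(&example_work);
 */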

/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);
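
/*
 * Illustrative sketch, not part of this file: irq_work_run() is invoked
 * from the interrupt raised by arch_irq_work_raise(), or from the timer
 * tick on architectures that rely on the __weak fallback above. The
 * handler name below is hypothetical.
 *
 *	void example_irq_work_interrupt(void)
 *	{
 *		irq_enter();
 *		irq_work_run();
 *		irq_exit();
 *	}
 */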

/*
 * Synchronize against the irq_work @entry, ensuring the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
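
/*
 * Illustrative teardown sketch, not part of this file: before freeing a
 * work item or unloading the module that owns its callback, wait for a
 * possibly in-flight callback to finish. Must be called with IRQs enabled,
 * since it busy-waits on the BUSY bit. example_work is the hypothetical
 * item from the usage sketch above.
 *
 *	Module exit / teardown path:
 *
 *		irq_work_sync(&example_work);
 */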