1 /* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
2 /*
3 * aoechr.c
4 * AoE character device driver
5 */
6
7 #include <linux/hdreg.h>
8 #include <linux/blkdev.h>
9 #include <linux/completion.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/mutex.h>
13 #include <linux/skbuff.h>
14 #include <linux/export.h>
15 #include "aoe.h"
16
/* Character-device minor numbers (one per node under /dev/etherd/)
 * plus sizing constants for the error-message ring.
 */
enum {
	//MINOR_STAT = 1, (moved to sysfs)
	MINOR_ERR = 2,
	MINOR_DISCOVER,
	MINOR_INTERFACES,
	MINOR_REVALIDATE,
	MINOR_FLUSH,
	MSGSZ = 2048,
	NMSG = 100,		/* message backlog to retain */
};
27
/* One entry per character device node created in aoechr_init(). */
struct aoe_chardev {
	ulong minor;		/* minor number under AOE_MAJOR */
	char name[32];		/* node name, e.g. "err", "discover" */
};
32
/* ErrMsg.flags bit: slot holds a message not yet consumed by a reader. */
enum { EMFL_VALID = 1 };

struct ErrMsg {
	short flags;		/* EMFL_VALID while msg is live */
	short len;		/* message length in bytes (no NUL stored) */
	char *msg;		/* kmemdup'd text; ownership passes to the reader, which kfrees it */
};
40
static DEFINE_MUTEX(aoechr_mutex);

/* A ring buffer of error messages, to be read through
 * "/dev/etherd/err". When no messages are present,
 * readers will block waiting for messages to appear.
 */
static struct ErrMsg emsgs[NMSG];
static int emsgs_head_idx, emsgs_tail_idx;	/* ring cursors; guarded by emsgs_lock */
static struct completion emsgs_comp;		/* readers park here waiting for messages */
static spinlock_t emsgs_lock;			/* protects emsgs[] and both cursors */
static int nblocked_emsgs_readers;		/* readers currently blocked on emsgs_comp */
static struct class *aoe_class;
/* Table driving device node creation/destruction in init/exit. */
static struct aoe_chardev chardevs[] = {
	{ MINOR_ERR, "err" },
	{ MINOR_DISCOVER, "discover" },
	{ MINOR_INTERFACES, "interfaces" },
	{ MINOR_REVALIDATE, "revalidate" },
	{ MINOR_FLUSH, "flush" },
};
60
/* Broadcast an AoE config query to all shelves (0xffff) and slots (0xff). */
static int
discover(void)
{
	aoecmd_cfg(0xffff, 0xff);
	return 0;
}
67
68 static int
interfaces(const char __user * str,size_t size)69 interfaces(const char __user *str, size_t size)
70 {
71 if (set_aoe_iflist(str, size)) {
72 printk(KERN_ERR
73 "aoe: could not set interface list: too many interfaces\n");
74 return -EINVAL;
75 }
76 return 0;
77 }
78
79 static int
revalidate(const char __user * str,size_t size)80 revalidate(const char __user *str, size_t size)
81 {
82 int major, minor, n;
83 ulong flags;
84 struct aoedev *d;
85 struct sk_buff *skb;
86 char buf[16];
87
88 if (size >= sizeof buf)
89 return -EINVAL;
90 buf[sizeof buf - 1] = '\0';
91 if (copy_from_user(buf, str, size))
92 return -EFAULT;
93
94 n = sscanf(buf, "e%d.%d", &major, &minor);
95 if (n != 2) {
96 pr_err("aoe: invalid device specification %s\n", buf);
97 return -EINVAL;
98 }
99 d = aoedev_by_aoeaddr(major, minor, 0);
100 if (!d)
101 return -EINVAL;
102 spin_lock_irqsave(&d->lock, flags);
103 aoecmd_cleanslate(d);
104 aoecmd_cfg(major, minor);
105 loop:
106 skb = aoecmd_ata_id(d);
107 spin_unlock_irqrestore(&d->lock, flags);
108 /* try again if we are able to sleep a bit,
109 * otherwise give up this revalidation
110 */
111 if (!skb && !msleep_interruptible(250)) {
112 spin_lock_irqsave(&d->lock, flags);
113 goto loop;
114 }
115 aoedev_put(d);
116 if (skb) {
117 struct sk_buff_head queue;
118 __skb_queue_head_init(&queue);
119 __skb_queue_tail(&queue, skb);
120 aoenet_xmit(&queue);
121 }
122 return 0;
123 }
124
125 void
aoechr_error(char * msg)126 aoechr_error(char *msg)
127 {
128 struct ErrMsg *em;
129 char *mp;
130 ulong flags, n;
131
132 n = strlen(msg);
133
134 spin_lock_irqsave(&emsgs_lock, flags);
135
136 em = emsgs + emsgs_tail_idx;
137 if ((em->flags & EMFL_VALID)) {
138 bail: spin_unlock_irqrestore(&emsgs_lock, flags);
139 return;
140 }
141
142 mp = kmemdup(msg, n, GFP_ATOMIC);
143 if (!mp)
144 goto bail;
145
146 em->msg = mp;
147 em->flags |= EMFL_VALID;
148 em->len = n;
149
150 emsgs_tail_idx++;
151 emsgs_tail_idx %= ARRAY_SIZE(emsgs);
152
153 spin_unlock_irqrestore(&emsgs_lock, flags);
154
155 if (nblocked_emsgs_readers)
156 complete(&emsgs_comp);
157 }
158
159 static ssize_t
aoechr_write(struct file * filp,const char __user * buf,size_t cnt,loff_t * offp)160 aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp)
161 {
162 int ret = -EINVAL;
163
164 switch ((unsigned long) filp->private_data) {
165 default:
166 printk(KERN_INFO "aoe: can't write to that file.\n");
167 break;
168 case MINOR_DISCOVER:
169 ret = discover();
170 break;
171 case MINOR_INTERFACES:
172 ret = interfaces(buf, cnt);
173 break;
174 case MINOR_REVALIDATE:
175 ret = revalidate(buf, cnt);
176 break;
177 case MINOR_FLUSH:
178 ret = aoedev_flush(buf, cnt);
179 break;
180 }
181 if (ret == 0)
182 ret = cnt;
183 return ret;
184 }
185
186 static int
aoechr_open(struct inode * inode,struct file * filp)187 aoechr_open(struct inode *inode, struct file *filp)
188 {
189 int n, i;
190
191 mutex_lock(&aoechr_mutex);
192 n = iminor(inode);
193 filp->private_data = (void *) (unsigned long) n;
194
195 for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
196 if (chardevs[i].minor == n) {
197 mutex_unlock(&aoechr_mutex);
198 return 0;
199 }
200 mutex_unlock(&aoechr_mutex);
201 return -EINVAL;
202 }
203
/* Release: no per-open state to tear down. */
static int
aoechr_rel(struct inode *inode, struct file *filp)
{
	return 0;
}
209
/* Read one queued error message from the ring for /dev/etherd/err.
 * Blocks until a message is available unless O_NDELAY is set.  Each
 * successful read consumes exactly one message; the caller's buffer
 * must be large enough for the whole message or -EAGAIN is returned.
 */
static ssize_t
aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off)
{
	unsigned long n;
	char *mp;
	struct ErrMsg *em;
	ssize_t len;
	ulong flags;

	/* Only the "err" minor supports reading. */
	n = (unsigned long) filp->private_data;
	if (n != MINOR_ERR)
		return -EFAULT;

	spin_lock_irqsave(&emsgs_lock, flags);

	for (;;) {
		em = emsgs + emsgs_head_idx;
		if ((em->flags & EMFL_VALID) != 0)
			break;
		if (filp->f_flags & O_NDELAY) {
			spin_unlock_irqrestore(&emsgs_lock, flags);
			return -EAGAIN;
		}
		/* Register as a blocked reader while still holding the
		 * lock, so aoechr_error() knows to signal emsgs_comp.
		 */
		nblocked_emsgs_readers++;

		spin_unlock_irqrestore(&emsgs_lock, flags);

		n = wait_for_completion_interruptible(&emsgs_comp);

		spin_lock_irqsave(&emsgs_lock, flags);

		nblocked_emsgs_readers--;

		if (n) {
			/* interrupted by a signal while waiting */
			spin_unlock_irqrestore(&emsgs_lock, flags);
			return -ERESTARTSYS;
		}
	}
	if (em->len > cnt) {
		/* caller's buffer too small; message stays queued */
		spin_unlock_irqrestore(&emsgs_lock, flags);
		return -EAGAIN;
	}
	/* Detach the message from the slot, then advance the head so the
	 * copy_to_user below can run without holding the spinlock.
	 */
	mp = em->msg;
	len = em->len;
	em->msg = NULL;
	em->flags &= ~EMFL_VALID;

	emsgs_head_idx++;
	emsgs_head_idx %= ARRAY_SIZE(emsgs);

	spin_unlock_irqrestore(&emsgs_lock, flags);

	n = copy_to_user(buf, mp, len);
	kfree(mp);	/* message was kmemdup'd in aoechr_error() */
	return n == 0 ? len : -EFAULT;
}
266
/* File operations shared by every aoe character minor; per-minor
 * behavior is dispatched on filp->private_data in open/read/write.
 */
static const struct file_operations aoe_fops = {
	.write = aoechr_write,
	.read = aoechr_read,
	.open = aoechr_open,
	.release = aoechr_rel,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
275
/* Place our device nodes in the etherd/ subdirectory of /dev. */
static char *aoe_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
}
280
/* Module init for the character interface: claim AOE_MAJOR, prepare the
 * error-message ring's synchronization, create the "aoe" class, and
 * populate the device nodes listed in chardevs[].
 * Returns 0 on success or a negative errno.
 */
int __init
aoechr_init(void)
{
	int n, i;

	n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
	if (n < 0) {
		printk(KERN_ERR "aoe: can't register char device\n");
		return n;
	}
	/* Reader synchronization must be ready before any node exists. */
	init_completion(&emsgs_comp);
	spin_lock_init(&emsgs_lock);
	aoe_class = class_create(THIS_MODULE, "aoe");
	if (IS_ERR(aoe_class)) {
		unregister_chrdev(AOE_MAJOR, "aoechr");
		return PTR_ERR(aoe_class);
	}
	/* Nodes appear under /dev/etherd/ via aoe_devnode(). */
	aoe_class->devnode = aoe_devnode;

	/* NOTE(review): device_create() failures are ignored here; a missing
	 * node is treated as non-fatal — confirm this is intentional.
	 */
	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
		device_create(aoe_class, NULL,
			      MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
			      chardevs[i].name);

	return 0;
}
307
308 void
aoechr_exit(void)309 aoechr_exit(void)
310 {
311 int i;
312
313 for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
314 device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
315 class_destroy(aoe_class);
316 unregister_chrdev(AOE_MAJOR, "aoechr");
317 }
318
319