/*
 *  acpi_ipmi.c - ACPI IPMI opregion
 *
 *  Copyright (C) 2010 Intel Corporation
 *  Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/ipmi.h>
#include <linux/device.h>
#include <linux/pnp.h>

MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
MODULE_LICENSE("GPL");

#define IPMI_FLAGS_HANDLER_INSTALL	0

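/*
 * Status codes written to the status byte of the IPMI response buffer
 * (struct acpi_ipmi_buffer below) by acpi_format_ipmi_response(); see
 * ACPI 4.0, sec 5.5.2.4.3.2.
 */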
#define ACPI_IPMI_OK			0
#define ACPI_IPMI_TIMEOUT		0x10
#define ACPI_IPMI_UNKNOWN		0x07
/* the IPMI timeout is 5s */
#define IPMI_TIMEOUT			(5 * HZ)

struct acpi_ipmi_device {
	/* the device list attached to driver_data.ipmi_devices */
	struct list_head head;
	/* the IPMI request message list */
	struct list_head tx_msg_list;
	spinlock_t	tx_msg_lock;
	acpi_handle handle;
	struct pnp_dev *pnp_dev;
	ipmi_user_t	user_interface;
	int ipmi_ifnum; /* IPMI interface number */
	long curr_msgid;
	unsigned long flags;
	struct ipmi_smi_info smi_data;
};

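/*
 * Module-global driver state: the list of ACPI IPMI devices, the SMI
 * watcher that reports BMC interfaces coming and going, the IPMI receive
 * handler, and the mutex serializing access to the device list.
 */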
struct ipmi_driver_data {
	struct list_head	ipmi_devices;
	struct ipmi_smi_watcher	bmc_events;
	struct ipmi_user_hndl	ipmi_hndlrs;
	struct mutex		ipmi_lock;
};

struct acpi_ipmi_msg {
	struct list_head head;
	/*
	 * Generally speaking, the addr type should be SI_ADDR_TYPE and
	 * the addr channel should be BMC.
	 * In fact it can also be IPMB type, but then it would have to be
	 * parsed from the NetFn/command buffer, which is complex enough
	 * that it is skipped here.
	 */
	struct ipmi_addr addr;
	long tx_msgid;
	/* tracks whether the IPMI message has finished */
	struct completion tx_complete;
	struct kernel_ipmi_msg tx_message;
	int	msg_done;
	/* tx data, copied from the ACPI object buffer */
	u8	tx_data[64];
	int	tx_len;
	u8	rx_data[64];
	int	rx_len;
	struct acpi_ipmi_device *device;
};

/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
	u8 status;
	u8 length;
	u8 data[64];
};

static void ipmi_register_bmc(int iface, struct device *dev);
static void ipmi_bmc_gone(int iface);
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);

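/*
 * driver_data registers the new_smi/smi_gone callbacks with the IPMI SMI
 * watcher so that an acpi_ipmi_device is created or torn down whenever a
 * BMC interface appears or disappears.
 */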
static struct ipmi_driver_data driver_data = {
	.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
	.bmc_events = {
		.owner = THIS_MODULE,
		.new_smi = ipmi_register_bmc,
		.smi_gone = ipmi_bmc_gone,
	},
	.ipmi_hndlrs = {
		.ipmi_recv_hndl = ipmi_msg_handler,
	},
};

static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
{
	struct acpi_ipmi_msg *ipmi_msg;
	struct pnp_dev *pnp_dev = ipmi->pnp_dev;

	ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
	if (!ipmi_msg) {
		dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
		return NULL;
	}
	init_completion(&ipmi_msg->tx_complete);
	INIT_LIST_HEAD(&ipmi_msg->head);
	ipmi_msg->device = ipmi;
	return ipmi_msg;
}

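/*
 * The NetFn and command are packed into the opregion field offset: bits
 * 15:8 hold the NetFn and bits 7:0 hold the command, so an offset of
 * 0x3066, for example, encodes NetFn 0x30 and command 0x66.
 */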
#define IPMI_OP_RGN_NETFN(offset)	((offset >> 8) & 0xff)
#define IPMI_OP_RGN_CMD(offset)		(offset & 0xff)
static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
				acpi_physical_address address,
				acpi_integer *value)
{
	struct kernel_ipmi_msg *msg;
	struct acpi_ipmi_buffer *buffer;
	struct acpi_ipmi_device *device;
	unsigned long flags;

	msg = &tx_msg->tx_message;
	/*
	 * The IPMI network function and command are encoded in the address
	 * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
	 */
	msg->netfn = IPMI_OP_RGN_NETFN(address);
	msg->cmd = IPMI_OP_RGN_CMD(address);
	msg->data = tx_msg->tx_data;
	/*
	 * value is the parameter passed by the IPMI opregion space handler.
	 * It points to the IPMI request message buffer.
	 */
	buffer = (struct acpi_ipmi_buffer *)value;
	/* copy the tx message data, clamped to the size of tx_data */
	msg->data_len = min_t(size_t, buffer->length, sizeof(tx_msg->tx_data));
	memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
	/*
	 * The default addr type is SYSTEM_INTERFACE and the channel is BMC.
	 * If the NetFn is APP_REQUEST and the cmd is SEND_MESSAGE, the addr
	 * type should be changed to IPMB, which would require parsing the
	 * IPMI request message buffer to get the IPMB address.
	 * If so, please fix me.
	 */
	tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	tx_msg->addr.channel = IPMI_BMC_CHANNEL;
	tx_msg->addr.data[0] = 0;

	/* Get the msgid */
	device = tx_msg->device;
	spin_lock_irqsave(&device->tx_msg_lock, flags);
	device->curr_msgid++;
	tx_msg->tx_msgid = device->curr_msgid;
	spin_unlock_irqrestore(&device->tx_msg_lock, flags);
}

static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
		acpi_integer *value, int rem_time)
{
	struct acpi_ipmi_buffer *buffer;

	/*
	 * value is also used as an output parameter. It represents the
	 * response IPMI message returned by the IPMI command.
	 */
	buffer = (struct acpi_ipmi_buffer *)value;
	if (!rem_time && !msg->msg_done) {
		buffer->status = ACPI_IPMI_TIMEOUT;
		return;
	}
	/*
	 * If the msg_done flag is not set or the recv length is zero, the
	 * IPMI command did not complete correctly and the status code is
	 * ACPI_IPMI_UNKNOWN.
	 */
	if (!msg->msg_done || !msg->rx_len) {
		buffer->status = ACPI_IPMI_UNKNOWN;
		return;
	}
	/*
	 * If the IPMI response message was obtained correctly, the status
	 * code is ACPI_IPMI_OK.
	 */
	buffer->status = ACPI_IPMI_OK;
	buffer->length = msg->rx_len;
	memcpy(buffer->data, msg->rx_data, msg->rx_len);
}

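/*
 * Complete every message still on the TX list so that waiters in the
 * opregion handler return, then poll for up to ~100ms while they unlink
 * their entries from the list.
 */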
static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
{
	struct acpi_ipmi_msg *tx_msg, *temp;
	int count = HZ / 10;
	struct pnp_dev *pnp_dev = ipmi->pnp_dev;

	list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
		/* wake up the thread sleeping on this Tx msg */
		complete(&tx_msg->tx_complete);
	}

	/* wait for about 100ms to flush the tx message list */
	while (count--) {
		if (list_empty(&ipmi->tx_msg_list))
			break;
		schedule_timeout(1);
	}
	if (!list_empty(&ipmi->tx_msg_list))
		dev_warn(&pnp_dev->dev, "tx msg list is not empty\n");
}

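/*
 * Receive handler invoked by the IPMI core. Match the response msgid
 * against the pending TX list under tx_msg_lock, copy the response data
 * and wake the opregion handler waiting on tx_complete.
 */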
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
	struct acpi_ipmi_device *ipmi_device = user_msg_data;
	int msg_found = 0;
	struct acpi_ipmi_msg *tx_msg;
	struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
	unsigned long flags;

	if (msg->user != ipmi_device->user_interface) {
		dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
			"returned user %p, expected user %p\n",
			msg->user, ipmi_device->user_interface);
		ipmi_free_recv_msg(msg);
		return;
	}
	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
	list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
		if (msg->msgid == tx_msg->tx_msgid) {
			msg_found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);

	if (!msg_found) {
		dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
			"returned.\n", msg->msgid);
		ipmi_free_recv_msg(msg);
		return;
	}

	if (msg->msg.data_len) {
		/* copy the response data to rx_data, clamped to its size */
		tx_msg->rx_len = min_t(size_t, msg->msg.data_len,
				       sizeof(tx_msg->rx_data));
		memcpy(tx_msg->rx_data, msg->msg_data, tx_msg->rx_len);
		tx_msg->msg_done = 1;
	}
	complete(&tx_msg->tx_complete);
	ipmi_free_recv_msg(msg);
}

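/*
 * new_smi callback from the IPMI SMI watcher. Only ACPI-enumerated
 * (SI_ACPI) interfaces are handled: create an IPMI user for the
 * interface and install the IPMI opregion handler under its ACPI handle.
 */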
static void ipmi_register_bmc(int iface, struct device *dev)
{
	struct acpi_ipmi_device *ipmi_device, *temp;
	struct pnp_dev *pnp_dev;
	ipmi_user_t		user;
	int err;
	struct ipmi_smi_info smi_data;
	acpi_handle handle;

	err = ipmi_get_smi_info(iface, &smi_data);
	if (err)
		return;

	if (smi_data.addr_src != SI_ACPI) {
		put_device(smi_data.dev);
		return;
	}

	handle = smi_data.addr_info.acpi_info.acpi_handle;

	mutex_lock(&driver_data.ipmi_lock);
	list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
		/*
		 * if the corresponding ACPI handle is already added
		 * to the device list, don't add it again.
		 */
		if (temp->handle == handle)
			goto out;
	}

	ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
	if (!ipmi_device)
		goto out;

	pnp_dev = to_pnp_dev(smi_data.dev);
	ipmi_device->handle = handle;
	ipmi_device->pnp_dev = pnp_dev;

	err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
					ipmi_device, &user);
	if (err) {
		dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
		kfree(ipmi_device);
		goto out;
	}
	acpi_add_ipmi_device(ipmi_device);
	ipmi_device->user_interface = user;
	ipmi_device->ipmi_ifnum = iface;
	mutex_unlock(&driver_data.ipmi_lock);
	memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
	return;

out:
	mutex_unlock(&driver_data.ipmi_lock);
	put_device(smi_data.dev);
	return;
}

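/*
 * smi_gone callback from the IPMI SMI watcher: tear down the
 * acpi_ipmi_device bound to the interface that went away.
 */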
static void ipmi_bmc_gone(int iface)
{
	struct acpi_ipmi_device *ipmi_device, *temp;

	mutex_lock(&driver_data.ipmi_lock);
	list_for_each_entry_safe(ipmi_device, temp,
				&driver_data.ipmi_devices, head) {
		if (ipmi_device->ipmi_ifnum != iface)
			continue;

		acpi_remove_ipmi_device(ipmi_device);
		put_device(ipmi_device->smi_data.dev);
		kfree(ipmi_device);
		break;
	}
	mutex_unlock(&driver_data.ipmi_lock);
}

/* --------------------------------------------------------------------------
 *			Address Space Management
 * -------------------------------------------------------------------------- */
/*
 * This is the IPMI opregion space handler.
 * @function: indicates read or write access. Since IPMI messages are
 *	      command driven, only write access is meaningful.
 * @address: contains the NetFn/command of the IPMI request message.
 * @bits   : not used.
 * @value  : an in/out parameter pointing to the IPMI message buffer.
 *	     Before the IPMI message is sent, it holds the actual request
 *	     IPMI message. After the IPMI message has finished, it holds
 *	     the response IPMI message returned by the IPMI command.
 * @handler_context: IPMI device context.
 */

static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, acpi_integer *value,
		      void *handler_context, void *region_context)
{
	struct acpi_ipmi_msg *tx_msg;
	struct acpi_ipmi_device *ipmi_device = handler_context;
	int err, rem_time;
	acpi_status status;
	unsigned long flags;
	/*
	 * IPMI opregion message.
	 * An IPMI message is first written to the BMC and system software
	 * then gets the response, so read access to the IPMI opregion is
	 * meaningless.
	 */
	if ((function & ACPI_IO_MASK) == ACPI_READ)
		return AE_TYPE;

	if (!ipmi_device->user_interface)
		return AE_NOT_EXIST;

	tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
	if (!tx_msg)
		return AE_NO_MEMORY;

	acpi_format_ipmi_msg(tx_msg, address, value);
	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
	err = ipmi_request_settime(ipmi_device->user_interface,
					&tx_msg->addr,
					tx_msg->tx_msgid,
					&tx_msg->tx_message,
					NULL, 0, 0, 0);
	if (err) {
		status = AE_ERROR;
		goto end_label;
	}
	rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
					IPMI_TIMEOUT);
	acpi_format_ipmi_response(tx_msg, value, rem_time);
	status = AE_OK;

end_label:
	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
	list_del(&tx_msg->head);
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
	kfree(tx_msg);
	return status;
}

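/*
 * The opregion handler is installed at most once per device; the
 * IPMI_FLAGS_HANDLER_INSTALL bit in acpi_ipmi_device.flags tracks
 * whether it is currently installed.
 */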
static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
{
	if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
		return;

	acpi_remove_address_space_handler(ipmi->handle,
				ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);

	clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
}

static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
{
	acpi_status status;

	if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
		return 0;

	status = acpi_install_address_space_handler(ipmi->handle,
						    ACPI_ADR_SPACE_IPMI,
						    &acpi_ipmi_space_handler,
						    NULL, ipmi);
	if (ACPI_FAILURE(status)) {
		struct pnp_dev *pnp_dev = ipmi->pnp_dev;
		dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
			"handler\n");
		return -EINVAL;
	}
	set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
	return 0;
}

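/* Called from ipmi_register_bmc() with driver_data.ipmi_lock held. */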
static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
{
	INIT_LIST_HEAD(&ipmi_device->head);

	spin_lock_init(&ipmi_device->tx_msg_lock);
	INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
	ipmi_install_space_handler(ipmi_device);

	list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
}

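/* Called with driver_data.ipmi_lock held. */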
static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
{
	/*
	 * If the IPMI user interface is created, it should be
	 * destroyed.
	 */
	if (ipmi_device->user_interface) {
		ipmi_destroy_user(ipmi_device->user_interface);
		ipmi_device->user_interface = NULL;
	}
	/* flush the Tx_msg list */
	if (!list_empty(&ipmi_device->tx_msg_list))
		ipmi_flush_tx_msg(ipmi_device);

	list_del(&ipmi_device->head);
	ipmi_remove_space_handler(ipmi_device);
}

static int __init acpi_ipmi_init(void)
{
	int result = 0;

	if (acpi_disabled)
		return result;

	mutex_init(&driver_data.ipmi_lock);

	result = ipmi_smi_watcher_register(&driver_data.bmc_events);

	return result;
}

static void __exit acpi_ipmi_exit(void)
{
	struct acpi_ipmi_device *ipmi_device, *temp;

	if (acpi_disabled)
		return;

	ipmi_smi_watcher_unregister(&driver_data.bmc_events);

	/*
	 * When an smi_watcher is unregistered, it is only deleted from the
	 * smi_watcher list and its smi_gone callback is not called. So
	 * explicitly uninstall the ACPI IPMI opregion handler and free
	 * each device here.
	 */
	mutex_lock(&driver_data.ipmi_lock);
	list_for_each_entry_safe(ipmi_device, temp,
				&driver_data.ipmi_devices, head) {
		acpi_remove_ipmi_device(ipmi_device);
		put_device(ipmi_device->smi_data.dev);
		kfree(ipmi_device);
	}
	mutex_unlock(&driver_data.ipmi_lock);
}

module_init(acpi_ipmi_init);
module_exit(acpi_ipmi_exit);