// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Event Management Driver
 *
 * Copyright (C) 2021 Xilinx, Inc.
 *
 * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
 */

#include <linux/cpuhotplug.h>
#include <linux/firmware/xlnx-event-manager.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/hashtable.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

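/* Per-CPU cookie handed to request_percpu_irq()/free_percpu_irq() for the SGI */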
static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1);

static int virq_sgi;
static int event_manager_availability = -EACCES;

/* SGI number used for Event management driver */
#define XLNX_EVENT_SGI_NUM	(15)

/* Max number of drivers that can register for the same event */
#define MAX_DRIVER_PER_EVENT	(10U)

/* Max HashMap Order for PM API feature check (1<<7 = 128) */
#define REGISTERED_DRIVER_MAX_ORDER	(7)

#define MAX_BITS	(32U) /* Number of bits available for error mask */

#define FIRMWARE_VERSION_MASK			(0xFFFFU)
#define REGISTER_NOTIFIER_FIRMWARE_VERSION	(2U)

static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
static int sgi_num = XLNX_EVENT_SGI_NUM;

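/*
 * Set by the remove helpers when the last callback for an entry has been
 * dropped, telling xlnx_unregister_event() that the firmware notifier for
 * that Node-Id/Event should be unregistered as well.
 */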
static bool is_need_to_unregister;

/**
 * struct agent_cb - Registered callback function and private data.
 * @agent_data:	Data passed back to handler function.
 * @eve_cb:	Function pointer to store the callback function.
 * @list:	list node used to link callbacks registered for the same event.
 */
struct agent_cb {
	void *agent_data;
	event_cb_func_t eve_cb;
	struct list_head list;
};

/**
 * struct registered_event_data - Registered Event Data.
 * @key:		Combined id (Node-Id | Event-Id) of type u64, where the
 *			upper u32 is the Node-Id and the lower u32 is the Event-Id.
 *			It is used as the key to index into the hashmap.
 * @cb_type:		Type of API callback, like PM_NOTIFY_CB, etc.
 * @wake:		If this flag is set, the firmware will wake up the
 *			processor if it is in sleep or power down state.
 * @cb_list_head:	Head of the callback data list, which contains the
 *			information about registered handlers and private data.
 * @hentry:		hlist_node that hooks this entry into the hashtable.
 */
struct registered_event_data {
	u64 key;
	enum pm_api_cb_id cb_type;
	bool wake;
	struct list_head cb_list_head;
	struct hlist_node hentry;
};

static bool xlnx_is_error_event(const u32 node_id)
{
	if (node_id == EVENT_ERROR_PMC_ERR1 ||
	    node_id == EVENT_ERROR_PMC_ERR2 ||
	    node_id == EVENT_ERROR_PSM_ERR1 ||
	    node_id == EVENT_ERROR_PSM_ERR2)
		return true;

	return false;
}

static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake,
					event_cb_func_t cb_fun, void *data)
{
	u64 key = 0;
	bool present_in_hash = false;
	struct registered_event_data *eve_data;
	struct agent_cb *cb_data;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	key = ((u64)node_id << 32U) | (u64)event;
	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
		if (eve_data->key == key) {
			present_in_hash = true;
			break;
		}
	}

	if (!present_in_hash) {
		/* Add new entry if not present in HASH table */
		eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
		if (!eve_data)
			return -ENOMEM;
		eve_data->key = key;
		eve_data->cb_type = PM_NOTIFY_CB;
		eve_data->wake = wake;
		INIT_LIST_HEAD(&eve_data->cb_list_head);

		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
		if (!cb_data) {
			/* Free the hash entry allocated above to avoid leaking it */
			kfree(eve_data);
			return -ENOMEM;
		}
		cb_data->eve_cb = cb_fun;
		cb_data->agent_data = data;

		/* Add into callback list */
		list_add(&cb_data->list, &eve_data->cb_list_head);

		/* Add into HASH table */
		hash_add(reg_driver_map, &eve_data->hentry, key);
	} else {
		/* Search for callback function and private data in list */
		list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
			if (cb_pos->eve_cb == cb_fun &&
			    cb_pos->agent_data == data) {
				return 0;
			}
		}

		/* Add multiple handler and private data in list */
		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
		if (!cb_data)
			return -ENOMEM;
		cb_data->eve_cb = cb_fun;
		cb_data->agent_data = data;

		list_add(&cb_data->list, &eve_data->cb_list_head);
	}

	return 0;
}

static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
{
	struct registered_event_data *eve_data;
	struct agent_cb *cb_data;

	/* Check for existing entry in hash table for given cb_type */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
		if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
			pr_err("Found as already registered\n");
			return -EINVAL;
		}
	}

	/* Add new entry if not present */
	eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
	if (!eve_data)
		return -ENOMEM;

	eve_data->key = 0;
	eve_data->cb_type = PM_INIT_SUSPEND_CB;
	INIT_LIST_HEAD(&eve_data->cb_list_head);

	cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
	if (!cb_data) {
		/* Free the hash entry allocated above to avoid leaking it */
		kfree(eve_data);
		return -ENOMEM;
	}
	cb_data->eve_cb = cb_fun;
	cb_data->agent_data = data;

	/* Add into callback list */
	list_add(&cb_data->list, &eve_data->cb_list_head);

	hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);

	return 0;
}

static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	is_need_to_unregister = false;

	/* Check for existing entry in hash table for given cb_type */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
		if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
			/* Delete the list of callback */
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				if (cb_pos->eve_cb == cb_fun) {
					is_callback_found = true;
					list_del_init(&cb_pos->list);
					kfree(cb_pos);
				}
			}
			/* remove an object from a hashtable */
			hash_del(&eve_data->hentry);
			kfree(eve_data);
			is_need_to_unregister = true;
		}
	}
	if (!is_callback_found) {
		pr_warn("Didn't find any registered callback for suspend event\n");
		return -EINVAL;
	}

	return 0;
}

static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
					   event_cb_func_t cb_fun, void *data)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	u64 key = ((u64)node_id << 32U) | (u64)event;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	is_need_to_unregister = false;

	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
		if (eve_data->key == key) {
			/* Delete the list of callback */
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				if (cb_pos->eve_cb == cb_fun &&
				    cb_pos->agent_data == data) {
					is_callback_found = true;
					list_del_init(&cb_pos->list);
					kfree(cb_pos);
				}
			}

			/* Remove HASH table if callback list is empty */
			if (list_empty(&eve_data->cb_list_head)) {
				/* remove an object from a HASH table */
				hash_del(&eve_data->hentry);
				kfree(eve_data);
				is_need_to_unregister = true;
			}
		}
	}
	if (!is_callback_found) {
		pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
			node_id, event);
		return -EINVAL;
	}

	return 0;
}

/**
 * xlnx_register_event() - Register for the event.
 * @cb_type:	Type of callback from pm_api_cb_id,
 *		PM_NOTIFY_CB - for Error Events,
 *		PM_INIT_SUSPEND_CB - for suspend callback.
 * @node_id:	Node-Id related to event.
 * @event:	Event Mask for the Error Event.
 * @wake:	Flag specifying whether the subsystem should be woken upon
 *		event notification.
 * @cb_fun:	Function pointer to store the callback function.
 * @data:	Pointer for the driver instance.
 *
 * Return: Returns 0 on successful registration else error code.
 */
int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
			const bool wake, event_cb_func_t cb_fun, void *data)
{
	int ret = 0;
	u32 eve;
	int pos;

	if (event_manager_availability)
		return event_manager_availability;

	if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
		pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
		return -EINVAL;
	}

	if (!cb_fun)
		return -EFAULT;

	if (cb_type == PM_INIT_SUSPEND_CB) {
		ret = xlnx_add_cb_for_suspend(cb_fun, data);
	} else {
		if (!xlnx_is_error_event(node_id)) {
			/* Add entry for Node-Id/Event in hash table */
			ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data);
		} else {
			/* Add into Hash table */
			for (pos = 0; pos < MAX_BITS; pos++) {
				eve = event & (1 << pos);
				if (!eve)
					continue;

				/* Add entry for Node-Id/Eve in hash table */
				ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun,
								   data);
				/* Break the loop if an error occurred */
				if (ret)
					break;
			}
			if (ret) {
				/* Skip the event that returned the error */
				pos--;
				/* Remove events registered during this call from the hash table */
				for ( ; pos >= 0; pos--) {
					eve = event & (1 << pos);
					if (!eve)
						continue;
					xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
				}
			}
		}

		if (ret) {
			pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
			       event, ret);
			return ret;
		}

		/* Register for Node-Id/Event combination in firmware */
		ret = zynqmp_pm_register_notifier(node_id, event, wake, true);
		if (ret) {
			pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
			       event, ret);
			/* Remove already registered event from hash table */
			if (xlnx_is_error_event(node_id)) {
				for (pos = 0; pos < MAX_BITS; pos++) {
					eve = event & (1 << pos);
					if (!eve)
						continue;
					xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
				}
			} else {
				xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
			}
			return ret;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(xlnx_register_event);

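/*
 * Illustrative sketch (not part of the driver): a client could subscribe to
 * a PMC error event roughly as follows; the callback name, error mask and
 * private-data pointer below are hypothetical.
 *
 *	static void xpm_err_cb(const u32 *payload, void *data)
 *	{
 *		pr_info("node 0x%x, error mask 0x%x\n", payload[1], payload[2]);
 *	}
 *
 *	ret = xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
 *				  error_mask, false, xpm_err_cb, &priv);
 */
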
/**
 * xlnx_unregister_event() - Unregister for the event.
 * @cb_type:	Type of callback from pm_api_cb_id,
 *		PM_NOTIFY_CB - for Error Events,
 *		PM_INIT_SUSPEND_CB - for suspend callback.
 * @node_id:	Node-Id related to event.
 * @event:	Event Mask for the Error Event.
 * @cb_fun:	Function pointer of callback function.
 * @data:	Pointer of agent's private data.
 *
 * Return: Returns 0 on successful unregistration else error code.
 */
int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
			  event_cb_func_t cb_fun, void *data)
{
	int ret = 0;
	u32 eve, pos;

	is_need_to_unregister = false;

	if (event_manager_availability)
		return event_manager_availability;

	if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
		pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
		return -EINVAL;
	}

	if (!cb_fun)
		return -EFAULT;

	if (cb_type == PM_INIT_SUSPEND_CB) {
		ret = xlnx_remove_cb_for_suspend(cb_fun);
	} else {
		/* Remove Node-Id/Event from hash table */
		if (!xlnx_is_error_event(node_id)) {
			xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
		} else {
			for (pos = 0; pos < MAX_BITS; pos++) {
				eve = event & (1 << pos);
				if (!eve)
					continue;

				xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
			}
		}

		/* Un-register if list is empty */
		if (is_need_to_unregister) {
			/* Un-register for Node-Id/Event combination */
			ret = zynqmp_pm_register_notifier(node_id, event, false, false);
			if (ret) {
				pr_err("%s() failed for 0x%x and 0x%x: %d\n",
				       __func__, node_id, event, ret);
				return ret;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(xlnx_unregister_event);

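/*
 * Illustrative counterpart to the sketch above: the client tears down the
 * notification with matching arguments, e.g.
 *
 *	xlnx_unregister_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1, error_mask,
 *			      xpm_err_cb, &priv);
 */
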
static void xlnx_call_suspend_cb_handler(const u32 *payload)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	u32 cb_type = payload[0];
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	/* Check for existing entry in hash table for given cb_type */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
		if (eve_data->cb_type == cb_type) {
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
				is_callback_found = true;
			}
		}
	}
	if (!is_callback_found)
		pr_warn("Didn't find any registered callback for suspend event\n");
}

static void xlnx_call_notify_cb_handler(const u32 *payload)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
	int ret;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
		if (eve_data->key == key) {
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
				is_callback_found = true;
			}

			/* Re-register with firmware to get future events */
			ret = zynqmp_pm_register_notifier(payload[1], payload[2],
							  eve_data->wake, true);
			if (ret) {
				pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
				       payload[1], payload[2], ret);
				list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head,
							 list) {
					/* Remove already registered event from hash table */
					xlnx_remove_cb_for_notify_event(payload[1], payload[2],
									cb_pos->eve_cb,
									cb_pos->agent_data);
				}
			}
		}
	}
	if (!is_callback_found)
		pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
			payload[1], payload[2]);
}

static void xlnx_get_event_callback_data(u32 *buf)
{
	zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
}

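/*
 * SGI handler: firmware raises the registered SGI when a subscribed event
 * occurs. The handler fetches the callback payload via GET_CALLBACK_DATA and
 * dispatches it to the callbacks registered in reg_driver_map.
 */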
static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
{
	u32 cb_type, node_id, event, pos;
	u32 payload[CB_MAX_PAYLOAD_SIZE] = {0};
	u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0};

	/* Get event data */
	xlnx_get_event_callback_data(payload);

	/* First element is callback type, others are callback arguments */
	cb_type = payload[0];

	if (cb_type == PM_NOTIFY_CB) {
		node_id = payload[1];
		event = payload[2];
		if (!xlnx_is_error_event(node_id)) {
			xlnx_call_notify_cb_handler(payload);
		} else {
			/*
			 * Each callback function expects the payload as an input argument.
			 * A single callback may report multiple error events through the
			 * error mask, so payload[2] can contain several error bits.
			 * The reg_driver_map database stores one entry per single
			 * node_id/error combination, so copy the payload message into
			 * event_data, update event_data[2] with the error mask of a
			 * single error event, and use event_data as the input argument
			 * for the registered callback function.
			 */
			memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE));
			/* Support Multiple Error Event */
			for (pos = 0; pos < MAX_BITS; pos++) {
				if (!(event & (1 << pos)))
					continue;
				event_data[2] = (event & (1 << pos));
				xlnx_call_notify_cb_handler(event_data);
			}
		}
	} else if (cb_type == PM_INIT_SUSPEND_CB) {
		xlnx_call_suspend_cb_handler(payload);
	} else {
		pr_err("%s() Unsupported Callback %d\n", __func__, cb_type);
	}

	return IRQ_HANDLED;
}

static int xlnx_event_cpuhp_start(unsigned int cpu)
{
	enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE);

	return 0;
}

static int xlnx_event_cpuhp_down(unsigned int cpu)
{
	disable_percpu_irq(virq_sgi);

	return 0;
}

static void xlnx_disable_percpu_irq(void *data)
{
	disable_percpu_irq(virq_sgi);
}

static int xlnx_event_init_sgi(struct platform_device *pdev)
{
	int ret = 0;
	int cpu = smp_processor_id();
	/*
	 * IRQ related structures are used for the following:
	 * for each SGI interrupt ensure it is mapped by the GIC IRQ domain
	 * and that each corresponding Linux IRQ for the HW IRQ has
	 * a handler for when receiving an interrupt from the remote
	 * processor.
	 */
	struct irq_domain *domain;
	struct irq_fwspec sgi_fwspec;
	struct device_node *interrupt_parent = NULL;
	struct device *parent = pdev->dev.parent;

	/* Find GIC controller to map SGIs. */
	interrupt_parent = of_irq_find_parent(parent->of_node);
	if (!interrupt_parent) {
		dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
		return -EINVAL;
	}

	/* Each SGI needs to be associated with GIC's IRQ domain. */
	domain = irq_find_host(interrupt_parent);
	of_node_put(interrupt_parent);

	/* Each mapping needs GIC domain when finding IRQ mapping. */
	sgi_fwspec.fwnode = domain->fwnode;

	/*
	 * When irq domain looks at mapping each arg is as follows:
	 * 3 args for: interrupt type (SGI), interrupt # (set later), type
	 */
	sgi_fwspec.param_count = 1;

	/* Set SGI's hwirq */
	sgi_fwspec.param[0] = sgi_num;
	virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);

	per_cpu(cpu_number1, cpu) = cpu;
	ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
				 &cpu_number1);
	WARN_ON(ret);
	if (ret) {
		irq_dispose_mapping(virq_sgi);
		return ret;
	}

	irq_to_desc(virq_sgi);
	irq_set_status_flags(virq_sgi, IRQ_PER_CPU);

	return ret;
}

static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_number1, cpu) = cpu;

	cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);

	on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);

	irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
	free_percpu_irq(virq_sgi, &cpu_number1);
	irq_dispose_mapping(virq_sgi);
}

static int xlnx_event_manager_probe(struct platform_device *pdev)
{
	int ret;

	ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER);
	if (ret < 0) {
		dev_err(&pdev->dev, "Feature check failed with %d\n", ret);
		return ret;
	}

	if ((ret & FIRMWARE_VERSION_MASK) <
	    REGISTER_NOTIFIER_FIRMWARE_VERSION) {
		dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n",
			REGISTER_NOTIFIER_FIRMWARE_VERSION,
			ret & FIRMWARE_VERSION_MASK);
		return -EOPNOTSUPP;
	}

	/* Initialize the SGI */
	ret = xlnx_event_init_sgi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "SGI Init failed with %d\n", ret);
		return ret;
	}

	/* Setup function for the CPU hot-plug cases */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting",
			  xlnx_event_cpuhp_start, xlnx_event_cpuhp_down);

	ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num,
				  0, NULL);
	if (ret) {
		dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
		xlnx_event_cleanup_sgi(pdev);
		return ret;
	}

	event_manager_availability = 0;

	dev_info(&pdev->dev, "SGI %d Registered over TF-A\n", sgi_num);
	dev_info(&pdev->dev, "Xilinx Event Management driver probed\n");

	return ret;
}

static int xlnx_event_manager_remove(struct platform_device *pdev)
{
	int i;
	struct registered_event_data *eve_data;
	struct hlist_node *tmp;
	int ret;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
		list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
			list_del_init(&cb_pos->list);
			kfree(cb_pos);
		}
		hash_del(&eve_data->hentry);
		kfree(eve_data);
	}

	ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, 0, 1, NULL);
	if (ret)
		dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret);

	xlnx_event_cleanup_sgi(pdev);

	event_manager_availability = -EACCES;

	return ret;
}

static struct platform_driver xlnx_event_manager_driver = {
	.probe = xlnx_event_manager_probe,
	.remove = xlnx_event_manager_remove,
	.driver = {
		.name = "xlnx_event_manager",
	},
};
module_param(sgi_num, uint, 0);
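MODULE_PARM_DESC(sgi_num, "SGI number used by the event manager driver (default: 15)");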
module_platform_driver(xlnx_event_manager_driver);