1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Intel Speed Select Interface: Common functions
4 * Copyright (c) 2019, Intel Corporation.
5 * All rights reserved.
6 *
7 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
8 */
9
10 #include <linux/cpufeature.h>
11 #include <linux/cpuhotplug.h>
12 #include <linux/fs.h>
13 #include <linux/hashtable.h>
14 #include <linux/miscdevice.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/sched/signal.h>
18 #include <linux/slab.h>
19 #include <linux/uaccess.h>
20 #include <uapi/linux/isst_if.h>
21
22 #include "isst_if_common.h"
23
24 #define MSR_THREAD_ID_INFO 0x53
25 #define MSR_CPU_BUS_NUMBER 0x128
26
/* Per-interface-type callback registry (mailbox, MMIO, ...) */
static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];

/* MSRs that may be accessed through the ISST_IF_MSR_COMMAND ioctl */
static int punit_msr_white_list[] = {
	MSR_TURBO_RATIO_LIMIT,
	MSR_CONFIG_TDP_CONTROL,
	MSR_TURBO_RATIO_LIMIT1,
	MSR_TURBO_RATIO_LIMIT2,
};
35
/* Inclusive range of valid sub-commands for one mailbox command */
struct isst_valid_cmd_ranges {
	u16 cmd;
	u16 sub_cmd_beg;
	u16 sub_cmd_end;
};

/* A (cmd, sub_cmd, param) triple identifying a "set" request */
struct isst_cmd_set_req_type {
	u16 cmd;
	u16 sub_cmd;
	u16 param;
};
47
/*
 * Whitelist of PUNIT mailbox commands user space may issue; anything
 * outside these (cmd, sub-command range) pairs is rejected by
 * isst_if_mbox_cmd_invalid().
 */
static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
	{0xD0, 0x00, 0x03},
	{0x7F, 0x00, 0x0B},
	{0x7F, 0x10, 0x12},
	{0x7F, 0x20, 0x23},
	{0x94, 0x03, 0x03},
	{0x95, 0x03, 0x03},
};

/*
 * Mailbox commands classified as "set" requests by
 * isst_if_mbox_cmd_set_req(); presumably the ones that modify PUNIT
 * state and therefore need privilege/replay handling in the mbox
 * drivers — confirm against the callers of that helper.
 */
static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
	{0xD0, 0x00, 0x08},
	{0xD0, 0x01, 0x08},
	{0xD0, 0x02, 0x08},
	{0xD0, 0x03, 0x08},
	{0x7F, 0x02, 0x00},
	{0x7F, 0x08, 0x00},
	{0x95, 0x03, 0x03},
};
66
/* One cached command (mailbox or MSR write), keyed by 'cmd' in isst_hash */
struct isst_cmd {
	struct hlist_node hnode;
	u64 data;		/* mailbox request data or MSR value */
	u32 cmd;		/* mailbox: (cmd << 16) | sub_cmd; MSR: MSR id */
	int cpu;
	int mbox_cmd_type;	/* non-zero: mailbox entry, zero: MSR entry */
	u32 param;
};

/* Replay cache of issued set-requests; writers hold isst_hash_lock */
static DECLARE_HASHTABLE(isst_hash, 8);
static DEFINE_MUTEX(isst_hash_lock);
78
isst_store_new_cmd(int cmd,u32 cpu,int mbox_cmd_type,u32 param,u32 data)79 static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
80 u32 data)
81 {
82 struct isst_cmd *sst_cmd;
83
84 sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
85 if (!sst_cmd)
86 return -ENOMEM;
87
88 sst_cmd->cpu = cpu;
89 sst_cmd->cmd = cmd;
90 sst_cmd->mbox_cmd_type = mbox_cmd_type;
91 sst_cmd->param = param;
92 sst_cmd->data = data;
93
94 hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);
95
96 return 0;
97 }
98
isst_delete_hash(void)99 static void isst_delete_hash(void)
100 {
101 struct isst_cmd *sst_cmd;
102 struct hlist_node *tmp;
103 int i;
104
105 hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
106 hash_del(&sst_cmd->hnode);
107 kfree(sst_cmd);
108 }
109 }
110
/**
 * isst_store_cmd() - Store command to a hash table
 * @cmd: Mailbox command.
 * @sub_cmd: Mailbox sub-command or MSR id.
 * @cpu: Target CPU of the command.
 * @mbox_cmd_type: Mailbox or MSR command.
 * @param: Mailbox parameter.
 * @data: Mailbox request data or MSR data.
 *
 * Stores the command to a hash table if there is no such command already
 * stored. If already stored update the latest parameter and data for the
 * command.
 *
 * Return: Return result of store to hash table, 0 for success, others for
 * failure.
 */
int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
		   u32 param, u64 data)
{
	struct isst_cmd *sst_cmd;
	int full_cmd, ret;

	/* Hash key packs cmd into the upper and sub_cmd into the lower 16 bits */
	full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
	full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
	mutex_lock(&isst_hash_lock);
	/* Update in place when the same (cmd, cpu, type) is already cached */
	hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
		if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
		    sst_cmd->mbox_cmd_type == mbox_cmd_type) {
			sst_cmd->param = param;
			sst_cmd->data = data;
			mutex_unlock(&isst_hash_lock);
			return 0;
		}
	}

	ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
	mutex_unlock(&isst_hash_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(isst_store_cmd);
151
isst_mbox_resume_command(struct isst_if_cmd_cb * cb,struct isst_cmd * sst_cmd)152 static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
153 struct isst_cmd *sst_cmd)
154 {
155 struct isst_if_mbox_cmd mbox_cmd;
156 int wr_only;
157
158 mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
159 mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
160 mbox_cmd.parameter = sst_cmd->param;
161 mbox_cmd.req_data = sst_cmd->data;
162 mbox_cmd.logical_cpu = sst_cmd->cpu;
163 (cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
164 }
165
/**
 * isst_resume_common() - Process Resume request
 *
 * On resume replay all mailbox commands and MSRs.
 *
 * Return: None.
 */
void isst_resume_common(void)
{
	struct isst_cmd *sst_cmd;
	int i;

	/*
	 * NOTE(review): the hash is walked without isst_hash_lock here;
	 * presumably safe because no user I/O is in flight during the
	 * resume path — confirm.
	 */
	hash_for_each(isst_hash, i, sst_cmd, hnode) {
		struct isst_if_cmd_cb *cb;

		if (sst_cmd->mbox_cmd_type) {
			/* Mailbox entry: replay via the registered callback */
			cb = &punit_callbacks[ISST_IF_DEV_MBOX];
			if (cb->registered)
				isst_mbox_resume_command(cb, sst_cmd);
		} else {
			/* MSR entry: 'cmd' holds the MSR id, 'data' the value */
			wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
					   sst_cmd->data);
		}
	}
}
EXPORT_SYMBOL_GPL(isst_resume_common);
192
/*
 * Replay cached whitelisted-MSR writes for @cpu. Uses plain
 * wrmsrl_safe() (not the on_cpu variant); presumably this runs on the
 * target CPU itself via the cpuhp online callback — confirm.
 */
static void isst_restore_msr_local(int cpu)
{
	struct isst_cmd *sst_cmd;
	int i;

	mutex_lock(&isst_hash_lock);
	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
		/* A zero entry terminates the whitelist early */
		if (!punit_msr_white_list[i])
			break;

		/* MSR entries are hashed by MSR id with mbox_cmd_type == 0 */
		hash_for_each_possible(isst_hash, sst_cmd, hnode,
				       punit_msr_white_list[i]) {
			if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
				wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
		}
	}
	mutex_unlock(&isst_hash_lock);
}
211
212 /**
213 * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
214 * @cmd: Pointer to the command structure to verify.
215 *
216 * Invalid command to PUNIT to may result in instability of the platform.
217 * This function has a whitelist of commands, which are allowed.
218 *
219 * Return: Return true if the command is invalid, else false.
220 */
isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd * cmd)221 bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
222 {
223 int i;
224
225 if (cmd->logical_cpu >= nr_cpu_ids)
226 return true;
227
228 for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
229 if (cmd->command == isst_valid_cmds[i].cmd &&
230 (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
231 cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
232 return false;
233 }
234 }
235
236 return true;
237 }
238 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
239
240 /**
241 * isst_if_mbox_cmd_set_req() - Check mailbox command is a set request
242 * @cmd: Pointer to the command structure to verify.
243 *
244 * Check if the given mail box level is set request and not a get request.
245 *
246 * Return: Return true if the command is set_req, else false.
247 */
isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd * cmd)248 bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
249 {
250 int i;
251
252 for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
253 if (cmd->command == isst_cmd_set_reqs[i].cmd &&
254 cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
255 cmd->parameter == isst_cmd_set_reqs[i].param) {
256 return true;
257 }
258 }
259
260 return false;
261 }
262 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
263
/*
 * Fill the capability/version info block and copy it to user space.
 *
 * The struct is zero-initialized before the fields are assigned so any
 * padding bytes in it are not copied out uninitialized (would leak
 * kernel stack contents to user space).
 */
static int isst_if_get_platform_info(void __user *argp)
{
	struct isst_if_platform_info info = {};

	info.api_version = ISST_IF_API_VERSION;
	info.driver_version = ISST_IF_DRIVER_VERSION;
	info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
	info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
	info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;

	if (copy_to_user(argp, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
279
/* Only buses 0 and 1 carry the PCI functions the PUNIT interface needs */
#define ISST_MAX_BUS_NUMBER	2

struct isst_if_cpu_info {
	/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
	int bus_info[ISST_MAX_BUS_NUMBER];	/* HW bus numbers; -1 if unknown */
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];	/* cached at CPU online */
	int punit_cpu_id;	/* from MSR_THREAD_ID_INFO; -1 on read failure */
	int numa_node;
};

/* Per-package fallback cache of matched PCI devices */
struct isst_if_pkg_info {
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
};

static struct isst_if_cpu_info *isst_cpu_info;	/* indexed by logical CPU */
static struct isst_if_pkg_info *isst_pkg_info;	/* indexed by package id */

/* Number of PCI domains (segments) scanned when matching a device */
#define ISST_MAX_PCI_DOMAINS	8
298
/*
 * Scan all PCI domains for the device at (cached bus for @bus_no, @dev,
 * @fn) whose NUMA node matches @cpu's. A NUMA match is also cached per
 * package in isst_pkg_info for reuse by sibling CPUs.
 *
 * NOTE(review): pci_get_domain_bus_and_slot() returns a referenced
 * pci_dev; references taken on candidates that are not ultimately used
 * appear never to be dropped — confirm whether this is intentional
 * (devices are cached for the driver's lifetime).
 */
static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *matched_pci_dev = NULL;
	struct pci_dev *pci_dev = NULL;
	int no_matches = 0, pkg_id;
	int i, bus_number;

	/* Validate indices before using them as array subscripts */
	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pkg_id = topology_physical_package_id(cpu);

	/* Hardware bus number cached from MSR_CPU_BUS_NUMBER; -1 if unknown */
	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
	if (bus_number < 0)
		return NULL;

	for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) {
		struct pci_dev *_pci_dev;
		int node;

		_pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn));
		if (!_pci_dev)
			continue;

		/* Remember the first hit in case NUMA info is unusable */
		++no_matches;
		if (!matched_pci_dev)
			matched_pci_dev = _pci_dev;

		node = dev_to_node(&_pci_dev->dev);
		if (node == NUMA_NO_NODE) {
			pr_info("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
				cpu, bus_no, dev, fn);
			continue;
		}

		if (node == isst_cpu_info[cpu].numa_node) {
			/* Cache the NUMA match for the whole package */
			isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;

			pci_dev = _pci_dev;
			break;
		}
	}

	/*
	 * If there is no numa matched pci_dev, then there can be following cases:
	 * 1. CONFIG_NUMA is not defined: In this case if there is only single device
	 *    match, then we don't need numa information. Simply return last match.
	 *    Otherwise return NULL.
	 * 2. NUMA information is not exposed via _SEG method. In this case it is similar
	 *    to case 1.
	 * 3. Numa information doesn't match with CPU numa node and more than one match
	 *    return NULL.
	 */
	if (!pci_dev && no_matches == 1)
		pci_dev = matched_pci_dev;

	/* Return pci_dev pointer for any matched CPU in the package */
	if (!pci_dev)
		pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];

	return pci_dev;
}
362
/**
 * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
 * @cpu: Logical CPU number.
 * @bus_no: The bus number assigned by the hardware.
 * @dev: The device number assigned by the hardware.
 * @fn: The function number assigned by the hardware.
 *
 * Using cached bus information, find out the PCI device for a bus number,
 * device and function.
 *
 * Return: Return pci_dev pointer or NULL.
 */
struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *pci_dev;

	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	/* Fast path: device cached at CPU-online time, if the devfn matches */
	pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];

	if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
		return pci_dev;

	/* Slow path: scan PCI domains for a NUMA/package match */
	return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
}
EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
391
/*
 * CPU hotplug online callback: cache the per-CPU bus numbers, PCI
 * devices and PUNIT CPU id, then replay stored whitelisted MSRs.
 * Presumably runs on the CPU coming online (CPUHP_AP_ONLINE_DYN), which
 * is why plain rdmsrl_safe() is used — confirm.
 */
static int isst_if_cpu_online(unsigned int cpu)
{
	u64 data;
	int ret;

	isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);

	ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
	if (ret) {
		/* This is not a fatal error on MSR mailbox only I/F */
		isst_cpu_info[cpu].bus_info[0] = -1;
		isst_cpu_info[cpu].bus_info[1] = -1;
	} else {
		/* MSR_CPU_BUS_NUMBER packs one bus number per byte */
		isst_cpu_info[cpu].bus_info[0] = data & 0xff;
		isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
		isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
		isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
	}

	ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
	if (ret) {
		isst_cpu_info[cpu].punit_cpu_id = -1;
		return ret;
	}
	isst_cpu_info[cpu].punit_cpu_id = data;

	/* Re-write whitelisted MSRs user space had set on this CPU */
	isst_restore_msr_local(cpu);

	return 0;
}
422
423 static int isst_if_online_id;
424
isst_if_cpu_info_init(void)425 static int isst_if_cpu_info_init(void)
426 {
427 int ret;
428
429 isst_cpu_info = kcalloc(num_possible_cpus(),
430 sizeof(*isst_cpu_info),
431 GFP_KERNEL);
432 if (!isst_cpu_info)
433 return -ENOMEM;
434
435 isst_pkg_info = kcalloc(topology_max_packages(),
436 sizeof(*isst_pkg_info),
437 GFP_KERNEL);
438 if (!isst_pkg_info) {
439 kfree(isst_cpu_info);
440 return -ENOMEM;
441 }
442
443 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
444 "platform/x86/isst-if:online",
445 isst_if_cpu_online, NULL);
446 if (ret < 0) {
447 kfree(isst_pkg_info);
448 kfree(isst_cpu_info);
449 return ret;
450 }
451
452 isst_if_online_id = ret;
453
454 return 0;
455 }
456
isst_if_cpu_info_exit(void)457 static void isst_if_cpu_info_exit(void)
458 {
459 cpuhp_remove_state(isst_if_online_id);
460 kfree(isst_pkg_info);
461 kfree(isst_cpu_info);
462 };
463
/*
 * ioctl helper: translate a logical CPU number into its PUNIT CPU id
 * using the cache built at CPU-online time. Always a read-type command
 * (*write_only = 0), so the result is copied back to user space.
 */
static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_cpu_map *map = (struct isst_if_cpu_map *)cmd_ptr;

	if (map->logical_cpu >= nr_cpu_ids ||
	    map->logical_cpu >= num_possible_cpus())
		return -EINVAL;

	map->physical_cpu = isst_cpu_info[map->logical_cpu].punit_cpu_id;
	*write_only = 0;

	return 0;
}
478
match_punit_msr_white_list(int msr)479 static bool match_punit_msr_white_list(int msr)
480 {
481 int i;
482
483 for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
484 if (punit_msr_white_list[i] == msr)
485 return true;
486 }
487
488 return false;
489 }
490
/*
 * ioctl helper for ISST_IF_MSR_COMMAND: read or write one whitelisted
 * MSR on a given logical CPU. Writes require CAP_SYS_ADMIN and, unless
 * @resume is set (replay path), are cached via isst_store_cmd() so they
 * can be re-applied after suspend/resume.
 */
static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_msr_cmd *msr_cmd;
	int ret;

	msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;

	if (!match_punit_msr_white_list(msr_cmd->msr))
		return -EINVAL;

	if (msr_cmd->logical_cpu >= nr_cpu_ids)
		return -EINVAL;

	if (msr_cmd->read_write) {
		/* Write path: privileged operation */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr,
					 msr_cmd->data);
		*write_only = 1;
		/* Cache for resume replay; cmd 0 marks an MSR-type entry */
		if (!ret && !resume)
			ret = isst_store_cmd(0, msr_cmd->msr,
					     msr_cmd->logical_cpu,
					     0, 0, msr_cmd->data);
	} else {
		u64 data;

		/* Read path: caller copies the updated record back to user */
		ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr, &data);
		if (!ret) {
			msr_cmd->data = data;
			*write_only = 0;
		}
	}


	return ret;
}
530
/*
 * Execute a batch of user-space commands through @cb->cmd_callback.
 *
 * The user buffer begins with a u32 command count; the fixed-size
 * command records start at @cb->offset. Each record is copied in,
 * executed, and — unless the callback marked it write-only — copied
 * back with its result.
 *
 * Return: the number of commands completed when at least one succeeded
 * (a mid-batch failure truncates the batch), otherwise the negative
 * error from the first command.
 */
static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
{
	unsigned char __user *ptr;
	u32 cmd_count;
	u8 *cmd_ptr;
	long ret;
	int i;

	/* Each multi command has u32 command count as the first field */
	if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
		return -EFAULT;

	if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
	if (!cmd_ptr)
		return -ENOMEM;

	/* cb->offset points to start of the command after the command count */
	ptr = argp + cb->offset;

	for (i = 0; i < cmd_count; ++i) {
		int wr_only;

		/* Let the user interrupt a long batch */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
		if (ret)
			break;

		/* Copy the result back unless the command was write-only */
		if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ptr += cb->cmd_size;
	}

	kfree(cmd_ptr);

	/* Partial success reports the completed count, not the error */
	return i ? i : ret;
}
582
/*
 * ioctl dispatcher for /dev/isst_interface. MMIO and mailbox commands
 * go to the registered hardware-interface callbacks; platform info,
 * CPU-id mapping and MSR commands are handled with local callbacks via
 * the multi-command helper. Unknown commands return -ENOTTY.
 */
static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct isst_if_cmd_cb cmd_cb;
	struct isst_if_cmd_cb *cb;
	long ret = -ENOTTY;

	switch (cmd) {
	case ISST_IF_GET_PLATFORM_INFO:
		ret = isst_if_get_platform_info(argp);
		break;
	case ISST_IF_GET_PHY_ID:
		cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
		cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
		cmd_cb.cmd_callback = isst_if_proc_phyid_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	case ISST_IF_IO_CMD:
		cb = &punit_callbacks[ISST_IF_DEV_MMIO];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MBOX_COMMAND:
		cb = &punit_callbacks[ISST_IF_DEV_MBOX];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MSR_COMMAND:
		cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
		cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
		cmd_cb.cmd_callback = isst_if_msr_cmd_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	default:
		break;
	}

	return ret;
}
623
/* Lock to prevent module registration when already opened by user space */
static DEFINE_MUTEX(punit_misc_dev_open_lock);
/* Lock to allow one shared misc device for all ISST interfaces */
static DEFINE_MUTEX(punit_misc_dev_reg_lock);
static int misc_usage_count;	/* registered interface providers */
static int misc_device_ret;	/* sticky result of first registration attempt */
static int misc_device_open;	/* open file handles on the misc device */
631
/*
 * Open handler: pin every registered interface provider with a module
 * reference so none can unload while user space holds the device open.
 * On failure, the references already taken are rolled back and the open
 * fails with -ENODEV.
 */
static int isst_if_open(struct inode *inode, struct file *file)
{
	int i, ret = 0;

	/* Fail open, if a module is going away */
	mutex_lock(&punit_misc_dev_open_lock);
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered && !try_module_get(cb->owner)) {
			ret = -ENODEV;
			break;
		}
	}
	if (ret) {
		int j;

		/* Drop the references taken before the failure point */
		for (j = 0; j < i; ++j) {
			struct isst_if_cmd_cb *cb;

			cb = &punit_callbacks[j];
			if (cb->registered)
				module_put(cb->owner);
		}
	} else {
		misc_device_open++;
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return ret;
}
663
/*
 * Release handler: drop the open count and the module references taken
 * in isst_if_open().
 *
 * NOTE(review): the name is misspelled ("relase"); kept as-is since it
 * is wired into isst_if_char_driver_ops below.
 */
static int isst_if_relase(struct inode *inode, struct file *f)
{
	int i;

	mutex_lock(&punit_misc_dev_open_lock);
	misc_device_open--;
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered)
			module_put(cb->owner);
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return 0;
}
680
/* File operations for the shared /dev/isst_interface character device */
static const struct file_operations isst_if_char_driver_ops = {
	.open = isst_if_open,
	.unlocked_ioctl = isst_if_def_ioctl,
	.release = isst_if_relase,
};

/* Single misc device shared by all ISST interface providers */
static struct miscdevice isst_if_char_driver = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "isst_interface",
	.fops = &isst_if_char_driver_ops,
};
692
/*
 * Register the shared misc device on first use; later callers only bump
 * the usage count. A failed first attempt is sticky: misc_device_ret is
 * returned to every subsequent caller without retrying.
 */
static int isst_misc_reg(void)
{
	mutex_lock(&punit_misc_dev_reg_lock);
	if (misc_device_ret)
		goto unlock_exit;

	if (!misc_usage_count) {
		misc_device_ret = isst_if_cpu_info_init();
		if (misc_device_ret)
			goto unlock_exit;

		misc_device_ret = misc_register(&isst_if_char_driver);
		if (misc_device_ret) {
			isst_if_cpu_info_exit();
			goto unlock_exit;
		}
	}
	misc_usage_count++;

unlock_exit:
	mutex_unlock(&punit_misc_dev_reg_lock);

	return misc_device_ret;
}
717
/*
 * Drop one usage reference; on the last one — and only if registration
 * had succeeded — deregister the misc device and free the CPU info.
 */
static void isst_misc_unreg(void)
{
	mutex_lock(&punit_misc_dev_reg_lock);
	if (misc_usage_count)
		misc_usage_count--;
	if (!misc_usage_count && !misc_device_ret) {
		misc_deregister(&isst_if_char_driver);
		isst_if_cpu_info_exit();
	}
	mutex_unlock(&punit_misc_dev_reg_lock);
}
729
/**
 * isst_if_cdev_register() - Register callback for IOCTL
 * @device_type: The device type this callback handling.
 * @cb: Callback structure.
 *
 * This function registers a callback to device type. On very first call
 * it will register a misc device, which is used for user kernel interface.
 * Other calls simply increment ref count. Registry will fail, if the user
 * already opened misc device for operation. Also if the misc device
 * creation failed, then it will not try again and all callers will get
 * failure code.
 *
 * Return: Return the return value from the misc creation device or -EINVAL
 * for unsupported device type.
 */
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
	int ret;

	if (device_type >= ISST_IF_DEV_MAX)
		return -EINVAL;

	mutex_lock(&punit_misc_dev_open_lock);
	/* Device is already open, we don't want to add new callbacks */
	if (misc_device_open) {
		mutex_unlock(&punit_misc_dev_open_lock);
		return -EAGAIN;
	}
	/* Publish the callback before marking it registered */
	memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
	punit_callbacks[device_type].registered = 1;
	mutex_unlock(&punit_misc_dev_open_lock);

	ret = isst_misc_reg();
	if (ret) {
		/*
		 * No need of mutex as the misc device register failed
		 * as no one can open device yet. Hence no contention.
		 */
		punit_callbacks[device_type].registered = 0;
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
774
/**
 * isst_if_cdev_unregister() - Unregister callback for IOCTL
 * @device_type: The device type to unregister.
 *
 * This function unregisters the previously registered callback. If this
 * is the last callback unregistering, then misc device is removed.
 *
 * Return: None.
 */
void isst_if_cdev_unregister(int device_type)
{
	isst_misc_unreg();
	mutex_lock(&punit_misc_dev_open_lock);
	punit_callbacks[device_type].registered = 0;
	/* The mailbox provider owns the replay cache; drop it with it */
	if (device_type == ISST_IF_DEV_MBOX)
		isst_delete_hash();
	mutex_unlock(&punit_misc_dev_open_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
794
795 MODULE_LICENSE("GPL v2");
796