Lines matching references to 'hc' (the hpet_channel pointer used throughout the x86 HPET driver):
207 struct hpet_channel *hc = hpet_base.channels + i; in hpet_reserve_platform_timers() local
210 hd.hd_irq[i] = hc->irq; in hpet_reserve_platform_timers()
212 switch (hc->mode) { in hpet_reserve_platform_timers()
215 hc->mode = HPET_MODE_DEVICE; in hpet_reserve_platform_timers()
219 hpet_reserve_timer(&hd, hc->num); in hpet_reserve_platform_timers()
232 struct hpet_channel *hc = hpet_base.channels + i; in hpet_select_device_channel() local
235 if (hc->mode == HPET_MODE_UNUSED) { in hpet_select_device_channel()
236 hc->mode = HPET_MODE_DEVICE; in hpet_select_device_channel()
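
The fragments above come from the channel-reservation path: hpet_reserve_platform_timers() walks every comparator, records its IRQ, and hands anything still in HPET_MODE_UNUSED over to the /dev/hpet driver by switching it to HPET_MODE_DEVICE, while hpet_select_device_channel() claims a single unused channel the same way. A minimal sketch of that loop, reconstructed from the fragments; struct hpet_channel, hpet_base and the HPET_MODE_* constants are defined elsewhere in the driver, and field names such as nr_channels are assumptions:

	for (i = 0; i < hpet_base.nr_channels; i++) {
		struct hpet_channel *hc = hpet_base.channels + i;

		hd.hd_irq[i] = hc->irq;

		switch (hc->mode) {
		case HPET_MODE_UNUSED:
			/* Unclaimed comparator: give it to /dev/hpet */
			hc->mode = HPET_MODE_DEVICE;
			fallthrough;
		case HPET_MODE_DEVICE:
			hpet_reserve_timer(&hd, hc->num);
			break;
		default:
			/* Channels already in legacy or clockevent use stay put */
			break;
		}
	}
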
398 static void hpet_init_clockevent(struct hpet_channel *hc, unsigned int rating) in hpet_init_clockevent() argument
400 struct clock_event_device *evt = &hc->evt; in hpet_init_clockevent()
403 evt->irq = hc->irq; in hpet_init_clockevent()
404 evt->name = hc->name; in hpet_init_clockevent()
405 evt->cpumask = cpumask_of(hc->cpu); in hpet_init_clockevent()
411 if (hc->boot_cfg & HPET_TN_PERIODIC) { in hpet_init_clockevent()
417 static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc) in hpet_legacy_clockevent_register() argument
423 hc->cpu = boot_cpu_data.cpu_index; in hpet_legacy_clockevent_register()
424 strncpy(hc->name, "hpet", sizeof(hc->name)); in hpet_legacy_clockevent_register()
425 hpet_init_clockevent(hc, 50); in hpet_legacy_clockevent_register()
427 hc->evt.tick_resume = hpet_clkevt_legacy_resume; in hpet_legacy_clockevent_register()
457 hc->evt.features |= CLOCK_EVT_FEAT_PERIODIC; in hpet_legacy_clockevent_register()
458 hc->evt.set_state_periodic = hpet_clkevt_set_state_periodic; in hpet_legacy_clockevent_register()
463 clockevents_config_and_register(&hc->evt, hpet_freq, in hpet_legacy_clockevent_register()
465 global_clock_event = &hc->evt; in hpet_legacy_clockevent_register()
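
The clockevent fragments split the work in two: hpet_init_clockevent() fills the channel's embedded clock_event_device from the channel state (IRQ, name, target CPU) and only advertises CLOCK_EVT_FEAT_PERIODIC when the comparator's boot configuration has HPET_TN_PERIODIC set, whereas hpet_legacy_clockevent_register() binds the legacy channel to the boot CPU, sets that feature unconditionally, and registers the result as the global clock event device. A condensed sketch along those lines; the oneshot/shutdown callbacks are omitted and the min/max delta arguments are assumptions:

	static void hpet_init_clockevent(struct hpet_channel *hc, unsigned int rating)
	{
		struct clock_event_device *evt = &hc->evt;

		evt->rating  = rating;
		evt->irq     = hc->irq;
		evt->name    = hc->name;
		evt->cpumask = cpumask_of(hc->cpu);

		/* Periodic mode only if the comparator advertises it at boot */
		if (hc->boot_cfg & HPET_TN_PERIODIC) {
			evt->features           |= CLOCK_EVT_FEAT_PERIODIC;
			evt->set_state_periodic  = hpet_clkevt_set_state_periodic;
		}
	}

	static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc)
	{
		/* The legacy channel is bound to the boot CPU and named "hpet" */
		hc->cpu = boot_cpu_data.cpu_index;
		strncpy(hc->name, "hpet", sizeof(hc->name));
		hpet_init_clockevent(hc, 50);

		hc->evt.tick_resume = hpet_clkevt_legacy_resume;

		/* Unlike hpet_init_clockevent(), the legacy path enables
		 * periodic mode unconditionally, as the fragments above show. */
		hc->evt.features           |= CLOCK_EVT_FEAT_PERIODIC;
		hc->evt.set_state_periodic  = hpet_clkevt_set_state_periodic;

		clockevents_config_and_register(&hc->evt, hpet_freq,
						HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
		global_clock_event = &hc->evt;
	}
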
475 struct hpet_channel *hc = irq_data_get_irq_handler_data(data); in hpet_msi_unmask() local
478 cfg = hpet_readl(HPET_Tn_CFG(hc->num)); in hpet_msi_unmask()
480 hpet_writel(cfg, HPET_Tn_CFG(hc->num)); in hpet_msi_unmask()
485 struct hpet_channel *hc = irq_data_get_irq_handler_data(data); in hpet_msi_mask() local
488 cfg = hpet_readl(HPET_Tn_CFG(hc->num)); in hpet_msi_mask()
490 hpet_writel(cfg, HPET_Tn_CFG(hc->num)); in hpet_msi_mask()
493 static void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg) in hpet_msi_write() argument
495 hpet_writel(msg->data, HPET_Tn_ROUTE(hc->num)); in hpet_msi_write()
496 hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hc->num) + 4); in hpet_msi_write()
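
The MSI helpers operate directly on the per-comparator register block: mask/unmask do a read-modify-write of HPET_Tn_CFG(hc->num), and hpet_msi_write() programs the FSB route register with the MSI payload. A sketch of that trio; the fragments above only show the register reads and writes, so the HPET_TN_ENABLE/HPET_TN_FSB bit manipulation in the middle is an assumption:

	static void hpet_msi_unmask(struct irq_data *data)
	{
		struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
		unsigned int cfg;

		cfg = hpet_readl(HPET_Tn_CFG(hc->num));
		cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
		hpet_writel(cfg, HPET_Tn_CFG(hc->num));
	}

	static void hpet_msi_mask(struct irq_data *data)
	{
		struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
		unsigned int cfg;

		cfg = hpet_readl(HPET_Tn_CFG(hc->num));
		cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
		hpet_writel(cfg, HPET_Tn_CFG(hc->num));
	}

	static void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg)
	{
		/* Low dword: MSI data; next dword of the route register: address */
		hpet_writel(msg->data, HPET_Tn_ROUTE(hc->num));
		hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hc->num) + 4);
	}
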
595 static int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc, in hpet_assign_irq() argument
602 info.data = hc; in hpet_assign_irq()
611 struct hpet_channel *hc = clockevent_to_channel(evt); in hpet_clkevt_msi_resume() local
612 struct irq_data *data = irq_get_irq_data(hc->irq); in hpet_clkevt_msi_resume()
617 hpet_msi_write(hc, &msg); in hpet_clkevt_msi_resume()
624 struct hpet_channel *hc = data; in hpet_msi_interrupt_handler() local
625 struct clock_event_device *evt = &hc->evt; in hpet_msi_interrupt_handler()
628 pr_info("Spurious interrupt HPET channel %d\n", hc->num); in hpet_msi_interrupt_handler()
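
On the interrupt side, hpet_assign_irq() stores the channel as the handler data for the allocated interrupt, hpet_clkevt_msi_resume() replays the cached MSI message through hpet_msi_write() after resume, and the handler itself recovers the channel from its dev_id and reports the spurious case seen above when no clockevent handler has been installed yet. A sketch of the handler:

	static irqreturn_t hpet_msi_interrupt_handler(int irq, void *data)
	{
		struct hpet_channel *hc = data;
		struct clock_event_device *evt = &hc->evt;

		if (!evt->event_handler) {
			/* No consumer registered yet: nothing useful to do */
			pr_info("Spurious interrupt HPET channel %d\n", hc->num);
			return IRQ_HANDLED;
		}

		evt->event_handler(evt);
		return IRQ_HANDLED;
	}
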
636 static int hpet_setup_msi_irq(struct hpet_channel *hc) in hpet_setup_msi_irq() argument
638 if (request_irq(hc->irq, hpet_msi_interrupt_handler, in hpet_setup_msi_irq()
640 hc->name, hc)) in hpet_setup_msi_irq()
643 disable_irq(hc->irq); in hpet_setup_msi_irq()
644 irq_set_affinity(hc->irq, cpumask_of(hc->cpu)); in hpet_setup_msi_irq()
645 enable_irq(hc->irq); in hpet_setup_msi_irq()
647 pr_debug("%s irq %u for MSI\n", hc->name, hc->irq); in hpet_setup_msi_irq()
653 static void init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu) in init_one_hpet_msi_clockevent() argument
655 struct clock_event_device *evt = &hc->evt; in init_one_hpet_msi_clockevent()
657 hc->cpu = cpu; in init_one_hpet_msi_clockevent()
658 per_cpu(cpu_hpet_channel, cpu) = hc; in init_one_hpet_msi_clockevent()
659 hpet_setup_msi_irq(hc); in init_one_hpet_msi_clockevent()
661 hpet_init_clockevent(hc, 110); in init_one_hpet_msi_clockevent()
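
hpet_setup_msi_irq() attaches that handler to the channel's IRQ and pins the interrupt to the channel's CPU before re-enabling it; init_one_hpet_msi_clockevent() then records the channel in the per-CPU slot and registers it with rating 110, above the legacy device's 50, so the per-CPU MSI channels win on CPUs that have one. A condensed sketch; the request_irq() flags and the registration deltas are assumptions:

	static int hpet_setup_msi_irq(struct hpet_channel *hc)
	{
		if (request_irq(hc->irq, hpet_msi_interrupt_handler,
				IRQF_TIMER | IRQF_NOBALANCING, hc->name, hc))
			return -1;

		/* Pin the interrupt to the channel's CPU before enabling it */
		disable_irq(hc->irq);
		irq_set_affinity(hc->irq, cpumask_of(hc->cpu));
		enable_irq(hc->irq);

		pr_debug("%s irq %u for MSI\n", hc->name, hc->irq);
		return 0;
	}

	static void init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu)
	{
		struct clock_event_device *evt = &hc->evt;

		hc->cpu = cpu;
		per_cpu(cpu_hpet_channel, cpu) = hc;
		hpet_setup_msi_irq(hc);

		hpet_init_clockevent(hc, 110);
		evt->tick_resume = hpet_clkevt_msi_resume;

		clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
						0x7FFFFFFF);
	}
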
673 struct hpet_channel *hc = hpet_base.channels + i; in hpet_get_unused_clockevent() local
675 if (hc->mode != HPET_MODE_CLOCKEVT || hc->in_use) in hpet_get_unused_clockevent()
677 hc->in_use = 1; in hpet_get_unused_clockevent()
678 return hc; in hpet_get_unused_clockevent()
685 struct hpet_channel *hc = hpet_get_unused_clockevent(); in hpet_cpuhp_online() local
687 if (hc) in hpet_cpuhp_online()
688 init_one_hpet_msi_clockevent(hc, cpu); in hpet_cpuhp_online()
694 struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu); in hpet_cpuhp_dead() local
696 if (!hc) in hpet_cpuhp_dead()
698 free_irq(hc->irq, hc); in hpet_cpuhp_dead()
699 hc->in_use = 0; in hpet_cpuhp_dead()
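
The CPU hotplug callbacks manage ownership of those channels: an onlining CPU grabs any channel that is in HPET_MODE_CLOCKEVT and not yet in_use, and a departing CPU frees the IRQ and returns the channel to the pool. A sketch of that bookkeeping (any serialization around the scan is omitted):

	static struct hpet_channel *hpet_get_unused_clockevent(void)
	{
		int i;

		for (i = 0; i < hpet_base.nr_channels; i++) {
			struct hpet_channel *hc = hpet_base.channels + i;

			if (hc->mode != HPET_MODE_CLOCKEVT || hc->in_use)
				continue;
			hc->in_use = 1;
			return hc;
		}
		return NULL;
	}

	static int hpet_cpuhp_online(unsigned int cpu)
	{
		struct hpet_channel *hc = hpet_get_unused_clockevent();

		if (hc)
			init_one_hpet_msi_clockevent(hc, cpu);
		return 0;
	}

	static int hpet_cpuhp_dead(unsigned int cpu)
	{
		struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu);

		if (!hc)
			return 0;
		free_irq(hc->irq, hc);
		hc->in_use = 0;
		per_cpu(cpu_hpet_channel, cpu) = NULL;
		return 0;
	}
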
721 struct hpet_channel *hc = hpet_base.channels + i; in hpet_select_clockevents() local
724 if (hc->mode != HPET_MODE_UNUSED) in hpet_select_clockevents()
728 if (!(hc->boot_cfg & HPET_TN_FSB_CAP)) in hpet_select_clockevents()
731 sprintf(hc->name, "hpet%d", i); in hpet_select_clockevents()
733 irq = hpet_assign_irq(hpet_domain, hc, hc->num); in hpet_select_clockevents()
737 hc->irq = irq; in hpet_select_clockevents()
738 hc->mode = HPET_MODE_CLOCKEVT; in hpet_select_clockevents()
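
hpet_select_clockevents() decides which channels can serve as those per-CPU clockevents: only comparators still unused and whose boot configuration advertises FSB (MSI) delivery via HPET_TN_FSB_CAP qualify; each gets a "hpetN" name, an interrupt from the HPET irqdomain via hpet_assign_irq(), and its mode set to HPET_MODE_CLOCKEVT. A sketch of the selection loop (the irqdomain setup preceding it is elided):

	for (i = 0; i < hpet_base.nr_channels; i++) {
		struct hpet_channel *hc = hpet_base.channels + i;
		int irq;

		/* Only channels nobody else has claimed ... */
		if (hc->mode != HPET_MODE_UNUSED)
			continue;

		/* ... and only those capable of FSB (MSI) delivery */
		if (!(hc->boot_cfg & HPET_TN_FSB_CAP))
			continue;

		sprintf(hc->name, "hpet%d", i);

		irq = hpet_assign_irq(hpet_domain, hc, hc->num);
		if (irq <= 0)
			continue;

		hc->irq = irq;
		hc->mode = HPET_MODE_CLOCKEVT;
	}
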
1004 struct hpet_channel *hc; in hpet_enable() local
1050 hc = kcalloc(channels, sizeof(*hc), GFP_KERNEL); in hpet_enable()
1051 if (!hc) { in hpet_enable()
1055 hpet_base.channels = hc; in hpet_enable()
1067 for (i = 0; i < channels; i++, hc++) { in hpet_enable()
1068 hc->num = i; in hpet_enable()
1071 hc->boot_cfg = cfg; in hpet_enable()
1073 hc->irq = irq; in hpet_enable()
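
Finally, hpet_enable() creates the channel array in the first place: one hpet_channel is allocated per comparator with kcalloc(), and each entry records its index, the boot-time configuration word that the capability checks above rely on, and its routed IRQ. A sketch of that loop; the error path, the read of the comparator count, and the decoding of the IRQ from the config word are elided:

	hc = kcalloc(channels, sizeof(*hc), GFP_KERNEL);
	if (!hc)
		return;		/* the real code disables HPET on allocation failure */

	hpet_base.channels    = hc;
	hpet_base.nr_channels = channels;

	for (i = 0; i < channels; i++, hc++) {
		hc->num = i;

		/* Remember the boot-time config word; HPET_TN_PERIODIC and
		 * HPET_TN_FSB_CAP are checked against it later. */
		cfg = hpet_readl(HPET_Tn_CFG(i));
		hc->boot_cfg = cfg;

		hc->irq = irq;	/* irq decoded from cfg's routing field (elided) */
	}
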