
Searched refs:cpu (Results 1 – 25 of 362) sorted by relevance


/linux-2.4.37.9/arch/ppc/kernel/
temp.c
54 void set_thresholds(unsigned long cpu) in set_thresholds() argument
61 mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); in set_thresholds()
66 mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); in set_thresholds()
69 mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID); in set_thresholds()
70 mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V); in set_thresholds()
74 void TAUupdate(int cpu) in TAUupdate() argument
86 if (tau[cpu].low >= step_size){ in TAUupdate()
87 tau[cpu].low -= step_size; in TAUupdate()
88 tau[cpu].high -= (step_size - window_expand); in TAUupdate()
90 tau[cpu].grew = 1; in TAUupdate()
[all …]
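
The temp.c matches are the PowerPC Thermal Assist Unit (TAU) driver: set_thresholds() programs the THRM1/THRM2 comparison registers from a per-CPU low/high window, and the TAUupdate() branch shown slides that window down by step_size while widening it by window_expand once the temperature falls below the low threshold. A minimal stand-alone sketch of just that window bookkeeping; the struct and constants here are illustrative assumptions, and the SPR writes are left out:

#include <stdio.h>

/* illustrative stand-in for the kernel's per-CPU tau[] entry */
struct tau_state {
	int low;	/* lower comparison threshold */
	int high;	/* upper comparison threshold */
	int grew;	/* set when the window was widened */
};

static const int step_size = 2;		/* how far the window slides per update */
static const int window_expand = 1;	/* extra widening applied while sliding */

/* the "temperature fell below the low threshold" branch from TAUupdate() */
static void tau_slide_down(struct tau_state *t)
{
	if (t->low >= step_size) {
		t->low  -= step_size;
		t->high -= (step_size - window_expand);
		t->grew  = 1;		/* window is now window_expand wider */
	}
	/* set_thresholds() would now rewrite THRM1/THRM2 from t->low/t->high */
}

int main(void)
{
	struct tau_state t = { .low = 40, .high = 44, .grew = 0 };

	tau_slide_down(&t);
	printf("low=%d high=%d width=%d grew=%d\n",
	       t.low, t.high, t.high - t.low, t.grew);
	return 0;
}
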
/linux-2.4.37.9/include/asm-ppc/
hardirq.h
27 #define last_jiffy_stamp(cpu) __IRQ_STAT((cpu), __last_jiffy_stamp) argument
28 #define heartbeat_count(cpu) __IRQ_STAT((cpu), __heartbeat_count) argument
29 #define heartbeat_reset(cpu) __IRQ_STAT((cpu), __heartbeat_reset) argument
41 #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0) argument
42 #define hardirq_endlock(cpu) do { } while (0) argument
44 #define hardirq_enter(cpu) (local_irq_count(cpu)++) argument
45 #define hardirq_exit(cpu) (local_irq_count(cpu)--) argument
57 static inline void release_irqlock(int cpu) in release_irqlock() argument
60 if (global_irq_holder == (unsigned char) cpu) { in release_irqlock()
66 static inline void hardirq_enter(int cpu) in hardirq_enter() argument
[all …]
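
The asm-ppc hardirq.h matches are the 2.4-era hard-IRQ bookkeeping: interrupt entry and exit just bump a per-CPU nesting counter, hardirq_trylock() is a test that the counter is zero, and release_irqlock() drops the global IRQ lock if this CPU happens to hold it. A small user-space model of that counter-plus-holder pattern; irq_stat, NO_PROC_ID and global_irq_holder here are mock stand-ins, not the kernel's definitions:

#include <stdio.h>

#define NR_CPUS    4
#define NO_PROC_ID 0xff

/* mock per-CPU IRQ statistics, shaped like the kernel's irq_cpustat_t */
static struct { unsigned int __local_irq_count; } irq_stat[NR_CPUS];
static unsigned char global_irq_holder = NO_PROC_ID;

#define local_irq_count(cpu)	(irq_stat[(cpu)].__local_irq_count)
#define hardirq_trylock(cpu)	(local_irq_count(cpu) == 0)
#define hardirq_enter(cpu)	(local_irq_count(cpu)++)
#define hardirq_exit(cpu)	(local_irq_count(cpu)--)

/* drop the global IRQ lock if this CPU is the current holder */
static void release_irqlock(int cpu)
{
	if (global_irq_holder == (unsigned char) cpu)
		global_irq_holder = NO_PROC_ID;
}

int main(void)
{
	int cpu = 0;

	hardirq_enter(cpu);
	printf("in irq:  trylock=%d\n", hardirq_trylock(cpu));	/* 0: nested */
	hardirq_exit(cpu);
	release_irqlock(cpu);
	printf("out:     trylock=%d\n", hardirq_trylock(cpu));	/* 1: free */
	return 0;
}

The same enter/exit counter appears in most of the other hardirq.h hits below; only the names (hardirq_enter vs irq_enter) and the SMP locking details differ per architecture.
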
/linux-2.4.37.9/include/asm-mips/
mmu_context.h
34 #define cpu_context(cpu, mm) ((mm)->context[cpu]) argument
35 #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) argument
36 #define asid_cache(cpu) (cpu_data[cpu].asid_cache) argument
55 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu) in enter_lazy_tlb() argument
67 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) in get_new_mmu_context() argument
69 unsigned long asid = asid_cache(cpu); in get_new_mmu_context()
77 cpu_context(cpu, mm) = asid_cache(cpu) = asid; in get_new_mmu_context()
95 struct task_struct *tsk, unsigned cpu) in switch_mm() argument
102 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) in switch_mm()
103 get_new_mmu_context(next, cpu); in switch_mm()
[all …]
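
The asm-mips mmu_context.h matches are ASID (address-space ID) management: every mm caches, per CPU, the ASID it was last assigned, and switch_mm() only calls get_new_mmu_context() when that cached value belongs to an older ASID generation (the version bits above ASID_MASK differ). A rough stand-alone model of that generation check; the mm struct, constants and single-CPU asid_cache are simplified assumptions, and the real wrap path also flushes the TLB:

#include <stdio.h>

#define ASID_MASK		0xffUL		/* low bits: ASID handed to the TLB */
#define ASID_VERSION_MASK	(~0xffUL)	/* high bits: generation number */
#define ASID_FIRST_VERSION	0x100UL

struct mm { unsigned long context; };		/* stands in for mm->context[cpu] */

static unsigned long asid_cache = ASID_FIRST_VERSION;	/* per-CPU in the kernel */

static void get_new_mmu_context(struct mm *mm)
{
	unsigned long asid = asid_cache + 1;

	if (!(asid & ASID_MASK))			/* ASIDs exhausted: new generation */
		asid = (asid & ASID_VERSION_MASK) + 1;	/* (TLB flush omitted here) */
	mm->context = asid_cache = asid;
}

static void switch_mm(struct mm *next)
{
	/* stale generation => the cached ASID may have been reused, get a fresh one */
	if ((next->context ^ asid_cache) & ASID_VERSION_MASK)
		get_new_mmu_context(next);
	/* next->context & ASID_MASK is what would be written to the EntryHi ASID field */
}

int main(void)
{
	struct mm a = { 0 }, b = { 0 };

	switch_mm(&a);
	switch_mm(&b);
	printf("a asid=%lu b asid=%lu\n", a.context & ASID_MASK, b.context & ASID_MASK);
	return 0;
}

The asm-mips64 mmu_context.h group below is the same scheme for the 64-bit tree.
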
hardirq.h
38 #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0) argument
39 #define hardirq_endlock(cpu) do { } while (0) argument
41 #define irq_enter(cpu, irq) (local_irq_count(cpu)++) argument
42 #define irq_exit(cpu, irq) (local_irq_count(cpu)--) argument
65 static inline void release_irqlock(int cpu) in release_irqlock() argument
68 if (global_irq_holder == cpu) { in release_irqlock()
74 static inline int hardirq_trylock(int cpu) in hardirq_trylock() argument
76 return !local_irq_count(cpu) && !spin_is_locked(&global_irq_lock); in hardirq_trylock()
79 #define hardirq_endlock(cpu) do { } while (0) argument
81 static inline void irq_enter(int cpu, int irq) in irq_enter() argument
[all …]
/linux-2.4.37.9/include/asm-mips64/
mmu_context.h
34 #define cpu_context(cpu, mm) ((mm)->context[cpu]) argument
35 #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) argument
36 #define asid_cache(cpu) (cpu_data[cpu].asid_cache) argument
55 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu) in enter_lazy_tlb() argument
67 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) in get_new_mmu_context() argument
69 unsigned long asid = asid_cache(cpu); in get_new_mmu_context()
77 cpu_context(cpu, mm) = asid_cache(cpu) = asid; in get_new_mmu_context()
95 struct task_struct *tsk, unsigned cpu) in switch_mm() argument
102 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) in switch_mm()
103 get_new_mmu_context(next, cpu); in switch_mm()
[all …]
hardirq.h
37 #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0) argument
38 #define hardirq_endlock(cpu) do { } while (0) argument
40 #define irq_enter(cpu, irq) (local_irq_count(cpu)++) argument
41 #define irq_exit(cpu, irq) (local_irq_count(cpu)--) argument
64 static inline void release_irqlock(int cpu) in release_irqlock() argument
67 if (global_irq_holder == cpu) { in release_irqlock()
73 static inline int hardirq_trylock(int cpu) in hardirq_trylock() argument
75 return !local_irq_count(cpu) && !spin_is_locked(&global_irq_lock); in hardirq_trylock()
78 #define hardirq_endlock(cpu) do { } while (0) argument
80 static inline void irq_enter(int cpu, int irq) in irq_enter() argument
[all …]
/linux-2.4.37.9/kernel/
softirq.c
53 static inline void wakeup_softirqd(unsigned cpu) in wakeup_softirqd() argument
55 struct task_struct * tsk = ksoftirqd_task(cpu); in wakeup_softirqd()
63 int cpu = smp_processor_id(); in do_softirq() local
73 pending = softirq_pending(cpu); in do_softirq()
82 softirq_pending(cpu) = 0; in do_softirq()
97 pending = softirq_pending(cpu); in do_softirq()
105 wakeup_softirqd(cpu); in do_softirq()
114 inline fastcall void cpu_raise_softirq(unsigned int cpu, unsigned int nr) in cpu_raise_softirq() argument
116 __cpu_raise_softirq(cpu, nr); in cpu_raise_softirq()
127 if (!(local_irq_count(cpu) | local_bh_count(cpu))) in cpu_raise_softirq()
[all …]
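
The kernel/softirq.c matches sketch the 2.4 softirq dispatch loop: snapshot the per-CPU pending mask, clear it, run the handler for every set bit, and if handlers raised new softirqs in the meantime either loop again or wake ksoftirqd to finish the work in process context. A compressed user-space model of that control flow; the mask, vector table and restart limit are mock stand-ins, not the kernel's data structures:

#include <stdio.h>

#define NR_SOFTIRQS	4
#define MAX_RESTART	10	/* bound on how long we loop in "interrupt" context */

static unsigned int softirq_pending;		/* per-CPU bitmask in the kernel */
static void (*softirq_vec[NR_SOFTIRQS])(void);

static void wakeup_softirqd(void)
{
	printf("handing remaining softirqs to ksoftirqd\n");
}

static void raise_softirq(unsigned int nr)
{
	softirq_pending |= 1u << nr;
}

static void do_softirq(void)
{
	unsigned int pending = softirq_pending;
	int restart = MAX_RESTART;

	while (pending) {
		unsigned int nr;

		softirq_pending = 0;		/* handlers may set new bits */
		for (nr = 0; nr < NR_SOFTIRQS; nr++)
			if ((pending & (1u << nr)) && softirq_vec[nr])
				softirq_vec[nr]();

		pending = softirq_pending;	/* anything raised while we ran? */
		if (pending && !--restart) {
			wakeup_softirqd();	/* too busy: defer to the thread */
			break;
		}
	}
}

static void demo_handler(void) { printf("softirq handler ran\n"); }

int main(void)
{
	softirq_vec[0] = demo_handler;
	raise_softirq(0);
	do_softirq();
	return 0;
}
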
/linux-2.4.37.9/include/asm-alpha/
hardirq.h
34 #define irq_attempt(cpu, irq) (__irq_attempt[irq]) argument
36 #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0) argument
37 #define hardirq_endlock(cpu) ((void) 0) argument
39 #define irq_enter(cpu, irq) (local_irq_count(cpu)++) argument
40 #define irq_exit(cpu, irq) (local_irq_count(cpu)--) argument
46 #define irq_attempt(cpu, irq) (cpu_data[cpu].irq_attempt[irq]) argument
65 static inline void release_irqlock(int cpu) in release_irqlock() argument
68 if (global_irq_holder == cpu) { in release_irqlock()
74 static inline void irq_enter(int cpu, int irq) in irq_enter() argument
76 ++local_irq_count(cpu); in irq_enter()
[all …]
/linux-2.4.37.9/include/linux/
irq_cpustat.h
23 #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) argument
25 #define __IRQ_STAT(cpu, member) (irq_stat[((void)(cpu), 0)].member) argument
29 #define softirq_pending(cpu) __IRQ_STAT((cpu), __softirq_pending) argument
30 #define local_irq_count(cpu) __IRQ_STAT((cpu), __local_irq_count) argument
31 #define local_bh_count(cpu) __IRQ_STAT((cpu), __local_bh_count) argument
32 #define syscall_count(cpu) __IRQ_STAT((cpu), __syscall_count) argument
33 #define ksoftirqd_task(cpu) __IRQ_STAT((cpu), __ksoftirqd_task) argument
35 #define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386, ia64 */ argument
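
include/linux/irq_cpustat.h is where most of the per-architecture helpers in this result set bottom out: each named counter is a member of a per-CPU irq_stat slot, and on uniprocessor builds the index collapses to 0 while the (cpu) argument is still evaluated through the ((void)(cpu), 0) comma expression so unused-argument warnings stay away. A small model of both variants; the mock irq_cpustat_t and the MODEL_SMP switch are assumptions for illustration:

#include <stdio.h>

#define NR_CPUS 2

typedef struct {
	unsigned int __softirq_pending;
	unsigned int __local_irq_count;
} irq_cpustat_t;

static irq_cpustat_t irq_stat[NR_CPUS];

#ifdef MODEL_SMP
#define __IRQ_STAT(cpu, member)	(irq_stat[cpu].member)
#else
/* UP: always slot 0, but still "use" cpu so it cannot trigger warnings */
#define __IRQ_STAT(cpu, member)	(irq_stat[((void)(cpu), 0)].member)
#endif

#define softirq_pending(cpu)	__IRQ_STAT((cpu), __softirq_pending)
#define local_irq_count(cpu)	__IRQ_STAT((cpu), __local_irq_count)

int main(void)
{
	softirq_pending(1) = 0x4;	/* lands in slot 1 on SMP, slot 0 on UP */
	local_irq_count(0)++;
	printf("pending: cpu0=%#x cpu1=%#x\n",
	       irq_stat[0].__softirq_pending, irq_stat[1].__softirq_pending);
	return 0;
}

Compile with -DMODEL_SMP to see the per-CPU indexing, and without it to see the UP collapse.
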
/linux-2.4.37.9/include/asm-sparc/
hardirq.h
33 #define irq_enter(cpu, irq) ((void)(irq), local_irq_count(cpu)++) argument
34 #define irq_exit(cpu, irq) ((void)(irq), local_irq_count(cpu)--) argument
37 #define local_irq_count(cpu) (__brlock_array[cpu][BR_GLOBALIRQ_LOCK]) argument
38 #define irq_enter(cpu, irq) br_read_lock(BR_GLOBALIRQ_LOCK) argument
39 #define irq_exit(cpu, irq) br_read_unlock(BR_GLOBALIRQ_LOCK) argument
54 #define hardirq_trylock(cpu) ((void)(cpu), local_irq_count(smp_processor_id()) == 0) argument
55 #define hardirq_endlock(cpu) do { (void)(cpu); } while(0) argument
73 static inline void release_irqlock(int cpu) in release_irqlock() argument
76 if(global_irq_holder == (unsigned char) cpu) { in release_irqlock()
82 static inline int hardirq_trylock(int cpu) in hardirq_trylock() argument
[all …]
/linux-2.4.37.9/include/asm-sparc64/
hardirq.h
33 #define irq_enter(cpu, irq) ((void)(irq), local_irq_count(cpu)++) argument
34 #define irq_exit(cpu, irq) ((void)(irq), local_irq_count(cpu)--) argument
37 #define local_irq_count(cpu) (__brlock_array[cpu][BR_GLOBALIRQ_LOCK]) argument
38 #define irq_enter(cpu, irq) br_read_lock(BR_GLOBALIRQ_LOCK) argument
39 #define irq_exit(cpu, irq) br_read_unlock(BR_GLOBALIRQ_LOCK) argument
54 #define hardirq_trylock(cpu) ((void)(cpu), local_irq_count(smp_processor_id()) == 0) argument
55 #define hardirq_endlock(cpu) do { (void)(cpu); } while(0) argument
73 static inline void release_irqlock(int cpu) in release_irqlock() argument
76 if(global_irq_holder == (unsigned char) cpu) { in release_irqlock()
82 static inline int hardirq_trylock(int cpu) in hardirq_trylock() argument
[all …]
/linux-2.4.37.9/include/asm-ppc64/
hardirq.h
37 #define irq_enter(cpu) (local_irq_count(cpu)++) argument
38 #define irq_exit(cpu) (local_irq_count(cpu)--) argument
41 #define local_irq_count(cpu) (__brlock_array[cpu][BR_GLOBALIRQ_LOCK]) argument
42 #define irq_enter(cpu) br_read_lock(BR_GLOBALIRQ_LOCK) argument
43 #define irq_exit(cpu) br_read_unlock(BR_GLOBALIRQ_LOCK) argument
58 #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0) argument
59 #define hardirq_endlock(cpu) do { } while (0) argument
77 static inline void release_irqlock(int cpu) in release_irqlock() argument
80 if(global_irq_holder == (unsigned char) cpu) { in release_irqlock()
86 static inline int hardirq_trylock(int cpu) in hardirq_trylock() argument
[all …]
/linux-2.4.37.9/arch/alpha/kernel/
irq_smp.c
38 int cpu = smp_processor_id(); in show() local
40 printk("\n%s, CPU %d: %p\n", str, cpu, where); in show()
63 wait_on_irq(int cpu, void *where) in wait_on_irq() argument
75 if (local_bh_count(cpu) in wait_on_irq()
96 if (!local_bh_count(cpu) in wait_on_irq()
106 get_irqlock(int cpu, void* where) in get_irqlock() argument
110 if (cpu == global_irq_holder) in get_irqlock()
122 wait_on_irq(cpu, where); in get_irqlock()
131 global_irq_holder = cpu; in get_irqlock()
138 int cpu = smp_processor_id(); in __global_cli() local
[all …]
/linux-2.4.37.9/include/asm-i386/
hardirq.h
31 #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0) argument
32 #define hardirq_endlock(cpu) do { } while (0) argument
34 #define irq_enter(cpu, irq) (local_irq_count(cpu)++) argument
35 #define irq_exit(cpu, irq) (local_irq_count(cpu)--) argument
57 static inline void release_irqlock(int cpu) in release_irqlock() argument
60 if (global_irq_holder == (unsigned char) cpu) { in release_irqlock()
66 static inline void irq_enter(int cpu, int irq) in irq_enter() argument
68 ++local_irq_count(cpu); in irq_enter()
77 static inline void irq_exit(int cpu, int irq) in irq_exit() argument
79 --local_irq_count(cpu); in irq_exit()
[all …]
/linux-2.4.37.9/include/asm-parisc/
hardirq.h
43 #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0) argument
44 #define hardirq_endlock(cpu) do { } while (0) argument
46 #define irq_enter(cpu, irq) (local_irq_count(cpu)++) argument
47 #define irq_exit(cpu, irq) (local_irq_count(cpu)--) argument
72 static inline void release_irqlock(int cpu) in release_irqlock() argument
75 if (global_irq_holder == (unsigned char) cpu) { in release_irqlock()
81 static inline void irq_enter(int cpu, int irq) in irq_enter() argument
83 ++local_irq_count(cpu); in irq_enter()
89 static inline void irq_exit(int cpu, int irq) in irq_exit() argument
91 --local_irq_count(cpu); in irq_exit()
[all …]
/linux-2.4.37.9/include/asm-x86_64/
hardirq.h
31 #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0) argument
32 #define hardirq_endlock(cpu) do { } while (0) argument
34 #define irq_enter(cpu, irq) (local_irq_count(cpu)++) argument
35 #define irq_exit(cpu, irq) (local_irq_count(cpu)--) argument
57 static inline void release_irqlock(int cpu) in release_irqlock() argument
60 if (global_irq_holder == (unsigned char) cpu) { in release_irqlock()
66 static inline void irq_enter(int cpu, int irq) in irq_enter() argument
68 ++local_irq_count(cpu); in irq_enter()
77 static inline void irq_exit(int cpu, int irq) in irq_exit() argument
79 --local_irq_count(cpu); in irq_exit()
[all …]
/linux-2.4.37.9/include/asm-s390/
hardirq.h
42 #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0) argument
43 #define hardirq_endlock(cpu) do { } while (0) argument
45 #define hardirq_enter(cpu) (local_irq_count(cpu)++) argument
46 #define hardirq_exit(cpu) (local_irq_count(cpu)--) argument
59 static inline void release_irqlock(int cpu) in release_irqlock() argument
62 if (atomic_read(&global_irq_holder) == cpu) { in release_irqlock()
68 static inline void hardirq_enter(int cpu) in hardirq_enter() argument
70 ++local_irq_count(cpu); in hardirq_enter()
74 static inline void hardirq_exit(int cpu) in hardirq_exit() argument
77 --local_irq_count(cpu); in hardirq_exit()
[all …]
/linux-2.4.37.9/include/asm-s390x/
hardirq.h
42 #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0) argument
43 #define hardirq_endlock(cpu) do { } while (0) argument
45 #define hardirq_enter(cpu) (local_irq_count(cpu)++) argument
46 #define hardirq_exit(cpu) (local_irq_count(cpu)--) argument
59 static inline void release_irqlock(int cpu) in release_irqlock() argument
62 if (atomic_read(&global_irq_holder) == cpu) { in release_irqlock()
68 static inline void hardirq_enter(int cpu) in hardirq_enter() argument
70 ++local_irq_count(cpu); in hardirq_enter()
74 static inline void hardirq_exit(int cpu) in hardirq_exit() argument
77 --local_irq_count(cpu); in hardirq_exit()
[all …]
/linux-2.4.37.9/arch/parisc/kernel/
irq_smp.c
36 int cpu = smp_processor_id(); in show() local
38 printk("\n%s, CPU %d: %p\n", str, cpu, where); in show()
51 wait_on_irq(int cpu, void *where) in wait_on_irq() argument
63 if (local_bh_count(cpu) in wait_on_irq()
84 if (!local_bh_count(cpu) in wait_on_irq()
94 get_irqlock(int cpu, void* where) in get_irqlock() argument
98 if (cpu == global_irq_holder) in get_irqlock()
110 wait_on_irq(cpu, where); in get_irqlock()
119 global_irq_holder = cpu; in get_irqlock()
142 int cpu = smp_processor_id(); in __global_cli() local
[all …]
/linux-2.4.37.9/arch/i386/kernel/
smpboot.c
513 int apicid, cpu; in init_cpu_to_apicid() local
519 for (cpu = 0; cpu < NR_CPUS; cpu++) { in init_cpu_to_apicid()
520 cpu_2_physical_apicid[cpu] = BAD_APICID; in init_cpu_to_apicid()
521 cpu_2_logical_apicid[cpu] = BAD_APICID; in init_cpu_to_apicid()
525 static inline void map_cpu_to_boot_apicid(int cpu, int apicid) in map_cpu_to_boot_apicid() argument
532 logical_apicid_2_cpu[apicid] = cpu; in map_cpu_to_boot_apicid()
533 cpu_2_logical_apicid[cpu] = apicid; in map_cpu_to_boot_apicid()
535 physical_apicid_2_cpu[apicid] = cpu; in map_cpu_to_boot_apicid()
536 cpu_2_physical_apicid[cpu] = apicid; in map_cpu_to_boot_apicid()
540 static inline void unmap_cpu_to_boot_apicid(int cpu, int apicid) in unmap_cpu_to_boot_apicid() argument
[all …]
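
The i386 smpboot.c matches are the bookkeeping that ties logical CPU numbers to APIC IDs during secondary-CPU bringup: every slot starts out as BAD_APICID, and map_cpu_to_boot_apicid() fills in both directions of the mapping once a CPU is actually booted. A stripped-down model of those lookup tables; the array sizes and the BAD_APICID value are illustrative assumptions:

#include <stdio.h>

#define NR_CPUS		8
#define MAX_APICID	256
#define BAD_APICID	0xFFu

static unsigned int physical_apicid_2_cpu[MAX_APICID];
static unsigned int cpu_2_physical_apicid[NR_CPUS];

/* mark every mapping as unknown before bringup starts */
static void init_cpu_to_apicid(void)
{
	int apicid, cpu;

	for (apicid = 0; apicid < MAX_APICID; apicid++)
		physical_apicid_2_cpu[apicid] = BAD_APICID;
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpu_2_physical_apicid[cpu] = BAD_APICID;
}

/* record both directions of the mapping for a CPU that just booted */
static void map_cpu_to_boot_apicid(int cpu, int apicid)
{
	physical_apicid_2_cpu[apicid] = cpu;
	cpu_2_physical_apicid[cpu] = apicid;
}

int main(void)
{
	init_cpu_to_apicid();
	map_cpu_to_boot_apicid(1, 0x02);	/* logical CPU 1 has APIC ID 2 */
	printf("cpu 1 -> apicid %#x, apicid 0x02 -> cpu %u\n",
	       cpu_2_physical_apicid[1], physical_apicid_2_cpu[0x02]);
	return 0;
}
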
/linux-2.4.37.9/arch/ia64/kernel/
smpboot.c
424 int timeout, cpu; in do_boot_cpu() local
426 cpu = ++cpucount; in do_boot_cpu()
432 panic("failed fork for CPU %d", cpu); in do_boot_cpu()
440 panic("No idle process for CPU %d", cpu); in do_boot_cpu()
442 task_set_cpu(idle, cpu); /* we schedule the first task manually */ in do_boot_cpu()
444 ia64_cpu_to_sapicid[cpu] = sapicid; in do_boot_cpu()
448 init_tasks[cpu] = idle; in do_boot_cpu()
450 Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); in do_boot_cpu()
452 platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); in do_boot_cpu()
459 if (test_bit(cpu, &cpu_callin_map)) in do_boot_cpu()
[all …]
/linux-2.4.37.9/arch/mips/sibyte/sb1250/
time.c
54 int cpu = smp_processor_id(); in sb1250_time_init() local
55 int irq = K_INT_TIMER_0+cpu; in sb1250_time_init()
58 if (cpu > 3) { in sb1250_time_init()
62 if (!cpu) { in sb1250_time_init()
67 sb1250_mask_irq(cpu, irq); in sb1250_time_init()
70 out64(IMR_IP4_VAL, KSEG1 + A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) in sb1250_time_init()
75 out64(0, KSEG1 + A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); in sb1250_time_init()
82 , KSEG1 + A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); in sb1250_time_init()
86 KSEG1 + A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); in sb1250_time_init()
88 sb1250_unmask_irq(cpu, irq); in sb1250_time_init()
[all …]
/linux-2.4.37.9/arch/mips64/mm/
pg-sb1.c
168 int cpu = smp_processor_id(); in sb1_dma_init() local
169 uint64_t base_val = PHYSADDR(&page_descr[cpu]) | V_DM_DSCR_BASE_RINGSZ(1); in sb1_dma_init()
172 IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE)); in sb1_dma_init()
174 IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE)); in sb1_dma_init()
176 IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE)); in sb1_dma_init()
181 int cpu = smp_processor_id(); in clear_page() local
187 …page_descr[cpu].dscr_a = PHYSADDR(page) | M_DM_DSCRA_ZERO_MEM | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_I… in clear_page()
188 page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE); in clear_page()
189 out64(1, IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)); in clear_page()
195 …while (!(in64(IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)) & M_DM_DSCR_BASE_INTERRUPT… in clear_page()
[all …]
/linux-2.4.37.9/arch/mips/mm/
pg-sb1.c
168 int cpu = smp_processor_id(); in sb1_dma_init() local
169 uint64_t base_val = PHYSADDR(&page_descr[cpu]) | V_DM_DSCR_BASE_RINGSZ(1); in sb1_dma_init()
172 IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE)); in sb1_dma_init()
174 IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE)); in sb1_dma_init()
176 IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE)); in sb1_dma_init()
181 int cpu = smp_processor_id(); in clear_page() local
187 …page_descr[cpu].dscr_a = PHYSADDR(page) | M_DM_DSCRA_ZERO_MEM | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_I… in clear_page()
188 page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE); in clear_page()
189 out64(1, IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)); in clear_page()
195 …while (!(in64(IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)) & M_DM_DSCR_BASE_INTERRUPT… in clear_page()
[all …]
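
The pg-sb1.c matches (identical in arch/mips and arch/mips64) off-load clear_page() to the SB-1250 data-mover engine: build a descriptor flagged M_DM_DSCRA_ZERO_MEM, write 1 to the descriptor-count register to start the engine, then spin on the DSCR_BASE_DEBUG register until the completion bit appears. A mock user-space model of that submit-and-poll shape, with the hardware replaced by a function and all register names and bit values invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096
#define DM_ZERO_MEM	(1u << 0)	/* illustrative flag bits, not the real ones */
#define DM_DONE		(1u << 1)

struct dm_descr { uint64_t dscr_a; uint64_t dscr_b; };

static struct dm_descr page_descr;	/* per-CPU in the real driver */
static uint32_t dscr_count_reg, dscr_debug_reg;
static unsigned char page[PAGE_SIZE];

/* stand-in for the data mover: consume the descriptor, raise the done bit */
static void mock_data_mover(void)
{
	if (dscr_count_reg && (page_descr.dscr_a & DM_ZERO_MEM)) {
		memset(page, 0, (size_t)page_descr.dscr_b);
		dscr_count_reg = 0;
		dscr_debug_reg |= DM_DONE;
	}
}

static void clear_page_dma(void)
{
	page_descr.dscr_a = DM_ZERO_MEM;	/* "zero this range" descriptor */
	page_descr.dscr_b = PAGE_SIZE;
	dscr_debug_reg = 0;
	dscr_count_reg = 1;			/* kick the engine */
	mock_data_mover();			/* the real engine runs asynchronously */
	while (!(dscr_debug_reg & DM_DONE))
		;				/* spin until the move completes */
}

int main(void)
{
	memset(page, 0xAA, PAGE_SIZE);
	clear_page_dma();
	printf("page[0]=%#x page[%d]=%#x\n", page[0], PAGE_SIZE - 1, page[PAGE_SIZE - 1]);
	return 0;
}
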
/linux-2.4.37.9/arch/x86_64/kernel/
smpboot.c
521 int timeout, num_starts, j, cpu; in do_boot_cpu() local
524 cpu = ++cpucount; in do_boot_cpu()
531 panic("failed fork for CPU %d", cpu); in do_boot_cpu()
539 panic("No idle process for CPU %d", cpu); in do_boot_cpu()
541 idle->processor = cpu; in do_boot_cpu()
542 x86_cpu_to_apicid[cpu] = apicid; in do_boot_cpu()
543 x86_apicid_to_cpu[apicid] = cpu; in do_boot_cpu()
544 idle->cpus_runnable = 1<<cpu; in do_boot_cpu()
545 idle->cpus_allowed = 1<<cpu; in do_boot_cpu()
551 cpu_pda[cpu].pcurrent = init_tasks[cpu] = idle; in do_boot_cpu()
[all …]
