/*
 * SN2 Platform specific SMP Support
 *
 * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like. Any license provided herein, whether implied or
 * otherwise, applies only to this software file. Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mmzone.h>

#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/hw_irq.h>
#include <asm/current.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn2/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>

void sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1);


static spinlock_t sn2_global_ptc_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;

static unsigned long sn2_ptc_deadlock_count;


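/*
 * Spin until the shub's pending-write count shows that all PIO writes
 * issued by this cpu have drained, then return the final write-status
 * value so the caller can inspect the deadlock bit.
 */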
static inline unsigned long
wait_piowc(void)
{
        volatile unsigned long *piows;
        unsigned long ws;

        piows = pda.pio_write_status_addr;
        do {
                __asm__ __volatile__ ("mf.a" ::: "memory");
        } while (((ws = *piows) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
                 SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK);
        return ws;
}


/**
 * sn2_global_tlb_purge - globally purge translation cache of virtual address range
 * @start: start of virtual address range
 * @end: end of virtual address range
 * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
 *
 * Purges the translation caches of all processors of the given virtual address
 * range.
 */

void
sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
{
        int cnode, mycnode, nasid, flushed=0;
        volatile unsigned long *ptc0, *ptc1;
        unsigned long flags=0, data0, data1;

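        /*
         * Encode the fixed half of the purge request for the SH_PTC_0 MMR:
         * the A bit, the page size (nbits), the region ID taken from the
         * region register that maps the start address, and the START field.
         */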
        data0 = (1UL<<SH_PTC_0_A_SHFT) |
                (nbits<<SH_PTC_0_PS_SHFT) |
                ((ia64_get_rr(start)>>8)<<SH_PTC_0_RID_SHFT) |
                (1UL<<SH_PTC_0_START_SHFT);

        ptc0 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
        ptc1 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);

        mycnode = numa_node_id();

        spin_lock_irqsave(&sn2_global_ptc_lock, flags);

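        /*
         * Walk the range one (1UL << nbits)-sized page at a time.  The local
         * node is purged with a ptc.ga instruction; every other non-headless
         * node gets the purge by writing the request into its shub's
         * PTC_0/PTC_1 MMRs.
         */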
        do {
                data1 = start | (1UL<<SH_PTC_1_START_SHFT);
                for (cnode = 0; cnode < numnodes; cnode++) {
                        if (is_headless_node(cnode))
                                continue;
                        if (cnode == mycnode) {
                                asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2) : "memory");
                        } else {
                                nasid = cnodeid_to_nasid(cnode);
                                ptc0 = CHANGE_NASID(nasid, ptc0);
                                ptc1 = CHANGE_NASID(nasid, ptc1);
                                pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
                                flushed = 1;
                        }
                }

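                /*
                 * If any remote purges were issued, wait for the PIO writes
                 * to drain and fall into the recovery path if the shub
                 * flagged a PTC deadlock.
                 */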
                if (flushed && (wait_piowc() & SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_MASK)) {
                        sn2_ptc_deadlock_recovery(data0, data1);
                }

                start += (1UL << nbits);

        } while (start < end);

        spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);

}

/*
 * sn2_ptc_deadlock_recovery
 *
 * Recover from PTC deadlock conditions. Recovery requires stepping thru each
 * TLB flush transaction.  The recovery sequence is somewhat tricky & is
 * coded in assembly language.
 */
void
sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1)
{
        extern void sn2_ptc_deadlock_recovery_core(long*, long, long*, long, long*);
        int cnode, mycnode, nasid;
        long *ptc0, *ptc1, *piows;

        sn2_ptc_deadlock_count++;

        ptc0 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
        ptc1 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);
        piows = (long*)pda.pio_write_status_addr;

        mycnode = numa_node_id();

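        /*
         * Replay the purge on every remote node.  The actual recovery
         * sequence lives in sn2_ptc_deadlock_recovery_core (assembly),
         * which is handed this cpu's PIO write-status address.
         */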
        for (cnode = 0; cnode < numnodes; cnode++) {
                if (is_headless_node(cnode) || cnode == mycnode)
                        continue;
                nasid = cnodeid_to_nasid(cnode);
                ptc0 = CHANGE_NASID(nasid, ptc0);
                ptc1 = CHANGE_NASID(nasid, ptc1);
                sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows);
        }
}

/**
 * sn_send_IPI_phys - send an IPI to a Nasid and slice
 * @physid: physical cpuid to receive the interrupt.
 * @vector: command to send
 * @delivery_mode: delivery mechanism
 *
 * Sends an IPI (interprocessor interrupt) to the processor specified by
 * @physid
 *
 * @delivery_mode can be one of the following
 *
 * %IA64_IPI_DM_INT - pend an interrupt
 * %IA64_IPI_DM_PMI - pend a PMI
 * %IA64_IPI_DM_NMI - pend an NMI
 * %IA64_IPI_DM_INIT - pend an INIT interrupt
 */
void
sn_send_IPI_phys(long physid, int vector, int delivery_mode)
{
        long nasid, slice, val;
        unsigned long flags=0;
        volatile long *p;

        nasid = cpu_physical_id_to_nasid(physid);
        slice = cpu_physical_id_to_slice(physid);

        p = (long*)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
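        /*
         * Assemble the SH_IPI_INT value: the SEND bit, the target physical
         * cpu id, the delivery type, the vector, and the fixed BASE field.
         */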
        val = (1UL<<SH_IPI_INT_SEND_SHFT) |
                (physid<<SH_IPI_INT_PID_SHFT) |
                ((long)delivery_mode<<SH_IPI_INT_TYPE_SHFT) |
                ((long)vector<<SH_IPI_INT_IDX_SHFT) |
                (0x000feeUL<<SH_IPI_INT_BASE_SHFT);

        mb();
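        /*
         * When the shub 1.1 workarounds are enabled, the IPI MMR write is
         * serialized under the global PTC lock and waited on before the
         * lock is dropped; this appears to guard against the same PIO
         * write-status deadlock handled by the TLB purge path (an
         * inference from the shared lock, not spelled out here).
         */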
        if (enable_shub_wars_1_1()) {
                spin_lock_irqsave(&sn2_global_ptc_lock, flags);
        }
        pio_phys_write_mmr(p, val);
        if (enable_shub_wars_1_1()) {
                wait_piowc();
                spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
        }

}

/**
 * sn2_send_IPI - send an IPI to a processor
 * @cpuid: target of the IPI
 * @vector: command to send
 * @delivery_mode: delivery mechanism
 * @redirect: redirect the IPI?
 *
 * Sends an IPI (InterProcessor Interrupt) to the processor specified by
 * @cpuid.  @vector specifies the command to send, while @delivery_mode can
 * be one of the following
 *
 * %IA64_IPI_DM_INT - pend an interrupt
 * %IA64_IPI_DM_PMI - pend a PMI
 * %IA64_IPI_DM_NMI - pend an NMI
 * %IA64_IPI_DM_INIT - pend an INIT interrupt
 */
void
sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
{
        long physid;

        physid = cpu_physical_id(cpuid);

        sn_send_IPI_phys(physid, vector, delivery_mode);
}