1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/types.h>
18 #include <bcmdefs.h>
19 #ifdef BRCM_FULLMAC
20 #include <linux/netdevice.h>
21 #endif
22 #include <bcmutils.h>
23 #include <siutils.h>
24 #include <bcmdevs.h>
25 #include <hndsoc.h>
26 #include <sbchipc.h>
27 #include <pci_core.h>
28 #include <pcicfg.h>
29 #include <sbpcmcia.h>
30 #include "siutils_priv.h"
31
32 /* local prototypes */
33 static uint _sb_coreidx(si_info_t *sii, u32 sba);
34 static uint _sb_scan(si_info_t *sii, u32 sba, void *regs, uint bus,
35 u32 sbba, uint ncores);
36 static u32 _sb_coresba(si_info_t *sii);
37 static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
38
39 #define SET_SBREG(sii, r, mask, val) \
40 W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
41 #define REGS2SB(va) (sbconfig_t *) ((s8 *)(va) + SBCONFIGOFF)
42
43 /* sonicsrev */
44 #define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
45 #define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
46
47 #define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
48 #define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
49 #define AND_SBREG(sii, sbr, v) \
50 W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
51 #define OR_SBREG(sii, sbr, v) \
52 W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
53
/* Read a 32-bit Sonics backplane configuration register. */
static u32 sb_read_sbreg(si_info_t *sii, volatile u32 *sbr)
{
	u32 val = R_REG(sbr);

	return val;
}
58
/* Write a 32-bit value to a Sonics backplane configuration register. */
static void sb_write_sbreg(si_info_t *sii, volatile u32 *sbr, u32 v)
{
	W_REG(sbr, v);
}
63
sb_coreid(si_t * sih)64 uint sb_coreid(si_t *sih)
65 {
66 si_info_t *sii;
67 sbconfig_t *sb;
68
69 sii = SI_INFO(sih);
70 sb = REGS2SB(sii->curmap);
71
72 return (R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >>
73 SBIDH_CC_SHIFT;
74 }
75
76 /* return core index of the core with address 'sba' */
_sb_coreidx(si_info_t * sii,u32 sba)77 static uint _sb_coreidx(si_info_t *sii, u32 sba)
78 {
79 uint i;
80
81 for (i = 0; i < sii->numcores; i++)
82 if (sba == sii->coresba[i])
83 return i;
84 return BADIDX;
85 }
86
87 /* return core address of the current core */
_sb_coresba(si_info_t * sii)88 static u32 _sb_coresba(si_info_t *sii)
89 {
90 u32 sbaddr = 0;
91
92 switch (sii->pub.bustype) {
93 case SPI_BUS:
94 case SDIO_BUS:
95 sbaddr = (u32)(unsigned long)sii->curmap;
96 break;
97 default:
98 ASSERT(0);
99 break;
100 }
101
102 return sbaddr;
103 }
104
sb_corerev(si_t * sih)105 uint sb_corerev(si_t *sih)
106 {
107 si_info_t *sii;
108 sbconfig_t *sb;
109 uint sbidh;
110
111 sii = SI_INFO(sih);
112 sb = REGS2SB(sii->curmap);
113 sbidh = R_SBREG(sii, &sb->sbidhigh);
114
115 return SBCOREREV(sbidh);
116 }
117
sb_iscoreup(si_t * sih)118 bool sb_iscoreup(si_t *sih)
119 {
120 si_info_t *sii;
121 sbconfig_t *sb;
122
123 sii = SI_INFO(sih);
124 sb = REGS2SB(sii->curmap);
125
126 return (R_SBREG(sii, &sb->sbtmstatelow) &
127 (SBTML_RESET | SBTML_REJ_MASK |
128 (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
129 (SICF_CLOCK_EN << SBTML_SICF_SHIFT);
130 }
131
132 /*
133 * Switch to 'coreidx', issue a single arbitrary 32bit
134 * register mask&set operation,
135 * switch back to the original core, and return the new value.
136 *
 * When using the silicon backplane, no fiddling with interrupts
138 * or core switches are needed.
139 *
140 * Also, when using pci/pcie, we can optimize away the core switching
141 * for pci registers
142 * and (on newer pci cores) chipcommon registers.
143 */
uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx;
	void *regs;
	u32 *r;
	uint w;
	uint intr_val = 0;
	si_info_t *sii;

	sii = SI_INFO(sih);

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	/* The previous implementation carried a 'fast' flag that was always
	 * false (leftover from a removed PCI fast path); the core switch is
	 * now done unconditionally.
	 */
	INTR_OFF(sii, intr_val);

	/* save current core index */
	origidx = si_coreidx(&sii->pub);

	/* switch core; bail out if the index does not map to a core
	 * (sb_setcoreidx() returns NULL and leaves the mapping untouched,
	 * so only the interrupt state needs restoring)
	 */
	regs = sb_setcoreidx(&sii->pub, coreidx);
	if (regs == NULL) {
		INTR_RESTORE(sii, intr_val);
		return 0;
	}
	r = (u32 *) ((unsigned char *) regs + regoff);

	/* mask and set */
	if (mask || val) {
		if (regoff >= SBCONFIGOFF) {
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(r) & ~mask) | val;
			W_REG(r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(sii, r);
	else
		w = R_REG(r);

	/* restore core index */
	if (origidx != coreidx)
		sb_setcoreidx(&sii->pub, origidx);

	INTR_RESTORE(sii, intr_val);

	return w;
}
201
202 /* Scan the enumeration space to find all cores starting from the given
203 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
204 * is the default core address at chip POR time and 'regs' is the virtual
205 * address that the default core is mapped at. 'ncores' is the number of
206 * cores expected on bus 'sbba'. It returns the total number of cores
207 * starting from bus 'sbba', inclusive.
208 */
209 #define SB_MAXBUSES 2
static uint _sb_scan(si_info_t *sii, u32 sba, void *regs, uint bus, u32 sbba,
		     uint numcores)
{
	uint next;		/* global core index being filled in */
	uint ncc = 0;		/* cores found on bridged (child) buses */
	uint i;			/* core index local to this bus */

	/* bound the recursion depth: only SB_MAXBUSES levels are scanned */
	if (bus >= SB_MAXBUSES) {
		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to "
			  "scan\n", sbba, bus));
		return 0;
	}
	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n",
		sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = sii->numcores;
	     i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
		sii->coresba[next] = sbba + (i * SI_CORE_SIZE);

		/* change core to 'next' and read its coreid */
		sii->curmap = _sb_setcoreidx(sii, next);
		sii->curidx = next;

		sii->coreid[next] = sb_coreid(&sii->pub);

		/* core specific processing... */
		/* chipc provides # cores */
		if (sii->coreid[next] == CC_CORE_ID) {
			chipcregs_t *cc = (chipcregs_t *) sii->curmap;
			u32 ccrev = sb_corerev(&sii->pub);

			/* determine numcores - this is the
			   total # cores in the chip */
			if (((ccrev == 4) || (ccrev >= 6)))
				numcores =
				    (R_REG(&cc->chipid) & CID_CC_MASK)
				    >> CID_CC_SHIFT;
			else {
				/* Older chips */
				SI_ERROR(("sb_chip2numcores: unsupported chip "
					  "0x%x\n", sii->pub.chip));
				ASSERT(0);
				numcores = 1;
			}

			SI_VMSG(("_sb_scan: %u cores in the chip %s\n",
				numcores, sii->pub.issim ? "QT" : ""));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (sii->coreid[next] == OCP_CORE_ID) {
			sbconfig_t *sb = REGS2SB(sii->curmap);
			u32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
			uint nsbcc;

			/* publish progress so recursive calls append after
			   the cores discovered so far */
			sii->numcores = next + 1;

			/* skip bridges whose child bus is outside the
			   enumeration window or already scanned */
			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
				continue;
			nsbba &= 0xfffff000;
			if (_sb_coreidx(sii, nsbba) != BADIDX)
				continue;

			/* bits 16..19 of sbtmstatehigh are used as the
			   expected core count on the bridged bus */
			nsbcc =
			    (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >>
			    16;
			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
			/* NOTE(review): on the root bus the chipc total
			   presumably includes bridged cores, hence the
			   subtraction — confirm against chipc docs */
			if (sbba == SI_ENUM_BASE)
				numcores -= nsbcc;
			ncc += nsbcc;
		}
	}

	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	/* total = cores on this bus + cores on all bridged buses */
	sii->numcores = i + ncc;
	return sii->numcores;
}
290
291 /* scan the sb enumerated space to identify all cores */
/* scan the sb enumerated space to identify all cores */
void sb_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);
	u32 origsba;

	/* record the SonicsBackplane interconnect revision */
	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >>
			   SBIDL_RV_SHIFT;

	/* Save the current core info and validate it later till we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(sii);

	/* enumerate all SB(s) starting from SI_ENUM_BASE */
	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
}
312
313 /*
314 * This function changes logical "focus" to the indicated core;
315 * must be called with interrupts off.
316 * Moreover, callers should keep interrupts off during switching out of
317 * and back to d11 core
318 */
/* Change logical "focus" to core 'coreidx' and return its mapped address,
 * or NULL if the index is out of range. Must be called with interrupts off.
 */
void *sb_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);

	if (coreidx >= sii->numcores)
		return NULL;

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL)
	       || !(*(sii)->intrsenabled_fn) ((sii)->intr_arg));

	sii->curidx = coreidx;
	sii->curmap = _sb_setcoreidx(sii, coreidx);

	return sii->curmap;
}
340
341 /* This function changes the logical "focus" to the indicated core.
342 * Return the current core's virtual address.
343 */
/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address, or NULL for an
 * unsupported bus type.
 */
static void *_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
	u32 sbaddr = sii->coresba[coreidx];
	void *regs = NULL;

	switch (sii->pub.bustype) {
#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* first use of this core: record its backplane address */
		if (sii->regs[coreidx] == NULL) {
			sii->regs[coreidx] = (void *)sbaddr;
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		regs = sii->regs[coreidx];
		break;
#endif				/* BCMSDIO */
	default:
		ASSERT(0);
		break;
	}

	return regs;
}
369
/* Put the current core into reset, leaving reset and reject asserted.
 * 'bits' are core-specific flags driven onto the SICF field of
 * sbtmstatelow during the sequence. The order of operations follows the
 * backplane handshake: reject new transactions, drain outstanding ones,
 * then assert reset.
 */
void sb_core_disable(si_t *sih, u32 bits)
{
	si_info_t *sii;
	volatile u32 dummy;	/* readbacks flush posted register writes */
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) &
	     (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear
	   (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	udelay(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("%s: target state still busy\n", __func__));

	/* if the core is also an initiator, reject its outgoing
	   transactions too and wait for them to complete */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		udelay(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
		(((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
		 SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	udelay(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

 disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow,
		((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	udelay(1);
}
423
424 /* reset and re-enable a core
425 * inputs:
426 * bits - core specific bits that are set during and after reset sequence
427 * resetbits - core specific bits that are set only during reset sequence
428 */
void sb_core_reset(si_t *sih, u32 bits, u32 resetbits)
{
	si_info_t *sii;
	sbconfig_t *sb;
	volatile u32 dummy;	/* readbacks flush posted register writes */

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for
	 * arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and
	   forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
		(((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) <<
		  SBTML_SICF_SHIFT) | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	udelay(1);

	/* clear any pending serror before releasing reset */
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR)
		W_SBREG(sii, &sb->sbtmstatehigh, 0);

	/* clear latched inband-error / timeout indications */
	dummy = R_SBREG(sii, &sb->sbimstate);
	if (dummy & (SBIM_IBE | SBIM_TO))
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
		((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) <<
		 SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	udelay(1);

	/* leave clock enabled */
	W_SBREG(sii, &sb->sbtmstatelow,
		((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	udelay(1);
}
477