/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <bcmdefs.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include <bcmdevs.h>

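/*
 * True on a BCM47162 rev 0 when the current core is the MIPS74K; the DMP
 * (wrapper) registers of that core are not accessible on this chip rev, so
 * callers bail out early.
 */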
#define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
		(sih->chiprev == 0) && \
		(sii->coreid[sii->curidx] == MIPS74K_CORE_ID))

/* EROM parsing */

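/*
 * Read the next EROM entry at *eromptr and advance the pointer.  With a
 * non-zero mask, invalid and non-matching entries are skipped until an
 * entry with (ent & mask) == match, or the END marker, is found; with
 * mask == 0 the next entry is returned unconditionally.
 */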
static u32
get_erom_ent(si_t *sih, u32 **eromptr, u32 mask, u32 match)
{
	u32 ent;
	uint inv = 0, nom = 0;

	while (true) {
		ent = R_REG(*eromptr);
		(*eromptr)++;

		if (mask == 0)
			break;

		if ((ent & ER_VALID) == 0) {
			inv++;
			continue;
		}

		if (ent == (ER_END | ER_VALID))
			break;

		if ((ent & mask) == match)
			break;

		nom++;
	}

	SI_VMSG(("%s: Returning ent 0x%08x\n", __func__, ent));
	if (inv + nom) {
		SI_VMSG(("  after %d invalid and %d non-matching entries\n",
			 inv, nom));
	}
	return ent;
}

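/*
 * Parse an address space descriptor for slave port 'sp', address space
 * 'ad' and descriptor type 'st', filling in the 64-bit base address and
 * size.  Returns the descriptor word, or 0 (with *eromptr rewound) when
 * the next entry is not the expected descriptor.
 */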
static u32
get_asd(si_t *sih, u32 **eromptr, uint sp, uint ad, uint st,
	u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
{
	u32 asd, sz, szd;

	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
	if (((asd & ER_TAG1) != ER_ADD) ||
	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
	    ((asd & AD_ST_MASK) != st)) {
		/* This is not what we want, "push" it back */
		(*eromptr)--;
		return 0;
	}
	*addrl = asd & AD_ADDR_MASK;
	if (asd & AD_AG32)
		*addrh = get_erom_ent(sih, eromptr, 0, 0);
	else
		*addrh = 0;
	*sizeh = 0;
	sz = asd & AD_SZ_MASK;
	if (sz == AD_SZ_SZD) {
		szd = get_erom_ent(sih, eromptr, 0, 0);
		*sizel = szd & SD_SZ_MASK;
		if (szd & SD_SG32)
			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
	} else
		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);

	SI_VMSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
		 sp, ad, st, *sizeh, *sizel, *addrh, *addrl));

	return asd;
}

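/*
 * Hook for chip-specific hardware fixups applied once the EROM scan finds
 * the END marker; currently a no-op.
 */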
static void ai_hwfixup(si_info_t *sii)
{
}

/* parse the enumeration rom to identify all cores */
void ai_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii = SI_INFO(sih);
	chipcregs_t *cc = (chipcregs_t *) regs;
	u32 erombase, *eromptr, *eromlim;

	erombase = R_REG(&cc->eromptr);

	switch (sih->bustype) {
	case SI_BUS:
		eromptr = (u32 *) REG_MAP(erombase, SI_CORE_SIZE);
		break;

	case PCI_BUS:
		/* Set wrappers address */
		sii->curwrap = (void *)((unsigned long)regs + SI_CORE_SIZE);

		/* Now point the window at the erom */
		pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
		eromptr = regs;
		break;

	case SPI_BUS:
	case SDIO_BUS:
		eromptr = (u32 *)(unsigned long)erombase;
		break;

	default:
		SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n",
			  sih->bustype));
		ASSERT(0);
		return;
	}
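	/* EROM entries are scanned up to, but not including, the remap
	 * control registers at offset ER_REMAPCONTROL.
	 */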
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));

	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", regs, erombase, eromptr, eromlim));
	while (eromptr < eromlim) {
		u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
		u32 mpd, asd, addrl, addrh, sizel, sizeh;
		u32 *base;
		uint i, j, idx;
		bool br;

		br = false;

		/* Grok a component */
		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
		if (cia == (ER_END | ER_VALID)) {
			SI_VMSG(("Found END of erom after %d cores\n",
				 sii->numcores));
			ai_hwfixup(sii);
			return;
		}
		base = eromptr - 1;
		cib = get_erom_ent(sih, &eromptr, 0, 0);

		if ((cib & ER_TAG) != ER_CI) {
			SI_ERROR(("CIA not followed by CIB\n"));
			goto error;
		}

		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", mfg, cid, crev, base, nmw, nsw, nmp, nsp));

		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
			continue;
		if ((nmw + nsw == 0)) {
			/* A component which is not a core */
			if (cid == OOB_ROUTER_CORE_ID) {
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
					      &addrl, &addrh, &sizel, &sizeh);
				if (asd != 0) {
					sii->oob_router = addrl;
				}
			}
			continue;
		}

		idx = sii->numcores;
/*		sii->eromptr[idx] = base; */
		sii->cia[idx] = cia;
		sii->cib[idx] = cib;
		sii->coreid[idx] = cid;

		for (i = 0; i < nmp; i++) {
			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
			if ((mpd & ER_TAG) != ER_MP) {
				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
				goto error;
			}
			SI_VMSG(("  Master port %d, mp: %d id: %d\n", i,
				 (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
				 (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
		}

		/* First Slave Address Descriptor should be port 0:
		 * the main register space for the core
		 */
		asd =
		    get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
			    &sizel, &sizeh);
		if (asd == 0) {
			/* Try again to see if it is a bridge */
			asd =
			    get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
				    &addrh, &sizel, &sizeh);
			if (asd != 0)
				br = true;
			else if ((addrh != 0) || (sizeh != 0)
				 || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("First Slave ASD for core 0x%04x malformed " "(0x%08x)\n", cid, asd));
				goto error;
			}
		}
		sii->coresba[idx] = addrl;
		sii->coresba_size[idx] = sizel;
		/* Get any more ASDs in port 0 */
		j = 1;
		do {
			asd =
			    get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
				    &addrh, &sizel, &sizeh);
			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
				sii->coresba2[idx] = addrl;
				sii->coresba2_size[idx] = sizel;
			}
			j++;
		} while (asd != 0);

		/* Go through the ASDs for other slave ports */
		for (i = 1; i < nsp; i++) {
			j = 0;
			do {
				asd =
				    get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
					    &addrl, &addrh, &sizel, &sizeh);
			} while (asd != 0);
			if (j == 0) {
				SI_ERROR((" SP %d has no address descriptors\n",
					  i));
				goto error;
			}
		}

		/* Now get master wrappers */
		for (i = 0; i < nmw; i++) {
			asd =
			    get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
				    &addrh, &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for MW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
				goto error;
			}
			if (i == 0)
				sii->wrapba[idx] = addrl;
		}

		/* And finally slave wrappers */
		for (i = 0; i < nsw; i++) {
			uint fwp = (nsp == 1) ? 0 : 1;
			asd =
			    get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
				    &addrl, &addrh, &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for SW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
				goto error;
			}
			if ((nmw == 0) && (i == 0))
				sii->wrapba[idx] = addrl;
		}

		/* Don't record bridges */
		if (br)
			continue;

		/* Done with core */
		sii->numcores++;
	}

	SI_ERROR(("Reached end of erom without finding END"));

 error:
	sii->numcores = 0;
	return;
}

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
void *ai_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);
	u32 addr, wrap;
	void *regs;

	if (coreidx >= sii->numcores)
		return NULL;

	addr = sii->coresba[coreidx];
	wrap = sii->wrapba[coreidx];

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL)
	       || !(*(sii)->intrsenabled_fn) ((sii)->intr_arg));

	switch (sih->bustype) {
	case SI_BUS:
		/* map new one */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		sii->curmap = regs = sii->regs[coreidx];
		if (!sii->wrappers[coreidx]) {
			sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->wrappers[coreidx]));
		}
		sii->curwrap = sii->wrappers[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
		regs = sii->curmap;
		/* point bar0 2nd 4KB window */
		pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
		break;

	case SPI_BUS:
	case SDIO_BUS:
		sii->curmap = regs = (void *)(unsigned long)addr;
		sii->curwrap = (void *)(unsigned long)wrap;
		break;

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	sii->curmap = regs;
	sii->curidx = coreidx;

	return regs;
}

/* Return the number of address spaces in current core */
int ai_numaddrspaces(si_t *sih)
{
	return 2;
}

/* Return the address of the nth address space in the current core */
u32 ai_addrspace(si_t *sih, uint asidx)
{
	si_info_t *sii;
	uint cidx;

	sii = SI_INFO(sih);
	cidx = sii->curidx;

	if (asidx == 0)
		return sii->coresba[cidx];
	else if (asidx == 1)
		return sii->coresba2[cidx];
	else {
		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
		return 0;
	}
}

/* Return the size of the nth address space in the current core */
u32 ai_addrspacesize(si_t *sih, uint asidx)
{
	si_info_t *sii;
	uint cidx;

	sii = SI_INFO(sih);
	cidx = sii->curidx;

	if (asidx == 0)
		return sii->coresba_size[cidx];
	else if (asidx == 1)
		return sii->coresba2_size[cidx];
	else {
		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
		return 0;
	}
}

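/*
 * Return the backplane flag (OOB select line) of the current core; on a
 * BCM47162a0 the MIPS DMP registers cannot be read, so the core index is
 * returned instead.
 */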
uint ai_flag(si_t *sih)
{
	si_info_t *sii;
	aidmp_t *ai;

	sii = SI_INFO(sih);
	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __func__));
		return sii->curidx;
	}
	ai = sii->curwrap;

	return R_REG(&ai->oobselouta30) & 0x1f;
}

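/*
 * Would set backplane interrupt routing for the current core (siflag);
 * currently a no-op.
 */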
void ai_setint(si_t *sih, int siflag)
{
}

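/*
 * Write 'val' to the wrapper register at byte offset 'offset' of the
 * current core.
 */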
void ai_write_wrap_reg(si_t *sih, u32 offset, u32 val)
{
	si_info_t *sii = SI_INFO(sih);
	u32 *w = (u32 *) sii->curwrap;
	W_REG(w + (offset / 4), val);
	return;
}

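/* Return the current core's manufacturer id from its cached CIA word. */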
uint ai_corevendor(si_t *sih)
{
	si_info_t *sii;
	u32 cia;

	sii = SI_INFO(sih);
	cia = sii->cia[sii->curidx];
	return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
}

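/* Return the current core's revision from its cached CIB word. */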
uint ai_corerev(si_t *sih)
{
	si_info_t *sii;
	u32 cib;

	sii = SI_INFO(sih);
	cib = sii->cib[sii->curidx];
	return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
}

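/*
 * A core is "up" when its clock is enabled (and not force-gated) and its
 * wrapper reset is deasserted.
 */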
bool ai_iscoreup(si_t *sih)
{
	si_info_t *sii;
	aidmp_t *ai;

	sii = SI_INFO(sih);
	ai = sii->curwrap;

	return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
		 SICF_CLOCK_EN)
		&& ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32-bit register mask&set
 * operation, switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core
 * switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for
 * pci registers and (on newer pci cores) chipcommon registers.
 */
uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	u32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = false;
	si_info_t *sii;

	sii = SI_INFO(sih);

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (sih->bustype == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = true;
		/* map if does not exist */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
						     SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		r = (u32 *) ((unsigned char *) sii->regs[coreidx] + regoff);
	} else if (sih->bustype == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = true;
			r = (u32 *) ((char *)sii->curmap +
					PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB
			 * window or, for pcie and pci rev 13, at 8KB
			 */
			fast = true;
			if (SI_FAST(sii))
				r = (u32 *) ((char *)sii->curmap +
						PCI_16KB0_PCIREGS_OFFSET +
						regoff);
			else
				r = (u32 *) ((char *)sii->curmap +
						((regoff >= SBCONFIGOFF) ?
						 PCI_BAR0_PCISBR_OFFSET :
						 PCI_BAR0_PCIREGS_OFFSET) +
						regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (u32 *) ((unsigned char *) ai_setcoreidx(&sii->pub, coreidx) +
				regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		w = (R_REG(r) & ~mask) | val;
		W_REG(r, w);
	}

	/* readback */
	w = R_REG(r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return w;
}

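/*
 * Put the current core into reset: program the requested core-specific
 * ioctrl bits, then assert the wrapper reset line.
 */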
void ai_core_disable(si_t *sih, u32 bits)
{
	si_info_t *sii;
	volatile u32 dummy;
	aidmp_t *ai;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(&ai->resetctrl) & AIRC_RESET)
		return;

	W_REG(&ai->ioctrl, bits);
	dummy = R_REG(&ai->ioctrl);
	udelay(10);

	W_REG(&ai->resetctrl, AIRC_RESET);
	udelay(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void ai_core_reset(si_t *sih, u32 bits, u32 resetbits)
{
	si_info_t *sii;
	aidmp_t *ai;
	volatile u32 dummy;

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	ai_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */
	W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(&ai->ioctrl);
	W_REG(&ai->resetctrl, 0);
	udelay(1);

	W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
	dummy = R_REG(&ai->ioctrl);
	udelay(1);
}

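/* Write-only update of the current core's control flags (ioctrl). */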
void ai_core_cflags_wo(si_t *sih, u32 mask, u32 val)
{
	si_info_t *sii;
	aidmp_t *ai;
	u32 w;

	sii = SI_INFO(sih);

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
			  __func__));
		return;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(&ai->ioctrl) & ~mask) | val);
		W_REG(&ai->ioctrl, w);
	}
}

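/*
 * Read-modify-write the current core's control flags (ioctrl) and return
 * the updated register value.
 */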
u32 ai_core_cflags(si_t *sih, u32 mask, u32 val)
{
	si_info_t *sii;
	aidmp_t *ai;
	u32 w;

	sii = SI_INFO(sih);
	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
			  __func__));
		return 0;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(&ai->ioctrl) & ~mask) | val);
		W_REG(&ai->ioctrl, w);
	}

	return R_REG(&ai->ioctrl);
}

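/*
 * Read-modify-write the current core's state flags (iostatus) and return
 * the updated register value.
 */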
u32 ai_core_sflags(si_t *sih, u32 mask, u32 val)
{
	si_info_t *sii;
	aidmp_t *ai;
	u32 w;

	sii = SI_INFO(sih);
	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", __func__));
		return 0;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	if (mask || val) {
		w = ((R_REG(&ai->iostatus) & ~mask) | val);
		W_REG(&ai->iostatus, w);
	}

	return R_REG(&ai->iostatus);
}