2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
5 * $Copyright Open Broadcom Corporation$
7 * $Id: aiutils.c 467150 2014-04-02 17:30:43Z $
19 #include "siutils_priv.h"
/* Chip-quirk predicates: these chips hide or reroute certain DMP
 * (aidmp wrapper) registers.  All compiled out to 0 in this build --
 * presumably enabled in chip-specific configurations; TODO confirm.
 */
21 #define BCM47162_DMP() (0)
22 #define BCM5357_DMP() (0)
23 #define BCM4707_DMP() (0)
/* Core id / core rev remapping hooks; identity mappings here. */
25 #define remap_coreid(sih, coreid) (coreid)
26 #define remap_corerev(sih, corerev) (corerev)
/* Scan the enumeration ROM for the next entry satisfying
 * (ent & mask) == match, advancing *eromptr past skipped entries.
 * Counts invalid ('inv') and non-matching ('nom') entries for the
 * verbose trace below.
 * NOTE(review): this listing is line-sampled -- the return type, the
 * loop construct and the closing brace are not visible here.
 */
31 get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
34 uint inv = 0, nom = 0;
37 ent = R_REG(si_osh(sih), *eromptr);
/* Entries without ER_VALID are skipped. */
43 if ((ent & ER_VALID) == 0) {
/* The END marker terminates the table unconditionally. */
48 if (ent == (ER_END | ER_VALID))
51 if ((ent & mask) == match)
57 SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
59 SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
/* Read an Address Space Descriptor from the EROM for slave port 'sp',
 * address descriptor index 'ad', of type 'st' (slave/bridge/wrapper).
 * Fills *addrl/*addrh and *sizel/*sizeh.  When the next EROM entry is
 * not the requested ASD it is "pushed back" -- presumably returning 0
 * to the caller; the early-return path is not visible in this sampled
 * listing, TODO confirm.
 */
65 get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
66 uint32 *sizel, uint32 *sizeh)
70 asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
/* Verify tag, slave-port number and descriptor type all match. */
71 if (((asd & ER_TAG1) != ER_ADD) ||
72 (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
73 ((asd & AD_ST_MASK) != st)) {
74 /* This is not what we want, "push" it back */
78 *addrl = asd & AD_ADDR_MASK;
/* 64-bit address: the high word follows as the next EROM entry. */
80 *addrh = get_erom_ent(sih, eromptr, 0, 0);
84 sz = asd & AD_SZ_MASK;
/* AD_SZ_SZD: size is described by separate size-descriptor entries. */
85 if (sz == AD_SZ_SZD) {
86 szd = get_erom_ent(sih, eromptr, 0, 0);
87 *sizel = szd & SD_SZ_MASK;
89 *sizeh = get_erom_ent(sih, eromptr, 0, 0);
/* Otherwise size is an encoded power-of-two multiple of AD_SZ_BASE. */
91 *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
93 SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
94 sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
/* Post-scan hardware fixup hook.  Body not visible in this sampled
 * listing -- presumably empty or chip-specific; TODO confirm.
 */
100 ai_hwfixup(si_info_t *sii)
105 /* parse the enumeration rom to identify all cores */
/* Walk the EROM pointed to by chipcommon's eromptr register and record
 * each discovered core's CIA/CIB, register base, size and wrapper
 * addresses into cores_info.
 * NOTE(review): this listing is line-sampled -- switch labels, loop
 * closers and several statements are not visible; only the error-
 * message strings were corrected ("enumertion" -> "enumeration",
 * "size1" -> "sizel", missing '\n' on the final SI_ERROR).
 */
107 ai_scan(si_t *sih, void *regs, uint devid)
109 si_info_t *sii = SI_INFO(sih);
110 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
111 chipcregs_t *cc = (chipcregs_t *)regs;
112 uint32 erombase, *eromptr, *eromlim;
/* EROM base address lives in a chipcommon register. */
114 erombase = R_REG(sii->osh, &cc->eromptr);
116 switch (BUSTYPE(sih->bustype)) {
118 eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
122 /* Set wrappers address */
123 sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
125 /* Now point the window at the erom */
126 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
133 eromptr = (uint32 *)(uintptr)erombase;
139 SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
143 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
145 SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
146 regs, erombase, eromptr, eromlim));
/* One iteration per EROM component until END or the table limit. */
147 while (eromptr < eromlim) {
148 uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
149 uint32 mpd, asd, addrl, addrh, sizel, sizeh;
155 /* Grok a component */
156 cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
157 if (cia == (ER_END | ER_VALID)) {
158 SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
163 cib = get_erom_ent(sih, &eromptr, 0, 0);
165 if ((cib & ER_TAG) != ER_CI) {
166 SI_ERROR(("CIA not followed by CIB\n"));
/* Decode component id, manufacturer, revision and port counts. */
170 cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
171 mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
172 crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
173 nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
174 nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
175 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
176 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
179 SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
180 "nsw = %d, nmp = %d & nsp = %d\n",
181 mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp))
186 if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
188 if ((nmw + nsw == 0)) {
189 /* A component which is not a core */
190 if (cid == OOB_ROUTER_CORE_ID) {
191 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
192 &addrl, &addrh, &sizel, &sizeh);
194 sii->oob_router = addrl;
/* These special components are kept; everything else wrapper-less is skipped. */
197 if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID &&
198 cid != PMU_CORE_ID && cid != GCI_CORE_ID)
/* Record the core's identity at slot 'idx'. */
204 cores_info->cia[idx] = cia;
205 cores_info->cib[idx] = cib;
206 cores_info->coreid[idx] = remap_coreid(sih, cid);
/* Consume the master-port descriptors. */
208 for (i = 0; i < nmp; i++) {
209 mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
210 if ((mpd & ER_TAG) != ER_MP) {
211 SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
214 SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
215 (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
216 (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
219 /* First Slave Address Descriptor should be port 0:
220 * the main register space for the core
222 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
225 /* Try again to see if it is a bridge */
226 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
/* Primary register space must be a single 4KB region in low memory. */
234 else if ((addrh != 0) || (sizeh != 0) ||
235 (sizel != SI_CORE_SIZE)) {
236 SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t sizel ="
237 "0x%x\n", addrh, sizeh, sizel));
238 SI_ERROR(("First Slave ASD for"
239 "core 0x%04x malformed "
240 "(0x%08x)\n", cid, asd));
246 cores_info->coresba[idx] = addrl;
247 cores_info->coresba_size[idx] = sizel;
248 /* Get any more ASDs in port 0 */
251 asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
/* Second 4KB slave region (if any) is recorded as coresba2. */
253 if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
254 cores_info->coresba2[idx] = addrl;
255 cores_info->coresba2_size[idx] = sizel;
260 /* Go through the ASDs for other slave ports */
261 for (i = 1; i < nsp; i++) {
264 asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
272 SI_ERROR((" SP %d has no address descriptors\n", i));
277 /* Now get master wrappers */
278 for (i = 0; i < nmw; i++) {
279 asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
282 SI_ERROR(("Missing descriptor for MW %d\n", i));
285 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
286 SI_ERROR(("Master wrapper %d is not 4KB\n", i));
290 cores_info->wrapba[idx] = addrl;
293 /* And finally slave wrappers */
294 for (i = 0; i < nsw; i++) {
/* With a single slave port the wrapper ASDs start at port 0,
 * otherwise they follow the port-0 register space at port 1.
 */
295 uint fwp = (nsp == 1) ? 0 : 1;
296 asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
299 SI_ERROR(("Missing descriptor for SW %d\n", i));
302 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
303 SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
/* Cores without a master wrapper use the first slave wrapper. */
306 if ((nmw == 0) && (i == 0))
307 cores_info->wrapba[idx] = addrl;
311 /* Don't record bridges */
319 SI_ERROR(("Reached end of erom without finding END\n"));
/* Register map size for a core: the NS chipcommon-B core needs a
 * 15 * SI_CORE_SIZE window; all other cores use one SI_CORE_SIZE.
 */
326 #define AI_SETCOREIDX_MAPSIZE(coreid) \
327 (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
329 /* This function changes the logical "focus" to the indicated core.
330 * Return the current core's virtual address.
/* NOTE(review): sampled listing -- switch case labels, break/return
 * statements and closing braces are not visible here.
 */
333 ai_setcoreidx(si_t *sih, uint coreidx)
335 si_info_t *sii = SI_INFO(sih);
336 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
/* Reject out-of-range core indices. */
340 if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
343 addr = cores_info->coresba[coreidx];
344 wrap = cores_info->wrapba[coreidx];
347 * If the user has provided an interrupt mask enabled function,
348 * then assert interrupts are disabled before switching the core.
350 ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
352 switch (BUSTYPE(sih->bustype)) {
/* Internal bus case (presumably SI_BUS): lazily create and cache the
 * register and wrapper mappings for this core.
 */
355 if (!cores_info->regs[coreidx]) {
356 cores_info->regs[coreidx] = REG_MAP(addr,
357 AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
358 ASSERT(GOODREGS(cores_info->regs[coreidx]));
360 sii->curmap = regs = cores_info->regs[coreidx];
361 if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
362 cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
363 ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
365 sii->curwrap = cores_info->wrappers[coreidx];
369 /* point bar0 window */
370 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
372 /* point bar0 2nd 4KB window to the primary wrapper */
374 OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
376 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
/* Directly-addressable bus case: no mapping needed. */
382 sii->curmap = regs = (void *)((uintptr)addr);
383 sii->curwrap = (void *)((uintptr)wrap);
/* Commit the new focus. */
395 sii->curidx = coreidx;
/* Look up address space 'asidx' of the current core by re-walking the
 * EROM (the scan only caches the first two spaces).  Writes the result
 * through *addr/*size -- presumably; the assignment statements are not
 * visible in this sampled listing, TODO confirm.
 */
402 ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
404 si_info_t *sii = SI_INFO(sih);
405 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
406 chipcregs_t *cc = NULL;
407 uint32 erombase, *eromptr, *eromlim;
409 uint32 cia, cib, nmp, nsp;
410 uint32 asd, addrl, addrh, sizel, sizeh;
/* Find chipcommon: its eromptr register locates the EROM. */
412 for (i = 0; i < sii->numcores; i++) {
413 if (cores_info->coreid[i] == CC_CORE_ID) {
414 cc = (chipcregs_t *)cores_info->regs[i];
421 erombase = R_REG(sii->osh, &cc->eromptr);
422 eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
423 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
/* Cached identity of the core we are looking for. */
426 cia = cores_info->cia[cidx];
427 cib = cores_info->cib[cidx];
429 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
430 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
/* Scan until this core's exact CIA/CIB pair is found. */
433 while (eromptr < eromlim) {
434 if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
435 (get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
440 /* skip master ports */
441 for (i = 0; i < nmp; i++)
442 get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
444 /* Skip ASDs in port 0 */
445 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
447 /* Try again to see if it is a bridge */
448 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
454 asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
459 /* Go through the ASDs for other slave ports */
460 for (i = 1; i < nsp; i++) {
463 asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
477 SI_ERROR((" SP %d has no address descriptors\n", i));
487 /* Return the number of address spaces in current core */
489 ai_numaddrspaces(si_t *sih)
/* NOTE(review): body not visible in this sampled listing -- presumably
 * returns a constant (2); TODO confirm against full source.
 */
494 /* Return the address of the nth address space in the current core */
/* asidx 0 -> primary register space (coresba), 1 -> secondary space
 * (coresba2); any other index is unsupported and only logs an error.
 * The selector (presumably a switch on asidx) is not visible in this
 * sampled listing.
 */
496 ai_addrspace(si_t *sih, uint asidx)
498 si_info_t *sii = SI_INFO(sih);
499 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
505 return cores_info->coresba[cidx];
507 return cores_info->coresba2[cidx];
509 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
510 __FUNCTION__, asidx));
515 /* Return the size of the nth address space in the current core */
/* Mirrors ai_addrspace(): asidx 0 -> coresba_size, 1 -> coresba2_size;
 * other indices only log an error (selector not visible in this
 * sampled listing).
 */
517 ai_addrspacesize(si_t *sih, uint asidx)
519 si_info_t *sii = SI_INFO(sih);
520 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
526 return cores_info->coresba_size[cidx];
528 return cores_info->coresba2_size[cidx];
530 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
531 __FUNCTION__, asidx));
/* NOTE(review): the signature line is missing from this sampled
 * listing -- from the body this is presumably ai_flag(si_t *sih),
 * returning the current core's OOB select value; TODO confirm.
 */
539 si_info_t *sii = SI_INFO(sih);
/* Chips whose DMP registers are inaccessible get a warning instead of
 * a register access (predicates compile to 0 in this build).
 */
542 if (BCM47162_DMP()) {
543 SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
547 SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
551 SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
556 #ifdef REROUTE_OOBINT
558 SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
562 #endif /* REROUTE_OOBINT */
/* Low 5 bits of the oobselouta30 wrapper register. */
567 return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
/* Alternate OOB select of the current core: the second selector field
 * of oobselouta30 (shifted by AI_OOBSEL_1_SHIFT, masked by
 * AI_OOBSEL_MASK).  Same DMP-access guards as ai_flag().
 */
571 ai_flag_alt(si_t *sih)
573 si_info_t *sii = SI_INFO(sih);
576 if (BCM47162_DMP()) {
577 SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
581 SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
585 SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
589 #ifdef REROUTE_OOBINT
591 SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
595 #endif /* REROUTE_OOBINT */
599 return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
/* Set the interrupt routing for 'siflag'.  Body not visible in this
 * sampled listing -- presumably a no-op on AI chips; TODO confirm.
 */
603 ai_setint(si_t *sih, int siflag)
/* Read-modify-write the 32-bit wrapper register at byte 'offset' in
 * the current core's wrapper space; returns the re-read value.
 * NOTE(review): the mask/val application and its guard are not visible
 * in this sampled listing -- presumably w = (w & ~mask) | val before
 * the write; TODO confirm.
 */
608 ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
610 si_info_t *sii = SI_INFO(sih);
611 uint32 *map = (uint32 *) sii->curwrap;
614 uint32 w = R_REG(sii->osh, map+(offset/4));
617 W_REG(sii->osh, map+(offset/4), w);
620 return (R_REG(sii->osh, map+(offset/4)));
/* Manufacturer id of the current core, decoded from the cached CIA. */
624 ai_corevendor(si_t *sih)
626 si_info_t *sii = SI_INFO(sih);
627 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
630 cia = cores_info->cia[sii->curidx];
631 return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
/* Revision of the current core, decoded from the cached CIB and passed
 * through the remap_corerev() hook (identity in this build).
 */
635 ai_corerev(si_t *sih)
637 si_info_t *sii = SI_INFO(sih);
638 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
642 cib = cores_info->cib[sii->curidx];
643 return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
/* A core is "up" when its clock is enabled without force-gated-clock
 * (ioctrl: CLOCK_EN set, FGC clear) and it is not held in reset.
 */
647 ai_iscoreup(si_t *sih)
649 si_info_t *sii = SI_INFO(sih);
654 return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
655 ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
659 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
660 * switch back to the original core, and return the new value.
662 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
664 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
665 * and (on newer pci cores) chipcommon registers.
/* NOTE(review): sampled listing -- the fast/slow path selection, some
 * closing braces and the final return are not visible here.
 */
668 ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
675 si_info_t *sii = SI_INFO(sih);
676 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
679 ASSERT(GOODIDX(coreidx));
680 ASSERT(regoff < SI_CORE_SIZE);
/* val may only set bits covered by mask. */
681 ASSERT((val & ~mask) == 0);
683 if (coreidx >= SI_MAXCORES)
686 if (BUSTYPE(sih->bustype) == SI_BUS) {
687 /* If internal bus, we can always get at everything */
689 /* map if does not exist */
690 if (!cores_info->regs[coreidx]) {
691 cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
693 ASSERT(GOODREGS(cores_info->regs[coreidx]));
695 r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
696 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
697 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
699 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
700 /* Chipc registers are mapped at 12KB */
703 r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
704 } else if (sii->pub.buscoreidx == coreidx) {
705 /* pci registers are at either in the last 2KB of an 8KB window
706 * or, in pcie and pci rev 13 at 8KB
710 r = (uint32 *)((char *)sii->curmap +
711 PCI_16KB0_PCIREGS_OFFSET + regoff);
713 r = (uint32 *)((char *)sii->curmap +
714 ((regoff >= SBCONFIGOFF) ?
715 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
/* Slow path: disable interrupts and switch cores around the access. */
721 INTR_OFF(sii, intr_val);
723 /* save current core index */
724 origidx = si_coreidx(&sii->pub);
727 r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
/* Read-modify-write when a mask is given; then re-read the result. */
733 w = (R_REG(sii->osh, r) & ~mask) | val;
734 W_REG(sii->osh, r, w);
738 w = R_REG(sii->osh, r);
741 /* restore core index */
742 if (origidx != coreidx)
743 ai_setcoreidx(&sii->pub, origidx);
745 INTR_RESTORE(sii, intr_val);
752 * If there is no need for fiddling with interrupts or core switches (typically silicon
753 * back plane registers, pci registers and chipcommon registers), this function
754 * returns the register offset on this core to a mapped address. This address can
755 * be used for W_REG/R_REG directly.
757 * For accessing registers that would need a core switch, this function will return
/* (Presumably NULL for the core-switch case -- the return is not
 * visible in this sampled listing.)  Address-resolution logic mirrors
 * the fast paths of ai_corereg().
 */
761 ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
765 si_info_t *sii = SI_INFO(sih);
766 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
769 ASSERT(GOODIDX(coreidx));
770 ASSERT(regoff < SI_CORE_SIZE);
772 if (coreidx >= SI_MAXCORES)
775 if (BUSTYPE(sih->bustype) == SI_BUS) {
776 /* If internal bus, we can always get at everything */
778 /* map if does not exist */
779 if (!cores_info->regs[coreidx]) {
780 cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
782 ASSERT(GOODREGS(cores_info->regs[coreidx]));
784 r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
785 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
786 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
788 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
789 /* Chipc registers are mapped at 12KB */
792 r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
793 } else if (sii->pub.buscoreidx == coreidx) {
794 /* pci registers are at either in the last 2KB of an 8KB window
795 * or, in pcie and pci rev 13 at 8KB
799 r = (uint32 *)((char *)sii->curmap +
800 PCI_16KB0_PCIREGS_OFFSET + regoff);
802 r = (uint32 *)((char *)sii->curmap +
803 ((regoff >= SBCONFIGOFF) ?
804 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
/* Put the current core into reset via its AI wrapper: wait for pending
 * backplane operations to drain, assert AIRC_RESET, then write the
 * caller-supplied 'bits' to ioctrl.  Register writes are read back
 * to post them -- presumably; TODO confirm this is the intent of the
 * dummy reads.
 */
816 ai_core_disable(si_t *sih, uint32 bits)
818 si_info_t *sii = SI_INFO(sih);
819 volatile uint32 dummy;
824 ASSERT(GOODREGS(sii->curwrap));
827 /* if core is already in reset, just return */
828 if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
831 /* ensure there are no pending backplane operations */
832 SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
834 /* if pending backplane ops still, try waiting longer */
836 /* 300usecs was sufficient to allow backplane ops to clear for big hammer */
837 /* during driver load we may need more time */
838 SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
839 /* if still pending ops, continue on and try disable anyway */
840 /* this is in big hammer path, so don't call wl_reinit in this case... */
843 W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
844 dummy = R_REG(sii->osh, &ai->resetctrl);
845 BCM_REFERENCE(dummy);
848 W_REG(sii->osh, &ai->ioctrl, bits);
849 dummy = R_REG(sii->osh, &ai->ioctrl);
850 BCM_REFERENCE(dummy);
854 /* reset and re-enable a core
856 * bits - core specific bits that are set during and after reset sequence
857 * resetbits - core specific bits that are set only during reset sequence
/* Sequence: drain backplane ops, assert reset, enable the clock with
 * force-gated-clock (FGC) while still in reset, then release reset
 * (retrying up to loop_counter times) and finally clear FGC/resetbits,
 * leaving 'bits' | SICF_CLOCK_EN in ioctrl.  Dummy read-backs post the
 * writes -- presumably; TODO confirm.
 */
860 ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
862 si_info_t *sii = SI_INFO(sih);
864 volatile uint32 dummy;
865 uint loop_counter = 10;
867 ASSERT(GOODREGS(sii->curwrap));
870 /* ensure there are no pending backplane operations */
871 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
874 /* put core into reset state */
875 W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
878 /* ensure there are no pending backplane operations */
879 SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
881 W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
882 dummy = R_REG(sii->osh, &ai->ioctrl);
883 BCM_REFERENCE(dummy);
885 /* ensure there are no pending backplane operations */
886 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
/* Retry releasing reset until resetctrl reads back 0 or retries run out. */
889 while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
890 /* ensure there are no pending backplane operations */
891 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
894 /* take core out of reset */
895 W_REG(sii->osh, &ai->resetctrl, 0);
897 /* ensure there are no pending backplane operations */
898 SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
902 W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
903 dummy = R_REG(sii->osh, &ai->ioctrl);
904 BCM_REFERENCE(dummy);
/* Write-only variant of ai_core_cflags(): read-modify-write the current
 * core's ioctrl wrapper register without returning a value.  Chips with
 * inaccessible DMP registers get a warning instead (guards compile to 0
 * in this build).
 */
909 ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
911 si_info_t *sii = SI_INFO(sih);
916 if (BCM47162_DMP()) {
917 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
922 SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
927 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
932 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
937 ASSERT(GOODREGS(sii->curwrap));
/* val may only set bits covered by mask. */
940 ASSERT((val & ~mask) == 0);
943 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
944 W_REG(sii->osh, &ai->ioctrl, w);
/* Read-modify-write the current core's ioctrl wrapper register and
 * return the resulting register value.  Same DMP-access guards as the
 * other flag accessors.
 */
949 ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
951 si_info_t *sii = SI_INFO(sih);
955 if (BCM47162_DMP()) {
956 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
961 SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
966 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
972 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
976 ASSERT(GOODREGS(sii->curwrap));
/* val may only set bits covered by mask. */
979 ASSERT((val & ~mask) == 0);
982 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
983 W_REG(sii->osh, &ai->ioctrl, w);
986 return R_REG(sii->osh, &ai->ioctrl);
/* Read-modify-write the current core's iostatus wrapper register and
 * return the resulting value.  Only bits within SISF_CORE_BITS may be
 * touched.  Same DMP-access guards as the ioctrl accessors.
 */
990 ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
992 si_info_t *sii = SI_INFO(sih);
996 if (BCM47162_DMP()) {
997 SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
1001 if (BCM5357_DMP()) {
1002 SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
1006 if (BCM4707_DMP()) {
1007 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1012 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1017 ASSERT(GOODREGS(sii->curwrap));
/* val may only set bits covered by mask; mask limited to core bits. */
1020 ASSERT((val & ~mask) == 0);
1021 ASSERT((mask & ~SISF_CORE_BITS) == 0);
1024 w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
1025 W_REG(sii->osh, &ai->iostatus, w);
1028 return R_REG(sii->osh, &ai->iostatus);
1031 #if defined(BCMDBG_PHYDUMP)
1032 /* print interesting aidmp registers */
1034 ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
1036 si_info_t *sii = SI_INFO(sih);
1037 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1044 for (i = 0; i < sii->numcores; i++) {
1045 si_setcoreidx(&sii->pub, i);
1048 bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);
1049 if (BCM47162_DMP()) {
1050 bcm_bprintf(b, "Skipping mips74k in 47162a0\n");
1053 if (BCM5357_DMP()) {
1054 bcm_bprintf(b, "Skipping usb20h in 5357\n");
1057 if (BCM4707_DMP()) {
1058 bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
1063 bcm_bprintf(b, "Skipping pmu core\n");
1067 bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x"
1068 "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
1069 "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
1070 "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x"
1071 "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
1072 "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
1073 "intstatus 0x%x config 0x%x itcr 0x%x\n",
1074 R_REG(osh, &ai->ioctrlset),
1075 R_REG(osh, &ai->ioctrlclear),
1076 R_REG(osh, &ai->ioctrl),
1077 R_REG(osh, &ai->iostatus),
1078 R_REG(osh, &ai->ioctrlwidth),
1079 R_REG(osh, &ai->iostatuswidth),
1080 R_REG(osh, &ai->resetctrl),
1081 R_REG(osh, &ai->resetstatus),
1082 R_REG(osh, &ai->resetreadid),
1083 R_REG(osh, &ai->resetwriteid),
1084 R_REG(osh, &ai->errlogctrl),
1085 R_REG(osh, &ai->errlogdone),
1086 R_REG(osh, &ai->errlogstatus),
1087 R_REG(osh, &ai->errlogaddrlo),
1088 R_REG(osh, &ai->errlogaddrhi),
1089 R_REG(osh, &ai->errlogid),
1090 R_REG(osh, &ai->errloguser),
1091 R_REG(osh, &ai->errlogflags),
1092 R_REG(osh, &ai->intstatus),
1093 R_REG(osh, &ai->config),
1094 R_REG(osh, &ai->itcr));