2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
5 * $Copyright Open Broadcom Corporation$
7 * $Id: sbutils.c 467150 2014-04-02 17:30:43Z $
22 #include "siutils_priv.h"
25 /* local prototypes */
26 static uint _sb_coreidx(si_info_t *sii, uint32 sba);
/* NOTE(review): the continuation line of this prototype (original line 28)
 * is missing from this listing; the trailing parameter is presumably the
 * expected core count -- confirm against the _sb_scan() definition below.
 */
27 static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
29 static uint32 _sb_coresba(si_info_t *sii);
30 static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
/* Read-modify-write an sbconfig register: clear the 'mask' bits, then OR in 'val'. */
31 #define SET_SBREG(sii, r, mask, val) \
32 W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
/* Map a core's register base to its sbconfig area at offset SBCONFIGOFF
 * within the core's register window. */
33 #define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
/* Sonics backplane revision codes, extracted from the sbidlow RV field. */
36 #define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
37 #define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
/* All sbconfig register accesses go through the bus-aware helpers so the
 * PCMCIA MEM_SEG windowing (see sb_read_sbreg/sb_write_sbreg) is applied. */
39 #define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
40 #define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
41 #define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
42 #define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
/* Read a 32-bit sbconfig register, applying the PCMCIA/CF MEM_SEG address
 * workaround described below when needed.
 * NOTE(review): this listing omits intermediate source lines (return type,
 * braces, the PCMCIA bus test and the 'tmp' setup), so only the overall
 * shape of the routine is visible here.
 */
45 sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
47 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
49 uint32 val, intr_val = 0;
53 * compact flash only has an 11 bit address, while we need a 12 bit address.
54 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
55 * so we program MEM_SEG with the 12th bit when necessary (sb register access).
56 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special
/* Interrupts are held off across the MEM_SEG window switch + read + restore. */
59 INTR_OFF(sii, intr_val);
61 OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
62 sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
65 val = R_REG(sii->osh, sbr);
/* Restore MEM_SEG (presumably to 0 -- the 'tmp' reassignment is on a line
 * missing from this listing) before re-enabling interrupts. */
69 OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
70 INTR_RESTORE(sii, intr_val);
/* Write a 32-bit sbconfig register.  Uses the same PCMCIA/CF MEM_SEG
 * windowing as sb_read_sbreg(); additionally, on PCMCIA the 32-bit write is
 * split into two 16-bit writes (low half first), each preceded by a read.
 * NOTE(review): intermediate source lines (return type, braces, 'tmp'
 * setup, the else keyword before line 106) are missing from this listing.
 */
77 sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
79 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
81 volatile uint32 dummy;
86 * compact flash only has an 11 bit address, while we need a 12 bit address.
87 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
88 * so we program MEM_SEG with the 12th bit when necessary (sb register access).
89 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special
92 INTR_OFF(sii, intr_val);
94 OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
95 sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
98 if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
/* Dummy reads before each 16-bit half-write; presumably to flush posted
 * writes on the PCMCIA bridge -- confirm against the bus documentation. */
99 dummy = R_REG(sii->osh, sbr);
100 BCM_REFERENCE(dummy);
101 W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
102 dummy = R_REG(sii->osh, sbr);
103 BCM_REFERENCE(dummy);
104 W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
/* Non-PCMCIA path: single 32-bit write. */
106 W_REG(sii->osh, sbr, v);
110 OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
111 INTR_RESTORE(sii, intr_val);
/* sb_coreid() fragment: return the core-code field of the current core's
 * sbidhigh register.  NOTE(review): the function header lines are missing
 * from this listing.
 */
122 sb = REGS2SB(sii->curmap);
124 return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
/* Read the interrupt-flag status (sbflagst) from the chipcommon core's
 * sbconfig space, preserving the caller's core focus and interrupt state.
 * NOTE(review): the declarations of 'corereg'/'sb' and the return of
 * 'intflag' are on lines missing from this listing.
 */
128 sb_intflag(si_t *sih)
130 si_info_t *sii = SI_INFO(sih);
131 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
134 uint origidx, intflag, intr_val = 0;
/* Switch to chipcommon, sample sbflagst, then restore the original core. */
136 INTR_OFF(sii, intr_val);
137 origidx = si_coreidx(sih);
138 corereg = si_setcore(sih, CC_CORE_ID, 0);
139 ASSERT(corereg != NULL);
140 sb = REGS2SB(corereg);
141 intflag = R_SBREG(sii, &sb->sbflagst);
142 sb_setcoreidx(sih, origidx);
143 INTR_RESTORE(sii, intr_val);
/* sb_flag() fragment: return the current core's first target-port siflag
 * number (SBTPS_NUM0 field of sbtpsflag).  NOTE(review): the function
 * header lines are missing from this listing.
 */
155 sb = REGS2SB(sii->curmap);
157 return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
/* Route backplane interrupt flag 'siflag' to the current core via sbintvec.
 * NOTE(review): the computation of 'vec' from 'siflag' (including any
 * disable convention for negative siflag) is on lines missing from this
 * listing.
 */
161 sb_setint(si_t *sih, int siflag)
168 sb = REGS2SB(sii->curmap);
174 W_SBREG(sii, &sb->sbintvec, vec);
177 /* return core index of the core with address 'sba' */
179 _sb_coreidx(si_info_t *sii, uint32 sba)
182 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
/* Linear search of the discovered core base addresses.  The (missing) tail
 * of this function presumably returns the matching index, or BADIDX when
 * no core has base address 'sba' -- confirm against the callers. */
184 for (i = 0; i < sii->numcores; i ++)
185 if (sba == cores_info->coresba[i])
190 /* return core address of the current core */
192 _sb_coresba(si_info_t *sii)
/* The backplane address of the current core is recovered per bus type:
 * SI_BUS decodes sbadmatch0, PCI reads the BAR0 window config register,
 * PCMCIA reassembles it from three attribute-space bytes, and the
 * direct-map case uses the mapped address itself.  NOTE(review): case
 * labels and break statements are on lines missing from this listing. */
197 switch (BUSTYPE(sii->pub.bustype)) {
199 sbconfig_t *sb = REGS2SB(sii->curmap);
200 sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
205 sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
/* PCMCIA: address bits [31:12] live in three attribute registers. */
210 OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
211 sbaddr = (uint32)tmp << 12;
212 OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
213 sbaddr |= (uint32)tmp << 16;
214 OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
215 sbaddr |= (uint32)tmp << 24;
216 sbaddr = (uint32)(uintptr)sii->curmap;
/* Unknown bus type: return the BADCOREADDR sentinel. */
228 sbaddr = BADCOREADDR;
/* Return the vendor-code field from the current core's sbidhigh register.
 * NOTE(review): local declarations are on lines missing from this listing.
 */
236 sb_corevendor(si_t *sih)
242 sb = REGS2SB(sii->curmap);
244 return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
/* Return the revision of the current core, decoded from sbidhigh via the
 * SBCOREREV() macro (defined elsewhere; presumably combines the major and
 * minor revision fields -- confirm in sbconfig.h).
 */
248 sb_corerev(si_t *sih)
255 sb = REGS2SB(sii->curmap);
256 sbidh = R_SBREG(sii, &sb->sbidhigh);
258 return (SBCOREREV(sbidh));
261 /* set core-specific control flags */
/* Write-only variant: updates the SICF_* control-flag bits in sbtmstatelow
 * without reading back the new value (contrast sb_core_cflags below).
 */
263 sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
270 sb = REGS2SB(sii->curmap);
/* 'val' must not set bits outside 'mask'. */
272 ASSERT((val & ~mask) == 0);
/* Control flags live above SBTML_SICF_SHIFT in sbtmstatelow. */
275 w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
276 (val << SBTML_SICF_SHIFT);
277 W_SBREG(sii, &sb->sbtmstatelow, w);
280 /* set/clear core-specific control flags */
/* Update the SICF_* control-flag bits in sbtmstatelow and return the new
 * flag value; the final readback also ensures the write has completed.
 */
282 sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
289 sb = REGS2SB(sii->curmap);
/* 'val' must not set bits outside 'mask'. */
291 ASSERT((val & ~mask) == 0);
295 w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
296 (val << SBTML_SICF_SHIFT);
297 W_SBREG(sii, &sb->sbtmstatelow, w);
300 /* return the new value
301 * for write operation, the following readback ensures the completion of write opration.
303 return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
306 /* set/clear core-specific status flags */
/* Update the SISF_* status-flag bits in sbtmstatehigh and return the new
 * flag value.
 */
308 sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
315 sb = REGS2SB(sii->curmap);
/* 'val' must fit within 'mask', and only writable SISF bits may be touched. */
317 ASSERT((val & ~mask) == 0);
318 ASSERT((mask & ~SISF_CORE_BITS) == 0);
322 w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
323 (val << SBTMH_SISF_SHIFT);
324 W_SBREG(sii, &sb->sbtmstatehigh, w);
327 /* return the new value */
328 return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
/* Return TRUE iff the current core is up: out of reset, not rejecting
 * transactions, and with its clock enabled.
 */
332 sb_iscoreup(si_t *sih)
338 sb = REGS2SB(sii->curmap);
340 return ((R_SBREG(sii, &sb->sbtmstatelow) &
341 (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
342 (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
346 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
347 * switch back to the original core, and return the new value.
349 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
351 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
352 * and (on newer pci cores) chipcommon registers.
/* NOTE(review): this listing is missing intermediate lines (declarations of
 * 'r'/'w'/'origidx'/'intr_val', several closing braces, the fast-path flag,
 * and the return statement), so the control flow shown is incomplete.
 */
355 sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
362 si_info_t *sii = SI_INFO(sih);
363 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
365 ASSERT(GOODIDX(coreidx));
366 ASSERT(regoff < SI_CORE_SIZE);
367 ASSERT((val & ~mask) == 0);
369 if (coreidx >= SI_MAXCORES)
372 if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
373 /* If internal bus, we can always get at everything */
375 /* map if does not exist */
376 if (!cores_info->regs[coreidx]) {
377 cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
379 ASSERT(GOODREGS(cores_info->regs[coreidx]));
381 r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
382 } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
383 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
385 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
386 /* Chipc registers are mapped at 12KB */
389 r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
390 } else if (sii->pub.buscoreidx == coreidx) {
391 /* pci registers are at either in the last 2KB of an 8KB window
392 * or, in pcie and pci rev 13 at 8KB
396 r = (uint32 *)((char *)sii->curmap +
397 PCI_16KB0_PCIREGS_OFFSET + regoff);
399 r = (uint32 *)((char *)sii->curmap +
400 ((regoff >= SBCONFIGOFF) ?
401 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
/* Slow path: no direct mapping -- disable interrupts and switch cores. */
407 INTR_OFF(sii, intr_val);
409 /* save current core index */
410 origidx = si_coreidx(&sii->pub);
413 r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
/* sbconfig-space offsets must use the bus-aware sbreg accessors. */
419 if (regoff >= SBCONFIGOFF) {
420 w = (R_SBREG(sii, r) & ~mask) | val;
423 w = (R_REG(sii->osh, r) & ~mask) | val;
424 W_REG(sii->osh, r, w);
429 if (regoff >= SBCONFIGOFF)
/* BCM5354 chipcommon watchdog: an extra readback is issued after the
 * write (a chip-specific quirk; the explanatory lines are missing here). */
432 if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
433 (coreidx == SI_CC_IDX) &&
434 (regoff == OFFSETOF(chipcregs_t, watchdog))) {
437 w = R_REG(sii->osh, r);
441 /* restore core index */
442 if (origidx != coreidx)
443 sb_setcoreidx(&sii->pub, origidx);
445 INTR_RESTORE(sii, intr_val);
452 * If there is no need for fiddling with interrupts or core switches (typically silicon
453 * back plane registers, pci registers and chipcommon registers), this function
454 * returns the register offset on this core to a mapped address. This address can
455 * be used for W_REG/R_REG directly.
457 * For accessing registers that would need a core switch, this function will return
/* NOTE(review): the continuation of the comment above and the declaration
 * of 'r', the closing braces, and the return statement are on lines missing
 * from this listing.  The mapping logic mirrors sb_corereg() above.
 */
461 sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
465 si_info_t *sii = SI_INFO(sih);
466 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
468 ASSERT(GOODIDX(coreidx));
469 ASSERT(regoff < SI_CORE_SIZE);
471 if (coreidx >= SI_MAXCORES)
474 if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
475 /* If internal bus, we can always get at everything */
477 /* map if does not exist */
478 if (!cores_info->regs[coreidx]) {
479 cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
481 ASSERT(GOODREGS(cores_info->regs[coreidx]));
483 r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
484 } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
485 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
487 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
488 /* Chipc registers are mapped at 12KB */
491 r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
492 } else if (sii->pub.buscoreidx == coreidx) {
493 /* pci registers are at either in the last 2KB of an 8KB window
494 * or, in pcie and pci rev 13 at 8KB
498 r = (uint32 *)((char *)sii->curmap +
499 PCI_16KB0_PCIREGS_OFFSET + regoff);
501 r = (uint32 *)((char *)sii->curmap +
502 ((regoff >= SBCONFIGOFF) ?
503 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
514 /* Scan the enumeration space to find all cores starting from the given
515 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
516 * is the default core address at chip POR time and 'regs' is the virtual
517 * address that the default core is mapped at. 'ncores' is the number of
518 * cores expected on bus 'sbba'. It returns the total number of cores
519 * starting from bus 'sbba', inclusive.
/* Recursion depth limit: a primary bus plus at most one bridged bus. */
521 #define SB_MAXBUSES 2
523 _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
/* NOTE(review): declarations of 'i'/'next'/'ncc' and several braces/return
 * paths are on lines missing from this listing.
 */
528 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
530 if (bus >= SB_MAXBUSES) {
531 SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
534 SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
536 /* Scan all cores on the bus starting from core 0.
537 * Core addresses must be contiguous on each bus.
539 for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
540 cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
542 /* keep and reuse the initial register mapping */
543 if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
544 SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
545 cores_info->regs[next] = regs;
548 /* change core to 'next' and read its coreid */
549 sii->curmap = _sb_setcoreidx(sii, next);
552 cores_info->coreid[next] = sb_coreid(&sii->pub);
554 /* core specific processing... */
555 /* chipc provides # cores */
556 if (cores_info->coreid[next] == CC_CORE_ID) {
557 chipcregs_t *cc = (chipcregs_t *)sii->curmap;
558 uint32 ccrev = sb_corerev(&sii->pub);
560 /* determine numcores - this is the total # cores in the chip */
561 if (((ccrev == 4) || (ccrev >= 6))) {
563 numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
/* Older chipcommon revs don't report a core count; use per-chip tables. */
567 uint chip = CHIPID(sii->pub.chip);
569 if (chip == BCM4306_CHIP_ID) /* < 4306c0 */
571 else if (chip == BCM4704_CHIP_ID)
573 else if (chip == BCM5365_CHIP_ID)
576 SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
582 SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
583 sii->pub.issim ? "QT" : ""));
585 /* scan bridged SB(s) and add results to the end of the list */
586 else if (cores_info->coreid[next] == OCP_CORE_ID) {
587 sbconfig_t *sb = REGS2SB(sii->curmap);
588 uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
591 sii->numcores = next + 1;
/* A bridged bus must live in the enumeration space and not duplicate an
 * already-discovered core address. */
593 if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
596 if (_sb_coreidx(sii, nsbba) != BADIDX)
/* Bridged core count is encoded in sbtmstatehigh bits [19:16]. */
599 nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
600 nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
601 if (sbba == SI_ENUM_BASE)
607 SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
609 sii->numcores = i + ncc;
610 return sii->numcores;
613 /* scan the sb enumerated space to identify all cores */
/* Entry point for core discovery: records the backplane (Sonics) revision,
 * saves the POR-default core address, then recursively scans from
 * SI_ENUM_BASE.  NOTE(review): local declarations, the 'devid' handling (if
 * any), and the post-scan validation are on lines missing from this listing.
 */
615 sb_scan(si_t *sih, void *regs, uint devid)
619 si_info_t *sii = SI_INFO(sih);
621 sb = REGS2SB(sii->curmap);
623 sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
625 /* Save the current core info and validate it later till we know
626 * for sure what is good and what is bad.
628 origsba = _sb_coresba(sii);
630 /* scan all SB(s) starting from SI_ENUM_BASE */
631 sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
635 * This function changes logical "focus" to the indicated core;
636 * must be called with interrupts off.
637 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
640 sb_setcoreidx(si_t *sih, uint coreidx)
642 si_info_t *sii = SI_INFO(sih);
/* Out-of-range index: the (missing) line presumably returns NULL -- confirm. */
644 if (coreidx >= sii->numcores)
648 * If the user has provided an interrupt mask enabled function,
649 * then assert interrupts are disabled before switching the core.
651 ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
/* Perform the bus-specific switch and cache the new focus. */
653 sii->curmap = _sb_setcoreidx(sii, coreidx);
654 sii->curidx = coreidx;
656 return (sii->curmap);
659 /* This function changes the logical "focus" to the indicated core.
660 * Return the current core's virtual address.
/* NOTE(review): case labels, breaks, the default case and the final return
 * of 'regs' are on lines missing from this listing.
 */
663 _sb_setcoreidx(si_info_t *sii, uint coreidx)
665 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
666 uint32 sbaddr = cores_info->coresba[coreidx];
669 switch (BUSTYPE(sii->pub.bustype)) {
/* SI_BUS: lazily create (and cache) a register mapping for the core. */
672 if (!cores_info->regs[coreidx]) {
673 cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
674 ASSERT(GOODREGS(cores_info->regs[coreidx]));
676 regs = cores_info->regs[coreidx];
680 /* point bar0 window */
681 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
/* PCMCIA: program address bits [31:12] into the three attribute registers
 * (inverse of the read sequence in _sb_coresba). */
686 uint8 tmp = (sbaddr >> 12) & 0x0f;
687 OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
688 tmp = (sbaddr >> 16) & 0xff;
689 OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
690 tmp = (sbaddr >> 24) & 0xff;
691 OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
/* Direct-mapped bus: the core address is itself the register pointer. */
699 if (!cores_info->regs[coreidx]) {
700 cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
701 ASSERT(GOODREGS(cores_info->regs[coreidx]));
703 regs = cores_info->regs[coreidx];
717 /* Return the address of sbadmatch0/1/2/3 register */
718 static volatile uint32 *
719 sb_admatch(si_info_t *sii, uint asidx)
722 volatile uint32 *addrm;
724 sb = REGS2SB(sii->curmap);
/* Select the admatch register for address-space index 'asidx' (0..3).
 * NOTE(review): the switch/case/break scaffolding is on lines missing from
 * this listing. */
728 addrm = &sb->sbadmatch0;
732 addrm = &sb->sbadmatch1;
736 addrm = &sb->sbadmatch2;
740 addrm = &sb->sbadmatch3;
744 SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
751 /* Return the number of address spaces in current core */
753 sb_numaddrspaces(si_t *sih)
759 sb = REGS2SB(sii->curmap);
761 /* + 1 because of enumeration space */
762 return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
765 /* Return the address of the nth address space in the current core */
/* Decodes the base address out of the core's sbadmatch<asidx> register. */
767 sb_addrspace(si_t *sih, uint asidx)
773 return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
776 /* Return the size of the nth address space in the current core */
/* Decodes the window size out of the core's sbadmatch<asidx> register. */
778 sb_addrspacesize(si_t *sih, uint asidx)
784 return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
788 /* do buffered registers update */
/* Presumably sb_commit() -- the function header line is missing from this
 * listing.  Flushes buffered register writes by broadcasting SB_COMMIT
 * through chipcommon, with core focus and interrupt state preserved.
 * NOTE(review): the else branch mentioned in the comment at line 802 (the
 * pci fallback) is on lines missing from this listing.
 */
792 si_info_t *sii = SI_INFO(sih);
793 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
797 origidx = sii->curidx;
798 ASSERT(GOODIDX(origidx));
800 INTR_OFF(sii, intr_val);
802 /* switch over to chipcommon core if there is one, else use pci */
803 if (sii->pub.ccrev != NOREV) {
804 chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
805 ASSERT(ccregs != NULL);
807 /* do the buffer registers update */
808 W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
809 W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
813 /* restore core index */
814 sb_setcoreidx(sih, origidx);
815 INTR_RESTORE(sii, intr_val);
/* Put the current core into reset, leaving the caller-supplied SICF 'bits'
 * set in sbtmstatelow.  Sequence: assert target reject, wait for busy to
 * clear, optionally assert initiator reject, then assert reset with clocks
 * forced on, and finally leave reset+reject with clocks off.
 * NOTE(review): early-return lines and some delays are missing from this
 * listing, so the exact sequencing between steps is not fully visible.
 */
819 sb_core_disable(si_t *sih, uint32 bits)
822 volatile uint32 dummy;
827 ASSERT(GOODREGS(sii->curmap));
828 sb = REGS2SB(sii->curmap);
830 /* if core is already in reset, just return */
831 if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
834 /* if clocks are not enabled, put into reset and return */
835 if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
838 /* set target reject and spin until busy is clear (preserve core-specific bits) */
839 OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
/* Readback flushes the posted write before the spinwait. */
840 dummy = R_SBREG(sii, &sb->sbtmstatelow);
841 BCM_REFERENCE(dummy);
843 SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
844 if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
845 SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
/* If the core is also an initiator, reject its outgoing transactions too. */
847 if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
848 OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
849 dummy = R_SBREG(sii, &sb->sbimstate);
850 BCM_REFERENCE(dummy);
852 SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
855 /* set reset and reject while enabling the clocks */
856 W_SBREG(sii, &sb->sbtmstatelow,
857 (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
858 SBTML_REJ | SBTML_RESET));
859 dummy = R_SBREG(sii, &sb->sbtmstatelow);
860 BCM_REFERENCE(dummy);
863 /* don't forget to clear the initiator reject bit */
864 if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
865 AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
868 /* leave reset and reject asserted */
869 W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
873 /* reset and re-enable a core
875 * bits - core specific bits that are set during and after reset sequence
876 * resetbits - core specific bits that are set only during reset sequence
/* NOTE(review): delay lines (presumably OSL_DELAY between the reset steps)
 * are missing from this listing -- confirm against the full source.
 */
879 sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
883 volatile uint32 dummy;
886 ASSERT(GOODREGS(sii->curmap));
887 sb = REGS2SB(sii->curmap);
890 * Must do the disable sequence first to work for arbitrary current core state.
892 sb_core_disable(sih, (bits | resetbits));
895 * Now do the initialization sequence.
898 /* set reset while enabling the clock and forcing them on throughout the core */
899 W_SBREG(sii, &sb->sbtmstatelow,
900 (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
902 dummy = R_SBREG(sii, &sb->sbtmstatelow);
903 BCM_REFERENCE(dummy);
/* Clear any latched serror/inband-error/timeout state before releasing reset. */
906 if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
907 W_SBREG(sii, &sb->sbtmstatehigh, 0);
909 if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
910 AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
913 /* clear reset and allow it to propagate throughout the core */
914 W_SBREG(sii, &sb->sbtmstatelow,
915 ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
916 dummy = R_SBREG(sii, &sb->sbtmstatelow);
917 BCM_REFERENCE(dummy);
920 /* leave clock enabled */
921 W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
922 dummy = R_SBREG(sii, &sb->sbtmstatelow);
923 BCM_REFERENCE(dummy);
928 * Set the initiator timeout for the "master core".
929 * The master core is defined to be the core in control
930 * of the chip and so it issues accesses to non-memory
931 * locations (Because of dma *any* core can access memory).
933 * The routine uses the bus to decide who is the master:
936 * PCI_BUS => pci or pcie
937 * PCMCIA_BUS => pcmcia
940 * This routine exists so callers can disable initiator
941 * timeouts so accesses to very slow devices like otp
942 * won't cause an abort. The routine allows arbitrary
943 * settings of the service and request timeouts, though.
945 * Returns the timeout state before changing it or -1
/* Combined request/service timeout field in sbimconfiglow. */
949 #define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
952 sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
954 si_info_t *sii = SI_INFO(sih);
955 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
958 uint32 tmp, ret = 0xffffffff;
/* Reject 'to' values with bits outside the timeout field; the (missing)
 * line presumably returns the 0xffffffff error value -- confirm. */
962 if ((to & ~TO_MASK) != 0)
965 /* Figure out the master core */
967 switch (BUSTYPE(sii->pub.bustype)) {
969 idx = sii->pub.buscoreidx;
978 idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
981 idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
/* Switch to the master core, swap in the new timeout, restore focus.
 * NOTE(review): the capture of 'ret' from 'tmp' and the return statement
 * are on lines missing from this listing. */
990 INTR_OFF(sii, intr_val);
991 origidx = si_coreidx(sih);
993 sb = REGS2SB(sb_setcoreidx(sih, idx));
995 tmp = R_SBREG(sii, &sb->sbimconfiglow);
997 W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
1000 sb_setcoreidx(sih, origidx);
1001 INTR_RESTORE(sii, intr_val);
/* Decode the base address from an sbadmatch register value; the decode
 * width depends on the admatch type field (0/1/2).  Negative address
 * matching (SBAM_ADNEG) is not supported for types 1 and 2.
 * NOTE(review): the type-0 branch header and the final return are on lines
 * missing from this listing.
 */
1006 sb_base(uint32 admatch)
1011 type = admatch & SBAM_TYPE_MASK;
1017 base = admatch & SBAM_BASE0_MASK;
1018 } else if (type == 1) {
1019 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1020 base = admatch & SBAM_BASE1_MASK;
1021 } else if (type == 2) {
1022 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1023 base = admatch & SBAM_BASE2_MASK;
/* Decode the window size (a power of two) from an sbadmatch register
 * value; the size-exponent field location depends on the admatch type.
 * NOTE(review): the type-0 branch header and the final return are on lines
 * missing from this listing.
 */
1030 sb_size(uint32 admatch)
1035 type = admatch & SBAM_TYPE_MASK;
1041 size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
1042 } else if (type == 1) {
1043 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1044 size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
1045 } else if (type == 2) {
1046 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1047 size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
1053 #if defined(BCMDBG_PHYDUMP)
1054 /* print interesting sbconfig registers */
/* Debug-only: walks every discovered core and prints its key sbconfig
 * registers into 'b'.  Core focus and interrupt state are preserved.
 */
1056 sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
1059 uint origidx, i, intr_val = 0;
1060 si_info_t *sii = SI_INFO(sih);
1061 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1063 origidx = sii->curidx;
1065 INTR_OFF(sii, intr_val);
1067 for (i = 0; i < sii->numcores; i++) {
1068 sb = REGS2SB(sb_setcoreidx(sih, i));
1070 bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);
/* The imerrlog registers only exist on Sonics revisions after 2.2. */
1072 if (sii->pub.socirev > SONICS_2_2)
1073 bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
1074 sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
1075 sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));
1077 bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
1078 "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
1079 R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
1080 R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
1081 R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
1084 sb_setcoreidx(sih, origidx);
1085 INTR_RESTORE(sii, intr_val);