2 * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
4 * Copyright (C) 1999-2016, Broadcom Corporation
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
25 * <<Broadcom-WL-IPTag/Proprietary,Open:>>
27 * $Id: bcmsdh_sdmmc.c 591104 2015-10-07 04:45:18Z $
32 #include <bcmendian.h>
35 #include <sdio.h> /* SDIO Device and Protocol Specs */
36 #include <sdioh.h> /* Standard SDIO Host Controller Specification */
37 #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
38 #include <sdiovar.h> /* ioctl/iovars */
40 #include <linux/mmc/core.h>
41 #include <linux/mmc/host.h>
42 #include <linux/mmc/card.h>
43 #include <linux/mmc/sdio_func.h>
44 #include <linux/mmc/sdio_ids.h>
46 #include <dngl_stats.h>
49 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
50 #include <linux/suspend.h>
51 extern volatile bool dhd_mmc_suspend;
53 #include "bcmsdh_sdmmc.h"
56 extern int sdio_function_init(void);
57 extern void sdio_function_cleanup(void);
58 #endif /* BCMSDH_MODULE */
/*
 * Forward declarations, build-time tunables and module-scope state for the
 * bcmsdh_sdmmc glue layer.
 *
 * NOTE(review): this file is a sampled excerpt (original line numbers are
 * fused into the text and interior lines are elided), so #if/#endif pairs
 * and definitions may appear unbalanced below.
 */
60 #if !defined(OOB_INTR_ONLY)
61 static void IRQHandler(struct sdio_func *func);
62 static void IRQHandlerF2(struct sdio_func *func);
63 #endif /* !defined(OOB_INTR_ONLY) */
64 static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
65 #if defined(ENABLE_INSMOD_NO_FW_LOAD)
66 extern int sdio_reset_comm(struct mmc_card *card);
/* Local definition of sdio_reset_comm() -- body elided in this excerpt. */
68 int sdio_reset_comm(struct mmc_card *card)
73 #ifdef GLOBAL_SDMMC_INSTANCE
74 extern PBCMSDH_SDMMC_INSTANCE gInstance;
/* Function-2 (data channel) block size; overridable at build time. */
77 #define DEFAULT_SDIO_F2_BLKSIZE 512
78 #ifndef CUSTOM_SDIO_F2_BLKSIZE
79 #define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE
/* CMD53 (IO_RW_EXTENDED) carries a 9-bit block count, so at most 511 blocks. */
82 #define MAX_IO_RW_EXTENDED_BLK 511
84 uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
85 uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
86 uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
88 uint sd_power = 1; /* Default to SD Slot powered ON */
89 uint sd_clock = 1; /* Default to SD Clock turned ON */
90 uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
91 uint sd_msglevel = 0x01; /* Default driver message level */
92 uint sd_use_dma = TRUE;
94 #ifndef CUSTOM_RXCHAIN
95 #define CUSTOM_RXCHAIN 0
/* Per-entry-point power-management resume wait objects. */
98 DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
99 DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
100 DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
101 DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
/* DMA requires 4-byte-aligned buffers. */
103 #define DMA_ALIGN_MASK 0x03
/* Max retries when aborting a transfer via the F0 I/O-abort register. */
104 #define MMC_SDIO_ABORT_RETRY_LIMIT 5
106 int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
/*
 * Read and cache the card's common CIS pointer and each function's CIS
 * pointer (from the per-function FBR area), then enable SDIO function 1.
 * The MMC host must be claimed around the enable call.
 * NOTE(review): local declarations, the error check guarding the final
 * sd_err(), and the function close are elided in this excerpt.
 */
109 sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
115 sd_trace(("%s\n", __FUNCTION__));
117 /* Get the Card's common CIS address */
118 sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
119 sd->func_cis_ptr[0] = sd->com_cis_ptr;
120 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
122 /* Get the Card's function CIS (for each function) */
123 for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
124 func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
125 sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
126 sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
127 __FUNCTION__, func, sd->func_cis_ptr[func]));
130 sd->func_cis_ptr[0] = sd->com_cis_ptr;
131 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
133 /* Enable Function 1 */
134 sdio_claim_host(sd->func[1]);
135 err_ret = sdio_enable_func(sd->func[1]);
136 sdio_release_host(sd->func[1]);
/* NOTE(review): the "if (err_ret)" guard for this error print is elided. */
138 sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
145 * Public entry points & extern's
/*
 * sdioh_attach: allocate and initialize the host-glue state (sdioh_info_t).
 * Wires up func[0] (a fake F0 handle), func[1] and func[2] from the MMC
 * card, sets the F1/F2 block sizes and enables the card functions.
 * Returns the new sdioh_info_t, or NULL on failure (allocation freed on
 * the elided error path).
 * NOTE(review): interior lines (error-path gotos, returns, braces) are
 * elided in this excerpt.
 */
147 extern sdioh_info_t *
148 sdioh_attach(osl_t *osh, struct sdio_func *func)
150 sdioh_info_t *sd = NULL;
153 sd_trace(("%s\n", __FUNCTION__));
156 sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
160 if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
161 sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
164 bzero((char *)sd, sizeof(sdioh_info_t));
/* func[0] points at a locally-owned "fake" F0 so CCCR accesses have a handle. */
166 sd->fake_func0.num = 0;
167 sd->fake_func0.card = func->card;
168 sd->func[0] = &sd->fake_func0;
169 #ifdef GLOBAL_SDMMC_INSTANCE
171 sd->func[1] = gInstance->func[1];
173 sd->func[1] = func->card->sdio_func[0];
175 sd->func[2] = func->card->sdio_func[1];
176 #ifdef GLOBAL_SDMMC_INSTANCE
177 sd->func[func->num] = func;
180 sd->sd_blockmode = TRUE;
181 sd->use_client_ints = TRUE;
182 sd->client_block_size[0] = 64;
183 sd->use_rxchain = CUSTOM_RXCHAIN;
184 if (sd->func[1] == NULL || sd->func[2] == NULL) {
185 sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
188 sdio_set_drvdata(sd->func[1], sd);
/* F1 uses a fixed 64-byte block size. */
190 sdio_claim_host(sd->func[1]);
191 sd->client_block_size[1] = 64;
192 err_ret = sdio_set_block_size(sd->func[1], 64);
193 sdio_release_host(sd->func[1]);
195 sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
/* F2 uses the tunable sd_f2_blocksize (CUSTOM_SDIO_F2_BLKSIZE). */
199 sdio_claim_host(sd->func[2]);
200 sd->client_block_size[2] = sd_f2_blocksize;
201 printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
202 err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
203 sdio_release_host(sd->func[2]);
205 sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
206 sd_f2_blocksize, err_ret));
210 sdioh_sdmmc_card_enablefuncs(sd);
212 sd_trace(("%s: Done\n", __FUNCTION__));
/* Error path: release the partially-initialized state. */
216 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
/*
 * sdioh_detach: disable SDIO functions 2 then 1 (claiming the host around
 * each call) and free the sdioh_info_t allocated by sdioh_attach().
 * Always reports success.
 */
222 sdioh_detach(osl_t *osh, sdioh_info_t *sd)
224 sd_trace(("%s\n", __FUNCTION__));
228 /* Disable Function 2 */
230 sdio_claim_host(sd->func[2]);
231 sdio_disable_func(sd->func[2]);
232 sdio_release_host(sd->func[2]);
235 /* Disable Function 1 */
237 sdio_claim_host(sd->func[1]);
238 sdio_disable_func(sd->func[1]);
239 sdio_release_host(sd->func[1]);
245 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
247 return SDIOH_API_RC_SUCCESS;
250 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
/*
 * sdioh_enable_func_intr: read-modify-write the CCCR interrupt-enable
 * register to enable F1/F2 function interrupts while clearing the master
 * enable bit (interrupts are delivered out-of-band under HW_OOB).
 * Returns SDIOH_API_RC_SUCCESS, or SDIOH_API_RC_FAIL on a register
 * access error or a missing F0 handle.
 */
253 sdioh_enable_func_intr(sdioh_info_t *sd)
258 if (sd->func[0] == NULL) {
259 sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
260 return SDIOH_API_RC_FAIL;
263 sdio_claim_host(sd->func[0]);
264 reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
266 sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
267 sdio_release_host(sd->func[0]);
268 return SDIOH_API_RC_FAIL;
270 /* Enable F1 and F2 interrupts, clear master enable */
271 reg &= ~INTR_CTL_MASTER_EN;
272 reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
273 sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
274 sdio_release_host(sd->func[0]);
277 sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
278 return SDIOH_API_RC_FAIL;
281 return SDIOH_API_RC_SUCCESS;
/*
 * sdioh_disable_func_intr: counterpart of sdioh_enable_func_intr() --
 * clears the F1/F2 enable bits in the CCCR interrupt-enable register
 * (and, per the elided logic, the master enable along with the last
 * function bit). Returns SDIOH_API_RC_FAIL on any register access error.
 */
285 sdioh_disable_func_intr(sdioh_info_t *sd)
290 if (sd->func[0] == NULL) {
291 sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
292 return SDIOH_API_RC_FAIL;
295 sdio_claim_host(sd->func[0]);
296 reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
298 sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
299 sdio_release_host(sd->func[0]);
300 return SDIOH_API_RC_FAIL;
302 reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
303 /* Disable master interrupt with the last function interrupt */
306 sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
307 sdio_release_host(sd->func[0]);
310 sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
311 return SDIOH_API_RC_FAIL;
314 return SDIOH_API_RC_SUCCESS;
316 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
318 /* Configure callback to client when we receive a client interrupt */
/*
 * sdioh_interrupt_register: record the client interrupt callback and, in
 * in-band mode (!OOB_INTR_ONLY), claim the F1/F2 IRQs from the MMC core
 * so IRQHandler/IRQHandlerF2 dispatch to it.  Under HW_OOB the CCCR
 * function-interrupt enables are set instead.  Fails if fn is NULL.
 */
320 sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
322 sd_trace(("%s: Entering\n", __FUNCTION__));
324 sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
325 return SDIOH_API_RC_FAIL;
327 #if !defined(OOB_INTR_ONLY)
328 sd->intr_handler = fn;
329 sd->intr_handler_arg = argh;
330 sd->intr_handler_valid = TRUE;
332 /* register and unmask irq */
334 sdio_claim_host(sd->func[2]);
335 sdio_claim_irq(sd->func[2], IRQHandlerF2);
336 sdio_release_host(sd->func[2]);
340 sdio_claim_host(sd->func[1]);
341 sdio_claim_irq(sd->func[1], IRQHandler);
342 sdio_release_host(sd->func[1]);
344 #elif defined(HW_OOB)
345 sdioh_enable_func_intr(sd);
346 #endif /* !defined(OOB_INTR_ONLY) */
348 return SDIOH_API_RC_SUCCESS;
/*
 * sdioh_interrupt_deregister: undo sdioh_interrupt_register() -- release
 * the F1 and F2 IRQs back to the MMC core and clear the stored client
 * callback.  Under HW_OOB the CCCR function interrupts are disabled
 * instead (only when the firmware was loaded at driver load).
 */
352 sdioh_interrupt_deregister(sdioh_info_t *sd)
354 sd_trace(("%s: Entering\n", __FUNCTION__));
356 #if !defined(OOB_INTR_ONLY)
358 /* register and unmask irq */
359 sdio_claim_host(sd->func[1]);
360 sdio_release_irq(sd->func[1]);
361 sdio_release_host(sd->func[1]);
365 /* Claim host controller F2 */
366 sdio_claim_host(sd->func[2]);
367 sdio_release_irq(sd->func[2]);
368 /* Release host controller F2 */
369 sdio_release_host(sd->func[2]);
372 sd->intr_handler_valid = FALSE;
373 sd->intr_handler = NULL;
374 sd->intr_handler_arg = NULL;
375 #elif defined(HW_OOB)
376 if (dhd_download_fw_on_driverload)
377 sdioh_disable_func_intr(sd);
378 #endif /* !defined(OOB_INTR_ONLY) */
379 return SDIOH_API_RC_SUCCESS;
/* sdioh_interrupt_query: report whether client interrupts are enabled. */
383 sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
385 sd_trace(("%s: Entering\n", __FUNCTION__));
386 *onoff = sd->client_intr_enabled;
387 return SDIOH_API_RC_SUCCESS;
390 #if defined(DHD_DEBUG)
/* Debug helper -- body elided in this excerpt. */
392 sdioh_interrupt_pending(sdioh_info_t *sd)
/* sdioh_query_iofnum: number of I/O functions the attached card exposes. */
399 sdioh_query_iofnum(sdioh_info_t *sd)
401 return sd->num_funcs;
/*
 * iovar dispatch table for sdioh_iovar_op(): name, id, flags, type and
 * minimum buffer length per entry.
 * NOTE(review): the NULL terminator entry appears to be elided in this
 * excerpt -- confirm against the full source.
 */
424 const bcm_iovar_t sdioh_iovars[] = {
425 {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
426 {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 },
427 {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
428 {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 },
429 {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
430 {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
431 {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
432 {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
433 {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
434 {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
435 {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
436 {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
437 {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
438 {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0 },
439 {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 },
/*
 * sdioh_iovar_op: generic get/set dispatcher for the sdioh_iovars table.
 * Looks the name up, length-checks the caller's buffer, copies params into
 * the int_val convenience variable, then switches on IOV_GVAL/IOV_SVAL of
 * the iovar id.  Gets bcopy the value into 'arg'; sets consume 'int_val'.
 * NOTE(review): this excerpt elides the switch statement itself, local
 * declarations, break statements and the exit label -- comments below
 * describe only the visible case bodies.
 */
444 sdioh_iovar_op(sdioh_info_t *si, const char *name,
445 void *params, int plen, void *arg, int len, bool set)
447 const bcm_iovar_t *vi = NULL;
457 /* Get must have return space; Set does not take qualifiers */
458 ASSERT(set || (arg && len));
459 ASSERT(!set || (!params && !plen));
461 sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
463 if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
464 bcmerror = BCME_UNSUPPORTED;
468 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
471 /* Set up params so get and set can share the convenience variables */
472 if (params == NULL) {
477 if (vi->type == IOVT_VOID)
479 else if (vi->type == IOVT_BUFFER)
482 val_size = sizeof(int);
484 if (plen >= (int)sizeof(int_val))
485 bcopy(params, &int_val, sizeof(int_val));
487 bool_val = (int_val != 0) ? TRUE : FALSE;
488 BCM_REFERENCE(bool_val);
490 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
492 case IOV_GVAL(IOV_MSGLEVEL):
493 int_val = (int32)sd_msglevel;
494 bcopy(&int_val, arg, val_size);
497 case IOV_SVAL(IOV_MSGLEVEL):
498 sd_msglevel = int_val;
501 case IOV_GVAL(IOV_BLOCKMODE):
502 int_val = (int32)si->sd_blockmode;
503 bcopy(&int_val, arg, val_size);
506 case IOV_SVAL(IOV_BLOCKMODE):
507 si->sd_blockmode = (bool)int_val;
508 /* Haven't figured out how to make non-block mode with DMA */
511 case IOV_GVAL(IOV_BLOCKSIZE):
512 if ((uint32)int_val > si->num_funcs) {
513 bcmerror = BCME_BADARG;
516 int_val = (int32)si->client_block_size[int_val];
517 bcopy(&int_val, arg, val_size);
/* Set blocksize: value encodes (func << 16) | size. */
520 case IOV_SVAL(IOV_BLOCKSIZE):
522 uint func = ((uint32)int_val >> 16);
523 uint blksize = (uint16)int_val;
526 if (func > si->num_funcs) {
527 bcmerror = BCME_BADARG;
/* Per-function maximum block size (F0=32, chip-specific for F1/F2). */
532 case 0: maxsize = 32; break;
533 case 1: maxsize = BLOCK_SIZE_4318; break;
534 case 2: maxsize = BLOCK_SIZE_4328; break;
535 default: maxsize = 0;
537 if (blksize > maxsize) {
538 bcmerror = BCME_BADARG;
546 si->client_block_size[func] = blksize;
548 #ifdef USE_DYNAMIC_F2_BLKSIZE
549 if (si->func[func] == NULL) {
550 sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
551 bcmerror = BCME_NORESOURCE;
554 sdio_claim_host(si->func[func]);
555 bcmerror = sdio_set_block_size(si->func[func], blksize);
557 sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
558 __FUNCTION__, func, blksize, bcmerror));
559 sdio_release_host(si->func[func]);
560 #endif /* USE_DYNAMIC_F2_BLKSIZE */
564 case IOV_GVAL(IOV_RXCHAIN):
565 int_val = (int32)si->use_rxchain;
566 bcopy(&int_val, arg, val_size);
569 case IOV_GVAL(IOV_DMA):
570 int_val = (int32)si->sd_use_dma;
571 bcopy(&int_val, arg, val_size);
574 case IOV_SVAL(IOV_DMA):
575 si->sd_use_dma = (bool)int_val;
578 case IOV_GVAL(IOV_USEINTS):
579 int_val = (int32)si->use_client_ints;
580 bcopy(&int_val, arg, val_size);
583 case IOV_SVAL(IOV_USEINTS):
584 si->use_client_ints = (bool)int_val;
585 if (si->use_client_ints)
586 si->intmask |= CLIENT_INTR;
588 si->intmask &= ~CLIENT_INTR;
592 case IOV_GVAL(IOV_DIVISOR):
593 int_val = (uint32)sd_divisor;
594 bcopy(&int_val, arg, val_size);
597 case IOV_SVAL(IOV_DIVISOR):
598 sd_divisor = int_val;
601 case IOV_GVAL(IOV_POWER):
602 int_val = (uint32)sd_power;
603 bcopy(&int_val, arg, val_size);
/* Sets of power/clock/mode/highspeed have no visible action here. */
606 case IOV_SVAL(IOV_POWER):
610 case IOV_GVAL(IOV_CLOCK):
611 int_val = (uint32)sd_clock;
612 bcopy(&int_val, arg, val_size);
615 case IOV_SVAL(IOV_CLOCK):
619 case IOV_GVAL(IOV_SDMODE):
620 int_val = (uint32)sd_sdmode;
621 bcopy(&int_val, arg, val_size);
624 case IOV_SVAL(IOV_SDMODE):
628 case IOV_GVAL(IOV_HISPEED):
629 int_val = (uint32)sd_hiok;
630 bcopy(&int_val, arg, val_size);
633 case IOV_SVAL(IOV_HISPEED):
637 case IOV_GVAL(IOV_NUMINTS):
638 int_val = (int32)si->intrcount;
639 bcopy(&int_val, arg, val_size);
642 case IOV_GVAL(IOV_NUMLOCALINTS):
644 bcopy(&int_val, arg, val_size);
/* Host register access is stubbed: only the access width is reported. */
647 case IOV_GVAL(IOV_HOSTREG):
649 sdreg_t *sd_ptr = (sdreg_t *)params;
651 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
652 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
653 bcmerror = BCME_BADARG;
657 sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
658 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
660 if (sd_ptr->offset & 1)
661 int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
662 else if (sd_ptr->offset & 2)
663 int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
665 int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
667 bcopy(&int_val, arg, sizeof(int_val));
671 case IOV_SVAL(IOV_HOSTREG):
673 sdreg_t *sd_ptr = (sdreg_t *)params;
675 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
676 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
677 bcmerror = BCME_BADARG;
681 sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
682 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
/* Device register access goes through the CMD52 config read/write path. */
687 case IOV_GVAL(IOV_DEVREG):
689 sdreg_t *sd_ptr = (sdreg_t *)params;
692 if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
693 bcmerror = BCME_SDIO_ERROR;
698 bcopy(&int_val, arg, sizeof(int_val));
702 case IOV_SVAL(IOV_DEVREG):
704 sdreg_t *sd_ptr = (sdreg_t *)params;
705 uint8 data = (uint8)sd_ptr->value;
707 if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
708 bcmerror = BCME_SDIO_ERROR;
715 bcmerror = BCME_UNSUPPORTED;
723 #if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
/*
 * sdioh_enable_hw_oob_intr: program the Broadcom-specific CCCR SEPINT
 * register to enable/disable the hardware out-of-band interrupt pin
 * (active-high output unless HW_OOB_LOW_LEVEL).  Writes via a CMD52 to F0.
 */
726 sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
732 #ifdef HW_OOB_LOW_LEVEL
733 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
735 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
738 data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */
740 status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
743 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
/* sdioh_cfg_read: single-byte CMD52 config-space read via sdioh_request_byte(). */
746 sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
749 /* No lock needed since sdioh_request_byte does locking */
750 status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
/* sdioh_cfg_write: single-byte CMD52 config-space write via sdioh_request_byte(). */
755 sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
757 /* No lock needed since sdioh_request_byte does locking */
759 status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
764 sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
766 /* read 24 bits and return valid 17 bit addr */
768 uint32 scratch, regdata;
769 uint8 *ptr = (uint8 *)&scratch;
770 for (i = 0; i < 3; i++) {
771 if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, ®data)) != SUCCESS)
772 sd_err(("%s: Can't read!\n", __FUNCTION__));
774 *ptr++ = (uint8) regdata;
778 /* Only the lower 17-bits are valid */
779 scratch = ltoh32(scratch);
780 scratch &= 0x0001FFFF;
/*
 * sdioh_cis_read: copy up to 'length' bytes of the given function's CIS
 * into the caller's buffer, one CMD52 read at a time, starting at the
 * cached func_cis_ptr.  Fails if the CIS pointer was never cached or a
 * register read fails.
 * NOTE(review): 'cis' is presumably a cursor initialized from 'cisd' in
 * the elided declarations -- confirm against the full source.
 */
785 sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
792 sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
794 if (!sd->func_cis_ptr[func]) {
796 sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
797 return SDIOH_API_RC_FAIL;
800 sd_trace(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
802 for (count = 0; count < length; count++) {
803 offset = sd->func_cis_ptr[func] + count;
804 if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
805 sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
806 return SDIOH_API_RC_FAIL;
809 *cis = (uint8)(foo & 0xff);
813 return SDIOH_API_RC_SUCCESS;
/*
 * sdioh_request_byte: single-byte CMD52 read/write to any function.
 * Special-cases F0 writes: SDIOD_CCCR_IOEN toggles F2 enable/disable via
 * the MMC core, SDIOD_CCCR_IOABORT is retried up to
 * MMC_SDIO_ABORT_RETRY_LIMIT times, and other F0 writes below 0xF0 are
 * disallowed.  All card access is bracketed by claim/release of the host.
 * Returns SDIOH_API_RC_SUCCESS iff the underlying MMC call reported 0.
 * NOTE(review): several guards/braces are elided in this excerpt.
 */
817 sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
820 #if defined(MMC_SDIO_ABORT)
821 int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
824 sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
826 DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
827 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
828 if(rw) { /* CMD52 Write */
830 /* Can only directly write to some F0 registers. Handle F2 enable
833 if (regaddr == SDIOD_CCCR_IOEN) {
835 sdio_claim_host(sd->func[2]);
836 if (*byte & SDIO_FUNC_ENABLE_2) {
837 /* Enable Function 2 */
838 err_ret = sdio_enable_func(sd->func[2]);
840 sd_err(("bcmsdh_sdmmc: enable F2 failed:%d\n",
844 /* Disable Function 2 */
845 err_ret = sdio_disable_func(sd->func[2]);
847 sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d\n",
851 sdio_release_host(sd->func[2]);
854 #if defined(MMC_SDIO_ABORT)
855 /* to allow abort command through F1 */
856 else if (regaddr == SDIOD_CCCR_IOABORT) {
857 while (sdio_abort_retry--) {
858 if (sd->func[func]) {
859 sdio_claim_host(sd->func[func]);
861 * this sdio_f0_writeb() can be replaced with
862 * another api depending upon MMC driver change.
863 * As of this time, this is temporary one
865 sdio_writeb(sd->func[func],
866 *byte, regaddr, &err_ret);
867 sdio_release_host(sd->func[func]);
873 #endif /* MMC_SDIO_ABORT */
874 else if (regaddr < 0xF0) {
875 sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
877 /* Claim host controller, perform F0 write, and release */
878 if (sd->func[func]) {
879 sdio_claim_host(sd->func[func]);
880 sdio_f0_writeb(sd->func[func],
881 *byte, regaddr, &err_ret);
882 sdio_release_host(sd->func[func]);
886 /* Claim host controller, perform Fn write, and release */
887 if (sd->func[func]) {
888 sdio_claim_host(sd->func[func]);
889 sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
890 sdio_release_host(sd->func[func]);
893 } else { /* CMD52 Read */
894 /* Claim host controller, perform Fn read, and release */
895 if (sd->func[func]) {
896 sdio_claim_host(sd->func[func]);
898 *byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
900 *byte = sdio_readb(sd->func[func], regaddr, &err_ret);
902 sdio_release_host(sd->func[func]);
/* 0x1001F timeouts/CRC errors are handled specially (logic elided here). */
907 if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) {
909 sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
910 rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
914 return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
917 #if defined(SWTXGLOM)
/*
 * sdioh_request_packet_align: round a transfer length up to meet SDIO
 * requirements -- 4-byte alignment for reads and small writes, full
 * block-size multiples for writes larger than one block (with an MSM7x00A
 * controller quirk workaround).  Returns the adjusted length.
 * NOTE(review): the else-branch scaffolding and return are elided here.
 */
918 static INLINE int sdioh_request_packet_align(uint pkt_len, uint write, uint func, int blk_size)
921 if (!write || pkt_len < 32)
922 pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
923 else if ((pkt_len > blk_size) && (pkt_len % blk_size)) {
924 if (func == SDIO_FUNC_2) {
925 sd_err(("%s: [%s] dhd_sdio must align %d bytes"
926 " packet larger than a %d bytes blk size by a blk size\n",
927 __FUNCTION__, write ? "W" : "R", pkt_len, blk_size));
929 pkt_len += blk_size - (pkt_len % blk_size);
931 #ifdef CONFIG_MMC_MSM7X00A
932 if ((pkt_len % 64) == 32) {
933 sd_err(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
936 #endif /* CONFIG_MMC_MSM7X00A */
/*
 * sdioh_glom_post: append 'pkt' to the TX-glom packet chain kept in
 * sd->glom_info (head/tail singly linked via PKTSETNEXT) and bump the
 * chain count.  'frame'/'len' are unused in this build.
 */
941 sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len)
943 void *phead = sd->glom_info.glom_pkt_head;
944 void *ptail = sd->glom_info.glom_pkt_tail;
946 BCM_REFERENCE(frame);
948 ASSERT(!PKTLINK(pkt));
/* Empty chain: pkt becomes both head and tail. */
951 sd->glom_info.glom_pkt_head = sd->glom_info.glom_pkt_tail = pkt;
955 PKTSETNEXT(sd->osh, ptail, pkt);
956 sd->glom_info.glom_pkt_tail = pkt;
958 sd->glom_info.count++;
/*
 * sdioh_glom_clear: walk the TX-glom chain, unlink every packet
 * (PKTSETNEXT NULL), reset head/tail and verify the count drops to zero
 * (forcing it to zero and logging if it does not).
 */
962 sdioh_glom_clear(sdioh_info_t *sd)
966 pnext = sd->glom_info.glom_pkt_head;
969 sd_err(("sdioh_glom_clear: no first packet to clear!\n"));
975 pnext = PKTNEXT(sd->osh, pnow);
976 PKTSETNEXT(sd->osh, pnow, NULL);
977 sd->glom_info.count--;
980 sd->glom_info.glom_pkt_head = NULL;
981 sd->glom_info.glom_pkt_tail = NULL;
982 if (sd->glom_info.count != 0) {
983 sd_err(("sdioh_glom_clear: glom count mismatch!\n"));
984 sd->glom_info.count = 0;
/*
 * sdioh_request_swtxglom_packet: transfer a packet (or TX-glom chain) to
 * or from the card.  The whole-block portion of the chain is sent with a
 * hand-built CMD53 (SD_IO_RW_EXTENDED) using a scatter-gather list and
 * mmc_wait_for_req(); any leftover (< one block) falls back to the
 * sdio_memcpy_toio/fromio / sdio_readsb byte-mode helpers, with TXGLOM
 * copy-mode coalescing into a temporary buffer when enabled.
 * NOTE(review): many guards, braces and error-path lines are elided in
 * this excerpt; comments describe only the visible statements.
 */
989 sdioh_request_swtxglom_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
990 uint addr, void *pkt)
992 bool fifo = (fix_inc == SDIOH_DATA_FIX);
996 uint ttl_len, dma_len, lft_len, xfred_len, pkt_len;
999 struct mmc_request mmc_req;
1000 struct mmc_command mmc_cmd;
1001 struct mmc_data mmc_dat;
1002 #ifdef BCMSDIOH_TXGLOM
1003 uint8 *localbuf = NULL;
1004 uint local_plen = 0;
/* txglom applies only to writes of a multi-packet glom chain. */
1005 bool need_txglom = write &&
1006 (pkt == sd->glom_info.glom_pkt_tail) &&
1007 (sd->glom_info.glom_pkt_head != sd->glom_info.glom_pkt_tail);
1008 #endif /* BCMSDIOH_TXGLOM */
1010 sd_trace(("%s: Enter\n", __FUNCTION__));
1013 DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
1014 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1016 ttl_len = xfred_len = 0;
1017 #ifdef BCMSDIOH_TXGLOM
1019 pkt = sd->glom_info.glom_pkt_head;
1021 #endif /* BCMSDIOH_TXGLOM */
1023 /* at least 4 bytes alignment of skb buff is guaranteed */
1024 for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext))
1025 ttl_len += PKTLEN(sd->osh, pnext);
1027 blk_size = sd->client_block_size[func];
/* Use the DMA/SG path for rxchain reads or multi-descriptor txglom writes. */
1028 if (((!write && sd->use_rxchain) ||
1029 #ifdef BCMSDIOH_TXGLOM
1030 (need_txglom && sd->txglom_mode == SDPCM_TXGLOM_MDESC) ||
1032 0) && (ttl_len >= blk_size)) {
1033 blk_num = ttl_len / blk_size;
1034 dma_len = blk_num * blk_size;
1040 lft_len = ttl_len - dma_len;
1042 sd_trace(("%s: %s %dB to func%d:%08x, %d blks with DMA, %dB leftover\n",
1043 __FUNCTION__, write ? "W" : "R",
1044 ttl_len, func, addr, blk_num, lft_len));
1047 memset(&mmc_req, 0, sizeof(struct mmc_request));
1048 memset(&mmc_cmd, 0, sizeof(struct mmc_command));
1049 memset(&mmc_dat, 0, sizeof(struct mmc_data));
1051 /* Set up DMA descriptors */
1054 pnext = PKTNEXT(sd->osh, pnext)) {
1055 pkt_len = PKTLEN(sd->osh, pnext);
1057 if (dma_len > pkt_len)
1060 pkt_len = xfred_len = dma_len;
1065 sg_set_buf(&sd->sg_list[SGCount++],
1066 (uint8*)PKTDATA(sd->osh, pnext),
1069 if (SGCount >= SDIOH_SDMMC_MAX_SG_ENTRIES) {
1070 sd_err(("%s: sg list entries exceed limit\n",
1072 return (SDIOH_API_RC_FAIL);
1076 mmc_dat.sg = sd->sg_list;
1077 mmc_dat.sg_len = SGCount;
1078 mmc_dat.blksz = blk_size;
1079 mmc_dat.blocks = blk_num;
1080 mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
/* Build the CMD53 argument: R/W, function, block mode, op code, addr, count. */
1082 mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
1083 mmc_cmd.arg = write ? 1<<31 : 0;
1084 mmc_cmd.arg |= (func & 0x7) << 28;
1085 mmc_cmd.arg |= 1<<27;
1086 mmc_cmd.arg |= fifo ? 0 : 1<<26;
1087 mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
1088 mmc_cmd.arg |= blk_num & 0x1FF;
1089 mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
1091 mmc_req.cmd = &mmc_cmd;
1092 mmc_req.data = &mmc_dat;
1094 sdio_claim_host(sd->func[func]);
1095 mmc_set_data_timeout(&mmc_dat, sd->func[func]->card);
1096 mmc_wait_for_req(sd->func[func]->card->host, &mmc_req);
1097 sdio_release_host(sd->func[func]);
1099 err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
1101 sd_err(("%s:CMD53 %s failed with code %d\n",
1103 write ? "write" : "read",
/* Advance the target address past what the DMA phase covered (unless fifo). */
1107 addr = addr + ttl_len - lft_len - dma_len;
1113 /* Claim host controller */
1114 sdio_claim_host(sd->func[func]);
1115 for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
1116 uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext) +
1119 pkt_len = PKTLEN(sd->osh, pnext);
1120 if (0 != xfred_len) {
1121 pkt_len -= xfred_len;
1124 #ifdef BCMSDIOH_TXGLOM
/* Copy-mode txglom: coalesce the chain into one aligned local buffer. */
1127 uint prev_lft_len = lft_len;
1128 lft_len = sdioh_request_packet_align(lft_len, write,
1131 if (lft_len > prev_lft_len) {
1132 sd_err(("%s: padding is unexpected! lft_len %d,"
1133 " prev_lft_len %d %s\n",
1134 __FUNCTION__, lft_len, prev_lft_len,
1135 write ? "Write" : "Read"));
1138 localbuf = (uint8 *)MALLOC(sd->osh, lft_len);
1139 if (localbuf == NULL) {
1140 sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
1141 __FUNCTION__, (write) ? "TX" : "RX"));
1142 need_txglom = FALSE;
1146 bcopy(buf, (localbuf + local_plen), pkt_len);
1147 local_plen += pkt_len;
1149 if (PKTNEXT(sd->osh, pnext)) {
1154 pkt_len = local_plen;
1158 #endif /* BCMSDIOH_TXGLOM */
1161 #ifdef BCMSDIOH_TXGLOM
/* Pad this packet up to the aligned length, growing tailroom if needed. */
1165 pkt_len = sdioh_request_packet_align(pkt_len, write,
1168 pad = pkt_len - PKTLEN(sd->osh, pnext);
1171 if (func == SDIO_FUNC_2) {
1172 sd_err(("%s: padding is unexpected! pkt_len %d,"
1173 " PKTLEN %d lft_len %d %s\n",
1174 __FUNCTION__, pkt_len, PKTLEN(sd->osh, pnext),
1175 lft_len, write ? "Write" : "Read"));
1177 if (PKTTAILROOM(sd->osh, pkt) < pad) {
1178 sd_info(("%s: insufficient tailroom %d, pad %d,"
1179 " lft_len %d pktlen %d, func %d %s\n",
1180 __FUNCTION__, (int)PKTTAILROOM(sd->osh, pkt),
1181 pad, lft_len, PKTLEN(sd->osh, pnext), func,
1182 write ? "W" : "R"));
1183 if (PKTPADTAILROOM(sd->osh, pkt, pad)) {
1184 sd_err(("%s: padding error size %d.\n",
1185 __FUNCTION__, pad));
1186 return SDIOH_API_RC_FAIL;
/* Byte-mode transfer of the (possibly coalesced) remainder. */
1192 if ((write) && (!fifo))
1193 err_ret = sdio_memcpy_toio(
1195 addr, buf, pkt_len);
1197 err_ret = sdio_memcpy_toio(
1199 addr, buf, pkt_len);
1201 err_ret = sdio_readsb(
1203 buf, addr, pkt_len);
1205 err_ret = sdio_memcpy_fromio(
1207 buf, addr, pkt_len);
1210 sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
1212 (write) ? "TX" : "RX",
1213 pnext, SGCount, addr, pkt_len, err_ret));
1215 sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
1217 (write) ? "TX" : "RX",
1218 pnext, SGCount, addr, pkt_len));
1224 sdio_release_host(sd->func[func]);
1226 #ifdef BCMSDIOH_TXGLOM
1228 MFREE(sd->osh, localbuf, lft_len);
1229 #endif /* BCMSDIOH_TXGLOM */
1231 sd_trace(("%s: Exit\n", __FUNCTION__));
1232 return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1236 * This function takes a buffer or packet, and fixes everything up so that in the
1237 * end, a DMA-able packet is created.
1239 * A buffer does not have an associated packet pointer, and may or may not be aligned.
1240 * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
1241 * then all the packets in the chain must be properly aligned. If the packet data is not
1242 * aligned, then there may only be one packet, and in this case, it is copied to a new
/*
 * sdioh_request_swtxglom_buffer: wrapper around
 * sdioh_request_swtxglom_packet() that guarantees a DMA-able, 4-byte
 * aligned packet: unaligned single packets are bounced through a static
 * temporary packet (copy in before a write, copy out after a read).
 * NOTE(review): the "no packet" and aligned-packet cases are elided in
 * this excerpt.
 */
1247 sdioh_request_swtxglom_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
1248 uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
1250 SDIOH_API_RC Status;
1252 void *orig_buf = NULL;
1255 sd_trace(("%s: Enter\n", __FUNCTION__));
1257 DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
1258 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1261 /* Case 1: we don't have a packet. */
1264 } else if ((ulong)PKTDATA(sd->osh, pkt) & DMA_ALIGN_MASK) {
1265 /* Case 2: We have a packet, but it is unaligned.
1266 * in this case, we cannot have a chain.
1268 ASSERT(PKTNEXT(sd->osh, pkt) == NULL);
1270 orig_buf = PKTDATA(sd->osh, pkt);
1271 copylen = PKTLEN(sd->osh, pkt);
1276 tmppkt = PKTGET_STATIC(sd->osh, copylen, write ? TRUE : FALSE);
1277 if (tmppkt == NULL) {
1278 sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, copylen));
1279 return SDIOH_API_RC_FAIL;
1281 /* For a write, copy the buffer data into the packet. */
1283 bcopy(orig_buf, PKTDATA(sd->osh, tmppkt), copylen);
1286 Status = sdioh_request_swtxglom_packet(sd, fix_inc, write, func, addr, tmppkt);
1289 /* For a read, copy the packet data back to the buffer. */
1291 bcopy(PKTDATA(sd->osh, tmppkt), orig_buf, PKTLEN(sd->osh, tmppkt));
1292 PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
/*
 * sdioh_set_mode: select the TX-glom transfer mode (copy vs
 * multi-descriptor); any other value leaves the current mode unchanged.
 * Returns the mode now in effect.
 */
1300 sdioh_set_mode(sdioh_info_t *sd, uint mode)
1302 if (mode == SDPCM_TXGLOM_CPY)
1303 sd->txglom_mode = mode;
1304 else if (mode == SDPCM_TXGLOM_MDESC)
1305 sd->txglom_mode = mode;
/* NOTE(review): this print reports the requested mode, not necessarily the accepted one. */
1306 printf("%s: set txglom_mode to %s\n", __FUNCTION__, mode==SDPCM_TXGLOM_MDESC?"multi-desc":"copy");
1308 return (sd->txglom_mode);
/*
 * sdioh_request_word: 2- or 4-byte CMD53 register read/write to F1/F2
 * (F0 only accepts CMD52 and is rejected up front).  On failure the
 * function is aborted by writing its number to the F0 I/O-abort register,
 * retried up to MMC_SDIO_ABORT_RETRY_LIMIT times; err_ret2 tracks the
 * abort write so a stuck abort also fails the call (deadlock avoidance).
 * NOTE(review): several guards/braces are elided in this excerpt.
 */
1312 sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
1313 uint32 *word, uint nbytes)
1315 int err_ret = SDIOH_API_RC_FAIL;
1316 int err_ret2 = SDIOH_API_RC_SUCCESS; // terence 20130621: prevent dhd_dpc in dead lock
1317 #if defined(MMC_SDIO_ABORT)
1318 int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
1322 sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
1323 return SDIOH_API_RC_FAIL;
1326 sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
1327 __FUNCTION__, cmd_type, rw, func, addr, nbytes));
1329 DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
1330 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1331 /* Claim host controller */
1332 sdio_claim_host(sd->func[func]);
1334 if(rw) { /* CMD52 Write */
1336 sdio_writel(sd->func[func], *word, addr, &err_ret);
1337 } else if (nbytes == 2) {
1338 sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
1340 sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
1342 } else { /* CMD52 Read */
1344 *word = sdio_readl(sd->func[func], addr, &err_ret);
1345 } else if (nbytes == 2) {
1346 *word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
1348 sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
1352 /* Release host controller */
1353 sdio_release_host(sd->func[func]);
1356 #if defined(MMC_SDIO_ABORT)
1357 /* Any error on CMD53 transaction should abort that function using function 0. */
1358 while (sdio_abort_retry--) {
1360 sdio_claim_host(sd->func[0]);
1362 * this sdio_f0_writeb() can be replaced with another api
1363 * depending upon MMC driver change.
1364 * As of this time, this is temporary one
1366 sdio_writeb(sd->func[0],
1367 func, SDIOD_CCCR_IOABORT, &err_ret2);
1368 sdio_release_host(sd->func[0]);
1374 #endif /* MMC_SDIO_ABORT */
1376 sd_err(("bcmsdh_sdmmc: Failed to %s word F%d:@0x%05x=%02x, Err: 0x%08x\n",
1377 rw ? "Write" : "Read", func, addr, *word, err_ret));
1381 return (((err_ret == 0)&&(err_ret2 == 0)) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
/*
 * Transfer a whole packet chain to/from the device (tx/rx glom path).
 *
 * fix_inc == SDIOH_DATA_FIX selects FIFO (fixed) addressing, otherwise the
 * device address increments. 'write' selects direction. Two strategies,
 * selected by sd->txglom_mode:
 *  - SDPCM_TXGLOM_MDESC: build a scatter-gather list covering the chain and
 *    issue raw CMD53 (SD_IO_RW_EXTENDED) block transfers via
 *    mmc_wait_for_req(), splitting into multiple commands when the chain
 *    exceeds the host's max request size.
 *  - SDPCM_TXGLOM_CPY: flatten the chain into one locally-allocated bounce
 *    buffer and use the stock sdio_memcpy_toio/sdio_memcpy_fromio helpers.
 * Returns SDIOH_API_RC_SUCCESS or SDIOH_API_RC_FAIL.
 */
sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
uint addr, void *pkt)
bool fifo = (fix_inc == SDIOH_DATA_FIX);
uint ttl_len, pkt_offset;
struct mmc_request mmc_req;
struct mmc_command mmc_cmd;
struct mmc_data mmc_dat;
struct sdio_func *sdio_func = sd->func[func];
struct mmc_host *host = sdio_func->card->host;
#ifdef BCMSDIOH_TXGLOM
uint8 *localbuf = NULL;  /* bounce buffer, CPY mode only */
uint local_plen = 0;     /* bytes accumulated in localbuf */
#endif /* BCMSDIOH_TXGLOM */
sd_trace(("%s: Enter\n", __FUNCTION__));
DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
/* Per-CMD53 limits: host block-count limit, the 9-bit CMD53 block-count
 * field, and the host's max request size all cap one transfer.
 */
blk_size = sd->client_block_size[func];
max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
max_req_size = min(max_blk_count * blk_size, host->max_req_size);
#ifdef BCMSDIOH_TXGLOM
if(sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
/* One iteration per CMD53 until the chain is consumed. */
while (pnext != NULL) {
memset(&mmc_req, 0, sizeof(struct mmc_request));
memset(&mmc_cmd, 0, sizeof(struct mmc_command));
memset(&mmc_dat, 0, sizeof(struct mmc_data));
sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
/* Set up scatter-gather DMA descriptors. this loop is to find out the max
* data we can transfer with one command 53. blocks per command is limited by
* host max_req_size and 9-bit max block number. when the total length of this
* packet chain is bigger than max_req_size, use multiple SD_IO_RW_EXTENDED
* commands (each transfer is still block aligned)
while (pnext != NULL && ttl_len < max_req_size) {
uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
ASSERT(pdata != NULL);
pkt_len = PKTLEN(sd->osh, pnext);
sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
/* sg_count is unlikely larger than the array size, and this is
* NOT something we can handle here, but in case it happens, PLEASE put
* a restriction on max tx/glom count (based on host->max_segs).
if (sg_count >= ARRAYSIZE(sd->sg_list)) {
sd_err(("%s: sg list entries exceed limit\n", __FUNCTION__));
return (SDIOH_API_RC_FAIL);
/* pkt_offset carries over when a packet straddles two CMD53s. */
pdata += pkt_offset;
sg_data_size = pkt_len - pkt_offset;
if (sg_data_size > max_req_size - ttl_len)
sg_data_size = max_req_size - ttl_len;
/* some platforms put a restriction on the data size of each scatter-gather
* DMA descriptor, use multiple sg buffers when xfer_size is bigger than
if (sg_data_size > host->max_seg_size)
sg_data_size = host->max_seg_size;
sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
ttl_len += sg_data_size;
pkt_offset += sg_data_size;
if (pkt_offset == pkt_len) {
pnext = PKTNEXT(sd->osh, pnext);
/* Glommed transfers must already be block-aligned by the caller. */
if (ttl_len % blk_size != 0) {
sd_err(("%s, data length %d not aligned to block size %d\n",
__FUNCTION__, ttl_len, blk_size));
return SDIOH_API_RC_FAIL;
blk_num = ttl_len / blk_size;
mmc_dat.sg = sd->sg_list;
mmc_dat.sg_len = sg_count;
mmc_dat.blksz = blk_size;
mmc_dat.blocks = blk_num;
mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
/* Hand-build the CMD53 argument: bit31 R/W, bits30-28 function number,
 * bit27 block mode, bit26 OP code (0 = fixed/FIFO, 1 = incrementing),
 * bits25-9 register address, bits8-0 block count.
 */
mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
mmc_cmd.arg = write ? 1<<31 : 0;
mmc_cmd.arg |= (func & 0x7) << 28;
mmc_cmd.arg |= 1<<27;
mmc_cmd.arg |= fifo ? 0 : 1<<26;
mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
mmc_cmd.arg |= blk_num & 0x1FF;
mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
mmc_req.cmd = &mmc_cmd;
mmc_req.data = &mmc_dat;
sdio_claim_host(sdio_func);
mmc_set_data_timeout(&mmc_dat, sdio_func->card);
mmc_wait_for_req(host, &mmc_req);
sdio_release_host(sdio_func);
/* Either the command or the data phase may have failed. */
err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
sd_err(("%s:CMD53 %s failed with code %d\n",
__FUNCTION__, write ? "write" : "read", err_ret));
return SDIOH_API_RC_FAIL;
#ifdef BCMSDIOH_TXGLOM
} else if(sd->txglom_mode == SDPCM_TXGLOM_CPY) {
/* First pass: total length, so the bounce buffer can be sized. */
for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
ttl_len += PKTLEN(sd->osh, pnext);
/* Claim host controller */
sdio_claim_host(sd->func[func]);
for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext);
pkt_len = PKTLEN(sd->osh, pnext);
localbuf = (uint8 *)MALLOC(sd->osh, ttl_len);
if (localbuf == NULL) {
sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
__FUNCTION__, (write) ? "TX" : "RX"));
/* Accumulate each packet into the bounce buffer; transfer once
 * the last packet of the chain has been copied in.
 */
bcopy(buf, (localbuf + local_plen), pkt_len);
local_plen += pkt_len;
if (PKTNEXT(sd->osh, pnext))
pkt_len = local_plen;
/* Pad to 4-byte (reads / short writes) or block (long writes) boundary. */
if (!write || pkt_len < 32)
pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
else if (pkt_len % blk_size)
pkt_len += blk_size - (pkt_len % blk_size);
if ((write) && (!fifo))
err_ret = sdio_memcpy_toio(
addr, buf, pkt_len);
err_ret = sdio_memcpy_toio(
addr, buf, pkt_len);
err_ret = sdio_readsb(
buf, addr, pkt_len);
err_ret = sdio_memcpy_fromio(
buf, addr, pkt_len);
sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
(write) ? "TX" : "RX",
pnext, sg_count, addr, pkt_len, err_ret));
sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
(write) ? "TX" : "RX",
pnext, sg_count, addr, pkt_len));
sdio_release_host(sd->func[func]);
sd_err(("%s: set to wrong glom mode %d\n", __FUNCTION__, sd->txglom_mode));
return SDIOH_API_RC_FAIL;
MFREE(sd->osh, localbuf, ttl_len);
#endif /* BCMSDIOH_TXGLOM */
sd_trace(("%s: Exit\n", __FUNCTION__));
return SDIOH_API_RC_SUCCESS;
/*
 * Transfer a single contiguous, already-aligned buffer to/from the device.
 * fix_inc == SDIOH_DATA_FIX selects FIFO (fixed-address) access; 'write'
 * selects direction. Returns SDIOH_API_RC_SUCCESS / SDIOH_API_RC_FAIL.
 */
sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
uint addr, uint8 *buf, uint len)
bool fifo = (fix_inc == SDIOH_DATA_FIX);
sd_trace(("%s: Enter\n", __FUNCTION__));
* For all writes, each packet length is aligned to 32 (or 4)
* bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
* is aligned to block boundary. If you want to align each packet to
* a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here
* For reads, the alignment is done in sdioh_request_buffer.
sdio_claim_host(sd->func[func]);
/* Dispatch on direction and FIFO vs incrementing addressing. */
if ((write) && (!fifo))
err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
err_ret = sdio_readsb(sd->func[func], buf, addr, len);
err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
sdio_release_host(sd->func[func]);
sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
(write) ? "TX" : "RX", buf, addr, len, err_ret));
sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
(write) ? "TX" : "RX", buf, addr, len));
sd_trace(("%s: Exit\n", __FUNCTION__));
return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
* This function takes a buffer or packet, and fixes everything up so that in the
* end, a DMA-able packet is created.
* A buffer does not have an associated packet pointer, and may or may not be aligned.
* A packet may consist of a single packet, or a packet chain. If it is a packet chain,
* then all the packets in the chain must be properly aligned. If the packet data is not
* aligned, then there may only be one packet, and in this case, it is copied to a new
/*
 * Entry point for buffer/packet transfers. Routes packet chains to
 * sdioh_request_packet_chain(); single aligned buffers go straight to
 * sdioh_buffer_tofrom_bus(); unaligned buffers are bounced through a
 * temporary packet. Returns an SDIOH_API_RC status.
 */
sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
SDIOH_API_RC status;
sd_trace(("%s: Enter\n", __FUNCTION__));
DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
/* packet chain, only used for tx/rx glom, all packets length
* are aligned, total length is a block multiple
if (PKTNEXT(sd->osh, pkt))
return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
/* non-glom mode, ignore the buffer parameter and use the packet pointer
* (this shouldn't happen)
buffer = PKTDATA(sd->osh, pkt);
buf_len = PKTLEN(sd->osh, pkt);
/* buffer and length are aligned, use it directly so we can avoid memory copy */
if (((ulong)buffer & DMA_ALIGN_MASK) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n",
__FUNCTION__, write, buffer, buf_len));
/* otherwise, a memory copy is needed as the input buffer is not aligned */
/* Extra block-size slack so the rounded-up transfer length below fits. */
tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
if (tmppkt == NULL) {
sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
return SDIOH_API_RC_FAIL;
/* Copy out for writes; for reads the copy-back happens after the bus op. */
bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
/* this function performs "abort" for both of host & device */
/*
 * Abort any in-progress transfer on SDIO function 'func' by writing the
 * function number into the CCCR I/O-abort register. Always reports success.
 */
sdioh_abort(sdioh_info_t *sd, uint func)
#if defined(MMC_SDIO_ABORT)
char t_func = (char) func;
#endif /* defined(MMC_SDIO_ABORT) */
sd_trace(("%s: Enter\n", __FUNCTION__));
#if defined(MMC_SDIO_ABORT)
/* issue abort cmd52 command through F0 (CCCR IOABORT names the function to abort) */
sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
#endif /* defined(MMC_SDIO_ABORT) */
sd_trace(("%s: Exit\n", __FUNCTION__));
return SDIOH_API_RC_SUCCESS;
/* Reset and re-initialize the device */
/* Intentionally a no-op on the Linux mmc stack: only traces and reports success. */
int sdioh_sdio_reset(sdioh_info_t *si)
sd_trace(("%s: Enter\n", __FUNCTION__));
sd_trace(("%s: Exit\n", __FUNCTION__));
return SDIOH_API_RC_SUCCESS;
1728 /* Disable device interrupt */
1730 sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
1732 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1733 sd->intmask &= ~CLIENT_INTR;
1736 /* Enable device interrupt */
1738 sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
1740 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1741 sd->intmask |= CLIENT_INTR;
/* Read client card reg */
/*
 * Read a 1/2/4-byte client register into *data. F0 and 1-byte registers use
 * CMD52 byte access; wider registers use sdioh_request_word().
 */
sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
if ((func == 0) || (regsize == 1)) {
sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
sd_data(("%s: byte read data=0x%02x\n",
__FUNCTION__, *data));
sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
sd_data(("%s: word read data=0x%08x\n",
__FUNCTION__, *data));
#if !defined(OOB_INTR_ONLY)
/* bcmsdh_sdmmc interrupt handler */
/*
 * In-band SDIO interrupt handler registered via sdio_claim_irq(). The mmc
 * core invokes it with the host already claimed, so it releases the host
 * before running the client handler and re-claims it before returning.
 */
static void IRQHandler(struct sdio_func *func)
sd = sdio_get_drvdata(func);
/* Host is claimed by the mmc core on entry; release it around the callback. */
sdio_release_host(sd->func[0]);
if (sd->use_client_ints) {
ASSERT(sd->intr_handler);
ASSERT(sd->intr_handler_arg);
(sd->intr_handler)(sd->intr_handler_arg);
sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
__FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
/* Re-claim so the mmc core gets the host back in the state it expects. */
sdio_claim_host(sd->func[0]);
1795 /* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
1796 static void IRQHandlerF2(struct sdio_func *func)
1798 sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
1800 #endif /* !defined(OOB_INTR_ONLY) */
1803 /* Write client card reg */
1805 sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
1808 if ((func == 0) || (regsize == 1)) {
1812 sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
1813 sd_data(("%s: byte write data=0x%02x\n",
1814 __FUNCTION__, data));
1819 sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize);
1821 sd_data(("%s: word write data=0x%08x\n",
1822 __FUNCTION__, data));
1827 #endif /* NOTUSED */
/*
 * Bring the card (back) up after a power cycle / firmware download.
 * 'stage' selects which part of the init sequence runs (staged because the
 * SDIO interrupt must not be enabled until firmware download completes —
 * presumably stage 0 = re-enumerate + block sizes, stage 1 = enable
 * functions + interrupts; TODO confirm against callers).
 */
sdioh_start(sdioh_info_t *sd, int stage)
sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
/* Need to do this stages as we can't enable the interrupt till
downloading of the firmware is complete, other wise polling
sdio access will come in way
/* Since the power to the chip is killed, we will have
re enumerate the device again. Set the block size
and enable the fucntion 1 for in preparation for
downloading the code
/* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
2.6.27. The implementation prior to that is buggy, and needs broadcom's
if ((ret = sdio_reset_comm(sd->func[0]->card))) {
sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
/* Restore driver-side defaults after re-enumeration. */
sd->sd_blockmode = TRUE;
sd->use_client_ints = TRUE;
sd->client_block_size[0] = 64;
/* Claim host controller */
sdio_claim_host(sd->func[1]);
sd->client_block_size[1] = 64;
ret = sdio_set_block_size(sd->func[1], 64);
sd_err(("bcmsdh_sdmmc: Failed to set F1 "
"blocksize(%d)\n", ret));
/* Release host controller F1 */
sdio_release_host(sd->func[1]);
/* Claim host controller F2 */
sdio_claim_host(sd->func[2]);
sd->client_block_size[2] = sd_f2_blocksize;
printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
sd_err(("bcmsdh_sdmmc: Failed to set F2 "
"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
/* Release host controller F2 */
sdio_release_host(sd->func[2]);
sdioh_sdmmc_card_enablefuncs(sd);
/* Re-hook interrupts: in-band SDIO IRQs, or the out-of-band GPIO line. */
#if !defined(OOB_INTR_ONLY)
sdio_claim_host(sd->func[0]);
sdio_claim_irq(sd->func[2], IRQHandlerF2);
sdio_claim_irq(sd->func[1], IRQHandler);
sdio_release_host(sd->func[0]);
#else /* defined(OOB_INTR_ONLY) */
sdioh_enable_func_intr(sd);
bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
#endif /* !defined(OOB_INTR_ONLY) */
sd_err(("%s Failed\n", __FUNCTION__));
/*
 * Quiesce the card before power-off: unhook the in-band SDIO interrupts
 * (or disable/clear the out-of-band interrupt) so the stack stops polling
 * a device that is about to disappear.
 */
sdioh_stop(sdioh_info_t *sd)
/* MSM7201A Android sdio stack has bug with interrupt
So internaly within SDIO stack they are polling
which cause issue when device is turned off. So
unregister interrupt with SDIO stack to stop the
#if !defined(OOB_INTR_ONLY)
sdio_claim_host(sd->func[0]);
sdio_release_irq(sd->func[1]);
sdio_release_irq(sd->func[2]);
sdio_release_host(sd->func[0]);
#else /* defined(OOB_INTR_ONLY) */
sdioh_disable_func_intr(sd);
bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
#endif /* !defined(OOB_INTR_ONLY) */
sd_err(("%s Failed\n", __FUNCTION__));
1949 sdioh_waitlockfree(sdioh_info_t *sd)
1956 sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
1958 return SDIOH_API_RC_FAIL;
1962 sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
1964 return SDIOH_API_RC_FAIL;
1968 sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
1974 sdioh_gpio_init(sdioh_info_t *sd)
1976 return SDIOH_API_RC_FAIL;