}
return ERR_CONTINUE;
}
-
-static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+
+static int sdmmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
return err ? 0 : 1;
}
-static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+
+static int emmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ unsigned int from, nr, arg;
+ int err = 0;
+
+ if (!mmc_can_erase(card)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ from = blk_rq_pos(req);
+ nr = blk_rq_sectors(req);
+
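+	/*
+	 * Prefer TRIM when the card supports it: TRIM works on single
+	 * write blocks, while a plain ERASE is rounded out to whole
+	 * erase groups.
+	 */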
+ if (mmc_can_trim(card))
+ arg = MMC_TRIM_ARG;
+ else
+ arg = MMC_ERASE_ARG;
+
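+	/*
+	 * iNAND cards take the CMD38 argument through an EXT_CSD byte
+	 * that must be written before the erase command is issued.
+	 */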
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_TRIM_ARG ?
+ INAND_CMD38_ARG_TRIM :
+ INAND_CMD38_ARG_ERASE,
+ 0);
+ if (err)
+ goto out;
+ }
+ err = mmc_erase(card, from, nr, arg);
+out:
+ blk_end_request(req, err, blk_rq_bytes(req));
+
+ return err ? 0 : 1;
+}
+
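+/*
+ * Route discard requests by host type: eMMC hosts take the erase/trim
+ * path above, SD hosts keep the original behaviour. The same split is
+ * applied to secure discard and flush below.
+ */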
+static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+
+	if (HOST_IS_EMMC(card->host))
+ return emmc_blk_issue_discard_rq(mq, req);
+ else
+ return sdmmc_blk_issue_discard_rq(mq, req);
+}
+
+static int sdmmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct request *req)
{
struct mmc_blk_data *md = mq->data;
return err ? 0 : 1;
}
+
+static int emmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ unsigned int from, nr, arg;
+ int err = 0;
+
+ if (!mmc_can_secure_erase_trim(card)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ from = blk_rq_pos(req);
+ nr = blk_rq_sectors(req);
+
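+	/*
+	 * A range that is not erase-group aligned needs secure TRIM
+	 * (when the card supports it); aligned ranges can use the
+	 * regular secure erase.
+	 */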
+ if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+ arg = MMC_SECURE_TRIM1_ARG;
+ else
+ arg = MMC_SECURE_ERASE_ARG;
+
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_SECURE_TRIM1_ARG ?
+ INAND_CMD38_ARG_SECTRIM1 :
+ INAND_CMD38_ARG_SECERASE,
+ 0);
+ if (err)
+ goto out;
+ }
+ err = mmc_erase(card, from, nr, arg);
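+	/*
+	 * Secure TRIM is a two step sequence: SECURE_TRIM1 marks the
+	 * blocks, SECURE_TRIM2 completes the purge.
+	 */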
+ if (!err && arg == MMC_SECURE_TRIM1_ARG) {
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ INAND_CMD38_ARG_SECTRIM2,
+ 0);
+ if (err)
+ goto out;
+ }
+ err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+ }
+out:
+ blk_end_request(req, err, blk_rq_bytes(req));
+
+ return err ? 0 : 1;
+}
+
+static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+
+	if (HOST_IS_EMMC(card->host))
+ return emmc_blk_issue_secdiscard_rq(mq, req);
+ else
+ return sdmmc_blk_issue_secdiscard_rq(mq, req);
+}
+
-static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+static int sdmmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
return 1;
}
+
+static int emmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ /*
+ * No-op, only service this because we need REQ_FUA for reliable
+ * writes.
+ */
+ blk_end_request_all(req, 0);
+
+ return 1;
+}
+
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+
+	if (HOST_IS_EMMC(card->host))
+ return emmc_blk_issue_flush(mq, req);
+ else
+ return sdmmc_blk_issue_flush(mq, req);
+}
/*
* Reformat current write as a reliable write, supporting
* both legacy and the enhanced reliable write MMC cards.
/*
* A block was successfully transferred.
*/
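+			/* blk_end_request() takes the queue lock itself. */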
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0,
+ ret = blk_end_request(req, 0,
brq->data.bytes_xfered);
- spin_unlock_irq(&md->lock);
if (status == MMC_BLK_SUCCESS && ret) {
/*
* The blk_end_request has returned non zero
* time, so we only reach here after trying to
* read a single sector.
*/
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, -EIO,
+ ret = blk_end_request(req, -EIO,
brq->data.blksz);
- spin_unlock_irq(&md->lock);
if (!ret)
goto start_new_req;
break;
blocks = mmc_sd_num_wr_blocks(card);
if (blocks != (u32)-1) {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, blocks << 9);
- spin_unlock_irq(&md->lock);
+ ret = blk_end_request(req, 0, blocks << 9);
}
} else {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
- spin_unlock_irq(&md->lock);
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
}
cmd_abort:
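+	/* Error out whatever is left of the request. */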
- spin_lock_irq(&md->lock);
while (ret)
- ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
- spin_unlock_irq(&md->lock);
+ ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
start_new_req:
if (rqc) {