1 /*
2  *  linux/drivers/mmc/core/mmc.c
3  *
4  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5  *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
6  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/err.h>
14 #include <linux/slab.h>
15 #include <linux/stat.h>
16 #include <linux/pm_runtime.h>
17
18 #include <linux/mmc/host.h>
19 #include <linux/mmc/card.h>
20 #include <linux/mmc/mmc.h>
21
22 #include "core.h"
23 #include "bus.h"
24 #include "mmc_ops.h"
25 #include "sd_ops.h"
26
27 static const unsigned int tran_exp[] = {
28         10000,          100000,         1000000,        10000000,
29         0,              0,              0,              0
30 };
31
32 static const unsigned char tran_mant[] = {
33         0,      10,     12,     13,     15,     20,     25,     30,
34         35,     40,     45,     50,     55,     60,     70,     80,
35 };
36
37 static const unsigned int tacc_exp[] = {
38         1,      10,     100,    1000,   10000,  100000, 1000000, 10000000,
39 };
40
41 static const unsigned int tacc_mant[] = {
42         0,      10,     12,     13,     15,     20,     25,     30,
43         35,     40,     45,     50,     55,     60,     70,     80,
44 };
45
46 #define UNSTUFF_BITS(resp,start,size)                                   \
47         ({                                                              \
48                 const int __size = size;                                \
49                 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
50                 const int __off = 3 - ((start) / 32);                   \
51                 const int __shft = (start) & 31;                        \
52                 u32 __res;                                              \
53                                                                         \
54                 __res = resp[__off] >> __shft;                          \
55                 if (__size + __shft > 32)                               \
56                         __res |= resp[__off-1] << ((32 - __shft) % 32); \
57                 __res & __mask;                                         \
58         })
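/*
 * Note: resp[0] holds bits 127..96 of the 128-bit response and resp[3]
 * holds bits 31..0, hence the "3 - (start / 32)" word index above.  For
 * example, UNSTUFF_BITS(resp, 104, 24) evaluates to (resp[0] >> 8) masked
 * to 24 bits, i.e. response bits 127..104; a field that straddles a word
 * boundary is stitched together from the two adjacent words.
 */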
59
60 /*
61  * Given the decoded CSD structure, decode the raw CID to our CID structure.
62  */
63 static int mmc_decode_cid(struct mmc_card *card)
64 {
65         u32 *resp = card->raw_cid;
66
67         /*
68          * The selection of the format here is based upon published
69          * specs from SanDisk and from what people have reported.
70          */
71         switch (card->csd.mmca_vsn) {
72         case 0: /* MMC v1.0 - v1.2 */
73         case 1: /* MMC v1.4 */
74                 card->cid.manfid        = UNSTUFF_BITS(resp, 104, 24);
75                 card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
76                 card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
77                 card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
78                 card->cid.prod_name[3]  = UNSTUFF_BITS(resp, 72, 8);
79                 card->cid.prod_name[4]  = UNSTUFF_BITS(resp, 64, 8);
80                 card->cid.prod_name[5]  = UNSTUFF_BITS(resp, 56, 8);
81                 card->cid.prod_name[6]  = UNSTUFF_BITS(resp, 48, 8);
82                 card->cid.hwrev         = UNSTUFF_BITS(resp, 44, 4);
83                 card->cid.fwrev         = UNSTUFF_BITS(resp, 40, 4);
84                 card->cid.serial        = UNSTUFF_BITS(resp, 16, 24);
85                 card->cid.month         = UNSTUFF_BITS(resp, 12, 4);
86                 card->cid.year          = UNSTUFF_BITS(resp, 8, 4) + 1997;
87                 break;
88
89         case 2: /* MMC v2.0 - v2.2 */
90         case 3: /* MMC v3.1 - v3.3 */
91         case 4: /* MMC v4 */
92                 card->cid.manfid        = UNSTUFF_BITS(resp, 120, 8);
93                 card->cid.oemid         = UNSTUFF_BITS(resp, 104, 16);
94                 card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
95                 card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
96                 card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
97                 card->cid.prod_name[3]  = UNSTUFF_BITS(resp, 72, 8);
98                 card->cid.prod_name[4]  = UNSTUFF_BITS(resp, 64, 8);
99                 card->cid.prod_name[5]  = UNSTUFF_BITS(resp, 56, 8);
100                 card->cid.prv           = UNSTUFF_BITS(resp, 48, 8);
101                 card->cid.serial        = UNSTUFF_BITS(resp, 16, 32);
102                 card->cid.month         = UNSTUFF_BITS(resp, 12, 4);
103                 card->cid.year          = UNSTUFF_BITS(resp, 8, 4) + 1997;
104                 break;
105
106         default:
107                 pr_err("%s: card has unknown MMCA version %d\n",
108                         mmc_hostname(card->host), card->csd.mmca_vsn);
109                 return -EINVAL;
110         }
111
112         return 0;
113 }
114
115 static void mmc_set_erase_size(struct mmc_card *card)
116 {
117         if (card->ext_csd.erase_group_def & 1)
118                 card->erase_size = card->ext_csd.hc_erase_size;
119         else
120                 card->erase_size = card->csd.erase_size;
121
122         mmc_init_erase(card);
123 }
124
125 /*
126  * Given a 128-bit response, decode to our card CSD structure.
127  */
128 static int mmc_decode_csd(struct mmc_card *card)
129 {
130         struct mmc_csd *csd = &card->csd;
131         unsigned int e, m, a, b;
132         u32 *resp = card->raw_csd;
133
134         /*
135          * We only understand CSD structure v1.1 and v1.2.
136          * v1.2 has extra information in bits 15, 11 and 10.
137          * We also support eMMC v4.4 & v4.41.
138          */
139         csd->structure = UNSTUFF_BITS(resp, 126, 2);
140         if (csd->structure == 0) {
141                 pr_err("%s: unrecognised CSD structure version %d\n",
142                         mmc_hostname(card->host), csd->structure);
143                 return -EINVAL;
144         }
145
146         csd->mmca_vsn    = UNSTUFF_BITS(resp, 122, 4);
147         m = UNSTUFF_BITS(resp, 115, 4);
148         e = UNSTUFF_BITS(resp, 112, 3);
149         csd->tacc_ns     = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
150         csd->tacc_clks   = UNSTUFF_BITS(resp, 104, 8) * 100;
151
152         m = UNSTUFF_BITS(resp, 99, 4);
153         e = UNSTUFF_BITS(resp, 96, 3);
154         csd->max_dtr      = tran_exp[e] * tran_mant[m];
155         csd->cmdclass     = UNSTUFF_BITS(resp, 84, 12);
156
157         e = UNSTUFF_BITS(resp, 47, 3);
158         m = UNSTUFF_BITS(resp, 62, 12);
159         csd->capacity     = (1 + m) << (e + 2);
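        /*
         * i.e. (C_SIZE + 1) << (C_SIZE_MULT + 2) blocks of 2^READ_BL_LEN
         * bytes.  A device larger than 2GB reports the maximum C_SIZE of
         * 0xFFF with C_SIZE_MULT = 7, which yields the "magic" 4096 * 512
         * value that mmc_read_ext_csd() uses to spot high capacity cards.
         */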
160
161         csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
162         csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
163         csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
164         csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
165         csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
166         csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
167         csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
168         csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
169
170         if (csd->write_blkbits >= 9) {
171                 a = UNSTUFF_BITS(resp, 42, 5);
172                 b = UNSTUFF_BITS(resp, 37, 5);
173                 csd->erase_size = (a + 1) * (b + 1);
174                 csd->erase_size <<= csd->write_blkbits - 9;
175         }
176
177         return 0;
178 }
179
180 static void mmc_select_card_type(struct mmc_card *card)
181 {
182         struct mmc_host *host = card->host;
183         u8 card_type = card->ext_csd.raw_card_type;
184         u32 caps = host->caps, caps2 = host->caps2;
185         unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
186         unsigned int avail_type = 0;
187
188         if (caps & MMC_CAP_MMC_HIGHSPEED &&
189             card_type & EXT_CSD_CARD_TYPE_HS_26) {
190                 hs_max_dtr = MMC_HIGH_26_MAX_DTR;
191                 avail_type |= EXT_CSD_CARD_TYPE_HS_26;
192         }
193
194         if (caps & MMC_CAP_MMC_HIGHSPEED &&
195             card_type & EXT_CSD_CARD_TYPE_HS_52) {
196                 hs_max_dtr = MMC_HIGH_52_MAX_DTR;
197                 avail_type |= EXT_CSD_CARD_TYPE_HS_52;
198         }
199
200         if (caps & MMC_CAP_1_8V_DDR &&
201             card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
202                 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
203                 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
204         }
205
206         if (caps & MMC_CAP_1_2V_DDR &&
207             card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
208                 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
209                 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
210         }
211
212         if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
213             card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
214                 hs200_max_dtr = MMC_HS200_MAX_DTR;
215                 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
216         }
217
218         if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
219             card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
220                 hs200_max_dtr = MMC_HS200_MAX_DTR;
221                 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
222         }
223
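        /*
         * HS400 runs DDR at the HS200 clock rate, so HS400-capable cards
         * reuse hs200_max_dtr as their limit here.
         */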
224         if (caps2 & MMC_CAP2_HS400_1_8V &&
225             card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
226                 hs200_max_dtr = MMC_HS200_MAX_DTR;
227                 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
228         }
229
230         if (caps2 & MMC_CAP2_HS400_1_2V &&
231             card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
232                 hs200_max_dtr = MMC_HS200_MAX_DTR;
233                 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
234         }
235
236         card->ext_csd.hs_max_dtr = hs_max_dtr;
237         card->ext_csd.hs200_max_dtr = hs200_max_dtr;
238         card->mmc_avail_type = avail_type;
239 }
240
241 static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
242 {
243         u8 hc_erase_grp_sz, hc_wp_grp_sz;
244
245         /*
246          * Disable these attributes by default
247          */
248         card->ext_csd.enhanced_area_offset = -EINVAL;
249         card->ext_csd.enhanced_area_size = -EINVAL;
250
251         /*
252          * Enhanced area feature support -- check whether the eMMC
253          * card has the Enhanced area enabled.  If so, export enhanced
254          * area offset and size to user by adding sysfs interface.
255          */
256         if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
257             (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
258                 if (card->ext_csd.partition_setting_completed) {
259                         hc_erase_grp_sz =
260                                 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
261                         hc_wp_grp_sz =
262                                 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
263
264                         /*
265                          * calculate the enhanced data area offset, in bytes
266                          */
267                         card->ext_csd.enhanced_area_offset =
268                                 (ext_csd[139] << 24) + (ext_csd[138] << 16) +
269                                 (ext_csd[137] << 8) + ext_csd[136];
270                         if (mmc_card_blockaddr(card))
271                                 card->ext_csd.enhanced_area_offset <<= 9;
272                         /*
273                          * calculate the enhanced data area size, in kilobytes
274                          */
275                         card->ext_csd.enhanced_area_size =
276                                 (ext_csd[142] << 16) + (ext_csd[141] << 8) +
277                                 ext_csd[140];
278                         card->ext_csd.enhanced_area_size *=
279                                 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
280                         card->ext_csd.enhanced_area_size <<= 9;
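                        /*
                         * One ENH_SIZE_MULT unit is a write protect group
                         * of hc_erase_grp_sz * hc_wp_grp_sz * 512 KiB, so
                         * the shift by 9 leaves the size expressed in KiB.
                         */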
281                 } else {
282                         pr_warn("%s: defines enhanced area without partition setting complete\n",
283                                 mmc_hostname(card->host));
284                 }
285         }
286 }
287
288 static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
289 {
290         int idx;
291         u8 hc_erase_grp_sz, hc_wp_grp_sz;
292         unsigned int part_size;
293
294         /*
295          * General purpose partition feature support --
296          * If ext_csd has the size of general purpose partitions,
297          * set size, part_cfg, partition name in mmc_part.
298          */
299         if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
300             EXT_CSD_PART_SUPPORT_PART_EN) {
301                 hc_erase_grp_sz =
302                         ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
303                 hc_wp_grp_sz =
304                         ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
305
306                 for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
307                         if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
308                             !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
309                             !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
310                                 continue;
311                         if (card->ext_csd.partition_setting_completed == 0) {
312                                 pr_warn("%s: has partition size defined without partition complete\n",
313                                         mmc_hostname(card->host));
314                                 break;
315                         }
316                         part_size =
317                                 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
318                                 << 16) +
319                                 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
320                                 << 8) +
321                                 ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
322                         part_size *= (size_t)(hc_erase_grp_sz *
323                                 hc_wp_grp_sz);
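                        /*
                         * part_size is now in 512 KiB units (GP_SIZE_MULT *
                         * HC_WP_GRP_SIZE * HC_ERASE_GRP_SIZE); the shift by
                         * 19 below converts it to bytes.
                         */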
324                         mmc_part_add(card, part_size << 19,
325                                 EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
326                                 "gp%d", idx, false,
327                                 MMC_BLK_DATA_AREA_GP);
328                 }
329         }
330 }
331
332 /*
333  * Decode extended CSD.
334  */
335 static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
336 {
337         int err = 0, idx;
338         unsigned int part_size;
339
340         /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
341         card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
342         if (card->csd.structure == 3) {
343                 if (card->ext_csd.raw_ext_csd_structure > 2) {
344                         pr_err("%s: unrecognised EXT_CSD structure "
345                                 "version %d\n", mmc_hostname(card->host),
346                                         card->ext_csd.raw_ext_csd_structure);
347                         err = -EINVAL;
348                         goto out;
349                 }
350         }
351
352         /*
353          * The EXT_CSD format is meant to be forward compatible. As long
354          * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
355          * are authorized, see JEDEC JESD84-B50 section B.8.
356          */
357         card->ext_csd.rev = ext_csd[EXT_CSD_REV];
358
359         card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
360         card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
361         card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
362         card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
363         if (card->ext_csd.rev >= 2) {
364                 card->ext_csd.sectors =
365                         ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
366                         ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
367                         ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
368                         ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
369
370                 /* Cards with density > 2GiB are sector addressed */
371                 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
372                         mmc_card_set_blockaddr(card);
373         }
374
375         card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
376         mmc_select_card_type(card);
377
378         card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
379         card->ext_csd.raw_erase_timeout_mult =
380                 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
381         card->ext_csd.raw_hc_erase_grp_size =
382                 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
383         if (card->ext_csd.rev >= 3) {
384                 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
385                 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
386
387                 /* EXT_CSD value is in units of 10ms, but we store in ms */
388                 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
389
390                 /* Sleep / awake timeout in 100ns units */
391                 if (sa_shift > 0 && sa_shift <= 0x17)
392                         card->ext_csd.sa_timeout =
393                                         1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
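                /* e.g. the maximum S_A_TIMEOUT of 0x17 gives 2^23 * 100ns, ~840ms */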
394                 card->ext_csd.erase_group_def =
395                         ext_csd[EXT_CSD_ERASE_GROUP_DEF];
396                 card->ext_csd.hc_erase_timeout = 300 *
397                         ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
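                /* HC_ERASE_GRP_SIZE is in 512 KiB units; << 10 gives 512-byte sectors */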
398                 card->ext_csd.hc_erase_size =
399                         ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
400
401                 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
402
403                 /*
404                  * There are two boot regions of equal size, defined in
405                  * multiples of 128K.
406                  */
407                 if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
408                         for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
409                                 part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
410                                 mmc_part_add(card, part_size,
411                                         EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
412                                         "boot%d", idx, true,
413                                         MMC_BLK_DATA_AREA_BOOT);
414                         }
415                 }
416         }
417
418         card->ext_csd.raw_hc_erase_gap_size =
419                 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
420         card->ext_csd.raw_sec_trim_mult =
421                 ext_csd[EXT_CSD_SEC_TRIM_MULT];
422         card->ext_csd.raw_sec_erase_mult =
423                 ext_csd[EXT_CSD_SEC_ERASE_MULT];
424         card->ext_csd.raw_sec_feature_support =
425                 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
426         card->ext_csd.raw_trim_mult =
427                 ext_csd[EXT_CSD_TRIM_MULT];
428         card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
429         if (card->ext_csd.rev >= 4) {
430                 if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
431                     EXT_CSD_PART_SETTING_COMPLETED)
432                         card->ext_csd.partition_setting_completed = 1;
433                 else
434                         card->ext_csd.partition_setting_completed = 0;
435
436                 mmc_manage_enhanced_area(card, ext_csd);
437
438                 mmc_manage_gp_partitions(card, ext_csd);
439
440                 card->ext_csd.sec_trim_mult =
441                         ext_csd[EXT_CSD_SEC_TRIM_MULT];
442                 card->ext_csd.sec_erase_mult =
443                         ext_csd[EXT_CSD_SEC_ERASE_MULT];
444                 card->ext_csd.sec_feature_support =
445                         ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
446                 card->ext_csd.trim_timeout = 300 *
447                         ext_csd[EXT_CSD_TRIM_MULT];
448
449                 /*
450                  * Note that the call to mmc_part_add above defaults to read
451                  * only. If this default assumption is changed, the call must
452                  * take into account the value of boot_locked below.
453                  */
454                 card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
455                 card->ext_csd.boot_ro_lockable = true;
456
457                 /* Save power class values */
458                 card->ext_csd.raw_pwr_cl_52_195 =
459                         ext_csd[EXT_CSD_PWR_CL_52_195];
460                 card->ext_csd.raw_pwr_cl_26_195 =
461                         ext_csd[EXT_CSD_PWR_CL_26_195];
462                 card->ext_csd.raw_pwr_cl_52_360 =
463                         ext_csd[EXT_CSD_PWR_CL_52_360];
464                 card->ext_csd.raw_pwr_cl_26_360 =
465                         ext_csd[EXT_CSD_PWR_CL_26_360];
466                 card->ext_csd.raw_pwr_cl_200_195 =
467                         ext_csd[EXT_CSD_PWR_CL_200_195];
468                 card->ext_csd.raw_pwr_cl_200_360 =
469                         ext_csd[EXT_CSD_PWR_CL_200_360];
470                 card->ext_csd.raw_pwr_cl_ddr_52_195 =
471                         ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
472                 card->ext_csd.raw_pwr_cl_ddr_52_360 =
473                         ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
474                 card->ext_csd.raw_pwr_cl_ddr_200_360 =
475                         ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
476         }
477
478         if (card->ext_csd.rev >= 5) {
479                 /* Adjust production date as per JEDEC JESD84-B451 */
480                 if (card->cid.year < 2010)
481                         card->cid.year += 16;
482
483                 /* check whether the eMMC card supports BKOPS */
484                 if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
485                         card->ext_csd.bkops = 1;
486                         card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
487                         card->ext_csd.raw_bkops_status =
488                                 ext_csd[EXT_CSD_BKOPS_STATUS];
489                         if (!card->ext_csd.bkops_en)
490                                 pr_info("%s: BKOPS_EN bit is not set\n",
491                                         mmc_hostname(card->host));
492                 }
493
494                 /* check whether the eMMC card supports HPI */
495                 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
496                         card->ext_csd.hpi = 1;
497                         if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
498                                 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
499                         else
500                                 card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
501                         /*
502                          * Indicate the maximum timeout to close
503                          * a command interrupted by HPI
504                          */
505                         card->ext_csd.out_of_int_time =
506                                 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
507                 }
508
509                 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
510                 card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
511
512                 /*
513                  * RPMB regions are defined in multiples of 128K.
514                  */
515                 card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
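                /*
                 * RPMB accesses use reliable writes and therefore need CMD23
                 * (SET_BLOCK_COUNT) support in the host, hence the
                 * mmc_host_cmd23() check below.
                 */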
516                 if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
517                         mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
518                                 EXT_CSD_PART_CONFIG_ACC_RPMB,
519                                 "rpmb", 0, false,
520                                 MMC_BLK_DATA_AREA_RPMB);
521                 }
522         }
523
524         card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
525         if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
526                 card->erased_byte = 0xFF;
527         else
528                 card->erased_byte = 0x0;
529
530         /* eMMC v4.5 or later */
531         if (card->ext_csd.rev >= 6) {
532                 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
533
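                /* Both times are specified in 10ms units; store them in ms */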
534                 card->ext_csd.generic_cmd6_time = 10 *
535                         ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
536                 card->ext_csd.power_off_longtime = 10 *
537                         ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
538
539                 card->ext_csd.cache_size =
540                         ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
541                         ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
542                         ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
543                         ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
544
545                 if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
546                         card->ext_csd.data_sector_size = 4096;
547                 else
548                         card->ext_csd.data_sector_size = 512;
549
550                 if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
551                     (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
552                         card->ext_csd.data_tag_unit_size =
553                         ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
554                         (card->ext_csd.data_sector_size);
555                 } else {
556                         card->ext_csd.data_tag_unit_size = 0;
557                 }
558
559                 card->ext_csd.max_packed_writes =
560                         ext_csd[EXT_CSD_MAX_PACKED_WRITES];
561                 card->ext_csd.max_packed_reads =
562                         ext_csd[EXT_CSD_MAX_PACKED_READS];
563         } else {
564                 card->ext_csd.data_sector_size = 512;
565         }
566
567         /* eMMC v5 or later */
568         if (card->ext_csd.rev >= 7) {
569                 memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
570                        MMC_FIRMWARE_LEN);
571                 card->ext_csd.ffu_capable =
572                         (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
573                         !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
574         }
575 out:
576         return err;
577 }
578
579 static int mmc_read_ext_csd(struct mmc_card *card)
580 {
581         u8 *ext_csd;
582         int err;
583
584         if (!mmc_can_ext_csd(card))
585                 return 0;
586
587         err = mmc_get_ext_csd(card, &ext_csd);
588         if (err) {
589                 /* If the host or the card can't do the switch,
590                  * fail more gracefully. */
591                 if ((err != -EINVAL)
592                  && (err != -ENOSYS)
593                  && (err != -EFAULT))
594                         return err;
595
596                 /*
597                  * High capacity cards should have this "magic" size
598                  * stored in their CSD.
599                  */
600                 if (card->csd.capacity == (4096 * 512)) {
601                         pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
602                                 mmc_hostname(card->host));
603                 } else {
604                         pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
605                                 mmc_hostname(card->host));
606                         err = 0;
607                 }
608
609                 return err;
610         }
611
612         err = mmc_decode_ext_csd(card, ext_csd);
613         kfree(ext_csd);
614         return err;
615 }
616
617 static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
618 {
619         u8 *bw_ext_csd;
620         int err;
621
622         if (bus_width == MMC_BUS_WIDTH_1)
623                 return 0;
624
625         err = mmc_get_ext_csd(card, &bw_ext_csd);
626         if (err)
627                 return err;
628
629         /* only compare read only fields */
630         err = !((card->ext_csd.raw_partition_support ==
631                         bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
632                 (card->ext_csd.raw_erased_mem_count ==
633                         bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
634                 (card->ext_csd.rev ==
635                         bw_ext_csd[EXT_CSD_REV]) &&
636                 (card->ext_csd.raw_ext_csd_structure ==
637                         bw_ext_csd[EXT_CSD_STRUCTURE]) &&
638                 (card->ext_csd.raw_card_type ==
639                         bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
640                 (card->ext_csd.raw_s_a_timeout ==
641                         bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
642                 (card->ext_csd.raw_hc_erase_gap_size ==
643                         bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
644                 (card->ext_csd.raw_erase_timeout_mult ==
645                         bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
646                 (card->ext_csd.raw_hc_erase_grp_size ==
647                         bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
648                 (card->ext_csd.raw_sec_trim_mult ==
649                         bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
650                 (card->ext_csd.raw_sec_erase_mult ==
651                         bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
652                 (card->ext_csd.raw_sec_feature_support ==
653                         bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
654                 (card->ext_csd.raw_trim_mult ==
655                         bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
656                 (card->ext_csd.raw_sectors[0] ==
657                         bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
658                 (card->ext_csd.raw_sectors[1] ==
659                         bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
660                 (card->ext_csd.raw_sectors[2] ==
661                         bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
662                 (card->ext_csd.raw_sectors[3] ==
663                         bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
664                 (card->ext_csd.raw_pwr_cl_52_195 ==
665                         bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
666                 (card->ext_csd.raw_pwr_cl_26_195 ==
667                         bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
668                 (card->ext_csd.raw_pwr_cl_52_360 ==
669                         bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
670                 (card->ext_csd.raw_pwr_cl_26_360 ==
671                         bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
672                 (card->ext_csd.raw_pwr_cl_200_195 ==
673                         bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
674                 (card->ext_csd.raw_pwr_cl_200_360 ==
675                         bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
676                 (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
677                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
678                 (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
679                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
680                 (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
681                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
682
683         if (err)
684                 err = -EINVAL;
685
686         kfree(bw_ext_csd);
687         return err;
688 }
689
690 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
691         card->raw_cid[2], card->raw_cid[3]);
692 MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
693         card->raw_csd[2], card->raw_csd[3]);
694 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
695 MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
696 MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
697 MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
698 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
699 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
700 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
701 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
702 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
703 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
704 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
705                 card->ext_csd.enhanced_area_offset);
706 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
707 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
708 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
709
710 static ssize_t mmc_fwrev_show(struct device *dev,
711                               struct device_attribute *attr,
712                               char *buf)
713 {
714         struct mmc_card *card = mmc_dev_to_card(dev);
715
716         if (card->ext_csd.rev < 7) {
717                 return sprintf(buf, "0x%x\n", card->cid.fwrev);
718         } else {
719                 return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
720                                card->ext_csd.fwrev);
721         }
722 }
723
724 static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
725
726 static struct attribute *mmc_std_attrs[] = {
727         &dev_attr_cid.attr,
728         &dev_attr_csd.attr,
729         &dev_attr_date.attr,
730         &dev_attr_erase_size.attr,
731         &dev_attr_preferred_erase_size.attr,
732         &dev_attr_fwrev.attr,
733         &dev_attr_ffu_capable.attr,
734         &dev_attr_hwrev.attr,
735         &dev_attr_manfid.attr,
736         &dev_attr_name.attr,
737         &dev_attr_oemid.attr,
738         &dev_attr_prv.attr,
739         &dev_attr_serial.attr,
740         &dev_attr_enhanced_area_offset.attr,
741         &dev_attr_enhanced_area_size.attr,
742         &dev_attr_raw_rpmb_size_mult.attr,
743         &dev_attr_rel_sectors.attr,
744         NULL,
745 };
746 ATTRIBUTE_GROUPS(mmc_std);
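/*
 * These attributes appear in the card's sysfs directory, typically
 * /sys/bus/mmc/devices/mmcX:RRRR/ (host index and RCA), e.g.
 * /sys/bus/mmc/devices/mmc0:0001/manfid.
 */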
747
748 static struct device_type mmc_type = {
749         .groups = mmc_std_groups,
750 };
751
752 /*
753  * Select the PowerClass for the current bus width
754  * If power class is defined for 4/8 bit bus in the
755  * extended CSD register, select it by executing the
756  * mmc_switch command.
757  */
758 static int __mmc_select_powerclass(struct mmc_card *card,
759                                    unsigned int bus_width)
760 {
761         struct mmc_host *host = card->host;
762         struct mmc_ext_csd *ext_csd = &card->ext_csd;
763         unsigned int pwrclass_val = 0;
764         int err = 0;
765
766         switch (1 << host->ios.vdd) {
767         case MMC_VDD_165_195:
768                 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
769                         pwrclass_val = ext_csd->raw_pwr_cl_26_195;
770                 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
771                         pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
772                                 ext_csd->raw_pwr_cl_52_195 :
773                                 ext_csd->raw_pwr_cl_ddr_52_195;
774                 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
775                         pwrclass_val = ext_csd->raw_pwr_cl_200_195;
776                 break;
777         case MMC_VDD_27_28:
778         case MMC_VDD_28_29:
779         case MMC_VDD_29_30:
780         case MMC_VDD_30_31:
781         case MMC_VDD_31_32:
782         case MMC_VDD_32_33:
783         case MMC_VDD_33_34:
784         case MMC_VDD_34_35:
785         case MMC_VDD_35_36:
786                 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
787                         pwrclass_val = ext_csd->raw_pwr_cl_26_360;
788                 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
789                         pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
790                                 ext_csd->raw_pwr_cl_52_360 :
791                                 ext_csd->raw_pwr_cl_ddr_52_360;
792                 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
793                         pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
794                                 ext_csd->raw_pwr_cl_ddr_200_360 :
795                                 ext_csd->raw_pwr_cl_200_360;
796                 break;
797         default:
798                 pr_warn("%s: Voltage range not supported for power class\n",
799                         mmc_hostname(host));
800                 return -EINVAL;
801         }
802
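        /*
         * Each power class byte packs two values: the high nibble applies
         * to an 8-bit bus and the low nibble to a 4-bit bus.
         */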
803         if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
804                 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
805                                 EXT_CSD_PWR_CL_8BIT_SHIFT;
806         else
807                 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
808                                 EXT_CSD_PWR_CL_4BIT_SHIFT;
809
810         /* If the power class is different from the default value */
811         if (pwrclass_val > 0) {
812                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
813                                  EXT_CSD_POWER_CLASS,
814                                  pwrclass_val,
815                                  card->ext_csd.generic_cmd6_time);
816         }
817
818         return err;
819 }
820
821 static int mmc_select_powerclass(struct mmc_card *card)
822 {
823         struct mmc_host *host = card->host;
824         u32 bus_width, ext_csd_bits;
825         int err, ddr;
826
827         /* Power class selection is supported for versions >= 4.0 */
828         if (!mmc_can_ext_csd(card))
829                 return 0;
830
831         bus_width = host->ios.bus_width;
832         /* Power class values are defined only for 4/8 bit bus */
833         if (bus_width == MMC_BUS_WIDTH_1)
834                 return 0;
835
836         ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
837         if (ddr)
838                 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
839                         EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
840         else
841                 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
842                         EXT_CSD_BUS_WIDTH_8 :  EXT_CSD_BUS_WIDTH_4;
843
844         err = __mmc_select_powerclass(card, ext_csd_bits);
845         if (err)
846                 pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
847                         mmc_hostname(host), 1 << bus_width, ddr);
848
849         return err;
850 }
851
852 /*
853  * Set the bus speed for the selected speed mode.
854  */
855 static void mmc_set_bus_speed(struct mmc_card *card)
856 {
857         unsigned int max_dtr = (unsigned int)-1;
858
859         if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
860              max_dtr > card->ext_csd.hs200_max_dtr)
861                 max_dtr = card->ext_csd.hs200_max_dtr;
862         else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
863                 max_dtr = card->ext_csd.hs_max_dtr;
864         else if (max_dtr > card->csd.max_dtr)
865                 max_dtr = card->csd.max_dtr;
866
867         mmc_set_clock(card->host, max_dtr);
868 }
869
870 /*
871  * Select the bus width among 4-bit and 8-bit (SDR) modes.
872  * If the bus width is changed successfully, return the selected width value.
873  * Zero is returned instead of error value if the wide width is not supported.
874  */
875 static int mmc_select_bus_width(struct mmc_card *card)
876 {
877         static unsigned ext_csd_bits[] = {
878                 EXT_CSD_BUS_WIDTH_8,
879                 EXT_CSD_BUS_WIDTH_4,
880         };
881         static unsigned bus_widths[] = {
882                 MMC_BUS_WIDTH_8,
883                 MMC_BUS_WIDTH_4,
884         };
885         struct mmc_host *host = card->host;
886         unsigned idx, bus_width = 0;
887         int err = 0;
888
889         if (!mmc_can_ext_csd(card) ||
890             !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
891                 return 0;
892
893         idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
894
895         /*
896          * Unlike SD, MMC cards don't have a configuration register to report
897          * the supported bus width. So the bus test command should be run to
898          * identify the supported bus width, or the ext_csd values at the
899          * current bus width compared with those read earlier in 1-bit mode.
900          */
901         for (; idx < ARRAY_SIZE(bus_widths); idx++) {
902                 /*
903                  * If the host is capable of 8-bit transfers, switch
904                  * the device to 8-bit transfer mode first. If the
905                  * mmc switch command returns an error, fall back to
906                  * 4-bit transfer mode. On success, set the corresponding
907                  * bus width on the host.
908                  */
909                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
910                                  EXT_CSD_BUS_WIDTH,
911                                  ext_csd_bits[idx],
912                                  card->ext_csd.generic_cmd6_time);
913                 if (err)
914                         continue;
915
916                 bus_width = bus_widths[idx];
917                 mmc_set_bus_width(host, bus_width);
918
919                 /*
920                  * If controller can't handle bus width test,
921                  * compare ext_csd previously read in 1 bit mode
922                  * against ext_csd at new bus width
923                  */
924                 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
925                         err = mmc_compare_ext_csds(card, bus_width);
926                 else
927                         err = mmc_bus_test(card, bus_width);
928
929                 if (!err) {
930                         err = bus_width;
931                         break;
932                 } else {
933                         pr_warn("%s: switch to bus width %d failed\n",
934                                 mmc_hostname(host), ext_csd_bits[idx]);
935                 }
936         }
937
938         return err;
939 }
940
941 /*
942  * Switch to the high-speed mode
943  */
944 static int mmc_select_hs(struct mmc_card *card)
945 {
946         int err;
947
948         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
949                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
950                            card->ext_csd.generic_cmd6_time,
951                            true, true, true);
952         if (!err)
953                 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
954
955         return err;
956 }
957
958 /*
959  * Activate wide bus and DDR if supported.
960  */
961 static int mmc_select_hs_ddr(struct mmc_card *card)
962 {
963         struct mmc_host *host = card->host;
964         u32 bus_width, ext_csd_bits;
965         int err = 0;
966
967         if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
968                 return 0;
969
970         bus_width = host->ios.bus_width;
971         if (bus_width == MMC_BUS_WIDTH_1)
972                 return 0;
973
974         ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
975                 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
976
977         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
978                         EXT_CSD_BUS_WIDTH,
979                         ext_csd_bits,
980                         card->ext_csd.generic_cmd6_time);
981         if (err) {
982                 pr_err("%s: switch to bus width %d ddr failed\n",
983                         mmc_hostname(host), 1 << bus_width);
984                 return err;
985         }
986
987         /*
988          * eMMC cards can support 3.3V to 1.2V i/o (vccq)
989          * signaling.
990          *
991          * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
992          *
993          * 1.8V vccq at 3.3V core voltage (vcc) is not required
994          * in the JEDEC spec for DDR.
995          *
996          * Even though an (e)MMC card can support 3.3V to 1.2V vccq,
997          * not every host controller can, e.g. some SDHCI controllers
998          * that connect to an eMMC device. Some of these host
999          * controllers still need to use 1.8V vccq to support
1000          * DDR mode.
1001          *
1002          * So the sequence will be:
1003          * if (host and device can both support 1.2v IO)
1004          *      use 1.2v IO;
1005          * else if (host and device can both support 1.8v IO)
1006          *      use 1.8v IO;
1007          * so if host and device can only support 3.3v IO, this is the
1008          * last choice.
1009          *
1010          * WARNING: eMMC rules are NOT the same as SD DDR
1011          */
1012         err = -EINVAL;
1013         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
1014                 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1015
1016         if (err && (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
1017                 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1018
1019         /* make sure vccq is 3.3v after switching disaster */
1020         if (err)
1021                 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
1022
1023         if (!err)
1024                 mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
1025
1026         return err;
1027 }
1028
1029 static int mmc_select_hs400(struct mmc_card *card)
1030 {
1031         struct mmc_host *host = card->host;
1032         int err = 0;
1033
1034         /*
1035          * HS400 mode requires 8-bit bus width
1036          */
1037         if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1038               host->ios.bus_width == MMC_BUS_WIDTH_8))
1039                 return 0;
1040
1041         /*
1042          * Before switching to dual data rate operation for HS400,
1043          * it is required to convert from HS200 mode to HS mode.
1044          */
1045         mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1046         mmc_set_bus_speed(card);
1047
1048         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1049                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
1050                            card->ext_csd.generic_cmd6_time,
1051                            true, true, true);
1052         if (err) {
1053                 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
1054                         mmc_hostname(host), err);
1055                 return err;
1056         }
1057
1058         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1059                          EXT_CSD_BUS_WIDTH,
1060                          EXT_CSD_DDR_BUS_WIDTH_8,
1061                          card->ext_csd.generic_cmd6_time);
1062         if (err) {
1063                 pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
1064                         mmc_hostname(host), err);
1065                 return err;
1066         }
1067
1068         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1069                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400,
1070                            card->ext_csd.generic_cmd6_time,
1071                            true, true, true);
1072         if (err) {
1073                 pr_err("%s: switch to hs400 failed, err:%d\n",
1074                          mmc_hostname(host), err);
1075                 return err;
1076         }
1077
1078         mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1079         mmc_set_bus_speed(card);
1080
1081         return 0;
1082 }
1083
1084 /*
1085  * For devices supporting HS200 mode, the following sequence
1086  * should be done before executing the tuning process.
1087  * 1. set the desired bus width (4-bit or 8-bit, 1-bit is not supported)
1088  * 2. switch to HS200 mode
1089  * 3. set the clock to > 52 MHz and <= 200 MHz
1090  */
1091 static int mmc_select_hs200(struct mmc_card *card)
1092 {
1093         struct mmc_host *host = card->host;
1094         int err = -EINVAL;
1095
1096         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1097                 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1098
1099         if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
1100                 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1101
1102         /* If it fails, try again during the next card power cycle */
1103         if (err)
1104                 goto err;
1105
1106         /*
1107          * Set the bus width (4 or 8) according to the host's support and
1108          * switch to HS200 mode if the bus width is set successfully.
1109          */
1110         err = mmc_select_bus_width(card);
1111         if (!IS_ERR_VALUE(err)) {
1112                 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1113                                    EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200,
1114                                    card->ext_csd.generic_cmd6_time,
1115                                    true, true, true);
1116                 if (!err)
1117                         mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1118         }
1119 err:
1120         return err;
1121 }
1122
1123 /*
1124  * Activate High Speed or HS200 mode if supported.
1125  */
1126 static int mmc_select_timing(struct mmc_card *card)
1127 {
1128         int err = 0;
1129
1130         if (!mmc_can_ext_csd(card))
1131                 goto bus_speed;
1132
1133         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
1134                 err = mmc_select_hs200(card);
1135         else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
1136                 err = mmc_select_hs(card);
1137
1138         if (err && err != -EBADMSG)
1139                 return err;
1140
1141         if (err) {
1142                 pr_warn("%s: switch to %s failed\n",
1143                         mmc_hostname(card->host),
1144                         mmc_card_hs(card) ? "high-speed" :
1145                         (mmc_card_hs200(card) ? "hs200" : ""));
1146                 err = 0;
1147         }
1148
1149 bus_speed:
1150         /*
1151          * Set the bus speed to the selected bus timing.
1152          * If no timing was selected, backwards-compatible timing is the default.
1153          */
1154         mmc_set_bus_speed(card);
1155         return err;
1156 }
1157
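/*
 * Fixed tuning block patterns defined by the eMMC specification; the card
 * sends these back in response to CMD21 so the host can tune its sampling
 * point for HS200.
 */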
1158 const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE] = {
1159         0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
1160         0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
1161         0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
1162         0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
1163         0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
1164         0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
1165         0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
1166         0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
1167 };
1168 EXPORT_SYMBOL(tuning_blk_pattern_4bit);
1169
1170 const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE] = {
1171         0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
1172         0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
1173         0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
1174         0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
1175         0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
1176         0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
1177         0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
1178         0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
1179         0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
1180         0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
1181         0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
1182         0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
1183         0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
1184         0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
1185         0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
1186         0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
1187 };
1188 EXPORT_SYMBOL(tuning_blk_pattern_8bit);
1189
1190 /*
1191  * Execute tuning sequence to seek the proper bus operating
1192  * conditions for HS200 and HS400, which sends CMD21 to the device.
1193  */
1194 static int mmc_hs200_tuning(struct mmc_card *card)
1195 {
1196         struct mmc_host *host = card->host;
1197         int err = 0;
1198
1199         /*
1200          * Timing should be adjusted to the HS400 target
1201          * operation frequency for the tuning process.
1202          */
1203         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1204             host->ios.bus_width == MMC_BUS_WIDTH_8)
1205                 if (host->ops->prepare_hs400_tuning)
1206                         host->ops->prepare_hs400_tuning(host, &host->ios);
1207
1208         if (host->ops->execute_tuning) {
1209                 mmc_host_clk_hold(host);
1210                 err = host->ops->execute_tuning(host,
1211                                 MMC_SEND_TUNING_BLOCK_HS200);
1212                 mmc_host_clk_release(host);
1213
1214                 if (err)
1215                         pr_err("%s: tuning execution failed\n",
1216                                 mmc_hostname(host));
1217         }
1218
1219         return err;
1220 }
1221
1222 /*
1223  * Handle the detection and initialisation of a card.
1224  *
1225  * In the case of a resume, "oldcard" will contain the card
1226  * we're trying to reinitialise.
1227  */
1228 static int mmc_init_card(struct mmc_host *host, u32 ocr,
1229         struct mmc_card *oldcard)
1230 {
1231         struct mmc_card *card;
1232         int err;
1233         u32 cid[4];
1234         u32 rocr;
1235
1236         BUG_ON(!host);
1237         WARN_ON(!host->claimed);
1238
1239         /* Set correct bus mode for MMC before attempting init */
1240         if (!mmc_host_is_spi(host))
1241                 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1242
1243         /*
1244          * Since we're changing the OCR value, we seem to
1245          * need to tell some cards to go back to the idle
1246          * state.  We wait 1ms to give cards time to
1247          * respond.
1248          * mmc_go_idle is needed for eMMC that are asleep
1249          */
1250         mmc_go_idle(host);
1251
1252         /* The extra bit indicates that we support high capacity */
1253         err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
1254         if (err)
1255                 goto err;
1256
1257         /*
1258          * For SPI, enable CRC as appropriate.
1259          */
1260         if (mmc_host_is_spi(host)) {
1261                 err = mmc_spi_set_crc(host, use_spi_crc);
1262                 if (err)
1263                         goto err;
1264         }
1265
1266         /*
1267          * Fetch CID from card.
1268          */
1269         if (mmc_host_is_spi(host))
1270                 err = mmc_send_cid(host, cid);
1271         else
1272                 err = mmc_all_send_cid(host, cid);
1273         if (err)
1274                 goto err;
1275
1276         if (oldcard) {
1277                 if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
1278                         err = -ENOENT;
1279                         goto err;
1280                 }
1281
1282                 card = oldcard;
1283         } else {
1284                 /*
1285                  * Allocate card structure.
1286                  */
1287                 card = mmc_alloc_card(host, &mmc_type);
1288                 if (IS_ERR(card)) {
1289                         err = PTR_ERR(card);
1290                         goto err;
1291                 }
1292
1293                 card->ocr = ocr;
1294                 card->type = MMC_TYPE_MMC;
1295                 card->rca = 1;
1296                 memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
1297         }
1298
1299         /*
1300          * Call the optional HC's init_card function to handle quirks.
1301          */
1302         if (host->ops->init_card)
1303                 host->ops->init_card(host, card);
1304
1305         /*
1306          * For native busses:  set card RCA and quit open drain mode.
1307          */
1308         if (!mmc_host_is_spi(host)) {
1309                 err = mmc_set_relative_addr(card);
1310                 if (err)
1311                         goto free_card;
1312
1313                 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
1314         }
1315
1316         if (!oldcard) {
1317                 /*
1318                  * Fetch CSD from card.
1319                  */
1320                 err = mmc_send_csd(card, card->raw_csd);
1321                 if (err)
1322                         goto free_card;
1323
1324                 err = mmc_decode_csd(card);
1325                 if (err)
1326                         goto free_card;
1327                 err = mmc_decode_cid(card);
1328                 if (err)
1329                         goto free_card;
1330         }
1331
1332         /*
1333          * Configure the DSR only for cards that support it and for
1334          * hosts that request a DSR configuration.
1335          */
1336         if (card->csd.dsr_imp && host->dsr_req)
1337                 mmc_set_dsr(host);
1338
1339         /*
1340          * Select card, as all following commands rely on that.
1341          */
1342         if (!mmc_host_is_spi(host)) {
1343                 err = mmc_select_card(card);
1344                 if (err)
1345                         goto free_card;
1346         }
1347
1348         if (!oldcard) {
1349                 /* Read extended CSD. */
1350                 err = mmc_read_ext_csd(card);
1351                 if (err)
1352                         goto free_card;
1353
1354                 /* If doing byte addressing, check if required to do sector
1355                  * addressing.  Handle the case of <2GB cards needing sector
1356                  * addressing.  See section 8.1 JEDEC Standard JESD84-A441;
1357                  * the OCR register has bit 30 set for sector addressing.
1358                  */
1359                 if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
1360                         mmc_card_set_blockaddr(card);
1361
1362                 /* Erase size depends on CSD and Extended CSD */
1363                 mmc_set_erase_size(card);
1364         }
1365
1366         /*
1367          * Enable the ERASE_GRP_DEF bit when the partition setup or the host
1368          * capabilities require it.  The bit is lost after every reset or power off.
1369          */
1370         if (card->ext_csd.partition_setting_completed ||
1371             (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
1372                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1373                                  EXT_CSD_ERASE_GROUP_DEF, 1,
1374                                  card->ext_csd.generic_cmd6_time);
1375
1376                 if (err && err != -EBADMSG)
1377                         goto free_card;
1378
1379                 if (err) {
1380                         err = 0;
1381                         /*
1382                          * Invalidate the enhanced area offset & size;
1383                          * enabling ERASE_GROUP_DEF will be retried on
1384                          * the next reinitialisation.
1385                          */
1386                         card->ext_csd.enhanced_area_offset = -EINVAL;
1387                         card->ext_csd.enhanced_area_size = -EINVAL;
1388                 } else {
1389                         card->ext_csd.erase_group_def = 1;
1390                         /*
1391                          * ERASE_GRP_DEF was enabled successfully.
1392                          * This changes the erase size, so it has
1393                          * to be recalculated here.
1394                          */
1395                         mmc_set_erase_size(card);
1396                 }
1397         }
1398
1399         /*
1400          * Ensure the eMMC user data area is selected as the default partition.
1401          */
1402         if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
1403                 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
1404                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
1405                                  card->ext_csd.part_config,
1406                                  card->ext_csd.part_time);
1407                 if (err && err != -EBADMSG)
1408                         goto free_card;
1409         }
1410
1411         /*
1412          * Enable the POWER_OFF_NOTIFICATION byte in EXT_CSD (eMMC 4.5+, rev >= 6).
1413          */
1414         if (card->ext_csd.rev >= 6) {
1415                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1416                                  EXT_CSD_POWER_OFF_NOTIFICATION,
1417                                  EXT_CSD_POWER_ON,
1418                                  card->ext_csd.generic_cmd6_time);
1419                 if (err && err != -EBADMSG)
1420                         goto free_card;
1421
1422                 /*
1423                  * The err can be -EBADMSG or 0,
1424                  * so check for success and update the flag
1425                  */
1426                 if (!err)
1427                         card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
1428         }
1429
1430         /*
1431          * Select timing interface
1432          */
1433         err = mmc_select_timing(card);
1434         if (err)
1435                 goto free_card;
1436
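             /*
              * HS400 can only be entered via HS200: perform tuning at HS200
              * first and then, if the card and host both support it, switch
              * to HS400.
              */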
1437         if (mmc_card_hs200(card)) {
1438                 err = mmc_hs200_tuning(card);
1439                 if (err)
1440                         goto free_card;
1441
1442                 err = mmc_select_hs400(card);
1443                 if (err)
1444                         goto free_card;
1445         } else if (mmc_card_hs(card)) {
1446                 /* Try to select the widest supported bus width; failure here is not fatal. */
1447                 err = mmc_select_bus_width(card);
1448                 if (!IS_ERR_VALUE(err)) {
1449                         err = mmc_select_hs_ddr(card);
1450                         if (err)
1451                                 goto free_card;
1452                 }
1453         }
1454
1455         /*
1456          * Choose the power class with selected bus interface
1457          */
1458         mmc_select_powerclass(card);
1459
1460         /*
1461          * Enable HPI feature (if supported)
1462          */
1463         if (card->ext_csd.hpi) {
1464                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1465                                 EXT_CSD_HPI_MGMT, 1,
1466                                 card->ext_csd.generic_cmd6_time);
1467                 if (err && err != -EBADMSG)
1468                         goto free_card;
1469                 if (err) {
1470                         pr_warn("%s: Enabling HPI failed\n",
1471                                 mmc_hostname(card->host));
1472                         err = 0;
1473                 } else
1474                         card->ext_csd.hpi_en = 1;
1475         }
1476
1477         /*
1478          * A non-zero cache size reported in EXT_CSD indicates that the
1479          * card has a cache, which can be turned on.
1480          */
1481         if (card->ext_csd.cache_size > 0) {
1482                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1483                                 EXT_CSD_CACHE_CTRL, 1,
1484                                 card->ext_csd.generic_cmd6_time);
1485                 if (err && err != -EBADMSG)
1486                         goto free_card;
1487
1488                 /*
1489                  * The cache has been turned on successfully only if there was no error.
1490                  */
1491                 if (err) {
1492                         pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
1493                                 mmc_hostname(card->host), err);
1494                         card->ext_csd.cache_ctrl = 0;
1495                         err = 0;
1496                 } else {
1497                         card->ext_csd.cache_ctrl = 1;
1498                 }
1499         }
1500
1501         /*
1502          * Enable packed commands only if the card meets the mandatory
1503          * minimums defined by the spec: packed read 5, packed write 3.
1504          */
1505         if (card->ext_csd.max_packed_writes >= 3 &&
1506             card->ext_csd.max_packed_reads >= 5 &&
1507             host->caps2 & MMC_CAP2_PACKED_CMD) {
1508                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1509                                 EXT_CSD_EXP_EVENTS_CTRL,
1510                                 EXT_CSD_PACKED_EVENT_EN,
1511                                 card->ext_csd.generic_cmd6_time);
1512                 if (err && err != -EBADMSG)
1513                         goto free_card;
1514                 if (err) {
1515                         pr_warn("%s: Enabling packed event failed\n",
1516                                 mmc_hostname(card->host));
1517                         card->ext_csd.packed_event_en = 0;
1518                         err = 0;
1519                 } else {
1520                         card->ext_csd.packed_event_en = 1;
1521                 }
1522         }
1523
1524         if (!oldcard)
1525                 host->card = card;
1526
1527         return 0;
1528
1529 free_card:
1530         if (!oldcard)
1531                 mmc_remove_card(card);
1532 err:
1533         return err;
1534 }
1535
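     /*
      * Sleep/Awake (CMD5) support was added in eMMC 4.3, which corresponds
      * to an EXT_CSD revision of 3 or higher.
      */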
1536 static int mmc_can_sleep(struct mmc_card *card)
1537 {
1538         return (card && card->ext_csd.rev >= 3);
1539 }
1540
1541 static int mmc_sleep(struct mmc_host *host)
1542 {
1543         struct mmc_command cmd = {0};
1544         struct mmc_card *card = host->card;
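             /* ext_csd.sa_timeout is in units of 100ns; convert it to ms. */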
1545         unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
1546         int err;
1547
1548         err = mmc_deselect_cards(host);
1549         if (err)
1550                 return err;
1551
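             /* CMD5 argument: RCA in bits [31:16]; bit 15 set selects Sleep. */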
1552         cmd.opcode = MMC_SLEEP_AWAKE;
1553         cmd.arg = card->rca << 16;
1554         cmd.arg |= 1 << 15;
1555
1556         /*
1557          * If the max_busy_timeout of the host is specified, validate it against
1558          * the sleep cmd timeout. A failure means we need to prevent the host
1559          * from doing hw busy detection, which is done by converting to a R1
1560          * response instead of a R1B.
1561          */
1562         if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
1563                 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1564         } else {
1565                 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1566                 cmd.busy_timeout = timeout_ms;
1567         }
1568
1569         err = mmc_wait_for_cmd(host, &cmd, 0);
1570         if (err)
1571                 return err;
1572
1573         /*
1574          * If the host does not wait while the card signals busy, then we
1575          * will have to wait the sleep/awake timeout.  Note, we cannot use the
1576          * SEND_STATUS command to poll the status because that command (and most
1577          * others) is invalid while the card sleeps.
1578          */
1579         if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
1580                 mmc_delay(timeout_ms);
1581
1582         return err;
1583 }
1584
1585 static int mmc_can_poweroff_notify(const struct mmc_card *card)
1586 {
1587         return card &&
1588                 mmc_card_mmc(card) &&
1589                 (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
1590 }
1591
1592 static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
1593 {
1594         unsigned int timeout = card->ext_csd.generic_cmd6_time;
1595         int err;
1596
1597         /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
1598         if (notify_type == EXT_CSD_POWER_OFF_LONG)
1599                 timeout = card->ext_csd.power_off_longtime;
1600
1601         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1602                         EXT_CSD_POWER_OFF_NOTIFICATION,
1603                         notify_type, timeout, true, false, false);
1604         if (err)
1605                 pr_err("%s: Power Off Notification timed out, %u\n",
1606                        mmc_hostname(card->host), timeout);
1607
1608         /* Disable the power off notification after the switch operation. */
1609         card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
1610
1611         return err;
1612 }
1613
1614 /*
1615  * Host is being removed. Free up the current card.
1616  */
1617 static void mmc_remove(struct mmc_host *host)
1618 {
1619         BUG_ON(!host);
1620         BUG_ON(!host->card);
1621
1622         mmc_remove_card(host->card);
1623         host->card = NULL;
1624 }
1625
1626 /*
1627  * Card detection - card is alive.
1628  */
1629 static int mmc_alive(struct mmc_host *host)
1630 {
1631         return mmc_send_status(host->card, NULL);
1632 }
1633
1634 /*
1635  * Card detection callback from host.
1636  */
1637 static void mmc_detect(struct mmc_host *host)
1638 {
1639         int err;
1640
1641         BUG_ON(!host);
1642         BUG_ON(!host->card);
1643
1644         mmc_get_card(host->card);
1645
1646         /*
1647          * Just check if our card has been removed.
1648          */
1649         err = _mmc_detect_card_removed(host);
1650
1651         mmc_put_card(host->card);
1652
1653         if (err) {
1654                 mmc_remove(host);
1655
1656                 mmc_claim_host(host);
1657                 mmc_detach_bus(host);
1658                 mmc_power_off(host);
1659                 mmc_release_host(host);
1660         }
1661 }
1662
1663 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
1664 {
1665         int err = 0;
1666         unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
1667                                         EXT_CSD_POWER_OFF_LONG;
1668
1669         BUG_ON(!host);
1670         BUG_ON(!host->card);
1671
1672         mmc_claim_host(host);
1673
1674         if (mmc_card_suspended(host->card))
1675                 goto out;
1676
1677         if (mmc_card_doing_bkops(host->card)) {
1678                 err = mmc_stop_bkops(host->card);
1679                 if (err)
1680                         goto out;
1681         }
1682
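             /* Flush the card's volatile cache before removing power. */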
1683         err = mmc_flush_cache(host->card);
1684         if (err)
1685                 goto out;
1686
1687         if (mmc_can_poweroff_notify(host->card) &&
1688                 ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
1689                 err = mmc_poweroff_notify(host->card, notify_type);
1690         else if (mmc_can_sleep(host->card))
1691                 err = mmc_sleep(host);
1692         else if (!mmc_host_is_spi(host))
1693                 err = mmc_deselect_cards(host);
1694
1695         if (!err) {
1696                 mmc_power_off(host);
1697                 mmc_card_set_suspended(host->card);
1698         }
1699 out:
1700         mmc_release_host(host);
1701         return err;
1702 }
1703
1704 /*
1705  * Suspend callback
1706  */
1707 static int mmc_suspend(struct mmc_host *host)
1708 {
1709         int err;
1710
1711         err = _mmc_suspend(host, true);
1712         if (!err) {
1713                 pm_runtime_disable(&host->card->dev);
1714                 pm_runtime_set_suspended(&host->card->dev);
1715         }
1716
1717         return err;
1718 }
1719
1720 /*
1721  * This function tries to determine if the same card is still present
1722  * and, if so, restore all state to it.
1723  */
1724 static int _mmc_resume(struct mmc_host *host)
1725 {
1726         int err = 0;
1727
1728         BUG_ON(!host);
1729         BUG_ON(!host->card);
1730
1731         mmc_claim_host(host);
1732
1733         if (!mmc_card_suspended(host->card))
1734                 goto out;
1735
1736         mmc_power_up(host, host->card->ocr);
1737         err = mmc_init_card(host, host->card->ocr, host->card);
1738         mmc_card_clr_suspended(host->card);
1739
1740 out:
1741         mmc_release_host(host);
1742         return err;
1743 }
1744
1745 /*
1746  * Shutdown callback
1747  */
1748 static int mmc_shutdown(struct mmc_host *host)
1749 {
1750         int err = 0;
1751
1752         /*
1753          * In a specific case for power-off notification, we need to resume
1754          * the card before we can shut it down properly.
1755          */
1756         if (mmc_can_poweroff_notify(host->card) &&
1757                 !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
1758                 err = _mmc_resume(host);
1759
1760         if (!err)
1761                 err = _mmc_suspend(host, false);
1762
1763         return err;
1764 }
1765
1766 /*
1767  * Callback for resume.
1768  */
1769 static int mmc_resume(struct mmc_host *host)
1770 {
1771         int err = 0;
1772
1773         if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
1774                 err = _mmc_resume(host);
1775                 pm_runtime_set_active(&host->card->dev);
1776                 pm_runtime_mark_last_busy(&host->card->dev);
1777         }
1778         pm_runtime_enable(&host->card->dev);
1779
1780         return err;
1781 }
1782
1783 /*
1784  * Callback for runtime_suspend.
1785  */
1786 static int mmc_runtime_suspend(struct mmc_host *host)
1787 {
1788         int err;
1789
1790         if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
1791                 return 0;
1792
1793         err = _mmc_suspend(host, true);
1794         if (err)
1795                 pr_err("%s: error %d doing aggressive suspend\n",
1796                         mmc_hostname(host), err);
1797
1798         return err;
1799 }
1800
1801 /*
1802  * Callback for runtime_resume.
1803  */
1804 static int mmc_runtime_resume(struct mmc_host *host)
1805 {
1806         int err;
1807
1808         if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
1809                 return 0;
1810
1811         err = _mmc_resume(host);
1812         if (err)
1813                 pr_err("%s: error %d doing aggressive resume\n",
1814                         mmc_hostname(host), err);
1815
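             /* The error has been logged above; it is not propagated further. */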
1816         return 0;
1817 }
1818
1819 static int mmc_power_restore(struct mmc_host *host)
1820 {
1821         int ret;
1822
1823         mmc_claim_host(host);
1824         ret = mmc_init_card(host, host->card->ocr, host->card);
1825         mmc_release_host(host);
1826
1827         return ret;
1828 }
1829
1830 static const struct mmc_bus_ops mmc_ops = {
1831         .remove = mmc_remove,
1832         .detect = mmc_detect,
1833         .suspend = mmc_suspend,
1834         .resume = mmc_resume,
1835         .runtime_suspend = mmc_runtime_suspend,
1836         .runtime_resume = mmc_runtime_resume,
1837         .power_restore = mmc_power_restore,
1838         .alive = mmc_alive,
1839         .shutdown = mmc_shutdown,
1840 };
1841
1842 /*
1843  * Starting point for MMC card init.
1844  */
1845 int mmc_attach_mmc(struct mmc_host *host)
1846 {
1847         int err;
1848         u32 ocr, rocr;
1849
1850         BUG_ON(!host);
1851         WARN_ON(!host->claimed);
1852
1853         /* Set correct bus mode for MMC before attempting attach */
1854         if (!mmc_host_is_spi(host))
1855                 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1856
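     /* Query the card's operating conditions with CMD1 and a zero OCR (inquiry). */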
1857         err = mmc_send_op_cond(host, 0, &ocr);
1858         if (err)
1859                 return err;
1860
1861         mmc_attach_bus(host, &mmc_ops);
1862         if (host->ocr_avail_mmc)
1863                 host->ocr_avail = host->ocr_avail_mmc;
1864
1865         /*
1866          * We need to get OCR a different way for SPI.
1867          */
1868         if (mmc_host_is_spi(host)) {
1869                 err = mmc_spi_read_ocr(host, 1, &ocr);
1870                 if (err)
1871                         goto err;
1872         }
1873
1874         rocr = mmc_select_voltage(host, ocr);
1875
1876         /*
1877          * Can we support the voltage of the card?
1878          */
1879         if (!rocr) {
1880                 err = -EINVAL;
1881                 goto err;
1882         }
1883
1884         /*
1885          * Detect and init the card.
1886          */
1887         err = mmc_init_card(host, rocr, NULL);
1888         if (err)
1889                 goto err;
1890
1891         mmc_release_host(host);
1892         err = mmc_add_card(host->card);
1893         mmc_claim_host(host);
1894         if (err)
1895                 goto remove_card;
1896
1897         return 0;
1898
1899 remove_card:
1900         mmc_release_host(host);
1901         mmc_remove_card(host->card);
1902         mmc_claim_host(host);
1903         host->card = NULL;
1904 err:
1905         mmc_detach_bus(host);
1906
1907         pr_err("%s: error %d whilst initialising MMC card\n",
1908                 mmc_hostname(host), err);
1909
1910         return err;
1911 }