1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/drivers/mmc/core/mmc.c
4  *
5  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
6  *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  */
9
10 #include <linux/err.h>
11 #include <linux/of.h>
12 #include <linux/slab.h>
13 #include <linux/stat.h>
14 #include <linux/pm_runtime.h>
15
16 #include <linux/mmc/host.h>
17 #include <linux/mmc/card.h>
18 #include <linux/mmc/mmc.h>
19
20 #include "core.h"
21 #include "card.h"
22 #include "host.h"
23 #include "bus.h"
24 #include "mmc_ops.h"
25 #include "quirks.h"
26 #include "sd_ops.h"
27 #include "pwrseq.h"
28
29 #define DEFAULT_CMD6_TIMEOUT_MS 500
30 #define MIN_CACHE_EN_TIMEOUT_MS 1600
31
32 static const unsigned int tran_exp[] = {
33         10000,          100000,         1000000,        10000000,
34         0,              0,              0,              0
35 };
36
37 static const unsigned char tran_mant[] = {
38         0,      10,     12,     13,     15,     20,     25,     30,
39         35,     40,     45,     50,     55,     60,     70,     80,
40 };
41
42 static const unsigned int taac_exp[] = {
43         1,      10,     100,    1000,   10000,  100000, 1000000, 10000000,
44 };
45
46 static const unsigned int taac_mant[] = {
47         0,      10,     12,     13,     15,     20,     25,     30,
48         35,     40,     45,     50,     55,     60,     70,     80,
49 };
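/*
 * Illustrative note (added): the CSD TRAN_SPEED and TAAC fields both encode
 * a value as mantissa * 10^exponent. The *_mant[] tables above are scaled
 * by 10 (25 means 2.5); for TRAN_SPEED the tran_exp[] entries are
 * pre-divided by 10 to compensate, while for TAAC the division by 10 is
 * done (with rounding) in mmc_decode_csd() below.
 */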
50
51 #define UNSTUFF_BITS(resp,start,size)                                   \
52         ({                                                              \
53                 const int __size = size;                                \
54                 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
55                 const int __off = 3 - ((start) / 32);                   \
56                 const int __shft = (start) & 31;                        \
57                 u32 __res;                                              \
58                                                                         \
59                 __res = resp[__off] >> __shft;                          \
60                 if (__size + __shft > 32)                               \
61                         __res |= resp[__off-1] << ((32 - __shft) % 32); \
62                 __res & __mask;                                         \
63         })
64
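/*
 * Illustrative example (added): the CID manufacturer ID lives in bits
 * [127:120] of the 128-bit response. resp[] holds the response as four
 * 32-bit words with resp[0] the most significant (bits 127:96), so
 * UNSTUFF_BITS(resp, 120, 8) evaluates to (resp[0] >> 24) & 0xff.
 */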
65 /*
66  * Given the decoded CSD structure, decode the raw CID to our CID structure.
67  */
68 static int mmc_decode_cid(struct mmc_card *card)
69 {
70         u32 *resp = card->raw_cid;
71
72         /*
73          * The selection of the format here is based upon published
74  *  specs from SanDisk and from what people have reported.
75          */
76         switch (card->csd.mmca_vsn) {
77         case 0: /* MMC v1.0 - v1.2 */
78         case 1: /* MMC v1.4 */
79                 card->cid.manfid        = UNSTUFF_BITS(resp, 104, 24);
80                 card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
81                 card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
82                 card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
83                 card->cid.prod_name[3]  = UNSTUFF_BITS(resp, 72, 8);
84                 card->cid.prod_name[4]  = UNSTUFF_BITS(resp, 64, 8);
85                 card->cid.prod_name[5]  = UNSTUFF_BITS(resp, 56, 8);
86                 card->cid.prod_name[6]  = UNSTUFF_BITS(resp, 48, 8);
87                 card->cid.hwrev         = UNSTUFF_BITS(resp, 44, 4);
88                 card->cid.fwrev         = UNSTUFF_BITS(resp, 40, 4);
89                 card->cid.serial        = UNSTUFF_BITS(resp, 16, 24);
90                 card->cid.month         = UNSTUFF_BITS(resp, 12, 4);
91                 card->cid.year          = UNSTUFF_BITS(resp, 8, 4) + 1997;
92                 break;
93
94         case 2: /* MMC v2.0 - v2.2 */
95         case 3: /* MMC v3.1 - v3.3 */
96         case 4: /* MMC v4 */
97                 card->cid.manfid        = UNSTUFF_BITS(resp, 120, 8);
98                 card->cid.oemid         = UNSTUFF_BITS(resp, 104, 16);
99                 card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
100                 card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
101                 card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
102                 card->cid.prod_name[3]  = UNSTUFF_BITS(resp, 72, 8);
103                 card->cid.prod_name[4]  = UNSTUFF_BITS(resp, 64, 8);
104                 card->cid.prod_name[5]  = UNSTUFF_BITS(resp, 56, 8);
105                 card->cid.prv           = UNSTUFF_BITS(resp, 48, 8);
106                 card->cid.serial        = UNSTUFF_BITS(resp, 16, 32);
107                 card->cid.month         = UNSTUFF_BITS(resp, 12, 4);
108                 card->cid.year          = UNSTUFF_BITS(resp, 8, 4) + 1997;
109                 break;
110
111         default:
112                 pr_err("%s: card has unknown MMCA version %d\n",
113                         mmc_hostname(card->host), card->csd.mmca_vsn);
114                 return -EINVAL;
115         }
116
117         return 0;
118 }
119
120 static void mmc_set_erase_size(struct mmc_card *card)
121 {
122         if (card->ext_csd.erase_group_def & 1)
123                 card->erase_size = card->ext_csd.hc_erase_size;
124         else
125                 card->erase_size = card->csd.erase_size;
126
127         mmc_init_erase(card);
128 }
129
130 /*
131  * Given a 128-bit response, decode to our card CSD structure.
132  */
133 static int mmc_decode_csd(struct mmc_card *card)
134 {
135         struct mmc_csd *csd = &card->csd;
136         unsigned int e, m, a, b;
137         u32 *resp = card->raw_csd;
138
139         /*
140          * We only understand CSD structure v1.1 and v1.2.
141          * v1.2 has extra information in bits 15, 11 and 10.
142          * We also support eMMC v4.4 & v4.41.
143          */
144         csd->structure = UNSTUFF_BITS(resp, 126, 2);
145         if (csd->structure == 0) {
146                 pr_err("%s: unrecognised CSD structure version %d\n",
147                         mmc_hostname(card->host), csd->structure);
148                 return -EINVAL;
149         }
150
151         csd->mmca_vsn    = UNSTUFF_BITS(resp, 122, 4);
152         m = UNSTUFF_BITS(resp, 115, 4);
153         e = UNSTUFF_BITS(resp, 112, 3);
154         csd->taac_ns     = (taac_exp[e] * taac_mant[m] + 9) / 10;
155         csd->taac_clks   = UNSTUFF_BITS(resp, 104, 8) * 100;
156
157         m = UNSTUFF_BITS(resp, 99, 4);
158         e = UNSTUFF_BITS(resp, 96, 3);
159         csd->max_dtr      = tran_exp[e] * tran_mant[m];
160         csd->cmdclass     = UNSTUFF_BITS(resp, 84, 12);
161
162         e = UNSTUFF_BITS(resp, 47, 3);
163         m = UNSTUFF_BITS(resp, 62, 12);
164         csd->capacity     = (1 + m) << (e + 2);
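        /*
         * Illustrative note (added): this is (C_SIZE + 1) * 2^(C_SIZE_MULT + 2)
         * blocks of 2^READ_BL_LEN bytes. The "magic" high-capacity value that
         * mmc_read_ext_csd() checks for (4096 * 512) corresponds to
         * C_SIZE = 0xFFF and C_SIZE_MULT = 7.
         */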
165
166         csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
167         csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
168         csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
169         csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
170         csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
171         csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
172         csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
173         csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
174
175         if (csd->write_blkbits >= 9) {
176                 a = UNSTUFF_BITS(resp, 42, 5);
177                 b = UNSTUFF_BITS(resp, 37, 5);
178                 csd->erase_size = (a + 1) * (b + 1);
179                 csd->erase_size <<= csd->write_blkbits - 9;
180         }
181
182         return 0;
183 }
184
185 static void mmc_select_card_type(struct mmc_card *card)
186 {
187         struct mmc_host *host = card->host;
188         u8 card_type = card->ext_csd.raw_card_type;
189         u32 caps = host->caps, caps2 = host->caps2;
190         unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
191         unsigned int avail_type = 0;
192
193         if (caps & MMC_CAP_MMC_HIGHSPEED &&
194             card_type & EXT_CSD_CARD_TYPE_HS_26) {
195                 hs_max_dtr = MMC_HIGH_26_MAX_DTR;
196                 avail_type |= EXT_CSD_CARD_TYPE_HS_26;
197         }
198
199         if (caps & MMC_CAP_MMC_HIGHSPEED &&
200             card_type & EXT_CSD_CARD_TYPE_HS_52) {
201                 hs_max_dtr = MMC_HIGH_52_MAX_DTR;
202                 avail_type |= EXT_CSD_CARD_TYPE_HS_52;
203         }
204
205         if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
206             card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
207                 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
208                 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
209         }
210
211         if (caps & MMC_CAP_1_2V_DDR &&
212             card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
213                 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
214                 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
215         }
216
217         if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
218             card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
219                 hs200_max_dtr = MMC_HS200_MAX_DTR;
220                 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
221         }
222
223         if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
224             card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
225                 hs200_max_dtr = MMC_HS200_MAX_DTR;
226                 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
227         }
228
229         if (caps2 & MMC_CAP2_HS400_1_8V &&
230             card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
231                 hs200_max_dtr = MMC_HS200_MAX_DTR;
232                 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
233         }
234
235         if (caps2 & MMC_CAP2_HS400_1_2V &&
236             card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
237                 hs200_max_dtr = MMC_HS200_MAX_DTR;
238                 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
239         }
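        /*
         * Illustrative note (added): HS400 runs the same 200 MHz clock as
         * HS200 (data is sampled DDR), which is why the HS400 cases above
         * reuse hs200_max_dtr rather than a separate rate.
         */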
240
241         if ((caps2 & MMC_CAP2_HS400_ES) &&
242             card->ext_csd.strobe_support &&
243             (avail_type & EXT_CSD_CARD_TYPE_HS400))
244                 avail_type |= EXT_CSD_CARD_TYPE_HS400ES;
245
246         card->ext_csd.hs_max_dtr = hs_max_dtr;
247         card->ext_csd.hs200_max_dtr = hs200_max_dtr;
248         card->mmc_avail_type = avail_type;
249 }
250
251 static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
252 {
253         u8 hc_erase_grp_sz, hc_wp_grp_sz;
254
255         /*
256          * Disable these attributes by default
257          */
258         card->ext_csd.enhanced_area_offset = -EINVAL;
259         card->ext_csd.enhanced_area_size = -EINVAL;
260
261         /*
262          * Enhanced area feature support -- check whether the eMMC
263          * card has the Enhanced area enabled.  If so, export enhanced
264          * area offset and size to user by adding sysfs interface.
265          */
266         if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
267             (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
268                 if (card->ext_csd.partition_setting_completed) {
269                         hc_erase_grp_sz =
270                                 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
271                         hc_wp_grp_sz =
272                                 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
273
274                         /*
275                          * calculate the enhanced data area offset, in bytes
276                          */
277                         card->ext_csd.enhanced_area_offset =
278                                 (((unsigned long long)ext_csd[139]) << 24) +
279                                 (((unsigned long long)ext_csd[138]) << 16) +
280                                 (((unsigned long long)ext_csd[137]) << 8) +
281                                 (((unsigned long long)ext_csd[136]));
282                         if (mmc_card_blockaddr(card))
283                                 card->ext_csd.enhanced_area_offset <<= 9;
284                         /*
285                          * calculate the enhanced data area size, in kilobytes
286                          */
287                         card->ext_csd.enhanced_area_size =
288                                 (ext_csd[142] << 16) + (ext_csd[141] << 8) +
289                                 ext_csd[140];
290                         card->ext_csd.enhanced_area_size *=
291                                 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
292                         card->ext_csd.enhanced_area_size <<= 9;
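                        /*
                         * Illustrative note (added): this matches the JEDEC
                         * formula MAX_ENH_SIZE_MULT * HC_WP_GRP_SIZE *
                         * HC_ERASE_GRP_SIZE * 512 KiB, kept here in KiB.
                         */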
293                 } else {
294                         pr_warn("%s: defines enhanced area without partition setting complete\n",
295                                 mmc_hostname(card->host));
296                 }
297         }
298 }
299
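/*
 * Descriptive note (added): record a physical partition (boot, general
 * purpose or RPMB) in card->part[]; size is given in bytes and name is a
 * printf-style template for idx (e.g. "boot%d", "gp%d").
 */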
300 static void mmc_part_add(struct mmc_card *card, unsigned int size,
301                          unsigned int part_cfg, char *name, int idx, bool ro,
302                          int area_type)
303 {
304         card->part[card->nr_parts].size = size;
305         card->part[card->nr_parts].part_cfg = part_cfg;
306         sprintf(card->part[card->nr_parts].name, name, idx);
307         card->part[card->nr_parts].force_ro = ro;
308         card->part[card->nr_parts].area_type = area_type;
309         card->nr_parts++;
310 }
311
312 static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
313 {
314         int idx;
315         u8 hc_erase_grp_sz, hc_wp_grp_sz;
316         unsigned int part_size;
317
318         /*
319          * General purpose partition feature support --
320          * If ext_csd has the size of general purpose partitions,
321          * set size, part_cfg, partition name in mmc_part.
322          */
323         if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
324             EXT_CSD_PART_SUPPORT_PART_EN) {
325                 hc_erase_grp_sz =
326                         ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
327                 hc_wp_grp_sz =
328                         ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
329
330                 for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
331                         if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
332                             !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
333                             !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
334                                 continue;
335                         if (card->ext_csd.partition_setting_completed == 0) {
336                                 pr_warn("%s: has partition size defined without partition complete\n",
337                                         mmc_hostname(card->host));
338                                 break;
339                         }
340                         part_size =
341                                 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
342                                 << 16) +
343                                 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
344                                 << 8) +
345                                 ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
346                         part_size *= (size_t)(hc_erase_grp_sz *
347                                 hc_wp_grp_sz);
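                        /*
                         * Illustrative note (added): this is GP_SIZE_MULT *
                         * HC_WP_GRP_SIZE * HC_ERASE_GRP_SIZE units of
                         * 512 KiB; the << 19 below converts it to bytes.
                         */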
348                         mmc_part_add(card, part_size << 19,
349                                 EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
350                                 "gp%d", idx, false,
351                                 MMC_BLK_DATA_AREA_GP);
352                 }
353         }
354 }
355
356 /* Minimum partition switch timeout in milliseconds */
357 #define MMC_MIN_PART_SWITCH_TIME        300
358
359 /*
360  * Decode extended CSD.
361  */
362 static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
363 {
364         int err = 0, idx;
365         unsigned int part_size;
366         struct device_node *np;
367         bool broken_hpi = false;
368
369         /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
370         card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
371         if (card->csd.structure == 3) {
372                 if (card->ext_csd.raw_ext_csd_structure > 2) {
373                         pr_err("%s: unrecognised EXT_CSD structure version %d\n",
374                                 mmc_hostname(card->host),
375                                 card->ext_csd.raw_ext_csd_structure);
376                         err = -EINVAL;
377                         goto out;
378                 }
379         }
380
381         np = mmc_of_find_child_device(card->host, 0);
382         if (np && of_device_is_compatible(np, "mmc-card"))
383                 broken_hpi = of_property_read_bool(np, "broken-hpi");
384         of_node_put(np);
385
386         /*
387          * The EXT_CSD format is meant to be forward compatible. As long
388          * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
389          * are authorized, see JEDEC JESD84-B50 section B.8.
390          */
391         card->ext_csd.rev = ext_csd[EXT_CSD_REV];
392
393         /* fixup device after ext_csd revision field is updated */
394         mmc_fixup_device(card, mmc_ext_csd_fixups);
395
396         card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
397         card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
398         card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
399         card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
400         if (card->ext_csd.rev >= 2) {
401                 card->ext_csd.sectors =
402                         ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
403                         ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
404                         ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
405                         ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
406
407                 /* Cards with density > 2GiB are sector addressed */
408                 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
409                         mmc_card_set_blockaddr(card);
410         }
411
412         card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
413         card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
414         mmc_select_card_type(card);
415
416         card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
417         card->ext_csd.raw_erase_timeout_mult =
418                 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
419         card->ext_csd.raw_hc_erase_grp_size =
420                 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
421         if (card->ext_csd.rev >= 3) {
422                 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
423                 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
424
425                 /* EXT_CSD value is in units of 10ms, but we store in ms */
426                 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
427                 /* Some eMMCs set the value too low, so enforce a minimum */
428                 if (card->ext_csd.part_time &&
429                     card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
430                         card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
431
432                 /* Sleep / awake timeout in 100ns units */
433                 if (sa_shift > 0 && sa_shift <= 0x17)
434                         card->ext_csd.sa_timeout =
435                                         1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
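                /*
                 * Illustrative example (added): S_A_TIMEOUT = 0x11 gives
                 * 1 << 17 = 131072 units of 100ns, i.e. ~13 ms.
                 */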
436                 card->ext_csd.erase_group_def =
437                         ext_csd[EXT_CSD_ERASE_GROUP_DEF];
438                 card->ext_csd.hc_erase_timeout = 300 *
439                         ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
440                 card->ext_csd.hc_erase_size =
441                         ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
442
443                 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
444
445                 /*
446                  * There are two boot regions of equal size, defined in
447                  * multiples of 128K.
448                  */
449                 if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
450                         for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
451                                 part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
452                                 mmc_part_add(card, part_size,
453                                         EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
454                                         "boot%d", idx, true,
455                                         MMC_BLK_DATA_AREA_BOOT);
456                         }
457                 }
458         }
459
460         card->ext_csd.raw_hc_erase_gap_size =
461                 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
462         card->ext_csd.raw_sec_trim_mult =
463                 ext_csd[EXT_CSD_SEC_TRIM_MULT];
464         card->ext_csd.raw_sec_erase_mult =
465                 ext_csd[EXT_CSD_SEC_ERASE_MULT];
466         card->ext_csd.raw_sec_feature_support =
467                 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
468         card->ext_csd.raw_trim_mult =
469                 ext_csd[EXT_CSD_TRIM_MULT];
470         card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
471         card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
472         if (card->ext_csd.rev >= 4) {
473                 if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
474                     EXT_CSD_PART_SETTING_COMPLETED)
475                         card->ext_csd.partition_setting_completed = 1;
476                 else
477                         card->ext_csd.partition_setting_completed = 0;
478
479                 mmc_manage_enhanced_area(card, ext_csd);
480
481                 mmc_manage_gp_partitions(card, ext_csd);
482
483                 card->ext_csd.sec_trim_mult =
484                         ext_csd[EXT_CSD_SEC_TRIM_MULT];
485                 card->ext_csd.sec_erase_mult =
486                         ext_csd[EXT_CSD_SEC_ERASE_MULT];
487                 card->ext_csd.sec_feature_support =
488                         ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
489                 card->ext_csd.trim_timeout = 300 *
490                         ext_csd[EXT_CSD_TRIM_MULT];
491
492                 /*
493                  * Note that the call to mmc_part_add above defaults to read
494                  * only. If this default assumption is changed, the call must
495                  * take into account the value of boot_locked below.
496                  */
497                 card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
498                 card->ext_csd.boot_ro_lockable = true;
499
500                 /* Save power class values */
501                 card->ext_csd.raw_pwr_cl_52_195 =
502                         ext_csd[EXT_CSD_PWR_CL_52_195];
503                 card->ext_csd.raw_pwr_cl_26_195 =
504                         ext_csd[EXT_CSD_PWR_CL_26_195];
505                 card->ext_csd.raw_pwr_cl_52_360 =
506                         ext_csd[EXT_CSD_PWR_CL_52_360];
507                 card->ext_csd.raw_pwr_cl_26_360 =
508                         ext_csd[EXT_CSD_PWR_CL_26_360];
509                 card->ext_csd.raw_pwr_cl_200_195 =
510                         ext_csd[EXT_CSD_PWR_CL_200_195];
511                 card->ext_csd.raw_pwr_cl_200_360 =
512                         ext_csd[EXT_CSD_PWR_CL_200_360];
513                 card->ext_csd.raw_pwr_cl_ddr_52_195 =
514                         ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
515                 card->ext_csd.raw_pwr_cl_ddr_52_360 =
516                         ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
517                 card->ext_csd.raw_pwr_cl_ddr_200_360 =
518                         ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
519         }
520
521         if (card->ext_csd.rev >= 5) {
522                 /* Adjust production date as per JEDEC JESD84-B451 */
523                 if (card->cid.year < 2010)
524                         card->cid.year += 16;
525
526                 /* check whether the eMMC card supports BKOPS */
527                 if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
528                         card->ext_csd.bkops = 1;
529                         card->ext_csd.man_bkops_en =
530                                         (ext_csd[EXT_CSD_BKOPS_EN] &
531                                                 EXT_CSD_MANUAL_BKOPS_MASK);
532                         card->ext_csd.raw_bkops_status =
533                                 ext_csd[EXT_CSD_BKOPS_STATUS];
534                         if (card->ext_csd.man_bkops_en)
535                                 pr_debug("%s: MAN_BKOPS_EN bit is set\n",
536                                         mmc_hostname(card->host));
537                         card->ext_csd.auto_bkops_en =
538                                         (ext_csd[EXT_CSD_BKOPS_EN] &
539                                                 EXT_CSD_AUTO_BKOPS_MASK);
540                         if (card->ext_csd.auto_bkops_en)
541                                 pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
542                                         mmc_hostname(card->host));
543                 }
544
545                 /* check whether the eMMC card supports HPI */
546                 if (!mmc_card_broken_hpi(card) &&
547                     !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
548                         card->ext_csd.hpi = 1;
549                         if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
550                                 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
551                         else
552                                 card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
553                         /*
554                          * Indicate the maximum timeout to close
555                          * a command interrupted by HPI
556                          */
557                         card->ext_csd.out_of_int_time =
558                                 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
559                 }
560
561                 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
562                 card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
563
564                 /*
565                  * RPMB regions are defined in multiples of 128K.
566                  */
567                 card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
568                 if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
569                         mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
570                                 EXT_CSD_PART_CONFIG_ACC_RPMB,
571                                 "rpmb", 0, false,
572                                 MMC_BLK_DATA_AREA_RPMB);
573                 }
574         }
575
576         card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
577         if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
578                 card->erased_byte = 0xFF;
579         else
580                 card->erased_byte = 0x0;
581
582         /* eMMC v4.5 or later */
583         card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
584         if (card->ext_csd.rev >= 6) {
585                 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
586
587                 card->ext_csd.generic_cmd6_time = 10 *
588                         ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
589                 card->ext_csd.power_off_longtime = 10 *
590                         ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
591
592                 card->ext_csd.cache_size =
593                         ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
594                         ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
595                         ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
596                         ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
597
598                 if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
599                         card->ext_csd.data_sector_size = 4096;
600                 else
601                         card->ext_csd.data_sector_size = 512;
602
603                 if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
604                     (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
605                         card->ext_csd.data_tag_unit_size =
606                         ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
607                         (card->ext_csd.data_sector_size);
608                 } else {
609                         card->ext_csd.data_tag_unit_size = 0;
610                 }
611
612                 card->ext_csd.max_packed_writes =
613                         ext_csd[EXT_CSD_MAX_PACKED_WRITES];
614                 card->ext_csd.max_packed_reads =
615                         ext_csd[EXT_CSD_MAX_PACKED_READS];
616         } else {
617                 card->ext_csd.data_sector_size = 512;
618         }
619
620         /* eMMC v5 or later */
621         if (card->ext_csd.rev >= 7) {
622                 memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
623                        MMC_FIRMWARE_LEN);
624                 card->ext_csd.ffu_capable =
625                         (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
626                         !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
627
628                 card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
629                 card->ext_csd.device_life_time_est_typ_a =
630                         ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
631                 card->ext_csd.device_life_time_est_typ_b =
632                         ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
633         }
634
635         /* eMMC v5.1 or later */
636         if (card->ext_csd.rev >= 8) {
637                 card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
638                                              EXT_CSD_CMDQ_SUPPORTED;
639                 card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
640                                             EXT_CSD_CMDQ_DEPTH_MASK) + 1;
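                /*
                 * Note (added): the field is encoded as N - 1, so a raw
                 * value of 31 means a queue depth of 32 tasks.
                 */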
641                 /* Exclude inefficiently small queue depths */
642                 if (card->ext_csd.cmdq_depth <= 2) {
643                         card->ext_csd.cmdq_support = false;
644                         card->ext_csd.cmdq_depth = 0;
645                 }
646                 if (card->ext_csd.cmdq_support) {
647                         pr_debug("%s: Command Queue supported depth %u\n",
648                                  mmc_hostname(card->host),
649                                  card->ext_csd.cmdq_depth);
650                 }
651         }
652 out:
653         return err;
654 }
655
656 static int mmc_read_ext_csd(struct mmc_card *card)
657 {
658         u8 *ext_csd;
659         int err;
660
661         if (!mmc_can_ext_csd(card))
662                 return 0;
663
664         err = mmc_get_ext_csd(card, &ext_csd);
665         if (err) {
666                 /* If the host or the card can't do the switch,
667                  * fail more gracefully. */
668                 if ((err != -EINVAL)
669                  && (err != -ENOSYS)
670                  && (err != -EFAULT))
671                         return err;
672
673                 /*
674                  * High capacity cards should have this "magic" size
675                  * stored in their CSD.
676                  */
677                 if (card->csd.capacity == (4096 * 512)) {
678                         pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
679                                 mmc_hostname(card->host));
680                 } else {
681                         pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
682                                 mmc_hostname(card->host));
683                         err = 0;
684                 }
685
686                 return err;
687         }
688
689         err = mmc_decode_ext_csd(card, ext_csd);
690         kfree(ext_csd);
691         return err;
692 }
693
694 static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
695 {
696         u8 *bw_ext_csd;
697         int err;
698
699         if (bus_width == MMC_BUS_WIDTH_1)
700                 return 0;
701
702         err = mmc_get_ext_csd(card, &bw_ext_csd);
703         if (err)
704                 return err;
705
706         /* only compare read-only fields */
707         err = !((card->ext_csd.raw_partition_support ==
708                         bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
709                 (card->ext_csd.raw_erased_mem_count ==
710                         bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
711                 (card->ext_csd.rev ==
712                         bw_ext_csd[EXT_CSD_REV]) &&
713                 (card->ext_csd.raw_ext_csd_structure ==
714                         bw_ext_csd[EXT_CSD_STRUCTURE]) &&
715                 (card->ext_csd.raw_card_type ==
716                         bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
717                 (card->ext_csd.raw_s_a_timeout ==
718                         bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
719                 (card->ext_csd.raw_hc_erase_gap_size ==
720                         bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
721                 (card->ext_csd.raw_erase_timeout_mult ==
722                         bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
723                 (card->ext_csd.raw_hc_erase_grp_size ==
724                         bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
725                 (card->ext_csd.raw_sec_trim_mult ==
726                         bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
727                 (card->ext_csd.raw_sec_erase_mult ==
728                         bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
729                 (card->ext_csd.raw_sec_feature_support ==
730                         bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
731                 (card->ext_csd.raw_trim_mult ==
732                         bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
733                 (card->ext_csd.raw_sectors[0] ==
734                         bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
735                 (card->ext_csd.raw_sectors[1] ==
736                         bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
737                 (card->ext_csd.raw_sectors[2] ==
738                         bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
739                 (card->ext_csd.raw_sectors[3] ==
740                         bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
741                 (card->ext_csd.raw_pwr_cl_52_195 ==
742                         bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
743                 (card->ext_csd.raw_pwr_cl_26_195 ==
744                         bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
745                 (card->ext_csd.raw_pwr_cl_52_360 ==
746                         bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
747                 (card->ext_csd.raw_pwr_cl_26_360 ==
748                         bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
749                 (card->ext_csd.raw_pwr_cl_200_195 ==
750                         bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
751                 (card->ext_csd.raw_pwr_cl_200_360 ==
752                         bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
753                 (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
754                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
755                 (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
756                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
757                 (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
758                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
759
760         if (err)
761                 err = -EINVAL;
762
763         kfree(bw_ext_csd);
764         return err;
765 }
766
767 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
768         card->raw_cid[2], card->raw_cid[3]);
769 MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
770         card->raw_csd[2], card->raw_csd[3]);
771 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
772 MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
773 MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
774 MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
775 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
776 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
777 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
778 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
779 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
780 MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
781 MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
782 MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
783         card->ext_csd.device_life_time_est_typ_a,
784         card->ext_csd.device_life_time_est_typ_b);
785 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
786 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
787                 card->ext_csd.enhanced_area_offset);
788 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
789 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
790 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
791 MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
792 MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
793 MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
794
795 static ssize_t mmc_fwrev_show(struct device *dev,
796                               struct device_attribute *attr,
797                               char *buf)
798 {
799         struct mmc_card *card = mmc_dev_to_card(dev);
800
801         if (card->ext_csd.rev < 7) {
802                 return sprintf(buf, "0x%x\n", card->cid.fwrev);
803         } else {
804                 return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
805                                card->ext_csd.fwrev);
806         }
807 }
808
809 static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
810
811 static ssize_t mmc_dsr_show(struct device *dev,
812                             struct device_attribute *attr,
813                             char *buf)
814 {
815         struct mmc_card *card = mmc_dev_to_card(dev);
816         struct mmc_host *host = card->host;
817
818         if (card->csd.dsr_imp && host->dsr_req)
819                 return sprintf(buf, "0x%x\n", host->dsr);
820         else
821                 /* return default DSR value */
822                 return sprintf(buf, "0x%x\n", 0x404);
823 }
824
825 static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
826
827 static struct attribute *mmc_std_attrs[] = {
828         &dev_attr_cid.attr,
829         &dev_attr_csd.attr,
830         &dev_attr_date.attr,
831         &dev_attr_erase_size.attr,
832         &dev_attr_preferred_erase_size.attr,
833         &dev_attr_fwrev.attr,
834         &dev_attr_ffu_capable.attr,
835         &dev_attr_hwrev.attr,
836         &dev_attr_manfid.attr,
837         &dev_attr_name.attr,
838         &dev_attr_oemid.attr,
839         &dev_attr_prv.attr,
840         &dev_attr_rev.attr,
841         &dev_attr_pre_eol_info.attr,
842         &dev_attr_life_time.attr,
843         &dev_attr_serial.attr,
844         &dev_attr_enhanced_area_offset.attr,
845         &dev_attr_enhanced_area_size.attr,
846         &dev_attr_raw_rpmb_size_mult.attr,
847         &dev_attr_rel_sectors.attr,
848         &dev_attr_ocr.attr,
849         &dev_attr_rca.attr,
850         &dev_attr_dsr.attr,
851         &dev_attr_cmdq_en.attr,
852         NULL,
853 };
854 ATTRIBUTE_GROUPS(mmc_std);
855
856 static struct device_type mmc_type = {
857         .groups = mmc_std_groups,
858 };
859
860 /*
861  * Select the PowerClass for the current bus width.
862  * If a power class is defined for the 4/8-bit bus in the
863  * extended CSD register, select it by executing the
864  * mmc_switch command.
865  */
866 static int __mmc_select_powerclass(struct mmc_card *card,
867                                    unsigned int bus_width)
868 {
869         struct mmc_host *host = card->host;
870         struct mmc_ext_csd *ext_csd = &card->ext_csd;
871         unsigned int pwrclass_val = 0;
872         int err = 0;
873
874         switch (1 << host->ios.vdd) {
875         case MMC_VDD_165_195:
876                 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
877                         pwrclass_val = ext_csd->raw_pwr_cl_26_195;
878                 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
879                         pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
880                                 ext_csd->raw_pwr_cl_52_195 :
881                                 ext_csd->raw_pwr_cl_ddr_52_195;
882                 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
883                         pwrclass_val = ext_csd->raw_pwr_cl_200_195;
884                 break;
885         case MMC_VDD_27_28:
886         case MMC_VDD_28_29:
887         case MMC_VDD_29_30:
888         case MMC_VDD_30_31:
889         case MMC_VDD_31_32:
890         case MMC_VDD_32_33:
891         case MMC_VDD_33_34:
892         case MMC_VDD_34_35:
893         case MMC_VDD_35_36:
894                 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
895                         pwrclass_val = ext_csd->raw_pwr_cl_26_360;
896                 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
897                         pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
898                                 ext_csd->raw_pwr_cl_52_360 :
899                                 ext_csd->raw_pwr_cl_ddr_52_360;
900                 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
901                         pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
902                                 ext_csd->raw_pwr_cl_ddr_200_360 :
903                                 ext_csd->raw_pwr_cl_200_360;
904                 break;
905         default:
906                 pr_warn("%s: Voltage range not supported for power class\n",
907                         mmc_hostname(host));
908                 return -EINVAL;
909         }
910
911         if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
912                 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
913                                 EXT_CSD_PWR_CL_8BIT_SHIFT;
914         else
915                 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
916                                 EXT_CSD_PWR_CL_4BIT_SHIFT;
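        /*
         * Note (added): each PWR_CL_* byte packs two 4-bit power classes,
         * the low nibble for the 4-bit bus and the high nibble for the
         * 8-bit bus.
         */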
917
918         /* If the power class is different from the default value */
919         if (pwrclass_val > 0) {
920                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
921                                  EXT_CSD_POWER_CLASS,
922                                  pwrclass_val,
923                                  card->ext_csd.generic_cmd6_time);
924         }
925
926         return err;
927 }
928
929 static int mmc_select_powerclass(struct mmc_card *card)
930 {
931         struct mmc_host *host = card->host;
932         u32 bus_width, ext_csd_bits;
933         int err, ddr;
934
935         /* Power class selection is supported for versions >= 4.0 */
936         if (!mmc_can_ext_csd(card))
937                 return 0;
938
939         bus_width = host->ios.bus_width;
940         /* Power class values are defined only for 4/8 bit bus */
941         if (bus_width == MMC_BUS_WIDTH_1)
942                 return 0;
943
944         ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
945         if (ddr)
946                 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
947                         EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
948         else
949                 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
950                         EXT_CSD_BUS_WIDTH_8 :  EXT_CSD_BUS_WIDTH_4;
951
952         err = __mmc_select_powerclass(card, ext_csd_bits);
953         if (err)
954                 pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
955                         mmc_hostname(host), 1 << bus_width, ddr);
956
957         return err;
958 }
959
960 /*
961  * Set the bus speed for the selected speed mode.
962  */
963 static void mmc_set_bus_speed(struct mmc_card *card)
964 {
965         unsigned int max_dtr = (unsigned int)-1;
966
967         if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
968              max_dtr > card->ext_csd.hs200_max_dtr)
969                 max_dtr = card->ext_csd.hs200_max_dtr;
970         else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
971                 max_dtr = card->ext_csd.hs_max_dtr;
972         else if (max_dtr > card->csd.max_dtr)
973                 max_dtr = card->csd.max_dtr;
974
975         mmc_set_clock(card->host, max_dtr);
976 }
977
978 /*
979  * Select the bus width among 4-bit and 8-bit (SDR).
980  * If the bus width is changed successfully, return the selected width value.
981  * Zero is returned instead of an error value if the wide width is not supported.
982  */
983 static int mmc_select_bus_width(struct mmc_card *card)
984 {
985         static unsigned ext_csd_bits[] = {
986                 EXT_CSD_BUS_WIDTH_8,
987                 EXT_CSD_BUS_WIDTH_4,
988         };
989         static unsigned bus_widths[] = {
990                 MMC_BUS_WIDTH_8,
991                 MMC_BUS_WIDTH_4,
992         };
993         struct mmc_host *host = card->host;
994         unsigned idx, bus_width = 0;
995         int err = 0;
996
997         if (!mmc_can_ext_csd(card) ||
998             !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
999                 return 0;
1000
1001         idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
1002
1003         /*
1004          * Unlike SD, MMC cards don't have a configuration register reporting
1005          * the supported bus widths. So either the bus test command should be
1006          * run to identify the supported bus width, or the EXT_CSD values read
1007          * at the new bus width should be compared with those read in 1-bit mode.
1008          */
1009         for (; idx < ARRAY_SIZE(bus_widths); idx++) {
1010                 /*
1011                  * If the host is capable of 8-bit transfer, first try to
1012                  * switch the device into 8-bit transfer mode. If the mmc
1013                  * switch command returns an error, fall back and try
1014                  * 4-bit transfer mode. On success, set the corresponding
1015                  * bus width on the host.
1016                  */
1017                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1018                                  EXT_CSD_BUS_WIDTH,
1019                                  ext_csd_bits[idx],
1020                                  card->ext_csd.generic_cmd6_time);
1021                 if (err)
1022                         continue;
1023
1024                 bus_width = bus_widths[idx];
1025                 mmc_set_bus_width(host, bus_width);
1026
1027                 /*
1028                  * If controller can't handle bus width test,
1029                  * compare ext_csd previously read in 1 bit mode
1030                  * against ext_csd at new bus width
1031                  */
1032                 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
1033                         err = mmc_compare_ext_csds(card, bus_width);
1034                 else
1035                         err = mmc_bus_test(card, bus_width);
1036
1037                 if (!err) {
1038                         err = bus_width;
1039                         break;
1040                 } else {
1041                         pr_warn("%s: switch to bus width %d failed\n",
1042                                 mmc_hostname(host), 1 << bus_width);
1043                 }
1044         }
1045
1046         return err;
1047 }
1048
1049 /*
1050  * Switch to the high-speed mode
1051  */
1052 static int mmc_select_hs(struct mmc_card *card)
1053 {
1054         int err;
1055
1056         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1057                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
1058                            card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
1059                            true, true, true);
1060         if (err)
1061                 pr_warn("%s: switch to high-speed failed, err:%d\n",
1062                         mmc_hostname(card->host), err);
1063
1064         return err;
1065 }
1066
1067 /*
1068  * Activate wide bus and DDR if supported.
1069  */
1070 static int mmc_select_hs_ddr(struct mmc_card *card)
1071 {
1072         struct mmc_host *host = card->host;
1073         u32 bus_width, ext_csd_bits;
1074         int err = 0;
1075
1076         if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
1077                 return 0;
1078
1079         bus_width = host->ios.bus_width;
1080         if (bus_width == MMC_BUS_WIDTH_1)
1081                 return 0;
1082
1083         ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
1084                 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
1085
1086         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1087                            EXT_CSD_BUS_WIDTH,
1088                            ext_csd_bits,
1089                            card->ext_csd.generic_cmd6_time,
1090                            MMC_TIMING_MMC_DDR52,
1091                            true, true, true);
1092         if (err) {
1093                 pr_err("%s: switch to bus width %d ddr failed\n",
1094                         mmc_hostname(host), 1 << bus_width);
1095                 return err;
1096         }
1097
1098         /*
1099          * eMMC cards can support 3.3V to 1.2V i/o (vccq)
1100          * signaling.
1101          *
1102          * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
1103          *
1104          * 1.8V vccq at 3.3V core voltage (vcc) is not required
1105          * in the JEDEC spec for DDR.
1106          *
1107          * Even though an (e)MMC card can support 3.3V to 1.2V vccq
1108          * signaling, not every host controller can, e.g. some SDHCI
1109          * controllers connected to an eMMC device. Some of these
1110          * host controllers still need to use 1.8V vccq to support
1111          * DDR mode.
1112          *
1113          * So the sequence will be:
1114          * if (host and device can both support 1.2v IO)
1115          *      use 1.2v IO;
1116          * else if (host and device can both support 1.8v IO)
1117          *      use 1.8v IO;
1118          * so if host and device can only support 3.3v IO, this is the
1119          * last choice.
1120          *
1121          * WARNING: eMMC rules are NOT the same as SD DDR
1122          */
1123         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
1124                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1125                 if (!err)
1126                         return 0;
1127         }
1128
1129         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
1130             host->caps & MMC_CAP_1_8V_DDR)
1131                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1132
1133         /* make sure vccq is 3.3v after switching disaster */
1134         if (err)
1135                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
1136
1137         return err;
1138 }
1139
1140 static int mmc_select_hs400(struct mmc_card *card)
1141 {
1142         struct mmc_host *host = card->host;
1143         unsigned int max_dtr;
1144         int err = 0;
1145         u8 val;
1146
1147         /*
1148          * HS400 mode requires 8-bit bus width
1149          */
1150         if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1151               host->ios.bus_width == MMC_BUS_WIDTH_8))
1152                 return 0;
1153
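        /*
         * Note (added): the card is expected to be in HS200 here (see the
         * error message below); the path to HS400 is HS200 -> HS -> DDR
         * 8-bit -> HS400, which the steps below follow.
         */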
1154         /* Switch card to HS mode */
1155         val = EXT_CSD_TIMING_HS;
1156         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1157                            EXT_CSD_HS_TIMING, val,
1158                            card->ext_csd.generic_cmd6_time, 0,
1159                            true, false, true);
1160         if (err) {
1161                 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
1162                         mmc_hostname(host), err);
1163                 return err;
1164         }
1165
1166         /* Set host controller to HS timing */
1167         mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1168
1169         /* Prepare host to downgrade to HS timing */
1170         if (host->ops->hs400_downgrade)
1171                 host->ops->hs400_downgrade(host);
1172
1173         /* Reduce frequency to HS frequency */
1174         max_dtr = card->ext_csd.hs_max_dtr;
1175         mmc_set_clock(host, max_dtr);
1176
1177         err = mmc_switch_status(card);
1178         if (err)
1179                 goto out_err;
1180
1181         if (host->ops->hs400_prepare_ddr)
1182                 host->ops->hs400_prepare_ddr(host);
1183
1184         /* Switch card to DDR */
1185         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1186                          EXT_CSD_BUS_WIDTH,
1187                          EXT_CSD_DDR_BUS_WIDTH_8,
1188                          card->ext_csd.generic_cmd6_time);
1189         if (err) {
1190                 pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
1191                         mmc_hostname(host), err);
1192                 return err;
1193         }
1194
1195         /* Switch card to HS400 */
1196         val = EXT_CSD_TIMING_HS400 |
1197               card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1198         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1199                            EXT_CSD_HS_TIMING, val,
1200                            card->ext_csd.generic_cmd6_time, 0,
1201                            true, false, true);
1202         if (err) {
1203                 pr_err("%s: switch to hs400 failed, err:%d\n",
1204                          mmc_hostname(host), err);
1205                 return err;
1206         }
1207
1208         /* Set host controller to HS400 timing and frequency */
1209         mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1210         mmc_set_bus_speed(card);
1211
1212         err = mmc_switch_status(card);
1213         if (err)
1214                 goto out_err;
1215
1216         if (host->ops->hs400_complete)
1217                 host->ops->hs400_complete(host);
1218
1219         return 0;
1220
1221 out_err:
1222         pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1223                __func__, err);
1224         return err;
1225 }
1226
1227 int mmc_hs200_to_hs400(struct mmc_card *card)
1228 {
1229         return mmc_select_hs400(card);
1230 }
1231
1232 int mmc_hs400_to_hs200(struct mmc_card *card)
1233 {
1234         struct mmc_host *host = card->host;
1235         unsigned int max_dtr;
1236         int err;
1237         u8 val;
1238
1239         /* Reduce frequency to HS */
1240         max_dtr = card->ext_csd.hs_max_dtr;
1241         mmc_set_clock(host, max_dtr);
1242
1243         /* Switch HS400 to HS DDR */
1244         val = EXT_CSD_TIMING_HS;
1245         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
1246                            val, card->ext_csd.generic_cmd6_time, 0,
1247                            true, false, true);
1248         if (err)
1249                 goto out_err;
1250
1251         mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
1252
1253         err = mmc_switch_status(card);
1254         if (err)
1255                 goto out_err;
1256
1257         /* Switch HS DDR to HS */
1258         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1259                            EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
1260                            0, true, false, true);
1261         if (err)
1262                 goto out_err;
1263
1264         mmc_set_timing(host, MMC_TIMING_MMC_HS);
1265
1266         if (host->ops->hs400_downgrade)
1267                 host->ops->hs400_downgrade(host);
1268
1269         err = mmc_switch_status(card);
1270         if (err)
1271                 goto out_err;
1272
1273         /* Switch HS to HS200 */
1274         val = EXT_CSD_TIMING_HS200 |
1275               card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1276         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
1277                            val, card->ext_csd.generic_cmd6_time, 0,
1278                            true, false, true);
1279         if (err)
1280                 goto out_err;
1281
1282         mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1283
1284         /*
1285          * For HS200, CRC errors are not a reliable way to know the switch
1286          * failed. If there really is a problem, we would expect tuning will
1287          * fail and the result ends up the same.
1288          */
1289         err = __mmc_switch_status(card, false);
1290         if (err)
1291                 goto out_err;
1292
1293         mmc_set_bus_speed(card);
1294
1295         /* Prepare tuning for HS400 mode. */
1296         if (host->ops->prepare_hs400_tuning)
1297                 host->ops->prepare_hs400_tuning(host, &host->ios);
1298
1299         return 0;
1300
1301 out_err:
1302         pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1303                __func__, err);
1304         return err;
1305 }
1306
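/*
 * Pick a drive strength for HS200/HS400. The driver types supported by the
 * card (from EXT_CSD, with type 0 always treated as supported) are matched
 * against either a fixed type requested by the host or the host's own
 * drive strength selection callback.
 */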
1307 static void mmc_select_driver_type(struct mmc_card *card)
1308 {
1309         int card_drv_type, drive_strength, drv_type = 0;
1310         int fixed_drv_type = card->host->fixed_drv_type;
1311
1312         card_drv_type = card->ext_csd.raw_driver_strength |
1313                         mmc_driver_type_mask(0);
1314
1315         if (fixed_drv_type >= 0)
1316                 drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
1317                                  ? fixed_drv_type : 0;
1318         else
1319                 drive_strength = mmc_select_drive_strength(card,
1320                                                            card->ext_csd.hs200_max_dtr,
1321                                                            card_drv_type, &drv_type);
1322
1323         card->drive_strength = drive_strength;
1324
1325         if (drv_type)
1326                 mmc_set_driver_type(card->host, drv_type);
1327 }
1328
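/*
 * Switch to HS400 with enhanced strobe (HS400ES): select the signal voltage,
 * an 8-bit bus and High Speed timing, raise the clock, enable the strobe bit
 * in the bus width byte, and finally move the card and host to HS400 timing
 * with enhanced strobe enabled.
 */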
1329 static int mmc_select_hs400es(struct mmc_card *card)
1330 {
1331         struct mmc_host *host = card->host;
1332         int err = -EINVAL;
1333         u8 val;
1334
1335         if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
1336                 err = -ENOTSUPP;
1337                 goto out_err;
1338         }
1339
1340         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
1341                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1342
1343         if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
1344                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1345
1346                 /* If this fails, try again during the next card power cycle */
1347         if (err)
1348                 goto out_err;
1349
1350         err = mmc_select_bus_width(card);
1351         if (err != MMC_BUS_WIDTH_8) {
1352                 pr_err("%s: switch to 8bit bus width failed, err:%d\n",
1353                         mmc_hostname(host), err);
1354                 err = err < 0 ? err : -ENOTSUPP;
1355                 goto out_err;
1356         }
1357
1358         /* Switch card to HS mode */
1359         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1360                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
1361                            card->ext_csd.generic_cmd6_time, 0,
1362                            true, false, true);
1363         if (err) {
1364                 pr_err("%s: switch to hs for hs400es failed, err:%d\n",
1365                         mmc_hostname(host), err);
1366                 goto out_err;
1367         }
1368
1369         mmc_set_timing(host, MMC_TIMING_MMC_HS);
1370         err = mmc_switch_status(card);
1371         if (err)
1372                 goto out_err;
1373
1374         mmc_set_clock(host, card->ext_csd.hs_max_dtr);
1375
1376         /* Switch card to DDR with strobe bit */
1377         val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
1378         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1379                          EXT_CSD_BUS_WIDTH,
1380                          val,
1381                          card->ext_csd.generic_cmd6_time);
1382         if (err) {
1383                 pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
1384                         mmc_hostname(host), err);
1385                 goto out_err;
1386         }
1387
1388         mmc_select_driver_type(card);
1389
1390         /* Switch card to HS400 */
1391         val = EXT_CSD_TIMING_HS400 |
1392               card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1393         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1394                            EXT_CSD_HS_TIMING, val,
1395                            card->ext_csd.generic_cmd6_time, 0,
1396                            true, false, true);
1397         if (err) {
1398                 pr_err("%s: switch to hs400es failed, err:%d\n",
1399                         mmc_hostname(host), err);
1400                 goto out_err;
1401         }
1402
1403         /* Set host controller to HS400 timing and frequency */
1404         mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1405
1406         /* Have the controller enable the enhanced strobe function */
1407         host->ios.enhanced_strobe = true;
1408         if (host->ops->hs400_enhanced_strobe)
1409                 host->ops->hs400_enhanced_strobe(host, &host->ios);
1410
1411         err = mmc_switch_status(card);
1412         if (err)
1413                 goto out_err;
1414
1415         return 0;
1416
1417 out_err:
1418         pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1419                __func__, err);
1420         return err;
1421 }
1422
1423 /*
1424  * For devices supporting HS200 mode, the following sequence
1425  * should be done before executing the tuning process:
1426  * 1. set the desired bus width (4-bit or 8-bit; 1-bit is not supported)
1427  * 2. switch to HS200 mode
1428  * 3. set the clock to > 52 MHz and <= 200 MHz
1429  */
1430 static int mmc_select_hs200(struct mmc_card *card)
1431 {
1432         struct mmc_host *host = card->host;
1433         unsigned int old_timing, old_signal_voltage;
1434         int err = -EINVAL;
1435         u8 val;
1436
1437         old_signal_voltage = host->ios.signal_voltage;
1438         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1439                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1440
1441         if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
1442                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1443
1444                 /* If this fails, try again during the next card power cycle */
1445         if (err)
1446                 return err;
1447
1448         mmc_select_driver_type(card);
1449
1450         /*
1451                  * Set the bus width (4 or 8) according to the host's support and
1452                  * switch to HS200 mode if the bus width is set successfully.
1453          */
1454         err = mmc_select_bus_width(card);
1455         if (err > 0) {
1456                 val = EXT_CSD_TIMING_HS200 |
1457                       card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1458                 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1459                                    EXT_CSD_HS_TIMING, val,
1460                                    card->ext_csd.generic_cmd6_time, 0,
1461                                    true, false, true);
1462                 if (err)
1463                         goto err;
1464                 old_timing = host->ios.timing;
1465                 mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1466
1467                 /*
1468                  * For HS200, CRC errors are not a reliable way to know the
1469                  * switch failed. If there really is a problem, we would expect
1470                  * tuning to fail and the result to end up the same.
1471                  */
1472                 err = __mmc_switch_status(card, false);
1473
1474                 /*
1475                  * mmc_select_timing() assumes the timing has not changed if
1476                  * the error is a switch error.
1477                  */
1478                 if (err == -EBADMSG)
1479                         mmc_set_timing(host, old_timing);
1480         }
1481 err:
1482         if (err) {
1483                 /* Fall back to the old signal voltage; if that fails, report an error */
1484                 if (mmc_set_signal_voltage(host, old_signal_voltage))
1485                         err = -EIO;
1486
1487                 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1488                        __func__, err);
1489         }
1490         return err;
1491 }
1492
1493 /*
1494  * Activate High Speed, HS200 or HS400ES mode if supported.
1495  */
1496 static int mmc_select_timing(struct mmc_card *card)
1497 {
1498         int err = 0;
1499
1500         if (!mmc_can_ext_csd(card))
1501                 goto bus_speed;
1502
1503         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
1504                 err = mmc_select_hs400es(card);
1505         else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
1506                 err = mmc_select_hs200(card);
1507         else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
1508                 err = mmc_select_hs(card);
1509
1510         if (err && err != -EBADMSG)
1511                 return err;
1512
1513 bus_speed:
1514         /*
1515          * Set the bus speed to the selected bus timing.
1516          * If no timing was selected, backwards-compatible timing is the default.
1517          */
1518         mmc_set_bus_speed(card);
1519         return 0;
1520 }
1521
1522 /*
1523  * Execute tuning sequence to seek the proper bus operating
1524  * conditions for HS200 and HS400, which sends CMD21 to the device.
1525  */
1526 static int mmc_hs200_tuning(struct mmc_card *card)
1527 {
1528         struct mmc_host *host = card->host;
1529
1530         /*
1531          * Timing should be adjusted to the HS400 target
1532          * operation frequency for the tuning process.
1533          */
1534         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1535             host->ios.bus_width == MMC_BUS_WIDTH_8)
1536                 if (host->ops->prepare_hs400_tuning)
1537                         host->ops->prepare_hs400_tuning(host, &host->ios);
1538
1539         return mmc_execute_tuning(card);
1540 }
1541
1542 /*
1543  * Handle the detection and initialisation of a card.
1544  *
1545  * In the case of a resume, "oldcard" will contain the card
1546  * we're trying to reinitialise.
1547  */
1548 static int mmc_init_card(struct mmc_host *host, u32 ocr,
1549         struct mmc_card *oldcard)
1550 {
1551         struct mmc_card *card;
1552         int err;
1553         u32 cid[4];
1554         u32 rocr;
1555
1556         WARN_ON(!host->claimed);
1557
1558         /* Set correct bus mode for MMC before attempting init */
1559         if (!mmc_host_is_spi(host))
1560                 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1561
1562         /*
1563          * Since we're changing the OCR value, we seem to
1564          * need to tell some cards to go back to the idle
1565          * state.  We wait 1ms to give cards time to
1566          * respond.
1567          * mmc_go_idle is needed for eMMCs that are asleep.
1568          */
1569         mmc_go_idle(host);
1570
1571         /* The extra bit indicates that we support high capacity */
1572         err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
1573         if (err)
1574                 goto err;
1575
1576         /*
1577          * For SPI, enable CRC as appropriate.
1578          */
1579         if (mmc_host_is_spi(host)) {
1580                 err = mmc_spi_set_crc(host, use_spi_crc);
1581                 if (err)
1582                         goto err;
1583         }
1584
1585         /*
1586          * Fetch CID from card.
1587          */
1588         err = mmc_send_cid(host, cid);
1589         if (err)
1590                 goto err;
1591
1592         if (oldcard) {
1593                 if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
1594                         pr_debug("%s: Perhaps the card was replaced\n",
1595                                 mmc_hostname(host));
1596                         err = -ENOENT;
1597                         goto err;
1598                 }
1599
1600                 card = oldcard;
1601         } else {
1602                 /*
1603                  * Allocate card structure.
1604                  */
1605                 card = mmc_alloc_card(host, &mmc_type);
1606                 if (IS_ERR(card)) {
1607                         err = PTR_ERR(card);
1608                         goto err;
1609                 }
1610
1611                 card->ocr = ocr;
1612                 card->type = MMC_TYPE_MMC;
1613                 card->rca = 1;
1614                 memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
1615         }
1616
1617         /*
1618          * Call the optional HC's init_card function to handle quirks.
1619          */
1620         if (host->ops->init_card)
1621                 host->ops->init_card(host, card);
1622
1623         /*
1624          * For native buses: set the card RCA and quit open-drain mode.
1625          */
1626         if (!mmc_host_is_spi(host)) {
1627                 err = mmc_set_relative_addr(card);
1628                 if (err)
1629                         goto free_card;
1630
1631                 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
1632         }
1633
1634         if (!oldcard) {
1635                 /*
1636                  * Fetch CSD from card.
1637                  */
1638                 err = mmc_send_csd(card, card->raw_csd);
1639                 if (err)
1640                         goto free_card;
1641
1642                 err = mmc_decode_csd(card);
1643                 if (err)
1644                         goto free_card;
1645                 err = mmc_decode_cid(card);
1646                 if (err)
1647                         goto free_card;
1648         }
1649
1650         /*
1651          * Set the DSR only for cards that support it and only when the
1652          * host requests a DSR configuration.
1653          */
1654         if (card->csd.dsr_imp && host->dsr_req)
1655                 mmc_set_dsr(host);
1656
1657         /*
1658          * Select card, as all following commands rely on that.
1659          */
1660         if (!mmc_host_is_spi(host)) {
1661                 err = mmc_select_card(card);
1662                 if (err)
1663                         goto free_card;
1664         }
1665
1666         if (!oldcard) {
1667                 /* Read extended CSD. */
1668                 err = mmc_read_ext_csd(card);
1669                 if (err)
1670                         goto free_card;
1671
1672                 /*
1673                  * If the card uses byte addressing, check whether it needs to use
1674                  * sector addressing instead.  Handle the case of <2GB cards needing
1675                  * sector addressing.  See section 8.1 of JEDEC Standard JESD84-A441;
1676                  * the OCR register has bit 30 set for sector addressing.
1677                  */
1678                 if (rocr & BIT(30))
1679                         mmc_card_set_blockaddr(card);
1680
1681                 /* Erase size depends on CSD and Extended CSD */
1682                 mmc_set_erase_size(card);
1683         }
1684
1685         /* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
1686         if (card->ext_csd.rev >= 3) {
1687                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1688                                  EXT_CSD_ERASE_GROUP_DEF, 1,
1689                                  card->ext_csd.generic_cmd6_time);
1690
1691                 if (err && err != -EBADMSG)
1692                         goto free_card;
1693
1694                 if (err) {
1695                         err = 0;
1696                         /*
1697                          * Just disable the enhanced area offset & size;
1698                          * we will try to enable ERASE_GROUP_DEF again
1699                          * at the next reinit.
1700                          */
1701                         card->ext_csd.enhanced_area_offset = -EINVAL;
1702                         card->ext_csd.enhanced_area_size = -EINVAL;
1703                 } else {
1704                         card->ext_csd.erase_group_def = 1;
1705                         /*
1706                          * ERASE_GRP_DEF was enabled successfully.
1707                          * This affects the erase size, so the
1708                          * erase size needs to be recomputed here.
1709                          */
1710                         mmc_set_erase_size(card);
1711                 }
1712         }
1713
1714         /*
1715          * Ensure eMMC user default partition is enabled
1716          */
1717         if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
1718                 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
1719                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
1720                                  card->ext_csd.part_config,
1721                                  card->ext_csd.part_time);
1722                 if (err && err != -EBADMSG)
1723                         goto free_card;
1724         }
1725
1726         /*
1727          * Enable power_off_notification byte in the ext_csd register
1728          */
1729         if (card->ext_csd.rev >= 6) {
1730                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1731                                  EXT_CSD_POWER_OFF_NOTIFICATION,
1732                                  EXT_CSD_POWER_ON,
1733                                  card->ext_csd.generic_cmd6_time);
1734                 if (err && err != -EBADMSG)
1735                         goto free_card;
1736
1737                 /*
1738                  * The err can be -EBADMSG or 0,
1739                  * so check for success and update the flag
1740                  */
1741                 if (!err)
1742                         card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
1743         }
1744
1745         /* set erase_arg */
1746         if (mmc_can_discard(card))
1747                 card->erase_arg = MMC_DISCARD_ARG;
1748         else if (mmc_can_trim(card))
1749                 card->erase_arg = MMC_TRIM_ARG;
1750         else
1751                 card->erase_arg = MMC_ERASE_ARG;
1752
1753         /*
1754          * Select timing interface
1755          */
1756         err = mmc_select_timing(card);
1757         if (err)
1758                 goto free_card;
1759
1760         if (mmc_card_hs200(card)) {
1761                 err = mmc_hs200_tuning(card);
1762                 if (err)
1763                         goto free_card;
1764
1765                 err = mmc_select_hs400(card);
1766                 if (err)
1767                         goto free_card;
1768         } else if (!mmc_card_hs400es(card)) {
1769                 /* Optionally select the desired bus width */
1770                 err = mmc_select_bus_width(card);
1771                 if (err > 0 && mmc_card_hs(card)) {
1772                         err = mmc_select_hs_ddr(card);
1773                         if (err)
1774                                 goto free_card;
1775                 }
1776         }
1777
1778         /*
1779          * Choose the power class for the selected bus interface.
1780          */
1781         mmc_select_powerclass(card);
1782
1783         /*
1784          * Enable HPI feature (if supported)
1785          */
1786         if (card->ext_csd.hpi) {
1787                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1788                                 EXT_CSD_HPI_MGMT, 1,
1789                                 card->ext_csd.generic_cmd6_time);
1790                 if (err && err != -EBADMSG)
1791                         goto free_card;
1792                 if (err) {
1793                         pr_warn("%s: Enabling HPI failed\n",
1794                                 mmc_hostname(card->host));
1795                         card->ext_csd.hpi_en = 0;
1796                         err = 0;
1797                 } else {
1798                         card->ext_csd.hpi_en = 1;
1799                 }
1800         }
1801
1802         /*
1803          * If the cache size is greater than 0, a cache is present and it can
1804          * be turned on. Note that some eMMCs from Micron have been reported to
1805          * need an ~800 ms timeout when enabling the cache after sudden power
1806          * failure tests. Let's extend the timeout to a minimum of
1807          * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
1808          */
1809         if (card->ext_csd.cache_size > 0) {
1810                 unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
1811
1812                 timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
1813                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1814                                 EXT_CSD_CACHE_CTRL, 1, timeout_ms);
1815                 if (err && err != -EBADMSG)
1816                         goto free_card;
1817
1818                 /*
1819                  * Only if there was no error was the cache turned on successfully.
1820                  */
1821                 if (err) {
1822                         pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
1823                                 mmc_hostname(card->host), err);
1824                         card->ext_csd.cache_ctrl = 0;
1825                         err = 0;
1826                 } else {
1827                         card->ext_csd.cache_ctrl = 1;
1828                 }
1829         }
1830
1831         /*
1832          * Enable Command Queue if supported. Note that Packed Commands cannot
1833          * be used with Command Queue.
1834          */
1835         card->ext_csd.cmdq_en = false;
1836         if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
1837                 err = mmc_cmdq_enable(card);
1838                 if (err && err != -EBADMSG)
1839                         goto free_card;
1840                 if (err) {
1841                         pr_warn("%s: Enabling CMDQ failed\n",
1842                                 mmc_hostname(card->host));
1843                         card->ext_csd.cmdq_support = false;
1844                         card->ext_csd.cmdq_depth = 0;
1845                         err = 0;
1846                 }
1847         }
1848         /*
1849          * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
1850          * disabled temporarily, so a flag is needed to indicate when the
1851          * Command Queue should be re-enabled.
1852          */
1853         card->reenable_cmdq = card->ext_csd.cmdq_en;
1854
1855         if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
1856                 err = host->cqe_ops->cqe_enable(host, card);
1857                 if (err) {
1858                         pr_err("%s: Failed to enable CQE, error %d\n",
1859                                 mmc_hostname(host), err);
1860                 } else {
1861                         host->cqe_enabled = true;
1862                         pr_info("%s: Command Queue Engine enabled\n",
1863                                 mmc_hostname(host));
1864                 }
1865         }
1866
1867         if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
1868             host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1869                 pr_err("%s: Host failed to negotiate down from 3.3V\n",
1870                         mmc_hostname(host));
1871                 err = -EINVAL;
1872                 goto free_card;
1873         }
1874
1875         if (!oldcard)
1876                 host->card = card;
1877
1878         return 0;
1879
1880 free_card:
1881         if (!oldcard)
1882                 mmc_remove_card(card);
1883 err:
1884         return err;
1885 }
1886
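/*
 * Sleep (CMD5) is supported by cards implementing EXT_CSD revision 3 or
 * later (eMMC 4.3+).
 */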
1887 static int mmc_can_sleep(struct mmc_card *card)
1888 {
1889         return (card && card->ext_csd.rev >= 3);
1890 }
1891
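/*
 * Put the card into the Sleep state: deselect it, then issue CMD5
 * (SLEEP_AWAKE) with the sleep bit set. The busy timeout is derived from the
 * EXT_CSD S_A_TIMEOUT value, which is stored in 100 ns units, hence the
 * divide by 10000 to get milliseconds.
 */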
1892 static int mmc_sleep(struct mmc_host *host)
1893 {
1894         struct mmc_command cmd = {};
1895         struct mmc_card *card = host->card;
1896         unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
1897         int err;
1898
1899         /* Re-tuning can't be done once the card is deselected */
1900         mmc_retune_hold(host);
1901
1902         err = mmc_deselect_cards(host);
1903         if (err)
1904                 goto out_release;
1905
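        /*
         * CMD5 argument: RCA in bits [31:16]; bit 15 set selects Sleep
         * (cleared selects Awake).
         */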
1906         cmd.opcode = MMC_SLEEP_AWAKE;
1907         cmd.arg = card->rca << 16;
1908         cmd.arg |= 1 << 15;
1909
1910         /*
1911          * If the max_busy_timeout of the host is specified, validate it against
1912          * the sleep cmd timeout. A failure means we need to prevent the host
1913          * from doing hw busy detection, which is done by converting to an R1
1914          * response instead of an R1B.
1915          */
1916         if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
1917                 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1918         } else {
1919                 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1920                 cmd.busy_timeout = timeout_ms;
1921         }
1922
1923         err = mmc_wait_for_cmd(host, &cmd, 0);
1924         if (err)
1925                 goto out_release;
1926
1927         /*
1928          * If the host does not wait while the card signals busy, then we will
1929          * have to wait the sleep/awake timeout.  Note, we cannot use the
1930          * SEND_STATUS command to poll the status because that command (and most
1931          * others) is invalid while the card sleeps.
1932          */
1933         if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
1934                 mmc_delay(timeout_ms);
1935
1936 out_release:
1937         mmc_retune_release(host);
1938         return err;
1939 }
1940
1941 static int mmc_can_poweroff_notify(const struct mmc_card *card)
1942 {
1943         return card &&
1944                 mmc_card_mmc(card) &&
1945                 (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
1946 }
1947
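/*
 * Tell the card that power is about to be removed by writing the requested
 * notification type to the POWER_OFF_NOTIFICATION byte. POWER_OFF_LONG uses
 * the card's POWER_OFF_LONG_TIME timeout; other types use the generic CMD6
 * time.
 */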
1948 static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
1949 {
1950         unsigned int timeout = card->ext_csd.generic_cmd6_time;
1951         int err;
1952
1953         /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
1954         if (notify_type == EXT_CSD_POWER_OFF_LONG)
1955                 timeout = card->ext_csd.power_off_longtime;
1956
1957         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1958                         EXT_CSD_POWER_OFF_NOTIFICATION,
1959                         notify_type, timeout, 0, true, false, false);
1960         if (err)
1961                 pr_err("%s: Power Off Notification timed out, %u\n",
1962                        mmc_hostname(card->host), timeout);
1963
1964         /* Disable the power off notification after the switch operation. */
1965         card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
1966
1967         return err;
1968 }
1969
1970 /*
1971  * Host is being removed. Free up the current card.
1972  */
1973 static void mmc_remove(struct mmc_host *host)
1974 {
1975         mmc_remove_card(host->card);
1976         host->card = NULL;
1977 }
1978
1979 /*
1980  * Card detection - card is alive.
1981  */
1982 static int mmc_alive(struct mmc_host *host)
1983 {
1984         return mmc_send_status(host->card, NULL);
1985 }
1986
1987 /*
1988  * Card detection callback from host.
1989  */
1990 static void mmc_detect(struct mmc_host *host)
1991 {
1992         int err;
1993
1994         mmc_get_card(host->card, NULL);
1995
1996         /*
1997          * Just check if our card has been removed.
1998          */
1999         err = _mmc_detect_card_removed(host);
2000
2001         mmc_put_card(host->card, NULL);
2002
2003         if (err) {
2004                 mmc_remove(host);
2005
2006                 mmc_claim_host(host);
2007                 mmc_detach_bus(host);
2008                 mmc_power_off(host);
2009                 mmc_release_host(host);
2010         }
2011 }
2012
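/*
 * Common suspend path: flush the card's cache, then prefer Power Off
 * Notification when allowed, fall back to Sleep (CMD5), or as a last resort
 * simply deselect the card, before powering it off and marking it suspended.
 */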
2013 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
2014 {
2015         int err = 0;
2016         unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
2017                                         EXT_CSD_POWER_OFF_LONG;
2018
2019         mmc_claim_host(host);
2020
2021         if (mmc_card_suspended(host->card))
2022                 goto out;
2023
2024         err = mmc_flush_cache(host->card);
2025         if (err)
2026                 goto out;
2027
2028         if (mmc_can_poweroff_notify(host->card) &&
2029                 ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
2030                 err = mmc_poweroff_notify(host->card, notify_type);
2031         else if (mmc_can_sleep(host->card))
2032                 err = mmc_sleep(host);
2033         else if (!mmc_host_is_spi(host))
2034                 err = mmc_deselect_cards(host);
2035
2036         if (!err) {
2037                 mmc_power_off(host);
2038                 mmc_card_set_suspended(host->card);
2039         }
2040 out:
2041         mmc_release_host(host);
2042         return err;
2043 }
2044
2045 /*
2046  * Suspend callback
2047  */
2048 static int mmc_suspend(struct mmc_host *host)
2049 {
2050         int err;
2051
2052         err = _mmc_suspend(host, true);
2053         if (!err) {
2054                 pm_runtime_disable(&host->card->dev);
2055                 pm_runtime_set_suspended(&host->card->dev);
2056         }
2057
2058         return err;
2059 }
2060
2061 /*
2062  * This function tries to determine if the same card is still present
2063  * and, if so, restore all state to it.
2064  */
2065 static int _mmc_resume(struct mmc_host *host)
2066 {
2067         int err = 0;
2068
2069         mmc_claim_host(host);
2070
2071         if (!mmc_card_suspended(host->card))
2072                 goto out;
2073
2074         mmc_power_up(host, host->card->ocr);
2075         err = mmc_init_card(host, host->card->ocr, host->card);
2076         mmc_card_clr_suspended(host->card);
2077
2078 out:
2079         mmc_release_host(host);
2080         return err;
2081 }
2082
2083 /*
2084  * Shutdown callback
2085  */
2086 static int mmc_shutdown(struct mmc_host *host)
2087 {
2088         int err = 0;
2089
2090         /*
2091          * In a specific case for poweroff notify, we need to resume the card
2092          * before we can shut it down properly.
2093          */
2094         if (mmc_can_poweroff_notify(host->card) &&
2095                 !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
2096                 err = _mmc_resume(host);
2097
2098         if (!err)
2099                 err = _mmc_suspend(host, false);
2100
2101         return err;
2102 }
2103
2104 /*
2105  * Callback for resume.
2106  */
2107 static int mmc_resume(struct mmc_host *host)
2108 {
2109         pm_runtime_enable(&host->card->dev);
2110         return 0;
2111 }
2112
2113 /*
2114  * Callback for runtime_suspend.
2115  */
2116 static int mmc_runtime_suspend(struct mmc_host *host)
2117 {
2118         int err;
2119
2120         if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
2121                 return 0;
2122
2123         err = _mmc_suspend(host, true);
2124         if (err)
2125                 pr_err("%s: error %d doing aggressive suspend\n",
2126                         mmc_hostname(host), err);
2127
2128         return err;
2129 }
2130
2131 /*
2132  * Callback for runtime_resume.
2133  */
2134 static int mmc_runtime_resume(struct mmc_host *host)
2135 {
2136         int err;
2137
2138         err = _mmc_resume(host);
2139         if (err && err != -ENOMEDIUM)
2140                 pr_err("%s: error %d doing runtime resume\n",
2141                         mmc_hostname(host), err);
2142
2143         return 0;
2144 }
2145
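/*
 * Hardware reset via RST_n is only usable if the card's (one-time
 * programmable) RST_n_FUNCTION byte has been set to permanently enabled.
 */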
2146 static int mmc_can_reset(struct mmc_card *card)
2147 {
2148         u8 rst_n_function;
2149
2150         rst_n_function = card->ext_csd.rst_n_function;
2151         if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
2152                 return 0;
2153         return 1;
2154 }
2155
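/*
 * Reset the card: assert RST_n if both the host and the card support it,
 * otherwise fall back to a full power cycle, then reinitialise the card.
 */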
2156 static int _mmc_hw_reset(struct mmc_host *host)
2157 {
2158         struct mmc_card *card = host->card;
2159
2160         /*
2161          * In the case of recovery, we can't always expect flushing the cache to
2162          * work, but we give it a try and ignore any errors.
2163          */
2164         mmc_flush_cache(host->card);
2165
2166         if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
2167              mmc_can_reset(card)) {
2168                 /* If the card accepts the RST_n signal, send it. */
2169                 mmc_set_clock(host, host->f_init);
2170                 host->ops->hw_reset(host);
2171                 /* Set initial state and call mmc_set_ios */
2172                 mmc_set_initial_state(host);
2173         } else {
2174                 /* Do a brute force power cycle */
2175                 mmc_power_cycle(host, card->ocr);
2176                 mmc_pwrseq_reset(host);
2177         }
2178         return mmc_init_card(host, card->ocr, card);
2179 }
2180
2181 static const struct mmc_bus_ops mmc_ops = {
2182         .remove = mmc_remove,
2183         .detect = mmc_detect,
2184         .suspend = mmc_suspend,
2185         .resume = mmc_resume,
2186         .runtime_suspend = mmc_runtime_suspend,
2187         .runtime_resume = mmc_runtime_resume,
2188         .alive = mmc_alive,
2189         .shutdown = mmc_shutdown,
2190         .hw_reset = _mmc_hw_reset,
2191 };
2192
2193 /*
2194  * Starting point for MMC card init.
2195  */
2196 int mmc_attach_mmc(struct mmc_host *host)
2197 {
2198         int err;
2199         u32 ocr, rocr;
2200
2201         WARN_ON(!host->claimed);
2202
2203         /* Set correct bus mode for MMC before attempting attach */
2204         if (!mmc_host_is_spi(host))
2205                 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
2206
2207         err = mmc_send_op_cond(host, 0, &ocr);
2208         if (err)
2209                 return err;
2210
2211         mmc_attach_bus(host, &mmc_ops);
2212         if (host->ocr_avail_mmc)
2213                 host->ocr_avail = host->ocr_avail_mmc;
2214
2215         /*
2216          * We need to get OCR a different way for SPI.
2217          */
2218         if (mmc_host_is_spi(host)) {
2219                 err = mmc_spi_read_ocr(host, 1, &ocr);
2220                 if (err)
2221                         goto err;
2222         }
2223
2224         rocr = mmc_select_voltage(host, ocr);
2225
2226         /*
2227          * Can we support the voltage of the card?
2228          */
2229         if (!rocr) {
2230                 err = -EINVAL;
2231                 goto err;
2232         }
2233
2234         /*
2235          * Detect and init the card.
2236          */
2237         err = mmc_init_card(host, rocr, NULL);
2238         if (err)
2239                 goto err;
2240
2241         mmc_release_host(host);
2242         err = mmc_add_card(host->card);
2243         if (err)
2244                 goto remove_card;
2245
2246         mmc_claim_host(host);
2247         return 0;
2248
2249 remove_card:
2250         mmc_remove_card(host->card);
2251         mmc_claim_host(host);
2252         host->card = NULL;
2253 err:
2254         mmc_detach_bus(host);
2255
2256         pr_err("%s: error %d whilst initialising MMC card\n",
2257                 mmc_hostname(host), err);
2258
2259         return err;
2260 }