Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Block driver for media (i.e., flash cards) | |
3 | * | |
4 | * Copyright 2002 Hewlett-Packard Company | |
979ce720 | 5 | * Copyright 2005-2008 Pierre Ossman |
1da177e4 LT |
6 | * |
7 | * Use consistent with the GNU GPL is permitted, | |
8 | * provided that this copyright notice is | |
9 | * preserved in its entirety in all copies and derived works. | |
10 | * | |
11 | * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, | |
12 | * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS | |
13 | * FITNESS FOR ANY PARTICULAR PURPOSE. | |
14 | * | |
15 | * Many thanks to Alessandro Rubini and Jonathan Corbet! | |
16 | * | |
17 | * Author: Andrew Christian | |
18 | * 28 May 2002 | |
19 | */ | |
20 | #include <linux/moduleparam.h> | |
21 | #include <linux/module.h> | |
22 | #include <linux/init.h> | |
23 | ||
1da177e4 LT |
24 | #include <linux/kernel.h> |
25 | #include <linux/fs.h> | |
5a0e3ad6 | 26 | #include <linux/slab.h> |
1da177e4 LT |
27 | #include <linux/errno.h> |
28 | #include <linux/hdreg.h> | |
29 | #include <linux/kdev_t.h> | |
30 | #include <linux/blkdev.h> | |
97548575 | 31 | #include <linux/cdev.h> |
a621aaed | 32 | #include <linux/mutex.h> |
ec5a19dd | 33 | #include <linux/scatterlist.h> |
a7bbb573 | 34 | #include <linux/string_helpers.h> |
cb87ea28 JC |
35 | #include <linux/delay.h> |
36 | #include <linux/capability.h> | |
37 | #include <linux/compat.h> | |
e94cfef6 | 38 | #include <linux/pm_runtime.h> |
b10fa99e | 39 | #include <linux/idr.h> |
627c3ccf | 40 | #include <linux/debugfs.h> |
1da177e4 | 41 | |
cb87ea28 | 42 | #include <linux/mmc/ioctl.h> |
1da177e4 | 43 | #include <linux/mmc/card.h> |
385e3227 | 44 | #include <linux/mmc/host.h> |
da7fbe58 PO |
45 | #include <linux/mmc/mmc.h> |
46 | #include <linux/mmc/sd.h> | |
1da177e4 | 47 | |
7c0f6ba6 | 48 | #include <linux/uaccess.h> |
1da177e4 | 49 | |
98ac2162 | 50 | #include "queue.h" |
48ab086d | 51 | #include "block.h" |
55244c56 | 52 | #include "core.h" |
4facdde1 | 53 | #include "card.h" |
93f1c150 | 54 | #include "crypto.h" |
5857b29b | 55 | #include "host.h" |
4facdde1 | 56 | #include "bus.h" |
55244c56 | 57 | #include "mmc_ops.h" |
28fc64af | 58 | #include "quirks.h" |
55244c56 | 59 | #include "sd_ops.h" |
1da177e4 | 60 | |
6b0b6285 | 61 | MODULE_ALIAS("mmc:block"); |
5e71b7a6 OJ |
62 | #ifdef MODULE_PARAM_PREFIX |
63 | #undef MODULE_PARAM_PREFIX | |
64 | #endif | |
65 | #define MODULE_PARAM_PREFIX "mmcblk." | |
66 | ||
6b7a363d AH |
67 | /* |
68 | * Set a 10 second timeout for polling write request busy state. Note, mmc core | |
69 | * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10 | |
70 | * second software timer to timeout the whole request, so 10 seconds should be | |
71 | * ample. | |
72 | */ | |
73 | #define MMC_BLK_TIMEOUT_MS (10 * 1000) | |
775a9362 | 74 | #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) |
a0e95766 | 75 | #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8) |
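The two macros above recover the EXT_CSD byte index and value from a CMD6 (MMC_SWITCH) argument, whose layout is (access << 24) | (index << 16) | (value << 8) | cmd_set, the same layout the driver uses when it builds the cache-flush MMC_SWITCH further down. A minimal stand-alone sketch of that round trip, using purely illustrative numbers:

```c
#include <stdio.h>
#include <stdint.h>

/* Same bit extraction as the driver macros above. */
#define MMC_EXTRACT_INDEX_FROM_ARG(x) (((x) & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) (((x) & 0x0000FF00) >> 8)

int main(void)
{
	/* Hypothetical CMD6 argument: write-byte access (0x03),
	 * EXT_CSD byte index 165, value 1, command set 0. */
	uint32_t arg = (0x03u << 24) | (165u << 16) | (1u << 8);

	printf("index=%u value=%u\n",
	       (unsigned)MMC_EXTRACT_INDEX_FROM_ARG(arg),  /* 165 */
	       (unsigned)MMC_EXTRACT_VALUE_FROM_ARG(arg)); /* 1 */
	return 0;
}
```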
6a7a6b45 | 76 | |
d3df0465 | 77 | #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ |
ce39f9d1 | 78 | (rq_data_dir(req) == WRITE)) |
5e71b7a6 | 79 | static DEFINE_MUTEX(block_mutex); |
6b0b6285 | 80 | |
1da177e4 | 81 | /* |
5e71b7a6 OJ |
82 | * The defaults come from config options but can be overridden by module |
83 | * or bootarg options. | |
1da177e4 | 84 | */ |
5e71b7a6 | 85 | static int perdev_minors = CONFIG_MMC_BLOCK_MINORS; |
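As the comment above notes, this default can also be overridden at boot: with the "mmcblk." parameter prefix defined earlier, that takes the form of a kernel command-line argument such as mmcblk.perdev_minors=16 (an illustrative value, not a recommendation).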
1dff3144 | 86 | |
5e71b7a6 OJ |
87 | /* |
88 | * We've only got one major, so the number of mmcblk devices is |
a26eba61 | 89 | * limited to (1 << 20) / number of minors per device. It is also |
b10fa99e | 90 | * limited by the MAX_DEVICES below. |
5e71b7a6 OJ |
91 | */ |
92 | static int max_devices; | |
93 | ||
a26eba61 BH |
94 | #define MAX_DEVICES 256 |
95 | ||
b10fa99e | 96 | static DEFINE_IDA(mmc_blk_ida); |
97548575 | 97 | static DEFINE_IDA(mmc_rpmb_ida); |
1da177e4 | 98 | |
1da177e4 LT |
99 | /* |
100 | * There is one mmc_blk_data per slot. | |
101 | */ | |
102 | struct mmc_blk_data { | |
307d8e6f | 103 | struct device *parent; |
1da177e4 LT |
104 | struct gendisk *disk; |
105 | struct mmc_queue queue; | |
371a689f | 106 | struct list_head part; |
97548575 | 107 | struct list_head rpmbs; |
1da177e4 | 108 | |
d0c97cfb AW |
109 | unsigned int flags; |
110 | #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ | |
111 | #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ | |
112 | ||
1da177e4 | 113 | unsigned int usage; |
a6f6c96b | 114 | unsigned int read_only; |
371a689f | 115 | unsigned int part_type; |
67716327 AH |
116 | unsigned int reset_done; |
117 | #define MMC_BLK_READ BIT(0) | |
118 | #define MMC_BLK_WRITE BIT(1) | |
119 | #define MMC_BLK_DISCARD BIT(2) | |
120 | #define MMC_BLK_SECDISCARD BIT(3) | |
1e8e55b6 | 121 | #define MMC_BLK_CQE_RECOVERY BIT(4) |
371a689f AW |
122 | |
123 | /* | |
124 | * Only set in main mmc_blk_data associated | |
fc95e30b | 125 | * with mmc_card with dev_set_drvdata, and keeps |
371a689f AW |
126 | * track of the current selected device partition. |
127 | */ | |
128 | unsigned int part_curr; | |
129 | struct device_attribute force_ro; | |
add710ea JR |
130 | struct device_attribute power_ro_lock; |
131 | int area_type; | |
f9f0da98 AH |
132 | |
133 | /* debugfs files (only in main mmc_blk_data) */ | |
134 | struct dentry *status_dentry; | |
135 | struct dentry *ext_csd_dentry; | |
1da177e4 LT |
136 | }; |
137 | ||
97548575 LW |
138 | /* Device type for RPMB character devices */ |
139 | static dev_t mmc_rpmb_devt; | |
140 | ||
141 | /* Bus type for RPMB character devices */ | |
142 | static struct bus_type mmc_rpmb_bus_type = { | |
143 | .name = "mmc_rpmb", | |
144 | }; | |
145 | ||
146 | /** | |
147 | * struct mmc_rpmb_data - special RPMB device type for these areas | |
148 | * @dev: the device for the RPMB area | |
149 | * @chrdev: character device for the RPMB area | |
150 | * @id: unique device ID number | |
151 | * @part_index: partition index (0 on first) | |
152 | * @md: parent MMC block device | |
153 | * @node: list item, so we can put this device on a list | |
154 | */ | |
155 | struct mmc_rpmb_data { | |
156 | struct device dev; | |
157 | struct cdev chrdev; | |
158 | int id; | |
159 | unsigned int part_index; | |
160 | struct mmc_blk_data *md; | |
161 | struct list_head node; | |
162 | }; | |
163 | ||
a621aaed | 164 | static DEFINE_MUTEX(open_lock); |
1da177e4 | 165 | |
5e71b7a6 OJ |
166 | module_param(perdev_minors, int, 0444); |
167 | MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device"); |
168 | ||
8d1e977d | 169 | static inline int mmc_blk_part_switch(struct mmc_card *card, |
1f797edc | 170 | unsigned int part_type); |
511ce378 BW |
171 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
172 | struct mmc_card *card, | |
173 | int disable_multi, | |
174 | struct mmc_queue *mq); | |
175 | static void mmc_blk_hsq_req_done(struct mmc_request *mrq); | |
cdf8a6fb | 176 | |
1da177e4 LT |
177 | static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) |
178 | { | |
179 | struct mmc_blk_data *md; | |
180 | ||
a621aaed | 181 | mutex_lock(&open_lock); |
1da177e4 LT |
182 | md = disk->private_data; |
183 | if (md && md->usage == 0) | |
184 | md = NULL; | |
185 | if (md) | |
186 | md->usage++; | |
a621aaed | 187 | mutex_unlock(&open_lock); |
1da177e4 LT |
188 | |
189 | return md; | |
190 | } | |
191 | ||
371a689f AW |
192 | static inline int mmc_get_devidx(struct gendisk *disk) |
193 | { | |
382c55f8 | 194 | int devidx = disk->first_minor / perdev_minors; |
371a689f AW |
195 | return devidx; |
196 | } | |
197 | ||
1da177e4 LT |
198 | static void mmc_blk_put(struct mmc_blk_data *md) |
199 | { | |
a621aaed | 200 | mutex_lock(&open_lock); |
1da177e4 LT |
201 | md->usage--; |
202 | if (md->usage == 0) { | |
371a689f | 203 | int devidx = mmc_get_devidx(md->disk); |
41e3efd0 | 204 | blk_put_queue(md->queue.queue); |
a04848c7 | 205 | ida_simple_remove(&mmc_blk_ida, devidx); |
1da177e4 | 206 | put_disk(md->disk); |
1da177e4 LT |
207 | kfree(md); |
208 | } | |
a621aaed | 209 | mutex_unlock(&open_lock); |
1da177e4 LT |
210 | } |
211 | ||
add710ea JR |
212 | static ssize_t power_ro_lock_show(struct device *dev, |
213 | struct device_attribute *attr, char *buf) | |
214 | { | |
215 | int ret; | |
216 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | |
217 | struct mmc_card *card = md->queue.card; | |
218 | int locked = 0; | |
219 | ||
220 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) | |
221 | locked = 2; | |
222 | else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) | |
223 | locked = 1; | |
224 | ||
225 | ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); | |
226 | ||
9098f84c TW |
227 | mmc_blk_put(md); |
228 | ||
add710ea JR |
229 | return ret; |
230 | } | |
231 | ||
232 | static ssize_t power_ro_lock_store(struct device *dev, | |
233 | struct device_attribute *attr, const char *buf, size_t count) | |
234 | { | |
235 | int ret; | |
236 | struct mmc_blk_data *md, *part_md; | |
0493f6fe LW |
237 | struct mmc_queue *mq; |
238 | struct request *req; | |
add710ea JR |
239 | unsigned long set; |
240 | ||
241 | if (kstrtoul(buf, 0, &set)) | |
242 | return -EINVAL; | |
243 | ||
244 | if (set != 1) | |
245 | return count; | |
246 | ||
247 | md = mmc_blk_get(dev_to_disk(dev)); | |
0493f6fe | 248 | mq = &md->queue; |
add710ea | 249 | |
0493f6fe | 250 | /* Dispatch locking to the block layer */ |
ff005a06 | 251 | req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0); |
fb8e456e AH |
252 | if (IS_ERR(req)) { |
253 | count = PTR_ERR(req); | |
254 | goto out_put; | |
255 | } | |
0493f6fe | 256 | req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; |
684da762 | 257 | blk_execute_rq(NULL, req, 0); |
0493f6fe | 258 | ret = req_to_mmc_queue_req(req)->drv_op_result; |
34c089e8 | 259 | blk_put_request(req); |
add710ea JR |
260 | |
261 | if (!ret) { | |
262 | pr_info("%s: Locking boot partition ro until next power on\n", | |
263 | md->disk->disk_name); | |
264 | set_disk_ro(md->disk, 1); | |
265 | ||
266 | list_for_each_entry(part_md, &md->part, part) | |
267 | if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { | |
268 | pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); | |
269 | set_disk_ro(part_md->disk, 1); | |
270 | } | |
271 | } | |
fb8e456e | 272 | out_put: |
add710ea JR |
273 | mmc_blk_put(md); |
274 | return count; | |
275 | } | |
276 | ||
371a689f AW |
277 | static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, |
278 | char *buf) | |
279 | { | |
280 | int ret; | |
281 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | |
282 | ||
0031a98a | 283 | ret = snprintf(buf, PAGE_SIZE, "%d\n", |
371a689f AW |
284 | get_disk_ro(dev_to_disk(dev)) ^ |
285 | md->read_only); | |
286 | mmc_blk_put(md); | |
287 | return ret; | |
288 | } | |
289 | ||
290 | static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, | |
291 | const char *buf, size_t count) | |
292 | { | |
293 | int ret; | |
294 | char *end; | |
295 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | |
296 | unsigned long set = simple_strtoul(buf, &end, 0); | |
297 | if (end == buf) { | |
298 | ret = -EINVAL; | |
299 | goto out; | |
300 | } | |
301 | ||
302 | set_disk_ro(dev_to_disk(dev), set || md->read_only); | |
303 | ret = count; | |
304 | out: | |
305 | mmc_blk_put(md); | |
306 | return ret; | |
307 | } | |
308 | ||
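These two handlers back the per-disk force_ro sysfs attribute, which is how eMMC boot partitions (read-only by default) are made writable from user space; on a typical system that means writing 0 or 1 to something like /sys/block/mmcblk0boot0/force_ro, where the device name is only an example.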
a5a1561f | 309 | static int mmc_blk_open(struct block_device *bdev, fmode_t mode) |
1da177e4 | 310 | { |
a5a1561f | 311 | struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); |
1da177e4 LT |
312 | int ret = -ENXIO; |
313 | ||
2a48fc0a | 314 | mutex_lock(&block_mutex); |
1da177e4 | 315 | if (md) { |
1da177e4 | 316 | ret = 0; |
a5a1561f | 317 | if ((mode & FMODE_WRITE) && md->read_only) { |
70bb0896 | 318 | mmc_blk_put(md); |
a00fc090 | 319 | ret = -EROFS; |
70bb0896 | 320 | } |
1da177e4 | 321 | } |
2a48fc0a | 322 | mutex_unlock(&block_mutex); |
1da177e4 LT |
323 | |
324 | return ret; | |
325 | } | |
326 | ||
db2a144b | 327 | static void mmc_blk_release(struct gendisk *disk, fmode_t mode) |
1da177e4 | 328 | { |
a5a1561f | 329 | struct mmc_blk_data *md = disk->private_data; |
1da177e4 | 330 | |
2a48fc0a | 331 | mutex_lock(&block_mutex); |
1da177e4 | 332 | mmc_blk_put(md); |
2a48fc0a | 333 | mutex_unlock(&block_mutex); |
1da177e4 LT |
334 | } |
335 | ||
336 | static int | |
a885c8c4 | 337 | mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
1da177e4 | 338 | { |
a885c8c4 CH |
339 | geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); |
340 | geo->heads = 4; | |
341 | geo->sectors = 16; | |
342 | return 0; | |
1da177e4 LT |
343 | } |
344 | ||
cb87ea28 JC |
345 | struct mmc_blk_ioc_data { |
346 | struct mmc_ioc_cmd ic; | |
347 | unsigned char *buf; | |
348 | u64 buf_bytes; | |
97548575 | 349 | struct mmc_rpmb_data *rpmb; |
cb87ea28 JC |
350 | }; |
351 | ||
352 | static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( | |
353 | struct mmc_ioc_cmd __user *user) | |
354 | { | |
355 | struct mmc_blk_ioc_data *idata; | |
356 | int err; | |
357 | ||
1ff8950c | 358 | idata = kmalloc(sizeof(*idata), GFP_KERNEL); |
cb87ea28 JC |
359 | if (!idata) { |
360 | err = -ENOMEM; | |
aea253ec | 361 | goto out; |
cb87ea28 JC |
362 | } |
363 | ||
364 | if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { | |
365 | err = -EFAULT; | |
aea253ec | 366 | goto idata_err; |
cb87ea28 JC |
367 | } |
368 | ||
369 | idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; | |
370 | if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { | |
371 | err = -EOVERFLOW; | |
aea253ec | 372 | goto idata_err; |
cb87ea28 JC |
373 | } |
374 | ||
bfe5b1b1 VV |
375 | if (!idata->buf_bytes) { |
376 | idata->buf = NULL; | |
4d6144de | 377 | return idata; |
bfe5b1b1 | 378 | } |
4d6144de | 379 | |
97a0c313 ME |
380 | idata->buf = memdup_user((void __user *)(unsigned long) |
381 | idata->ic.data_ptr, idata->buf_bytes); | |
382 | if (IS_ERR(idata->buf)) { | |
383 | err = PTR_ERR(idata->buf); | |
aea253ec | 384 | goto idata_err; |
cb87ea28 JC |
385 | } |
386 | ||
cb87ea28 JC |
387 | return idata; |
388 | ||
aea253ec | 389 | idata_err: |
cb87ea28 | 390 | kfree(idata); |
aea253ec | 391 | out: |
cb87ea28 | 392 | return ERR_PTR(err); |
cb87ea28 JC |
393 | } |
394 | ||
a5f5774c JH |
395 | static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, |
396 | struct mmc_blk_ioc_data *idata) | |
397 | { | |
398 | struct mmc_ioc_cmd *ic = &idata->ic; | |
399 | ||
400 | if (copy_to_user(&(ic_ptr->response), ic->response, | |
401 | sizeof(ic->response))) | |
402 | return -EFAULT; | |
403 | ||
404 | if (!idata->ic.write_flag) { | |
405 | if (copy_to_user((void __user *)(unsigned long)ic->data_ptr, | |
406 | idata->buf, idata->buf_bytes)) | |
407 | return -EFAULT; | |
408 | } | |
409 | ||
410 | return 0; | |
411 | } | |
412 | ||
a0d4c7eb CJ |
413 | static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, |
414 | u32 *resp_errs) | |
415 | { | |
416 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | |
417 | int err = 0; | |
418 | u32 status; | |
419 | ||
420 | do { | |
421 | bool done = time_after(jiffies, timeout); | |
422 | ||
423 | err = __mmc_send_status(card, &status, 5); | |
424 | if (err) { | |
425 | dev_err(mmc_dev(card->host), | |
426 | "error %d requesting status\n", err); | |
427 | return err; | |
428 | } | |
429 | ||
430 | /* Accumulate any response error bits seen */ | |
431 | if (resp_errs) | |
432 | *resp_errs |= status; | |
433 | ||
434 | /* | |
435 | * Timeout if the device never becomes ready for data and never | |
436 | * leaves the program state. | |
437 | */ | |
438 | if (done) { | |
439 | dev_err(mmc_dev(card->host), | |
440 | "Card stuck in wrong state! %s status: %#x\n", | |
441 | __func__, status); | |
442 | return -ETIMEDOUT; | |
443 | } | |
40c96853 | 444 | } while (!mmc_ready_for_data(status)); |
a0d4c7eb CJ |
445 | |
446 | return err; | |
447 | } | |
448 | ||
a5f5774c JH |
449 | static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, |
450 | struct mmc_blk_ioc_data *idata) | |
cb87ea28 | 451 | { |
a44f7cb9 | 452 | struct mmc_command cmd = {}, sbc = {}; |
c7836d15 MY |
453 | struct mmc_data data = {}; |
454 | struct mmc_request mrq = {}; | |
cb87ea28 JC |
455 | struct scatterlist sg; |
456 | int err; | |
97548575 | 457 | unsigned int target_part; |
cb87ea28 | 458 | |
a5f5774c JH |
459 | if (!card || !md || !idata) |
460 | return -EINVAL; | |
cb87ea28 | 461 | |
97548575 LW |
462 | /* |
463 | * The RPMB accesses come in from the character device, so we |
464 | * need to target these explicitly. Else we just target the | |
465 | * partition type for the block device the ioctl() was issued | |
466 | * on. | |
467 | */ | |
468 | if (idata->rpmb) { | |
469 | /* Support multiple RPMB partitions */ | |
470 | target_part = idata->rpmb->part_index; | |
471 | target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB; | |
472 | } else { | |
473 | target_part = md->part_type; | |
474 | } | |
8d1e977d | 475 | |
4d6144de JR |
476 | cmd.opcode = idata->ic.opcode; |
477 | cmd.arg = idata->ic.arg; | |
478 | cmd.flags = idata->ic.flags; | |
479 | ||
480 | if (idata->buf_bytes) { | |
481 | data.sg = &sg; | |
482 | data.sg_len = 1; | |
483 | data.blksz = idata->ic.blksz; | |
484 | data.blocks = idata->ic.blocks; | |
485 | ||
486 | sg_init_one(data.sg, idata->buf, idata->buf_bytes); | |
487 | ||
488 | if (idata->ic.write_flag) | |
489 | data.flags = MMC_DATA_WRITE; | |
490 | else | |
491 | data.flags = MMC_DATA_READ; | |
492 | ||
493 | /* data.flags must already be set before doing this. */ | |
494 | mmc_set_data_timeout(&data, card); | |
495 | ||
496 | /* Allow overriding the timeout_ns for empirical tuning. */ | |
497 | if (idata->ic.data_timeout_ns) | |
498 | data.timeout_ns = idata->ic.data_timeout_ns; | |
499 | ||
500 | if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { | |
501 | /* | |
502 | * Pretend this is a data transfer and rely on the | |
503 | * host driver to compute timeout. When all host | |
504 | * drivers support cmd.cmd_timeout for R1B, this | |
505 | * can be changed to: | |
506 | * | |
507 | * mrq.data = NULL; | |
508 | * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; | |
509 | */ | |
510 | data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; | |
511 | } | |
512 | ||
513 | mrq.data = &data; | |
514 | } | |
515 | ||
516 | mrq.cmd = &cmd; | |
517 | ||
97548575 | 518 | err = mmc_blk_part_switch(card, target_part); |
8d1e977d | 519 | if (err) |
a5f5774c | 520 | return err; |
8d1e977d | 521 | |
cb87ea28 JC |
522 | if (idata->ic.is_acmd) { |
523 | err = mmc_app_cmd(card->host, card); | |
524 | if (err) | |
a5f5774c | 525 | return err; |
cb87ea28 JC |
526 | } |
527 | ||
97548575 | 528 | if (idata->rpmb) { |
a44f7cb9 WS |
529 | sbc.opcode = MMC_SET_BLOCK_COUNT; |
530 | /* | |
531 | * We don't do any blockcount validation because the max size | |
532 | * may be increased by a future standard. We just copy the | |
533 | * 'Reliable Write' bit here. | |
534 | */ | |
535 | sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31)); | |
536 | sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; | |
537 | mrq.sbc = &sbc; | |
8d1e977d LP |
538 | } |
539 | ||
a82e484e | 540 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && |
55c2b8b9 | 541 | (cmd.opcode == MMC_SWITCH)) |
4f111d04 | 542 | return mmc_sanitize(card, idata->ic.cmd_timeout_ms); |
775a9362 | 543 | |
cb87ea28 JC |
544 | mmc_wait_for_req(card->host, &mrq); |
545 | ||
546 | if (cmd.error) { | |
547 | dev_err(mmc_dev(card->host), "%s: cmd error %d\n", | |
548 | __func__, cmd.error); | |
a5f5774c | 549 | return cmd.error; |
cb87ea28 JC |
550 | } |
551 | if (data.error) { | |
552 | dev_err(mmc_dev(card->host), "%s: data error %d\n", | |
553 | __func__, data.error); | |
a5f5774c | 554 | return data.error; |
cb87ea28 JC |
555 | } |
556 | ||
a0e95766 BS |
557 | /* |
558 | * Make sure the cache of the PARTITION_CONFIG register and | |
559 | * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write | |
560 | * changed it successfully. | |
561 | */ | |
562 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) && | |
563 | (cmd.opcode == MMC_SWITCH)) { | |
564 | struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); | |
565 | u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg); | |
566 | ||
567 | /* | |
568 | * Update cache so the next mmc_blk_part_switch call operates | |
569 | * on up-to-date data. | |
570 | */ | |
571 | card->ext_csd.part_config = value; | |
572 | main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK; | |
573 | } | |
574 | ||
aea0440a AA |
575 | /* |
576 | * Make sure to update CACHE_CTRL in case it was changed. The cache | |
577 | * will get turned back on if the card is re-initialized, e.g. | |
578 | * suspend/resume or hw reset in recovery. | |
579 | */ | |
580 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) && | |
581 | (cmd.opcode == MMC_SWITCH)) { | |
582 | u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1; | |
583 | ||
584 | card->ext_csd.cache_ctrl = value; | |
585 | } | |
586 | ||
cb87ea28 JC |
587 | /* |
588 | * According to the SD specs, some commands require a delay after | |
589 | * issuing the command. | |
590 | */ | |
591 | if (idata->ic.postsleep_min_us) | |
592 | usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); | |
593 | ||
a5f5774c | 594 | memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); |
cb87ea28 | 595 | |
6246d7c9 | 596 | if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { |
8d1e977d | 597 | /* |
a0d4c7eb | 598 | * Ensure RPMB/R1B command has completed by polling CMD13 |
8d1e977d LP |
599 | * "Send Status". |
600 | */ | |
a0d4c7eb | 601 | err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL); |
8d1e977d LP |
602 | } |
603 | ||
a5f5774c JH |
604 | return err; |
605 | } | |
606 | ||
2fe20bae | 607 | static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, |
97548575 LW |
608 | struct mmc_ioc_cmd __user *ic_ptr, |
609 | struct mmc_rpmb_data *rpmb) | |
a5f5774c JH |
610 | { |
611 | struct mmc_blk_ioc_data *idata; | |
3ecd8cf2 | 612 | struct mmc_blk_ioc_data *idatas[1]; |
614f0388 | 613 | struct mmc_queue *mq; |
a5f5774c | 614 | struct mmc_card *card; |
b093410c | 615 | int err = 0, ioc_err = 0; |
614f0388 | 616 | struct request *req; |
a5f5774c JH |
617 | |
618 | idata = mmc_blk_ioctl_copy_from_user(ic_ptr); | |
619 | if (IS_ERR(idata)) | |
620 | return PTR_ERR(idata); | |
97548575 LW |
621 | /* This will be NULL on non-RPMB ioctl()s */ |
622 | idata->rpmb = rpmb; | |
a5f5774c | 623 | |
a5f5774c JH |
624 | card = md->queue.card; |
625 | if (IS_ERR(card)) { | |
626 | err = PTR_ERR(card); | |
627 | goto cmd_done; | |
628 | } | |
629 | ||
614f0388 LW |
630 | /* |
631 | * Dispatch the ioctl() into the block request queue. | |
632 | */ | |
633 | mq = &md->queue; | |
634 | req = blk_get_request(mq->queue, | |
ff005a06 | 635 | idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); |
fb8e456e AH |
636 | if (IS_ERR(req)) { |
637 | err = PTR_ERR(req); | |
638 | goto cmd_done; | |
639 | } | |
3ecd8cf2 | 640 | idatas[0] = idata; |
97548575 LW |
641 | req_to_mmc_queue_req(req)->drv_op = |
642 | rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; | |
69f7599e | 643 | req_to_mmc_queue_req(req)->drv_op_data = idatas; |
3ecd8cf2 | 644 | req_to_mmc_queue_req(req)->ioc_count = 1; |
684da762 | 645 | blk_execute_rq(NULL, req, 0); |
0493f6fe | 646 | ioc_err = req_to_mmc_queue_req(req)->drv_op_result; |
b093410c | 647 | err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); |
614f0388 | 648 | blk_put_request(req); |
a5f5774c | 649 | |
cb87ea28 | 650 | cmd_done: |
cb87ea28 JC |
651 | kfree(idata->buf); |
652 | kfree(idata); | |
b093410c | 653 | return ioc_err ? ioc_err : err; |
cb87ea28 JC |
654 | } |
655 | ||
2fe20bae | 656 | static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, |
97548575 LW |
657 | struct mmc_ioc_multi_cmd __user *user, |
658 | struct mmc_rpmb_data *rpmb) | |
a5f5774c JH |
659 | { |
660 | struct mmc_blk_ioc_data **idata = NULL; | |
661 | struct mmc_ioc_cmd __user *cmds = user->cmds; | |
662 | struct mmc_card *card; | |
3ecd8cf2 | 663 | struct mmc_queue *mq; |
b093410c | 664 | int i, err = 0, ioc_err = 0; |
a5f5774c | 665 | __u64 num_of_cmds; |
3ecd8cf2 | 666 | struct request *req; |
a5f5774c JH |
667 | |
668 | if (copy_from_user(&num_of_cmds, &user->num_of_cmds, | |
669 | sizeof(num_of_cmds))) | |
670 | return -EFAULT; | |
671 | ||
aab2ee03 GU |
672 | if (!num_of_cmds) |
673 | return 0; | |
674 | ||
a5f5774c JH |
675 | if (num_of_cmds > MMC_IOC_MAX_CMDS) |
676 | return -EINVAL; | |
677 | ||
678 | idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL); | |
679 | if (!idata) | |
680 | return -ENOMEM; | |
681 | ||
682 | for (i = 0; i < num_of_cmds; i++) { | |
683 | idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]); | |
684 | if (IS_ERR(idata[i])) { | |
685 | err = PTR_ERR(idata[i]); | |
686 | num_of_cmds = i; | |
687 | goto cmd_err; | |
688 | } | |
97548575 LW |
689 | /* This will be NULL on non-RPMB ioctl()s */ |
690 | idata[i]->rpmb = rpmb; | |
a5f5774c JH |
691 | } |
692 | ||
a5f5774c JH |
693 | card = md->queue.card; |
694 | if (IS_ERR(card)) { | |
695 | err = PTR_ERR(card); | |
2fe20bae | 696 | goto cmd_err; |
a5f5774c JH |
697 | } |
698 | ||
a5f5774c | 699 | |
3ecd8cf2 LW |
700 | /* |
701 | * Dispatch the ioctl()s into the block request queue. | |
702 | */ | |
703 | mq = &md->queue; | |
704 | req = blk_get_request(mq->queue, | |
ff005a06 | 705 | idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); |
fb8e456e AH |
706 | if (IS_ERR(req)) { |
707 | err = PTR_ERR(req); | |
708 | goto cmd_err; | |
709 | } | |
97548575 LW |
710 | req_to_mmc_queue_req(req)->drv_op = |
711 | rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; | |
69f7599e | 712 | req_to_mmc_queue_req(req)->drv_op_data = idata; |
3ecd8cf2 | 713 | req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; |
684da762 | 714 | blk_execute_rq(NULL, req, 0); |
0493f6fe | 715 | ioc_err = req_to_mmc_queue_req(req)->drv_op_result; |
a5f5774c JH |
716 | |
717 | /* copy to user if data and response */ | |
b093410c | 718 | for (i = 0; i < num_of_cmds && !err; i++) |
a5f5774c | 719 | err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]); |
a5f5774c | 720 | |
3ecd8cf2 LW |
721 | blk_put_request(req); |
722 | ||
a5f5774c JH |
723 | cmd_err: |
724 | for (i = 0; i < num_of_cmds; i++) { | |
725 | kfree(idata[i]->buf); | |
726 | kfree(idata[i]); | |
727 | } | |
728 | kfree(idata); | |
b093410c | 729 | return ioc_err ? ioc_err : err; |
a5f5774c JH |
730 | } |
731 | ||
61fe0e2b LW |
732 | static int mmc_blk_check_blkdev(struct block_device *bdev) |
733 | { | |
734 | /* | |
735 | * The caller must have CAP_SYS_RAWIO, and must be calling this on the | |
736 | * whole block device, not on a partition. This prevents overspray | |
737 | * between sibling partitions. | |
738 | */ | |
fa01b1e9 | 739 | if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev)) |
61fe0e2b LW |
740 | return -EPERM; |
741 | return 0; | |
742 | } | |
743 | ||
cb87ea28 JC |
744 | static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, |
745 | unsigned int cmd, unsigned long arg) | |
746 | { | |
2fe20bae | 747 | struct mmc_blk_data *md; |
61fe0e2b LW |
748 | int ret; |
749 | ||
a5f5774c JH |
750 | switch (cmd) { |
751 | case MMC_IOC_CMD: | |
61fe0e2b LW |
752 | ret = mmc_blk_check_blkdev(bdev); |
753 | if (ret) | |
754 | return ret; | |
2fe20bae LW |
755 | md = mmc_blk_get(bdev->bd_disk); |
756 | if (!md) | |
757 | return -EINVAL; | |
758 | ret = mmc_blk_ioctl_cmd(md, | |
97548575 LW |
759 | (struct mmc_ioc_cmd __user *)arg, |
760 | NULL); | |
2fe20bae LW |
761 | mmc_blk_put(md); |
762 | return ret; | |
a5f5774c | 763 | case MMC_IOC_MULTI_CMD: |
61fe0e2b LW |
764 | ret = mmc_blk_check_blkdev(bdev); |
765 | if (ret) | |
766 | return ret; | |
2fe20bae LW |
767 | md = mmc_blk_get(bdev->bd_disk); |
768 | if (!md) | |
769 | return -EINVAL; | |
770 | ret = mmc_blk_ioctl_multi_cmd(md, | |
97548575 LW |
771 | (struct mmc_ioc_multi_cmd __user *)arg, |
772 | NULL); | |
2fe20bae LW |
773 | mmc_blk_put(md); |
774 | return ret; | |
a5f5774c JH |
775 | default: |
776 | return -EINVAL; | |
777 | } | |
cb87ea28 JC |
778 | } |
779 | ||
780 | #ifdef CONFIG_COMPAT | |
781 | static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, | |
782 | unsigned int cmd, unsigned long arg) | |
783 | { | |
784 | return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); | |
785 | } | |
786 | #endif | |
787 | ||
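For reference, the handlers above are reached from user space through the MMC_IOC_CMD and MMC_IOC_MULTI_CMD ioctls issued on the whole mmcblk device node; mmc_blk_check_blkdev() rejects partitions and callers without CAP_SYS_RAWIO. A rough user-space sketch, assuming an eMMC that answers CMD8 (SEND_EXT_CSD) with one 512-byte block; the response/command flag values mirror the kernel's internal encoding, in the same way mmc-utils defines them:

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

/* Response/command type flags, mirroring the kernel's encoding. */
#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_OPCODE	(1 << 4)
#define MMC_CMD_ADTC	(1 << 5)
#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)

int main(void)
{
	unsigned char ext_csd[512];
	struct mmc_ioc_cmd ic;
	int fd = open("/dev/mmcblk0", O_RDWR);	/* whole device, not a partition */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&ic, 0, sizeof(ic));
	ic.opcode = 8;			/* MMC_SEND_EXT_CSD (eMMC only) */
	ic.arg = 0;
	ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	ic.blksz = 512;
	ic.blocks = 1;
	ic.write_flag = 0;		/* read from the card */
	mmc_ioc_cmd_set_data(ic, ext_csd);

	if (ioctl(fd, MMC_IOC_CMD, &ic))
		perror("MMC_IOC_CMD");
	else
		printf("EXT_CSD revision: %u\n", ext_csd[192]);

	close(fd);
	return 0;
}
```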
83d5cde4 | 788 | static const struct block_device_operations mmc_bdops = { |
a5a1561f AV |
789 | .open = mmc_blk_open, |
790 | .release = mmc_blk_release, | |
a885c8c4 | 791 | .getgeo = mmc_blk_getgeo, |
1da177e4 | 792 | .owner = THIS_MODULE, |
cb87ea28 JC |
793 | .ioctl = mmc_blk_ioctl, |
794 | #ifdef CONFIG_COMPAT | |
795 | .compat_ioctl = mmc_blk_compat_ioctl, | |
796 | #endif | |
1da177e4 LT |
797 | }; |
798 | ||
025e3d5f AH |
799 | static int mmc_blk_part_switch_pre(struct mmc_card *card, |
800 | unsigned int part_type) | |
801 | { | |
802 | int ret = 0; | |
803 | ||
804 | if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) { | |
805 | if (card->ext_csd.cmdq_en) { | |
806 | ret = mmc_cmdq_disable(card); | |
807 | if (ret) | |
808 | return ret; | |
809 | } | |
810 | mmc_retune_pause(card->host); | |
811 | } | |
812 | ||
813 | return ret; | |
814 | } | |
815 | ||
816 | static int mmc_blk_part_switch_post(struct mmc_card *card, | |
817 | unsigned int part_type) | |
818 | { | |
819 | int ret = 0; | |
820 | ||
821 | if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) { | |
822 | mmc_retune_unpause(card->host); | |
823 | if (card->reenable_cmdq && !card->ext_csd.cmdq_en) | |
824 | ret = mmc_cmdq_enable(card); | |
825 | } | |
826 | ||
827 | return ret; | |
828 | } | |
829 | ||
371a689f | 830 | static inline int mmc_blk_part_switch(struct mmc_card *card, |
1f797edc | 831 | unsigned int part_type) |
371a689f | 832 | { |
025e3d5f | 833 | int ret = 0; |
fc95e30b | 834 | struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); |
0d7d85ca | 835 | |
1f797edc | 836 | if (main_md->part_curr == part_type) |
371a689f AW |
837 | return 0; |
838 | ||
839 | if (mmc_card_mmc(card)) { | |
0d7d85ca AH |
840 | u8 part_config = card->ext_csd.part_config; |
841 | ||
1f797edc | 842 | ret = mmc_blk_part_switch_pre(card, part_type); |
025e3d5f AH |
843 | if (ret) |
844 | return ret; | |
57da0c04 | 845 | |
0d7d85ca | 846 | part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; |
1f797edc | 847 | part_config |= part_type; |
371a689f AW |
848 | |
849 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
0d7d85ca | 850 | EXT_CSD_PART_CONFIG, part_config, |
371a689f | 851 | card->ext_csd.part_time); |
57da0c04 | 852 | if (ret) { |
1f797edc | 853 | mmc_blk_part_switch_post(card, part_type); |
371a689f | 854 | return ret; |
57da0c04 | 855 | } |
0d7d85ca AH |
856 | |
857 | card->ext_csd.part_config = part_config; | |
57da0c04 | 858 | |
025e3d5f | 859 | ret = mmc_blk_part_switch_post(card, main_md->part_curr); |
67716327 | 860 | } |
371a689f | 861 | |
1f797edc | 862 | main_md->part_curr = part_type; |
025e3d5f | 863 | return ret; |
371a689f AW |
864 | } |
865 | ||
169f03a0 | 866 | static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks) |
ec5a19dd PO |
867 | { |
868 | int err; | |
051913da BD |
869 | u32 result; |
870 | __be32 *blocks; | |
ec5a19dd | 871 | |
c7836d15 MY |
872 | struct mmc_request mrq = {}; |
873 | struct mmc_command cmd = {}; | |
874 | struct mmc_data data = {}; | |
ec5a19dd PO |
875 | |
876 | struct scatterlist sg; | |
877 | ||
ec5a19dd PO |
878 | cmd.opcode = MMC_APP_CMD; |
879 | cmd.arg = card->rca << 16; | |
7213d175 | 880 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; |
ec5a19dd PO |
881 | |
882 | err = mmc_wait_for_cmd(card->host, &cmd, 0); | |
7213d175 | 883 | if (err) |
169f03a0 | 884 | return err; |
7213d175 | 885 | if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) |
169f03a0 | 886 | return -EIO; |
ec5a19dd PO |
887 | |
888 | memset(&cmd, 0, sizeof(struct mmc_command)); | |
889 | ||
890 | cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; | |
891 | cmd.arg = 0; | |
7213d175 | 892 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; |
ec5a19dd | 893 | |
ec5a19dd PO |
894 | data.blksz = 4; |
895 | data.blocks = 1; | |
896 | data.flags = MMC_DATA_READ; | |
897 | data.sg = &sg; | |
898 | data.sg_len = 1; | |
d380443c | 899 | mmc_set_data_timeout(&data, card); |
ec5a19dd | 900 | |
ec5a19dd PO |
901 | mrq.cmd = &cmd; |
902 | mrq.data = &data; | |
903 | ||
051913da BD |
904 | blocks = kmalloc(4, GFP_KERNEL); |
905 | if (!blocks) | |
169f03a0 | 906 | return -ENOMEM; |
051913da BD |
907 | |
908 | sg_init_one(&sg, blocks, 4); | |
ec5a19dd PO |
909 | |
910 | mmc_wait_for_req(card->host, &mrq); | |
911 | ||
051913da BD |
912 | result = ntohl(*blocks); |
913 | kfree(blocks); | |
914 | ||
17b0429d | 915 | if (cmd.error || data.error) |
169f03a0 LW |
916 | return -EIO; |
917 | ||
918 | *written_blocks = result; | |
ec5a19dd | 919 | |
169f03a0 | 920 | return 0; |
ec5a19dd PO |
921 | } |
922 | ||
92c0a0cc AH |
923 | static unsigned int mmc_blk_clock_khz(struct mmc_host *host) |
924 | { | |
925 | if (host->actual_clock) | |
926 | return host->actual_clock / 1000; | |
927 | ||
928 | /* Clock may be subject to a divisor, fudge it by a factor of 2. */ | |
929 | if (host->ios.clock) | |
930 | return host->ios.clock / 2000; | |
931 | ||
932 | /* How can there be no clock */ | |
933 | WARN_ON_ONCE(1); | |
934 | return 100; /* 100 kHz is minimum possible value */ | |
935 | } | |
936 | ||
937 | static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host, | |
938 | struct mmc_data *data) | |
939 | { | |
940 | unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000); | |
941 | unsigned int khz; | |
942 | ||
943 | if (data->timeout_clks) { | |
944 | khz = mmc_blk_clock_khz(host); | |
945 | ms += DIV_ROUND_UP(data->timeout_clks, khz); | |
946 | } | |
947 | ||
948 | return ms; | |
949 | } | |
950 | ||
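A worked example with made-up numbers: for data->timeout_ns = 100,000,000 and data->timeout_clks = 50,000 on a host reporting actual_clock = 50 MHz, mmc_blk_clock_khz() returns 50,000, so the result is DIV_ROUND_UP(100000000, 1000000) + DIV_ROUND_UP(50000, 50000) = 100 + 1 = 101 ms.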
67716327 AH |
951 | static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, |
952 | int type) | |
953 | { | |
954 | int err; | |
955 | ||
956 | if (md->reset_done & type) | |
957 | return -EEXIST; | |
958 | ||
959 | md->reset_done |= type; | |
960 | err = mmc_hw_reset(host); | |
961 | /* Ensure we switch back to the correct partition */ | |
fefdd3c9 | 962 | if (err) { |
fc95e30b UH |
963 | struct mmc_blk_data *main_md = |
964 | dev_get_drvdata(&host->card->dev); | |
67716327 AH |
965 | int part_err; |
966 | ||
967 | main_md->part_curr = main_md->part_type; | |
1f797edc | 968 | part_err = mmc_blk_part_switch(host->card, md->part_type); |
67716327 AH |
969 | if (part_err) { |
970 | /* | |
971 | * We have failed to get back into the correct | |
972 | * partition, so we need to abort the whole request. | |
973 | */ | |
974 | return -ENODEV; | |
975 | } | |
976 | } | |
977 | return err; | |
978 | } | |
979 | ||
980 | static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) | |
981 | { | |
982 | md->reset_done &= ~type; | |
983 | } | |
984 | ||
5ec12396 LW |
985 | /* |
986 | * The non-block commands come back from the block layer after it has queued |
987 | * and processed them along with all other requests, and then they get issued |
988 | * in this function. |
989 | */ | |
990 | static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) | |
991 | { | |
992 | struct mmc_queue_req *mq_rq; | |
993 | struct mmc_card *card = mq->card; | |
994 | struct mmc_blk_data *md = mq->blkdata; | |
69f7599e | 995 | struct mmc_blk_ioc_data **idata; |
97548575 | 996 | bool rpmb_ioctl; |
627c3ccf LW |
997 | u8 **ext_csd; |
998 | u32 status; | |
0493f6fe | 999 | int ret; |
5ec12396 LW |
1000 | int i; |
1001 | ||
1002 | mq_rq = req_to_mmc_queue_req(req); | |
97548575 | 1003 | rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB); |
5ec12396 LW |
1004 | |
1005 | switch (mq_rq->drv_op) { | |
1006 | case MMC_DRV_OP_IOCTL: | |
97548575 | 1007 | case MMC_DRV_OP_IOCTL_RPMB: |
69f7599e | 1008 | idata = mq_rq->drv_op_data; |
7432b49b | 1009 | for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { |
69f7599e | 1010 | ret = __mmc_blk_ioctl_cmd(card, md, idata[i]); |
0493f6fe | 1011 | if (ret) |
5ec12396 LW |
1012 | break; |
1013 | } | |
5ec12396 | 1014 | /* Always switch back to main area after RPMB access */ |
97548575 LW |
1015 | if (rpmb_ioctl) |
1016 | mmc_blk_part_switch(card, 0); | |
0493f6fe LW |
1017 | break; |
1018 | case MMC_DRV_OP_BOOT_WP: | |
1019 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, | |
1020 | card->ext_csd.boot_ro_lock | | |
1021 | EXT_CSD_BOOT_WP_B_PWR_WP_EN, | |
1022 | card->ext_csd.part_time); | |
1023 | if (ret) | |
1024 | pr_err("%s: Locking boot partition ro until next power on failed: %d\n", | |
1025 | md->disk->disk_name, ret); | |
1026 | else | |
1027 | card->ext_csd.boot_ro_lock |= | |
1028 | EXT_CSD_BOOT_WP_B_PWR_WP_EN; | |
5ec12396 | 1029 | break; |
627c3ccf LW |
1030 | case MMC_DRV_OP_GET_CARD_STATUS: |
1031 | ret = mmc_send_status(card, &status); | |
1032 | if (!ret) | |
1033 | ret = status; | |
1034 | break; | |
1035 | case MMC_DRV_OP_GET_EXT_CSD: | |
1036 | ext_csd = mq_rq->drv_op_data; | |
1037 | ret = mmc_get_ext_csd(card, ext_csd); | |
1038 | break; | |
5ec12396 | 1039 | default: |
0493f6fe LW |
1040 | pr_err("%s: unknown driver specific operation\n", |
1041 | md->disk->disk_name); | |
1042 | ret = -EINVAL; | |
5ec12396 LW |
1043 | break; |
1044 | } | |
0493f6fe | 1045 | mq_rq->drv_op_result = ret; |
0fbfd125 | 1046 | blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); |
5ec12396 LW |
1047 | } |
1048 | ||
df061588 | 1049 | static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) |
bd788c96 | 1050 | { |
7db3028e | 1051 | struct mmc_blk_data *md = mq->blkdata; |
bd788c96 | 1052 | struct mmc_card *card = md->queue.card; |
01904ff7 | 1053 | unsigned int from, nr; |
67716327 | 1054 | int err = 0, type = MMC_BLK_DISCARD; |
2a842aca | 1055 | blk_status_t status = BLK_STS_OK; |
bd788c96 | 1056 | |
bd788c96 | 1057 | if (!mmc_can_erase(card)) { |
2a842aca | 1058 | status = BLK_STS_NOTSUPP; |
8cb6ed17 | 1059 | goto fail; |
bd788c96 AH |
1060 | } |
1061 | ||
1062 | from = blk_rq_pos(req); | |
1063 | nr = blk_rq_sectors(req); | |
1064 | ||
164b50b3 GU |
1065 | do { |
1066 | err = 0; | |
1067 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | |
1068 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
1069 | INAND_CMD38_ARG_EXT_CSD, | |
01904ff7 | 1070 | card->erase_arg == MMC_TRIM_ARG ? |
164b50b3 GU |
1071 | INAND_CMD38_ARG_TRIM : |
1072 | INAND_CMD38_ARG_ERASE, | |
ad91619a | 1073 | card->ext_csd.generic_cmd6_time); |
164b50b3 GU |
1074 | } |
1075 | if (!err) | |
01904ff7 | 1076 | err = mmc_erase(card, from, nr, card->erase_arg); |
164b50b3 | 1077 | } while (err == -EIO && !mmc_blk_reset(md, card->host, type)); |
2a842aca CH |
1078 | if (err) |
1079 | status = BLK_STS_IOERR; | |
1080 | else | |
67716327 | 1081 | mmc_blk_reset_success(md, type); |
8cb6ed17 | 1082 | fail: |
0fbfd125 | 1083 | blk_mq_end_request(req, status); |
bd788c96 AH |
1084 | } |
1085 | ||
df061588 | 1086 | static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, |
49804548 AH |
1087 | struct request *req) |
1088 | { | |
7db3028e | 1089 | struct mmc_blk_data *md = mq->blkdata; |
49804548 | 1090 | struct mmc_card *card = md->queue.card; |
775a9362 | 1091 | unsigned int from, nr, arg; |
67716327 | 1092 | int err = 0, type = MMC_BLK_SECDISCARD; |
2a842aca | 1093 | blk_status_t status = BLK_STS_OK; |
49804548 | 1094 | |
775a9362 | 1095 | if (!(mmc_can_secure_erase_trim(card))) { |
2a842aca | 1096 | status = BLK_STS_NOTSUPP; |
49804548 AH |
1097 | goto out; |
1098 | } | |
1099 | ||
28302812 AH |
1100 | from = blk_rq_pos(req); |
1101 | nr = blk_rq_sectors(req); | |
1102 | ||
775a9362 ME |
1103 | if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) |
1104 | arg = MMC_SECURE_TRIM1_ARG; | |
1105 | else | |
1106 | arg = MMC_SECURE_ERASE_ARG; | |
d9ddd629 | 1107 | |
67716327 | 1108 | retry: |
6a7a6b45 AW |
1109 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
1110 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
1111 | INAND_CMD38_ARG_EXT_CSD, | |
1112 | arg == MMC_SECURE_TRIM1_ARG ? | |
1113 | INAND_CMD38_ARG_SECTRIM1 : | |
1114 | INAND_CMD38_ARG_SECERASE, | |
ad91619a | 1115 | card->ext_csd.generic_cmd6_time); |
6a7a6b45 | 1116 | if (err) |
28302812 | 1117 | goto out_retry; |
6a7a6b45 | 1118 | } |
28302812 | 1119 | |
49804548 | 1120 | err = mmc_erase(card, from, nr, arg); |
28302812 AH |
1121 | if (err == -EIO) |
1122 | goto out_retry; | |
2a842aca CH |
1123 | if (err) { |
1124 | status = BLK_STS_IOERR; | |
28302812 | 1125 | goto out; |
2a842aca | 1126 | } |
28302812 AH |
1127 | |
1128 | if (arg == MMC_SECURE_TRIM1_ARG) { | |
6a7a6b45 AW |
1129 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
1130 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
1131 | INAND_CMD38_ARG_EXT_CSD, | |
1132 | INAND_CMD38_ARG_SECTRIM2, | |
ad91619a | 1133 | card->ext_csd.generic_cmd6_time); |
6a7a6b45 | 1134 | if (err) |
28302812 | 1135 | goto out_retry; |
6a7a6b45 | 1136 | } |
28302812 | 1137 | |
49804548 | 1138 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); |
28302812 AH |
1139 | if (err == -EIO) |
1140 | goto out_retry; | |
2a842aca CH |
1141 | if (err) { |
1142 | status = BLK_STS_IOERR; | |
28302812 | 1143 | goto out; |
2a842aca | 1144 | } |
6a7a6b45 | 1145 | } |
28302812 | 1146 | |
28302812 AH |
1147 | out_retry: |
1148 | if (err && !mmc_blk_reset(md, card->host, type)) | |
67716327 AH |
1149 | goto retry; |
1150 | if (!err) | |
1151 | mmc_blk_reset_success(md, type); | |
28302812 | 1152 | out: |
0fbfd125 | 1153 | blk_mq_end_request(req, status); |
49804548 AH |
1154 | } |
1155 | ||
df061588 | 1156 | static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) |
f4c5522b | 1157 | { |
7db3028e | 1158 | struct mmc_blk_data *md = mq->blkdata; |
881d1c25 SJ |
1159 | struct mmc_card *card = md->queue.card; |
1160 | int ret = 0; | |
1161 | ||
1162 | ret = mmc_flush_cache(card); | |
0fbfd125 | 1163 | blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); |
f4c5522b AW |
1164 | } |
1165 | ||
1166 | /* | |
1167 | * Reformat current write as a reliable write, supporting | |
1168 | * both legacy and the enhanced reliable write MMC cards. | |
1169 | * In each transfer we'll handle only as much as a single | |
1170 | * reliable write can handle, thus finish the request in | |
1171 | * partial completions. | |
1172 | */ | |
d0c97cfb AW |
1173 | static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, |
1174 | struct mmc_card *card, | |
1175 | struct request *req) | |
f4c5522b | 1176 | { |
f4c5522b AW |
1177 | if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { |
1178 | /* Legacy mode imposes restrictions on transfers. */ | |
9cb38f7a | 1179 | if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors)) |
f4c5522b AW |
1180 | brq->data.blocks = 1; |
1181 | ||
1182 | if (brq->data.blocks > card->ext_csd.rel_sectors) | |
1183 | brq->data.blocks = card->ext_csd.rel_sectors; | |
1184 | else if (brq->data.blocks < card->ext_csd.rel_sectors) | |
1185 | brq->data.blocks = 1; | |
1186 | } | |
f4c5522b AW |
1187 | } |
1188 | ||
f47a1fe3 AH |
1189 | #define CMD_ERRORS_EXCL_OOR \ |
1190 | (R1_ADDRESS_ERROR | /* Misaligned address */ \ | |
4c2b8f26 RKAL |
1191 | R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ |
1192 | R1_WP_VIOLATION | /* Tried to write to protected block */ \ | |
a04e6bae | 1193 | R1_CARD_ECC_FAILED | /* Card ECC failed */ \ |
4c2b8f26 RKAL |
1194 | R1_CC_ERROR | /* Card controller error */ \ |
1195 | R1_ERROR) /* General/unknown error */ | |
1196 | ||
f47a1fe3 AH |
1197 | #define CMD_ERRORS \ |
1198 | (CMD_ERRORS_EXCL_OOR | \ | |
1199 | R1_OUT_OF_RANGE) /* Command argument out of range */ \ | |
1200 | ||
d83c2dba | 1201 | static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) |
a04e6bae | 1202 | { |
d83c2dba | 1203 | u32 val; |
a04e6bae | 1204 | |
d83c2dba SL |
1205 | /* |
1206 | * Per the SD specification(physical layer version 4.10)[1], | |
1207 | * section 4.3.3, it explicitly states that "When the last | |
1208 | * block of user area is read using CMD18, the host should | |
1209 | * ignore OUT_OF_RANGE error that may occur even the sequence | |
1210 | * is correct". And JESD84-B51 for eMMC also has a similar | |
1211 | * statement on section 6.8.3. | |
1212 | * | |
1213 | * Multiple block read/write could be done by either predefined | |
1214 | * method, namely CMD23, or open-ending mode. For open-ending mode, | |
1215 | * we should ignore the OUT_OF_RANGE error as it's normal behaviour. | |
1216 | * | |
1217 | * However the spec[1] doesn't tell us whether we should also | |
1218 | * ignore that for the predefined method. But per the spec[1], section |
1219 | * 4.15 Set Block Count Command, it says "If illegal block count |
1220 | * is set, out of range error will be indicated during read/write |
1221 | * operation (For example, data transfer is stopped at user area |
1222 | * boundary)." In other words, we could expect an out of range error |
1223 | * in the response for the following CMD18/25. And if the argument of |
1224 | * CMD23 plus the argument of CMD18/25 exceeds the max number of blocks, |
1225 | * we could also expect to get a -ETIMEDOUT or any error number from |
1226 | * the host drivers due to a missing data response (for write) or data (for |
1227 | * read), as the card will stop the data transfer by itself per the |
1228 | * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode. |
1229 | */ | |
1230 | ||
1231 | if (!brq->stop.error) { | |
1232 | bool oor_with_open_end; | |
1233 | /* If there is no error yet, check R1 response */ | |
1234 | ||
1235 | val = brq->stop.resp[0] & CMD_ERRORS; | |
1236 | oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc; | |
1237 | ||
1238 | if (val && !oor_with_open_end) | |
1239 | brq->stop.error = -EIO; | |
1240 | } | |
a04e6bae WS |
1241 | } |
1242 | ||
ca5717f7 | 1243 | static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, |
d3377c01 AH |
1244 | int disable_multi, bool *do_rel_wr_p, |
1245 | bool *do_data_tag_p) | |
1da177e4 | 1246 | { |
ca5717f7 AH |
1247 | struct mmc_blk_data *md = mq->blkdata; |
1248 | struct mmc_card *card = md->queue.card; | |
54d49d77 | 1249 | struct mmc_blk_request *brq = &mqrq->brq; |
67e69d52 | 1250 | struct request *req = mmc_queue_req_to_req(mqrq); |
d3377c01 | 1251 | bool do_rel_wr, do_data_tag; |
1da177e4 | 1252 | |
f4c5522b AW |
1253 | /* |
1254 | * Reliable writes are used to implement Forced Unit Access and | |
d3df0465 | 1255 | * are supported only on MMCs. |
f4c5522b | 1256 | */ |
d3377c01 AH |
1257 | do_rel_wr = (req->cmd_flags & REQ_FUA) && |
1258 | rq_data_dir(req) == WRITE && | |
1259 | (md->flags & MMC_BLK_REL_WR); | |
f4c5522b | 1260 | |
54d49d77 | 1261 | memset(brq, 0, sizeof(struct mmc_blk_request)); |
ca5717f7 | 1262 | |
93f1c150 EB |
1263 | mmc_crypto_prepare_req(mqrq); |
1264 | ||
54d49d77 | 1265 | brq->mrq.data = &brq->data; |
93482b3d | 1266 | brq->mrq.tag = req->tag; |
1da177e4 | 1267 | |
54d49d77 PF |
1268 | brq->stop.opcode = MMC_STOP_TRANSMISSION; |
1269 | brq->stop.arg = 0; | |
ca5717f7 AH |
1270 | |
1271 | if (rq_data_dir(req) == READ) { | |
1272 | brq->data.flags = MMC_DATA_READ; | |
1273 | brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; | |
1274 | } else { | |
1275 | brq->data.flags = MMC_DATA_WRITE; | |
1276 | brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; | |
1277 | } | |
1278 | ||
1279 | brq->data.blksz = 512; | |
54d49d77 | 1280 | brq->data.blocks = blk_rq_sectors(req); |
93482b3d AH |
1281 | brq->data.blk_addr = blk_rq_pos(req); |
1282 | ||
1283 | /* | |
1284 | * The command queue supports 2 priorities: "high" (1) and "simple" (0). | |
1285 | * The eMMC will give "high" priority tasks priority over "simple" | |
1286 | * priority tasks. Here we always set "simple" priority by not setting | |
1287 | * MMC_DATA_PRIO. | |
1288 | */ | |
6a79e391 | 1289 | |
54d49d77 PF |
1290 | /* |
1291 | * The block layer doesn't support all sector count | |
1292 | * restrictions, so we need to be prepared for too big | |
1293 | * requests. | |
1294 | */ | |
1295 | if (brq->data.blocks > card->host->max_blk_count) | |
1296 | brq->data.blocks = card->host->max_blk_count; | |
1da177e4 | 1297 | |
2bf22b39 | 1298 | if (brq->data.blocks > 1) { |
41591b38 CB |
1299 | /* |
1300 | * Some SD cards in SPI mode return a CRC error or even lock up | |
1301 | * completely when trying to read the last block using a | |
1302 | * multiblock read command. | |
1303 | */ | |
1304 | if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) && | |
1305 | (blk_rq_pos(req) + blk_rq_sectors(req) == | |
1306 | get_capacity(md->disk))) | |
1307 | brq->data.blocks--; | |
1308 | ||
2bf22b39 PW |
1309 | /* |
1310 | * After a read error, we redo the request one sector | |
1311 | * at a time in order to accurately determine which | |
1312 | * sectors can be read successfully. | |
1313 | */ | |
1314 | if (disable_multi) | |
1315 | brq->data.blocks = 1; | |
1316 | ||
2e47e842 KM |
1317 | /* |
1318 | * Some controllers have HW issues while operating | |
1319 | * in multiple I/O mode | |
1320 | */ | |
1321 | if (card->host->ops->multi_io_quirk) | |
1322 | brq->data.blocks = card->host->ops->multi_io_quirk(card, | |
1323 | (rq_data_dir(req) == READ) ? | |
1324 | MMC_DATA_READ : MMC_DATA_WRITE, | |
1325 | brq->data.blocks); | |
2bf22b39 | 1326 | } |
d0c97cfb | 1327 | |
93482b3d | 1328 | if (do_rel_wr) { |
ca5717f7 | 1329 | mmc_apply_rel_rw(brq, card, req); |
93482b3d AH |
1330 | brq->data.flags |= MMC_DATA_REL_WR; |
1331 | } | |
ca5717f7 AH |
1332 | |
1333 | /* | |
1334 | * Data tag is used only when writing metadata, to speed |
1335 | * up the write and any subsequent read of this metadata. |
1336 | */ | |
d3377c01 AH |
1337 | do_data_tag = card->ext_csd.data_tag_unit_size && |
1338 | (req->cmd_flags & REQ_META) && | |
1339 | (rq_data_dir(req) == WRITE) && | |
1340 | ((brq->data.blocks * brq->data.blksz) >= | |
1341 | card->ext_csd.data_tag_unit_size); | |
ca5717f7 | 1342 | |
93482b3d AH |
1343 | if (do_data_tag) |
1344 | brq->data.flags |= MMC_DATA_DAT_TAG; | |
1345 | ||
ca5717f7 AH |
1346 | mmc_set_data_timeout(&brq->data, card); |
1347 | ||
1348 | brq->data.sg = mqrq->sg; | |
1349 | brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); | |
1350 | ||
1351 | /* | |
1352 | * Adjust the sg list so it is the same size as the | |
1353 | * request. | |
1354 | */ | |
1355 | if (brq->data.blocks != blk_rq_sectors(req)) { | |
1356 | int i, data_size = brq->data.blocks << 9; | |
1357 | struct scatterlist *sg; | |
1358 | ||
1359 | for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { | |
1360 | data_size -= sg->length; | |
1361 | if (data_size <= 0) { | |
1362 | sg->length += data_size; | |
1363 | i++; | |
1364 | break; | |
1365 | } | |
1366 | } | |
1367 | brq->data.sg_len = i; | |
1368 | } | |
1369 | ||
d3377c01 AH |
1370 | if (do_rel_wr_p) |
1371 | *do_rel_wr_p = do_rel_wr; | |
1372 | ||
1373 | if (do_data_tag_p) | |
1374 | *do_data_tag_p = do_data_tag; | |
ca5717f7 AH |
1375 | } |
1376 | ||
1e8e55b6 AH |
1377 | #define MMC_CQE_RETRIES 2 |
1378 | ||
1379 | static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) | |
1380 | { | |
1381 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1382 | struct mmc_request *mrq = &mqrq->brq.mrq; | |
1383 | struct request_queue *q = req->q; | |
1384 | struct mmc_host *host = mq->card->host; | |
e6bfb1bf | 1385 | enum mmc_issue_type issue_type = mmc_issue_type(mq, req); |
1e8e55b6 AH |
1386 | unsigned long flags; |
1387 | bool put_card; | |
1388 | int err; | |
1389 | ||
1390 | mmc_cqe_post_req(host, mrq); | |
1391 | ||
1392 | if (mrq->cmd && mrq->cmd->error) | |
1393 | err = mrq->cmd->error; | |
1394 | else if (mrq->data && mrq->data->error) | |
1395 | err = mrq->data->error; | |
1396 | else | |
1397 | err = 0; | |
1398 | ||
1399 | if (err) { | |
1400 | if (mqrq->retries++ < MMC_CQE_RETRIES) | |
1401 | blk_mq_requeue_request(req, true); | |
1402 | else | |
1403 | blk_mq_end_request(req, BLK_STS_IOERR); | |
1404 | } else if (mrq->data) { | |
1405 | if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered)) | |
1406 | blk_mq_requeue_request(req, true); | |
1407 | else | |
1408 | __blk_mq_end_request(req, BLK_STS_OK); | |
1409 | } else { | |
1410 | blk_mq_end_request(req, BLK_STS_OK); | |
1411 | } | |
1412 | ||
f5d72c5c | 1413 | spin_lock_irqsave(&mq->lock, flags); |
1e8e55b6 | 1414 | |
e6bfb1bf | 1415 | mq->in_flight[issue_type] -= 1; |
1e8e55b6 AH |
1416 | |
1417 | put_card = (mmc_tot_in_flight(mq) == 0); | |
1418 | ||
1419 | mmc_cqe_check_busy(mq); | |
1420 | ||
f5d72c5c | 1421 | spin_unlock_irqrestore(&mq->lock, flags); |
1e8e55b6 AH |
1422 | |
1423 | if (!mq->cqe_busy) | |
1424 | blk_mq_run_hw_queues(q, true); | |
1425 | ||
1426 | if (put_card) | |
1427 | mmc_put_card(mq->card, &mq->ctx); | |
1428 | } | |
1429 | ||
1430 | void mmc_blk_cqe_recovery(struct mmc_queue *mq) | |
1431 | { | |
1432 | struct mmc_card *card = mq->card; | |
1433 | struct mmc_host *host = card->host; | |
1434 | int err; | |
1435 | ||
1436 | pr_debug("%s: CQE recovery start\n", mmc_hostname(host)); | |
1437 | ||
1438 | err = mmc_cqe_recovery(host); | |
1439 | if (err) | |
1440 | mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); | |
1441 | else | |
1442 | mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); | |
1443 | ||
1444 | pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); | |
1445 | } | |
1446 | ||
1447 | static void mmc_blk_cqe_req_done(struct mmc_request *mrq) | |
1448 | { | |
1449 | struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, | |
1450 | brq.mrq); | |
1451 | struct request *req = mmc_queue_req_to_req(mqrq); | |
1452 | struct request_queue *q = req->q; | |
1453 | struct mmc_queue *mq = q->queuedata; | |
1454 | ||
1455 | /* | |
1456 | * Block layer timeouts race with completions which means the normal | |
1457 | * completion path cannot be used during recovery. | |
1458 | */ | |
1459 | if (mq->in_recovery) | |
1460 | mmc_blk_cqe_complete_rq(mq, req); | |
15f73f5b | 1461 | else if (likely(!blk_should_fake_timeout(req->q))) |
1e8e55b6 AH |
1462 | blk_mq_complete_request(req); |
1463 | } | |
1464 | ||
1465 | static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq) | |
1466 | { | |
1467 | mrq->done = mmc_blk_cqe_req_done; | |
1468 | mrq->recovery_notifier = mmc_cqe_recovery_notifier; | |
1469 | ||
1470 | return mmc_cqe_start_req(host, mrq); | |
1471 | } | |
1472 | ||
1473 | static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq, | |
1474 | struct request *req) | |
1475 | { | |
1476 | struct mmc_blk_request *brq = &mqrq->brq; | |
1477 | ||
1478 | memset(brq, 0, sizeof(*brq)); | |
1479 | ||
1480 | brq->mrq.cmd = &brq->cmd; | |
1481 | brq->mrq.tag = req->tag; | |
1482 | ||
1483 | return &brq->mrq; | |
1484 | } | |
1485 | ||
1486 | static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req) | |
1487 | { | |
1488 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1489 | struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req); | |
1490 | ||
1491 | mrq->cmd->opcode = MMC_SWITCH; | |
1492 | mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | | |
1493 | (EXT_CSD_FLUSH_CACHE << 16) | | |
1494 | (1 << 8) | | |
1495 | EXT_CSD_CMD_SET_NORMAL; | |
1496 | mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B; | |
1497 | ||
1498 | return mmc_blk_cqe_start_req(mq->card->host, mrq); | |
1499 | } | |
1500 | ||
511ce378 BW |
1501 | static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req) |
1502 | { | |
1503 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1504 | struct mmc_host *host = mq->card->host; | |
1505 | int err; | |
1506 | ||
1507 | mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); | |
1508 | mqrq->brq.mrq.done = mmc_blk_hsq_req_done; | |
1509 | mmc_pre_req(host, &mqrq->brq.mrq); | |
1510 | ||
1511 | err = mmc_cqe_start_req(host, &mqrq->brq.mrq); | |
1512 | if (err) | |
1513 | mmc_post_req(host, &mqrq->brq.mrq, err); | |
1514 | ||
1515 | return err; | |
1516 | } | |
1517 | ||
1e8e55b6 AH |
1518 | static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req) |
1519 | { | |
1520 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
511ce378 BW |
1521 | struct mmc_host *host = mq->card->host; |
1522 | ||
1523 | if (host->hsq_enabled) | |
1524 | return mmc_blk_hsq_issue_rw_rq(mq, req); | |
1e8e55b6 AH |
1525 | |
1526 | mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL); | |
1527 | ||
1528 | return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq); | |
1529 | } | |
1530 | ||
ca5717f7 AH |
1531 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
1532 | struct mmc_card *card, | |
1533 | int disable_multi, | |
1534 | struct mmc_queue *mq) | |
1535 | { | |
1536 | u32 readcmd, writecmd; | |
1537 | struct mmc_blk_request *brq = &mqrq->brq; | |
67e69d52 | 1538 | struct request *req = mmc_queue_req_to_req(mqrq); |
ca5717f7 AH |
1539 | struct mmc_blk_data *md = mq->blkdata; |
1540 | bool do_rel_wr, do_data_tag; | |
1541 | ||
1542 | mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag); | |
1543 | ||
1544 | brq->mrq.cmd = &brq->cmd; | |
1545 | ||
1546 | brq->cmd.arg = blk_rq_pos(req); | |
1547 | if (!mmc_card_blockaddr(card)) | |
1548 | brq->cmd.arg <<= 9; | |
1549 | brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; | |
1550 | ||
54d49d77 PF |
1551 | if (brq->data.blocks > 1 || do_rel_wr) { |
1552 | /* SPI multiblock writes terminate using a special | |
1553 | * token, not a STOP_TRANSMISSION request. | |
d0c97cfb | 1554 | */ |
54d49d77 PF |
1555 | if (!mmc_host_is_spi(card->host) || |
1556 | rq_data_dir(req) == READ) | |
1557 | brq->mrq.stop = &brq->stop; | |
1558 | readcmd = MMC_READ_MULTIPLE_BLOCK; | |
1559 | writecmd = MMC_WRITE_MULTIPLE_BLOCK; | |
1560 | } else { | |
1561 | brq->mrq.stop = NULL; | |
1562 | readcmd = MMC_READ_SINGLE_BLOCK; | |
1563 | writecmd = MMC_WRITE_BLOCK; | |
1564 | } | |
ca5717f7 | 1565 | brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd; |
4265900e | 1566 | |
54d49d77 PF |
1567 | /* |
1568 | * Pre-defined multi-block transfers are preferable to | |
1569 | * open-ended ones (and necessary for reliable writes). | |
1570 | * However, it is not sufficient to just send CMD23, | |
1571 | * and avoid the final CMD12, as on an error condition | |
1572 | * CMD12 (stop) needs to be sent anyway. This, coupled | |
1573 | * with Auto-CMD23 enhancements provided by some | |
1574 | * hosts, means that the complexity of dealing | |
1575 | * with this is best left to the host. If CMD23 is | |
1576 | * supported by card and host, we'll fill sbc in and let | |
1577 | * the host deal with handling it correctly. This means | |
1578 | * that for hosts that don't expose MMC_CAP_CMD23, no | |
1579 | * change of behavior will be observed. | |
1580 | * | |
1581 | * N.B.: Some MMC cards experience performance degradation. | |
1582 | * We'll avoid using CMD23-bounded multiblock writes for | |
1583 | * these, while retaining features like reliable writes. | |
1584 | */ | |
4265900e SD |
1585 | if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && |
1586 | (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || | |
1587 | do_data_tag)) { | |
54d49d77 PF |
1588 | brq->sbc.opcode = MMC_SET_BLOCK_COUNT; |
1589 | brq->sbc.arg = brq->data.blocks | | |
4265900e SD |
1590 | (do_rel_wr ? (1 << 31) : 0) | |
1591 | (do_data_tag ? (1 << 29) : 0); | |
54d49d77 PF |
1592 | brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; |
1593 | brq->mrq.sbc = &brq->sbc; | |
1594 | } | |
54d49d77 | 1595 | } |
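/*
 * Worked example (hypothetical request, following the sbc setup above): an
 * 8-block reliable write with the data tag set would give CMD23 the argument
 *
 *   8 | (1 << 31) | (1 << 29) = 0xA0000008
 *
 * where bit 31 requests a reliable write, bit 29 sets the data tag, and the
 * low bits carry the block count.
 */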
6a79e391 | 1596 | |
81196976 | 1597 | #define MMC_MAX_RETRIES 5 |
7eb43d53 | 1598 | #define MMC_DATA_RETRIES 2 |
81196976 AH |
1599 | #define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1) |
1600 | ||
7eb43d53 AH |
1601 | static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout) |
1602 | { | |
1603 | struct mmc_command cmd = { | |
1604 | .opcode = MMC_STOP_TRANSMISSION, | |
1605 | .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC, | |
1606 | /* Some hosts wait for busy anyway, so provide a busy timeout */ | |
1607 | .busy_timeout = timeout, | |
1608 | }; | |
1609 | ||
1610 | return mmc_wait_for_cmd(card->host, &cmd, 5); | |
1611 | } | |
1612 | ||
1613 | static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) | |
1614 | { | |
1615 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1616 | struct mmc_blk_request *brq = &mqrq->brq; | |
1617 | unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data); | |
1618 | int err; | |
1619 | ||
1620 | mmc_retune_hold_now(card->host); | |
1621 | ||
1622 | mmc_blk_send_stop(card, timeout); | |
1623 | ||
3869468e | 1624 | err = card_busy_detect(card, timeout, NULL); |
7eb43d53 AH |
1625 | |
1626 | mmc_retune_release(card->host); | |
1627 | ||
1628 | return err; | |
1629 | } | |
1630 | ||
81196976 AH |
1631 | #define MMC_READ_SINGLE_RETRIES 2 |
1632 | ||
1633 | /* Single sector read during recovery */ | |
1634 | static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) | |
1635 | { | |
1636 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1637 | struct mmc_request *mrq = &mqrq->brq.mrq; | |
1638 | struct mmc_card *card = mq->card; | |
1639 | struct mmc_host *host = card->host; | |
1640 | blk_status_t error = BLK_STS_OK; | |
1641 | int retries = 0; | |
1642 | ||
1643 | do { | |
1644 | u32 status; | |
1645 | int err; | |
1646 | ||
1647 | mmc_blk_rw_rq_prep(mqrq, card, 1, mq); | |
1648 | ||
1649 | mmc_wait_for_req(host, mrq); | |
1650 | ||
1651 | err = mmc_send_status(card, &status); | |
1652 | if (err) | |
1653 | goto error_exit; | |
1654 | ||
1655 | if (!mmc_host_is_spi(host) && | |
40c96853 | 1656 | !mmc_ready_for_data(status)) { |
7eb43d53 | 1657 | err = mmc_blk_fix_state(card, req); |
81196976 AH |
1658 | if (err) |
1659 | goto error_exit; | |
1660 | } | |
1661 | ||
1662 | if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES) | |
1663 | continue; | |
1664 | ||
1665 | retries = 0; | |
1666 | ||
1667 | if (mrq->cmd->error || | |
1668 | mrq->data->error || | |
1669 | (!mmc_host_is_spi(host) && | |
1670 | (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS))) | |
1671 | error = BLK_STS_IOERR; | |
1672 | else | |
1673 | error = BLK_STS_OK; | |
1674 | ||
1675 | } while (blk_update_request(req, error, 512)); | |
1676 | ||
1677 | return; | |
1678 | ||
1679 | error_exit: | |
1680 | mrq->data->bytes_xfered = 0; | |
1681 | blk_update_request(req, BLK_STS_IOERR, 512); | |
1682 | /* Let it try the remaining request again */ | |
1683 | if (mqrq->retries > MMC_MAX_RETRIES - 1) | |
1684 | mqrq->retries = MMC_MAX_RETRIES - 1; | |
1685 | } | |
1686 | ||
7eb43d53 AH |
1687 | static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq) |
1688 | { | |
1689 | return !!brq->mrq.sbc; | |
1690 | } | |
1691 | ||
1692 | static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq) | |
1693 | { | |
1694 | return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR; | |
1695 | } | |
1696 | ||
1697 | /* | |
1698 | * Check for errors the host controller driver might not have seen, such as | |
1699 | * response mode errors or invalid card state. | |
1700 | */ | |
1701 | static bool mmc_blk_status_error(struct request *req, u32 status) | |
1702 | { | |
1703 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1704 | struct mmc_blk_request *brq = &mqrq->brq; | |
1705 | struct mmc_queue *mq = req->q->queuedata; | |
1706 | u32 stop_err_bits; | |
1707 | ||
1708 | if (mmc_host_is_spi(mq->card->host)) | |
aa950144 | 1709 | return false; |
7eb43d53 AH |
1710 | |
1711 | stop_err_bits = mmc_blk_stop_err_bits(brq); | |
1712 | ||
1713 | return brq->cmd.resp[0] & CMD_ERRORS || | |
1714 | brq->stop.resp[0] & stop_err_bits || | |
1715 | status & stop_err_bits || | |
40c96853 | 1716 | (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status)); |
7eb43d53 AH |
1717 | } |
1718 | ||
1719 | static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq) | |
1720 | { | |
1721 | return !brq->sbc.error && !brq->cmd.error && | |
1722 | !(brq->cmd.resp[0] & CMD_ERRORS); | |
1723 | } | |
1724 | ||
1725 | /* | |
1726 | * Requests are completed by mmc_blk_mq_complete_rq(), which applies a | |
1727 | * simple policy: | |
1728 | * 1. A request that has transferred at least some data is considered | |
1729 | * successful and will be requeued if there is remaining data to | |
1730 | * transfer. | |
1731 | * 2. Otherwise the number of retries is incremented and the request | |
1732 | * will be requeued if there are remaining retries. | |
1733 | * 3. Otherwise the request will be errored out. | |
1734 | * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and | |
1735 | * mqrq->retries. So there are only 4 possible actions here: | |
1736 | * 1. do not accept the bytes_xfered value i.e. set it to zero | |
1737 | * 2. change mqrq->retries to determine the number of retries | |
1738 | * 3. try to reset the card | |
1739 | * 4. read one sector at a time | |
1740 | */ | |
81196976 AH |
1741 | static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) |
1742 | { | |
1743 | int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; | |
1744 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1745 | struct mmc_blk_request *brq = &mqrq->brq; | |
1746 | struct mmc_blk_data *md = mq->blkdata; | |
1747 | struct mmc_card *card = mq->card; | |
7eb43d53 AH |
1748 | u32 status; |
1749 | u32 blocks; | |
1750 | int err; | |
81196976 | 1751 | |
7eb43d53 AH |
1752 | /* |
1753 | * There may be errors the host driver has not seen. Set the number of | |
1754 | * bytes transferred to zero in that case. | |
1755 | */ | |
1756 | err = __mmc_send_status(card, &status, 0); | |
1757 | if (err || mmc_blk_status_error(req, status)) | |
1758 | brq->data.bytes_xfered = 0; | |
81196976 AH |
1759 | |
1760 | mmc_retune_release(card->host); | |
1761 | ||
1762 | /* | |
7eb43d53 AH |
1763 | * Try again to get the status. This also provides an opportunity for |
1764 | * re-tuning. | |
81196976 | 1765 | */ |
7eb43d53 AH |
1766 | if (err) |
1767 | err = __mmc_send_status(card, &status, 0); | |
81196976 | 1768 | |
7eb43d53 AH |
1769 | /* |
1770 | * If the card has been removed, there is nothing more to do once the | |
1771 | * number of bytes transferred has been updated. | |
1772 | */ | |
1773 | if (err && mmc_detect_card_removed(card->host)) | |
1774 | return; | |
81196976 | 1775 | |
7eb43d53 AH |
1776 | /* Try to get back to "tran" state */ |
1777 | if (!mmc_host_is_spi(mq->card->host) && | |
40c96853 | 1778 | (err || !mmc_ready_for_data(status))) |
7eb43d53 AH |
1779 | err = mmc_blk_fix_state(mq->card, req); |
1780 | ||
1781 | /* | |
1782 | * Special case for SD cards where the card might record the number of | |
1783 | * blocks written. | |
1784 | */ | |
1785 | if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) && | |
1786 | rq_data_dir(req) == WRITE) { | |
1787 | if (mmc_sd_num_wr_blocks(card, &blocks)) | |
1788 | brq->data.bytes_xfered = 0; | |
1789 | else | |
1790 | brq->data.bytes_xfered = blocks << 9; | |
81196976 | 1791 | } |
7eb43d53 AH |
1792 | |
1793 | /* Reset if the card is in a bad state */ | |
1794 | if (!mmc_host_is_spi(mq->card->host) && | |
1795 | err && mmc_blk_reset(md, card->host, type)) { | |
1796 | pr_err("%s: recovery failed!\n", req->rq_disk->disk_name); | |
81196976 | 1797 | mqrq->retries = MMC_NO_RETRIES; |
7eb43d53 AH |
1798 | return; |
1799 | } | |
1800 | ||
1801 | /* | |
1802 | * If anything was done, just return and if there is anything remaining | |
1803 | * on the request it will get requeued. | |
1804 | */ | |
1805 | if (brq->data.bytes_xfered) | |
1806 | return; | |
1807 | ||
1808 | /* Reset before last retry */ | |
1809 | if (mqrq->retries + 1 == MMC_MAX_RETRIES) | |
1810 | mmc_blk_reset(md, card->host, type); | |
1811 | ||
1812 | /* Command errors fail fast, so use all MMC_MAX_RETRIES */ | |
1813 | if (brq->sbc.error || brq->cmd.error) | |
1814 | return; | |
1815 | ||
1816 | /* Reduce the remaining retries for data errors */ | |
1817 | if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) { | |
1818 | mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES; | |
1819 | return; | |
1820 | } | |
1821 | ||
1822 | /* FIXME: Missing single sector read for large sector size */ | |
1823 | if (!mmc_large_sector(card) && rq_data_dir(req) == READ && | |
1824 | brq->data.blocks > 1) { | |
1825 | /* Read one sector at a time */ | |
1826 | mmc_blk_read_single(mq, req); | |
1827 | return; | |
81196976 AH |
1828 | } |
1829 | } | |
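/*
 * Note on the retry budget used above: with MMC_MAX_RETRIES = 5 and
 * MMC_DATA_RETRIES = 2, clamping mqrq->retries up to
 * MMC_MAX_RETRIES - MMC_DATA_RETRIES = 3 leaves a data error with at most
 * 2 further attempts, whereas command errors keep the full budget of 5,
 * since mmc_blk_mq_complete_rq() requeues while
 * mqrq->retries++ < MMC_MAX_RETRIES.
 */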
1830 | ||
10f21df4 AH |
1831 | static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) |
1832 | { | |
1833 | mmc_blk_eval_resp_error(brq); | |
1834 | ||
1835 | return brq->sbc.error || brq->cmd.error || brq->stop.error || | |
1836 | brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; | |
1837 | } | |
1838 | ||
88a51646 AH |
1839 | static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) |
1840 | { | |
1841 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
f47a1fe3 | 1842 | u32 status = 0; |
88a51646 AH |
1843 | int err; |
1844 | ||
1845 | if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) | |
1846 | return 0; | |
1847 | ||
3869468e | 1848 | err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status); |
88a51646 | 1849 | |
f47a1fe3 AH |
1850 | /* |
1851 | * Do not assume data transferred correctly if there are any error bits | |
1852 | * set. | |
1853 | */ | |
1854 | if (status & mmc_blk_stop_err_bits(&mqrq->brq)) { | |
1855 | mqrq->brq.data.bytes_xfered = 0; | |
88a51646 AH |
1856 | err = err ? err : -EIO; |
1857 | } | |
1858 | ||
f47a1fe3 AH |
1859 | /* Copy the exception bit so it will be seen later on */ |
1860 | if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT) | |
1861 | mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT; | |
1862 | ||
88a51646 AH |
1863 | return err; |
1864 | } | |
1865 | ||
10f21df4 AH |
1866 | static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq, |
1867 | struct request *req) | |
1868 | { | |
1869 | int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; | |
1870 | ||
1871 | mmc_blk_reset_success(mq->blkdata, type); | |
1872 | } | |
1873 | ||
81196976 AH |
1874 | static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req) |
1875 | { | |
1876 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1877 | unsigned int nr_bytes = mqrq->brq.data.bytes_xfered; | |
1878 | ||
1879 | if (nr_bytes) { | |
1880 | if (blk_update_request(req, BLK_STS_OK, nr_bytes)) | |
1881 | blk_mq_requeue_request(req, true); | |
1882 | else | |
1883 | __blk_mq_end_request(req, BLK_STS_OK); | |
1884 | } else if (!blk_rq_bytes(req)) { | |
1885 | __blk_mq_end_request(req, BLK_STS_IOERR); | |
1886 | } else if (mqrq->retries++ < MMC_MAX_RETRIES) { | |
1887 | blk_mq_requeue_request(req, true); | |
1888 | } else { | |
1889 | if (mmc_card_removed(mq->card)) | |
1890 | req->rq_flags |= RQF_QUIET; | |
1891 | blk_mq_end_request(req, BLK_STS_IOERR); | |
1892 | } | |
1893 | } | |
1894 | ||
1895 | static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq, | |
1896 | struct mmc_queue_req *mqrq) | |
1897 | { | |
1898 | return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) && | |
1899 | (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT || | |
1900 | mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT); | |
1901 | } | |
1902 | ||
1903 | static void mmc_blk_urgent_bkops(struct mmc_queue *mq, | |
1904 | struct mmc_queue_req *mqrq) | |
1905 | { | |
1906 | if (mmc_blk_urgent_bkops_needed(mq, mqrq)) | |
0c204979 | 1907 | mmc_run_bkops(mq->card); |
81196976 AH |
1908 | } |
1909 | ||
511ce378 BW |
1910 | static void mmc_blk_hsq_req_done(struct mmc_request *mrq) |
1911 | { | |
1912 | struct mmc_queue_req *mqrq = | |
1913 | container_of(mrq, struct mmc_queue_req, brq.mrq); | |
1914 | struct request *req = mmc_queue_req_to_req(mqrq); | |
1915 | struct request_queue *q = req->q; | |
1916 | struct mmc_queue *mq = q->queuedata; | |
1917 | struct mmc_host *host = mq->card->host; | |
1918 | unsigned long flags; | |
1919 | ||
1920 | if (mmc_blk_rq_error(&mqrq->brq) || | |
1921 | mmc_blk_urgent_bkops_needed(mq, mqrq)) { | |
1922 | spin_lock_irqsave(&mq->lock, flags); | |
1923 | mq->recovery_needed = true; | |
1924 | mq->recovery_req = req; | |
1925 | spin_unlock_irqrestore(&mq->lock, flags); | |
1926 | ||
1927 | host->cqe_ops->cqe_recovery_start(host); | |
1928 | ||
1929 | schedule_work(&mq->recovery_work); | |
1930 | return; | |
1931 | } | |
1932 | ||
1933 | mmc_blk_rw_reset_success(mq, req); | |
1934 | ||
1935 | /* | |
1936 | * Block layer timeouts race with completions which means the normal | |
1937 | * completion path cannot be used during recovery. | |
1938 | */ | |
1939 | if (mq->in_recovery) | |
1940 | mmc_blk_cqe_complete_rq(mq, req); | |
15f73f5b | 1941 | else if (likely(!blk_should_fake_timeout(req->q))) |
511ce378 BW |
1942 | blk_mq_complete_request(req); |
1943 | } | |
1944 | ||
81196976 AH |
1945 | void mmc_blk_mq_complete(struct request *req) |
1946 | { | |
1947 | struct mmc_queue *mq = req->q->queuedata; | |
407a1c57 | 1948 | struct mmc_host *host = mq->card->host; |
81196976 | 1949 | |
407a1c57 | 1950 | if (host->cqe_enabled) |
1e8e55b6 | 1951 | mmc_blk_cqe_complete_rq(mq, req); |
15f73f5b | 1952 | else if (likely(!blk_should_fake_timeout(req->q))) |
1e8e55b6 | 1953 | mmc_blk_mq_complete_rq(mq, req); |
81196976 AH |
1954 | } |
1955 | ||
1956 | static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, | |
1957 | struct request *req) | |
1958 | { | |
1959 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
88a51646 | 1960 | struct mmc_host *host = mq->card->host; |
81196976 | 1961 | |
88a51646 AH |
1962 | if (mmc_blk_rq_error(&mqrq->brq) || |
1963 | mmc_blk_card_busy(mq->card, req)) { | |
1964 | mmc_blk_mq_rw_recovery(mq, req); | |
1965 | } else { | |
1966 | mmc_blk_rw_reset_success(mq, req); | |
1967 | mmc_retune_release(host); | |
1968 | } | |
81196976 AH |
1969 | |
1970 | mmc_blk_urgent_bkops(mq, mqrq); | |
1971 | } | |
1972 | ||
1973 | static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) | |
1974 | { | |
81196976 AH |
1975 | unsigned long flags; |
1976 | bool put_card; | |
1977 | ||
f5d72c5c | 1978 | spin_lock_irqsave(&mq->lock, flags); |
81196976 AH |
1979 | |
1980 | mq->in_flight[mmc_issue_type(mq, req)] -= 1; | |
1981 | ||
1982 | put_card = (mmc_tot_in_flight(mq) == 0); | |
1983 | ||
f5d72c5c | 1984 | spin_unlock_irqrestore(&mq->lock, flags); |
81196976 AH |
1985 | |
1986 | if (put_card) | |
1987 | mmc_put_card(mq->card, &mq->ctx); | |
1988 | } | |
1989 | ||
1990 | static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) | |
1991 | { | |
1992 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1993 | struct mmc_request *mrq = &mqrq->brq.mrq; | |
1994 | struct mmc_host *host = mq->card->host; | |
1995 | ||
1996 | mmc_post_req(host, mrq, 0); | |
1997 | ||
10f21df4 AH |
1998 | /* |
1999 | * Block layer timeouts race with completions which means the normal | |
2000 | * completion path cannot be used during recovery. | |
2001 | */ | |
2002 | if (mq->in_recovery) | |
2003 | mmc_blk_mq_complete_rq(mq, req); | |
15f73f5b | 2004 | else if (likely(!blk_should_fake_timeout(req->q))) |
10f21df4 | 2005 | blk_mq_complete_request(req); |
81196976 AH |
2006 | |
2007 | mmc_blk_mq_dec_in_flight(mq, req); | |
2008 | } | |
2009 | ||
10f21df4 AH |
2010 | void mmc_blk_mq_recovery(struct mmc_queue *mq) |
2011 | { | |
2012 | struct request *req = mq->recovery_req; | |
2013 | struct mmc_host *host = mq->card->host; | |
2014 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
2015 | ||
2016 | mq->recovery_req = NULL; | |
2017 | mq->rw_wait = false; | |
2018 | ||
2019 | if (mmc_blk_rq_error(&mqrq->brq)) { | |
2020 | mmc_retune_hold_now(host); | |
2021 | mmc_blk_mq_rw_recovery(mq, req); | |
2022 | } | |
2023 | ||
2024 | mmc_blk_urgent_bkops(mq, mqrq); | |
2025 | ||
2026 | mmc_blk_mq_post_req(mq, req); | |
2027 | } | |
2028 | ||
81196976 AH |
2029 | static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq, |
2030 | struct request **prev_req) | |
2031 | { | |
10f21df4 AH |
2032 | if (mmc_host_done_complete(mq->card->host)) |
2033 | return; | |
2034 | ||
81196976 AH |
2035 | mutex_lock(&mq->complete_lock); |
2036 | ||
2037 | if (!mq->complete_req) | |
2038 | goto out_unlock; | |
2039 | ||
2040 | mmc_blk_mq_poll_completion(mq, mq->complete_req); | |
2041 | ||
2042 | if (prev_req) | |
2043 | *prev_req = mq->complete_req; | |
2044 | else | |
2045 | mmc_blk_mq_post_req(mq, mq->complete_req); | |
2046 | ||
2047 | mq->complete_req = NULL; | |
2048 | ||
2049 | out_unlock: | |
2050 | mutex_unlock(&mq->complete_lock); | |
2051 | } | |
2052 | ||
2053 | void mmc_blk_mq_complete_work(struct work_struct *work) | |
2054 | { | |
2055 | struct mmc_queue *mq = container_of(work, struct mmc_queue, | |
2056 | complete_work); | |
2057 | ||
2058 | mmc_blk_mq_complete_prev_req(mq, NULL); | |
2059 | } | |
2060 | ||
2061 | static void mmc_blk_mq_req_done(struct mmc_request *mrq) | |
2062 | { | |
2063 | struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, | |
2064 | brq.mrq); | |
2065 | struct request *req = mmc_queue_req_to_req(mqrq); | |
2066 | struct request_queue *q = req->q; | |
2067 | struct mmc_queue *mq = q->queuedata; | |
10f21df4 | 2068 | struct mmc_host *host = mq->card->host; |
81196976 | 2069 | unsigned long flags; |
81196976 | 2070 | |
10f21df4 AH |
2071 | if (!mmc_host_done_complete(host)) { |
2072 | bool waiting; | |
81196976 | 2073 | |
10f21df4 AH |
2074 | /* |
2075 | * We cannot complete the request in this context, so record | |
2076 | * that there is a request to complete, and that a following | |
2077 | * request does not need to wait (although it does need to | |
2078 | * complete complete_req first). | |
2079 | */ | |
f5d72c5c | 2080 | spin_lock_irqsave(&mq->lock, flags); |
10f21df4 AH |
2081 | mq->complete_req = req; |
2082 | mq->rw_wait = false; | |
2083 | waiting = mq->waiting; | |
f5d72c5c | 2084 | spin_unlock_irqrestore(&mq->lock, flags); |
10f21df4 AH |
2085 | |
2086 | /* | |
2087 | * If 'waiting' then the waiting task will complete this | |
2088 | * request, otherwise queue a work to do it. Note that | |
2089 | * complete_work may still race with the dispatch of a following | |
2090 | * request. | |
2091 | */ | |
2092 | if (waiting) | |
2093 | wake_up(&mq->wait); | |
2094 | else | |
dcf6e2e3 | 2095 | queue_work(mq->card->complete_wq, &mq->complete_work); |
10f21df4 AH |
2096 | |
2097 | return; | |
2098 | } | |
2099 | ||
2100 | /* Take the recovery path for errors or urgent background operations */ | |
2101 | if (mmc_blk_rq_error(&mqrq->brq) || | |
2102 | mmc_blk_urgent_bkops_needed(mq, mqrq)) { | |
f5d72c5c | 2103 | spin_lock_irqsave(&mq->lock, flags); |
10f21df4 AH |
2104 | mq->recovery_needed = true; |
2105 | mq->recovery_req = req; | |
f5d72c5c | 2106 | spin_unlock_irqrestore(&mq->lock, flags); |
81196976 | 2107 | wake_up(&mq->wait); |
10f21df4 AH |
2108 | schedule_work(&mq->recovery_work); |
2109 | return; | |
2110 | } | |
2111 | ||
2112 | mmc_blk_rw_reset_success(mq, req); | |
2113 | ||
2114 | mq->rw_wait = false; | |
2115 | wake_up(&mq->wait); | |
2116 | ||
2117 | mmc_blk_mq_post_req(mq, req); | |
81196976 AH |
2118 | } |
2119 | ||
2120 | static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) | |
2121 | { | |
81196976 AH |
2122 | unsigned long flags; |
2123 | bool done; | |
2124 | ||
2125 | /* | |
10f21df4 AH |
2126 | * Wait while there is another request in progress, but not if recovery |
2127 | * is needed. Also indicate whether there is a request waiting to start. | |
81196976 | 2128 | */ |
f5d72c5c | 2129 | spin_lock_irqsave(&mq->lock, flags); |
10f21df4 AH |
2130 | if (mq->recovery_needed) { |
2131 | *err = -EBUSY; | |
2132 | done = true; | |
2133 | } else { | |
2134 | done = !mq->rw_wait; | |
2135 | } | |
81196976 | 2136 | mq->waiting = !done; |
f5d72c5c | 2137 | spin_unlock_irqrestore(&mq->lock, flags); |
81196976 AH |
2138 | |
2139 | return done; | |
2140 | } | |
2141 | ||
2142 | static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req) | |
2143 | { | |
2144 | int err = 0; | |
2145 | ||
2146 | wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err)); | |
2147 | ||
2148 | /* Always complete the previous request if there is one */ | |
2149 | mmc_blk_mq_complete_prev_req(mq, prev_req); | |
2150 | ||
2151 | return err; | |
2152 | } | |
2153 | ||
2154 | static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq, | |
2155 | struct request *req) | |
2156 | { | |
2157 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
2158 | struct mmc_host *host = mq->card->host; | |
2159 | struct request *prev_req = NULL; | |
2160 | int err = 0; | |
2161 | ||
2162 | mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); | |
2163 | ||
2164 | mqrq->brq.mrq.done = mmc_blk_mq_req_done; | |
2165 | ||
2166 | mmc_pre_req(host, &mqrq->brq.mrq); | |
2167 | ||
2168 | err = mmc_blk_rw_wait(mq, &prev_req); | |
2169 | if (err) | |
2170 | goto out_post_req; | |
2171 | ||
2172 | mq->rw_wait = true; | |
2173 | ||
2174 | err = mmc_start_request(host, &mqrq->brq.mrq); | |
2175 | ||
2176 | if (prev_req) | |
2177 | mmc_blk_mq_post_req(mq, prev_req); | |
2178 | ||
10f21df4 | 2179 | if (err) |
81196976 | 2180 | mq->rw_wait = false; |
10f21df4 AH |
2181 | |
2182 | /* Release re-tuning here where there is no synchronization required */ | |
2183 | if (err || mmc_host_done_complete(host)) | |
81196976 | 2184 | mmc_retune_release(host); |
81196976 AH |
2185 | |
2186 | out_post_req: | |
2187 | if (err) | |
2188 | mmc_post_req(host, &mqrq->brq.mrq, err); | |
2189 | ||
2190 | return err; | |
2191 | } | |
2192 | ||
2193 | static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host) | |
2194 | { | |
407a1c57 | 2195 | if (host->cqe_enabled) |
1e8e55b6 AH |
2196 | return host->cqe_ops->cqe_wait_for_idle(host); |
2197 | ||
81196976 AH |
2198 | return mmc_blk_rw_wait(mq, NULL); |
2199 | } | |
2200 | ||
2201 | enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) | |
2202 | { | |
2203 | struct mmc_blk_data *md = mq->blkdata; | |
2204 | struct mmc_card *card = md->queue.card; | |
2205 | struct mmc_host *host = card->host; | |
2206 | int ret; | |
2207 | ||
2208 | ret = mmc_blk_part_switch(card, md->part_type); | |
2209 | if (ret) | |
2210 | return MMC_REQ_FAILED_TO_START; | |
2211 | ||
2212 | switch (mmc_issue_type(mq, req)) { | |
2213 | case MMC_ISSUE_SYNC: | |
2214 | ret = mmc_blk_wait_for_idle(mq, host); | |
2215 | if (ret) | |
2216 | return MMC_REQ_BUSY; | |
2217 | switch (req_op(req)) { | |
2218 | case REQ_OP_DRV_IN: | |
2219 | case REQ_OP_DRV_OUT: | |
2220 | mmc_blk_issue_drv_op(mq, req); | |
2221 | break; | |
2222 | case REQ_OP_DISCARD: | |
2223 | mmc_blk_issue_discard_rq(mq, req); | |
2224 | break; | |
2225 | case REQ_OP_SECURE_ERASE: | |
2226 | mmc_blk_issue_secdiscard_rq(mq, req); | |
2227 | break; | |
2228 | case REQ_OP_FLUSH: | |
2229 | mmc_blk_issue_flush(mq, req); | |
2230 | break; | |
2231 | default: | |
2232 | WARN_ON_ONCE(1); | |
2233 | return MMC_REQ_FAILED_TO_START; | |
2234 | } | |
2235 | return MMC_REQ_FINISHED; | |
1e8e55b6 | 2236 | case MMC_ISSUE_DCMD: |
81196976 AH |
2237 | case MMC_ISSUE_ASYNC: |
2238 | switch (req_op(req)) { | |
1e8e55b6 | 2239 | case REQ_OP_FLUSH: |
97fce126 AA |
2240 | if (!mmc_cache_enabled(host)) { |
2241 | blk_mq_end_request(req, BLK_STS_OK); | |
2242 | return MMC_REQ_FINISHED; | |
2243 | } | |
1e8e55b6 AH |
2244 | ret = mmc_blk_cqe_issue_flush(mq, req); |
2245 | break; | |
81196976 AH |
2246 | case REQ_OP_READ: |
2247 | case REQ_OP_WRITE: | |
407a1c57 | 2248 | if (host->cqe_enabled) |
1e8e55b6 AH |
2249 | ret = mmc_blk_cqe_issue_rw_rq(mq, req); |
2250 | else | |
2251 | ret = mmc_blk_mq_issue_rw_rq(mq, req); | |
81196976 AH |
2252 | break; |
2253 | default: | |
2254 | WARN_ON_ONCE(1); | |
2255 | ret = -EINVAL; | |
2256 | } | |
2257 | if (!ret) | |
2258 | return MMC_REQ_STARTED; | |
2259 | return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START; | |
2260 | default: | |
2261 | WARN_ON_ONCE(1); | |
2262 | return MMC_REQ_FAILED_TO_START; | |
2263 | } | |
2264 | } | |
2265 | ||
a6f6c96b RK |
2266 | static inline int mmc_blk_readonly(struct mmc_card *card) |
2267 | { | |
2268 | return mmc_card_readonly(card) || | |
2269 | !(card->csd.cmdclass & CCC_BLOCK_WRITE); | |
2270 | } | |
2271 | ||
371a689f AW |
2272 | static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, |
2273 | struct device *parent, | |
2274 | sector_t size, | |
2275 | bool default_ro, | |
add710ea JR |
2276 | const char *subname, |
2277 | int area_type) | |
1da177e4 LT |
2278 | { |
2279 | struct mmc_blk_data *md; | |
2280 | int devidx, ret; | |
ce999ed1 | 2281 | char cap_str[10]; |
1da177e4 | 2282 | |
a04848c7 | 2283 | devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL); |
e7b42769 SL |
2284 | if (devidx < 0) { |
2285 | /* | |
2286 | * We get -ENOSPC because there are no more available | |
2287 | * devidx. The reason may be that either userspace has not yet | |
2288 | * unmounted the partitions, which postpones mmc_blk_release() | |
2289 | * from being called, or the device has more partitions than | |
2290 | * we support. | |
2291 | */ | |
2292 | if (devidx == -ENOSPC) | |
2293 | dev_err(mmc_dev(card->host), | |
2294 | "no more device IDs available\n"); | |
2295 | ||
a04848c7 | 2296 | return ERR_PTR(devidx); |
e7b42769 | 2297 | } |
1da177e4 | 2298 | |
dd00cc48 | 2299 | md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); |
a6f6c96b RK |
2300 | if (!md) { |
2301 | ret = -ENOMEM; | |
2302 | goto out; | |
2303 | } | |
1da177e4 | 2304 | |
add710ea JR |
2305 | md->area_type = area_type; |
2306 | ||
a6f6c96b RK |
2307 | /* |
2308 | * Set the read-only status based on the supported commands | |
2309 | * and the write protect switch. | |
2310 | */ | |
2311 | md->read_only = mmc_blk_readonly(card); | |
1da177e4 | 2312 | |
5e71b7a6 | 2313 | md->disk = alloc_disk(perdev_minors); |
a6f6c96b RK |
2314 | if (md->disk == NULL) { |
2315 | ret = -ENOMEM; | |
2316 | goto err_kfree; | |
2317 | } | |
1da177e4 | 2318 | |
371a689f | 2319 | INIT_LIST_HEAD(&md->part); |
97548575 | 2320 | INIT_LIST_HEAD(&md->rpmbs); |
a6f6c96b | 2321 | md->usage = 1; |
1da177e4 | 2322 | |
f5d72c5c | 2323 | ret = mmc_init_queue(&md->queue, card); |
a6f6c96b RK |
2324 | if (ret) |
2325 | goto err_putdisk; | |
1da177e4 | 2326 | |
7db3028e | 2327 | md->queue.blkdata = md; |
d2b18394 | 2328 | |
41e3efd0 AH |
2329 | /* |
2330 | * Keep an extra reference to the queue so that we can shutdown the | |
2331 | * queue (i.e. call blk_cleanup_queue()) while there are still | |
2332 | * references to the 'md'. The corresponding blk_put_queue() is in | |
2333 | * mmc_blk_put(). | |
2334 | */ | |
2335 | if (!blk_get_queue(md->queue.queue)) { | |
2336 | mmc_cleanup_queue(&md->queue); | |
2361bfb0 | 2337 | ret = -ENODEV; |
41e3efd0 AH |
2338 | goto err_putdisk; |
2339 | } | |
2340 | ||
fe6b4c88 | 2341 | md->disk->major = MMC_BLOCK_MAJOR; |
5e71b7a6 | 2342 | md->disk->first_minor = devidx * perdev_minors; |
a6f6c96b RK |
2343 | md->disk->fops = &mmc_bdops; |
2344 | md->disk->private_data = md; | |
2345 | md->disk->queue = md->queue.queue; | |
307d8e6f | 2346 | md->parent = parent; |
371a689f | 2347 | set_disk_ro(md->disk, md->read_only || default_ro); |
382c55f8 | 2348 | md->disk->flags = GENHD_FL_EXT_DEVT; |
f5b4d71f | 2349 | if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT)) |
207b652c AG |
2350 | md->disk->flags |= GENHD_FL_NO_PART_SCAN |
2351 | | GENHD_FL_SUPPRESS_PARTITION_INFO; | |
a6f6c96b RK |
2352 | |
2353 | /* | |
2354 | * As discussed on lkml, GENHD_FL_REMOVABLE should: | |
2355 | * | |
2356 | * - be set for removable media with permanent block devices | |
2357 | * - be unset for removable block devices with permanent media | |
2358 | * | |
2359 | * Since MMC block devices clearly fall under the second | |
2360 | * case, we do not set GENHD_FL_REMOVABLE. Userspace | |
2361 | * should use the block device creation/destruction hotplug | |
2362 | * messages to tell when the card is present. | |
2363 | */ | |
2364 | ||
f06c9153 | 2365 | snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), |
9aaf3437 | 2366 | "mmcblk%u%s", card->host->index, subname ? subname : ""); |
a6f6c96b | 2367 | |
371a689f | 2368 | set_capacity(md->disk, size); |
d0c97cfb | 2369 | |
f0d89972 | 2370 | if (mmc_host_cmd23(card->host)) { |
0ed50abb DG |
2371 | if ((mmc_card_mmc(card) && |
2372 | card->csd.mmca_vsn >= CSD_SPEC_VER_3) || | |
f0d89972 AW |
2373 | (mmc_card_sd(card) && |
2374 | card->scr.cmds & SD_SCR_CMD23_SUPPORT)) | |
2375 | md->flags |= MMC_BLK_CMD23; | |
2376 | } | |
d0c97cfb AW |
2377 | |
2378 | if (mmc_card_mmc(card) && | |
2379 | md->flags & MMC_BLK_CMD23 && | |
2380 | ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || | |
2381 | card->ext_csd.rel_sectors)) { | |
2382 | md->flags |= MMC_BLK_REL_WR; | |
e9d5c746 | 2383 | blk_queue_write_cache(md->queue.queue, true, true); |
d0c97cfb AW |
2384 | } |
2385 | ||
ce999ed1 UH |
2386 | string_get_size((u64)size, 512, STRING_UNITS_2, |
2387 | cap_str, sizeof(cap_str)); | |
2388 | pr_info("%s: %s %s %s %s\n", | |
2389 | md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), | |
2390 | cap_str, md->read_only ? "(ro)" : ""); | |
2391 | ||
371a689f AW |
2392 | return md; |
2393 | ||
2394 | err_putdisk: | |
2395 | put_disk(md->disk); | |
2396 | err_kfree: | |
2397 | kfree(md); | |
2398 | out: | |
a04848c7 | 2399 | ida_simple_remove(&mmc_blk_ida, devidx); |
371a689f AW |
2400 | return ERR_PTR(ret); |
2401 | } | |
2402 | ||
2403 | static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) | |
2404 | { | |
2405 | sector_t size; | |
a6f6c96b | 2406 | |
85a18ad9 PO |
2407 | if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { |
2408 | /* | |
2409 | * The EXT_CSD sector count is in number of 512 byte | |
2410 | * sectors. | |
2411 | */ | |
371a689f | 2412 | size = card->ext_csd.sectors; |
85a18ad9 PO |
2413 | } else { |
2414 | /* | |
2415 | * The CSD capacity field is in units of read_blkbits. | |
2416 | * set_capacity takes units of 512 bytes. | |
2417 | */ | |
087de9ed KM |
2418 | size = (typeof(sector_t))card->csd.capacity |
2419 | << (card->csd.read_blkbits - 9); | |
85a18ad9 | 2420 | } |
371a689f | 2421 | |
7a30f2af | 2422 | return mmc_blk_alloc_req(card, &card->dev, size, false, NULL, |
add710ea | 2423 | MMC_BLK_DATA_AREA_MAIN); |
371a689f | 2424 | } |
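/*
 * Worked example (hypothetical CSD values): with csd.read_blkbits = 10
 * (1024-byte blocks) and csd.capacity = 4096, the size passed to
 * mmc_blk_alloc_req() above is
 *
 *   4096 << (10 - 9) = 8192 512-byte sectors (4 MiB)
 */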
a6f6c96b | 2425 | |
371a689f AW |
2426 | static int mmc_blk_alloc_part(struct mmc_card *card, |
2427 | struct mmc_blk_data *md, | |
2428 | unsigned int part_type, | |
2429 | sector_t size, | |
2430 | bool default_ro, | |
add710ea JR |
2431 | const char *subname, |
2432 | int area_type) | |
371a689f | 2433 | { |
371a689f AW |
2434 | struct mmc_blk_data *part_md; |
2435 | ||
2436 | part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, | |
add710ea | 2437 | subname, area_type); |
371a689f AW |
2438 | if (IS_ERR(part_md)) |
2439 | return PTR_ERR(part_md); | |
2440 | part_md->part_type = part_type; | |
2441 | list_add(&part_md->part, &md->part); | |
2442 | ||
371a689f AW |
2443 | return 0; |
2444 | } | |
2445 | ||
97548575 LW |
2446 | /** |
2447 | * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev | |
2448 | * @filp: the character device file | |
2449 | * @cmd: the ioctl() command | |
2450 | * @arg: the argument from userspace | |
2451 | * | |
2452 | * This will essentially just redirect the ioctl()s coming in to | |
2453 | * the main block device that spawned the RPMB character device. | |
2454 | */ | |
2455 | static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd, | |
2456 | unsigned long arg) | |
2457 | { | |
2458 | struct mmc_rpmb_data *rpmb = filp->private_data; | |
2459 | int ret; | |
2460 | ||
2461 | switch (cmd) { | |
2462 | case MMC_IOC_CMD: | |
2463 | ret = mmc_blk_ioctl_cmd(rpmb->md, | |
2464 | (struct mmc_ioc_cmd __user *)arg, | |
2465 | rpmb); | |
2466 | break; | |
2467 | case MMC_IOC_MULTI_CMD: | |
2468 | ret = mmc_blk_ioctl_multi_cmd(rpmb->md, | |
2469 | (struct mmc_ioc_multi_cmd __user *)arg, | |
2470 | rpmb); | |
2471 | break; | |
2472 | default: | |
2473 | ret = -EINVAL; | |
2474 | break; | |
2475 | } | |
2476 | ||
b25b750d | 2477 | return ret; |
97548575 LW |
2478 | } |
2479 | ||
2480 | #ifdef CONFIG_COMPAT | |
2481 | static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd, | |
2482 | unsigned long arg) | |
2483 | { | |
2484 | return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); | |
2485 | } | |
2486 | #endif | |
2487 | ||
2488 | static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp) | |
2489 | { | |
2490 | struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, | |
2491 | struct mmc_rpmb_data, chrdev); | |
2492 | ||
2493 | get_device(&rpmb->dev); | |
2494 | filp->private_data = rpmb; | |
1c87f735 | 2495 | mmc_blk_get(rpmb->md->disk); |
97548575 LW |
2496 | |
2497 | return nonseekable_open(inode, filp); | |
2498 | } | |
2499 | ||
2500 | static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) | |
2501 | { | |
2502 | struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, | |
2503 | struct mmc_rpmb_data, chrdev); | |
2504 | ||
1c87f735 | 2505 | mmc_blk_put(rpmb->md); |
202500d2 | 2506 | put_device(&rpmb->dev); |
97548575 LW |
2507 | |
2508 | return 0; | |
2509 | } | |
2510 | ||
2511 | static const struct file_operations mmc_rpmb_fileops = { | |
2512 | .release = mmc_rpmb_chrdev_release, | |
2513 | .open = mmc_rpmb_chrdev_open, | |
2514 | .owner = THIS_MODULE, | |
2515 | .llseek = no_llseek, | |
2516 | .unlocked_ioctl = mmc_rpmb_ioctl, | |
2517 | #ifdef CONFIG_COMPAT | |
2518 | .compat_ioctl = mmc_rpmb_ioctl_compat, | |
2519 | #endif | |
2520 | }; | |
2521 | ||
1c87f735 LW |
2522 | static void mmc_blk_rpmb_device_release(struct device *dev) |
2523 | { | |
2524 | struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); | |
2525 | ||
2526 | ida_simple_remove(&mmc_rpmb_ida, rpmb->id); | |
2527 | kfree(rpmb); | |
2528 | } | |
97548575 LW |
2529 | |
2530 | static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, | |
2531 | struct mmc_blk_data *md, | |
2532 | unsigned int part_index, | |
2533 | sector_t size, | |
2534 | const char *subname) | |
2535 | { | |
2536 | int devidx, ret; | |
2537 | char rpmb_name[DISK_NAME_LEN]; | |
2538 | char cap_str[10]; | |
2539 | struct mmc_rpmb_data *rpmb; | |
2540 | ||
2541 | /* This creates the minor number for the RPMB char device */ | |
2542 | devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL); | |
2543 | if (devidx < 0) | |
2544 | return devidx; | |
2545 | ||
2546 | rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL); | |
1c87f735 LW |
2547 | if (!rpmb) { |
2548 | ida_simple_remove(&mmc_rpmb_ida, devidx); | |
97548575 | 2549 | return -ENOMEM; |
1c87f735 | 2550 | } |
97548575 LW |
2551 | |
2552 | snprintf(rpmb_name, sizeof(rpmb_name), | |
2553 | "mmcblk%u%s", card->host->index, subname ? subname : ""); | |
2554 | ||
2555 | rpmb->id = devidx; | |
2556 | rpmb->part_index = part_index; | |
2557 | rpmb->dev.init_name = rpmb_name; | |
2558 | rpmb->dev.bus = &mmc_rpmb_bus_type; | |
2559 | rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id); | |
2560 | rpmb->dev.parent = &card->dev; | |
1c87f735 | 2561 | rpmb->dev.release = mmc_blk_rpmb_device_release; |
97548575 LW |
2562 | device_initialize(&rpmb->dev); |
2563 | dev_set_drvdata(&rpmb->dev, rpmb); | |
2564 | rpmb->md = md; | |
2565 | ||
2566 | cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops); | |
2567 | rpmb->chrdev.owner = THIS_MODULE; | |
2568 | ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev); | |
2569 | if (ret) { | |
2570 | pr_err("%s: could not add character device\n", rpmb_name); | |
1c87f735 | 2571 | goto out_put_device; |
97548575 LW |
2572 | } |
2573 | ||
2574 | list_add(&rpmb->node, &md->rpmbs); | |
2575 | ||
2576 | string_get_size((u64)size, 512, STRING_UNITS_2, | |
2577 | cap_str, sizeof(cap_str)); | |
2578 | ||
ce999ed1 UH |
2579 | pr_info("%s: %s %s %s, chardev (%d:%d)\n", |
2580 | rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str, | |
97548575 LW |
2581 | MAJOR(mmc_rpmb_devt), rpmb->id); |
2582 | ||
2583 | return 0; | |
2584 | ||
1c87f735 LW |
2585 | out_put_device: |
2586 | put_device(&rpmb->dev); | |
97548575 LW |
2587 | return ret; |
2588 | } | |
2589 | ||
2590 | static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb) | |
1c87f735 | 2591 | |
97548575 LW |
2592 | { |
2593 | cdev_device_del(&rpmb->chrdev, &rpmb->dev); | |
1c87f735 | 2594 | put_device(&rpmb->dev); |
97548575 LW |
2595 | } |
2596 | ||
e0c368d5 NJ |
2597 | /* MMC physical partitions consist of two boot partitions and | |
2598 | * up to four general purpose partitions. | |
2599 | * For each partition enabled in EXT_CSD a block device will be allocated | |
2600 | * to provide access to the partition. | |
2601 | */ | |
2602 | ||
371a689f AW |
2603 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) |
2604 | { | |
97548575 | 2605 | int idx, ret; |
371a689f AW |
2606 | |
2607 | if (!mmc_card_mmc(card)) | |
2608 | return 0; | |
2609 | ||
e0c368d5 | 2610 | for (idx = 0; idx < card->nr_parts; idx++) { |
97548575 LW |
2611 | if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) { |
2612 | /* | |
2613 | * RPMB partitions does not provide block access, they | |
2614 | * are only accessed using ioctl():s. Thus create | |
2615 | * special RPMB block devices that do not have a | |
2616 | * backing block queue for these. | |
2617 | */ | |
2618 | ret = mmc_blk_alloc_rpmb_part(card, md, | |
2619 | card->part[idx].part_cfg, | |
2620 | card->part[idx].size >> 9, | |
2621 | card->part[idx].name); | |
2622 | if (ret) | |
2623 | return ret; | |
2624 | } else if (card->part[idx].size) { | |
e0c368d5 NJ |
2625 | ret = mmc_blk_alloc_part(card, md, |
2626 | card->part[idx].part_cfg, | |
2627 | card->part[idx].size >> 9, | |
2628 | card->part[idx].force_ro, | |
add710ea JR |
2629 | card->part[idx].name, |
2630 | card->part[idx].area_type); | |
e0c368d5 NJ |
2631 | if (ret) |
2632 | return ret; | |
2633 | } | |
371a689f AW |
2634 | } |
2635 | ||
97548575 | 2636 | return 0; |
1da177e4 LT |
2637 | } |
2638 | ||
371a689f AW |
2639 | static void mmc_blk_remove_req(struct mmc_blk_data *md) |
2640 | { | |
add710ea JR |
2641 | struct mmc_card *card; |
2642 | ||
371a689f | 2643 | if (md) { |
fdfa20c1 PT |
2644 | /* |
2645 | * Flush remaining requests and free queues. It | |
2646 | * is freeing the queue that stops new requests | |
2647 | * from being accepted. | |
2648 | */ | |
8efb83a2 | 2649 | card = md->queue.card; |
371a689f AW |
2650 | if (md->disk->flags & GENHD_FL_UP) { |
2651 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | |
add710ea JR |
2652 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && |
2653 | card->ext_csd.boot_ro_lockable) | |
2654 | device_remove_file(disk_to_dev(md->disk), | |
2655 | &md->power_ro_lock); | |
371a689f | 2656 | |
371a689f AW |
2657 | del_gendisk(md->disk); |
2658 | } | |
57678e5a | 2659 | mmc_cleanup_queue(&md->queue); |
371a689f AW |
2660 | mmc_blk_put(md); |
2661 | } | |
2662 | } | |
2663 | ||
2664 | static void mmc_blk_remove_parts(struct mmc_card *card, | |
2665 | struct mmc_blk_data *md) | |
2666 | { | |
2667 | struct list_head *pos, *q; | |
2668 | struct mmc_blk_data *part_md; | |
97548575 | 2669 | struct mmc_rpmb_data *rpmb; |
371a689f | 2670 | |
97548575 LW |
2671 | /* Remove RPMB partitions */ |
2672 | list_for_each_safe(pos, q, &md->rpmbs) { | |
2673 | rpmb = list_entry(pos, struct mmc_rpmb_data, node); | |
2674 | list_del(pos); | |
2675 | mmc_blk_remove_rpmb_part(rpmb); | |
2676 | } | |
2677 | /* Remove block partitions */ | |
371a689f AW |
2678 | list_for_each_safe(pos, q, &md->part) { |
2679 | part_md = list_entry(pos, struct mmc_blk_data, part); | |
2680 | list_del(pos); | |
2681 | mmc_blk_remove_req(part_md); | |
2682 | } | |
2683 | } | |
2684 | ||
2685 | static int mmc_add_disk(struct mmc_blk_data *md) | |
2686 | { | |
2687 | int ret; | |
add710ea | 2688 | struct mmc_card *card = md->queue.card; |
371a689f | 2689 | |
fef912bf | 2690 | device_add_disk(md->parent, md->disk, NULL); |
371a689f AW |
2691 | md->force_ro.show = force_ro_show; |
2692 | md->force_ro.store = force_ro_store; | |
641c3187 | 2693 | sysfs_attr_init(&md->force_ro.attr); |
371a689f AW |
2694 | md->force_ro.attr.name = "force_ro"; |
2695 | md->force_ro.attr.mode = S_IRUGO | S_IWUSR; | |
2696 | ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); | |
2697 | if (ret) | |
add710ea JR |
2698 | goto force_ro_fail; |
2699 | ||
2700 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && | |
2701 | card->ext_csd.boot_ro_lockable) { | |
88187398 | 2702 | umode_t mode; |
add710ea JR |
2703 | |
2704 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) | |
2705 | mode = S_IRUGO; | |
2706 | else | |
2707 | mode = S_IRUGO | S_IWUSR; | |
2708 | ||
2709 | md->power_ro_lock.show = power_ro_lock_show; | |
2710 | md->power_ro_lock.store = power_ro_lock_store; | |
00d9ac08 | 2711 | sysfs_attr_init(&md->power_ro_lock.attr); |
add710ea JR |
2712 | md->power_ro_lock.attr.mode = mode; |
2713 | md->power_ro_lock.attr.name = | |
2714 | "ro_lock_until_next_power_on"; | |
2715 | ret = device_create_file(disk_to_dev(md->disk), | |
2716 | &md->power_ro_lock); | |
2717 | if (ret) | |
2718 | goto power_ro_lock_fail; | |
2719 | } | |
2720 | return ret; | |
2721 | ||
2722 | power_ro_lock_fail: | |
2723 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | |
2724 | force_ro_fail: | |
2725 | del_gendisk(md->disk); | |
371a689f AW |
2726 | |
2727 | return ret; | |
2728 | } | |
2729 | ||
627c3ccf LW |
2730 | #ifdef CONFIG_DEBUG_FS |
2731 | ||
2732 | static int mmc_dbg_card_status_get(void *data, u64 *val) | |
2733 | { | |
2734 | struct mmc_card *card = data; | |
2735 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); | |
2736 | struct mmc_queue *mq = &md->queue; | |
2737 | struct request *req; | |
2738 | int ret; | |
2739 | ||
2740 | /* Ask the block layer about the card status */ | |
ff005a06 | 2741 | req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0); |
fb8e456e AH |
2742 | if (IS_ERR(req)) |
2743 | return PTR_ERR(req); | |
627c3ccf | 2744 | req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; |
684da762 | 2745 | blk_execute_rq(NULL, req, 0); |
627c3ccf LW |
2746 | ret = req_to_mmc_queue_req(req)->drv_op_result; |
2747 | if (ret >= 0) { | |
2748 | *val = ret; | |
2749 | ret = 0; | |
2750 | } | |
34c089e8 | 2751 | blk_put_request(req); |
627c3ccf LW |
2752 | |
2753 | return ret; | |
2754 | } | |
f6a3d9d9 Y |
2755 | DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, |
2756 | NULL, "%08llx\n"); | |
627c3ccf LW |
2757 | |
2758 | /* That is two hex digits per byte * 512 bytes + 1 for the newline */ | |
2759 | #define EXT_CSD_STR_LEN 1025 | |
2760 | ||
2761 | static int mmc_ext_csd_open(struct inode *inode, struct file *filp) | |
2762 | { | |
2763 | struct mmc_card *card = inode->i_private; | |
2764 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); | |
2765 | struct mmc_queue *mq = &md->queue; | |
2766 | struct request *req; | |
2767 | char *buf; | |
2768 | ssize_t n = 0; | |
2769 | u8 *ext_csd; | |
2770 | int err, i; | |
2771 | ||
2772 | buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); | |
2773 | if (!buf) | |
2774 | return -ENOMEM; | |
2775 | ||
2776 | /* Ask the block layer for the EXT CSD */ | |
ff005a06 | 2777 | req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0); |
fb8e456e AH |
2778 | if (IS_ERR(req)) { |
2779 | err = PTR_ERR(req); | |
2780 | goto out_free; | |
2781 | } | |
627c3ccf LW |
2782 | req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; |
2783 | req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; | |
684da762 | 2784 | blk_execute_rq(NULL, req, 0); |
627c3ccf | 2785 | err = req_to_mmc_queue_req(req)->drv_op_result; |
34c089e8 | 2786 | blk_put_request(req); |
627c3ccf LW |
2787 | if (err) { |
2788 | pr_err("FAILED %d\n", err); | |
2789 | goto out_free; | |
2790 | } | |
2791 | ||
2792 | for (i = 0; i < 512; i++) | |
2793 | n += sprintf(buf + n, "%02x", ext_csd[i]); | |
2794 | n += sprintf(buf + n, "\n"); | |
2795 | ||
2796 | if (n != EXT_CSD_STR_LEN) { | |
2797 | err = -EINVAL; | |
0be55579 | 2798 | kfree(ext_csd); |
627c3ccf LW |
2799 | goto out_free; |
2800 | } | |
2801 | ||
2802 | filp->private_data = buf; | |
2803 | kfree(ext_csd); | |
2804 | return 0; | |
2805 | ||
2806 | out_free: | |
2807 | kfree(buf); | |
2808 | return err; | |
2809 | } | |
2810 | ||
2811 | static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, | |
2812 | size_t cnt, loff_t *ppos) | |
2813 | { | |
2814 | char *buf = filp->private_data; | |
2815 | ||
2816 | return simple_read_from_buffer(ubuf, cnt, ppos, | |
2817 | buf, EXT_CSD_STR_LEN); | |
2818 | } | |
2819 | ||
2820 | static int mmc_ext_csd_release(struct inode *inode, struct file *file) | |
2821 | { | |
2822 | kfree(file->private_data); | |
2823 | return 0; | |
2824 | } | |
2825 | ||
2826 | static const struct file_operations mmc_dbg_ext_csd_fops = { | |
2827 | .open = mmc_ext_csd_open, | |
2828 | .read = mmc_ext_csd_read, | |
2829 | .release = mmc_ext_csd_release, | |
2830 | .llseek = default_llseek, | |
2831 | }; | |
2832 | ||
f9f0da98 | 2833 | static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) |
627c3ccf LW |
2834 | { |
2835 | struct dentry *root; | |
2836 | ||
2837 | if (!card->debugfs_root) | |
2838 | return 0; | |
2839 | ||
2840 | root = card->debugfs_root; | |
2841 | ||
2842 | if (mmc_card_mmc(card) || mmc_card_sd(card)) { | |
f9f0da98 | 2843 | md->status_dentry = |
f6a3d9d9 Y |
2844 | debugfs_create_file_unsafe("status", 0400, root, |
2845 | card, | |
2846 | &mmc_dbg_card_status_fops); | |
f9f0da98 | 2847 | if (!md->status_dentry) |
627c3ccf LW |
2848 | return -EIO; |
2849 | } | |
2850 | ||
2851 | if (mmc_card_mmc(card)) { | |
f9f0da98 AH |
2852 | md->ext_csd_dentry = |
2853 | debugfs_create_file("ext_csd", S_IRUSR, root, card, | |
2854 | &mmc_dbg_ext_csd_fops); | |
2855 | if (!md->ext_csd_dentry) | |
627c3ccf LW |
2856 | return -EIO; |
2857 | } | |
2858 | ||
2859 | return 0; | |
2860 | } | |
2861 | ||
f9f0da98 AH |
2862 | static void mmc_blk_remove_debugfs(struct mmc_card *card, |
2863 | struct mmc_blk_data *md) | |
2864 | { | |
2865 | if (!card->debugfs_root) | |
2866 | return; | |
2867 | ||
2868 | if (!IS_ERR_OR_NULL(md->status_dentry)) { | |
2869 | debugfs_remove(md->status_dentry); | |
2870 | md->status_dentry = NULL; | |
2871 | } | |
2872 | ||
2873 | if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) { | |
2874 | debugfs_remove(md->ext_csd_dentry); | |
2875 | md->ext_csd_dentry = NULL; | |
2876 | } | |
2877 | } | |
627c3ccf LW |
2878 | |
2879 | #else | |
2880 | ||
f9f0da98 | 2881 | static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) |
627c3ccf LW |
2882 | { |
2883 | return 0; | |
2884 | } | |
2885 | ||
f9f0da98 AH |
2886 | static void mmc_blk_remove_debugfs(struct mmc_card *card, |
2887 | struct mmc_blk_data *md) | |
2888 | { | |
2889 | } | |
2890 | ||
627c3ccf LW |
2891 | #endif /* CONFIG_DEBUG_FS */ |
2892 | ||
96541bac | 2893 | static int mmc_blk_probe(struct mmc_card *card) |
1da177e4 | 2894 | { |
371a689f | 2895 | struct mmc_blk_data *md, *part_md; |
6f1d3247 | 2896 | int ret = 0; |
a7bbb573 | 2897 | |
912490db PO |
2898 | /* |
2899 | * Check that the card supports the command class(es) we need. | |
2900 | */ | |
2901 | if (!(card->csd.cmdclass & CCC_BLOCK_READ)) | |
1da177e4 LT |
2902 | return -ENODEV; |
2903 | ||
8c7cdbf9 | 2904 | mmc_fixup_device(card, mmc_blk_fixups); |
5204d00f | 2905 | |
dcf6e2e3 ZH |
2906 | card->complete_wq = alloc_workqueue("mmc_complete", |
2907 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); | |
1d848c28 | 2908 | if (!card->complete_wq) { |
dcf6e2e3 ZH |
2909 | pr_err("Failed to create mmc completion workqueue"); |
2910 | return -ENOMEM; | |
2911 | } | |
2912 | ||
1da177e4 | 2913 | md = mmc_blk_alloc(card); |
6f1d3247 UH |
2914 | if (IS_ERR(md)) { |
2915 | ret = PTR_ERR(md); | |
2916 | goto out_free; | |
2917 | } | |
1da177e4 | 2918 | |
6f1d3247 UH |
2919 | ret = mmc_blk_alloc_parts(card, md); |
2920 | if (ret) | |
371a689f AW |
2921 | goto out; |
2922 | ||
96541bac | 2923 | dev_set_drvdata(&card->dev, md); |
6f60c222 | 2924 | |
6f1d3247 UH |
2925 | ret = mmc_add_disk(md); |
2926 | if (ret) | |
371a689f AW |
2927 | goto out; |
2928 | ||
2929 | list_for_each_entry(part_md, &md->part, part) { | |
6f1d3247 UH |
2930 | ret = mmc_add_disk(part_md); |
2931 | if (ret) | |
371a689f AW |
2932 | goto out; |
2933 | } | |
e94cfef6 | 2934 | |
627c3ccf | 2935 | /* Add two debugfs entries */ |
f9f0da98 | 2936 | mmc_blk_add_debugfs(card, md); |
627c3ccf | 2937 | |
e94cfef6 UH |
2938 | pm_runtime_set_autosuspend_delay(&card->dev, 3000); |
2939 | pm_runtime_use_autosuspend(&card->dev); | |
2940 | ||
2941 | /* | |
2942 | * Don't enable runtime PM for SD-combo cards here. Leave that | |
2943 | * decision to be taken during the SDIO init sequence instead. | |
2944 | */ | |
2945 | if (card->type != MMC_TYPE_SD_COMBO) { | |
2946 | pm_runtime_set_active(&card->dev); | |
2947 | pm_runtime_enable(&card->dev); | |
2948 | } | |
2949 | ||
1da177e4 LT |
2950 | return 0; |
2951 | ||
6f1d3247 | 2952 | out: |
371a689f AW |
2953 | mmc_blk_remove_parts(card, md); |
2954 | mmc_blk_remove_req(md); | |
6f1d3247 UH |
2955 | out_free: |
2956 | destroy_workqueue(card->complete_wq); | |
2957 | return ret; | |
1da177e4 LT |
2958 | } |
2959 | ||
96541bac | 2960 | static void mmc_blk_remove(struct mmc_card *card) |
1da177e4 | 2961 | { |
96541bac | 2962 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); |
1da177e4 | 2963 | |
f9f0da98 | 2964 | mmc_blk_remove_debugfs(card, md); |
371a689f | 2965 | mmc_blk_remove_parts(card, md); |
e94cfef6 | 2966 | pm_runtime_get_sync(&card->dev); |
65f9e20e SL |
2967 | if (md->part_curr != md->part_type) { |
2968 | mmc_claim_host(card->host); | |
2969 | mmc_blk_part_switch(card, md->part_type); | |
2970 | mmc_release_host(card->host); | |
2971 | } | |
e94cfef6 UH |
2972 | if (card->type != MMC_TYPE_SD_COMBO) |
2973 | pm_runtime_disable(&card->dev); | |
2974 | pm_runtime_put_noidle(&card->dev); | |
371a689f | 2975 | mmc_blk_remove_req(md); |
96541bac | 2976 | dev_set_drvdata(&card->dev, NULL); |
dcf6e2e3 | 2977 | destroy_workqueue(card->complete_wq); |
1da177e4 LT |
2978 | } |
2979 | ||
96541bac | 2980 | static int _mmc_blk_suspend(struct mmc_card *card) |
1da177e4 | 2981 | { |
371a689f | 2982 | struct mmc_blk_data *part_md; |
96541bac | 2983 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); |
1da177e4 LT |
2984 | |
2985 | if (md) { | |
2986 | mmc_queue_suspend(&md->queue); | |
371a689f AW |
2987 | list_for_each_entry(part_md, &md->part, part) { |
2988 | mmc_queue_suspend(&part_md->queue); | |
2989 | } | |
1da177e4 LT |
2990 | } |
2991 | return 0; | |
2992 | } | |
2993 | ||
96541bac | 2994 | static void mmc_blk_shutdown(struct mmc_card *card) |
76287748 | 2995 | { |
96541bac | 2996 | _mmc_blk_suspend(card); |
76287748 UH |
2997 | } |
2998 | ||
0967edc6 UH |
2999 | #ifdef CONFIG_PM_SLEEP |
3000 | static int mmc_blk_suspend(struct device *dev) | |
76287748 | 3001 | { |
96541bac UH |
3002 | struct mmc_card *card = mmc_dev_to_card(dev); |
3003 | ||
3004 | return _mmc_blk_suspend(card); | |
76287748 UH |
3005 | } |
3006 | ||
0967edc6 | 3007 | static int mmc_blk_resume(struct device *dev) |
1da177e4 | 3008 | { |
371a689f | 3009 | struct mmc_blk_data *part_md; |
fc95e30b | 3010 | struct mmc_blk_data *md = dev_get_drvdata(dev); |
1da177e4 LT |
3011 | |
3012 | if (md) { | |
371a689f AW |
3013 | /* |
3014 | * Resume involves the card going into idle state, | |
3015 | * so current partition is always the main one. | |
3016 | */ | |
3017 | md->part_curr = md->part_type; | |
1da177e4 | 3018 | mmc_queue_resume(&md->queue); |
371a689f AW |
3019 | list_for_each_entry(part_md, &md->part, part) { |
3020 | mmc_queue_resume(&part_md->queue); | |
3021 | } | |
1da177e4 LT |
3022 | } |
3023 | return 0; | |
3024 | } | |
1da177e4 LT |
3025 | #endif |
3026 | ||
0967edc6 UH |
3027 | static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume); |
3028 | ||
96541bac UH |
3029 | static struct mmc_driver mmc_driver = { |
3030 | .drv = { | |
3031 | .name = "mmcblk", | |
3032 | .pm = &mmc_blk_pm_ops, | |
3033 | }, | |
1da177e4 LT |
3034 | .probe = mmc_blk_probe, |
3035 | .remove = mmc_blk_remove, | |
76287748 | 3036 | .shutdown = mmc_blk_shutdown, |
1da177e4 LT |
3037 | }; |
3038 | ||
3039 | static int __init mmc_blk_init(void) | |
3040 | { | |
9d4e98e9 | 3041 | int res; |
1da177e4 | 3042 | |
97548575 LW |
3043 | res = bus_register(&mmc_rpmb_bus_type); |
3044 | if (res < 0) { | |
3045 | pr_err("mmcblk: could not register RPMB bus type\n"); | |
3046 | return res; | |
3047 | } | |
3048 | res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb"); | |
3049 | if (res < 0) { | |
3050 | pr_err("mmcblk: failed to allocate rpmb chrdev region\n"); | |
3051 | goto out_bus_unreg; | |
3052 | } | |
3053 | ||
5e71b7a6 OJ |
3054 | if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) |
3055 | pr_info("mmcblk: using %d minors per device\n", perdev_minors); | |
3056 | ||
a26eba61 | 3057 | max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors); |
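/*
 * Worked example for the calculation above (assuming MINORBITS = 20 and the
 * default perdev_minors of CONFIG_MMC_BLOCK_MINORS = 8):
 * (1 << 20) / 8 = 131072, so max_devices is normally limited by MAX_DEVICES
 * rather than by the minor-number space.
 */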
5e71b7a6 | 3058 | |
fe6b4c88 PO |
3059 | res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
3060 | if (res) | |
97548575 | 3061 | goto out_chrdev_unreg; |
1da177e4 | 3062 | |
9d4e98e9 AM |
3063 | res = mmc_register_driver(&mmc_driver); |
3064 | if (res) | |
97548575 | 3065 | goto out_blkdev_unreg; |
1da177e4 | 3066 | |
9d4e98e9 | 3067 | return 0; |
97548575 LW |
3068 | |
3069 | out_blkdev_unreg: | |
9d4e98e9 | 3070 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
97548575 LW |
3071 | out_chrdev_unreg: |
3072 | unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); | |
3073 | out_bus_unreg: | |
3074 | bus_unregister(&mmc_rpmb_bus_type); | |
1da177e4 LT |
3075 | return res; |
3076 | } | |
3077 | ||
3078 | static void __exit mmc_blk_exit(void) | |
3079 | { | |
3080 | mmc_unregister_driver(&mmc_driver); | |
fe6b4c88 | 3081 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
97548575 | 3082 | unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); |
d0a0852b | 3083 | bus_unregister(&mmc_rpmb_bus_type); |
1da177e4 LT |
3084 | } |
3085 | ||
3086 | module_init(mmc_blk_init); | |
3087 | module_exit(mmc_blk_exit); | |
3088 | ||
3089 | MODULE_LICENSE("GPL"); | |
3090 | MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); | |
3091 |