Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Block driver for media (i.e., flash cards) | |
3 | * | |
4 | * Copyright 2002 Hewlett-Packard Company | |
979ce720 | 5 | * Copyright 2005-2008 Pierre Ossman |
1da177e4 LT |
6 | * |
7 | * Use consistent with the GNU GPL is permitted, | |
8 | * provided that this copyright notice is | |
9 | * preserved in its entirety in all copies and derived works. | |
10 | * | |
11 | * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, | |
12 | * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS | |
13 | * FITNESS FOR ANY PARTICULAR PURPOSE. | |
14 | * | |
15 | * Many thanks to Alessandro Rubini and Jonathan Corbet! | |
16 | * | |
17 | * Author: Andrew Christian | |
18 | * 28 May 2002 | |
19 | */ | |
20 | #include <linux/moduleparam.h> | |
21 | #include <linux/module.h> | |
22 | #include <linux/init.h> | |
23 | ||
1da177e4 LT |
24 | #include <linux/kernel.h> |
25 | #include <linux/fs.h> | |
5a0e3ad6 | 26 | #include <linux/slab.h> |
1da177e4 LT |
27 | #include <linux/errno.h> |
28 | #include <linux/hdreg.h> | |
29 | #include <linux/kdev_t.h> | |
30 | #include <linux/blkdev.h> | |
97548575 | 31 | #include <linux/cdev.h> |
a621aaed | 32 | #include <linux/mutex.h> |
ec5a19dd | 33 | #include <linux/scatterlist.h> |
a7bbb573 | 34 | #include <linux/string_helpers.h> |
cb87ea28 JC |
35 | #include <linux/delay.h> |
36 | #include <linux/capability.h> | |
37 | #include <linux/compat.h> | |
e94cfef6 | 38 | #include <linux/pm_runtime.h> |
b10fa99e | 39 | #include <linux/idr.h> |
627c3ccf | 40 | #include <linux/debugfs.h> |
1da177e4 | 41 | |
cb87ea28 | 42 | #include <linux/mmc/ioctl.h> |
1da177e4 | 43 | #include <linux/mmc/card.h> |
385e3227 | 44 | #include <linux/mmc/host.h> |
da7fbe58 PO |
45 | #include <linux/mmc/mmc.h> |
46 | #include <linux/mmc/sd.h> | |
1da177e4 | 47 | |
7c0f6ba6 | 48 | #include <linux/uaccess.h> |
1da177e4 | 49 | |
98ac2162 | 50 | #include "queue.h" |
48ab086d | 51 | #include "block.h" |
55244c56 | 52 | #include "core.h" |
4facdde1 | 53 | #include "card.h" |
5857b29b | 54 | #include "host.h" |
4facdde1 | 55 | #include "bus.h" |
55244c56 | 56 | #include "mmc_ops.h" |
28fc64af | 57 | #include "quirks.h" |
55244c56 | 58 | #include "sd_ops.h" |
1da177e4 | 59 | |
6b0b6285 | 60 | MODULE_ALIAS("mmc:block"); |
5e71b7a6 OJ |
61 | #ifdef MODULE_PARAM_PREFIX |
62 | #undef MODULE_PARAM_PREFIX | |
63 | #endif | |
64 | #define MODULE_PARAM_PREFIX "mmcblk." | |
65 | ||
6b7a363d AH |
66 | /* |
67 | * Set a 10 second timeout for polling write request busy state. Note, mmc core | |
68 | * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10 | |
69 | * second software timer to timeout the whole request, so 10 seconds should be | |
70 | * ample. | |
71 | */ | |
72 | #define MMC_BLK_TIMEOUT_MS (10 * 1000) | |
775a9362 ME |
73 | #define MMC_SANITIZE_REQ_TIMEOUT 240000 |
74 | #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) | |
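/*
 * A CMD6 (MMC_SWITCH) argument carries the EXT_CSD byte index in bits
 * 23:16; this macro recovers it so the ioctl path can recognise, for
 * example, a SWITCH to EXT_CSD_SANITIZE_START.
 */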
6a7a6b45 | 75 | |
d3df0465 | 76 | #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ |
ce39f9d1 | 77 | (rq_data_dir(req) == WRITE)) |
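/* A request qualifies as an MMC reliable write when it is a write carrying REQ_FUA. */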
5e71b7a6 | 78 | static DEFINE_MUTEX(block_mutex); |
6b0b6285 | 79 | |
1da177e4 | 80 | /* |
5e71b7a6 OJ |
81 | * The defaults come from config options but can be overridden by module |
82 | * or bootarg options. | |
1da177e4 | 83 | */ |
5e71b7a6 | 84 | static int perdev_minors = CONFIG_MMC_BLOCK_MINORS; |
1dff3144 | 85 | |
5e71b7a6 OJ |
86 | /* |
87 | * We've only got one major, so number of mmcblk devices is | |
a26eba61 | 88 | * limited to (1 << 20) / number of minors per device. It is also |
b10fa99e | 89 | * limited by the MAX_DEVICES below. |
5e71b7a6 OJ |
90 | */ |
91 | static int max_devices; | |
92 | ||
a26eba61 BH |
93 | #define MAX_DEVICES 256 |
94 | ||
b10fa99e | 95 | static DEFINE_IDA(mmc_blk_ida); |
97548575 | 96 | static DEFINE_IDA(mmc_rpmb_ida); |
1da177e4 | 97 | |
1da177e4 LT |
98 | /* |
99 | * There is one mmc_blk_data per slot. | |
100 | */ | |
101 | struct mmc_blk_data { | |
102 | spinlock_t lock; | |
307d8e6f | 103 | struct device *parent; |
1da177e4 LT |
104 | struct gendisk *disk; |
105 | struct mmc_queue queue; | |
371a689f | 106 | struct list_head part; |
97548575 | 107 | struct list_head rpmbs; |
1da177e4 | 108 | |
d0c97cfb AW |
109 | unsigned int flags; |
110 | #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ | |
111 | #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ | |
112 | ||
1da177e4 | 113 | unsigned int usage; |
a6f6c96b | 114 | unsigned int read_only; |
371a689f | 115 | unsigned int part_type; |
67716327 AH |
116 | unsigned int reset_done; |
117 | #define MMC_BLK_READ BIT(0) | |
118 | #define MMC_BLK_WRITE BIT(1) | |
119 | #define MMC_BLK_DISCARD BIT(2) | |
120 | #define MMC_BLK_SECDISCARD BIT(3) | |
1e8e55b6 | 121 | #define MMC_BLK_CQE_RECOVERY BIT(4) |
371a689f AW |
122 | |
123 | /* | |
124 | * Only set in main mmc_blk_data associated | |
fc95e30b | 125 | * with mmc_card with dev_set_drvdata, and keeps |
371a689f AW |
126 | * track of the currently selected device partition. |
127 | */ | |
128 | unsigned int part_curr; | |
129 | struct device_attribute force_ro; | |
add710ea JR |
130 | struct device_attribute power_ro_lock; |
131 | int area_type; | |
f9f0da98 AH |
132 | |
133 | /* debugfs files (only in main mmc_blk_data) */ | |
134 | struct dentry *status_dentry; | |
135 | struct dentry *ext_csd_dentry; | |
1da177e4 LT |
136 | }; |
137 | ||
97548575 LW |
138 | /* Device type for RPMB character devices */ |
139 | static dev_t mmc_rpmb_devt; | |
140 | ||
141 | /* Bus type for RPMB character devices */ | |
142 | static struct bus_type mmc_rpmb_bus_type = { | |
143 | .name = "mmc_rpmb", | |
144 | }; | |
145 | ||
146 | /** | |
147 | * struct mmc_rpmb_data - special RPMB device type for these areas | |
148 | * @dev: the device for the RPMB area | |
149 | * @chrdev: character device for the RPMB area | |
150 | * @id: unique device ID number | |
151 | * @part_index: partition index (0 on first) | |
152 | * @md: parent MMC block device | |
153 | * @node: list item, so we can put this device on a list | |
154 | */ | |
155 | struct mmc_rpmb_data { | |
156 | struct device dev; | |
157 | struct cdev chrdev; | |
158 | int id; | |
159 | unsigned int part_index; | |
160 | struct mmc_blk_data *md; | |
161 | struct list_head node; | |
162 | }; | |
163 | ||
a621aaed | 164 | static DEFINE_MUTEX(open_lock); |
1da177e4 | 165 | |
5e71b7a6 OJ |
166 | module_param(perdev_minors, int, 0444); |
167 | MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device"); |
168 | ||
8d1e977d | 169 | static inline int mmc_blk_part_switch(struct mmc_card *card, |
1f797edc | 170 | unsigned int part_type); |
cdf8a6fb | 171 | |
1da177e4 LT |
172 | static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) |
173 | { | |
174 | struct mmc_blk_data *md; | |
175 | ||
a621aaed | 176 | mutex_lock(&open_lock); |
1da177e4 LT |
177 | md = disk->private_data; |
178 | if (md && md->usage == 0) | |
179 | md = NULL; | |
180 | if (md) | |
181 | md->usage++; | |
a621aaed | 182 | mutex_unlock(&open_lock); |
1da177e4 LT |
183 | |
184 | return md; | |
185 | } | |
186 | ||
371a689f AW |
187 | static inline int mmc_get_devidx(struct gendisk *disk) |
188 | { | |
382c55f8 | 189 | int devidx = disk->first_minor / perdev_minors; |
371a689f AW |
190 | return devidx; |
191 | } | |
192 | ||
1da177e4 LT |
193 | static void mmc_blk_put(struct mmc_blk_data *md) |
194 | { | |
a621aaed | 195 | mutex_lock(&open_lock); |
1da177e4 LT |
196 | md->usage--; |
197 | if (md->usage == 0) { | |
371a689f | 198 | int devidx = mmc_get_devidx(md->disk); |
41e3efd0 | 199 | blk_put_queue(md->queue.queue); |
a04848c7 | 200 | ida_simple_remove(&mmc_blk_ida, devidx); |
1da177e4 | 201 | put_disk(md->disk); |
1da177e4 LT |
202 | kfree(md); |
203 | } | |
a621aaed | 204 | mutex_unlock(&open_lock); |
1da177e4 LT |
205 | } |
206 | ||
add710ea JR |
207 | static ssize_t power_ro_lock_show(struct device *dev, |
208 | struct device_attribute *attr, char *buf) | |
209 | { | |
210 | int ret; | |
211 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | |
212 | struct mmc_card *card = md->queue.card; | |
213 | int locked = 0; | |
214 | ||
215 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) | |
216 | locked = 2; | |
217 | else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) | |
218 | locked = 1; | |
219 | ||
220 | ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); | |
221 | ||
9098f84c TW |
222 | mmc_blk_put(md); |
223 | ||
add710ea JR |
224 | return ret; |
225 | } | |
226 | ||
227 | static ssize_t power_ro_lock_store(struct device *dev, | |
228 | struct device_attribute *attr, const char *buf, size_t count) | |
229 | { | |
230 | int ret; | |
231 | struct mmc_blk_data *md, *part_md; | |
0493f6fe LW |
232 | struct mmc_queue *mq; |
233 | struct request *req; | |
add710ea JR |
234 | unsigned long set; |
235 | ||
236 | if (kstrtoul(buf, 0, &set)) | |
237 | return -EINVAL; | |
238 | ||
239 | if (set != 1) | |
240 | return count; | |
241 | ||
242 | md = mmc_blk_get(dev_to_disk(dev)); | |
0493f6fe | 243 | mq = &md->queue; |
add710ea | 244 | |
0493f6fe LW |
245 | /* Dispatch locking to the block layer */ |
246 | req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM); | |
fb8e456e AH |
247 | if (IS_ERR(req)) { |
248 | count = PTR_ERR(req); | |
249 | goto out_put; | |
250 | } | |
0493f6fe LW |
251 | req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; |
252 | blk_execute_rq(mq->queue, NULL, req, 0); | |
253 | ret = req_to_mmc_queue_req(req)->drv_op_result; | |
34c089e8 | 254 | blk_put_request(req); |
add710ea JR |
255 | |
256 | if (!ret) { | |
257 | pr_info("%s: Locking boot partition ro until next power on\n", | |
258 | md->disk->disk_name); | |
259 | set_disk_ro(md->disk, 1); | |
260 | ||
261 | list_for_each_entry(part_md, &md->part, part) | |
262 | if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { | |
263 | pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); | |
264 | set_disk_ro(part_md->disk, 1); | |
265 | } | |
266 | } | |
fb8e456e | 267 | out_put: |
add710ea JR |
268 | mmc_blk_put(md); |
269 | return count; | |
270 | } | |
271 | ||
371a689f AW |
272 | static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, |
273 | char *buf) | |
274 | { | |
275 | int ret; | |
276 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | |
277 | ||
0031a98a | 278 | ret = snprintf(buf, PAGE_SIZE, "%d\n", |
371a689f AW |
279 | get_disk_ro(dev_to_disk(dev)) ^ |
280 | md->read_only); | |
281 | mmc_blk_put(md); | |
282 | return ret; | |
283 | } | |
284 | ||
285 | static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, | |
286 | const char *buf, size_t count) | |
287 | { | |
288 | int ret; | |
289 | char *end; | |
290 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | |
291 | unsigned long set = simple_strtoul(buf, &end, 0); | |
292 | if (end == buf) { | |
293 | ret = -EINVAL; | |
294 | goto out; | |
295 | } | |
296 | ||
297 | set_disk_ro(dev_to_disk(dev), set || md->read_only); | |
298 | ret = count; | |
299 | out: | |
300 | mmc_blk_put(md); | |
301 | return ret; | |
302 | } | |
303 | ||
a5a1561f | 304 | static int mmc_blk_open(struct block_device *bdev, fmode_t mode) |
1da177e4 | 305 | { |
a5a1561f | 306 | struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); |
1da177e4 LT |
307 | int ret = -ENXIO; |
308 | ||
2a48fc0a | 309 | mutex_lock(&block_mutex); |
1da177e4 LT |
310 | if (md) { |
311 | if (md->usage == 2) | |
a5a1561f | 312 | check_disk_change(bdev); |
1da177e4 | 313 | ret = 0; |
a00fc090 | 314 | |
a5a1561f | 315 | if ((mode & FMODE_WRITE) && md->read_only) { |
70bb0896 | 316 | mmc_blk_put(md); |
a00fc090 | 317 | ret = -EROFS; |
70bb0896 | 318 | } |
1da177e4 | 319 | } |
2a48fc0a | 320 | mutex_unlock(&block_mutex); |
1da177e4 LT |
321 | |
322 | return ret; | |
323 | } | |
324 | ||
db2a144b | 325 | static void mmc_blk_release(struct gendisk *disk, fmode_t mode) |
1da177e4 | 326 | { |
a5a1561f | 327 | struct mmc_blk_data *md = disk->private_data; |
1da177e4 | 328 | |
2a48fc0a | 329 | mutex_lock(&block_mutex); |
1da177e4 | 330 | mmc_blk_put(md); |
2a48fc0a | 331 | mutex_unlock(&block_mutex); |
1da177e4 LT |
332 | } |
333 | ||
334 | static int | |
a885c8c4 | 335 | mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
1da177e4 | 336 | { |
a885c8c4 CH |
337 | geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); |
338 | geo->heads = 4; | |
339 | geo->sectors = 16; | |
340 | return 0; | |
1da177e4 LT |
341 | } |
342 | ||
cb87ea28 JC |
343 | struct mmc_blk_ioc_data { |
344 | struct mmc_ioc_cmd ic; | |
345 | unsigned char *buf; | |
346 | u64 buf_bytes; | |
97548575 | 347 | struct mmc_rpmb_data *rpmb; |
cb87ea28 JC |
348 | }; |
349 | ||
350 | static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( | |
351 | struct mmc_ioc_cmd __user *user) | |
352 | { | |
353 | struct mmc_blk_ioc_data *idata; | |
354 | int err; | |
355 | ||
1ff8950c | 356 | idata = kmalloc(sizeof(*idata), GFP_KERNEL); |
cb87ea28 JC |
357 | if (!idata) { |
358 | err = -ENOMEM; | |
aea253ec | 359 | goto out; |
cb87ea28 JC |
360 | } |
361 | ||
362 | if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { | |
363 | err = -EFAULT; | |
aea253ec | 364 | goto idata_err; |
cb87ea28 JC |
365 | } |
366 | ||
367 | idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; | |
368 | if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { | |
369 | err = -EOVERFLOW; | |
aea253ec | 370 | goto idata_err; |
cb87ea28 JC |
371 | } |
372 | ||
bfe5b1b1 VV |
373 | if (!idata->buf_bytes) { |
374 | idata->buf = NULL; | |
4d6144de | 375 | return idata; |
bfe5b1b1 | 376 | } |
4d6144de | 377 | |
1ff8950c | 378 | idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL); |
cb87ea28 JC |
379 | if (!idata->buf) { |
380 | err = -ENOMEM; | |
aea253ec | 381 | goto idata_err; |
cb87ea28 JC |
382 | } |
383 | ||
384 | if (copy_from_user(idata->buf, (void __user *)(unsigned long) | |
385 | idata->ic.data_ptr, idata->buf_bytes)) { | |
386 | err = -EFAULT; | |
387 | goto copy_err; | |
388 | } | |
389 | ||
390 | return idata; | |
391 | ||
392 | copy_err: | |
393 | kfree(idata->buf); | |
aea253ec | 394 | idata_err: |
cb87ea28 | 395 | kfree(idata); |
aea253ec | 396 | out: |
cb87ea28 | 397 | return ERR_PTR(err); |
cb87ea28 JC |
398 | } |
399 | ||
a5f5774c JH |
400 | static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, |
401 | struct mmc_blk_ioc_data *idata) | |
402 | { | |
403 | struct mmc_ioc_cmd *ic = &idata->ic; | |
404 | ||
405 | if (copy_to_user(&(ic_ptr->response), ic->response, | |
406 | sizeof(ic->response))) | |
407 | return -EFAULT; | |
408 | ||
409 | if (!idata->ic.write_flag) { | |
410 | if (copy_to_user((void __user *)(unsigned long)ic->data_ptr, | |
411 | idata->buf, idata->buf_bytes)) | |
412 | return -EFAULT; | |
413 | } | |
414 | ||
415 | return 0; | |
416 | } | |
417 | ||
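/*
 * Poll CMD13 (SEND_STATUS) until the card reports it has left the
 * programming state after an RPMB access, sleeping 1-5 ms between polls
 * and giving up with -EPERM once retries_max polls have been made.
 */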
8d1e977d LP |
418 | static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, |
419 | u32 retries_max) | |
420 | { | |
421 | int err; | |
422 | u32 retry_count = 0; | |
423 | ||
424 | if (!status || !retries_max) | |
425 | return -EINVAL; | |
426 | ||
427 | do { | |
2185bc2c | 428 | err = __mmc_send_status(card, status, 5); |
8d1e977d LP |
429 | if (err) |
430 | break; | |
431 | ||
432 | if (!R1_STATUS(*status) && | |
433 | (R1_CURRENT_STATE(*status) != R1_STATE_PRG)) | |
434 | break; /* RPMB programming operation complete */ | |
435 | ||
436 | /* | |
437 | * Reschedule to give the MMC device a chance to continue |
438 | * processing the previous command without being polled too | |
439 | * frequently. | |
440 | */ | |
441 | usleep_range(1000, 5000); | |
442 | } while (++retry_count < retries_max); | |
443 | ||
444 | if (retry_count == retries_max) | |
445 | err = -EPERM; | |
446 | ||
447 | return err; | |
448 | } | |
449 | ||
775a9362 ME |
450 | static int ioctl_do_sanitize(struct mmc_card *card) |
451 | { | |
452 | int err; | |
453 | ||
a2d1086d | 454 | if (!mmc_can_sanitize(card)) { |
775a9362 ME |
455 | pr_warn("%s: %s - SANITIZE is not supported\n", |
456 | mmc_hostname(card->host), __func__); | |
457 | err = -EOPNOTSUPP; | |
458 | goto out; | |
459 | } | |
460 | ||
461 | pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", | |
462 | mmc_hostname(card->host), __func__); | |
463 | ||
464 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
465 | EXT_CSD_SANITIZE_START, 1, | |
466 | MMC_SANITIZE_REQ_TIMEOUT); | |
467 | ||
468 | if (err) | |
469 | pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n", | |
470 | mmc_hostname(card->host), __func__, err); | |
471 | ||
472 | pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), | |
473 | __func__); | |
474 | out: | |
475 | return err; | |
476 | } | |
477 | ||
a5f5774c JH |
478 | static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, |
479 | struct mmc_blk_ioc_data *idata) | |
cb87ea28 | 480 | { |
c7836d15 MY |
481 | struct mmc_command cmd = {}; |
482 | struct mmc_data data = {}; | |
483 | struct mmc_request mrq = {}; | |
cb87ea28 JC |
484 | struct scatterlist sg; |
485 | int err; | |
97548575 | 486 | unsigned int target_part; |
8d1e977d | 487 | u32 status = 0; |
cb87ea28 | 488 | |
a5f5774c JH |
489 | if (!card || !md || !idata) |
490 | return -EINVAL; | |
cb87ea28 | 491 | |
97548575 LW |
492 | /* |
493 | * The RPMB accesses come in from the character device, so we |
494 | * need to target these explicitly. Else we just target the | |
495 | * partition type for the block device the ioctl() was issued | |
496 | * on. | |
497 | */ | |
498 | if (idata->rpmb) { | |
499 | /* Support multiple RPMB partitions */ | |
500 | target_part = idata->rpmb->part_index; | |
501 | target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB; | |
502 | } else { | |
503 | target_part = md->part_type; | |
504 | } | |
8d1e977d | 505 | |
4d6144de JR |
506 | cmd.opcode = idata->ic.opcode; |
507 | cmd.arg = idata->ic.arg; | |
508 | cmd.flags = idata->ic.flags; | |
509 | ||
510 | if (idata->buf_bytes) { | |
511 | data.sg = &sg; | |
512 | data.sg_len = 1; | |
513 | data.blksz = idata->ic.blksz; | |
514 | data.blocks = idata->ic.blocks; | |
515 | ||
516 | sg_init_one(data.sg, idata->buf, idata->buf_bytes); | |
517 | ||
518 | if (idata->ic.write_flag) | |
519 | data.flags = MMC_DATA_WRITE; | |
520 | else | |
521 | data.flags = MMC_DATA_READ; | |
522 | ||
523 | /* data.flags must already be set before doing this. */ | |
524 | mmc_set_data_timeout(&data, card); | |
525 | ||
526 | /* Allow overriding the timeout_ns for empirical tuning. */ | |
527 | if (idata->ic.data_timeout_ns) | |
528 | data.timeout_ns = idata->ic.data_timeout_ns; | |
529 | ||
530 | if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { | |
531 | /* | |
532 | * Pretend this is a data transfer and rely on the | |
533 | * host driver to compute timeout. When all host | |
534 | * drivers support cmd.cmd_timeout for R1B, this | |
535 | * can be changed to: | |
536 | * | |
537 | * mrq.data = NULL; | |
538 | * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; | |
539 | */ | |
540 | data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; | |
541 | } | |
542 | ||
543 | mrq.data = &data; | |
544 | } | |
545 | ||
546 | mrq.cmd = &cmd; | |
547 | ||
97548575 | 548 | err = mmc_blk_part_switch(card, target_part); |
8d1e977d | 549 | if (err) |
a5f5774c | 550 | return err; |
8d1e977d | 551 | |
cb87ea28 JC |
552 | if (idata->ic.is_acmd) { |
553 | err = mmc_app_cmd(card->host, card); | |
554 | if (err) | |
a5f5774c | 555 | return err; |
cb87ea28 JC |
556 | } |
557 | ||
97548575 | 558 | if (idata->rpmb) { |
8d1e977d LP |
559 | err = mmc_set_blockcount(card, data.blocks, |
560 | idata->ic.write_flag & (1 << 31)); | |
561 | if (err) | |
a5f5774c | 562 | return err; |
8d1e977d LP |
563 | } |
564 | ||
a82e484e YG |
565 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && |
566 | (cmd.opcode == MMC_SWITCH)) { | |
775a9362 ME |
567 | err = ioctl_do_sanitize(card); |
568 | ||
569 | if (err) | |
570 | pr_err("%s: ioctl_do_sanitize() failed. err = %d", | |
571 | __func__, err); | |
572 | ||
a5f5774c | 573 | return err; |
775a9362 ME |
574 | } |
575 | ||
cb87ea28 JC |
576 | mmc_wait_for_req(card->host, &mrq); |
577 | ||
578 | if (cmd.error) { | |
579 | dev_err(mmc_dev(card->host), "%s: cmd error %d\n", | |
580 | __func__, cmd.error); | |
a5f5774c | 581 | return cmd.error; |
cb87ea28 JC |
582 | } |
583 | if (data.error) { | |
584 | dev_err(mmc_dev(card->host), "%s: data error %d\n", | |
585 | __func__, data.error); | |
a5f5774c | 586 | return data.error; |
cb87ea28 JC |
587 | } |
588 | ||
589 | /* | |
590 | * According to the SD specs, some commands require a delay after | |
591 | * issuing the command. | |
592 | */ | |
593 | if (idata->ic.postsleep_min_us) | |
594 | usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); | |
595 | ||
a5f5774c | 596 | memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); |
cb87ea28 | 597 | |
97548575 | 598 | if (idata->rpmb) { |
8d1e977d LP |
599 | /* |
600 | * Ensure RPMB command has completed by polling CMD13 | |
601 | * "Send Status". | |
602 | */ | |
603 | err = ioctl_rpmb_card_status_poll(card, &status, 5); | |
604 | if (err) | |
605 | dev_err(mmc_dev(card->host), | |
606 | "%s: Card Status=0x%08X, error %d\n", | |
607 | __func__, status, err); | |
608 | } | |
609 | ||
a5f5774c JH |
610 | return err; |
611 | } | |
612 | ||
2fe20bae | 613 | static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, |
97548575 LW |
614 | struct mmc_ioc_cmd __user *ic_ptr, |
615 | struct mmc_rpmb_data *rpmb) | |
a5f5774c JH |
616 | { |
617 | struct mmc_blk_ioc_data *idata; | |
3ecd8cf2 | 618 | struct mmc_blk_ioc_data *idatas[1]; |
614f0388 | 619 | struct mmc_queue *mq; |
a5f5774c | 620 | struct mmc_card *card; |
b093410c | 621 | int err = 0, ioc_err = 0; |
614f0388 | 622 | struct request *req; |
a5f5774c JH |
623 | |
624 | idata = mmc_blk_ioctl_copy_from_user(ic_ptr); | |
625 | if (IS_ERR(idata)) | |
626 | return PTR_ERR(idata); | |
97548575 LW |
627 | /* This will be NULL on non-RPMB ioctls */ |
628 | idata->rpmb = rpmb; | |
a5f5774c | 629 | |
a5f5774c JH |
630 | card = md->queue.card; |
631 | if (IS_ERR(card)) { | |
632 | err = PTR_ERR(card); | |
633 | goto cmd_done; | |
634 | } | |
635 | ||
614f0388 LW |
636 | /* |
637 | * Dispatch the ioctl() into the block request queue. | |
638 | */ | |
639 | mq = &md->queue; | |
640 | req = blk_get_request(mq->queue, | |
641 | idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, | |
642 | __GFP_RECLAIM); | |
fb8e456e AH |
643 | if (IS_ERR(req)) { |
644 | err = PTR_ERR(req); | |
645 | goto cmd_done; | |
646 | } | |
3ecd8cf2 | 647 | idatas[0] = idata; |
97548575 LW |
648 | req_to_mmc_queue_req(req)->drv_op = |
649 | rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; | |
69f7599e | 650 | req_to_mmc_queue_req(req)->drv_op_data = idatas; |
3ecd8cf2 | 651 | req_to_mmc_queue_req(req)->ioc_count = 1; |
614f0388 | 652 | blk_execute_rq(mq->queue, NULL, req, 0); |
0493f6fe | 653 | ioc_err = req_to_mmc_queue_req(req)->drv_op_result; |
b093410c | 654 | err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); |
614f0388 | 655 | blk_put_request(req); |
a5f5774c | 656 | |
cb87ea28 | 657 | cmd_done: |
cb87ea28 JC |
658 | kfree(idata->buf); |
659 | kfree(idata); | |
b093410c | 660 | return ioc_err ? ioc_err : err; |
cb87ea28 JC |
661 | } |
662 | ||
2fe20bae | 663 | static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, |
97548575 LW |
664 | struct mmc_ioc_multi_cmd __user *user, |
665 | struct mmc_rpmb_data *rpmb) | |
a5f5774c JH |
666 | { |
667 | struct mmc_blk_ioc_data **idata = NULL; | |
668 | struct mmc_ioc_cmd __user *cmds = user->cmds; | |
669 | struct mmc_card *card; | |
3ecd8cf2 | 670 | struct mmc_queue *mq; |
b093410c | 671 | int i, err = 0, ioc_err = 0; |
a5f5774c | 672 | __u64 num_of_cmds; |
3ecd8cf2 | 673 | struct request *req; |
a5f5774c JH |
674 | |
675 | if (copy_from_user(&num_of_cmds, &user->num_of_cmds, | |
676 | sizeof(num_of_cmds))) | |
677 | return -EFAULT; | |
678 | ||
aab2ee03 GU |
679 | if (!num_of_cmds) |
680 | return 0; | |
681 | ||
a5f5774c JH |
682 | if (num_of_cmds > MMC_IOC_MAX_CMDS) |
683 | return -EINVAL; | |
684 | ||
685 | idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL); | |
686 | if (!idata) | |
687 | return -ENOMEM; | |
688 | ||
689 | for (i = 0; i < num_of_cmds; i++) { | |
690 | idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]); | |
691 | if (IS_ERR(idata[i])) { | |
692 | err = PTR_ERR(idata[i]); | |
693 | num_of_cmds = i; | |
694 | goto cmd_err; | |
695 | } | |
97548575 LW |
696 | /* This will be NULL on non-RPMB ioctls */ |
697 | idata[i]->rpmb = rpmb; | |
a5f5774c JH |
698 | } |
699 | ||
a5f5774c JH |
700 | card = md->queue.card; |
701 | if (IS_ERR(card)) { | |
702 | err = PTR_ERR(card); | |
2fe20bae | 703 | goto cmd_err; |
a5f5774c JH |
704 | } |
705 | ||
a5f5774c | 706 | |
3ecd8cf2 LW |
707 | /* |
708 | * Dispatch the ioctl()s into the block request queue. | |
709 | */ | |
710 | mq = &md->queue; | |
711 | req = blk_get_request(mq->queue, | |
712 | idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, | |
713 | __GFP_RECLAIM); | |
fb8e456e AH |
714 | if (IS_ERR(req)) { |
715 | err = PTR_ERR(req); | |
716 | goto cmd_err; | |
717 | } | |
97548575 LW |
718 | req_to_mmc_queue_req(req)->drv_op = |
719 | rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; | |
69f7599e | 720 | req_to_mmc_queue_req(req)->drv_op_data = idata; |
3ecd8cf2 LW |
721 | req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; |
722 | blk_execute_rq(mq->queue, NULL, req, 0); | |
0493f6fe | 723 | ioc_err = req_to_mmc_queue_req(req)->drv_op_result; |
a5f5774c JH |
724 | |
725 | /* copy to user if data and response */ | |
b093410c | 726 | for (i = 0; i < num_of_cmds && !err; i++) |
a5f5774c | 727 | err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]); |
a5f5774c | 728 | |
3ecd8cf2 LW |
729 | blk_put_request(req); |
730 | ||
a5f5774c JH |
731 | cmd_err: |
732 | for (i = 0; i < num_of_cmds; i++) { | |
733 | kfree(idata[i]->buf); | |
734 | kfree(idata[i]); | |
735 | } | |
736 | kfree(idata); | |
b093410c | 737 | return ioc_err ? ioc_err : err; |
a5f5774c JH |
738 | } |
739 | ||
61fe0e2b LW |
740 | static int mmc_blk_check_blkdev(struct block_device *bdev) |
741 | { | |
742 | /* | |
743 | * The caller must have CAP_SYS_RAWIO, and must be calling this on the | |
744 | * whole block device, not on a partition. This prevents overspray | |
745 | * between sibling partitions. | |
746 | */ | |
747 | if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) | |
748 | return -EPERM; | |
749 | return 0; | |
750 | } | |
751 | ||
cb87ea28 JC |
752 | static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, |
753 | unsigned int cmd, unsigned long arg) | |
754 | { | |
2fe20bae | 755 | struct mmc_blk_data *md; |
61fe0e2b LW |
756 | int ret; |
757 | ||
a5f5774c JH |
758 | switch (cmd) { |
759 | case MMC_IOC_CMD: | |
61fe0e2b LW |
760 | ret = mmc_blk_check_blkdev(bdev); |
761 | if (ret) | |
762 | return ret; | |
2fe20bae LW |
763 | md = mmc_blk_get(bdev->bd_disk); |
764 | if (!md) | |
765 | return -EINVAL; | |
766 | ret = mmc_blk_ioctl_cmd(md, | |
97548575 LW |
767 | (struct mmc_ioc_cmd __user *)arg, |
768 | NULL); | |
2fe20bae LW |
769 | mmc_blk_put(md); |
770 | return ret; | |
a5f5774c | 771 | case MMC_IOC_MULTI_CMD: |
61fe0e2b LW |
772 | ret = mmc_blk_check_blkdev(bdev); |
773 | if (ret) | |
774 | return ret; | |
2fe20bae LW |
775 | md = mmc_blk_get(bdev->bd_disk); |
776 | if (!md) | |
777 | return -EINVAL; | |
778 | ret = mmc_blk_ioctl_multi_cmd(md, | |
97548575 LW |
779 | (struct mmc_ioc_multi_cmd __user *)arg, |
780 | NULL); | |
2fe20bae LW |
781 | mmc_blk_put(md); |
782 | return ret; | |
a5f5774c JH |
783 | default: |
784 | return -EINVAL; | |
785 | } | |
cb87ea28 JC |
786 | } |
787 | ||
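/*
 * Userspace usage sketch (assuming a hypothetical whole-disk node such
 * as /dev/mmcblk0): fill a struct mmc_ioc_cmd from <linux/mmc/ioctl.h>
 * with opcode, arg, flags and blksz/blocks, point data_ptr at a buffer
 * via mmc_ioc_cmd_set_data(), then call ioctl(fd, MMC_IOC_CMD, &ic)
 * from a process holding CAP_SYS_RAWIO.
 */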
788 | #ifdef CONFIG_COMPAT | |
789 | static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, | |
790 | unsigned int cmd, unsigned long arg) | |
791 | { | |
792 | return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); | |
793 | } | |
794 | #endif | |
795 | ||
83d5cde4 | 796 | static const struct block_device_operations mmc_bdops = { |
a5a1561f AV |
797 | .open = mmc_blk_open, |
798 | .release = mmc_blk_release, | |
a885c8c4 | 799 | .getgeo = mmc_blk_getgeo, |
1da177e4 | 800 | .owner = THIS_MODULE, |
cb87ea28 JC |
801 | .ioctl = mmc_blk_ioctl, |
802 | #ifdef CONFIG_COMPAT | |
803 | .compat_ioctl = mmc_blk_compat_ioctl, | |
804 | #endif | |
1da177e4 LT |
805 | }; |
806 | ||
025e3d5f AH |
807 | static int mmc_blk_part_switch_pre(struct mmc_card *card, |
808 | unsigned int part_type) | |
809 | { | |
810 | int ret = 0; | |
811 | ||
812 | if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) { | |
813 | if (card->ext_csd.cmdq_en) { | |
814 | ret = mmc_cmdq_disable(card); | |
815 | if (ret) | |
816 | return ret; | |
817 | } | |
818 | mmc_retune_pause(card->host); | |
819 | } | |
820 | ||
821 | return ret; | |
822 | } | |
823 | ||
824 | static int mmc_blk_part_switch_post(struct mmc_card *card, | |
825 | unsigned int part_type) | |
826 | { | |
827 | int ret = 0; | |
828 | ||
829 | if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) { | |
830 | mmc_retune_unpause(card->host); | |
831 | if (card->reenable_cmdq && !card->ext_csd.cmdq_en) | |
832 | ret = mmc_cmdq_enable(card); | |
833 | } | |
834 | ||
835 | return ret; | |
836 | } | |
837 | ||
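/*
 * Select the card's active hardware partition (main, boot, GP or RPMB)
 * by rewriting the ACC bits of EXT_CSD_PART_CONFIG, pausing retuning
 * and command queueing around RPMB access, and record the selection in
 * main_md->part_curr.
 */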
371a689f | 838 | static inline int mmc_blk_part_switch(struct mmc_card *card, |
1f797edc | 839 | unsigned int part_type) |
371a689f | 840 | { |
025e3d5f | 841 | int ret = 0; |
fc95e30b | 842 | struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); |
0d7d85ca | 843 | |
1f797edc | 844 | if (main_md->part_curr == part_type) |
371a689f AW |
845 | return 0; |
846 | ||
847 | if (mmc_card_mmc(card)) { | |
0d7d85ca AH |
848 | u8 part_config = card->ext_csd.part_config; |
849 | ||
1f797edc | 850 | ret = mmc_blk_part_switch_pre(card, part_type); |
025e3d5f AH |
851 | if (ret) |
852 | return ret; | |
57da0c04 | 853 | |
0d7d85ca | 854 | part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; |
1f797edc | 855 | part_config |= part_type; |
371a689f AW |
856 | |
857 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
0d7d85ca | 858 | EXT_CSD_PART_CONFIG, part_config, |
371a689f | 859 | card->ext_csd.part_time); |
57da0c04 | 860 | if (ret) { |
1f797edc | 861 | mmc_blk_part_switch_post(card, part_type); |
371a689f | 862 | return ret; |
57da0c04 | 863 | } |
0d7d85ca AH |
864 | |
865 | card->ext_csd.part_config = part_config; | |
57da0c04 | 866 | |
025e3d5f | 867 | ret = mmc_blk_part_switch_post(card, main_md->part_curr); |
67716327 | 868 | } |
371a689f | 869 | |
1f797edc | 870 | main_md->part_curr = part_type; |
025e3d5f | 871 | return ret; |
371a689f AW |
872 | } |
873 | ||
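/*
 * Ask an SD card how many blocks of the preceding write completed, by
 * issuing ACMD22 (SD_APP_SEND_NUM_WR_BLKS) and reading the 4-byte
 * big-endian count back as a data block.
 */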
169f03a0 | 874 | static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks) |
ec5a19dd PO |
875 | { |
876 | int err; | |
051913da BD |
877 | u32 result; |
878 | __be32 *blocks; | |
ec5a19dd | 879 | |
c7836d15 MY |
880 | struct mmc_request mrq = {}; |
881 | struct mmc_command cmd = {}; | |
882 | struct mmc_data data = {}; | |
ec5a19dd PO |
883 | |
884 | struct scatterlist sg; | |
885 | ||
ec5a19dd PO |
886 | cmd.opcode = MMC_APP_CMD; |
887 | cmd.arg = card->rca << 16; | |
7213d175 | 888 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; |
ec5a19dd PO |
889 | |
890 | err = mmc_wait_for_cmd(card->host, &cmd, 0); | |
7213d175 | 891 | if (err) |
169f03a0 | 892 | return err; |
7213d175 | 893 | if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) |
169f03a0 | 894 | return -EIO; |
ec5a19dd PO |
895 | |
896 | memset(&cmd, 0, sizeof(struct mmc_command)); | |
897 | ||
898 | cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; | |
899 | cmd.arg = 0; | |
7213d175 | 900 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; |
ec5a19dd | 901 | |
ec5a19dd PO |
902 | data.blksz = 4; |
903 | data.blocks = 1; | |
904 | data.flags = MMC_DATA_READ; | |
905 | data.sg = &sg; | |
906 | data.sg_len = 1; | |
d380443c | 907 | mmc_set_data_timeout(&data, card); |
ec5a19dd | 908 | |
ec5a19dd PO |
909 | mrq.cmd = &cmd; |
910 | mrq.data = &data; | |
911 | ||
051913da BD |
912 | blocks = kmalloc(4, GFP_KERNEL); |
913 | if (!blocks) | |
169f03a0 | 914 | return -ENOMEM; |
051913da BD |
915 | |
916 | sg_init_one(&sg, blocks, 4); | |
ec5a19dd PO |
917 | |
918 | mmc_wait_for_req(card->host, &mrq); | |
919 | ||
051913da BD |
920 | result = ntohl(*blocks); |
921 | kfree(blocks); | |
922 | ||
17b0429d | 923 | if (cmd.error || data.error) |
169f03a0 LW |
924 | return -EIO; |
925 | ||
926 | *written_blocks = result; | |
ec5a19dd | 927 | |
169f03a0 | 928 | return 0; |
ec5a19dd PO |
929 | } |
930 | ||
92c0a0cc AH |
931 | static unsigned int mmc_blk_clock_khz(struct mmc_host *host) |
932 | { | |
933 | if (host->actual_clock) | |
934 | return host->actual_clock / 1000; | |
935 | ||
936 | /* Clock may be subject to a divisor, fudge it by a factor of 2. */ | |
937 | if (host->ios.clock) | |
938 | return host->ios.clock / 2000; | |
939 | ||
940 | /* How can there be no clock? */ |
941 | WARN_ON_ONCE(1); | |
942 | return 100; /* 100 kHz is minimum possible value */ | |
943 | } | |
944 | ||
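/*
 * Convert a data timeout expressed as nanoseconds plus clock ticks into
 * milliseconds, using the current card clock to scale the tick part.
 */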
945 | static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host, | |
946 | struct mmc_data *data) | |
947 | { | |
948 | unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000); | |
949 | unsigned int khz; | |
950 | ||
951 | if (data->timeout_clks) { | |
952 | khz = mmc_blk_clock_khz(host); | |
953 | ms += DIV_ROUND_UP(data->timeout_clks, khz); | |
954 | } | |
955 | ||
956 | return ms; | |
957 | } | |
958 | ||
0987c6b0 AH |
959 | static inline bool mmc_blk_in_tran_state(u32 status) |
960 | { | |
961 | /* | |
962 | * Some cards mishandle the status bits, so make sure to check both the | |
963 | * busy indication and the card state. | |
964 | */ | |
965 | return status & R1_READY_FOR_DATA && | |
966 | (R1_CURRENT_STATE(status) == R1_STATE_TRAN); | |
967 | } | |
968 | ||
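/*
 * Poll CMD13 until the card is ready for data and back in TRAN state,
 * accumulating any R1 error bits seen in *resp_errs and failing with
 * -ETIMEDOUT once timeout_ms has elapsed.
 */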
c49433fb | 969 | static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, |
0fbfd125 | 970 | struct request *req, u32 *resp_errs) |
c49433fb UH |
971 | { |
972 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | |
973 | int err = 0; | |
974 | u32 status; | |
975 | ||
976 | do { | |
7701885e AH |
977 | bool done = time_after(jiffies, timeout); |
978 | ||
2185bc2c | 979 | err = __mmc_send_status(card, &status, 5); |
c49433fb UH |
980 | if (err) { |
981 | pr_err("%s: error %d requesting status\n", | |
982 | req->rq_disk->disk_name, err); | |
983 | return err; | |
984 | } | |
985 | ||
c89b4851 AH |
986 | /* Accumulate any response error bits seen */ |
987 | if (resp_errs) | |
988 | *resp_errs |= status; | |
c49433fb UH |
989 | |
990 | /* | |
991 | * Timeout if the device never becomes ready for data and never | |
992 | * leaves the program state. | |
993 | */ | |
7701885e | 994 | if (done) { |
0987c6b0 | 995 | pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n", |
c49433fb | 996 | mmc_hostname(card->host), |
0987c6b0 | 997 | req->rq_disk->disk_name, __func__, status); |
c49433fb UH |
998 | return -ETIMEDOUT; |
999 | } | |
1000 | ||
1001 | /* | |
1002 | * Some cards mishandle the status bits, | |
1003 | * so make sure to check both the busy | |
1004 | * indication and the card state. | |
1005 | */ | |
0987c6b0 | 1006 | } while (!mmc_blk_in_tran_state(status)); |
c49433fb UH |
1007 | |
1008 | return err; | |
1009 | } | |
1010 | ||
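/*
 * Reset the card via mmc_hw_reset(), at most once per request type
 * (tracked in md->reset_done), and switch back to the partition the
 * failed request was targeting before it is retried.
 */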
67716327 AH |
1011 | static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, |
1012 | int type) | |
1013 | { | |
1014 | int err; | |
1015 | ||
1016 | if (md->reset_done & type) | |
1017 | return -EEXIST; | |
1018 | ||
1019 | md->reset_done |= type; | |
1020 | err = mmc_hw_reset(host); | |
1021 | /* Ensure we switch back to the correct partition */ | |
1022 | if (err != -EOPNOTSUPP) { | |
fc95e30b UH |
1023 | struct mmc_blk_data *main_md = |
1024 | dev_get_drvdata(&host->card->dev); | |
67716327 AH |
1025 | int part_err; |
1026 | ||
1027 | main_md->part_curr = main_md->part_type; | |
1f797edc | 1028 | part_err = mmc_blk_part_switch(host->card, md->part_type); |
67716327 AH |
1029 | if (part_err) { |
1030 | /* | |
1031 | * We have failed to get back into the correct | |
1032 | * partition, so we need to abort the whole request. | |
1033 | */ | |
1034 | return -ENODEV; | |
1035 | } | |
1036 | } | |
1037 | return err; | |
1038 | } | |
1039 | ||
1040 | static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) | |
1041 | { | |
1042 | md->reset_done &= ~type; | |
1043 | } | |
1044 | ||
5ec12396 LW |
1045 | /* |
1046 | * The non-block commands come back from the block layer after it has queued |
1047 | * and processed them along with all other requests; they are then issued in |
1048 | * this function. |
1049 | */ | |
1050 | static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) | |
1051 | { | |
1052 | struct mmc_queue_req *mq_rq; | |
1053 | struct mmc_card *card = mq->card; | |
1054 | struct mmc_blk_data *md = mq->blkdata; | |
69f7599e | 1055 | struct mmc_blk_ioc_data **idata; |
97548575 | 1056 | bool rpmb_ioctl; |
627c3ccf LW |
1057 | u8 **ext_csd; |
1058 | u32 status; | |
0493f6fe | 1059 | int ret; |
5ec12396 LW |
1060 | int i; |
1061 | ||
1062 | mq_rq = req_to_mmc_queue_req(req); | |
97548575 | 1063 | rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB); |
5ec12396 LW |
1064 | |
1065 | switch (mq_rq->drv_op) { | |
1066 | case MMC_DRV_OP_IOCTL: | |
97548575 | 1067 | case MMC_DRV_OP_IOCTL_RPMB: |
69f7599e | 1068 | idata = mq_rq->drv_op_data; |
7432b49b | 1069 | for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { |
69f7599e | 1070 | ret = __mmc_blk_ioctl_cmd(card, md, idata[i]); |
0493f6fe | 1071 | if (ret) |
5ec12396 LW |
1072 | break; |
1073 | } | |
5ec12396 | 1074 | /* Always switch back to main area after RPMB access */ |
97548575 LW |
1075 | if (rpmb_ioctl) |
1076 | mmc_blk_part_switch(card, 0); | |
0493f6fe LW |
1077 | break; |
1078 | case MMC_DRV_OP_BOOT_WP: | |
1079 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, | |
1080 | card->ext_csd.boot_ro_lock | | |
1081 | EXT_CSD_BOOT_WP_B_PWR_WP_EN, | |
1082 | card->ext_csd.part_time); | |
1083 | if (ret) | |
1084 | pr_err("%s: Locking boot partition ro until next power on failed: %d\n", | |
1085 | md->disk->disk_name, ret); | |
1086 | else | |
1087 | card->ext_csd.boot_ro_lock |= | |
1088 | EXT_CSD_BOOT_WP_B_PWR_WP_EN; | |
5ec12396 | 1089 | break; |
627c3ccf LW |
1090 | case MMC_DRV_OP_GET_CARD_STATUS: |
1091 | ret = mmc_send_status(card, &status); | |
1092 | if (!ret) | |
1093 | ret = status; | |
1094 | break; | |
1095 | case MMC_DRV_OP_GET_EXT_CSD: | |
1096 | ext_csd = mq_rq->drv_op_data; | |
1097 | ret = mmc_get_ext_csd(card, ext_csd); | |
1098 | break; | |
5ec12396 | 1099 | default: |
0493f6fe LW |
1100 | pr_err("%s: unknown driver specific operation\n", |
1101 | md->disk->disk_name); | |
1102 | ret = -EINVAL; | |
5ec12396 LW |
1103 | break; |
1104 | } | |
0493f6fe | 1105 | mq_rq->drv_op_result = ret; |
0fbfd125 | 1106 | blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); |
5ec12396 LW |
1107 | } |
1108 | ||
df061588 | 1109 | static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) |
bd788c96 | 1110 | { |
7db3028e | 1111 | struct mmc_blk_data *md = mq->blkdata; |
bd788c96 AH |
1112 | struct mmc_card *card = md->queue.card; |
1113 | unsigned int from, nr, arg; | |
67716327 | 1114 | int err = 0, type = MMC_BLK_DISCARD; |
2a842aca | 1115 | blk_status_t status = BLK_STS_OK; |
bd788c96 | 1116 | |
bd788c96 | 1117 | if (!mmc_can_erase(card)) { |
2a842aca | 1118 | status = BLK_STS_NOTSUPP; |
8cb6ed17 | 1119 | goto fail; |
bd788c96 AH |
1120 | } |
1121 | ||
1122 | from = blk_rq_pos(req); | |
1123 | nr = blk_rq_sectors(req); | |
1124 | ||
b3bf9153 KP |
1125 | if (mmc_can_discard(card)) |
1126 | arg = MMC_DISCARD_ARG; | |
1127 | else if (mmc_can_trim(card)) | |
bd788c96 AH |
1128 | arg = MMC_TRIM_ARG; |
1129 | else | |
1130 | arg = MMC_ERASE_ARG; | |
164b50b3 GU |
1131 | do { |
1132 | err = 0; | |
1133 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | |
1134 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
1135 | INAND_CMD38_ARG_EXT_CSD, | |
1136 | arg == MMC_TRIM_ARG ? | |
1137 | INAND_CMD38_ARG_TRIM : | |
1138 | INAND_CMD38_ARG_ERASE, | |
1139 | 0); | |
1140 | } | |
1141 | if (!err) | |
1142 | err = mmc_erase(card, from, nr, arg); | |
1143 | } while (err == -EIO && !mmc_blk_reset(md, card->host, type)); | |
2a842aca CH |
1144 | if (err) |
1145 | status = BLK_STS_IOERR; | |
1146 | else | |
67716327 | 1147 | mmc_blk_reset_success(md, type); |
8cb6ed17 | 1148 | fail: |
0fbfd125 | 1149 | blk_mq_end_request(req, status); |
bd788c96 AH |
1150 | } |
1151 | ||
df061588 | 1152 | static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, |
49804548 AH |
1153 | struct request *req) |
1154 | { | |
7db3028e | 1155 | struct mmc_blk_data *md = mq->blkdata; |
49804548 | 1156 | struct mmc_card *card = md->queue.card; |
775a9362 | 1157 | unsigned int from, nr, arg; |
67716327 | 1158 | int err = 0, type = MMC_BLK_SECDISCARD; |
2a842aca | 1159 | blk_status_t status = BLK_STS_OK; |
49804548 | 1160 | |
775a9362 | 1161 | if (!(mmc_can_secure_erase_trim(card))) { |
2a842aca | 1162 | status = BLK_STS_NOTSUPP; |
49804548 AH |
1163 | goto out; |
1164 | } | |
1165 | ||
28302812 AH |
1166 | from = blk_rq_pos(req); |
1167 | nr = blk_rq_sectors(req); | |
1168 | ||
775a9362 ME |
1169 | if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) |
1170 | arg = MMC_SECURE_TRIM1_ARG; | |
1171 | else | |
1172 | arg = MMC_SECURE_ERASE_ARG; | |
d9ddd629 | 1173 | |
67716327 | 1174 | retry: |
6a7a6b45 AW |
1175 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
1176 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
1177 | INAND_CMD38_ARG_EXT_CSD, | |
1178 | arg == MMC_SECURE_TRIM1_ARG ? | |
1179 | INAND_CMD38_ARG_SECTRIM1 : | |
1180 | INAND_CMD38_ARG_SECERASE, | |
1181 | 0); | |
1182 | if (err) | |
28302812 | 1183 | goto out_retry; |
6a7a6b45 | 1184 | } |
28302812 | 1185 | |
49804548 | 1186 | err = mmc_erase(card, from, nr, arg); |
28302812 AH |
1187 | if (err == -EIO) |
1188 | goto out_retry; | |
2a842aca CH |
1189 | if (err) { |
1190 | status = BLK_STS_IOERR; | |
28302812 | 1191 | goto out; |
2a842aca | 1192 | } |
28302812 AH |
1193 | |
1194 | if (arg == MMC_SECURE_TRIM1_ARG) { | |
6a7a6b45 AW |
1195 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
1196 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
1197 | INAND_CMD38_ARG_EXT_CSD, | |
1198 | INAND_CMD38_ARG_SECTRIM2, | |
1199 | 0); | |
1200 | if (err) | |
28302812 | 1201 | goto out_retry; |
6a7a6b45 | 1202 | } |
28302812 | 1203 | |
49804548 | 1204 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); |
28302812 AH |
1205 | if (err == -EIO) |
1206 | goto out_retry; | |
2a842aca CH |
1207 | if (err) { |
1208 | status = BLK_STS_IOERR; | |
28302812 | 1209 | goto out; |
2a842aca | 1210 | } |
6a7a6b45 | 1211 | } |
28302812 | 1212 | |
28302812 AH |
1213 | out_retry: |
1214 | if (err && !mmc_blk_reset(md, card->host, type)) | |
67716327 AH |
1215 | goto retry; |
1216 | if (!err) | |
1217 | mmc_blk_reset_success(md, type); | |
28302812 | 1218 | out: |
0fbfd125 | 1219 | blk_mq_end_request(req, status); |
49804548 AH |
1220 | } |
1221 | ||
df061588 | 1222 | static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) |
f4c5522b | 1223 | { |
7db3028e | 1224 | struct mmc_blk_data *md = mq->blkdata; |
881d1c25 SJ |
1225 | struct mmc_card *card = md->queue.card; |
1226 | int ret = 0; | |
1227 | ||
1228 | ret = mmc_flush_cache(card); | |
0fbfd125 | 1229 | blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); |
f4c5522b AW |
1230 | } |
1231 | ||
1232 | /* | |
1233 | * Reformat current write as a reliable write, supporting | |
1234 | * both legacy and the enhanced reliable write MMC cards. | |
1235 | * In each transfer we'll handle only as much as a single | |
1236 | * reliable write can handle, thus finish the request in | |
1237 | * partial completions. | |
1238 | */ | |
d0c97cfb AW |
1239 | static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, |
1240 | struct mmc_card *card, | |
1241 | struct request *req) | |
f4c5522b | 1242 | { |
f4c5522b AW |
1243 | if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { |
1244 | /* Legacy mode imposes restrictions on transfers. */ | |
9cb38f7a | 1245 | if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors)) |
f4c5522b AW |
1246 | brq->data.blocks = 1; |
1247 | ||
1248 | if (brq->data.blocks > card->ext_csd.rel_sectors) | |
1249 | brq->data.blocks = card->ext_csd.rel_sectors; | |
1250 | else if (brq->data.blocks < card->ext_csd.rel_sectors) | |
1251 | brq->data.blocks = 1; | |
1252 | } | |
f4c5522b AW |
1253 | } |
1254 | ||
f47a1fe3 AH |
1255 | #define CMD_ERRORS_EXCL_OOR \ |
1256 | (R1_ADDRESS_ERROR | /* Misaligned address */ \ | |
4c2b8f26 RKAL |
1257 | R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ |
1258 | R1_WP_VIOLATION | /* Tried to write to protected block */ \ | |
a04e6bae | 1259 | R1_CARD_ECC_FAILED | /* Card ECC failed */ \ |
4c2b8f26 RKAL |
1260 | R1_CC_ERROR | /* Card controller error */ \ |
1261 | R1_ERROR) /* General/unknown error */ | |
1262 | ||
f47a1fe3 AH |
1263 | #define CMD_ERRORS \ |
1264 | (CMD_ERRORS_EXCL_OOR | \ | |
1265 | R1_OUT_OF_RANGE) /* Command argument out of range */ \ | |
1266 | ||
d83c2dba | 1267 | static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) |
a04e6bae | 1268 | { |
d83c2dba | 1269 | u32 val; |
a04e6bae | 1270 | |
d83c2dba SL |
1271 | /* |
1272 | * Per the SD specification(physical layer version 4.10)[1], | |
1273 | * section 4.3.3, it explicitly states that "When the last | |
1274 | * block of user area is read using CMD18, the host should | |
1275 | * ignore OUT_OF_RANGE error that may occur even the sequence | |
1276 | * is correct". And JESD84-B51 for eMMC also has a similar | |
1277 | * statement on section 6.8.3. | |
1278 | * | |
1279 | * Multiple block read/write could be done by either predefined | |
1280 | * method, namely CMD23, or open-ending mode. For open-ending mode, | |
1281 | * we should ignore the OUT_OF_RANGE error as it's normal behaviour. | |
1282 | * | |
1283 | * However the spec[1] doesn't tell us whether we should also | |
1284 | * ignore that for predefined method. But per the spec[1], section | |
1285 | * 4.15 Set Block Count Command, it says "If illegal block count |
1286 | * is set, out of range error will be indicated during read/write | |
1287 | * operation (For example, data transfer is stopped at user area | |
1288 | * boundary)." In another word, we could expect a out of range error | |
1289 | * in the response for the following CMD18/25. And if argument of | |
1290 | * CMD23 + the argument of CMD18/25 exceed the max number of blocks, | |
1291 | * we could also expect to get a -ETIMEDOUT or any error number from | |
1292 | * the host drivers due to missing data response(for write)/data(for | |
1293 | * read), as the cards will stop the data transfer by itself per the | |
1294 | * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode. | |
1295 | */ | |
1296 | ||
1297 | if (!brq->stop.error) { | |
1298 | bool oor_with_open_end; | |
1299 | /* If there is no error yet, check R1 response */ | |
1300 | ||
1301 | val = brq->stop.resp[0] & CMD_ERRORS; | |
1302 | oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc; | |
1303 | ||
1304 | if (val && !oor_with_open_end) | |
1305 | brq->stop.error = -EIO; | |
1306 | } | |
a04e6bae WS |
1307 | } |
1308 | ||
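/*
 * Build the data portion of a read/write request: fix the 512-byte
 * block size, count and address, clamp to the host's limits, apply the
 * reliable-write and data-tag flags when appropriate, and map the
 * request's scatterlist.
 */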
ca5717f7 | 1309 | static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, |
d3377c01 AH |
1310 | int disable_multi, bool *do_rel_wr_p, |
1311 | bool *do_data_tag_p) | |
1da177e4 | 1312 | { |
ca5717f7 AH |
1313 | struct mmc_blk_data *md = mq->blkdata; |
1314 | struct mmc_card *card = md->queue.card; | |
54d49d77 | 1315 | struct mmc_blk_request *brq = &mqrq->brq; |
67e69d52 | 1316 | struct request *req = mmc_queue_req_to_req(mqrq); |
d3377c01 | 1317 | bool do_rel_wr, do_data_tag; |
1da177e4 | 1318 | |
f4c5522b AW |
1319 | /* |
1320 | * Reliable writes are used to implement Forced Unit Access and | |
d3df0465 | 1321 | * are supported only on MMCs. |
f4c5522b | 1322 | */ |
d3377c01 AH |
1323 | do_rel_wr = (req->cmd_flags & REQ_FUA) && |
1324 | rq_data_dir(req) == WRITE && | |
1325 | (md->flags & MMC_BLK_REL_WR); | |
f4c5522b | 1326 | |
54d49d77 | 1327 | memset(brq, 0, sizeof(struct mmc_blk_request)); |
ca5717f7 | 1328 | |
54d49d77 | 1329 | brq->mrq.data = &brq->data; |
93482b3d | 1330 | brq->mrq.tag = req->tag; |
1da177e4 | 1331 | |
54d49d77 PF |
1332 | brq->stop.opcode = MMC_STOP_TRANSMISSION; |
1333 | brq->stop.arg = 0; | |
ca5717f7 AH |
1334 | |
1335 | if (rq_data_dir(req) == READ) { | |
1336 | brq->data.flags = MMC_DATA_READ; | |
1337 | brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; | |
1338 | } else { | |
1339 | brq->data.flags = MMC_DATA_WRITE; | |
1340 | brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; | |
1341 | } | |
1342 | ||
1343 | brq->data.blksz = 512; | |
54d49d77 | 1344 | brq->data.blocks = blk_rq_sectors(req); |
93482b3d AH |
1345 | brq->data.blk_addr = blk_rq_pos(req); |
1346 | ||
1347 | /* | |
1348 | * The command queue supports 2 priorities: "high" (1) and "simple" (0). | |
1349 | * The eMMC will give "high" priority tasks priority over "simple" | |
1350 | * priority tasks. Here we always set "simple" priority by not setting | |
1351 | * MMC_DATA_PRIO. | |
1352 | */ | |
6a79e391 | 1353 | |
54d49d77 PF |
1354 | /* |
1355 | * The block layer doesn't support all sector count | |
1356 | * restrictions, so we need to be prepared for too big | |
1357 | * requests. | |
1358 | */ | |
1359 | if (brq->data.blocks > card->host->max_blk_count) | |
1360 | brq->data.blocks = card->host->max_blk_count; | |
1da177e4 | 1361 | |
2bf22b39 PW |
1362 | if (brq->data.blocks > 1) { |
1363 | /* | |
1364 | * After a read error, we redo the request one sector | |
1365 | * at a time in order to accurately determine which | |
1366 | * sectors can be read successfully. | |
1367 | */ | |
1368 | if (disable_multi) | |
1369 | brq->data.blocks = 1; | |
1370 | ||
2e47e842 KM |
1371 | /* |
1372 | * Some controllers have HW issues while operating | |
1373 | * in multiple I/O mode | |
1374 | */ | |
1375 | if (card->host->ops->multi_io_quirk) | |
1376 | brq->data.blocks = card->host->ops->multi_io_quirk(card, | |
1377 | (rq_data_dir(req) == READ) ? | |
1378 | MMC_DATA_READ : MMC_DATA_WRITE, | |
1379 | brq->data.blocks); | |
2bf22b39 | 1380 | } |
d0c97cfb | 1381 | |
93482b3d | 1382 | if (do_rel_wr) { |
ca5717f7 | 1383 | mmc_apply_rel_rw(brq, card, req); |
93482b3d AH |
1384 | brq->data.flags |= MMC_DATA_REL_WR; |
1385 | } | |
ca5717f7 AH |
1386 | |
1387 | /* | |
1388 | * Data tag is used only during writing meta data to speed | |
1389 | * up write and any subsequent read of this meta data | |
1390 | */ | |
d3377c01 AH |
1391 | do_data_tag = card->ext_csd.data_tag_unit_size && |
1392 | (req->cmd_flags & REQ_META) && | |
1393 | (rq_data_dir(req) == WRITE) && | |
1394 | ((brq->data.blocks * brq->data.blksz) >= | |
1395 | card->ext_csd.data_tag_unit_size); | |
ca5717f7 | 1396 | |
93482b3d AH |
1397 | if (do_data_tag) |
1398 | brq->data.flags |= MMC_DATA_DAT_TAG; | |
1399 | ||
ca5717f7 AH |
1400 | mmc_set_data_timeout(&brq->data, card); |
1401 | ||
1402 | brq->data.sg = mqrq->sg; | |
1403 | brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); | |
1404 | ||
1405 | /* | |
1406 | * Adjust the sg list so it is the same size as the | |
1407 | * request. | |
1408 | */ | |
1409 | if (brq->data.blocks != blk_rq_sectors(req)) { | |
1410 | int i, data_size = brq->data.blocks << 9; | |
1411 | struct scatterlist *sg; | |
1412 | ||
1413 | for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { | |
1414 | data_size -= sg->length; | |
1415 | if (data_size <= 0) { | |
1416 | sg->length += data_size; | |
1417 | i++; | |
1418 | break; | |
1419 | } | |
1420 | } | |
1421 | brq->data.sg_len = i; | |
1422 | } | |
1423 | ||
d3377c01 AH |
1424 | if (do_rel_wr_p) |
1425 | *do_rel_wr_p = do_rel_wr; | |
1426 | ||
1427 | if (do_data_tag_p) | |
1428 | *do_data_tag_p = do_data_tag; | |
ca5717f7 AH |
1429 | } |
1430 | ||
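/*
 * Completion handling for requests issued through a Command Queue
 * Engine (CQE): failed requests are requeued up to MMC_CQE_RETRIES
 * times before being errored, and CQE recovery resets the card if
 * mmc_cqe_recovery() fails.
 */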
1e8e55b6 AH |
1431 | #define MMC_CQE_RETRIES 2 |
1432 | ||
1433 | static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) | |
1434 | { | |
1435 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1436 | struct mmc_request *mrq = &mqrq->brq.mrq; | |
1437 | struct request_queue *q = req->q; | |
1438 | struct mmc_host *host = mq->card->host; | |
1439 | unsigned long flags; | |
1440 | bool put_card; | |
1441 | int err; | |
1442 | ||
1443 | mmc_cqe_post_req(host, mrq); | |
1444 | ||
1445 | if (mrq->cmd && mrq->cmd->error) | |
1446 | err = mrq->cmd->error; | |
1447 | else if (mrq->data && mrq->data->error) | |
1448 | err = mrq->data->error; | |
1449 | else | |
1450 | err = 0; | |
1451 | ||
1452 | if (err) { | |
1453 | if (mqrq->retries++ < MMC_CQE_RETRIES) | |
1454 | blk_mq_requeue_request(req, true); | |
1455 | else | |
1456 | blk_mq_end_request(req, BLK_STS_IOERR); | |
1457 | } else if (mrq->data) { | |
1458 | if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered)) | |
1459 | blk_mq_requeue_request(req, true); | |
1460 | else | |
1461 | __blk_mq_end_request(req, BLK_STS_OK); | |
1462 | } else { | |
1463 | blk_mq_end_request(req, BLK_STS_OK); | |
1464 | } | |
1465 | ||
1466 | spin_lock_irqsave(q->queue_lock, flags); | |
1467 | ||
1468 | mq->in_flight[mmc_issue_type(mq, req)] -= 1; | |
1469 | ||
1470 | put_card = (mmc_tot_in_flight(mq) == 0); | |
1471 | ||
1472 | mmc_cqe_check_busy(mq); | |
1473 | ||
1474 | spin_unlock_irqrestore(q->queue_lock, flags); | |
1475 | ||
1476 | if (!mq->cqe_busy) | |
1477 | blk_mq_run_hw_queues(q, true); | |
1478 | ||
1479 | if (put_card) | |
1480 | mmc_put_card(mq->card, &mq->ctx); | |
1481 | } | |
1482 | ||
1483 | void mmc_blk_cqe_recovery(struct mmc_queue *mq) | |
1484 | { | |
1485 | struct mmc_card *card = mq->card; | |
1486 | struct mmc_host *host = card->host; | |
1487 | int err; | |
1488 | ||
1489 | pr_debug("%s: CQE recovery start\n", mmc_hostname(host)); | |
1490 | ||
1491 | err = mmc_cqe_recovery(host); | |
1492 | if (err) | |
1493 | mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); | |
1494 | else | |
1495 | mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); | |
1496 | ||
1497 | pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); | |
1498 | } | |
1499 | ||
1500 | static void mmc_blk_cqe_req_done(struct mmc_request *mrq) | |
1501 | { | |
1502 | struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, | |
1503 | brq.mrq); | |
1504 | struct request *req = mmc_queue_req_to_req(mqrq); | |
1505 | struct request_queue *q = req->q; | |
1506 | struct mmc_queue *mq = q->queuedata; | |
1507 | ||
1508 | /* | |
1509 | * Block layer timeouts race with completions which means the normal | |
1510 | * completion path cannot be used during recovery. | |
1511 | */ | |
1512 | if (mq->in_recovery) | |
1513 | mmc_blk_cqe_complete_rq(mq, req); | |
1514 | else | |
1515 | blk_mq_complete_request(req); | |
1516 | } | |
1517 | ||
1518 | static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq) | |
1519 | { | |
1520 | mrq->done = mmc_blk_cqe_req_done; | |
1521 | mrq->recovery_notifier = mmc_cqe_recovery_notifier; | |
1522 | ||
1523 | return mmc_cqe_start_req(host, mrq); | |
1524 | } | |
1525 | ||
1526 | static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq, | |
1527 | struct request *req) | |
1528 | { | |
1529 | struct mmc_blk_request *brq = &mqrq->brq; | |
1530 | ||
1531 | memset(brq, 0, sizeof(*brq)); | |
1532 | ||
1533 | brq->mrq.cmd = &brq->cmd; | |
1534 | brq->mrq.tag = req->tag; | |
1535 | ||
1536 | return &brq->mrq; | |
1537 | } | |
1538 | ||
1539 | static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req) | |
1540 | { | |
1541 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1542 | struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req); | |
1543 | ||
1544 | mrq->cmd->opcode = MMC_SWITCH; | |
1545 | mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | | |
1546 | (EXT_CSD_FLUSH_CACHE << 16) | | |
1547 | (1 << 8) | | |
1548 | EXT_CSD_CMD_SET_NORMAL; | |
1549 | mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B; | |
1550 | ||
1551 | return mmc_blk_cqe_start_req(mq->card->host, mrq); | |
1552 | } | |
1553 | ||
1554 | static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req) | |
1555 | { | |
1556 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1557 | ||
1558 | mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL); | |
1559 | ||
1560 | return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq); | |
1561 | } | |
1562 | ||
ca5717f7 AH |
1563 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
1564 | struct mmc_card *card, | |
1565 | int disable_multi, | |
1566 | struct mmc_queue *mq) | |
1567 | { | |
1568 | u32 readcmd, writecmd; | |
1569 | struct mmc_blk_request *brq = &mqrq->brq; | |
67e69d52 | 1570 | struct request *req = mmc_queue_req_to_req(mqrq); |
ca5717f7 AH |
1571 | struct mmc_blk_data *md = mq->blkdata; |
1572 | bool do_rel_wr, do_data_tag; | |
1573 | ||
1574 | mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag); | |
1575 | ||
1576 | brq->mrq.cmd = &brq->cmd; | |
1577 | ||
1578 | brq->cmd.arg = blk_rq_pos(req); | |
1579 | if (!mmc_card_blockaddr(card)) | |
1580 | brq->cmd.arg <<= 9; | |
1581 | brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; | |
1582 | ||
54d49d77 PF |
1583 | if (brq->data.blocks > 1 || do_rel_wr) { |
1584 | /* SPI multiblock writes terminate using a special | |
1585 | * token, not a STOP_TRANSMISSION request. | |
d0c97cfb | 1586 | */ |
54d49d77 PF |
1587 | if (!mmc_host_is_spi(card->host) || |
1588 | rq_data_dir(req) == READ) | |
1589 | brq->mrq.stop = &brq->stop; | |
1590 | readcmd = MMC_READ_MULTIPLE_BLOCK; | |
1591 | writecmd = MMC_WRITE_MULTIPLE_BLOCK; | |
1592 | } else { | |
1593 | brq->mrq.stop = NULL; | |
1594 | readcmd = MMC_READ_SINGLE_BLOCK; | |
1595 | writecmd = MMC_WRITE_BLOCK; | |
1596 | } | |
ca5717f7 | 1597 | brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd; |
4265900e | 1598 | |
54d49d77 PF |
1599 | /* |
1600 | * Pre-defined multi-block transfers are preferable to | |
1601 | * open-ended ones (and necessary for reliable writes). | |
1602 | * However, it is not sufficient to just send CMD23, | |
1603 | * and avoid the final CMD12, as on an error condition | |
1604 | * CMD12 (stop) needs to be sent anyway. This, coupled | |
1605 | * with Auto-CMD23 enhancements provided by some | |
1606 | * hosts, means that the complexity of dealing | |
1607 | * with this is best left to the host. If CMD23 is | |
1608 | * supported by card and host, we'll fill sbc in and let | |
1609 | * the host deal with handling it correctly. This means | |
1610 | * that for hosts that don't expose MMC_CAP_CMD23, no | |
1611 | * change of behavior will be observed. | |
1612 | * | |
1613 | * N.B: Some MMC cards experience perf degradation. | |
1614 | * We'll avoid using CMD23-bounded multiblock writes for | |
1615 | * these, while retaining features like reliable writes. | |
1616 | */ | |
4265900e SD |
1617 | if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && |
1618 | (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || | |
1619 | do_data_tag)) { | |
54d49d77 PF |
1620 | brq->sbc.opcode = MMC_SET_BLOCK_COUNT; |
1621 | brq->sbc.arg = brq->data.blocks | | |
4265900e SD |
1622 | (do_rel_wr ? (1 << 31) : 0) | |
1623 | (do_data_tag ? (1 << 29) : 0); | |
54d49d77 PF |
1624 | brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; |
1625 | brq->mrq.sbc = &brq->sbc; | |
1626 | } | |
54d49d77 | 1627 | } |
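/*
 * Annotation, not part of the driver: the CMD23 argument assembled in
 * mmc_blk_rw_rq_prep() above packs the block count into the low bits
 * (bits [15:0] per the eMMC spec), with bit 31 marking a reliable write
 * and bit 29 a data tag. For example, a reliable write of 8 blocks is
 * encoded as (1 << 31) | 8 == 0x80000008.
 */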
6a79e391 | 1628 | |
81196976 | 1629 | #define MMC_MAX_RETRIES 5 |
7eb43d53 | 1630 | #define MMC_DATA_RETRIES 2 |
81196976 AH |
1631 | #define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1) |
1632 | ||
7eb43d53 AH |
1633 | static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout) |
1634 | { | |
1635 | struct mmc_command cmd = { | |
1636 | .opcode = MMC_STOP_TRANSMISSION, | |
1637 | .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC, | |
1638 | /* Some hosts wait for busy anyway, so provide a busy timeout */ | |
1639 | .busy_timeout = timeout, | |
1640 | }; | |
1641 | ||
1642 | return mmc_wait_for_cmd(card->host, &cmd, 5); | |
1643 | } | |
1644 | ||
1645 | static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) | |
1646 | { | |
1647 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1648 | struct mmc_blk_request *brq = &mqrq->brq; | |
1649 | unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data); | |
1650 | int err; | |
1651 | ||
1652 | mmc_retune_hold_now(card->host); | |
1653 | ||
1654 | mmc_blk_send_stop(card, timeout); | |
1655 | ||
0fbfd125 | 1656 | err = card_busy_detect(card, timeout, req, NULL); |
7eb43d53 AH |
1657 | |
1658 | mmc_retune_release(card->host); | |
1659 | ||
1660 | return err; | |
1661 | } | |
1662 | ||
81196976 AH |
1663 | #define MMC_READ_SINGLE_RETRIES 2 |
1664 | ||
1665 | /* Single sector read during recovery */ | |
1666 | static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) | |
1667 | { | |
1668 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1669 | struct mmc_request *mrq = &mqrq->brq.mrq; | |
1670 | struct mmc_card *card = mq->card; | |
1671 | struct mmc_host *host = card->host; | |
1672 | blk_status_t error = BLK_STS_OK; | |
1673 | int retries = 0; | |
1674 | ||
1675 | do { | |
1676 | u32 status; | |
1677 | int err; | |
1678 | ||
1679 | mmc_blk_rw_rq_prep(mqrq, card, 1, mq); | |
1680 | ||
1681 | mmc_wait_for_req(host, mrq); | |
1682 | ||
1683 | err = mmc_send_status(card, &status); | |
1684 | if (err) | |
1685 | goto error_exit; | |
1686 | ||
1687 | if (!mmc_host_is_spi(host) && | |
7eb43d53 AH |
1688 | !mmc_blk_in_tran_state(status)) { |
1689 | err = mmc_blk_fix_state(card, req); | |
81196976 AH |
1690 | if (err) |
1691 | goto error_exit; | |
1692 | } | |
1693 | ||
1694 | if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES) | |
1695 | continue; | |
1696 | ||
1697 | retries = 0; | |
1698 | ||
1699 | if (mrq->cmd->error || | |
1700 | mrq->data->error || | |
1701 | (!mmc_host_is_spi(host) && | |
1702 | (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS))) | |
1703 | error = BLK_STS_IOERR; | |
1704 | else | |
1705 | error = BLK_STS_OK; | |
1706 | ||
1707 | } while (blk_update_request(req, error, 512)); | |
1708 | ||
1709 | return; | |
1710 | ||
1711 | error_exit: | |
1712 | mrq->data->bytes_xfered = 0; | |
1713 | blk_update_request(req, BLK_STS_IOERR, 512); | |
1714 | /* Let it try the remaining request again */ | |
1715 | if (mqrq->retries > MMC_MAX_RETRIES - 1) | |
1716 | mqrq->retries = MMC_MAX_RETRIES - 1; | |
1717 | } | |
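/*
 * Note on mmc_blk_read_single() above: each loop iteration prepares a
 * single-block transfer (disable_multi = 1) and blk_update_request()
 * then retires exactly one 512-byte sector, so a failing multi-block
 * read is re-driven sector by sector until the whole request has either
 * completed or errored out.
 */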
1718 | ||
7eb43d53 AH |
1719 | static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq) |
1720 | { | |
1721 | return !!brq->mrq.sbc; | |
1722 | } | |
1723 | ||
1724 | static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq) | |
1725 | { | |
1726 | return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR; | |
1727 | } | |
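/*
 * Annotation: the OUT_OF_RANGE status bit is only trusted when CMD23 was
 * sent (brq->mrq.sbc is set). An open-ended multi-block read that runs up
 * to the end of the device can trip that bit on the stop command even
 * though the requested data transferred correctly, so it is masked out of
 * the error bits in that case.
 */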
1728 | ||
1729 | /* | |
1730 | * Check for errors the host controller driver might not have seen, such as | |
1731 | * response mode errors or invalid card state. | |
1732 | */ | |
1733 | static bool mmc_blk_status_error(struct request *req, u32 status) | |
1734 | { | |
1735 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1736 | struct mmc_blk_request *brq = &mqrq->brq; | |
1737 | struct mmc_queue *mq = req->q->queuedata; | |
1738 | u32 stop_err_bits; | |
1739 | ||
1740 | if (mmc_host_is_spi(mq->card->host)) | |
aa950144 | 1741 | return false; |
7eb43d53 AH |
1742 | |
1743 | stop_err_bits = mmc_blk_stop_err_bits(brq); | |
1744 | ||
1745 | return brq->cmd.resp[0] & CMD_ERRORS || | |
1746 | brq->stop.resp[0] & stop_err_bits || | |
1747 | status & stop_err_bits || | |
1748 | (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status)); | |
1749 | } | |
1750 | ||
1751 | static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq) | |
1752 | { | |
1753 | return !brq->sbc.error && !brq->cmd.error && | |
1754 | !(brq->cmd.resp[0] & CMD_ERRORS); | |
1755 | } | |
1756 | ||
1757 | /* | |
1758 | * Requests are completed by mmc_blk_mq_complete_rq() which sets simple | |
1759 | * policy: | |
1760 | * 1. A request that has transferred at least some data is considered | |
1761 | * successful and will be requeued if there is remaining data to | |
1762 | * transfer. | |
1763 | * 2. Otherwise the number of retries is incremented and the request | |
1764 | * will be requeued if there are remaining retries. | |
1765 | * 3. Otherwise the request will be errored out. | |
1766 | * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and | |
1767 | * mqrq->retries. So there are only 4 possible actions here: | |
1768 | * 1. do not accept the bytes_xfered value i.e. set it to zero | |
1769 | * 2. change mqrq->retries to determine the number of retries | |
1770 | * 3. try to reset the card | |
1771 | * 4. read one sector at a time | |
1772 | */ | |
81196976 AH |
1773 | static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) |
1774 | { | |
1775 | int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; | |
1776 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1777 | struct mmc_blk_request *brq = &mqrq->brq; | |
1778 | struct mmc_blk_data *md = mq->blkdata; | |
1779 | struct mmc_card *card = mq->card; | |
7eb43d53 AH |
1780 | u32 status; |
1781 | u32 blocks; | |
1782 | int err; | |
81196976 | 1783 | |
7eb43d53 AH |
1784 | /* |
1785 | * Some errors the host driver might not have seen. Set the number of | |
1786 | * bytes transferred to zero in that case. | |
1787 | */ | |
1788 | err = __mmc_send_status(card, &status, 0); | |
1789 | if (err || mmc_blk_status_error(req, status)) | |
1790 | brq->data.bytes_xfered = 0; | |
81196976 AH |
1791 | |
1792 | mmc_retune_release(card->host); | |
1793 | ||
1794 | /* | |
7eb43d53 AH |
1795 | * Try again to get the status. This also provides an opportunity for |
1796 | * re-tuning. | |
81196976 | 1797 | */ |
7eb43d53 AH |
1798 | if (err) |
1799 | err = __mmc_send_status(card, &status, 0); | |
81196976 | 1800 | |
7eb43d53 AH |
1801 | /* |
1802 | * Nothing more to do after the number of bytes transferred has been | |
1803 | * updated and there is no card. | |
1804 | */ | |
1805 | if (err && mmc_detect_card_removed(card->host)) | |
1806 | return; | |
81196976 | 1807 | |
7eb43d53 AH |
1808 | /* Try to get back to "tran" state */ |
1809 | if (!mmc_host_is_spi(mq->card->host) && | |
1810 | (err || !mmc_blk_in_tran_state(status))) | |
1811 | err = mmc_blk_fix_state(mq->card, req); | |
1812 | ||
1813 | /* | |
1814 | * Special case for SD cards where the card might record the number of | |
1815 | * blocks written. | |
1816 | */ | |
1817 | if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) && | |
1818 | rq_data_dir(req) == WRITE) { | |
1819 | if (mmc_sd_num_wr_blocks(card, &blocks)) | |
1820 | brq->data.bytes_xfered = 0; | |
1821 | else | |
1822 | brq->data.bytes_xfered = blocks << 9; | |
81196976 | 1823 | } |
7eb43d53 AH |
1824 | |
1825 | /* Reset if the card is in a bad state */ | |
1826 | if (!mmc_host_is_spi(mq->card->host) && | |
1827 | err && mmc_blk_reset(md, card->host, type)) { | |
1828 | pr_err("%s: recovery failed!\n", req->rq_disk->disk_name); | |
81196976 | 1829 | mqrq->retries = MMC_NO_RETRIES; |
7eb43d53 AH |
1830 | return; |
1831 | } | |
1832 | ||
1833 | /* | |
1834 | * If anything was done, just return; anything remaining | |
1835 | * on the request will get requeued. | |
1836 | */ | |
1837 | if (brq->data.bytes_xfered) | |
1838 | return; | |
1839 | ||
1840 | /* Reset before last retry */ | |
1841 | if (mqrq->retries + 1 == MMC_MAX_RETRIES) | |
1842 | mmc_blk_reset(md, card->host, type); | |
1843 | ||
1844 | /* Command errors fail fast, so use all MMC_MAX_RETRIES */ | |
1845 | if (brq->sbc.error || brq->cmd.error) | |
1846 | return; | |
1847 | ||
1848 | /* Reduce the remaining retries for data errors */ | |
1849 | if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) { | |
1850 | mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES; | |
1851 | return; | |
1852 | } | |
1853 | ||
1854 | /* FIXME: Missing single sector read for large sector size */ | |
1855 | if (!mmc_large_sector(card) && rq_data_dir(req) == READ && | |
1856 | brq->data.blocks > 1) { | |
1857 | /* Read one sector at a time */ | |
1858 | mmc_blk_read_single(mq, req); | |
1859 | return; | |
81196976 AH |
1860 | } |
1861 | } | |
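/*
 * Annotation, a worked trace of the retry accounting above (assuming
 * mqrq->retries starts at 0): with MMC_MAX_RETRIES == 5, a request that
 * keeps failing on its command is requeued five times by
 * mmc_blk_mq_complete_rq(), with a card reset just before the final
 * attempt. Pure data errors bump mqrq->retries to
 * MMC_MAX_RETRIES - MMC_DATA_RETRIES, leaving only two further attempts.
 */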
1862 | ||
10f21df4 AH |
1863 | static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) |
1864 | { | |
1865 | mmc_blk_eval_resp_error(brq); | |
1866 | ||
1867 | return brq->sbc.error || brq->cmd.error || brq->stop.error || | |
1868 | brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; | |
1869 | } | |
1870 | ||
88a51646 AH |
1871 | static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) |
1872 | { | |
1873 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
f47a1fe3 | 1874 | u32 status = 0; |
88a51646 AH |
1875 | int err; |
1876 | ||
1877 | if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) | |
1878 | return 0; | |
1879 | ||
0fbfd125 | 1880 | err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status); |
88a51646 | 1881 | |
f47a1fe3 AH |
1882 | /* |
1883 | * Do not assume data transferred correctly if there are any error bits | |
1884 | * set. | |
1885 | */ | |
1886 | if (status & mmc_blk_stop_err_bits(&mqrq->brq)) { | |
1887 | mqrq->brq.data.bytes_xfered = 0; | |
88a51646 AH |
1888 | err = err ? err : -EIO; |
1889 | } | |
1890 | ||
f47a1fe3 AH |
1891 | /* Copy the exception bit so it will be seen later on */ |
1892 | if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT) | |
1893 | mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT; | |
1894 | ||
88a51646 AH |
1895 | return err; |
1896 | } | |
1897 | ||
10f21df4 AH |
1898 | static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq, |
1899 | struct request *req) | |
1900 | { | |
1901 | int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; | |
1902 | ||
1903 | mmc_blk_reset_success(mq->blkdata, type); | |
1904 | } | |
1905 | ||
81196976 AH |
1906 | static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req) |
1907 | { | |
1908 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1909 | unsigned int nr_bytes = mqrq->brq.data.bytes_xfered; | |
1910 | ||
1911 | if (nr_bytes) { | |
1912 | if (blk_update_request(req, BLK_STS_OK, nr_bytes)) | |
1913 | blk_mq_requeue_request(req, true); | |
1914 | else | |
1915 | __blk_mq_end_request(req, BLK_STS_OK); | |
1916 | } else if (!blk_rq_bytes(req)) { | |
1917 | __blk_mq_end_request(req, BLK_STS_IOERR); | |
1918 | } else if (mqrq->retries++ < MMC_MAX_RETRIES) { | |
1919 | blk_mq_requeue_request(req, true); | |
1920 | } else { | |
1921 | if (mmc_card_removed(mq->card)) | |
1922 | req->rq_flags |= RQF_QUIET; | |
1923 | blk_mq_end_request(req, BLK_STS_IOERR); | |
1924 | } | |
1925 | } | |
1926 | ||
1927 | static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq, | |
1928 | struct mmc_queue_req *mqrq) | |
1929 | { | |
1930 | return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) && | |
1931 | (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT || | |
1932 | mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT); | |
1933 | } | |
1934 | ||
1935 | static void mmc_blk_urgent_bkops(struct mmc_queue *mq, | |
1936 | struct mmc_queue_req *mqrq) | |
1937 | { | |
1938 | if (mmc_blk_urgent_bkops_needed(mq, mqrq)) | |
1939 | mmc_start_bkops(mq->card, true); | |
1940 | } | |
1941 | ||
1942 | void mmc_blk_mq_complete(struct request *req) | |
1943 | { | |
1944 | struct mmc_queue *mq = req->q->queuedata; | |
1945 | ||
1e8e55b6 AH |
1946 | if (mq->use_cqe) |
1947 | mmc_blk_cqe_complete_rq(mq, req); | |
1948 | else | |
1949 | mmc_blk_mq_complete_rq(mq, req); | |
81196976 AH |
1950 | } |
1951 | ||
1952 | static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, | |
1953 | struct request *req) | |
1954 | { | |
1955 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
88a51646 | 1956 | struct mmc_host *host = mq->card->host; |
81196976 | 1957 | |
88a51646 AH |
1958 | if (mmc_blk_rq_error(&mqrq->brq) || |
1959 | mmc_blk_card_busy(mq->card, req)) { | |
1960 | mmc_blk_mq_rw_recovery(mq, req); | |
1961 | } else { | |
1962 | mmc_blk_rw_reset_success(mq, req); | |
1963 | mmc_retune_release(host); | |
1964 | } | |
81196976 AH |
1965 | |
1966 | mmc_blk_urgent_bkops(mq, mqrq); | |
1967 | } | |
1968 | ||
1969 | static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) | |
1970 | { | |
1971 | struct request_queue *q = req->q; | |
1972 | unsigned long flags; | |
1973 | bool put_card; | |
1974 | ||
1975 | spin_lock_irqsave(q->queue_lock, flags); | |
1976 | ||
1977 | mq->in_flight[mmc_issue_type(mq, req)] -= 1; | |
1978 | ||
1979 | put_card = (mmc_tot_in_flight(mq) == 0); | |
1980 | ||
1981 | spin_unlock_irqrestore(q->queue_lock, flags); | |
1982 | ||
1983 | if (put_card) | |
1984 | mmc_put_card(mq->card, &mq->ctx); | |
1985 | } | |
1986 | ||
1987 | static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) | |
1988 | { | |
1989 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
1990 | struct mmc_request *mrq = &mqrq->brq.mrq; | |
1991 | struct mmc_host *host = mq->card->host; | |
1992 | ||
1993 | mmc_post_req(host, mrq, 0); | |
1994 | ||
10f21df4 AH |
1995 | /* |
1996 | * Block layer timeouts race with completions, which means the normal | |
1997 | * completion path cannot be used during recovery. | |
1998 | */ | |
1999 | if (mq->in_recovery) | |
2000 | mmc_blk_mq_complete_rq(mq, req); | |
2001 | else | |
2002 | blk_mq_complete_request(req); | |
81196976 AH |
2003 | |
2004 | mmc_blk_mq_dec_in_flight(mq, req); | |
2005 | } | |
2006 | ||
10f21df4 AH |
2007 | void mmc_blk_mq_recovery(struct mmc_queue *mq) |
2008 | { | |
2009 | struct request *req = mq->recovery_req; | |
2010 | struct mmc_host *host = mq->card->host; | |
2011 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
2012 | ||
2013 | mq->recovery_req = NULL; | |
2014 | mq->rw_wait = false; | |
2015 | ||
2016 | if (mmc_blk_rq_error(&mqrq->brq)) { | |
2017 | mmc_retune_hold_now(host); | |
2018 | mmc_blk_mq_rw_recovery(mq, req); | |
2019 | } | |
2020 | ||
2021 | mmc_blk_urgent_bkops(mq, mqrq); | |
2022 | ||
2023 | mmc_blk_mq_post_req(mq, req); | |
2024 | } | |
2025 | ||
81196976 AH |
2026 | static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq, |
2027 | struct request **prev_req) | |
2028 | { | |
10f21df4 AH |
2029 | if (mmc_host_done_complete(mq->card->host)) |
2030 | return; | |
2031 | ||
81196976 AH |
2032 | mutex_lock(&mq->complete_lock); |
2033 | ||
2034 | if (!mq->complete_req) | |
2035 | goto out_unlock; | |
2036 | ||
2037 | mmc_blk_mq_poll_completion(mq, mq->complete_req); | |
2038 | ||
2039 | if (prev_req) | |
2040 | *prev_req = mq->complete_req; | |
2041 | else | |
2042 | mmc_blk_mq_post_req(mq, mq->complete_req); | |
2043 | ||
2044 | mq->complete_req = NULL; | |
2045 | ||
2046 | out_unlock: | |
2047 | mutex_unlock(&mq->complete_lock); | |
2048 | } | |
2049 | ||
2050 | void mmc_blk_mq_complete_work(struct work_struct *work) | |
2051 | { | |
2052 | struct mmc_queue *mq = container_of(work, struct mmc_queue, | |
2053 | complete_work); | |
2054 | ||
2055 | mmc_blk_mq_complete_prev_req(mq, NULL); | |
2056 | } | |
2057 | ||
2058 | static void mmc_blk_mq_req_done(struct mmc_request *mrq) | |
2059 | { | |
2060 | struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, | |
2061 | brq.mrq); | |
2062 | struct request *req = mmc_queue_req_to_req(mqrq); | |
2063 | struct request_queue *q = req->q; | |
2064 | struct mmc_queue *mq = q->queuedata; | |
10f21df4 | 2065 | struct mmc_host *host = mq->card->host; |
81196976 | 2066 | unsigned long flags; |
81196976 | 2067 | |
10f21df4 AH |
2068 | if (!mmc_host_done_complete(host)) { |
2069 | bool waiting; | |
81196976 | 2070 | |
10f21df4 AH |
2071 | /* |
2072 | * We cannot complete the request in this context, so record | |
2073 | * that there is a request to complete, and that a following | |
2074 | * request does not need to wait (although it does need to | |
2075 | * complete complete_req first). | |
2076 | */ | |
2077 | spin_lock_irqsave(q->queue_lock, flags); | |
2078 | mq->complete_req = req; | |
2079 | mq->rw_wait = false; | |
2080 | waiting = mq->waiting; | |
2081 | spin_unlock_irqrestore(q->queue_lock, flags); | |
2082 | ||
2083 | /* | |
2084 | * If 'waiting' then the waiting task will complete this | |
2085 | * request, otherwise queue a work to do it. Note that | |
2086 | * complete_work may still race with the dispatch of a following | |
2087 | * request. | |
2088 | */ | |
2089 | if (waiting) | |
2090 | wake_up(&mq->wait); | |
2091 | else | |
2092 | kblockd_schedule_work(&mq->complete_work); | |
2093 | ||
2094 | return; | |
2095 | } | |
2096 | ||
2097 | /* Take the recovery path for errors or urgent background operations */ | |
2098 | if (mmc_blk_rq_error(&mqrq->brq) || | |
2099 | mmc_blk_urgent_bkops_needed(mq, mqrq)) { | |
2100 | spin_lock_irqsave(q->queue_lock, flags); | |
2101 | mq->recovery_needed = true; | |
2102 | mq->recovery_req = req; | |
2103 | spin_unlock_irqrestore(q->queue_lock, flags); | |
81196976 | 2104 | wake_up(&mq->wait); |
10f21df4 AH |
2105 | schedule_work(&mq->recovery_work); |
2106 | return; | |
2107 | } | |
2108 | ||
2109 | mmc_blk_rw_reset_success(mq, req); | |
2110 | ||
2111 | mq->rw_wait = false; | |
2112 | wake_up(&mq->wait); | |
2113 | ||
2114 | mmc_blk_mq_post_req(mq, req); | |
81196976 AH |
2115 | } |
2116 | ||
2117 | static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) | |
2118 | { | |
2119 | struct request_queue *q = mq->queue; | |
2120 | unsigned long flags; | |
2121 | bool done; | |
2122 | ||
2123 | /* | |
10f21df4 AH |
2124 | * Wait while there is another request in progress, but not if recovery |
2125 | * is needed. Also indicate whether there is a request waiting to start. | |
81196976 AH |
2126 | */ |
2127 | spin_lock_irqsave(q->queue_lock, flags); | |
10f21df4 AH |
2128 | if (mq->recovery_needed) { |
2129 | *err = -EBUSY; | |
2130 | done = true; | |
2131 | } else { | |
2132 | done = !mq->rw_wait; | |
2133 | } | |
81196976 AH |
2134 | mq->waiting = !done; |
2135 | spin_unlock_irqrestore(q->queue_lock, flags); | |
2136 | ||
2137 | return done; | |
2138 | } | |
2139 | ||
2140 | static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req) | |
2141 | { | |
2142 | int err = 0; | |
2143 | ||
2144 | wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err)); | |
2145 | ||
2146 | /* Always complete the previous request if there is one */ | |
2147 | mmc_blk_mq_complete_prev_req(mq, prev_req); | |
2148 | ||
2149 | return err; | |
2150 | } | |
2151 | ||
2152 | static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq, | |
2153 | struct request *req) | |
2154 | { | |
2155 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); | |
2156 | struct mmc_host *host = mq->card->host; | |
2157 | struct request *prev_req = NULL; | |
2158 | int err = 0; | |
2159 | ||
2160 | mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); | |
2161 | ||
2162 | mqrq->brq.mrq.done = mmc_blk_mq_req_done; | |
2163 | ||
2164 | mmc_pre_req(host, &mqrq->brq.mrq); | |
2165 | ||
2166 | err = mmc_blk_rw_wait(mq, &prev_req); | |
2167 | if (err) | |
2168 | goto out_post_req; | |
2169 | ||
2170 | mq->rw_wait = true; | |
2171 | ||
2172 | err = mmc_start_request(host, &mqrq->brq.mrq); | |
2173 | ||
2174 | if (prev_req) | |
2175 | mmc_blk_mq_post_req(mq, prev_req); | |
2176 | ||
10f21df4 | 2177 | if (err) |
81196976 | 2178 | mq->rw_wait = false; |
10f21df4 AH |
2179 | |
2180 | /* Release re-tuning here where there is no synchronization required */ | |
2181 | if (err || mmc_host_done_complete(host)) | |
81196976 | 2182 | mmc_retune_release(host); |
81196976 AH |
2183 | |
2184 | out_post_req: | |
2185 | if (err) | |
2186 | mmc_post_req(host, &mqrq->brq.mrq, err); | |
2187 | ||
2188 | return err; | |
2189 | } | |
2190 | ||
2191 | static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host) | |
2192 | { | |
1e8e55b6 AH |
2193 | if (mq->use_cqe) |
2194 | return host->cqe_ops->cqe_wait_for_idle(host); | |
2195 | ||
81196976 AH |
2196 | return mmc_blk_rw_wait(mq, NULL); |
2197 | } | |
2198 | ||
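/*
 * Annotation: mmc_blk_mq_issue_rq() below dispatches by issue type.
 * MMC_ISSUE_SYNC requests (driver ops, discard, secure erase, flush) are
 * executed and finished before MMC_REQ_FINISHED is returned, while
 * MMC_ISSUE_DCMD and MMC_ISSUE_ASYNC requests are only started here
 * (MMC_REQ_STARTED) and complete later through mmc_blk_mq_complete() or
 * the CQE completion path.
 */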
2199 | enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) | |
2200 | { | |
2201 | struct mmc_blk_data *md = mq->blkdata; | |
2202 | struct mmc_card *card = md->queue.card; | |
2203 | struct mmc_host *host = card->host; | |
2204 | int ret; | |
2205 | ||
2206 | ret = mmc_blk_part_switch(card, md->part_type); | |
2207 | if (ret) | |
2208 | return MMC_REQ_FAILED_TO_START; | |
2209 | ||
2210 | switch (mmc_issue_type(mq, req)) { | |
2211 | case MMC_ISSUE_SYNC: | |
2212 | ret = mmc_blk_wait_for_idle(mq, host); | |
2213 | if (ret) | |
2214 | return MMC_REQ_BUSY; | |
2215 | switch (req_op(req)) { | |
2216 | case REQ_OP_DRV_IN: | |
2217 | case REQ_OP_DRV_OUT: | |
2218 | mmc_blk_issue_drv_op(mq, req); | |
2219 | break; | |
2220 | case REQ_OP_DISCARD: | |
2221 | mmc_blk_issue_discard_rq(mq, req); | |
2222 | break; | |
2223 | case REQ_OP_SECURE_ERASE: | |
2224 | mmc_blk_issue_secdiscard_rq(mq, req); | |
2225 | break; | |
2226 | case REQ_OP_FLUSH: | |
2227 | mmc_blk_issue_flush(mq, req); | |
2228 | break; | |
2229 | default: | |
2230 | WARN_ON_ONCE(1); | |
2231 | return MMC_REQ_FAILED_TO_START; | |
2232 | } | |
2233 | return MMC_REQ_FINISHED; | |
1e8e55b6 | 2234 | case MMC_ISSUE_DCMD: |
81196976 AH |
2235 | case MMC_ISSUE_ASYNC: |
2236 | switch (req_op(req)) { | |
1e8e55b6 AH |
2237 | case REQ_OP_FLUSH: |
2238 | ret = mmc_blk_cqe_issue_flush(mq, req); | |
2239 | break; | |
81196976 AH |
2240 | case REQ_OP_READ: |
2241 | case REQ_OP_WRITE: | |
1e8e55b6 AH |
2242 | if (mq->use_cqe) |
2243 | ret = mmc_blk_cqe_issue_rw_rq(mq, req); | |
2244 | else | |
2245 | ret = mmc_blk_mq_issue_rw_rq(mq, req); | |
81196976 AH |
2246 | break; |
2247 | default: | |
2248 | WARN_ON_ONCE(1); | |
2249 | ret = -EINVAL; | |
2250 | } | |
2251 | if (!ret) | |
2252 | return MMC_REQ_STARTED; | |
2253 | return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START; | |
2254 | default: | |
2255 | WARN_ON_ONCE(1); | |
2256 | return MMC_REQ_FAILED_TO_START; | |
2257 | } | |
2258 | } | |
2259 | ||
a6f6c96b RK |
2260 | static inline int mmc_blk_readonly(struct mmc_card *card) |
2261 | { | |
2262 | return mmc_card_readonly(card) || | |
2263 | !(card->csd.cmdclass & CCC_BLOCK_WRITE); | |
2264 | } | |
2265 | ||
371a689f AW |
2266 | static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, |
2267 | struct device *parent, | |
2268 | sector_t size, | |
2269 | bool default_ro, | |
add710ea JR |
2270 | const char *subname, |
2271 | int area_type) | |
1da177e4 LT |
2272 | { |
2273 | struct mmc_blk_data *md; | |
2274 | int devidx, ret; | |
2275 | ||
a04848c7 | 2276 | devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL); |
e7b42769 SL |
2277 | if (devidx < 0) { |
2278 | /* | |
2279 | * We get -ENOSPC because there are no more available | |
2280 | * devidx. The reason may be that either userspace hasn't yet | |
2281 | * unmounted the partitions, which postpones mmc_blk_release() | |
2282 | * from being called, or the device has more partitions than | |
2283 | * what we support. | |
2284 | */ | |
2285 | if (devidx == -ENOSPC) | |
2286 | dev_err(mmc_dev(card->host), | |
2287 | "no more device IDs available\n"); | |
2288 | ||
a04848c7 | 2289 | return ERR_PTR(devidx); |
e7b42769 | 2290 | } |
1da177e4 | 2291 | |
dd00cc48 | 2292 | md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); |
a6f6c96b RK |
2293 | if (!md) { |
2294 | ret = -ENOMEM; | |
2295 | goto out; | |
2296 | } | |
1da177e4 | 2297 | |
add710ea JR |
2298 | md->area_type = area_type; |
2299 | ||
a6f6c96b RK |
2300 | /* |
2301 | * Set the read-only status based on the supported commands | |
2302 | * and the write protect switch. | |
2303 | */ | |
2304 | md->read_only = mmc_blk_readonly(card); | |
1da177e4 | 2305 | |
5e71b7a6 | 2306 | md->disk = alloc_disk(perdev_minors); |
a6f6c96b RK |
2307 | if (md->disk == NULL) { |
2308 | ret = -ENOMEM; | |
2309 | goto err_kfree; | |
2310 | } | |
1da177e4 | 2311 | |
a6f6c96b | 2312 | spin_lock_init(&md->lock); |
371a689f | 2313 | INIT_LIST_HEAD(&md->part); |
97548575 | 2314 | INIT_LIST_HEAD(&md->rpmbs); |
a6f6c96b | 2315 | md->usage = 1; |
1da177e4 | 2316 | |
d09408ad | 2317 | ret = mmc_init_queue(&md->queue, card, &md->lock, subname); |
a6f6c96b RK |
2318 | if (ret) |
2319 | goto err_putdisk; | |
1da177e4 | 2320 | |
7db3028e | 2321 | md->queue.blkdata = md; |
d2b18394 | 2322 | |
41e3efd0 AH |
2323 | /* |
2324 | * Keep an extra reference to the queue so that we can shut down the | |
2325 | * queue (i.e. call blk_cleanup_queue()) while there are still | |
2326 | * references to the 'md'. The corresponding blk_put_queue() is in | |
2327 | * mmc_blk_put(). | |
2328 | */ | |
2329 | if (!blk_get_queue(md->queue.queue)) { | |
2330 | mmc_cleanup_queue(&md->queue); | |
2361bfb0 | 2331 | ret = -ENODEV; |
41e3efd0 AH |
2332 | goto err_putdisk; |
2333 | } | |
2334 | ||
fe6b4c88 | 2335 | md->disk->major = MMC_BLOCK_MAJOR; |
5e71b7a6 | 2336 | md->disk->first_minor = devidx * perdev_minors; |
a6f6c96b RK |
2337 | md->disk->fops = &mmc_bdops; |
2338 | md->disk->private_data = md; | |
2339 | md->disk->queue = md->queue.queue; | |
307d8e6f | 2340 | md->parent = parent; |
371a689f | 2341 | set_disk_ro(md->disk, md->read_only || default_ro); |
382c55f8 | 2342 | md->disk->flags = GENHD_FL_EXT_DEVT; |
f5b4d71f | 2343 | if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT)) |
53d8f974 | 2344 | md->disk->flags |= GENHD_FL_NO_PART_SCAN; |
a6f6c96b RK |
2345 | |
2346 | /* | |
2347 | * As discussed on lkml, GENHD_FL_REMOVABLE should: | |
2348 | * | |
2349 | * - be set for removable media with permanent block devices | |
2350 | * - be unset for removable block devices with permanent media | |
2351 | * | |
2352 | * Since MMC block devices clearly fall under the second | |
2353 | * case, we do not set GENHD_FL_REMOVABLE. Userspace | |
2354 | * should use the block device creation/destruction hotplug | |
2355 | * messages to tell when the card is present. | |
2356 | */ | |
2357 | ||
f06c9153 | 2358 | snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), |
9aaf3437 | 2359 | "mmcblk%u%s", card->host->index, subname ? subname : ""); |
a6f6c96b | 2360 | |
a5075eb9 SD |
2361 | if (mmc_card_mmc(card)) |
2362 | blk_queue_logical_block_size(md->queue.queue, | |
2363 | card->ext_csd.data_sector_size); | |
2364 | else | |
2365 | blk_queue_logical_block_size(md->queue.queue, 512); | |
2366 | ||
371a689f | 2367 | set_capacity(md->disk, size); |
d0c97cfb | 2368 | |
f0d89972 | 2369 | if (mmc_host_cmd23(card->host)) { |
0ed50abb DG |
2370 | if ((mmc_card_mmc(card) && |
2371 | card->csd.mmca_vsn >= CSD_SPEC_VER_3) || | |
f0d89972 AW |
2372 | (mmc_card_sd(card) && |
2373 | card->scr.cmds & SD_SCR_CMD23_SUPPORT)) | |
2374 | md->flags |= MMC_BLK_CMD23; | |
2375 | } | |
d0c97cfb AW |
2376 | |
2377 | if (mmc_card_mmc(card) && | |
2378 | md->flags & MMC_BLK_CMD23 && | |
2379 | ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || | |
2380 | card->ext_csd.rel_sectors)) { | |
2381 | md->flags |= MMC_BLK_REL_WR; | |
e9d5c746 | 2382 | blk_queue_write_cache(md->queue.queue, true, true); |
d0c97cfb AW |
2383 | } |
2384 | ||
371a689f AW |
2385 | return md; |
2386 | ||
2387 | err_putdisk: | |
2388 | put_disk(md->disk); | |
2389 | err_kfree: | |
2390 | kfree(md); | |
2391 | out: | |
a04848c7 | 2392 | ida_simple_remove(&mmc_blk_ida, devidx); |
371a689f AW |
2393 | return ERR_PTR(ret); |
2394 | } | |
2395 | ||
2396 | static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) | |
2397 | { | |
2398 | sector_t size; | |
a6f6c96b | 2399 | |
85a18ad9 PO |
2400 | if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { |
2401 | /* | |
2402 | * The EXT_CSD sector count is in number of 512 byte | |
2403 | * sectors. | |
2404 | */ | |
371a689f | 2405 | size = card->ext_csd.sectors; |
85a18ad9 PO |
2406 | } else { |
2407 | /* | |
2408 | * The CSD capacity field is in units of read_blkbits. | |
2409 | * set_capacity takes units of 512 bytes. | |
2410 | */ | |
087de9ed KM |
2411 | size = (typeof(sector_t))card->csd.capacity |
2412 | << (card->csd.read_blkbits - 9); | |
85a18ad9 | 2413 | } |
371a689f | 2414 | |
7a30f2af | 2415 | return mmc_blk_alloc_req(card, &card->dev, size, false, NULL, |
add710ea | 2416 | MMC_BLK_DATA_AREA_MAIN); |
371a689f | 2417 | } |
a6f6c96b | 2418 | |
371a689f AW |
2419 | static int mmc_blk_alloc_part(struct mmc_card *card, |
2420 | struct mmc_blk_data *md, | |
2421 | unsigned int part_type, | |
2422 | sector_t size, | |
2423 | bool default_ro, | |
add710ea JR |
2424 | const char *subname, |
2425 | int area_type) | |
371a689f AW |
2426 | { |
2427 | char cap_str[10]; | |
2428 | struct mmc_blk_data *part_md; | |
2429 | ||
2430 | part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, | |
add710ea | 2431 | subname, area_type); |
371a689f AW |
2432 | if (IS_ERR(part_md)) |
2433 | return PTR_ERR(part_md); | |
2434 | part_md->part_type = part_type; | |
2435 | list_add(&part_md->part, &md->part); | |
2436 | ||
b9f28d86 | 2437 | string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2, |
371a689f | 2438 | cap_str, sizeof(cap_str)); |
a3c76eb9 | 2439 | pr_info("%s: %s %s partition %u %s\n", |
371a689f AW |
2440 | part_md->disk->disk_name, mmc_card_id(card), |
2441 | mmc_card_name(card), part_md->part_type, cap_str); | |
2442 | return 0; | |
2443 | } | |
2444 | ||
97548575 LW |
2445 | /** |
2446 | * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev | |
2447 | * @filp: the character device file | |
2448 | * @cmd: the ioctl() command | |
2449 | * @arg: the argument from userspace | |
2450 | * | |
2451 | * This essentially just redirects the incoming ioctl()s to the | |
2452 | * main block device that spawned the RPMB character device. | |
2453 | */ | |
2454 | static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd, | |
2455 | unsigned long arg) | |
2456 | { | |
2457 | struct mmc_rpmb_data *rpmb = filp->private_data; | |
2458 | int ret; | |
2459 | ||
2460 | switch (cmd) { | |
2461 | case MMC_IOC_CMD: | |
2462 | ret = mmc_blk_ioctl_cmd(rpmb->md, | |
2463 | (struct mmc_ioc_cmd __user *)arg, | |
2464 | rpmb); | |
2465 | break; | |
2466 | case MMC_IOC_MULTI_CMD: | |
2467 | ret = mmc_blk_ioctl_multi_cmd(rpmb->md, | |
2468 | (struct mmc_ioc_multi_cmd __user *)arg, | |
2469 | rpmb); | |
2470 | break; | |
2471 | default: | |
2472 | ret = -EINVAL; | |
2473 | break; | |
2474 | } | |
2475 | ||
2476 | return ret; | |
2477 | } | |
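/*
 * A minimal user-space sketch of driving this chardev (an annotation, not
 * part of the driver). It assumes the standard <linux/mmc/ioctl.h> ABI;
 * the device name is only an example and the opcode/flag macros are the
 * kernel definitions that tools such as mmc-utils mirror in user space:
 *
 *	struct mmc_ioc_cmd ic = { 0 };
 *	__u8 frame[512];
 *	int fd = open("/dev/mmcblk0rpmb", O_RDWR);
 *
 *	ic.opcode = 18;                      (MMC_READ_MULTIPLE_BLOCK)
 *	ic.flags  = MMC_RSP_R1 | MMC_CMD_ADTC;
 *	ic.blksz  = 512;
 *	ic.blocks = 1;
 *	mmc_ioc_cmd_set_data(ic, frame);     (result frame lands in frame[])
 *	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0)
 *		perror("MMC_IOC_CMD");
 */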
2478 | ||
2479 | #ifdef CONFIG_COMPAT | |
2480 | static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd, | |
2481 | unsigned long arg) | |
2482 | { | |
2483 | return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); | |
2484 | } | |
2485 | #endif | |
2486 | ||
2487 | static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp) | |
2488 | { | |
2489 | struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, | |
2490 | struct mmc_rpmb_data, chrdev); | |
2491 | ||
2492 | get_device(&rpmb->dev); | |
2493 | filp->private_data = rpmb; | |
1c87f735 | 2494 | mmc_blk_get(rpmb->md->disk); |
97548575 LW |
2495 | |
2496 | return nonseekable_open(inode, filp); | |
2497 | } | |
2498 | ||
2499 | static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) | |
2500 | { | |
2501 | struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, | |
2502 | struct mmc_rpmb_data, chrdev); | |
2503 | ||
2504 | put_device(&rpmb->dev); | |
1c87f735 | 2505 | mmc_blk_put(rpmb->md); |
97548575 LW |
2506 | |
2507 | return 0; | |
2508 | } | |
2509 | ||
2510 | static const struct file_operations mmc_rpmb_fileops = { | |
2511 | .release = mmc_rpmb_chrdev_release, | |
2512 | .open = mmc_rpmb_chrdev_open, | |
2513 | .owner = THIS_MODULE, | |
2514 | .llseek = no_llseek, | |
2515 | .unlocked_ioctl = mmc_rpmb_ioctl, | |
2516 | #ifdef CONFIG_COMPAT | |
2517 | .compat_ioctl = mmc_rpmb_ioctl_compat, | |
2518 | #endif | |
2519 | }; | |
2520 | ||
1c87f735 LW |
2521 | static void mmc_blk_rpmb_device_release(struct device *dev) |
2522 | { | |
2523 | struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); | |
2524 | ||
2525 | ida_simple_remove(&mmc_rpmb_ida, rpmb->id); | |
2526 | kfree(rpmb); | |
2527 | } | |
97548575 LW |
2528 | |
2529 | static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, | |
2530 | struct mmc_blk_data *md, | |
2531 | unsigned int part_index, | |
2532 | sector_t size, | |
2533 | const char *subname) | |
2534 | { | |
2535 | int devidx, ret; | |
2536 | char rpmb_name[DISK_NAME_LEN]; | |
2537 | char cap_str[10]; | |
2538 | struct mmc_rpmb_data *rpmb; | |
2539 | ||
2540 | /* This creates the minor number for the RPMB char device */ | |
2541 | devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL); | |
2542 | if (devidx < 0) | |
2543 | return devidx; | |
2544 | ||
2545 | rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL); | |
1c87f735 LW |
2546 | if (!rpmb) { |
2547 | ida_simple_remove(&mmc_rpmb_ida, devidx); | |
97548575 | 2548 | return -ENOMEM; |
1c87f735 | 2549 | } |
97548575 LW |
2550 | |
2551 | snprintf(rpmb_name, sizeof(rpmb_name), | |
2552 | "mmcblk%u%s", card->host->index, subname ? subname : ""); | |
2553 | ||
2554 | rpmb->id = devidx; | |
2555 | rpmb->part_index = part_index; | |
2556 | rpmb->dev.init_name = rpmb_name; | |
2557 | rpmb->dev.bus = &mmc_rpmb_bus_type; | |
2558 | rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id); | |
2559 | rpmb->dev.parent = &card->dev; | |
1c87f735 | 2560 | rpmb->dev.release = mmc_blk_rpmb_device_release; |
97548575 LW |
2561 | device_initialize(&rpmb->dev); |
2562 | dev_set_drvdata(&rpmb->dev, rpmb); | |
2563 | rpmb->md = md; | |
2564 | ||
2565 | cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops); | |
2566 | rpmb->chrdev.owner = THIS_MODULE; | |
2567 | ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev); | |
2568 | if (ret) { | |
2569 | pr_err("%s: could not add character device\n", rpmb_name); | |
1c87f735 | 2570 | goto out_put_device; |
97548575 LW |
2571 | } |
2572 | ||
2573 | list_add(&rpmb->node, &md->rpmbs); | |
2574 | ||
2575 | string_get_size((u64)size, 512, STRING_UNITS_2, | |
2576 | cap_str, sizeof(cap_str)); | |
2577 | ||
2578 | pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n", | |
2579 | rpmb_name, mmc_card_id(card), | |
2580 | mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str, | |
2581 | MAJOR(mmc_rpmb_devt), rpmb->id); | |
2582 | ||
2583 | return 0; | |
2584 | ||
1c87f735 LW |
2585 | out_put_device: |
2586 | put_device(&rpmb->dev); | |
97548575 LW |
2587 | return ret; |
2588 | } | |
2589 | ||
2590 | static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb) | |
1c87f735 | 2591 | |
97548575 LW |
2592 | { |
2593 | cdev_device_del(&rpmb->chrdev, &rpmb->dev); | |
1c87f735 | 2594 | put_device(&rpmb->dev); |
97548575 LW |
2595 | } |
2596 | ||
e0c368d5 NJ |
2597 | /* MMC Physical partitions consist of two boot partitions and |
2598 | * up to four general purpose partitions. | |
2599 | * For each partition enabled in EXT_CSD a block device will be allocated | |
2600 | * to provide access to the partition. | |
2601 | */ | |
2602 | ||
371a689f AW |
2603 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) |
2604 | { | |
97548575 | 2605 | int idx, ret; |
371a689f AW |
2606 | |
2607 | if (!mmc_card_mmc(card)) | |
2608 | return 0; | |
2609 | ||
e0c368d5 | 2610 | for (idx = 0; idx < card->nr_parts; idx++) { |
97548575 LW |
2611 | if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) { |
2612 | /* | |
2613 | * RPMB partitions do not provide block access; they | |
2614 | * are only accessed using ioctl()s. Thus create | |
2615 | * special RPMB block devices that do not have a | |
2616 | * backing block queue for these. | |
2617 | */ | |
2618 | ret = mmc_blk_alloc_rpmb_part(card, md, | |
2619 | card->part[idx].part_cfg, | |
2620 | card->part[idx].size >> 9, | |
2621 | card->part[idx].name); | |
2622 | if (ret) | |
2623 | return ret; | |
2624 | } else if (card->part[idx].size) { | |
e0c368d5 NJ |
2625 | ret = mmc_blk_alloc_part(card, md, |
2626 | card->part[idx].part_cfg, | |
2627 | card->part[idx].size >> 9, | |
2628 | card->part[idx].force_ro, | |
add710ea JR |
2629 | card->part[idx].name, |
2630 | card->part[idx].area_type); | |
e0c368d5 NJ |
2631 | if (ret) |
2632 | return ret; | |
2633 | } | |
371a689f AW |
2634 | } |
2635 | ||
97548575 | 2636 | return 0; |
1da177e4 LT |
2637 | } |
2638 | ||
371a689f AW |
2639 | static void mmc_blk_remove_req(struct mmc_blk_data *md) |
2640 | { | |
add710ea JR |
2641 | struct mmc_card *card; |
2642 | ||
371a689f | 2643 | if (md) { |
fdfa20c1 PT |
2644 | /* |
2645 | * Flush remaining requests and free queues. It | |
2646 | * is freeing the queue that stops new requests | |
2647 | * from being accepted. | |
2648 | */ | |
8efb83a2 | 2649 | card = md->queue.card; |
fdfa20c1 | 2650 | mmc_cleanup_queue(&md->queue); |
371a689f AW |
2651 | if (md->disk->flags & GENHD_FL_UP) { |
2652 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | |
add710ea JR |
2653 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && |
2654 | card->ext_csd.boot_ro_lockable) | |
2655 | device_remove_file(disk_to_dev(md->disk), | |
2656 | &md->power_ro_lock); | |
371a689f | 2657 | |
371a689f AW |
2658 | del_gendisk(md->disk); |
2659 | } | |
371a689f AW |
2660 | mmc_blk_put(md); |
2661 | } | |
2662 | } | |
2663 | ||
2664 | static void mmc_blk_remove_parts(struct mmc_card *card, | |
2665 | struct mmc_blk_data *md) | |
2666 | { | |
2667 | struct list_head *pos, *q; | |
2668 | struct mmc_blk_data *part_md; | |
97548575 | 2669 | struct mmc_rpmb_data *rpmb; |
371a689f | 2670 | |
97548575 LW |
2671 | /* Remove RPMB partitions */ |
2672 | list_for_each_safe(pos, q, &md->rpmbs) { | |
2673 | rpmb = list_entry(pos, struct mmc_rpmb_data, node); | |
2674 | list_del(pos); | |
2675 | mmc_blk_remove_rpmb_part(rpmb); | |
2676 | } | |
2677 | /* Remove block partitions */ | |
371a689f AW |
2678 | list_for_each_safe(pos, q, &md->part) { |
2679 | part_md = list_entry(pos, struct mmc_blk_data, part); | |
2680 | list_del(pos); | |
2681 | mmc_blk_remove_req(part_md); | |
2682 | } | |
2683 | } | |
2684 | ||
2685 | static int mmc_add_disk(struct mmc_blk_data *md) | |
2686 | { | |
2687 | int ret; | |
add710ea | 2688 | struct mmc_card *card = md->queue.card; |
371a689f | 2689 | |
307d8e6f | 2690 | device_add_disk(md->parent, md->disk); |
371a689f AW |
2691 | md->force_ro.show = force_ro_show; |
2692 | md->force_ro.store = force_ro_store; | |
641c3187 | 2693 | sysfs_attr_init(&md->force_ro.attr); |
371a689f AW |
2694 | md->force_ro.attr.name = "force_ro"; |
2695 | md->force_ro.attr.mode = S_IRUGO | S_IWUSR; | |
2696 | ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); | |
2697 | if (ret) | |
add710ea JR |
2698 | goto force_ro_fail; |
2699 | ||
2700 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && | |
2701 | card->ext_csd.boot_ro_lockable) { | |
88187398 | 2702 | umode_t mode; |
add710ea JR |
2703 | |
2704 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) | |
2705 | mode = S_IRUGO; | |
2706 | else | |
2707 | mode = S_IRUGO | S_IWUSR; | |
2708 | ||
2709 | md->power_ro_lock.show = power_ro_lock_show; | |
2710 | md->power_ro_lock.store = power_ro_lock_store; | |
00d9ac08 | 2711 | sysfs_attr_init(&md->power_ro_lock.attr); |
add710ea JR |
2712 | md->power_ro_lock.attr.mode = mode; |
2713 | md->power_ro_lock.attr.name = | |
2714 | "ro_lock_until_next_power_on"; | |
2715 | ret = device_create_file(disk_to_dev(md->disk), | |
2716 | &md->power_ro_lock); | |
2717 | if (ret) | |
2718 | goto power_ro_lock_fail; | |
2719 | } | |
2720 | return ret; | |
2721 | ||
2722 | power_ro_lock_fail: | |
2723 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | |
2724 | force_ro_fail: | |
2725 | del_gendisk(md->disk); | |
371a689f AW |
2726 | |
2727 | return ret; | |
2728 | } | |
2729 | ||
627c3ccf LW |
2730 | #ifdef CONFIG_DEBUG_FS |
2731 | ||
2732 | static int mmc_dbg_card_status_get(void *data, u64 *val) | |
2733 | { | |
2734 | struct mmc_card *card = data; | |
2735 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); | |
2736 | struct mmc_queue *mq = &md->queue; | |
2737 | struct request *req; | |
2738 | int ret; | |
2739 | ||
2740 | /* Ask the block layer about the card status */ | |
2741 | req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); | |
fb8e456e AH |
2742 | if (IS_ERR(req)) |
2743 | return PTR_ERR(req); | |
627c3ccf LW |
2744 | req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; |
2745 | blk_execute_rq(mq->queue, NULL, req, 0); | |
2746 | ret = req_to_mmc_queue_req(req)->drv_op_result; | |
2747 | if (ret >= 0) { | |
2748 | *val = ret; | |
2749 | ret = 0; | |
2750 | } | |
34c089e8 | 2751 | blk_put_request(req); |
627c3ccf LW |
2752 | |
2753 | return ret; | |
2754 | } | |
2755 | DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, | |
2756 | NULL, "%08llx\n"); | |
2757 | ||
2758 | /* That is two digits * 512 + 1 for newline */ | |
2759 | #define EXT_CSD_STR_LEN 1025 | |
2760 | ||
2761 | static int mmc_ext_csd_open(struct inode *inode, struct file *filp) | |
2762 | { | |
2763 | struct mmc_card *card = inode->i_private; | |
2764 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); | |
2765 | struct mmc_queue *mq = &md->queue; | |
2766 | struct request *req; | |
2767 | char *buf; | |
2768 | ssize_t n = 0; | |
2769 | u8 *ext_csd; | |
2770 | int err, i; | |
2771 | ||
2772 | buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); | |
2773 | if (!buf) | |
2774 | return -ENOMEM; | |
2775 | ||
2776 | /* Ask the block layer for the EXT CSD */ | |
2777 | req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); | |
fb8e456e AH |
2778 | if (IS_ERR(req)) { |
2779 | err = PTR_ERR(req); | |
2780 | goto out_free; | |
2781 | } | |
627c3ccf LW |
2782 | req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; |
2783 | req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; | |
2784 | blk_execute_rq(mq->queue, NULL, req, 0); | |
2785 | err = req_to_mmc_queue_req(req)->drv_op_result; | |
34c089e8 | 2786 | blk_put_request(req); |
627c3ccf LW |
2787 | if (err) { |
2788 | pr_err("FAILED %d\n", err); | |
2789 | goto out_free; | |
2790 | } | |
2791 | ||
2792 | for (i = 0; i < 512; i++) | |
2793 | n += sprintf(buf + n, "%02x", ext_csd[i]); | |
2794 | n += sprintf(buf + n, "\n"); | |
2795 | ||
2796 | if (n != EXT_CSD_STR_LEN) { | |
2797 | err = -EINVAL; | |
2798 | goto out_free; | |
2799 | } | |
2800 | ||
2801 | filp->private_data = buf; | |
2802 | kfree(ext_csd); | |
2803 | return 0; | |
2804 | ||
2805 | out_free: | |
2806 | kfree(buf); | |
2807 | return err; | |
2808 | } | |
2809 | ||
2810 | static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, | |
2811 | size_t cnt, loff_t *ppos) | |
2812 | { | |
2813 | char *buf = filp->private_data; | |
2814 | ||
2815 | return simple_read_from_buffer(ubuf, cnt, ppos, | |
2816 | buf, EXT_CSD_STR_LEN); | |
2817 | } | |
2818 | ||
2819 | static int mmc_ext_csd_release(struct inode *inode, struct file *file) | |
2820 | { | |
2821 | kfree(file->private_data); | |
2822 | return 0; | |
2823 | } | |
2824 | ||
2825 | static const struct file_operations mmc_dbg_ext_csd_fops = { | |
2826 | .open = mmc_ext_csd_open, | |
2827 | .read = mmc_ext_csd_read, | |
2828 | .release = mmc_ext_csd_release, | |
2829 | .llseek = default_llseek, | |
2830 | }; | |
2831 | ||
f9f0da98 | 2832 | static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) |
627c3ccf LW |
2833 | { |
2834 | struct dentry *root; | |
2835 | ||
2836 | if (!card->debugfs_root) | |
2837 | return 0; | |
2838 | ||
2839 | root = card->debugfs_root; | |
2840 | ||
2841 | if (mmc_card_mmc(card) || mmc_card_sd(card)) { | |
f9f0da98 AH |
2842 | md->status_dentry = |
2843 | debugfs_create_file("status", S_IRUSR, root, card, | |
2844 | &mmc_dbg_card_status_fops); | |
2845 | if (!md->status_dentry) | |
627c3ccf LW |
2846 | return -EIO; |
2847 | } | |
2848 | ||
2849 | if (mmc_card_mmc(card)) { | |
f9f0da98 AH |
2850 | md->ext_csd_dentry = |
2851 | debugfs_create_file("ext_csd", S_IRUSR, root, card, | |
2852 | &mmc_dbg_ext_csd_fops); | |
2853 | if (!md->ext_csd_dentry) | |
627c3ccf LW |
2854 | return -EIO; |
2855 | } | |
2856 | ||
2857 | return 0; | |
2858 | } | |
2859 | ||
f9f0da98 AH |
2860 | static void mmc_blk_remove_debugfs(struct mmc_card *card, |
2861 | struct mmc_blk_data *md) | |
2862 | { | |
2863 | if (!card->debugfs_root) | |
2864 | return; | |
2865 | ||
2866 | if (!IS_ERR_OR_NULL(md->status_dentry)) { | |
2867 | debugfs_remove(md->status_dentry); | |
2868 | md->status_dentry = NULL; | |
2869 | } | |
2870 | ||
2871 | if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) { | |
2872 | debugfs_remove(md->ext_csd_dentry); | |
2873 | md->ext_csd_dentry = NULL; | |
2874 | } | |
2875 | } | |
627c3ccf LW |
2876 | |
2877 | #else | |
2878 | ||
f9f0da98 | 2879 | static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) |
627c3ccf LW |
2880 | { |
2881 | return 0; | |
2882 | } | |
2883 | ||
f9f0da98 AH |
2884 | static void mmc_blk_remove_debugfs(struct mmc_card *card, |
2885 | struct mmc_blk_data *md) | |
2886 | { | |
2887 | } | |
2888 | ||
627c3ccf LW |
2889 | #endif /* CONFIG_DEBUG_FS */ |
2890 | ||
96541bac | 2891 | static int mmc_blk_probe(struct mmc_card *card) |
1da177e4 | 2892 | { |
371a689f | 2893 | struct mmc_blk_data *md, *part_md; |
a7bbb573 PO |
2894 | char cap_str[10]; |
2895 | ||
912490db PO |
2896 | /* |
2897 | * Check that the card supports the command class(es) we need. | |
2898 | */ | |
2899 | if (!(card->csd.cmdclass & CCC_BLOCK_READ)) | |
1da177e4 LT |
2900 | return -ENODEV; |
2901 | ||
8c7cdbf9 | 2902 | mmc_fixup_device(card, mmc_blk_fixups); |
5204d00f | 2903 | |
1da177e4 | 2904 | md = mmc_blk_alloc(card); |
304419d8 | 2905 | if (IS_ERR(md)) |
1da177e4 LT |
2906 | return PTR_ERR(md); |
2907 | ||
b9f28d86 | 2908 | string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2, |
a7bbb573 | 2909 | cap_str, sizeof(cap_str)); |
a3c76eb9 | 2910 | pr_info("%s: %s %s %s %s\n", |
1da177e4 | 2911 | md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), |
a7bbb573 | 2912 | cap_str, md->read_only ? "(ro)" : ""); |
1da177e4 | 2913 | |
371a689f AW |
2914 | if (mmc_blk_alloc_parts(card, md)) |
2915 | goto out; | |
2916 | ||
96541bac | 2917 | dev_set_drvdata(&card->dev, md); |
6f60c222 | 2918 | |
371a689f AW |
2919 | if (mmc_add_disk(md)) |
2920 | goto out; | |
2921 | ||
2922 | list_for_each_entry(part_md, &md->part, part) { | |
2923 | if (mmc_add_disk(part_md)) | |
2924 | goto out; | |
2925 | } | |
e94cfef6 | 2926 | |
627c3ccf | 2927 | /* Add two debugfs entries */ |
f9f0da98 | 2928 | mmc_blk_add_debugfs(card, md); |
627c3ccf | 2929 | |
e94cfef6 UH |
2930 | pm_runtime_set_autosuspend_delay(&card->dev, 3000); |
2931 | pm_runtime_use_autosuspend(&card->dev); | |
2932 | ||
2933 | /* | |
2934 | * Don't enable runtime PM for SD-combo cards here. Leave that | |
2935 | * decision to be taken during the SDIO init sequence instead. | |
2936 | */ | |
2937 | if (card->type != MMC_TYPE_SD_COMBO) { | |
2938 | pm_runtime_set_active(&card->dev); | |
2939 | pm_runtime_enable(&card->dev); | |
2940 | } | |
2941 | ||
1da177e4 LT |
2942 | return 0; |
2943 | ||
2944 | out: | |
371a689f AW |
2945 | mmc_blk_remove_parts(card, md); |
2946 | mmc_blk_remove_req(md); | |
5865f287 | 2947 | return 0; |
1da177e4 LT |
2948 | } |
2949 | ||
96541bac | 2950 | static void mmc_blk_remove(struct mmc_card *card) |
1da177e4 | 2951 | { |
96541bac | 2952 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); |
1da177e4 | 2953 | |
f9f0da98 | 2954 | mmc_blk_remove_debugfs(card, md); |
371a689f | 2955 | mmc_blk_remove_parts(card, md); |
e94cfef6 | 2956 | pm_runtime_get_sync(&card->dev); |
ddd6fa7e | 2957 | mmc_claim_host(card->host); |
1f797edc | 2958 | mmc_blk_part_switch(card, md->part_type); |
ddd6fa7e | 2959 | mmc_release_host(card->host); |
e94cfef6 UH |
2960 | if (card->type != MMC_TYPE_SD_COMBO) |
2961 | pm_runtime_disable(&card->dev); | |
2962 | pm_runtime_put_noidle(&card->dev); | |
371a689f | 2963 | mmc_blk_remove_req(md); |
96541bac | 2964 | dev_set_drvdata(&card->dev, NULL); |
1da177e4 LT |
2965 | } |
2966 | ||
96541bac | 2967 | static int _mmc_blk_suspend(struct mmc_card *card) |
1da177e4 | 2968 | { |
371a689f | 2969 | struct mmc_blk_data *part_md; |
96541bac | 2970 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); |
1da177e4 LT |
2971 | |
2972 | if (md) { | |
2973 | mmc_queue_suspend(&md->queue); | |
371a689f AW |
2974 | list_for_each_entry(part_md, &md->part, part) { |
2975 | mmc_queue_suspend(&part_md->queue); | |
2976 | } | |
1da177e4 LT |
2977 | } |
2978 | return 0; | |
2979 | } | |
2980 | ||
96541bac | 2981 | static void mmc_blk_shutdown(struct mmc_card *card) |
76287748 | 2982 | { |
96541bac | 2983 | _mmc_blk_suspend(card); |
76287748 UH |
2984 | } |
2985 | ||
0967edc6 UH |
2986 | #ifdef CONFIG_PM_SLEEP |
2987 | static int mmc_blk_suspend(struct device *dev) | |
76287748 | 2988 | { |
96541bac UH |
2989 | struct mmc_card *card = mmc_dev_to_card(dev); |
2990 | ||
2991 | return _mmc_blk_suspend(card); | |
76287748 UH |
2992 | } |
2993 | ||
0967edc6 | 2994 | static int mmc_blk_resume(struct device *dev) |
1da177e4 | 2995 | { |
371a689f | 2996 | struct mmc_blk_data *part_md; |
fc95e30b | 2997 | struct mmc_blk_data *md = dev_get_drvdata(dev); |
1da177e4 LT |
2998 | |
2999 | if (md) { | |
371a689f AW |
3000 | /* |
3001 | * Resume involves the card going into idle state, | |
3002 | * so current partition is always the main one. | |
3003 | */ | |
3004 | md->part_curr = md->part_type; | |
1da177e4 | 3005 | mmc_queue_resume(&md->queue); |
371a689f AW |
3006 | list_for_each_entry(part_md, &md->part, part) { |
3007 | mmc_queue_resume(&part_md->queue); | |
3008 | } | |
1da177e4 LT |
3009 | } |
3010 | return 0; | |
3011 | } | |
1da177e4 LT |
3012 | #endif |
3013 | ||
0967edc6 UH |
3014 | static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume); |
3015 | ||
96541bac UH |
3016 | static struct mmc_driver mmc_driver = { |
3017 | .drv = { | |
3018 | .name = "mmcblk", | |
3019 | .pm = &mmc_blk_pm_ops, | |
3020 | }, | |
1da177e4 LT |
3021 | .probe = mmc_blk_probe, |
3022 | .remove = mmc_blk_remove, | |
76287748 | 3023 | .shutdown = mmc_blk_shutdown, |
1da177e4 LT |
3024 | }; |
3025 | ||
3026 | static int __init mmc_blk_init(void) | |
3027 | { | |
9d4e98e9 | 3028 | int res; |
1da177e4 | 3029 | |
97548575 LW |
3030 | res = bus_register(&mmc_rpmb_bus_type); |
3031 | if (res < 0) { | |
3032 | pr_err("mmcblk: could not register RPMB bus type\n"); | |
3033 | return res; | |
3034 | } | |
3035 | res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb"); | |
3036 | if (res < 0) { | |
3037 | pr_err("mmcblk: failed to allocate rpmb chrdev region\n"); | |
3038 | goto out_bus_unreg; | |
3039 | } | |
3040 | ||
5e71b7a6 OJ |
3041 | if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) |
3042 | pr_info("mmcblk: using %d minors per device\n", perdev_minors); | |
3043 | ||
a26eba61 | 3044 | max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors); |
5e71b7a6 | 3045 | |
fe6b4c88 PO |
3046 | res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
3047 | if (res) | |
97548575 | 3048 | goto out_chrdev_unreg; |
1da177e4 | 3049 | |
9d4e98e9 AM |
3050 | res = mmc_register_driver(&mmc_driver); |
3051 | if (res) | |
97548575 | 3052 | goto out_blkdev_unreg; |
1da177e4 | 3053 | |
9d4e98e9 | 3054 | return 0; |
97548575 LW |
3055 | |
3056 | out_blkdev_unreg: | |
9d4e98e9 | 3057 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
97548575 LW |
3058 | out_chrdev_unreg: |
3059 | unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); | |
3060 | out_bus_unreg: | |
3061 | bus_unregister(&mmc_rpmb_bus_type); | |
1da177e4 LT |
3062 | return res; |
3063 | } | |
3064 | ||
3065 | static void __exit mmc_blk_exit(void) | |
3066 | { | |
3067 | mmc_unregister_driver(&mmc_driver); | |
fe6b4c88 | 3068 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
97548575 | 3069 | unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); |
1da177e4 LT |
3070 | } |
3071 | ||
3072 | module_init(mmc_blk_init); | |
3073 | module_exit(mmc_blk_exit); | |
3074 | ||
3075 | MODULE_LICENSE("GPL"); | |
3076 | MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); | |
3077 |