// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/kref.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>
#include <linux/rpmb.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>
#include <linux/unaligned.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"

MODULE_ALIAS("mmc:block");

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

/*
 * Set a 10 second timeout for polling write request busy state. Note, mmc core
 * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10
 * second software timer to timeout the whole request, so 10 seconds should be
 * ample.
 */
#define MMC_BLK_TIMEOUT_MS  (10 * 1000)
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)

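/*
 * Worked example (illustrative, not from the original source): a CMD6
 * (MMC_SWITCH) argument is encoded as (access << 24) | (index << 16) |
 * (value << 8) | cmd_set, so for arg = 0x03210100:
 *
 *	MMC_EXTRACT_INDEX_FROM_ARG(0x03210100) == 0x21	(EXT_CSD_CACHE_CTRL)
 *	MMC_EXTRACT_VALUE_FROM_ARG(0x03210100) == 0x01	(cache on)
 */
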
/**
 * struct rpmb_frame - rpmb frame as defined by eMMC 5.1 (JESD84-B51)
 *
 * @stuff        : stuff bytes
 * @key_mac      : The authentication key or the message authentication
 *                 code (MAC) depending on the request/response type.
 *                 The MAC will be delivered in the last (or the only)
 *                 block of data.
 * @data         : Data to be written or read by signed access.
 * @nonce        : Random number generated by the host for the requests
 *                 and copied to the response by the RPMB engine.
 * @write_counter: Counter value for the total amount of the successful
 *                 authenticated data write requests made by the host.
 * @addr         : Address of the data to be programmed to or read
 *                 from the RPMB. Address is the serial number of
 *                 the accessed block (half sector 256B).
 * @block_count  : Number of blocks (half sectors, 256B) requested to be
 *                 read/programmed.
 * @result       : Includes information about the status of the write counter
 *                 (valid, expired) and result of the access made to the RPMB.
 * @req_resp     : Defines the type of request and response to/from the memory.
 *
 * The stuff bytes and big-endian properties are modeled to fit to the spec.
 */
struct rpmb_frame {
	u8     stuff[196];
	u8     key_mac[32];
	u8     data[256];
	u8     nonce[16];
	__be32 write_counter;
	__be16 addr;
	__be16 block_count;
	__be16 result;
	__be16 req_resp;
} __packed;

#define RPMB_PROGRAM_KEY       0x1    /* Program RPMB Authentication Key */
#define RPMB_GET_WRITE_COUNTER 0x2    /* Read RPMB write counter */
#define RPMB_WRITE_DATA        0x3    /* Write data to RPMB partition */
#define RPMB_READ_DATA         0x4    /* Read data from RPMB partition */
#define RPMB_RESULT_READ       0x5    /* Read result request (Internal) */

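/*
 * Illustrative sketch (not part of the driver): a host-side request frame
 * asking for the write counter would be built roughly like this, with the
 * multi-byte fields big-endian as the spec requires. The device echoes the
 * nonce and fills in write_counter and result; the response MAC must then
 * be verified against the programmed authentication key.
 *
 *	struct rpmb_frame frame = {};
 *
 *	get_random_bytes(frame.nonce, sizeof(frame.nonce));
 *	frame.req_resp = cpu_to_be16(RPMB_GET_WRITE_COUNTER);
 */
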
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device. It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);

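/*
 * Worked example (illustrative): with CONFIG_MMC_BLOCK_MINORS == 8, the
 * 20-bit minor space would allow (1 << 20) / 8 == 131072 devices, so the
 * effective limit becomes min(131072, MAX_DEVICES) == 256.
 */
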
struct mmc_blk_busy_data {
	struct mmc_card *card;
	u32 status;
};

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;
	struct list_head rpmbs;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	struct kref	kref;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)
#define MMC_BLK_CQE_RECOVERY	BIT(4)
#define MMC_BLK_TRIM		BIT(5)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
#define MMC_BLK_PART_INVALID	UINT_MAX	/* Unknown partition active */
	int	area_type;

	/* debugfs files (only in main mmc_blk_data) */
	struct dentry *status_dentry;
	struct dentry *ext_csd_dentry;
};

/* Device type for RPMB character devices */
static dev_t mmc_rpmb_devt;

/* Bus type for RPMB character devices */
static const struct bus_type mmc_rpmb_bus_type = {
	.name = "mmc_rpmb",
};

/**
 * struct mmc_rpmb_data - special RPMB device type for these areas
 * @dev: the device for the RPMB area
 * @chrdev: character device for the RPMB area
 * @id: unique device ID number
 * @part_index: partition index (0 on first)
 * @md: parent MMC block device
 * @rdev: registered RPMB device
 * @node: list item, so we can put this device on a list
 */
struct mmc_rpmb_data {
	struct device dev;
	struct cdev chrdev;
	int id;
	unsigned int part_index;
	struct mmc_blk_data *md;
	struct rpmb_dev *rdev;
	struct list_head node;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int recovery_mode,
			       struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static int mmc_spi_err_check(struct mmc_card *card);
static int mmc_blk_busy_cb(void *cb_data, bool *busy);

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && !kref_get_unless_zero(&md->kref))
		md = NULL;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_kref_release(struct kref *ref)
{
	struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
	int devidx;

	devidx = mmc_get_devidx(md->disk);
	ida_free(&mmc_blk_ida, devidx);

	mutex_lock(&open_lock);
	md->disk->private_data = NULL;
	mutex_unlock(&open_lock);

	put_disk(md->disk);
	kfree(md);
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	kref_put(&md->kref, mmc_blk_kref_release);
}

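/*
 * Illustrative usage sketch (not part of the driver): every successful
 * mmc_blk_get() must be paired with an mmc_blk_put(); the final put drops
 * the kref and frees the mmc_blk_data.
 *
 *	struct mmc_blk_data *md = mmc_blk_get(disk);
 *
 *	if (md) {
 *		// ... use md->queue.card under the reference ...
 *		mmc_blk_put(md);
 *	}
 */
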
static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = sysfs_emit(buf, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_queue *mq;
	struct request *req;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	mq = &md->queue;

	/* Dispatch locking to the block layer */
	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req)) {
		count = PTR_ERR(req);
		goto out_put;
	}
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	blk_execute_rq(req, false);
	ret = req_to_mmc_queue_req(req)->drv_op_result;
	blk_mq_free_request(req);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}
out_put:
	mmc_blk_put(md);
	return count;
}

static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
		power_ro_lock_show, power_ro_lock_store);

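/*
 * Illustrative sketch of the "dispatch to the block layer" pattern used
 * above (not part of the driver): driver-private operations are serialized
 * with normal I/O by packing them into a REQ_OP_DRV_* request and letting
 * the queue's issue path execute them.
 *
 *	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
 *	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
 *	req_to_mmc_queue_req(req)->drv_op_result = -EIO;  // assume failure
 *	blk_execute_rq(req, false);                       // blocking
 *	ret = req_to_mmc_queue_req(req)->drv_op_result;
 *	blk_mq_free_request(req);
 */
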
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = sysfs_emit(buf, "%d\n",
			 get_disk_ro(dev_to_disk(dev)) ^
			 md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set;

	if (kstrtoul(buf, 0, &set)) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);

static struct attribute *mmc_disk_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_ro_lock_until_next_power_on.attr,
	NULL,
};

static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	umode_t mode = a->mode;

	if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
	    (md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	    md->queue.card->ext_csd.boot_ro_lockable) {
		mode = S_IRUGO;
		if (!(md->queue.card->ext_csd.boot_ro_lock &
				EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
			mode |= S_IWUSR;
	}

	mmc_blk_put(md);
	return mode;
}

static const struct attribute_group mmc_disk_attr_group = {
	.is_visible	= mmc_disk_attrs_is_visible,
	.attrs		= mmc_disk_attrs,
};

static const struct attribute_group *mmc_disk_attr_groups[] = {
	&mmc_disk_attr_group,
	NULL,
};

static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		ret = 0;
		if ((mode & BLK_OPEN_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

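/*
 * Worked example (illustrative): the reported geometry is synthetic. For a
 * card with 15523840 512-byte sectors (~7.4 GiB), mmc_blk_getgeo() claims
 * 4 heads and 16 sectors per track, so cylinders = 15523840 / 64 = 242560.
 */
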
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
	unsigned int flags;
#define MMC_BLK_IOC_DROP	BIT(0)	/* drop this mrq */
#define MMC_BLK_IOC_SBC		BIT(1)	/* use mrq.sbc */

	struct mmc_rpmb_data *rpmb;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = memdup_user((void __user *)(unsigned long)
				 idata->ic.data_ptr, idata->buf_bytes);
	if (IS_ERR(idata->buf)) {
		err = PTR_ERR(idata->buf);
		goto idata_err;
	}

	return idata;

idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

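/*
 * Illustrative userspace sketch (not part of the driver): issuing a single
 * MMC_IOC_CMD that reads the 512-byte EXT_CSD (CMD8 on eMMC). The response
 * flag macros are kernel-internal; userspace normally copies their values
 * from the kernel's mmc headers. fd must be the whole block device (e.g.
 * /dev/mmcblk0) opened with CAP_SYS_RAWIO.
 *
 *	struct mmc_ioc_cmd ic = {};
 *	__u8 ext_csd[512];
 *
 *	ic.opcode = 8;			// MMC_SEND_EXT_CSD
 *	ic.arg = 0;
 *	ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *	ic.blksz = 512;
 *	ic.blocks = 1;
 *	ic.write_flag = 0;		// read
 *	mmc_ioc_cmd_set_data(ic, ext_csd);
 *
 *	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0)
 *		perror("MMC_IOC_CMD");
 */
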
static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data **idatas, int i)
{
	struct mmc_command cmd = {}, sbc = {};
	struct mmc_data data = {};
	struct mmc_request mrq = {};
	struct scatterlist sg;
	bool r1b_resp;
	unsigned int busy_timeout_ms;
	int err;
	unsigned int target_part;
	struct mmc_blk_ioc_data *idata = idatas[i];
	struct mmc_blk_ioc_data *prev_idata = NULL;

	if (!card || !md || !idata)
		return -EINVAL;

	if (idata->flags & MMC_BLK_IOC_DROP)
		return 0;

	if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
		prev_idata = idatas[i - 1];

	/*
	 * RPMB accesses come in from the character device, so we
	 * need to target these explicitly. Else we just target the
	 * partition type for the block device the ioctl() was issued
	 * on.
	 */
	if (idata->rpmb) {
		/* Support multiple RPMB partitions */
		target_part = idata->rpmb->part_index;
		target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
	} else {
		target_part = md->part_type;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, target_part);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (idata->rpmb || prev_idata) {
		sbc.opcode = MMC_SET_BLOCK_COUNT;
		/*
		 * We don't do any blockcount validation because the max size
		 * may be increased by a future standard. We just copy the
		 * 'Reliable Write' bit here.
		 */
		sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
		if (prev_idata)
			sbc.arg = prev_idata->ic.arg;
		sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		mrq.sbc = &sbc;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH))
		return mmc_sanitize(card, idata->ic.cmd_timeout_ms);

	/* If it's an R1B response we need some more preparations. */
	busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
	r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
	if (r1b_resp)
		mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);

	mmc_wait_for_req(card->host, &mrq);
	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));

	if (prev_idata) {
		memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp));
		if (sbc.error) {
			dev_err(mmc_dev(card->host), "%s: sbc error %d\n",
				__func__, sbc.error);
			return sbc.error;
		}
	}

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * Make sure the cache of the PARTITION_CONFIG register and
	 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
	 * changed it successfully.
	 */
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
	    (cmd.opcode == MMC_SWITCH)) {
		struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);

		/*
		 * Update cache so the next mmc_blk_part_switch call operates
		 * on up-to-date data.
		 */
		card->ext_csd.part_config = value;
		main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
	}

	/*
	 * Make sure to update CACHE_CTRL in case it was changed. The cache
	 * will get turned back on if the card is re-initialized, e.g.
	 * suspend/resume or hw reset in recovery.
	 */
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
	    (cmd.opcode == MMC_SWITCH)) {
		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;

		card->ext_csd.cache_ctrl = value;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (mmc_host_is_spi(card->host)) {
		if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
			return mmc_spi_err_check(card);
		return err;
	}

	/*
	 * Ensure RPMB, writes and R1B responses are completed by polling with
	 * CMD13. Note that we usually don't need to poll when using HW busy
	 * detection, but here it's needed since some commands may indicate the
	 * error through the R1 status bits.
	 */
	if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
		struct mmc_blk_busy_data cb_data = {
			.card = card,
		};

		err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
					  &mmc_blk_busy_cb, &cb_data);

		idata->ic.response[0] = cb_data.status;
	}

	return err;
}

static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
			     struct mmc_ioc_cmd __user *ic_ptr,
			     struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_ioc_data *idatas[1];
	struct mmc_queue *mq;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;
	struct request *req;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);
	/* This will be NULL on non-RPMB ioctl():s */
	idata->rpmb = rpmb;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	/*
	 * Dispatch the ioctl() into the block request queue.
	 */
	mq = &md->queue;
	req = blk_mq_alloc_request(mq->queue,
		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_done;
	}
	idatas[0] = idata;
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = idatas;
	req_to_mmc_queue_req(req)->ioc_count = 1;
	blk_execute_rq(req, false);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
	blk_mq_free_request(req);

cmd_done:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
				   struct mmc_ioc_multi_cmd __user *user,
				   struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_queue *mq;
	int err = 0, ioc_err = 0;
	__u64 num_of_cmds;
	unsigned int i, n;
	struct request *req;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (!num_of_cmds)
		return 0;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	n = num_of_cmds;
	idata = kcalloc(n, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			n = i;
			goto cmd_err;
		}
		/* This will be NULL on non-RPMB ioctl():s */
		idata[i]->rpmb = rpmb;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_err;
	}

	/*
	 * Dispatch the ioctl()s into the block request queue.
	 */
	mq = &md->queue;
	req = blk_mq_alloc_request(mq->queue,
		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_err;
	}
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = idata;
	req_to_mmc_queue_req(req)->ioc_count = n;
	blk_execute_rq(req, false);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

	/* copy to user if data and response */
	for (i = 0; i < n && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

	blk_mq_free_request(req);

cmd_err:
	for (i = 0; i < n; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

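/*
 * Illustrative userspace sketch (not part of the driver): batching two
 * commands with MMC_IOC_MULTI_CMD. The flexible cmds[] array means the
 * caller allocates the struct plus room for the commands.
 *
 *	struct mmc_ioc_multi_cmd *mc;
 *
 *	mc = calloc(1, sizeof(*mc) + 2 * sizeof(struct mmc_ioc_cmd));
 *	mc->num_of_cmds = 2;
 *	mc->cmds[0] = ...;	// e.g. CMD23 (SET_BLOCK_COUNT)
 *	mc->cmds[1] = ...;	// e.g. CMD25 (WRITE_MULTIPLE_BLOCK)
 *	if (ioctl(fd, MMC_IOC_MULTI_CMD, mc) < 0)
 *		perror("MMC_IOC_MULTI_CMD");
 *	free(mc);
 */
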
static int mmc_blk_check_blkdev(struct block_device *bdev)
{
	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition. This prevents overspray
	 * between sibling partitions.
	 */
	if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
		return -EPERM;
	return 0;
}

static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	struct mmc_blk_data *md;
	int ret;

	switch (cmd) {
	case MMC_IOC_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_cmd(md,
					(struct mmc_ioc_cmd __user *)arg,
					NULL);
		mmc_blk_put(md);
		return ret;
	case MMC_IOC_MULTI_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_multi_cmd(md,
					(struct mmc_ioc_multi_cmd __user *)arg,
					NULL);
		mmc_blk_put(md);
		return ret;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
					  sector_t *sector)
{
	struct mmc_blk_data *md;
	int ret;

	md = mmc_blk_get(disk);
	if (!md)
		return -EINVAL;

	if (md->queue.card)
		ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);
	else
		ret = -ENODEV;

	mmc_blk_put(md);

	return ret;
}

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
	.alternative_gpt_sector	= mmc_blk_alternative_gpt_sector,
};

static int mmc_blk_part_switch_pre(struct mmc_card *card,
				   unsigned int part_type)
{
	const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
	const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
	int ret = 0;

	if ((part_type & mask) == rpmb) {
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				return ret;
		}
		mmc_retune_pause(card->host);
	}

	return ret;
}

static int mmc_blk_part_switch_post(struct mmc_card *card,
				    unsigned int part_type)
{
	const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
	const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
	int ret = 0;

	if ((part_type & mask) == rpmb) {
		mmc_retune_unpause(card->host);
		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			ret = mmc_cmdq_enable(card);
	}

	return ret;
}

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type)
{
	int ret = 0;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		ret = mmc_blk_part_switch_pre(card, part_type);
		if (ret)
			return ret;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			mmc_blk_part_switch_post(card, part_type);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
	}

	main_md->part_curr = part_type;
	return ret;
}

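/*
 * Illustrative sketch (not part of the driver): switching from the main
 * user area to RPMB boils down to one CMD6 that rewrites the ACC bits of
 * PARTITION_CONFIG, bracketed by the pre/post hooks above:
 *
 *	mmc_blk_part_switch_pre(card, part);	// pause retune, stop CMDQ
 *	mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
 *		   (config & ~ACC_MASK) | part, card->ext_csd.part_time);
 *	mmc_blk_part_switch_post(card, prev);	// resume retune and CMDQ
 */
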
static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
	int err;
	u32 result;
	__be32 *blocks;
	u8 resp_sz = mmc_card_ult_capacity(card) ? 8 : 4;
	unsigned int noio_flag;

	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	err = mmc_app_cmd(card->host, card);
	if (err)
		return err;

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = resp_sz;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	noio_flag = memalloc_noio_save();
	blocks = kmalloc(resp_sz, GFP_KERNEL);
	memalloc_noio_restore(noio_flag);
	if (!blocks)
		return -ENOMEM;

	sg_init_one(&sg, blocks, resp_sz);

	mmc_wait_for_req(card->host, &mrq);

	if (mmc_card_ult_capacity(card)) {
		/*
		 * Normally, ACMD22 returns the number of written sectors as
		 * u32. SDUC, however, returns it as u64. This is not a
		 * superfluous requirement, because SDUC writes may exceed 2TB.
		 * For Linux mmc, however, the previous write operation could
		 * not exceed the block layer limits, thus just make room
		 * for a u64 and cast the response back to u32.
		 */
		result = clamp_val(get_unaligned_be64(blocks), 0, UINT_MAX);
	} else {
		result = ntohl(*blocks);
	}
	kfree(blocks);

	if (cmd.error || data.error)
		return -EIO;

	*written_blocks = result;

	return 0;
}

static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
{
	if (host->actual_clock)
		return host->actual_clock / 1000;

	/* Clock may be subject to a divisor, fudge it by a factor of 2. */
	if (host->ios.clock)
		return host->ios.clock / 2000;

	/* How can there be no clock */
	WARN_ON_ONCE(1);
	return 100; /* 100 kHz is minimum possible value */
}

static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
					    struct mmc_data *data)
{
	unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
	unsigned int khz;

	if (data->timeout_clks) {
		khz = mmc_blk_clock_khz(host);
		ms += DIV_ROUND_UP(data->timeout_clks, khz);
	}

	return ms;
}

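/*
 * Worked example (illustrative): with data->timeout_ns == 100000000 (100 ms)
 * and data->timeout_clks == 50000 on a 50 MHz (50000 kHz) bus:
 *
 *	ms  = DIV_ROUND_UP(100000000, 1000000)	=> 100
 *	ms += DIV_ROUND_UP(50000, 50000)	=> 101 ms total
 */
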
/*
 * Attempts to reset the card and get back to the requested partition.
 * Therefore any error here must result in cancelling the block layer
 * request, it must not be reattempted without going through the mmc_blk
 * partition sanity checks.
 */
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;
	struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev);

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host->card);
	/*
	 * A successful reset will leave the card in the main partition, but
	 * upon failure it might not be, so set it to MMC_BLK_PART_INVALID
	 * in that case.
	 */
	main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type;
	if (err)
		return err;
	/* Ensure we switch back to the correct partition */
	if (mmc_blk_part_switch(host->card, md->part_type))
		/*
		 * We have failed to get back into the correct
		 * partition, so we need to abort the whole request.
		 */
		return -ENODEV;
	return 0;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

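/*
 * Illustrative sketch (not part of the driver): reset_done acts as a
 * per-request-type latch, so each I/O type attempts a reset at most once
 * until a later success re-arms it:
 *
 *	err = mmc_blk_reset(md, card->host, MMC_BLK_READ); // first attempt
 *	...
 *	mmc_blk_reset(md, card->host, MMC_BLK_READ);	// now returns -EEXIST
 *	mmc_blk_reset_success(md, MMC_BLK_READ);	// re-arms the latch
 */
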
static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq)
{
	struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data;
	int i;

	for (i = 1; i < mq_rq->ioc_count; i++) {
		if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT &&
		    mmc_op_multi(idata[i]->ic.opcode)) {
			idata[i - 1]->flags |= MMC_BLK_IOC_DROP;
			idata[i]->flags |= MMC_BLK_IOC_SBC;
		}
	}
}

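/*
 * Illustrative example (not part of the driver): for a multi-ioctl sequence
 * [CMD23, CMD25], mmc_blk_check_sbc() marks CMD23 with MMC_BLK_IOC_DROP so
 * it is not issued on its own, and marks CMD25 with MMC_BLK_IOC_SBC so that
 * __mmc_blk_ioctl_cmd() sends the pair as one request with mrq.sbc set.
 */
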
/*
 * Driver-private (non-block) commands come back from the block layer after
 * it has queued and processed them with all other requests, and then they
 * get issued in this function.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mq_rq;
	struct mmc_card *card = mq->card;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_blk_ioc_data **idata;
	bool rpmb_ioctl;
	u8 **ext_csd;
	u32 status;
	int ret;
	int i;

	mq_rq = req_to_mmc_queue_req(req);
	rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);

	switch (mq_rq->drv_op) {
	case MMC_DRV_OP_IOCTL:
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				break;
		}

		mmc_blk_check_sbc(mq_rq);

		fallthrough;
	case MMC_DRV_OP_IOCTL_RPMB:
		idata = mq_rq->drv_op_data;
		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
			ret = __mmc_blk_ioctl_cmd(card, md, idata, i);
			if (ret)
				break;
		}
		/* Always switch back to main area after RPMB access */
		if (rpmb_ioctl)
			mmc_blk_part_switch(card, 0);
		else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			mmc_cmdq_enable(card);
		break;
	case MMC_DRV_OP_BOOT_WP:
		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				 card->ext_csd.boot_ro_lock |
				 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				 card->ext_csd.part_time);
		if (ret)
			pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
			       md->disk->disk_name, ret);
		else
			card->ext_csd.boot_ro_lock |=
				EXT_CSD_BOOT_WP_B_PWR_WP_EN;
		break;
	case MMC_DRV_OP_GET_CARD_STATUS:
		ret = mmc_send_status(card, &status);
		if (!ret)
			ret = status;
		break;
	case MMC_DRV_OP_GET_EXT_CSD:
		ext_csd = mq_rq->drv_op_data;
		ret = mmc_get_ext_csd(card, ext_csd);
		break;
	default:
		pr_err("%s: unknown driver specific operation\n",
		       md->disk->disk_name);
		ret = -EINVAL;
		break;
	}
	mq_rq->drv_op_result = ret;
	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
				   int type, unsigned int erase_arg)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int nr;
	sector_t from;
	int err = 0;
	blk_status_t status = BLK_STS_OK;

	if (!mmc_card_can_erase(card)) {
		status = BLK_STS_NOTSUPP;
		goto fail;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	do {
		err = 0;
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 erase_arg == MMC_TRIM_ARG ?
					 INAND_CMD38_ARG_TRIM :
					 INAND_CMD38_ARG_ERASE,
					 card->ext_csd.generic_cmd6_time);
		}
		if (!err)
			err = mmc_erase(card, from, nr, erase_arg);
	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
	if (err)
		status = BLK_STS_IOERR;
	else
		mmc_blk_reset_success(md, type);
fail:
	blk_mq_end_request(req, status);
}

static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req)
{
	mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG);
}

static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int arg = card->erase_arg;

	if (mmc_card_broken_sd_discard(card))
		arg = SD_ERASE_ARG;

	mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
}

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
					struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int nr, arg;
	sector_t from;
	int err = 0, type = MMC_BLK_SECDISCARD;
	blk_status_t status = BLK_STS_OK;

	if (!(mmc_card_can_secure_erase_trim(card))) {
		status = BLK_STS_NOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_card_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 card->ext_csd.generic_cmd6_time);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err) {
		status = BLK_STS_IOERR;
		goto out;
	}

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 card->ext_csd.generic_cmd6_time);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err) {
			status = BLK_STS_IOERR;
			goto out;
		}
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_mq_end_request(req, status);
}

static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card->host);
	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

1354 * reliable write can handle, thus finish the request in
1355 * partial completions.
1356 */
d0c97cfb
AW
1357static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1358 struct mmc_card *card,
1359 struct request *req)
f4c5522b 1360{
f4c5522b
AW
1361 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1362 /* Legacy mode imposes restrictions on transfers. */
9cb38f7a 1363 if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
f4c5522b
AW
1364 brq->data.blocks = 1;
1365
1366 if (brq->data.blocks > card->ext_csd.rel_sectors)
1367 brq->data.blocks = card->ext_csd.rel_sectors;
1368 else if (brq->data.blocks < card->ext_csd.rel_sectors)
1369 brq->data.blocks = 1;
1370 }
f4c5522b
AW
1371}
1372
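/*
 * Worked example (illustrative): with rel_sectors == 8 in legacy mode, a
 * 32-sector write aligned to an 8-sector boundary is clamped to 8 blocks
 * per transfer; an unaligned or shorter-than-8-sector write is reduced to
 * a single block, so the request finishes in several partial completions.
 */
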
#define CMD_ERRORS_EXCL_OOR						\
	(R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

#define CMD_ERRORS							\
	(CMD_ERRORS_EXCL_OOR |						\
	 R1_OUT_OF_RANGE)	/* Command argument out of range */	\

static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
	u32 val;

	/*
	 * Per the SD specification (physical layer version 4.10)[1],
	 * section 4.3.3, it explicitly states that "When the last
	 * block of user area is read using CMD18, the host should
	 * ignore OUT_OF_RANGE error that may occur even the sequence
	 * is correct". And JESD84-B51 for eMMC also has a similar
	 * statement on section 6.8.3.
	 *
	 * Multiple block read/write could be done by either predefined
	 * method, namely CMD23, or open-ending mode. For open-ending mode,
	 * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
	 *
	 * However the spec[1] doesn't tell us whether we should also
	 * ignore that for predefined method. But per the spec[1], section
	 * 4.15 Set Block Count Command, it says "If illegal block count
	 * is set, out of range error will be indicated during read/write
	 * operation (For example, data transfer is stopped at user area
	 * boundary)." In other words, we could expect an out of range error
	 * in the response for the following CMD18/25. And if the argument of
	 * CMD23 + the argument of CMD18/25 exceeds the max number of blocks,
	 * we could also expect to get a -ETIMEDOUT or any error number from
	 * the host drivers due to missing data response (for write)/data (for
	 * read), as the card will stop the data transfer by itself per the
	 * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode.
	 */

	if (!brq->stop.error) {
		bool oor_with_open_end;
		/* If there is no error yet, check R1 response */

		val = brq->stop.resp[0] & CMD_ERRORS;
		oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;

		if (val && !oor_with_open_end)
			brq->stop.error = -EIO;
	}
}

static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
			      int recovery_mode, bool *do_rel_wr_p,
			      bool *do_data_tag_p)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	bool do_rel_wr, do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		    rq_data_dir(req) == WRITE &&
		    (md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));

	mmc_crypto_prepare_req(mqrq);

	brq->mrq.data = &brq->data;
	brq->mrq.tag = req->tag;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;

	if (rq_data_dir(req) == READ) {
		brq->data.flags = MMC_DATA_READ;
		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		brq->data.flags = MMC_DATA_WRITE;
		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	}

	brq->data.blksz = 512;
	brq->data.blocks = blk_rq_sectors(req);
	brq->data.blk_addr = blk_rq_pos(req);

	/*
	 * The command queue supports 2 priorities: "high" (1) and "simple" (0).
	 * The eMMC will give "high" priority tasks priority over "simple"
	 * priority tasks. Here we always set "simple" priority by not setting
	 * MMC_DATA_PRIO.
	 */

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * Some SD cards in SPI mode return a CRC error or even lock up
		 * completely when trying to read the last block using a
		 * multiblock read command.
		 */
		if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
		    (blk_rq_pos(req) + blk_rq_sectors(req) ==
		     get_capacity(md->disk)))
			brq->data.blocks--;

		/*
		 * After a read error, we redo the request one (native) sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (recovery_mode)
			brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (do_rel_wr) {
		mmc_apply_rel_rw(brq, card, req);
		brq->data.flags |= MMC_DATA_REL_WR;
	}

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = card->ext_csd.data_tag_unit_size &&
		      (req->cmd_flags & REQ_META) &&
		      (rq_data_dir(req) == WRITE) &&
		      ((brq->data.blocks * brq->data.blksz) >=
		       card->ext_csd.data_tag_unit_size);

	if (do_data_tag)
		brq->data.flags |= MMC_DATA_DAT_TAG;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	if (do_rel_wr_p)
		*do_rel_wr_p = do_rel_wr;

	if (do_data_tag_p)
		*do_data_tag_p = do_data_tag;
}

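/*
 * Worked example (illustrative) for the sg trim above: if the request maps
 * to two 4 KiB sg entries but blocks was clamped to 12 sectors (6144 bytes),
 * the loop leaves entry 0 intact, shrinks entry 1 by the 2048-byte overshoot
 * to 2048 bytes, and sets sg_len = 2.
 */
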
#define MMC_CQE_RETRIES 2

static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct request_queue *q = req->q;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	unsigned long flags;
	bool put_card;
	int err;

	mmc_cqe_post_req(host, mrq);

	if (mrq->cmd && mrq->cmd->error)
		err = mrq->cmd->error;
	else if (mrq->data && mrq->data->error)
		err = mrq->data->error;
	else
		err = 0;

	if (err) {
		if (mqrq->retries++ < MMC_CQE_RETRIES)
			blk_mq_requeue_request(req, true);
		else
			blk_mq_end_request(req, BLK_STS_IOERR);
	} else if (mrq->data) {
		if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
			blk_mq_requeue_request(req, true);
		else
			__blk_mq_end_request(req, BLK_STS_OK);
	} else if (mq->in_recovery) {
		blk_mq_requeue_request(req, true);
	} else {
		blk_mq_end_request(req, BLK_STS_OK);
	}

	spin_lock_irqsave(&mq->lock, flags);

	mq->in_flight[issue_type] -= 1;

	put_card = (mmc_tot_in_flight(mq) == 0);

	mmc_cqe_check_busy(mq);

	spin_unlock_irqrestore(&mq->lock, flags);

	if (!mq->cqe_busy)
		blk_mq_run_hw_queues(q, true);

	if (put_card)
		mmc_put_card(mq->card, &mq->ctx);
}

void mmc_blk_cqe_recovery(struct mmc_queue *mq)
{
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	int err;

	pr_debug("%s: CQE recovery start\n", mmc_hostname(host));

	err = mmc_cqe_recovery(host);
	if (err)
		mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
	mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);

	pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}

static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;

	/*
	 * Block layer timeouts race with completions which means the normal
	 * completion path cannot be used during recovery.
	 */
	if (mq->in_recovery)
		mmc_blk_cqe_complete_rq(mq, req);
	else if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}

static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done = mmc_blk_cqe_req_done;
	mrq->recovery_notifier = mmc_cqe_recovery_notifier;

	return mmc_cqe_start_req(host, mrq);
}

static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
						 struct request *req)
{
	struct mmc_blk_request *brq = &mqrq->brq;

	memset(brq, 0, sizeof(*brq));

	brq->mrq.cmd = &brq->cmd;
	brq->mrq.tag = req->tag;

	return &brq->mrq;
}

static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);

	mrq->cmd->opcode = MMC_SWITCH;
	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
			(EXT_CSD_FLUSH_CACHE << 16) |
			(1 << 8) |
			EXT_CSD_CMD_SET_NORMAL;
	mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;

	return mmc_blk_cqe_start_req(mq->card->host, mrq);
}

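/*
 * Worked decode (illustrative): with MMC_SWITCH_MODE_WRITE_BYTE == 0x03,
 * EXT_CSD_FLUSH_CACHE == 32 (0x20) and EXT_CSD_CMD_SET_NORMAL == 0x01, the
 * argument built above is 0x03200101: "write byte 0x01 to EXT_CSD index 32",
 * i.e. trigger a cache flush as a direct command on the command queue.
 */
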
static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;
	int err;

	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
	mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
	mmc_pre_req(host, &mqrq->brq.mrq);

	err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
	if (err)
		mmc_post_req(host, &mqrq->brq.mrq, err);

	return err;
}

static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;

	if (host->hsq_enabled)
		return mmc_blk_hsq_issue_rw_rq(mq, req);

	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);

	return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
}

ca5717f7
AH
1716static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1717 struct mmc_card *card,
b3fa3e6d 1718 int recovery_mode,
ca5717f7
AH
1719 struct mmc_queue *mq)
1720{
1721 u32 readcmd, writecmd;
1722 struct mmc_blk_request *brq = &mqrq->brq;
67e69d52 1723 struct request *req = mmc_queue_req_to_req(mqrq);
ca5717f7
AH
1724 struct mmc_blk_data *md = mq->blkdata;
1725 bool do_rel_wr, do_data_tag;
1726
b3fa3e6d 1727 mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
ca5717f7
AH
1728
1729 brq->mrq.cmd = &brq->cmd;
1730
1731 brq->cmd.arg = blk_rq_pos(req);
1732 if (!mmc_card_blockaddr(card))
1733 brq->cmd.arg <<= 9;
1734 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1735
54d49d77
PF
1736 if (brq->data.blocks > 1 || do_rel_wr) {
1737 /* SPI multiblock writes terminate using a special
1738 * token, not a STOP_TRANSMISSION request.
d0c97cfb 1739 */
54d49d77
PF
1740 if (!mmc_host_is_spi(card->host) ||
1741 rq_data_dir(req) == READ)
1742 brq->mrq.stop = &brq->stop;
1743 readcmd = MMC_READ_MULTIPLE_BLOCK;
1744 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1745 } else {
1746 brq->mrq.stop = NULL;
1747 readcmd = MMC_READ_SINGLE_BLOCK;
1748 writecmd = MMC_WRITE_BLOCK;
1749 }
ca5717f7 1750 brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;
4265900e 1751
54d49d77
PF
1752 /*
1753 * Pre-defined multi-block transfers are preferable to
1754 * open-ended ones (and necessary for reliable writes).
1755 * However, it is not sufficient to just send CMD23,
1756 * and avoid the final CMD12, as on an error condition
1757 * CMD12 (stop) needs to be sent anyway. This, coupled
1758 * with Auto-CMD23 enhancements provided by some
1759 * hosts, means that the complexity of dealing
1760 * with this is best left to the host. If CMD23 is
1761 * supported by card and host, we'll fill sbc in and let
1762 * the host deal with handling it correctly. This means
1763 * that for hosts that don't expose MMC_CAP_CMD23, no
1764 * change of behavior will be observed.
1765 *
1766 * N.B.: Some MMC cards experience performance degradation.
1767 * We'll avoid using CMD23-bounded multiblock writes for
1768 * these, while retaining features like reliable writes.
1769 */
4265900e
SD
1770 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1771 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1772 do_data_tag)) {
54d49d77
PF
1773 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1774 brq->sbc.arg = brq->data.blocks |
4265900e
SD
1775 (do_rel_wr ? (1 << 31) : 0) |
1776 (do_data_tag ? (1 << 29) : 0);
54d49d77
PF
1777 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1778 brq->mrq.sbc = &brq->sbc;
1779 }
403a0293
AA
1780
1781 if (mmc_card_ult_capacity(card)) {
1782 brq->cmd.ext_addr = blk_rq_pos(req) >> 32;
1783 brq->cmd.has_ext_addr = true;
1784 }
54d49d77 1785}
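/*
 * For reference: the SET_BLOCK_COUNT (CMD23) argument built above packs
 * the block count into the low 16 bits plus two modifier flags:
 *
 *   bit 31 - reliable write request
 *   bit 29 - data tag
 *
 * e.g. an 8-block reliable write yields sbc.arg = (1 << 31) | 8.
 */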
6a79e391 1786
81196976 1787#define MMC_MAX_RETRIES 5
7eb43d53 1788#define MMC_DATA_RETRIES 2
81196976
AH
1789#define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1)
1790
7eb43d53
AH
1791static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
1792{
1793 struct mmc_command cmd = {
1794 .opcode = MMC_STOP_TRANSMISSION,
1795 .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
1796 /* Some hosts wait for busy anyway, so provide a busy timeout */
1797 .busy_timeout = timeout,
1798 };
1799
1800 return mmc_wait_for_cmd(card->host, &cmd, 5);
1801}
1802
1803static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
1804{
1805 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1806 struct mmc_blk_request *brq = &mqrq->brq;
1807 unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
1808 int err;
1809
1810 mmc_retune_hold_now(card->host);
1811
1812 mmc_blk_send_stop(card, timeout);
1813
972d5084 1814 err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);
7eb43d53
AH
1815
1816 mmc_retune_release(card->host);
1817
1818 return err;
1819}
1820
81196976
AH
1821#define MMC_READ_SINGLE_RETRIES 2
1822
b3fa3e6d 1823/* Single (native) sector read during recovery */
81196976
AH
1824static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
1825{
1826 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1827 struct mmc_request *mrq = &mqrq->brq.mrq;
1828 struct mmc_card *card = mq->card;
1829 struct mmc_host *host = card->host;
1830 blk_status_t error = BLK_STS_OK;
b3fa3e6d 1831 size_t bytes_per_read = queue_physical_block_size(mq->queue);
81196976
AH
1832
1833 do {
1834 u32 status;
1835 int err;
54309fde 1836 int retries = 0;
81196976 1837
54309fde
CL
1838 while (retries++ <= MMC_READ_SINGLE_RETRIES) {
1839 mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
81196976 1840
54309fde 1841 mmc_wait_for_req(host, mrq);
81196976 1842
54309fde 1843 err = mmc_send_status(card, &status);
81196976
AH
1844 if (err)
1845 goto error_exit;
81196976 1846
54309fde
CL
1847 if (!mmc_host_is_spi(host) &&
1848 !mmc_ready_for_data(status)) {
1849 err = mmc_blk_fix_state(card, req);
1850 if (err)
1851 goto error_exit;
1852 }
81196976 1853
54309fde
CL
1854 if (!mrq->cmd->error)
1855 break;
1856 }
81196976
AH
1857
1858 if (mrq->cmd->error ||
1859 mrq->data->error ||
1860 (!mmc_host_is_spi(host) &&
1861 (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
1862 error = BLK_STS_IOERR;
1863 else
1864 error = BLK_STS_OK;
1865
b3fa3e6d 1866 } while (blk_update_request(req, error, bytes_per_read));
81196976
AH
1867
1868 return;
1869
1870error_exit:
1871 mrq->data->bytes_xfered = 0;
b3fa3e6d 1872 blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
81196976
AH
1873 /* Let it try the remaining request again */
1874 if (mqrq->retries > MMC_MAX_RETRIES - 1)
1875 mqrq->retries = MMC_MAX_RETRIES - 1;
1876}
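/*
 * In short, the path above walks a failed request one native sector at a
 * time: each sector gets up to MMC_READ_SINGLE_RETRIES attempts (fixing
 * up the card state in between when needed), and blk_update_request()
 * advances the request by bytes_per_read until nothing remains or an
 * unrecoverable error ends the walk.
 */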
1877
7eb43d53
AH
1878static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
1879{
1880 return !!brq->mrq.sbc;
1881}
1882
1883static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
1884{
1885 return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
1886}
1887
1888/*
1889 * Check for errors the host controller driver might not have seen, such as
1890 * response mode errors or invalid card state.
1891 */
1892static bool mmc_blk_status_error(struct request *req, u32 status)
1893{
1894 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1895 struct mmc_blk_request *brq = &mqrq->brq;
1896 struct mmc_queue *mq = req->q->queuedata;
1897 u32 stop_err_bits;
1898
1899 if (mmc_host_is_spi(mq->card->host))
aa950144 1900 return false;
7eb43d53
AH
1901
1902 stop_err_bits = mmc_blk_stop_err_bits(brq);
1903
1904 return brq->cmd.resp[0] & CMD_ERRORS ||
1905 brq->stop.resp[0] & stop_err_bits ||
1906 status & stop_err_bits ||
40c96853 1907 (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
7eb43d53
AH
1908}
1909
1910static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
1911{
1912 return !brq->sbc.error && !brq->cmd.error &&
1913 !(brq->cmd.resp[0] & CMD_ERRORS);
1914}
1915
1916/*
1917 * Requests are completed by mmc_blk_mq_complete_rq(), which applies a
1918 * simple policy:
1919 * 1. A request that has transferred at least some data is considered
1920 * successful and will be requeued if there is remaining data to
1921 * transfer.
1922 * 2. Otherwise the number of retries is incremented and the request
1923 * will be requeued if there are remaining retries.
1924 * 3. Otherwise the request will be errored out.
1925 * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
1926 * mqrq->retries. So there are only 4 possible actions here:
1927 * 1. do not accept the bytes_xfered value i.e. set it to zero
1928 * 2. change mqrq->retries to determine the number of retries
1929 * 3. try to reset the card
1930 * 4. read one sector at a time
1931 */
81196976
AH
1932static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
1933{
1934 int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1935 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1936 struct mmc_blk_request *brq = &mqrq->brq;
1937 struct mmc_blk_data *md = mq->blkdata;
1938 struct mmc_card *card = mq->card;
7eb43d53
AH
1939 u32 status;
1940 u32 blocks;
1941 int err;
81196976 1942
7eb43d53
AH
1943 /*
1944 * Check for errors the host driver might not have seen. Set the number
1945 * of bytes transferred to zero in that case.
1946 */
1947 err = __mmc_send_status(card, &status, 0);
1948 if (err || mmc_blk_status_error(req, status))
1949 brq->data.bytes_xfered = 0;
81196976
AH
1950
1951 mmc_retune_release(card->host);
1952
1953 /*
7eb43d53
AH
1954 * Try again to get the status. This also provides an opportunity for
1955 * re-tuning.
81196976 1956 */
7eb43d53
AH
1957 if (err)
1958 err = __mmc_send_status(card, &status, 0);
81196976 1959
7eb43d53
AH
1960 /*
1961 * If the card has been removed, there is nothing more to do once the
1962 * number of bytes transferred has been updated.
1963 */
1964 if (err && mmc_detect_card_removed(card->host))
1965 return;
81196976 1966
7eb43d53
AH
1967 /* Try to get back to "tran" state */
1968 if (!mmc_host_is_spi(mq->card->host) &&
40c96853 1969 (err || !mmc_ready_for_data(status)))
7eb43d53
AH
1970 err = mmc_blk_fix_state(mq->card, req);
1971
1972 /*
1973 * Special case for SD cards where the card might record the number of
1974 * blocks written.
1975 */
1976 if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
1977 rq_data_dir(req) == WRITE) {
1978 if (mmc_sd_num_wr_blocks(card, &blocks))
1979 brq->data.bytes_xfered = 0;
1980 else
1981 brq->data.bytes_xfered = blocks << 9;
81196976 1982 }
7eb43d53
AH
1983
1984 /* Reset if the card is in a bad state */
1985 if (!mmc_host_is_spi(mq->card->host) &&
1986 err && mmc_blk_reset(md, card->host, type)) {
f3fa33ac 1987 pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
81196976 1988 mqrq->retries = MMC_NO_RETRIES;
7eb43d53
AH
1989 return;
1990 }
1991
1992 /*
1993 * If anything was done, just return and if there is anything remaining
1994 * on the request it will get requeued.
1995 */
1996 if (brq->data.bytes_xfered)
1997 return;
1998
1999 /* Reset before last retry */
406e1480
CL
2000 if (mqrq->retries + 1 == MMC_MAX_RETRIES &&
2001 mmc_blk_reset(md, card->host, type))
2002 return;
7eb43d53
AH
2003
2004 /* Command errors fail fast, so use all MMC_MAX_RETRIES */
2005 if (brq->sbc.error || brq->cmd.error)
2006 return;
2007
2008 /* Reduce the remaining retries for data errors */
2009 if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
2010 mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
2011 return;
2012 }
2013
b3fa3e6d
CL
2014 if (rq_data_dir(req) == READ && brq->data.blocks >
2015 queue_physical_block_size(mq->queue) >> 9) {
2016 /* Read one (native) sector at a time */
7eb43d53
AH
2017 mmc_blk_read_single(mq, req);
2018 return;
81196976
AH
2019 }
2020}
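/*
 * A sketch of the retry ladder above for a request that transferred no
 * data: command (sbc/cmd) errors return early and so keep all
 * MMC_MAX_RETRIES attempts; the first data error bumps mqrq->retries so
 * that only MMC_DATA_RETRIES attempts remain; once those are spent, a
 * multi-sector read falls back to mmc_blk_read_single(), and the card
 * is additionally reset just before the final retry.
 */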
2021
10f21df4
AH
2022static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
2023{
2024 mmc_blk_eval_resp_error(brq);
2025
2026 return brq->sbc.error || brq->cmd.error || brq->stop.error ||
2027 brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
2028}
2029
5d435933
CL
2030static int mmc_spi_err_check(struct mmc_card *card)
2031{
2032 u32 status = 0;
2033 int err;
2034
2035 /*
2036 * SPI does not have a TRAN state we have to wait on; instead the
2037 * card is ready again when it no longer holds the line LOW.
2038 * We still have to ensure two things here before we know the write
2039 * was successful:
2040 * 1. The card has not disconnected during busy and we actually read our
2041 * own pull-up, thinking it was still connected, so ensure it
2042 * still responds.
2043 * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a
2044 * just reconnected card after being disconnected during busy.
2045 */
2046 err = __mmc_send_status(card, &status, 0);
2047 if (err)
2048 return err;
2049 /* All R1 and R2 bits of SPI are errors in our case */
2050 if (status)
2051 return -EIO;
2052 return 0;
2053}
2054
6966e609
UH
2055static int mmc_blk_busy_cb(void *cb_data, bool *busy)
2056{
2057 struct mmc_blk_busy_data *data = cb_data;
2058 u32 status = 0;
2059 int err;
2060
2061 err = mmc_send_status(data->card, &status);
2062 if (err)
2063 return err;
2064
2065 /* Accumulate response error bits. */
2066 data->status |= status;
2067
2068 *busy = !mmc_ready_for_data(status);
2069 return 0;
2070}
2071
88a51646
AH
2072static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
2073{
2074 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
6966e609 2075 struct mmc_blk_busy_data cb_data;
88a51646
AH
2076 int err;
2077
5d435933 2078 if (rq_data_dir(req) == READ)
88a51646
AH
2079 return 0;
2080
5d435933
CL
2081 if (mmc_host_is_spi(card->host)) {
2082 err = mmc_spi_err_check(card);
2083 if (err)
2084 mqrq->brq.data.bytes_xfered = 0;
2085 return err;
2086 }
2087
6966e609
UH
2088 cb_data.card = card;
2089 cb_data.status = 0;
1760fdb6 2090 err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
2ebbdace 2091 &mmc_blk_busy_cb, &cb_data);
88a51646 2092
f47a1fe3
AH
2093 /*
2094 * Do not assume data transferred correctly if there are any error bits
2095 * set.
2096 */
6966e609 2097 if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
f47a1fe3 2098 mqrq->brq.data.bytes_xfered = 0;
88a51646
AH
2099 err = err ? err : -EIO;
2100 }
2101
f47a1fe3 2102 /* Copy the exception bit so it will be seen later on */
6966e609 2103 if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT)
f47a1fe3
AH
2104 mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
2105
88a51646
AH
2106 return err;
2107}
2108
10f21df4
AH
2109static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
2110 struct request *req)
2111{
2112 int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
2113
2114 mmc_blk_reset_success(mq->blkdata, type);
2115}
2116
81196976
AH
2117static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
2118{
2119 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2120 unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
2121
2122 if (nr_bytes) {
2123 if (blk_update_request(req, BLK_STS_OK, nr_bytes))
2124 blk_mq_requeue_request(req, true);
2125 else
2126 __blk_mq_end_request(req, BLK_STS_OK);
2127 } else if (!blk_rq_bytes(req)) {
2128 __blk_mq_end_request(req, BLK_STS_IOERR);
2129 } else if (mqrq->retries++ < MMC_MAX_RETRIES) {
2130 blk_mq_requeue_request(req, true);
2131 } else {
2132 if (mmc_card_removed(mq->card))
2133 req->rq_flags |= RQF_QUIET;
2134 blk_mq_end_request(req, BLK_STS_IOERR);
2135 }
2136}
2137
2138static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
2139 struct mmc_queue_req *mqrq)
2140{
2141 return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
2142 (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
2143 mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
2144}
2145
2146static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
2147 struct mmc_queue_req *mqrq)
2148{
2149 if (mmc_blk_urgent_bkops_needed(mq, mqrq))
0c204979 2150 mmc_run_bkops(mq->card);
81196976
AH
2151}
2152
511ce378
BW
2153static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
2154{
2155 struct mmc_queue_req *mqrq =
2156 container_of(mrq, struct mmc_queue_req, brq.mrq);
2157 struct request *req = mmc_queue_req_to_req(mqrq);
2158 struct request_queue *q = req->q;
2159 struct mmc_queue *mq = q->queuedata;
2160 struct mmc_host *host = mq->card->host;
2161 unsigned long flags;
2162
2163 if (mmc_blk_rq_error(&mqrq->brq) ||
2164 mmc_blk_urgent_bkops_needed(mq, mqrq)) {
2165 spin_lock_irqsave(&mq->lock, flags);
2166 mq->recovery_needed = true;
2167 mq->recovery_req = req;
2168 spin_unlock_irqrestore(&mq->lock, flags);
2169
2170 host->cqe_ops->cqe_recovery_start(host);
2171
2172 schedule_work(&mq->recovery_work);
2173 return;
2174 }
2175
2176 mmc_blk_rw_reset_success(mq, req);
2177
2178 /*
2179 * Block layer timeouts race with completions which means the normal
2180 * completion path cannot be used during recovery.
2181 */
2182 if (mq->in_recovery)
2183 mmc_blk_cqe_complete_rq(mq, req);
15f73f5b 2184 else if (likely(!blk_should_fake_timeout(req->q)))
511ce378
BW
2185 blk_mq_complete_request(req);
2186}
2187
81196976
AH
2188void mmc_blk_mq_complete(struct request *req)
2189{
2190 struct mmc_queue *mq = req->q->queuedata;
407a1c57 2191 struct mmc_host *host = mq->card->host;
81196976 2192
407a1c57 2193 if (host->cqe_enabled)
1e8e55b6 2194 mmc_blk_cqe_complete_rq(mq, req);
15f73f5b 2195 else if (likely(!blk_should_fake_timeout(req->q)))
1e8e55b6 2196 mmc_blk_mq_complete_rq(mq, req);
81196976
AH
2197}
2198
2199static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
2200 struct request *req)
2201{
2202 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
88a51646 2203 struct mmc_host *host = mq->card->host;
81196976 2204
88a51646
AH
2205 if (mmc_blk_rq_error(&mqrq->brq) ||
2206 mmc_blk_card_busy(mq->card, req)) {
2207 mmc_blk_mq_rw_recovery(mq, req);
2208 } else {
2209 mmc_blk_rw_reset_success(mq, req);
2210 mmc_retune_release(host);
2211 }
81196976
AH
2212
2213 mmc_blk_urgent_bkops(mq, mqrq);
2214}
2215
4b430d4a 2216static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
81196976 2217{
81196976
AH
2218 unsigned long flags;
2219 bool put_card;
2220
f5d72c5c 2221 spin_lock_irqsave(&mq->lock, flags);
81196976 2222
4b430d4a 2223 mq->in_flight[issue_type] -= 1;
81196976
AH
2224
2225 put_card = (mmc_tot_in_flight(mq) == 0);
2226
f5d72c5c 2227 spin_unlock_irqrestore(&mq->lock, flags);
81196976
AH
2228
2229 if (put_card)
2230 mmc_put_card(mq->card, &mq->ctx);
2231}
2232
639d3531
SAS
2233static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
2234 bool can_sleep)
81196976 2235{
4b430d4a 2236 enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
81196976
AH
2237 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2238 struct mmc_request *mrq = &mqrq->brq.mrq;
2239 struct mmc_host *host = mq->card->host;
2240
2241 mmc_post_req(host, mrq, 0);
2242
10f21df4
AH
2243 /*
2244 * Block layer timeouts race with completions which means the normal
2245 * completion path cannot be used during recovery.
2246 */
639d3531 2247 if (mq->in_recovery) {
10f21df4 2248 mmc_blk_mq_complete_rq(mq, req);
639d3531
SAS
2249 } else if (likely(!blk_should_fake_timeout(req->q))) {
2250 if (can_sleep)
2251 blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
2252 else
2253 blk_mq_complete_request(req);
2254 }
81196976 2255
4b430d4a 2256 mmc_blk_mq_dec_in_flight(mq, issue_type);
81196976
AH
2257}
2258
10f21df4
AH
2259void mmc_blk_mq_recovery(struct mmc_queue *mq)
2260{
2261 struct request *req = mq->recovery_req;
2262 struct mmc_host *host = mq->card->host;
2263 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2264
2265 mq->recovery_req = NULL;
2266 mq->rw_wait = false;
2267
2268 if (mmc_blk_rq_error(&mqrq->brq)) {
2269 mmc_retune_hold_now(host);
2270 mmc_blk_mq_rw_recovery(mq, req);
2271 }
2272
2273 mmc_blk_urgent_bkops(mq, mqrq);
2274
639d3531 2275 mmc_blk_mq_post_req(mq, req, true);
10f21df4
AH
2276}
2277
81196976
AH
2278static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
2279 struct request **prev_req)
2280{
dc03e2e9 2281 if (mmc_host_can_done_complete(mq->card->host))
10f21df4
AH
2282 return;
2283
81196976
AH
2284 mutex_lock(&mq->complete_lock);
2285
2286 if (!mq->complete_req)
2287 goto out_unlock;
2288
2289 mmc_blk_mq_poll_completion(mq, mq->complete_req);
2290
2291 if (prev_req)
2292 *prev_req = mq->complete_req;
2293 else
639d3531 2294 mmc_blk_mq_post_req(mq, mq->complete_req, true);
81196976
AH
2295
2296 mq->complete_req = NULL;
2297
2298out_unlock:
2299 mutex_unlock(&mq->complete_lock);
2300}
2301
2302void mmc_blk_mq_complete_work(struct work_struct *work)
2303{
2304 struct mmc_queue *mq = container_of(work, struct mmc_queue,
2305 complete_work);
2306
2307 mmc_blk_mq_complete_prev_req(mq, NULL);
2308}
2309
2310static void mmc_blk_mq_req_done(struct mmc_request *mrq)
2311{
2312 struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
2313 brq.mrq);
2314 struct request *req = mmc_queue_req_to_req(mqrq);
2315 struct request_queue *q = req->q;
2316 struct mmc_queue *mq = q->queuedata;
10f21df4 2317 struct mmc_host *host = mq->card->host;
81196976 2318 unsigned long flags;
81196976 2319
dc03e2e9 2320 if (!mmc_host_can_done_complete(host)) {
10f21df4 2321 bool waiting;
81196976 2322
10f21df4
AH
2323 /*
2324 * We cannot complete the request in this context, so record
2325 * that there is a request to complete, and that a following
2326 * request does not need to wait (although it does need to
2327 * complete complete_req first).
2328 */
f5d72c5c 2329 spin_lock_irqsave(&mq->lock, flags);
10f21df4
AH
2330 mq->complete_req = req;
2331 mq->rw_wait = false;
2332 waiting = mq->waiting;
f5d72c5c 2333 spin_unlock_irqrestore(&mq->lock, flags);
10f21df4
AH
2334
2335 /*
2336 * If 'waiting' then the waiting task will complete this
2337 * request, otherwise queue a work to do it. Note that
2338 * complete_work may still race with the dispatch of a following
2339 * request.
2340 */
2341 if (waiting)
2342 wake_up(&mq->wait);
2343 else
dcf6e2e3 2344 queue_work(mq->card->complete_wq, &mq->complete_work);
10f21df4
AH
2345
2346 return;
2347 }
2348
2349 /* Take the recovery path for errors or urgent background operations */
2350 if (mmc_blk_rq_error(&mqrq->brq) ||
2351 mmc_blk_urgent_bkops_needed(mq, mqrq)) {
f5d72c5c 2352 spin_lock_irqsave(&mq->lock, flags);
10f21df4
AH
2353 mq->recovery_needed = true;
2354 mq->recovery_req = req;
f5d72c5c 2355 spin_unlock_irqrestore(&mq->lock, flags);
81196976 2356 wake_up(&mq->wait);
10f21df4
AH
2357 schedule_work(&mq->recovery_work);
2358 return;
2359 }
2360
2361 mmc_blk_rw_reset_success(mq, req);
2362
2363 mq->rw_wait = false;
2364 wake_up(&mq->wait);
2365
639d3531
SAS
2366 /* context unknown */
2367 mmc_blk_mq_post_req(mq, req, false);
81196976
AH
2368}
2369
2370static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
2371{
81196976
AH
2372 unsigned long flags;
2373 bool done;
2374
2375 /*
10f21df4
AH
2376 * Wait while there is another request in progress, but not if recovery
2377 * is needed. Also indicate whether there is a request waiting to start.
81196976 2378 */
f5d72c5c 2379 spin_lock_irqsave(&mq->lock, flags);
10f21df4
AH
2380 if (mq->recovery_needed) {
2381 *err = -EBUSY;
2382 done = true;
2383 } else {
2384 done = !mq->rw_wait;
2385 }
81196976 2386 mq->waiting = !done;
f5d72c5c 2387 spin_unlock_irqrestore(&mq->lock, flags);
81196976
AH
2388
2389 return done;
2390}
2391
2392static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
2393{
2394 int err = 0;
2395
2396 wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));
2397
2398 /* Always complete the previous request if there is one */
2399 mmc_blk_mq_complete_prev_req(mq, prev_req);
2400
2401 return err;
2402}
2403
2404static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
2405 struct request *req)
2406{
2407 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2408 struct mmc_host *host = mq->card->host;
2409 struct request *prev_req = NULL;
2410 int err = 0;
2411
2412 mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
2413
2414 mqrq->brq.mrq.done = mmc_blk_mq_req_done;
2415
2416 mmc_pre_req(host, &mqrq->brq.mrq);
2417
2418 err = mmc_blk_rw_wait(mq, &prev_req);
2419 if (err)
2420 goto out_post_req;
2421
2422 mq->rw_wait = true;
2423
2424 err = mmc_start_request(host, &mqrq->brq.mrq);
2425
2426 if (prev_req)
639d3531 2427 mmc_blk_mq_post_req(mq, prev_req, true);
81196976 2428
10f21df4 2429 if (err)
81196976 2430 mq->rw_wait = false;
10f21df4
AH
2431
2432 /* Release re-tuning here where there is no synchronization required */
dc03e2e9 2433 if (err || mmc_host_can_done_complete(host))
81196976 2434 mmc_retune_release(host);
81196976
AH
2435
2436out_post_req:
2437 if (err)
2438 mmc_post_req(host, &mqrq->brq.mrq, err);
2439
2440 return err;
2441}
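/*
 * The non-CQE async flow above pipelines two requests: the new request
 * is prepared (mmc_pre_req) while the previous one may still be running,
 * mmc_blk_rw_wait() then completes that previous request, and only after
 * the new one has been started is the previous one post-processed. On
 * error the new request is unwound via out_post_req.
 */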
2442
2443static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
2444{
407a1c57 2445 if (host->cqe_enabled)
1e8e55b6
AH
2446 return host->cqe_ops->cqe_wait_for_idle(host);
2447
81196976
AH
2448 return mmc_blk_rw_wait(mq, NULL);
2449}
2450
2451enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
2452{
2453 struct mmc_blk_data *md = mq->blkdata;
2454 struct mmc_card *card = md->queue.card;
2455 struct mmc_host *host = card->host;
2456 int ret;
2457
2458 ret = mmc_blk_part_switch(card, md->part_type);
2459 if (ret)
2460 return MMC_REQ_FAILED_TO_START;
2461
2462 switch (mmc_issue_type(mq, req)) {
2463 case MMC_ISSUE_SYNC:
2464 ret = mmc_blk_wait_for_idle(mq, host);
2465 if (ret)
2466 return MMC_REQ_BUSY;
2467 switch (req_op(req)) {
2468 case REQ_OP_DRV_IN:
2469 case REQ_OP_DRV_OUT:
2470 mmc_blk_issue_drv_op(mq, req);
2471 break;
2472 case REQ_OP_DISCARD:
2473 mmc_blk_issue_discard_rq(mq, req);
2474 break;
2475 case REQ_OP_SECURE_ERASE:
2476 mmc_blk_issue_secdiscard_rq(mq, req);
2477 break;
f7b6fc32
VW
2478 case REQ_OP_WRITE_ZEROES:
2479 mmc_blk_issue_trim_rq(mq, req);
2480 break;
81196976
AH
2481 case REQ_OP_FLUSH:
2482 mmc_blk_issue_flush(mq, req);
2483 break;
2484 default:
2485 WARN_ON_ONCE(1);
2486 return MMC_REQ_FAILED_TO_START;
2487 }
2488 return MMC_REQ_FINISHED;
1e8e55b6 2489 case MMC_ISSUE_DCMD:
81196976
AH
2490 case MMC_ISSUE_ASYNC:
2491 switch (req_op(req)) {
1e8e55b6 2492 case REQ_OP_FLUSH:
97fce126
AA
2493 if (!mmc_cache_enabled(host)) {
2494 blk_mq_end_request(req, BLK_STS_OK);
2495 return MMC_REQ_FINISHED;
2496 }
1e8e55b6
AH
2497 ret = mmc_blk_cqe_issue_flush(mq, req);
2498 break;
81196976 2499 case REQ_OP_WRITE:
ed9009ad
BH
2500 card->written_flag = true;
2501 fallthrough;
2502 case REQ_OP_READ:
407a1c57 2503 if (host->cqe_enabled)
1e8e55b6
AH
2504 ret = mmc_blk_cqe_issue_rw_rq(mq, req);
2505 else
2506 ret = mmc_blk_mq_issue_rw_rq(mq, req);
81196976
AH
2507 break;
2508 default:
2509 WARN_ON_ONCE(1);
2510 ret = -EINVAL;
2511 }
2512 if (!ret)
2513 return MMC_REQ_STARTED;
2514 return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
2515 default:
2516 WARN_ON_ONCE(1);
2517 return MMC_REQ_FAILED_TO_START;
2518 }
2519}
2520
a6f6c96b
RK
2521static inline int mmc_blk_readonly(struct mmc_card *card)
2522{
2523 return mmc_card_readonly(card) ||
2524 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2525}
2526
3ec7cb11
CM
2527/*
2528 * Search for a declared partitions node for the disk in the mmc-card related node.
2529 *
2530 * This permits support for a partition table defined in DT for the special
2531 * case where a partition table is not written to the disk and is expected
2532 * to be passed from the running system.
2533 *
2534 * For the user disk, the "partitions" node is searched.
2535 * For the special HW disks, a "partitions-" node with the appended name is used,
2536 * following this conversion table (to adhere to JEDEC naming):
2537 * - boot0 -> partitions-boot1
2538 * - boot1 -> partitions-boot2
2539 * - gp0 -> partitions-gp1
2540 * - gp1 -> partitions-gp2
2541 * - gp2 -> partitions-gp3
2542 * - gp3 -> partitions-gp4
2543 */
2544static struct fwnode_handle *mmc_blk_get_partitions_node(struct device *mmc_dev,
2545 const char *subname)
2546{
2547 const char *node_name = "partitions";
2548
2549 if (subname) {
2550 mmc_dev = mmc_dev->parent;
2551
2552 /*
2553 * Check if we are allocating a BOOT disk boot0/1 disk.
2554 * In DT we use the JEDEC naming boot1/2.
2555 */
2556 if (!strcmp(subname, "boot0"))
2557 node_name = "partitions-boot1";
2558 if (!strcmp(subname, "boot1"))
2559 node_name = "partitions-boot2";
2560 /*
2561 * Check if we are allocating a GP disk gp0/1/2/3 disk.
2562 * In DT we use the JEDEC naming gp1/2/3/4.
2563 */
2564 if (!strcmp(subname, "gp0"))
2565 node_name = "partitions-gp1";
2566 if (!strcmp(subname, "gp1"))
2567 node_name = "partitions-gp2";
2568 if (!strcmp(subname, "gp2"))
2569 node_name = "partitions-gp3";
2570 if (!strcmp(subname, "gp3"))
2571 node_name = "partitions-gp4";
2572 }
2573
2574 return device_get_named_child_node(mmc_dev, node_name);
2575}
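/*
 * A minimal sketch of a DT fragment this would pick up, assuming the
 * fixed-partitions schema used by the mmc-card binding (node names and
 * sizes are illustrative only):
 *
 *   mmc-card@0 {
 *           reg = <0>;
 *           partitions {
 *                   compatible = "fixed-partitions";
 *                   #address-cells = <1>;
 *                   #size-cells = <1>;
 *
 *                   partition@0 {
 *                           label = "env";
 *                           reg = <0x0 0x100000>;
 *                   };
 *           };
 *   };
 *
 * A boot0 area would instead be matched by a "partitions-boot1" node.
 */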
2576
371a689f
AW
2577static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2578 struct device *parent,
2579 sector_t size,
2580 bool default_ro,
add710ea 2581 const char *subname,
a94dcfce
CH
2582 int area_type,
2583 unsigned int part_type)
1da177e4 2584{
3ec7cb11 2585 struct fwnode_handle *disk_fwnode;
1da177e4
LT
2586 struct mmc_blk_data *md;
2587 int devidx, ret;
ce999ed1 2588 char cap_str[10];
1122c0c1 2589 unsigned int features = 0;
1da177e4 2590
18cbe816 2591 devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL);
e7b42769
SL
2592 if (devidx < 0) {
2593 /*
2594 * We get -ENOSPC because there are no more available
2595 * devidx values. The reason may be either that userspace hasn't
2596 * yet unmounted the partitions, which postpones mmc_blk_release()
2597 * from being called, or that the device has more partitions than
2598 * we support.
2599 */
2600 if (devidx == -ENOSPC)
2601 dev_err(mmc_dev(card->host),
2602 "no more device IDs available\n");
2603
a04848c7 2604 return ERR_PTR(devidx);
e7b42769 2605 }
1da177e4 2606
d2253bfa 2607 md = kzalloc(sizeof(*md), GFP_KERNEL);
a6f6c96b
RK
2608 if (!md) {
2609 ret = -ENOMEM;
2610 goto out;
2611 }
1da177e4 2612
add710ea
JR
2613 md->area_type = area_type;
2614
a6f6c96b
RK
2615 /*
2616 * Set the read-only status based on the supported commands
2617 * and the write protect switch.
2618 */
2619 md->read_only = mmc_blk_readonly(card);
1da177e4 2620
03b31a06 2621 if (mmc_host_can_cmd23(card->host)) {
1122c0c1
CH
2622 if ((mmc_card_mmc(card) &&
2623 card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
93387385 2624 (mmc_card_sd(card) && !mmc_card_ult_capacity(card) &&
1122c0c1
CH
2625 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2626 md->flags |= MMC_BLK_CMD23;
2627 }
2628
2629 if (md->flags & MMC_BLK_CMD23 &&
2630 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2631 card->ext_csd.rel_sectors)) {
2632 md->flags |= MMC_BLK_REL_WR;
2633 features |= (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
2634 } else if (mmc_cache_enabled(card->host)) {
2635 features |= BLK_FEAT_WRITE_CACHE;
2636 }
2637
2638 md->disk = mmc_init_queue(&md->queue, card, features);
607d968a
CH
2639 if (IS_ERR(md->disk)) {
2640 ret = PTR_ERR(md->disk);
a6f6c96b
RK
2641 goto err_kfree;
2642 }
1da177e4 2643
371a689f 2644 INIT_LIST_HEAD(&md->part);
97548575 2645 INIT_LIST_HEAD(&md->rpmbs);
edb25572
SB
2646 kref_init(&md->kref);
2647
7db3028e 2648 md->queue.blkdata = md;
a94dcfce 2649 md->part_type = part_type;
d2b18394 2650
fe6b4c88 2651 md->disk->major = MMC_BLOCK_MAJOR;
1033d103 2652 md->disk->minors = perdev_minors;
5e71b7a6 2653 md->disk->first_minor = devidx * perdev_minors;
a6f6c96b
RK
2654 md->disk->fops = &mmc_bdops;
2655 md->disk->private_data = md;
307d8e6f 2656 md->parent = parent;
371a689f 2657 set_disk_ro(md->disk, md->read_only || default_ro);
f0534aac 2658 if (area_type & MMC_BLK_DATA_AREA_RPMB)
79b0f79a 2659 md->disk->flags |= GENHD_FL_NO_PART;
a6f6c96b
RK
2660
2661 /*
2662 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2663 *
2664 * - be set for removable media with permanent block devices
2665 * - be unset for removable block devices with permanent media
2666 *
2667 * Since MMC block devices clearly fall under the second
2668 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2669 * should use the block device creation/destruction hotplug
2670 * messages to tell when the card is present.
2671 */
2672
f06c9153 2673 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
9aaf3437 2674 "mmcblk%u%s", card->host->index, subname ? subname : "");
a6f6c96b 2675
371a689f 2676 set_capacity(md->disk, size);
d0c97cfb 2677
ce999ed1
UH
2678 string_get_size((u64)size, 512, STRING_UNITS_2,
2679 cap_str, sizeof(cap_str));
78ce88e7 2680 pr_info("%s: %s %s %s%s\n",
ce999ed1 2681 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
78ce88e7 2682 cap_str, md->read_only ? " (ro)" : "");
ce999ed1 2683
a94dcfce
CH
2684 /* used in ->open, must be set before add_disk: */
2685 if (area_type == MMC_BLK_DATA_AREA_MAIN)
2686 dev_set_drvdata(&card->dev, md);
3ec7cb11
CM
2687 disk_fwnode = mmc_blk_get_partitions_node(parent, subname);
2688 ret = add_disk_fwnode(md->parent, md->disk, mmc_disk_attr_groups,
2689 disk_fwnode);
9c1aaec4 2690 if (ret)
bf14fad1 2691 goto err_put_disk;
371a689f
AW
2692 return md;
2693
bf14fad1
ML
2694 err_put_disk:
2695 put_disk(md->disk);
9c1aaec4 2696 blk_mq_free_tag_set(&md->queue.tag_set);
371a689f
AW
2697 err_kfree:
2698 kfree(md);
2699 out:
18cbe816 2700 ida_free(&mmc_blk_ida, devidx);
371a689f
AW
2701 return ERR_PTR(ret);
2702}
2703
2704static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2705{
2706 sector_t size;
a6f6c96b 2707
85a18ad9
PO
2708 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2709 /*
2710 * The EXT_CSD sector count is in number of 512 byte
2711 * sectors.
2712 */
371a689f 2713 size = card->ext_csd.sectors;
85a18ad9
PO
2714 } else {
2715 /*
2716 * The CSD capacity field is in units of read_blkbits.
2717 * set_capacity takes units of 512 bytes.
2718 */
087de9ed
KM
2719 size = (typeof(sector_t))card->csd.capacity
2720 << (card->csd.read_blkbits - 9);
85a18ad9 2721 }
371a689f 2722
7a30f2af 2723 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
a94dcfce 2724 MMC_BLK_DATA_AREA_MAIN, 0);
371a689f 2725}
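/*
 * Example of the CSD branch above: a card reporting read_blkbits == 11
 * counts capacity in 2048-byte units, so the shift by (11 - 9) converts
 * it into the 512-byte sectors that set_capacity() expects.
 */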
a6f6c96b 2726
371a689f
AW
2727static int mmc_blk_alloc_part(struct mmc_card *card,
2728 struct mmc_blk_data *md,
2729 unsigned int part_type,
2730 sector_t size,
2731 bool default_ro,
add710ea
JR
2732 const char *subname,
2733 int area_type)
371a689f 2734{
371a689f
AW
2735 struct mmc_blk_data *part_md;
2736
2737 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
a94dcfce 2738 subname, area_type, part_type);
371a689f
AW
2739 if (IS_ERR(part_md))
2740 return PTR_ERR(part_md);
371a689f
AW
2741 list_add(&part_md->part, &md->part);
2742
371a689f
AW
2743 return 0;
2744}
2745
97548575
LW
2746/**
2747 * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
2748 * @filp: the character device file
2749 * @cmd: the ioctl() command
2750 * @arg: the argument from userspace
2751 *
2752 * This essentially just redirects the incoming ioctl()s over to
2753 * the main block device that spawned the RPMB character device.
2754 */
2755static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
2756 unsigned long arg)
2757{
2758 struct mmc_rpmb_data *rpmb = filp->private_data;
2759 int ret;
2760
2761 switch (cmd) {
2762 case MMC_IOC_CMD:
2763 ret = mmc_blk_ioctl_cmd(rpmb->md,
2764 (struct mmc_ioc_cmd __user *)arg,
2765 rpmb);
2766 break;
2767 case MMC_IOC_MULTI_CMD:
2768 ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
2769 (struct mmc_ioc_multi_cmd __user *)arg,
2770 rpmb);
2771 break;
2772 default:
2773 ret = -EINVAL;
2774 break;
2775 }
2776
b25b750d 2777 return ret;
97548575
LW
2778}
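/*
 * A minimal userspace sketch of driving this chardev (the device node
 * name and frame setup are illustrative): open /dev/mmcblk0rpmb and
 * issue MMC_IOC_MULTI_CMD with the RPMB frames packed into a struct
 * mmc_ioc_multi_cmd, exactly as for the main block device's ioctl().
 */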
2779
2780#ifdef CONFIG_COMPAT
2781static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
2782 unsigned long arg)
2783{
2784 return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
2785}
2786#endif
2787
2788static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
2789{
2790 struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
2791 struct mmc_rpmb_data, chrdev);
2792
2793 get_device(&rpmb->dev);
2794 filp->private_data = rpmb;
97548575
LW
2795
2796 return nonseekable_open(inode, filp);
2797}
2798
2799static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
2800{
2801 struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
2802 struct mmc_rpmb_data, chrdev);
2803
202500d2 2804 put_device(&rpmb->dev);
97548575
LW
2805
2806 return 0;
2807}
2808
2809static const struct file_operations mmc_rpmb_fileops = {
2810 .release = mmc_rpmb_chrdev_release,
2811 .open = mmc_rpmb_chrdev_open,
2812 .owner = THIS_MODULE,
97548575
LW
2813 .unlocked_ioctl = mmc_rpmb_ioctl,
2814#ifdef CONFIG_COMPAT
2815 .compat_ioctl = mmc_rpmb_ioctl_compat,
2816#endif
2817};
2818
1c87f735
LW
2819static void mmc_blk_rpmb_device_release(struct device *dev)
2820{
2821 struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
2822
7852028a
JW
2823 rpmb_dev_unregister(rpmb->rdev);
2824 mmc_blk_put(rpmb->md);
18cbe816 2825 ida_free(&mmc_rpmb_ida, rpmb->id);
1c87f735
LW
2826 kfree(rpmb);
2827}
97548575 2828
7852028a
JW
2829static void free_idata(struct mmc_blk_ioc_data **idata, unsigned int cmd_count)
2830{
2831 unsigned int n;
2832
2833 for (n = 0; n < cmd_count; n++)
2834 kfree(idata[n]);
2835 kfree(idata);
2836}
2837
2838static struct mmc_blk_ioc_data **alloc_idata(struct mmc_rpmb_data *rpmb,
2839 unsigned int cmd_count)
2840{
2841 struct mmc_blk_ioc_data **idata;
2842 unsigned int n;
2843
2844 idata = kcalloc(cmd_count, sizeof(*idata), GFP_KERNEL);
2845 if (!idata)
2846 return NULL;
2847
2848 for (n = 0; n < cmd_count; n++) {
2849 idata[n] = kcalloc(1, sizeof(**idata), GFP_KERNEL);
2850 if (!idata[n]) {
2851 free_idata(idata, n);
2852 return NULL;
2853 }
2854 idata[n]->rpmb = rpmb;
2855 }
2856
2857 return idata;
2858}
2859
2860static void set_idata(struct mmc_blk_ioc_data *idata, u32 opcode,
2861 int write_flag, u8 *buf, unsigned int buf_bytes)
2862{
2863 /*
2864 * The size of an RPMB frame must match what's expected by the
2865 * hardware.
2866 */
2867 BUILD_BUG_ON(sizeof(struct rpmb_frame) != 512);
2868
2869 idata->ic.opcode = opcode;
2870 idata->ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2871 idata->ic.write_flag = write_flag;
2872 idata->ic.blksz = sizeof(struct rpmb_frame);
2873 idata->ic.blocks = buf_bytes / idata->ic.blksz;
2874 idata->buf = buf;
2875 idata->buf_bytes = buf_bytes;
2876}
2877
2878static int mmc_route_rpmb_frames(struct device *dev, u8 *req,
2879 unsigned int req_len, u8 *resp,
2880 unsigned int resp_len)
2881{
2882 struct rpmb_frame *frm = (struct rpmb_frame *)req;
2883 struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
2884 struct mmc_blk_data *md = rpmb->md;
2885 struct mmc_blk_ioc_data **idata;
2886 struct mmc_queue_req *mq_rq;
2887 unsigned int cmd_count;
2888 struct request *rq;
2889 u16 req_type;
2890 bool write;
2891 int ret;
2892
2893 if (IS_ERR(md->queue.card))
2894 return PTR_ERR(md->queue.card);
2895
2896 if (req_len < sizeof(*frm))
2897 return -EINVAL;
2898
2899 req_type = be16_to_cpu(frm->req_resp);
2900 switch (req_type) {
2901 case RPMB_PROGRAM_KEY:
2902 if (req_len != sizeof(struct rpmb_frame) ||
2903 resp_len != sizeof(struct rpmb_frame))
2904 return -EINVAL;
2905 write = true;
2906 break;
2907 case RPMB_GET_WRITE_COUNTER:
2908 if (req_len != sizeof(struct rpmb_frame) ||
2909 resp_len != sizeof(struct rpmb_frame))
2910 return -EINVAL;
2911 write = false;
2912 break;
2913 case RPMB_WRITE_DATA:
2914 if (req_len % sizeof(struct rpmb_frame) ||
2915 resp_len != sizeof(struct rpmb_frame))
2916 return -EINVAL;
2917 write = true;
2918 break;
2919 case RPMB_READ_DATA:
2920 if (req_len != sizeof(struct rpmb_frame) ||
2921 resp_len % sizeof(struct rpmb_frame))
2922 return -EINVAL;
2923 write = false;
2924 break;
2925 default:
2926 return -EINVAL;
2927 }
2928
2929 if (write)
2930 cmd_count = 3;
2931 else
2932 cmd_count = 2;
2933
2934 idata = alloc_idata(rpmb, cmd_count);
2935 if (!idata)
2936 return -ENOMEM;
2937
2938 if (write) {
2939 struct rpmb_frame *frm = (struct rpmb_frame *)resp;
2940
2941 /* Send write request frame(s) */
2942 set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK,
2943 1 | MMC_CMD23_ARG_REL_WR, req, req_len);
2944
2945 /* Send result request frame */
2946 memset(frm, 0, sizeof(*frm));
2947 frm->req_resp = cpu_to_be16(RPMB_RESULT_READ);
2948 set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp,
2949 resp_len);
2950
2951 /* Read response frame */
2952 set_idata(idata[2], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len);
2953 } else {
2954 /* Send write request frame(s) */
2955 set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 1, req, req_len);
2956
2957 /* Read response frame */
2958 set_idata(idata[1], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len);
2959 }
2960
2961 rq = blk_mq_alloc_request(md->queue.queue, REQ_OP_DRV_OUT, 0);
2962 if (IS_ERR(rq)) {
2963 ret = PTR_ERR(rq);
2964 goto out;
2965 }
2966
2967 mq_rq = req_to_mmc_queue_req(rq);
2968 mq_rq->drv_op = MMC_DRV_OP_IOCTL_RPMB;
2969 mq_rq->drv_op_result = -EIO;
2970 mq_rq->drv_op_data = idata;
2971 mq_rq->ioc_count = cmd_count;
2972 blk_execute_rq(rq, false);
2973 ret = req_to_mmc_queue_req(rq)->drv_op_result;
2974
2975 blk_mq_free_request(rq);
2976
2977out:
2978 free_idata(idata, cmd_count);
2979 return ret;
2980}
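/*
 * To summarize the routing above: an authenticated write is issued as
 * three commands in one atomic block request - the write frames with
 * the reliable-write flag set, then a RESULT_READ request frame, then a
 * read of the result frame - while a read needs only the request frame
 * write followed by the response read.
 */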
2981
97548575
LW
2982static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
2983 struct mmc_blk_data *md,
2984 unsigned int part_index,
2985 sector_t size,
2986 const char *subname)
2987{
2988 int devidx, ret;
2989 char rpmb_name[DISK_NAME_LEN];
2990 char cap_str[10];
2991 struct mmc_rpmb_data *rpmb;
2992
2993 /* This creates the minor number for the RPMB char device */
18cbe816 2994 devidx = ida_alloc_max(&mmc_rpmb_ida, max_devices - 1, GFP_KERNEL);
97548575
LW
2995 if (devidx < 0)
2996 return devidx;
2997
2998 rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
1c87f735 2999 if (!rpmb) {
18cbe816 3000 ida_free(&mmc_rpmb_ida, devidx);
97548575 3001 return -ENOMEM;
1c87f735 3002 }
97548575
LW
3003
3004 snprintf(rpmb_name, sizeof(rpmb_name),
3005 "mmcblk%u%s", card->host->index, subname ? subname : "");
3006
3007 rpmb->id = devidx;
3008 rpmb->part_index = part_index;
3009 rpmb->dev.init_name = rpmb_name;
3010 rpmb->dev.bus = &mmc_rpmb_bus_type;
3011 rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
3012 rpmb->dev.parent = &card->dev;
1c87f735 3013 rpmb->dev.release = mmc_blk_rpmb_device_release;
97548575
LW
3014 device_initialize(&rpmb->dev);
3015 dev_set_drvdata(&rpmb->dev, rpmb);
7852028a 3016 mmc_blk_get(md->disk);
97548575
LW
3017 rpmb->md = md;
3018
3019 cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
3020 rpmb->chrdev.owner = THIS_MODULE;
3021 ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
3022 if (ret) {
3023 pr_err("%s: could not add character device\n", rpmb_name);
1c87f735 3024 goto out_put_device;
97548575
LW
3025 }
3026
3027 list_add(&rpmb->node, &md->rpmbs);
3028
3029 string_get_size((u64)size, 512, STRING_UNITS_2,
3030 cap_str, sizeof(cap_str));
3031
ce999ed1
UH
3032 pr_info("%s: %s %s %s, chardev (%d:%d)\n",
3033 rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str,
97548575
LW
3034 MAJOR(mmc_rpmb_devt), rpmb->id);
3035
3036 return 0;
3037
1c87f735
LW
3038out_put_device:
3039 put_device(&rpmb->dev);
97548575
LW
3040 return ret;
3041}
3042
3043static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
1c87f735 3044
97548575
LW
3045{
3046 cdev_device_del(&rpmb->chrdev, &rpmb->dev);
1c87f735 3047 put_device(&rpmb->dev);
97548575
LW
3048}
3049
e0c368d5
NJ
3050/* MMC Physical partitions consist of two boot partitions and
3051 * up to four general purpose partitions.
3052 * For each partition enabled in EXT_CSD a block device will be allocated
3053 * to provide access to the partition.
3054 */
3055
371a689f
AW
3056static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
3057{
97548575 3058 int idx, ret;
371a689f
AW
3059
3060 if (!mmc_card_mmc(card))
3061 return 0;
3062
e0c368d5 3063 for (idx = 0; idx < card->nr_parts; idx++) {
97548575
LW
3064 if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
3065 /*
3066 * RPMB partitions does not provide block access, they
3067 * are only accessed using ioctl():s. Thus create
3068 * special RPMB block devices that do not have a
3069 * backing block queue for these.
3070 */
3071 ret = mmc_blk_alloc_rpmb_part(card, md,
3072 card->part[idx].part_cfg,
3073 card->part[idx].size >> 9,
3074 card->part[idx].name);
3075 if (ret)
3076 return ret;
3077 } else if (card->part[idx].size) {
e0c368d5
NJ
3078 ret = mmc_blk_alloc_part(card, md,
3079 card->part[idx].part_cfg,
3080 card->part[idx].size >> 9,
3081 card->part[idx].force_ro,
add710ea
JR
3082 card->part[idx].name,
3083 card->part[idx].area_type);
e0c368d5
NJ
3084 if (ret)
3085 return ret;
3086 }
371a689f
AW
3087 }
3088
97548575 3089 return 0;
1da177e4
LT
3090}
3091
371a689f
AW
3092static void mmc_blk_remove_req(struct mmc_blk_data *md)
3093{
a94dcfce
CH
3094 /*
3095 * Flush remaining requests and free queues. It is the freeing of the
3096 * queue that stops new requests from being accepted.
3097 */
3098 del_gendisk(md->disk);
3099 mmc_cleanup_queue(&md->queue);
3100 mmc_blk_put(md);
371a689f
AW
3101}
3102
3103static void mmc_blk_remove_parts(struct mmc_card *card,
3104 struct mmc_blk_data *md)
3105{
3106 struct list_head *pos, *q;
3107 struct mmc_blk_data *part_md;
97548575 3108 struct mmc_rpmb_data *rpmb;
371a689f 3109
97548575
LW
3110 /* Remove RPMB partitions */
3111 list_for_each_safe(pos, q, &md->rpmbs) {
3112 rpmb = list_entry(pos, struct mmc_rpmb_data, node);
3113 list_del(pos);
3114 mmc_blk_remove_rpmb_part(rpmb);
3115 }
3116 /* Remove block partitions */
371a689f
AW
3117 list_for_each_safe(pos, q, &md->part) {
3118 part_md = list_entry(pos, struct mmc_blk_data, part);
3119 list_del(pos);
3120 mmc_blk_remove_req(part_md);
3121 }
3122}
3123
627c3ccf
LW
3124#ifdef CONFIG_DEBUG_FS
3125
3126static int mmc_dbg_card_status_get(void *data, u64 *val)
3127{
3128 struct mmc_card *card = data;
3129 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
3130 struct mmc_queue *mq = &md->queue;
3131 struct request *req;
3132 int ret;
3133
3134 /* Ask the block layer about the card status */
0bf6d96c 3135 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
fb8e456e
AH
3136 if (IS_ERR(req))
3137 return PTR_ERR(req);
627c3ccf 3138 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
003fb0a5 3139 req_to_mmc_queue_req(req)->drv_op_result = -EIO;
b84ba30b 3140 blk_execute_rq(req, false);
627c3ccf
LW
3141 ret = req_to_mmc_queue_req(req)->drv_op_result;
3142 if (ret >= 0) {
3143 *val = ret;
3144 ret = 0;
3145 }
0bf6d96c 3146 blk_mq_free_request(req);
627c3ccf
LW
3147
3148 return ret;
3149}
f6a3d9d9
Y
3150DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
3151 NULL, "%08llx\n");
627c3ccf
LW
3152
3153/* Two hex digits per EXT_CSD byte (512 of them) + 1 for newline */
3154#define EXT_CSD_STR_LEN 1025
3155
3156static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
3157{
3158 struct mmc_card *card = inode->i_private;
3159 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
3160 struct mmc_queue *mq = &md->queue;
3161 struct request *req;
3162 char *buf;
3163 ssize_t n = 0;
3164 u8 *ext_csd;
3165 int err, i;
3166
3167 buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
3168 if (!buf)
3169 return -ENOMEM;
3170
3171 /* Ask the block layer for the EXT CSD */
0bf6d96c 3172 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
fb8e456e
AH
3173 if (IS_ERR(req)) {
3174 err = PTR_ERR(req);
3175 goto out_free;
3176 }
627c3ccf 3177 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
003fb0a5 3178 req_to_mmc_queue_req(req)->drv_op_result = -EIO;
627c3ccf 3179 req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
b84ba30b 3180 blk_execute_rq(req, false);
627c3ccf 3181 err = req_to_mmc_queue_req(req)->drv_op_result;
0bf6d96c 3182 blk_mq_free_request(req);
627c3ccf
LW
3183 if (err) {
3184 pr_err("FAILED %d\n", err);
3185 goto out_free;
3186 }
3187
3188 for (i = 0; i < 512; i++)
3189 n += sprintf(buf + n, "%02x", ext_csd[i]);
3190 n += sprintf(buf + n, "\n");
3191
3192 if (n != EXT_CSD_STR_LEN) {
3193 err = -EINVAL;
0be55579 3194 kfree(ext_csd);
627c3ccf
LW
3195 goto out_free;
3196 }
3197
3198 filp->private_data = buf;
3199 kfree(ext_csd);
3200 return 0;
3201
3202out_free:
3203 kfree(buf);
3204 return err;
3205}
3206
3207static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
3208 size_t cnt, loff_t *ppos)
3209{
3210 char *buf = filp->private_data;
3211
3212 return simple_read_from_buffer(ubuf, cnt, ppos,
3213 buf, EXT_CSD_STR_LEN);
3214}
3215
3216static int mmc_ext_csd_release(struct inode *inode, struct file *file)
3217{
3218 kfree(file->private_data);
3219 return 0;
3220}
3221
3222static const struct file_operations mmc_dbg_ext_csd_fops = {
3223 .open = mmc_ext_csd_open,
3224 .read = mmc_ext_csd_read,
3225 .release = mmc_ext_csd_release,
3226 .llseek = default_llseek,
3227};
3228
584f5488 3229static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
627c3ccf
LW
3230{
3231 struct dentry *root;
3232
3233 if (!card->debugfs_root)
584f5488 3234 return;
627c3ccf
LW
3235
3236 root = card->debugfs_root;
3237
3238 if (mmc_card_mmc(card) || mmc_card_sd(card)) {
f9f0da98 3239 md->status_dentry =
f6a3d9d9
Y
3240 debugfs_create_file_unsafe("status", 0400, root,
3241 card,
3242 &mmc_dbg_card_status_fops);
627c3ccf
LW
3243 }
3244
3245 if (mmc_card_mmc(card)) {
f9f0da98
AH
3246 md->ext_csd_dentry =
3247 debugfs_create_file("ext_csd", S_IRUSR, root, card,
3248 &mmc_dbg_ext_csd_fops);
627c3ccf 3249 }
627c3ccf
LW
3250}
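/*
 * Usage sketch (the debugfs paths reflect the usual per-card layout and
 * are illustrative):
 *
 *   # cat /sys/kernel/debug/mmc0/mmc0:0001/status
 *   # cat /sys/kernel/debug/mmc0/mmc0:0001/ext_csd
 *
 * "status" issues MMC_DRV_OP_GET_CARD_STATUS through the block queue;
 * "ext_csd" dumps the 512 EXT_CSD bytes as hex.
 */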
3251
f9f0da98
AH
3252static void mmc_blk_remove_debugfs(struct mmc_card *card,
3253 struct mmc_blk_data *md)
3254{
3255 if (!card->debugfs_root)
3256 return;
3257
584f5488
YF
3258 debugfs_remove(md->status_dentry);
3259 md->status_dentry = NULL;
f9f0da98 3260
584f5488
YF
3261 debugfs_remove(md->ext_csd_dentry);
3262 md->ext_csd_dentry = NULL;
f9f0da98 3263}
627c3ccf
LW
3264
3265#else
3266
584f5488 3267static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
627c3ccf 3268{
627c3ccf
LW
3269}
3270
f9f0da98
AH
3271static void mmc_blk_remove_debugfs(struct mmc_card *card,
3272 struct mmc_blk_data *md)
3273{
3274}
3275
627c3ccf
LW
3276#endif /* CONFIG_DEBUG_FS */
3277
7852028a
JW
3278static void mmc_blk_rpmb_add(struct mmc_card *card)
3279{
3280 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
3281 struct mmc_rpmb_data *rpmb;
3282 struct rpmb_dev *rdev;
3283 unsigned int n;
3284 u32 cid[4];
3285 struct rpmb_descr descr = {
3286 .type = RPMB_TYPE_EMMC,
3287 .route_frames = mmc_route_rpmb_frames,
3288 .reliable_wr_count = card->ext_csd.enhanced_rpmb_supported ?
3289 2 : 32,
3290 .capacity = card->ext_csd.raw_rpmb_size_mult,
3291 .dev_id = (void *)cid,
3292 .dev_id_len = sizeof(cid),
3293 };
3294
3295 /*
3296 * Provide the CID as an octet array. The CID needs to be interpreted
3297 * when used as input to derive the RPMB key since some fields
3298 * will change due to firmware updates.
3299 */
3300 for (n = 0; n < 4; n++)
3301 cid[n] = be32_to_cpu((__force __be32)card->raw_cid[n]);
3302
3303 list_for_each_entry(rpmb, &md->rpmbs, node) {
3304 rdev = rpmb_dev_register(&rpmb->dev, &descr);
3305 if (IS_ERR(rdev)) {
3306 pr_warn("%s: could not register RPMB device\n",
3307 dev_name(&rpmb->dev));
3308 continue;
3309 }
3310 rpmb->rdev = rdev;
3311 }
3312}
3313
96541bac 3314static int mmc_blk_probe(struct mmc_card *card)
1da177e4 3315{
a94dcfce 3316 struct mmc_blk_data *md;
6f1d3247 3317 int ret = 0;
a7bbb573 3318
912490db
PO
3319 /*
3320 * Check that the card supports the command class(es) we need.
3321 */
3322 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
1da177e4
LT
3323 return -ENODEV;
3324
8c7cdbf9 3325 mmc_fixup_device(card, mmc_blk_fixups);
5204d00f 3326
dcf6e2e3
ZH
3327 card->complete_wq = alloc_workqueue("mmc_complete",
3328 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1d848c28 3329 if (!card->complete_wq) {
dcf6e2e3
ZH
3330 pr_err("Failed to create mmc completion workqueue\n");
3331 return -ENOMEM;
3332 }
3333
1da177e4 3334 md = mmc_blk_alloc(card);
6f1d3247
UH
3335 if (IS_ERR(md)) {
3336 ret = PTR_ERR(md);
3337 goto out_free;
3338 }
1da177e4 3339
6f1d3247
UH
3340 ret = mmc_blk_alloc_parts(card, md);
3341 if (ret)
371a689f
AW
3342 goto out;
3343
627c3ccf 3344 /* Add two debugfs entries */
f9f0da98 3345 mmc_blk_add_debugfs(card, md);
627c3ccf 3346
e94cfef6
UH
3347 pm_runtime_set_autosuspend_delay(&card->dev, 3000);
3348 pm_runtime_use_autosuspend(&card->dev);
3349
3350 /*
3351 * Don't enable runtime PM for SD-combo cards here. Leave that
3352 * decision to be taken during the SDIO init sequence instead.
3353 */
3beb0ab5 3354 if (!mmc_card_sd_combo(card)) {
e94cfef6
UH
3355 pm_runtime_set_active(&card->dev);
3356 pm_runtime_enable(&card->dev);
3357 }
3358
7852028a
JW
3359 mmc_blk_rpmb_add(card);
3360
1da177e4
LT
3361 return 0;
3362
6f1d3247 3363out:
371a689f
AW
3364 mmc_blk_remove_parts(card, md);
3365 mmc_blk_remove_req(md);
6f1d3247
UH
3366out_free:
3367 destroy_workqueue(card->complete_wq);
3368 return ret;
1da177e4
LT
3369}
3370
96541bac 3371static void mmc_blk_remove(struct mmc_card *card)
1da177e4 3372{
96541bac 3373 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
1da177e4 3374
f9f0da98 3375 mmc_blk_remove_debugfs(card, md);
371a689f 3376 mmc_blk_remove_parts(card, md);
e94cfef6 3377 pm_runtime_get_sync(&card->dev);
65f9e20e
SL
3378 if (md->part_curr != md->part_type) {
3379 mmc_claim_host(card->host);
3380 mmc_blk_part_switch(card, md->part_type);
3381 mmc_release_host(card->host);
3382 }
3beb0ab5 3383 if (!mmc_card_sd_combo(card))
e94cfef6
UH
3384 pm_runtime_disable(&card->dev);
3385 pm_runtime_put_noidle(&card->dev);
371a689f 3386 mmc_blk_remove_req(md);
dcf6e2e3 3387 destroy_workqueue(card->complete_wq);
1da177e4
LT
3388}
3389
96541bac 3390static int _mmc_blk_suspend(struct mmc_card *card)
1da177e4 3391{
371a689f 3392 struct mmc_blk_data *part_md;
96541bac 3393 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
1da177e4
LT
3394
3395 if (md) {
3396 mmc_queue_suspend(&md->queue);
371a689f
AW
3397 list_for_each_entry(part_md, &md->part, part) {
3398 mmc_queue_suspend(&part_md->queue);
3399 }
1da177e4
LT
3400 }
3401 return 0;
3402}
3403
96541bac 3404static void mmc_blk_shutdown(struct mmc_card *card)
76287748 3405{
96541bac 3406 _mmc_blk_suspend(card);
76287748
UH
3407}
3408
0967edc6
UH
3409#ifdef CONFIG_PM_SLEEP
3410static int mmc_blk_suspend(struct device *dev)
76287748 3411{
96541bac
UH
3412 struct mmc_card *card = mmc_dev_to_card(dev);
3413
3414 return _mmc_blk_suspend(card);
76287748
UH
3415}
3416
0967edc6 3417static int mmc_blk_resume(struct device *dev)
1da177e4 3418{
371a689f 3419 struct mmc_blk_data *part_md;
fc95e30b 3420 struct mmc_blk_data *md = dev_get_drvdata(dev);
1da177e4
LT
3421
3422 if (md) {
371a689f
AW
3423 /*
3424 * Resume involves the card going into idle state,
3425 * so current partition is always the main one.
3426 */
3427 md->part_curr = md->part_type;
1da177e4 3428 mmc_queue_resume(&md->queue);
371a689f
AW
3429 list_for_each_entry(part_md, &md->part, part) {
3430 mmc_queue_resume(&part_md->queue);
3431 }
1da177e4
LT
3432 }
3433 return 0;
3434}
1da177e4
LT
3435#endif
3436
0967edc6
UH
3437static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
3438
96541bac
UH
3439static struct mmc_driver mmc_driver = {
3440 .drv = {
3441 .name = "mmcblk",
3442 .pm = &mmc_blk_pm_ops,
3443 },
1da177e4
LT
3444 .probe = mmc_blk_probe,
3445 .remove = mmc_blk_remove,
76287748 3446 .shutdown = mmc_blk_shutdown,
1da177e4
LT
3447};
3448
3449static int __init mmc_blk_init(void)
3450{
9d4e98e9 3451 int res;
1da177e4 3452
97548575
LW
3453 res = bus_register(&mmc_rpmb_bus_type);
3454 if (res < 0) {
3455 pr_err("mmcblk: could not register RPMB bus type\n");
3456 return res;
3457 }
3458 res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
3459 if (res < 0) {
3460 pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
3461 goto out_bus_unreg;
3462 }
3463
5e71b7a6
OJ
3464 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
3465 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
3466
a26eba61 3467 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
5e71b7a6 3468
fe6b4c88
PO
3469 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
3470 if (res)
97548575 3471 goto out_chrdev_unreg;
1da177e4 3472
9d4e98e9
AM
3473 res = mmc_register_driver(&mmc_driver);
3474 if (res)
97548575 3475 goto out_blkdev_unreg;
1da177e4 3476
9d4e98e9 3477 return 0;
97548575
LW
3478
3479out_blkdev_unreg:
9d4e98e9 3480 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
97548575
LW
3481out_chrdev_unreg:
3482 unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
3483out_bus_unreg:
3484 bus_unregister(&mmc_rpmb_bus_type);
1da177e4
LT
3485 return res;
3486}
3487
3488static void __exit mmc_blk_exit(void)
3489{
3490 mmc_unregister_driver(&mmc_driver);
fe6b4c88 3491 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
97548575 3492 unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
d0a0852b 3493 bus_unregister(&mmc_rpmb_bus_type);
1da177e4
LT
3494}
3495
3496module_init(mmc_blk_init);
3497module_exit(mmc_blk_exit);
3498
3499MODULE_LICENSE("GPL");
3500MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");