Commit | Line | Data |
---|---|---|
6a55d2cd | 1 | // SPDX-License-Identifier: GPL-2.0 |
f30664e2 SO |
2 | /* |
3 | * Block driver for s390 storage class memory. | |
4 | * | |
5 | * Copyright IBM Corp. 2012 | |
6 | * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> | |
7 | */ | |
8 | ||
9 | #define KMSG_COMPONENT "scm_block" | |
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | |
11 | ||
12 | #include <linux/interrupt.h> | |
13 | #include <linux/spinlock.h> | |
9d4df77f | 14 | #include <linux/mempool.h> |
f30664e2 SO |
15 | #include <linux/module.h> |
16 | #include <linux/blkdev.h> | |
12d90762 | 17 | #include <linux/blk-mq.h> |
f30664e2 SO |
18 | #include <linux/genhd.h> |
19 | #include <linux/slab.h> | |
20 | #include <linux/list.h> | |
21 | #include <asm/eadm.h> | |
22 | #include "scm_blk.h" | |
23 | ||
debug_info_t *scm_debug;			/* s390 debug-feature log for this driver */
static int scm_major;				/* dynamically assigned block major number */
static mempool_t *aidaw_pool;			/* page pool backing indirect data address words */
static DEFINE_SPINLOCK(list_lock);		/* protects inactive_requests */
static LIST_HEAD(inactive_requests);		/* preallocated scm_requests not currently in flight */
static unsigned int nr_requests = 64;		/* number of parallel HW requests (module param) */
static unsigned int nr_requests_per_io = 8;	/* block requests merged into one AOB (module param) */
static atomic_t nr_devices = ATOMIC_INIT(0);	/* SCM devices registered so far; caps disk naming */
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");
41 | ||
/*
 * Free one scm_request: its AOB page, its per-IO request array, and the
 * enclosing aob_rq_header the request is embedded in.
 */
static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	kfree(scmrq->request);	/* kfree(NULL) is a no-op on partial alloc */
	kfree(aobrq);
}
50 | ||
/*
 * Tear down the whole request cache: free every request on the inactive
 * list and destroy the aidaw mempool. Used on module exit and on failed
 * init; requests still in flight are not handled here.
 */
static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	/* mempool_destroy(NULL) is safe if the pool was never created. */
	mempool_destroy(aidaw_pool);
}
66 | ||
/*
 * Allocate one scm_request (embedded in an aob_rq_header), its zeroed
 * AOB page (GFP_DMA, as required by the hardware interface) and its
 * array of nr_requests_per_io block-request slots, then park it on the
 * inactive list. Returns 0 or -ENOMEM.
 */
static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	/* __scm_free_rq() tolerates the partially-initialized request. */
	__scm_free_rq(scmrq);
	return -ENOMEM;
}
96 | ||
/*
 * Preallocate the aidaw page pool and nrqs scm_requests. On partial
 * failure already-allocated requests stay on the inactive list; the
 * caller is expected to clean up via scm_free_rqs().
 */
static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	/* Pool is sized at 1/8 of the request count, but at least one page. */
	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}
110 | ||
/*
 * Take one request off the inactive list, or return NULL if the pool is
 * exhausted. Caller must return it via scm_request_done().
 */
static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock_irq(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock_irq(&list_lock);
	return scmrq;
}
124 | ||
/*
 * Return a finished scm_request to the inactive pool. Any aidaw pages
 * that were taken from the mempool (indicated by MSB_FLAG_IDA and a
 * page-aligned data address — aidaws carved out of the AOB page itself
 * are not page aligned) are given back first.
 */
static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = msb->data_addr;

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
	}

	/* irqsave: may be called from the completion (interrupt) path. */
	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}
145 | ||
4fa3c019 SO |
146 | static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) |
147 | { | |
148 | return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; | |
149 | } | |
150 | ||
/*
 * Grab a fresh page of aidaws from the mempool (GFP_ATOMIC: called from
 * the request submission path). Returns its virtual address or NULL.
 */
static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}
157 | ||
de88d0d2 SO |
158 | static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw) |
159 | { | |
160 | unsigned long _aidaw = (unsigned long) aidaw; | |
161 | unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw; | |
162 | ||
163 | return (bytes / sizeof(*aidaw)) * PAGE_SIZE; | |
164 | } | |
165 | ||
/*
 * Return an aidaw list able to describe at least @bytes of data: reuse
 * the leftover slots after the previous msb's list if they suffice,
 * otherwise allocate (and zero) a whole new page from the pool.
 * Returns NULL if the mempool is exhausted.
 */
struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}
178 | ||
/*
 * Fill in the next msb (message subblock) of the AOB for the block
 * request most recently attached via scm_request_set(): set block size,
 * SCM address, operation code and the aidaw list describing the data
 * pages. Returns 0, or -ENOMEM if no aidaws could be fetched (the msb
 * count is only incremented after that point, so the AOB stays valid).
 */
static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	/* Device address plus byte offset of the request (512-byte sectors). */
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, req, iter) {
		/* Segments are expected to be whole, page-aligned pages. */
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;	/* count in 4K blocks */
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	/* Remember where the next msb's aidaw list may continue. */
	scmrq->next_aidaw = aidaw;
	return 0;
}
211 | ||
/*
 * Attach (or with @req == NULL, detach) a block request to the slot
 * matching the AOB's current msb count.
 */
static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}
217 | ||
/*
 * Reset a freshly fetched scm_request for a new IO: clear the request
 * slots and the AOB, set the command header, and initialize retry count
 * and error state.
 */
static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = BLK_STS_OK;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
}
236 | ||
/*
 * Push every block request bundled in this scm_request back onto the
 * blk-mq requeue list, return the scm_request to the pool, and kick the
 * requeue list so the requests get dispatched again.
 */
static void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_mq_requeue_request(scmrq->request[i], false);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	blk_mq_kick_requeue_list(bdev->rq);
}
249 | ||
/*
 * Complete every block request bundled in this scm_request: stash the
 * per-IO status in each request's pdu (read back by
 * scm_blk_request_done()), signal completion, and return the
 * scm_request to the pool.
 */
static void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	blk_status_t *error;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		error = blk_mq_rq_to_pdu(scmrq->request[i]);
		*error = scmrq->error;
		blk_mq_complete_request(scmrq->request[i]);
	}

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}
265 | ||
/*
 * Hand the assembled AOB to the EADM subchannel. If no subchannel is
 * available the bundled requests are requeued for a later retry.
 */
static void scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	atomic_inc(&bdev->queued_reqs);
	if (eadm_start_aob(scmrq->aob)) {
		SCM_LOG(5, "no subchannel");
		/* Also drops the queued_reqs count taken above. */
		scm_request_requeue(scmrq);
	}
}
276 | ||
/*
 * Per-hardware-queue state: the scm_request currently being filled with
 * block requests (NULL when none is open), protected by @lock.
 */
struct scm_queue {
	struct scm_request *scmrq;
	spinlock_t lock;
};
281 | ||
/*
 * blk-mq ->queue_rq handler. Batches up to nr_requests_per_io block
 * requests into one scm_request/AOB per hardware queue; the AOB is
 * started when it is full or when qd->last marks the end of a dispatch
 * run. Returns BLK_STS_RESOURCE (try again later) when writes are
 * prohibited, the request pool is empty, or aidaw allocation fails.
 */
static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *qd)
{
	struct scm_device *scmdev = hctx->queue->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_queue *sq = hctx->driver_data;
	struct request *req = qd->rq;
	struct scm_request *scmrq;

	spin_lock(&sq->lock);
	if (!scm_permit_request(bdev, req)) {
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}

	scmrq = sq->scmrq;
	if (!scmrq) {
		/* Open a new batch for this hardware queue. */
		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			spin_unlock(&sq->lock);
			return BLK_STS_RESOURCE;
		}
		scm_request_init(bdev, scmrq);
		sq->scmrq = scmrq;
	}
	scm_request_set(scmrq, req);

	if (scm_request_prepare(scmrq)) {
		SCM_LOG(5, "aidaw alloc failed");
		/* Detach the failed request but submit what we already have. */
		scm_request_set(scmrq, NULL);

		if (scmrq->aob->request.msb_count)
			scm_request_start(scmrq);

		sq->scmrq = NULL;
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}
	blk_mq_start_request(req);

	if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
		scm_request_start(scmrq);
		sq->scmrq = NULL;
	}
	spin_unlock(&sq->lock);
	return BLK_STS_OK;
}
f30664e2 | 330 | |
/*
 * blk-mq ->init_hctx: allocate the per-hardware-queue batching state.
 * Returns 0 or -ENOMEM.
 */
static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			     unsigned int idx)
{
	struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);

	if (!qd)
		return -ENOMEM;

	spin_lock_init(&qd->lock);
	hctx->driver_data = qd;

	return 0;
}
344 | ||
/*
 * blk-mq ->exit_hctx: free the per-hardware-queue state. A still-open
 * batch at teardown would be a bug, hence the WARN_ON.
 */
static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	struct scm_queue *qd = hctx->driver_data;

	WARN_ON(qd->scmrq);
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}
353 | ||
/*
 * Log a failed scm_request to the s390 debug feature; on the final
 * attempt (no retries left) also report it to the kernel log.
 */
static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == BLK_STS_TIMEOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		/* Response block is hex-dumped for non-timeout errors. */
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}
370 | ||
/*
 * Retry path for a failed scm_request. For an I/O error the response
 * block is valid and is inspected: a write-prohibit condition flips the
 * device into SCM_WR_PROHIBIT state and requeues the requests instead
 * of restarting the AOB. All other errors restart the AOB directly,
 * falling back to requeue if no subchannel is available.
 */
static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != BLK_STS_IOERR)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		/* Log the transition only once. */
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	scm_request_requeue(scmrq);
}
400 | ||
/*
 * Completion callback invoked by the EADM layer when an AOB finishes.
 * On error the request is retried up to scmrq->retries times via
 * scm_blk_handle_error(); otherwise (or once retries are exhausted) the
 * bundled block requests are completed with the recorded status.
 */
void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
{
	struct scm_request *scmrq = data;

	scmrq->error = error;
	if (error) {
		__scmrq_log_error(scmrq);
		if (scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);
			return;
		}
	}

	scm_request_finish(scmrq);
}
0d804b20 | 416 | |
/*
 * blk-mq ->complete handler: end the request with the status that
 * scm_request_finish() stored in its pdu.
 */
static void scm_blk_request_done(struct request *req)
{
	blk_status_t *error = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, *error);
}
423 | ||
/* No special open/release/ioctl handling is needed for SCM disks. */
static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};
427 | ||
/* blk-mq operations wiring for the SCM block driver. */
static const struct blk_mq_ops scm_mq_ops = {
	.queue_rq = scm_blk_request,
	.complete = scm_blk_request_done,
	.init_hctx = scm_blk_init_hctx,
	.exit_hctx = scm_blk_exit_hctx,
};
434 | ||
/*
 * Create and register the block device for one SCM device: set up the
 * blk-mq tag set (one hardware queue per parallel request, queue depth
 * covering all batched IOs), the request queue and its limits, allocate
 * the gendisk, derive the disk name (scma..scmz, then scmaa..scmzz —
 * at most 702 devices) and add the disk. Returns 0 or a negative errno;
 * all partially acquired resources are released on failure.
 */
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	unsigned int devindex, nr_max_blk;
	struct request_queue *rq;
	int len, ret;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->lock);
	atomic_set(&bdev->queued_reqs, 0);

	bdev->tag_set.ops = &scm_mq_ops;
	/* Per-request pdu holds the completion status (blk_status_t). */
	bdev->tag_set.cmd_size = sizeof(blk_status_t);
	bdev->tag_set.nr_hw_queues = nr_requests;
	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&bdev->tag_set);
	if (ret)
		goto out;

	rq = blk_mq_init_queue(&bdev->tag_set);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out_tag;
	}
	bdev->rq = rq;
	/* One aidaw page limits how many data pages a single msb can map. */
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk) {
		ret = -ENOMEM;
		goto out_queue;
	}
	rq->queuedata = scmdev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		/* Two-letter suffix for devices beyond "scmz". */
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	device_add_disk(&scmdev->dev, bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out_tag:
	blk_mq_free_tag_set(&bdev->tag_set);
out:
	atomic_dec(&nr_devices);
	return ret;
}
513 | ||
/*
 * Undo scm_blk_dev_setup(): remove the disk, drain and free the queue
 * and tag set, then drop the gendisk reference.
 */
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	blk_mq_free_tag_set(&bdev->tag_set);
	put_disk(bdev->gendisk);
}
521 | ||
/*
 * Clear a previous write-prohibit condition and return the device to
 * normal operation, logging the transition if one actually occurred.
 */
void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}
533 | ||
8622384f SO |
534 | static bool __init scm_blk_params_valid(void) |
535 | { | |
536 | if (!nr_requests_per_io || nr_requests_per_io > 64) | |
537 | return false; | |
538 | ||
94d26bfc | 539 | return true; |
8622384f SO |
540 | } |
541 | ||
/*
 * Module init: validate parameters, register the block major, allocate
 * the request pool, set up the s390 debug log, and register the SCM
 * device driver. Resources are released in reverse order on failure.
 */
static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");	/* 0: dynamic major */
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	/* Frees any requests that scm_alloc_rqs() managed to allocate. */
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);
582 | ||
/* Module exit: tear everything down in reverse order of scm_blk_init(). */
static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);