[linux-2.6-block.git] drivers/s390/block/scm_blk_cluster.c
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

static unsigned int write_cluster_size = 64;
module_param(write_cluster_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_cluster_size,
		 "Number of pages used for contiguous writes.");

#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)

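/*
 * Free the per-request cluster buffer: release each page referenced by
 * the pointer array, then the array itself.
 */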
void __scm_free_rq_cluster(struct scm_request *scmrq)
{
	int i;

	if (!scmrq->cluster.buf)
		return;

	for (i = 0; i < 2 * write_cluster_size; i++)
		free_page((unsigned long) scmrq->cluster.buf[i]);

	kfree(scmrq->cluster.buf);
}

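/*
 * Allocate the per-request cluster buffer: an array of pointers to
 * 2 * write_cluster_size zeroed DMA pages, enough to cover a request
 * that straddles a cluster boundary. A partially filled array is safe
 * to hand to __scm_free_rq_cluster() since unused slots stay NULL.
 */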
int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
	int i;

	scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
				     GFP_KERNEL);
	if (!scmrq->cluster.buf)
		return -ENOMEM;

	for (i = 0; i < 2 * write_cluster_size; i++) {
		scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
		if (!scmrq->cluster.buf[i])
			return -ENOMEM;
	}
	INIT_LIST_HEAD(&scmrq->cluster.list);
	return 0;
}

void scm_request_cluster_init(struct scm_request *scmrq)
{
	scmrq->cluster.state = CLUSTER_NONE;
}

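/*
 * Two requests intersect if the ranges of write clusters they touch
 * overlap. Sector positions are converted to byte offsets (<< 9) and
 * then to cluster indices.
 */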
static bool clusters_intersect(struct request *A, struct request *B)
{
	unsigned long firstA, lastA, firstB, lastB;

	firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
	lastA = (((u64) blk_rq_pos(A) << 9) +
		 blk_rq_bytes(A) - 1) / CLUSTER_SIZE;

	firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
	lastB = (((u64) blk_rq_pos(B) << 9) +
		 blk_rq_bytes(B) - 1) / CLUSTER_SIZE;

	return (firstB <= lastA && firstA <= lastB);
}

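/*
 * Reserve the write clusters touched by the request that is being added
 * to scmrq. Returns false if another queued request intersects those
 * clusters and at least one of the two is a write; otherwise the scmrq
 * is added to the device's cluster_list (if not already there) and true
 * is returned.
 */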
bool scm_reserve_cluster(struct scm_request *scmrq)
{
	struct request *req = scmrq->request[scmrq->aob->request.msb_count];
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_request *iter;
	int pos, add = 1;

	if (write_cluster_size == 0)
		return true;

	spin_lock(&bdev->lock);
	list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
		if (iter == scmrq) {
			/*
			 * We don't have to use clusters_intersect here, since
			 * cluster requests are always started separately.
			 */
			add = 0;
			continue;
		}
		for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
			if (clusters_intersect(req, iter->request[pos]) &&
			    (rq_data_dir(req) == WRITE ||
			     rq_data_dir(iter->request[pos]) == WRITE)) {
				spin_unlock(&bdev->lock);
				return false;
			}
		}
	}
	if (add)
		list_add(&scmrq->cluster.list, &bdev->cluster_list);
	spin_unlock(&bdev->lock);

	return true;
}

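/*
 * Remove the request from the device's cluster_list, releasing the
 * clusters it had reserved.
 */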
void scm_release_cluster(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (write_cluster_size == 0)
		return;

	spin_lock_irqsave(&bdev->lock, flags);
	list_del(&scmrq->cluster.list);
	spin_unlock_irqrestore(&bdev->lock, flags);
}

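/*
 * Per-device setup: initialize the cluster list and report CLUSTER_SIZE
 * as the optimal I/O size of the request queue.
 */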
void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
	INIT_LIST_HEAD(&bdev->cluster_list);
	blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}

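/*
 * Build the msb/aidaw list for a clustered write, which is handled as a
 * read-modify-write sequence:
 *
 * CLUSTER_READ:  read the cluster(s) covering the request into the
 *                preallocated cluster buffer; one or two clusters are
 *                read depending on whether the request crosses a
 *                cluster boundary.
 * CLUSTER_WRITE: write the cluster(s) back, taking the unmodified head
 *                and tail pages from the cluster buffer and the pages
 *                in between from the request's bio segments.
 */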
static int scm_prepare_cluster_request(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct request *req = scmrq->request[0];
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;
	int i = 0;
	u64 addr;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		scmrq->cluster.state = CLUSTER_READ;
		/* fall through */
	case CLUSTER_READ:
		msb->bs = MSB_BS_4K;
		msb->oc = MSB_OC_READ;
		msb->flags = MSB_FLAG_IDA;
		msb->blk_count = write_cluster_size;

		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
		msb->scm_addr = round_down(addr, CLUSTER_SIZE);

		if (msb->scm_addr !=
		    round_down(addr + (u64) blk_rq_bytes(req) - 1,
			       CLUSTER_SIZE))
			msb->blk_count = 2 * write_cluster_size;

		aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
		if (!aidaw)
			return -ENOMEM;

		scmrq->aob->request.msb_count = 1;
		msb->data_addr = (u64) aidaw;
		for (i = 0; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}

		break;
	case CLUSTER_WRITE:
		aidaw = (void *) msb->data_addr;
		msb->oc = MSB_OC_WRITE;

		for (addr = msb->scm_addr;
		     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
		     addr += PAGE_SIZE) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
			i++;
		}
		rq_for_each_segment(bv, req, iter) {
			aidaw->data_addr = (u64) page_address(bv.bv_page);
			aidaw++;
			i++;
		}
		for (; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}
		break;
	}
	return 0;
}

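/*
 * A write smaller than CLUSTER_SIZE needs the read-modify-write
 * treatment; reads never do.
 */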
bool scm_need_cluster_request(struct scm_request *scmrq)
{
	int pos = scmrq->aob->request.msb_count;

	if (rq_data_dir(scmrq->request[pos]) == READ)
		return false;

	return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
}

/* Called with queue lock held. */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
	if (scm_prepare_cluster_request(scmrq))
		goto requeue;
	if (eadm_start_aob(scmrq->aob))
		goto requeue;
	return;
requeue:
	scm_request_requeue(scmrq);
}

bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return scmrq->cluster.state != CLUSTER_NONE;
}

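/*
 * Interrupt handling for cluster requests: after a successful
 * CLUSTER_READ the request is restarted in CLUSTER_WRITE state to write
 * the merged data back; errors and completed writes finish the request.
 */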
void scm_cluster_request_irq(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		BUG();
		break;
	case CLUSTER_READ:
		if (scmrq->error) {
			scm_request_finish(scmrq);
			break;
		}
		scmrq->cluster.state = CLUSTER_WRITE;
		spin_lock_irqsave(&bdev->rq_lock, flags);
		scm_initiate_cluster_request(scmrq);
		spin_unlock_irqrestore(&bdev->rq_lock, flags);
		break;
	case CLUSTER_WRITE:
		scm_request_finish(scmrq);
		break;
	}
}

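/*
 * write_cluster_size must be 0 (clustering disabled) or a power of two
 * between 2 and 128 pages.
 */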
bool scm_cluster_size_valid(void)
{
	if (write_cluster_size == 1 || write_cluster_size > 128)
		return false;

	return !(write_cluster_size & (write_cluster_size - 1));
}