s390/scm_block: use mempool to manage aidaw requests
[linux-2.6-block.git] drivers/s390/block/scm_blk_cluster.c
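
The commit subject refers to how this file now obtains its aidaw (indirect data address word) lists: scm_prepare_cluster_request() below calls scm_aidaw_alloc(), which lives in scm_blk.c (not shown here) and, per the subject line, is backed by a mempool so that building a request does not fail needlessly under memory pressure. A minimal sketch of such a page-backed allocator is given for orientation only; the pool name aidaw_pool, the reserve of 64 elements, and the helpers aidaw_pool_init()/scm_aidaw_free() are illustrative assumptions, not the driver's actual code.

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/eadm.h>

/* Illustrative pool; the real one is set up in scm_blk.c. */
static mempool_t *aidaw_pool;

static int __init aidaw_pool_init(void)
{
        /* Keep a reserve of single pages so request setup can still make
         * progress when the page allocator is under pressure. */
        aidaw_pool = mempool_create_page_pool(64, 0);
        return aidaw_pool ? 0 : -ENOMEM;
}

struct aidaw *scm_aidaw_alloc(void)
{
        /* The caller holds the queue lock, so the allocation must not sleep. */
        struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

        return page ? page_address(page) : NULL;
}

/* Illustrative counterpart: return an aidaw list page to the pool. */
static void scm_aidaw_free(struct aidaw *aidaw)
{
        if (aidaw)
                mempool_free(virt_to_page(aidaw), aidaw_pool);
}

The GFP_ATOMIC allocation matches the call site below: scm_initiate_cluster_request(), which invokes scm_prepare_cluster_request(), is documented as running with the queue lock held.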
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

static unsigned int write_cluster_size = 64;
module_param(write_cluster_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_cluster_size,
                 "Number of pages used for contiguous writes.");

#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)

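/* Free the pointer array and the 2 * write_cluster_size bounce pages behind it. */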
void __scm_free_rq_cluster(struct scm_request *scmrq)
{
        int i;

        if (!scmrq->cluster.buf)
                return;

        for (i = 0; i < 2 * write_cluster_size; i++)
                free_page((unsigned long) scmrq->cluster.buf[i]);

        kfree(scmrq->cluster.buf);
}

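/*
 * Allocate the per-request cluster bounce buffer: a pointer array backed by
 * 2 * write_cluster_size zeroed pages from the DMA zone.
 */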
int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
        int i;

        scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
                                     GFP_KERNEL);
        if (!scmrq->cluster.buf)
                return -ENOMEM;

        for (i = 0; i < 2 * write_cluster_size; i++) {
                scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
                if (!scmrq->cluster.buf[i])
                        return -ENOMEM;
        }
        INIT_LIST_HEAD(&scmrq->cluster.list);
        return 0;
}

void scm_request_cluster_init(struct scm_request *scmrq)
{
        scmrq->cluster.state = CLUSTER_NONE;
}

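/*
 * Two requests intersect if they touch a common write cluster; request
 * positions are converted from 512-byte sectors to byte offsets and then
 * to first/last cluster indexes.
 */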
static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
{
        unsigned long firstA, lastA, firstB, lastB;

        firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
        lastA = (((u64) blk_rq_pos(A->request) << 9) +
                    blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;

        firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
        lastB = (((u64) blk_rq_pos(B->request) << 9) +
                    blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;

        return (firstB <= lastA && firstA <= lastB);
}

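/*
 * Enqueue the request on the device's cluster list unless it conflicts with
 * a request already on the list, i.e. both touch the same cluster and at
 * least one of the two is a write.
 */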
bool scm_reserve_cluster(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_request *iter;

        if (write_cluster_size == 0)
                return true;

        spin_lock(&bdev->lock);
        list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
                if (clusters_intersect(scmrq, iter) &&
                    (rq_data_dir(scmrq->request) == WRITE ||
                     rq_data_dir(iter->request) == WRITE)) {
                        spin_unlock(&bdev->lock);
                        return false;
                }
        }
        list_add(&scmrq->cluster.list, &bdev->cluster_list);
        spin_unlock(&bdev->lock);

        return true;
}

void scm_release_cluster(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        if (write_cluster_size == 0)
                return;

        spin_lock_irqsave(&bdev->lock, flags);
        list_del(&scmrq->cluster.list);
        spin_unlock_irqrestore(&bdev->lock, flags);
}

void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
        INIT_LIST_HEAD(&bdev->cluster_list);
        blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}

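/*
 * Set up the msb and its aidaw list for the current cluster state:
 * CLUSTER_READ reads the affected cluster(s) into the bounce buffer,
 * CLUSTER_WRITE reuses that aidaw list but points the updated range at the
 * request's own pages before the cluster is written back.
 */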
static int scm_prepare_cluster_request(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_device *scmdev = bdev->gendisk->private_data;
        struct request *req = scmrq->request;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
        struct aidaw *aidaw;
        struct bio_vec bv;
        int i = 0;
        u64 addr;

        switch (scmrq->cluster.state) {
        case CLUSTER_NONE:
                scmrq->cluster.state = CLUSTER_READ;
                /* fall through */
        case CLUSTER_READ:
                aidaw = scm_aidaw_alloc();
                if (!aidaw)
                        return -ENOMEM;

                memset(aidaw, 0, PAGE_SIZE);
                scmrq->aob->request.msb_count = 1;
                msb->bs = MSB_BS_4K;
                msb->oc = MSB_OC_READ;
                msb->flags = MSB_FLAG_IDA;
                msb->data_addr = (u64) aidaw;
                msb->blk_count = write_cluster_size;

                addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
                msb->scm_addr = round_down(addr, CLUSTER_SIZE);

                if (msb->scm_addr !=
                    round_down(addr + (u64) blk_rq_bytes(req) - 1,
                               CLUSTER_SIZE))
                        msb->blk_count = 2 * write_cluster_size;

                for (i = 0; i < msb->blk_count; i++) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                }

                break;
        case CLUSTER_WRITE:
                aidaw = (void *) msb->data_addr;
                msb->oc = MSB_OC_WRITE;

                for (addr = msb->scm_addr;
                     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
                     addr += PAGE_SIZE) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                        i++;
                }
                rq_for_each_segment(bv, req, iter) {
                        aidaw->data_addr = (u64) page_address(bv.bv_page);
                        aidaw++;
                        i++;
                }
                for (; i < msb->blk_count; i++) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                }
                break;
        }
        return 0;
}

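/* Writes smaller than a full cluster have to take the read-modify-write path. */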
bool scm_need_cluster_request(struct scm_request *scmrq)
{
        if (rq_data_dir(scmrq->request) == READ)
                return false;

        return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
}

/* Called with queue lock held. */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
        if (scm_prepare_cluster_request(scmrq))
                goto requeue;
        if (eadm_start_aob(scmrq->aob))
                goto requeue;
        return;
requeue:
        scm_request_requeue(scmrq);
}

bool scm_test_cluster_request(struct scm_request *scmrq)
{
        return scmrq->cluster.state != CLUSTER_NONE;
}

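/*
 * Interrupt handler for clustered requests: once the read half completes
 * without error, start the write half; on error, or after the write half,
 * finish the request.
 */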
void scm_cluster_request_irq(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        switch (scmrq->cluster.state) {
        case CLUSTER_NONE:
                BUG();
                break;
        case CLUSTER_READ:
                if (scmrq->error) {
                        scm_request_finish(scmrq);
                        break;
                }
                scmrq->cluster.state = CLUSTER_WRITE;
                spin_lock_irqsave(&bdev->rq_lock, flags);
                scm_initiate_cluster_request(scmrq);
                spin_unlock_irqrestore(&bdev->rq_lock, flags);
                break;
        case CLUSTER_WRITE:
                scm_request_finish(scmrq);
                break;
        }
}

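/* write_cluster_size must be 0 (no clustering) or a power of two up to 128. */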
bool scm_cluster_size_valid(void)
{
        if (write_cluster_size == 1 || write_cluster_size > 128)
                return false;

        return !(write_cluster_size & (write_cluster_size - 1));
}