/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with either qp->s_lock or qp->r_rq.lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
	unsigned long flags;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

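	/*
	 * Compute the next head index with wraparound.  The ring holds
	 * ibcq.cqe + 1 slots, so head == tail means the queue is empty
	 * and advancing head onto tail would mean it is full.
	 */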
	if (cq->head == cq->ibcq.cqe)
		next = 0;
	else
		next = cq->head + 1;
	if (unlikely(next == cq->tail)) {
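		/*
		 * The queue is full.  Drop the lock before calling the
		 * consumer's event handler; the handler may call back
		 * into functions that take cq->lock.
		 */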
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	cq->queue[cq->head] = *entry;
	cq->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		tasklet_hi_schedule(&cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	if (entry->status != IB_WC_SUCCESS)
		to_idev(cq->ibcq.device)->n_wqe_errs++;
}

/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by
 * ib_poll_cq() in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

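	/*
	 * Copy entries out from tail to head, advancing tail with
	 * wraparound; the queue is empty when tail catches up with head.
	 */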
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (cq->tail == cq->head)
			break;
		*entry = cq->queue[cq->tail];
		if (cq->tail == cq->ibcq.cqe)
			cq->tail = 0;
		else
			cq->tail++;
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

static void send_complete(unsigned long data)
{
	struct ipath_cq *cq = (struct ipath_cq *)data;

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, tasklet_hi_schedule()
	 * won't call us again until we return, so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

		if (cq->triggered == triggered)
			return;
	}
}

/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @context: unused by the InfiniPath driver
 * @udata: unused by the InfiniPath driver
 *
 * Returns a pointer to the completion queue on success, or an
 * ERR_PTR() encoding a negative errno on failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ipath_cq *cq;
	struct ib_wc *wc;
	struct ib_cq *ret;

	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 * Allocate entries + 1 slots so the ring buffer can distinguish
	 * a full queue from an empty one.
	 */
	wc = vmalloc(sizeof(*wc) * (entries + 1));
	if (!wc) {
		kfree(cq);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}
	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The reported number of entries must be >= the number requested,
	 * or an error should be returned.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
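	/* Completions are reported to the consumer from tasklet context. */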
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	cq->head = 0;
	cq->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

bail:
	return ret;
}

/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
	struct ipath_cq *cq = to_icq(ibcq);

	tasklet_kill(&cq->comptask);
	vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions.
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify;
	spin_unlock_irqrestore(&cq->lock, flags);
	return 0;
}

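/**
 * ipath_resize_cq - change the size of the completion queue
 * @ibcq: the completion queue to resize
 * @cqe: the minimum new size of the completion queue
 * @udata: unused by the InfiniPath driver
 *
 * Returns 0 for success, or a negative errno on failure.
 *
 * Called by ib_resize_cq() in the generic verbs code.
 */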
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ib_wc *wc, *old_wc;
	u32 n;
	int ret;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	wc = vmalloc(sizeof(*wc) * (cqe + 1));
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	spin_lock_irq(&cq->lock);
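	/*
	 * Compute the number of entries currently queued, accounting
	 * for the case where head has wrapped around past tail.
	 */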
	if (cq->head < cq->tail)
		n = cq->ibcq.cqe + 1 + cq->head - cq->tail;
	else
		n = cq->head - cq->tail;
	if (unlikely((u32)cqe < n)) {
		spin_unlock_irq(&cq->lock);
		vfree(wc);
		ret = -EOVERFLOW;
		goto bail;
	}
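	/* Copy the entries still queued into the new ring. */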
	for (n = 0; cq->tail != cq->head; n++) {
		wc[n] = cq->queue[cq->tail];
		if (cq->tail == cq->ibcq.cqe)
			cq->tail = 0;
		else
			cq->tail++;
	}
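	/* The n copied entries now start at index 0 of the new ring. */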
	cq->ibcq.cqe = cqe;
	cq->head = n;
	cq->tail = 0;
	old_wc = cq->queue;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	ret = 0;

bail:
	return ret;
}