Commit | Line | Data |
---|---|---|
14d3a3b2 CH |
1 | /* |
2 | * Copyright (c) 2015 HGST, a Western Digital Company. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | */ | |
13 | #include <linux/module.h> | |
14 | #include <linux/err.h> | |
15 | #include <linux/slab.h> | |
16 | #include <rdma/ib_verbs.h> | |
17 | ||
18 | /* # of WCs to poll for with a single call to ib_poll_cq */ | |
19 | #define IB_POLL_BATCH 16 | |
20 | ||
21 | /* # of WCs to iterate over before yielding */ | |
22 | #define IB_POLL_BUDGET_IRQ 256 | |
23 | #define IB_POLL_BUDGET_WORKQUEUE 65536 | |
24 | ||
25 | #define IB_POLL_FLAGS \ | |
26 | (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) | |
27 | ||
/*
 * __ib_process_cq - poll and dispatch completions from a CQ
 * @cq:		CQ to drain; completions land in the cq->wc scratch array
 * @budget:	max completions to process, or -1 for no bound
 *
 * Each completion is dispatched to its wr_cqe->done() handler.  Returns the
 * number of completions processed; at most @budget unless @budget is -1,
 * since every ib_poll_cq() request is capped at budget - completed.
 */
static int __ib_process_cq(struct ib_cq *cq, int budget)
{
	int i, n, completed = 0;

	/*
	 * budget might be (-1) if the caller does not
	 * want to bound this call, thus we need unsigned
	 * minimum here.
	 */
	while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
			budget - completed), cq->wc)) > 0) {
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = &cq->wc[i];

			if (wc->wr_cqe)
				wc->wr_cqe->done(cq, wc);
			else
				/* legacy wr_id-style WRs must not succeed on this CQ */
				WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
		}

		completed += n;

		/*
		 * Stop when a short batch shows the CQ is drained, or the
		 * budget is met (the second test never fires for -1).
		 */
		if (n != IB_POLL_BATCH ||
		    (budget != -1 && completed >= budget))
			break;
	}

	return completed;
}
57 | ||
/**
 * ib_process_cq_direct - process a CQ in caller context
 * @cq: CQ to process
 * @budget: number of CQEs to poll for
 *
 * This function is used to process all outstanding CQ entries on a
 * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different
 * context and does not ask for completion interrupts from the HCA.
 *
 * Note: do not pass -1 as %budget unless it is guaranteed that the number
 * of completions that will be processed is small.
 *
 * Return: number of completions processed.
 */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
	/* Only CQs allocated with IB_POLL_DIRECT may be polled by the caller. */
	WARN_ON_ONCE(cq->poll_ctx != IB_POLL_DIRECT);

	return __ib_process_cq(cq, budget);
}
EXPORT_SYMBOL(ib_process_cq_direct);
77 | ||
static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	/*
	 * IB_POLL_DIRECT CQs are never armed for completion interrupts
	 * (see ib_alloc_cq()), so this handler should never be invoked;
	 * warn once if the HCA fires anyway.
	 */
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}
82 | ||
83 | static int ib_poll_handler(struct irq_poll *iop, int budget) | |
84 | { | |
85 | struct ib_cq *cq = container_of(iop, struct ib_cq, iop); | |
86 | int completed; | |
87 | ||
88 | completed = __ib_process_cq(cq, budget); | |
89 | if (completed < budget) { | |
90 | irq_poll_complete(&cq->iop); | |
91 | if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) | |
92 | irq_poll_sched(&cq->iop); | |
93 | } | |
94 | ||
95 | return completed; | |
96 | } | |
97 | ||
static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
	/* Completion interrupt: schedule irq_poll; work runs in softirq. */
	irq_poll_sched(&cq->iop);
}
102 | ||
103 | static void ib_cq_poll_work(struct work_struct *work) | |
104 | { | |
105 | struct ib_cq *cq = container_of(work, struct ib_cq, work); | |
106 | int completed; | |
107 | ||
108 | completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE); | |
109 | if (completed >= IB_POLL_BUDGET_WORKQUEUE || | |
110 | ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) | |
111 | queue_work(ib_comp_wq, &cq->work); | |
112 | } | |
113 | ||
static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
	/* Completion interrupt: defer CQ processing to the IB completion wq. */
	queue_work(ib_comp_wq, &cq->work);
}
118 | ||
/**
 * ib_alloc_cq - allocate a completion queue
 * @dev: device to allocate the CQ for
 * @private: driver private data, accessible from cq->cq_context
 * @nr_cqe: number of CQEs to allocate
 * @comp_vector: HCA completion vectors for this CQ
 * @poll_ctx: context to poll the CQ from.
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 *
 * Return: the new CQ on success, or an ERR_PTR() on failure.
 */
struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
		int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe = nr_cqe,
		.comp_vector = comp_vector,
	};
	struct ib_cq *cq;
	int ret = -ENOMEM;

	cq = dev->create_cq(dev, &cq_attr, NULL, NULL);
	if (IS_ERR(cq))
		return cq;

	cq->device = dev;
	cq->uobject = NULL;
	cq->event_handler = NULL;
	cq->cq_context = private;
	cq->poll_ctx = poll_ctx;
	atomic_set(&cq->usecnt, 0);

	/* Scratch array for polled WCs, sized for one ib_poll_cq() batch. */
	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
	if (!cq->wc)
		goto out_destroy_cq;

	/* Hook up the per-context completion handler and arm the CQ. */
	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		/* Caller polls explicitly; no interrupts requested. */
		cq->comp_handler = ib_cq_completion_direct;
		break;
	case IB_POLL_SOFTIRQ:
		cq->comp_handler = ib_cq_completion_softirq;

		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	case IB_POLL_WORKQUEUE:
		cq->comp_handler = ib_cq_completion_workqueue;
		INIT_WORK(&cq->work, ib_cq_poll_work);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	default:
		ret = -EINVAL;
		goto out_free_wc;
	}

	return cq;

out_free_wc:
	kfree(cq->wc);
out_destroy_cq:
	cq->device->destroy_cq(cq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_cq);
186 | ||
/**
 * ib_free_cq - free a completion queue
 * @cq: completion queue to free.
 *
 * Quiesces the polling context set up by ib_alloc_cq(), then destroys
 * the CQ.  Must not be called while the CQ is still in use (usecnt != 0).
 */
void ib_free_cq(struct ib_cq *cq)
{
	int ret;

	/* Refuse to tear down a CQ that still has users attached. */
	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;

	/* Stop the poll context first: pollers dereference cq->wc below. */
	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		break;
	case IB_POLL_SOFTIRQ:
		irq_poll_disable(&cq->iop);
		break;
	case IB_POLL_WORKQUEUE:
		cancel_work_sync(&cq->work);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	kfree(cq->wc);
	ret = cq->device->destroy_cq(cq);
	WARN_ON_ONCE(ret);
}
EXPORT_SYMBOL(ib_free_cq);