Commit | Line | Data |
---|---|---|
225c7b1f RD |
1 | /* |
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | |
51a379d0 | 3 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
225c7b1f RD |
4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. |
5 | * | |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | */ | |
34 | ||
35 | #include <linux/sched.h> | |
5a0e3ad6 | 36 | #include <linux/slab.h> |
225c7b1f RD |
37 | #include <linux/pci.h> |
38 | #include <linux/errno.h> | |
39 | ||
40 | #include <linux/mlx4/cmd.h> | |
41 | ||
42 | #include <asm/io.h> | |
43 | ||
44 | #include "mlx4.h" | |
45 | ||
46 | #define CMD_POLL_TOKEN 0xffff | |
47 | ||
/* Status codes returned by firmware in the HCR status field. */
enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE   = 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};
88 | ||
/*
 * Layout of the Host Command Register (HCR): byte offsets of the
 * fields within the register block, followed by bit positions within
 * the final (dispatch) word.
 */
enum {
	HCR_IN_PARAM_OFFSET	= 0x00,	/* 64-bit input parameter */
	HCR_IN_MODIFIER_OFFSET	= 0x08,	/* 32-bit input modifier */
	HCR_OUT_PARAM_OFFSET	= 0x0c,	/* 64-bit output parameter */
	HCR_TOKEN_OFFSET	= 0x14,	/* command token (upper 16 bits) */
	HCR_STATUS_OFFSET	= 0x18,	/* status byte + go/toggle bits */

	HCR_OPMOD_SHIFT		= 12,	/* opcode modifier position */
	HCR_T_BIT		= 21,	/* toggle bit */
	HCR_E_BIT		= 22,	/* event bit: complete via EQ */
	HCR_GO_BIT		= 23	/* go bit: command owned by HW */
};
101 | ||
enum {
	/* Max time (ms) to wait for the GO bit to clear before posting
	 * a new command (only waited for in event mode). */
	GO_BIT_TIMEOUT_MSECS	= 10000
};
105 | ||
/* Per-command bookkeeping for event-driven (non-polling) FW commands. */
struct mlx4_cmd_context {
	struct completion	done;		/* completed by mlx4_cmd_event() */
	int			result;		/* errno translated from FW status */
	int			next;		/* next free context index; -1 = end of list */
	u64			out_param;	/* immediate output captured from the event */
	u16			token;		/* matches a completion event to this context */
};
113 | ||
ca281211 RD |
114 | static int mlx4_status_to_errno(u8 status) |
115 | { | |
225c7b1f RD |
116 | static const int trans_table[] = { |
117 | [CMD_STAT_INTERNAL_ERR] = -EIO, | |
118 | [CMD_STAT_BAD_OP] = -EPERM, | |
119 | [CMD_STAT_BAD_PARAM] = -EINVAL, | |
120 | [CMD_STAT_BAD_SYS_STATE] = -ENXIO, | |
121 | [CMD_STAT_BAD_RESOURCE] = -EBADF, | |
122 | [CMD_STAT_RESOURCE_BUSY] = -EBUSY, | |
123 | [CMD_STAT_EXCEED_LIM] = -ENOMEM, | |
124 | [CMD_STAT_BAD_RES_STATE] = -EBADF, | |
125 | [CMD_STAT_BAD_INDEX] = -EBADF, | |
126 | [CMD_STAT_BAD_NVMEM] = -EFAULT, | |
899698da | 127 | [CMD_STAT_ICM_ERROR] = -ENFILE, |
225c7b1f RD |
128 | [CMD_STAT_BAD_QP_STATE] = -EINVAL, |
129 | [CMD_STAT_BAD_SEG_PARAM] = -EFAULT, | |
130 | [CMD_STAT_REG_BOUND] = -EBUSY, | |
131 | [CMD_STAT_LAM_NOT_PRE] = -EAGAIN, | |
132 | [CMD_STAT_BAD_PKT] = -EINVAL, | |
133 | [CMD_STAT_BAD_SIZE] = -ENOMEM, | |
cc4ac2e7 | 134 | [CMD_STAT_MULTI_FUNC_REQ] = -EACCES, |
225c7b1f RD |
135 | }; |
136 | ||
137 | if (status >= ARRAY_SIZE(trans_table) || | |
138 | (status != CMD_STAT_OK && trans_table[status] == 0)) | |
139 | return -EIO; | |
140 | ||
141 | return trans_table[status]; | |
142 | } | |
143 | ||
/*
 * Return nonzero while the HCR is still owned by the hardware: either
 * the GO bit is set, or the hardware toggle bit still equals our
 * software toggle (presumably meaning the previous command has not
 * yet been consumed -- NOTE(review): toggle semantics inferred from
 * usage in mlx4_cmd_post(); confirm against the HCA programming docs).
 * The register is read big-endian, hence the swab32() on the masks.
 */
static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}
152 | ||
/*
 * Write one firmware command into the HCR and hand it to the hardware
 * by setting the GO bit.  Serialized by cmd->hcr_mutex.
 *
 * In event mode we wait up to GO_BIT_TIMEOUT_MSECS for the previous
 * command to be consumed; in polling mode "end" stays at the current
 * jiffies, so a still-pending HCR fails immediately with -EAGAIN.
 * Returns 0 once the command has been posted (not completed).
 */
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EAGAIN;
	unsigned long end;

	mutex_lock(&cmd->hcr_mutex);

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (time_after_eq(jiffies, end))
			goto out;
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	/* The GO bit write must come last: it transfers HCR ownership
	 * to the hardware, which then latches the fields written above. */
	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op),			  hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	mutex_unlock(&cmd->hcr_mutex);
	return ret;
}
210 | ||
/*
 * Issue a firmware command and busy-wait (with cond_resched) for it to
 * complete, polling the HCR GO/toggle bits.  Serialized by poll_sem so
 * only one polled command is outstanding at a time.
 *
 * @out_is_imm: if set, the 64-bit immediate result is read back from
 *              the HCR out-param words into *out_param.
 * @timeout:    in milliseconds.
 * Returns 0 on success, -ETIMEDOUT if the command never completed, or
 * the errno translated from the FW status byte.
 */
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;

	down(&priv->cmd.poll_sem);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end))
		cond_resched();

	if (cmd_pending(dev)) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* The HCR only supports 32-bit reads, so the 64-bit immediate
	 * output is assembled from two big-endian words. */
	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));

	/* FW status lives in the top byte of the status word. */
	err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
					       __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);

out:
	up(&priv->cmd.poll_sem);
	return err;
}
250 | ||
/*
 * Completion handler called from the EQ path when a command-completion
 * event arrives.  Looks up the command context by the low bits of the
 * token and wakes the waiter in mlx4_cmd_wait().
 */
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->result    = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}
266 | ||
267 | static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | |
268 | int out_is_imm, u32 in_modifier, u8 op_modifier, | |
269 | u16 op, unsigned long timeout) | |
270 | { | |
271 | struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; | |
272 | struct mlx4_cmd_context *context; | |
273 | int err = 0; | |
274 | ||
275 | down(&cmd->event_sem); | |
276 | ||
277 | spin_lock(&cmd->context_lock); | |
278 | BUG_ON(cmd->free_head < 0); | |
279 | context = &cmd->context[cmd->free_head]; | |
0981582d | 280 | context->token += cmd->token_mask + 1; |
225c7b1f RD |
281 | cmd->free_head = context->next; |
282 | spin_unlock(&cmd->context_lock); | |
283 | ||
284 | init_completion(&context->done); | |
285 | ||
286 | mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, | |
287 | in_modifier, op_modifier, op, context->token, 1); | |
288 | ||
289 | if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { | |
290 | err = -EBUSY; | |
291 | goto out; | |
292 | } | |
293 | ||
294 | err = context->result; | |
295 | if (err) | |
296 | goto out; | |
297 | ||
298 | if (out_is_imm) | |
299 | *out_param = context->out_param; | |
300 | ||
301 | out: | |
302 | spin_lock(&cmd->context_lock); | |
303 | context->next = cmd->free_head; | |
304 | cmd->free_head = context - cmd->context; | |
305 | spin_unlock(&cmd->context_lock); | |
306 | ||
307 | up(&cmd->event_sem); | |
308 | return err; | |
309 | } | |
310 | ||
311 | int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | |
312 | int out_is_imm, u32 in_modifier, u8 op_modifier, | |
313 | u16 op, unsigned long timeout) | |
314 | { | |
315 | if (mlx4_priv(dev)->cmd.use_events) | |
316 | return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm, | |
317 | in_modifier, op_modifier, op, timeout); | |
318 | else | |
319 | return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm, | |
320 | in_modifier, op_modifier, op, timeout); | |
321 | } | |
322 | EXPORT_SYMBOL_GPL(__mlx4_cmd); | |
323 | ||
324 | int mlx4_cmd_init(struct mlx4_dev *dev) | |
325 | { | |
326 | struct mlx4_priv *priv = mlx4_priv(dev); | |
327 | ||
328 | mutex_init(&priv->cmd.hcr_mutex); | |
329 | sema_init(&priv->cmd.poll_sem, 1); | |
330 | priv->cmd.use_events = 0; | |
331 | priv->cmd.toggle = 1; | |
332 | ||
333 | priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE, | |
334 | MLX4_HCR_SIZE); | |
335 | if (!priv->cmd.hcr) { | |
336 | mlx4_err(dev, "Couldn't map command register."); | |
337 | return -ENOMEM; | |
338 | } | |
339 | ||
340 | priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, | |
341 | MLX4_MAILBOX_SIZE, | |
342 | MLX4_MAILBOX_SIZE, 0); | |
343 | if (!priv->cmd.pool) { | |
344 | iounmap(priv->cmd.hcr); | |
345 | return -ENOMEM; | |
346 | } | |
347 | ||
348 | return 0; | |
349 | } | |
350 | ||
351 | void mlx4_cmd_cleanup(struct mlx4_dev *dev) | |
352 | { | |
353 | struct mlx4_priv *priv = mlx4_priv(dev); | |
354 | ||
355 | pci_pool_destroy(priv->cmd.pool); | |
356 | iounmap(priv->cmd.hcr); | |
357 | } | |
358 | ||
/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
				    sizeof (struct mlx4_cmd_context),
				    GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	/* Build the free list of command contexts, linked by index. */
	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next  = i + 1;
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	/* event_sem counts available contexts, so up to max_cmds
	 * commands may be in flight concurrently. */
	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
	spin_lock_init(&priv->cmd.context_lock);

	/* token_mask = (next power of two >= max_cmds) - 1; used by
	 * mlx4_cmd_event() to map a token back to a context index. */
	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	priv->cmd.use_events = 1;

	/* Hold poll_sem while in event mode so the polling path is
	 * locked out; released again by mlx4_cmd_use_polling(). */
	down(&priv->cmd.poll_sem);

	return 0;
}
397 | ||
/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.use_events = 0;

	/* Acquire every event_sem count so we know no event-mode
	 * command is still in flight before freeing the contexts. */
	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);

	/* Re-enable the polling path (held since mlx4_cmd_use_events()). */
	up(&priv->cmd.poll_sem);
}
415 | ||
416 | struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) | |
417 | { | |
418 | struct mlx4_cmd_mailbox *mailbox; | |
419 | ||
420 | mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL); | |
421 | if (!mailbox) | |
422 | return ERR_PTR(-ENOMEM); | |
423 | ||
424 | mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL, | |
425 | &mailbox->dma); | |
426 | if (!mailbox->buf) { | |
427 | kfree(mailbox); | |
428 | return ERR_PTR(-ENOMEM); | |
429 | } | |
430 | ||
431 | return mailbox; | |
432 | } | |
433 | EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox); | |
434 | ||
435 | void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox) | |
436 | { | |
437 | if (!mailbox) | |
438 | return; | |
439 | ||
440 | pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma); | |
441 | kfree(mailbox); | |
442 | } | |
443 | EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox); |