Commit | Line | Data |
---|---|---|
e126ba97 | 1 | /* |
302bdf68 | 2 | * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. |
e126ba97 EC |
3 | * |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #include <asm-generic/kmap_types.h> | |
34 | #include <linux/kernel.h> | |
35 | #include <linux/module.h> | |
36 | #include <linux/mlx5/driver.h> | |
37 | #include <linux/mlx5/cmd.h> | |
38 | #include "mlx5_core.h" | |
39 | ||
/* Opmod values for the MANAGE_PAGES command. */
enum {
	MLX5_PAGES_CANT_GIVE	= 0,	/* notify FW that the driver cannot supply pages */
	MLX5_PAGES_GIVE		= 1,	/* hand pages to FW */
	MLX5_PAGES_TAKE		= 2	/* reclaim pages from FW */
};
45 | ||
/* Opmod values for the QUERY_PAGES command (which init stage is queried). */
enum {
	MLX5_BOOT_PAGES		= 1,	/* pages needed before firmware boot completes */
	MLX5_INIT_PAGES		= 2,	/* pages needed for HCA initialization */
	MLX5_POST_INIT_PAGES	= 3	/* pages requested after init (event driven) */
};
51 | ||
/*
 * Deferred page request, queued from the page-request event handler and
 * serviced by pages_work_handler() on dev->priv.pg_wq.
 */
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;	/* function the request is for */
	s32	npages;		/* >0: give pages to FW, <0: reclaim from FW */
	struct work_struct work;
};
58 | ||
/*
 * Tracker for one system page given to firmware, carved into
 * MLX5_NUM_4K_IN_PAGE chunks of 4K each.
 */
struct fw_page {
	struct rb_node		rb_node;	/* keyed by addr in dev->priv.page_root */
	u64			addr;		/* DMA address of the page */
	struct page	       *page;		/* backing system page */
	u16			func_id;	/* function the page was given for */
	unsigned long		bitmask;	/* set bit = corresponding 4K chunk is free */
	struct list_head	list;		/* on dev->priv.free_list while any chunk is free */
	unsigned		free_count;	/* number of free 4K chunks */
};
68 | ||
/* Wire layout of the QUERY_PAGES command input mailbox. */
struct mlx5_query_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};
73 | ||
/* Wire layout of the QUERY_PAGES command output mailbox. */
struct mlx5_query_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_pages;	/* interpreted as s32 by the caller */
};
80 | ||
/* Wire layout of the MANAGE_PAGES command input mailbox. */
struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_entries;	/* number of addresses in pas[] */
	__be64			pas[0];		/* trailing 4K page addresses */
};
88 | ||
/* Wire layout of the MANAGE_PAGES command output mailbox. */
struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be32			num_entries;	/* number of addresses FW returned in pas[] */
	u8			rsvd[4];
	__be64			pas[0];		/* trailing 4K page addresses (TAKE only) */
};
95 | ||
enum {
	/* give up waiting for FW to return pages after this much idle time */
	MAX_RECLAIM_TIME_MSECS	= 5000,
};
99 | ||
enum {
	/* NOTE(review): duplicates MAX_RECLAIM_TIME_MSECS above and appears
	 * unused in this file — candidate for removal if no other user exists.
	 */
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	/* how many 4K FW chunks fit in one system page */
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
104 | ||
/*
 * Track a newly allocated/mapped system page in the per-device rbtree and
 * put it on the free list with all of its 4K chunks marked free.
 *
 * NB: the tree is ordered with SMALLER addresses descending RIGHT (the
 * comparisons are mirrored relative to the usual rbtree idiom).  This is
 * consistent with find_fw_page(), so lookups work — do not "fix" one
 * without the other.
 *
 * Returns 0, -EEXIST if addr is already tracked, or -ENOMEM.
 */
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	/* every 4K chunk starts out free */
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}
142 | ||
bf0bf77f | 143 | static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) |
e126ba97 EC |
144 | { |
145 | struct rb_root *root = &dev->priv.page_root; | |
146 | struct rb_node *tmp = root->rb_node; | |
bf0bf77f | 147 | struct fw_page *result = NULL; |
e126ba97 EC |
148 | struct fw_page *tfp; |
149 | ||
150 | while (tmp) { | |
151 | tfp = rb_entry(tmp, struct fw_page, rb_node); | |
152 | if (tfp->addr < addr) { | |
153 | tmp = tmp->rb_left; | |
154 | } else if (tfp->addr > addr) { | |
155 | tmp = tmp->rb_right; | |
156 | } else { | |
bf0bf77f | 157 | result = tfp; |
e126ba97 EC |
158 | break; |
159 | } | |
160 | } | |
161 | ||
162 | return result; | |
163 | } | |
164 | ||
/*
 * Ask firmware how many pages it needs and for which function.
 * @boot selects the BOOT_PAGES vs INIT_PAGES opmod.
 *
 * On success *npages holds the (signed) page count and *func_id the
 * function it applies to.  Returns 0 or a negative errno / translated
 * command status.
 */
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	struct mlx5_query_pages_inbox	in;
	struct mlx5_query_pages_outbox	out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
	in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);

	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	*npages = be32_to_cpu(out.num_pages);
	*func_id = be16_to_cpu(out.func_id);

	return err;
}
189 | ||
/*
 * Carve one free 4K chunk out of the first page on the free list and
 * return its DMA address in *addr.  A page whose last chunk is taken is
 * removed from the free list (it stays in the rbtree).
 *
 * Returns 0, -ENOMEM when no tracked page has a free chunk (caller then
 * allocates a fresh system page), or -ENOENT on internal inconsistency.
 */
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct fw_page *fp;
	unsigned n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		/* page on free list with no free chunk: bookkeeping bug */
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}
213 | ||
/*
 * Return one 4K chunk (by DMA address) to its tracking page.  When the
 * whole page becomes free again it is untracked, DMA-unmapped and freed
 * back to the system; when it goes from fully-used to partially-free it
 * re-enters the free list.
 */
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		/* free_count == 1 here means MLX5_NUM_4K_IN_PAGE == 1, in
		 * which case the page was never re-added to the free list
		 * below, so there is nothing to unlink.
		 */
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		/* first chunk freed: page becomes allocatable again */
		list_add(&fwp->list, &dev->priv.free_list);
	}
}
240 | ||
/*
 * Allocate one system page near the device's NUMA node, DMA-map it and
 * start tracking it (all 4K chunks free) for the given function.
 *
 * Returns 0 or a negative errno; on failure nothing is left allocated
 * or mapped (goto-based unwind).
 */
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct page *page;
	u64 addr;
	int err;
	int nid = dev_to_node(&dev->pdev->dev);

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
	addr = dma_map_page(&dev->pdev->dev, page, 0,
			    PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto out_alloc;
	}
	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		goto out_mapping;
	}

	return 0;

out_mapping:
	dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

out_alloc:
	__free_page(page);

	return err;
}
e126ba97 EC |
276 | static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, |
277 | int notify_fail) | |
278 | { | |
279 | struct mlx5_manage_pages_inbox *in; | |
280 | struct mlx5_manage_pages_outbox out; | |
952f5f6e | 281 | struct mlx5_manage_pages_inbox *nin; |
e126ba97 EC |
282 | int inlen; |
283 | u64 addr; | |
284 | int err; | |
285 | int i; | |
286 | ||
287 | inlen = sizeof(*in) + npages * sizeof(in->pas[0]); | |
288 | in = mlx5_vzalloc(inlen); | |
289 | if (!in) { | |
290 | mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); | |
291 | return -ENOMEM; | |
292 | } | |
293 | memset(&out, 0, sizeof(out)); | |
294 | ||
295 | for (i = 0; i < npages; i++) { | |
bf0bf77f EC |
296 | retry: |
297 | err = alloc_4k(dev, &addr); | |
e126ba97 | 298 | if (err) { |
bf0bf77f EC |
299 | if (err == -ENOMEM) |
300 | err = alloc_system_page(dev, func_id); | |
301 | if (err) | |
302 | goto out_4k; | |
303 | ||
304 | goto retry; | |
e126ba97 EC |
305 | } |
306 | in->pas[i] = cpu_to_be64(addr); | |
307 | } | |
308 | ||
309 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | |
310 | in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); | |
311 | in->func_id = cpu_to_be16(func_id); | |
0a324f31 | 312 | in->num_entries = cpu_to_be32(npages); |
e126ba97 | 313 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); |
e126ba97 | 314 | if (err) { |
1a91de28 JP |
315 | mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", |
316 | func_id, npages, err); | |
e126ba97 EC |
317 | goto out_alloc; |
318 | } | |
319 | dev->priv.fw_pages += npages; | |
320 | ||
321 | if (out.hdr.status) { | |
322 | err = mlx5_cmd_status_to_err(&out.hdr); | |
323 | if (err) { | |
1a91de28 JP |
324 | mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", |
325 | func_id, npages, out.hdr.status); | |
e126ba97 EC |
326 | goto out_alloc; |
327 | } | |
328 | } | |
329 | ||
330 | mlx5_core_dbg(dev, "err %d\n", err); | |
331 | ||
332 | goto out_free; | |
333 | ||
334 | out_alloc: | |
335 | if (notify_fail) { | |
952f5f6e EC |
336 | nin = kzalloc(sizeof(*nin), GFP_KERNEL); |
337 | if (!nin) { | |
338 | mlx5_core_warn(dev, "allocation failed\n"); | |
bf0bf77f | 339 | goto out_4k; |
952f5f6e | 340 | } |
e126ba97 | 341 | memset(&out, 0, sizeof(out)); |
952f5f6e EC |
342 | nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); |
343 | nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); | |
344 | if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out))) | |
345 | mlx5_core_warn(dev, "page notify failed\n"); | |
346 | kfree(nin); | |
e126ba97 | 347 | } |
952f5f6e | 348 | |
bf0bf77f EC |
349 | out_4k: |
350 | for (i--; i >= 0; i--) | |
351 | free_4k(dev, be64_to_cpu(in->pas[i])); | |
e126ba97 | 352 | out_free: |
479163f4 | 353 | kvfree(in); |
e126ba97 EC |
354 | return err; |
355 | } | |
356 | ||
357 | static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |
358 | int *nclaimed) | |
359 | { | |
360 | struct mlx5_manage_pages_inbox in; | |
361 | struct mlx5_manage_pages_outbox *out; | |
e126ba97 EC |
362 | int num_claimed; |
363 | int outlen; | |
364 | u64 addr; | |
365 | int err; | |
366 | int i; | |
367 | ||
dabed0e6 EC |
368 | if (nclaimed) |
369 | *nclaimed = 0; | |
370 | ||
e126ba97 EC |
371 | memset(&in, 0, sizeof(in)); |
372 | outlen = sizeof(*out) + npages * sizeof(out->pas[0]); | |
373 | out = mlx5_vzalloc(outlen); | |
374 | if (!out) | |
375 | return -ENOMEM; | |
376 | ||
377 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | |
378 | in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); | |
379 | in.func_id = cpu_to_be16(func_id); | |
0a324f31 | 380 | in.num_entries = cpu_to_be32(npages); |
e126ba97 EC |
381 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); |
382 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); | |
383 | if (err) { | |
1a91de28 | 384 | mlx5_core_err(dev, "failed reclaiming pages\n"); |
e126ba97 EC |
385 | goto out_free; |
386 | } | |
387 | dev->priv.fw_pages -= npages; | |
388 | ||
389 | if (out->hdr.status) { | |
390 | err = mlx5_cmd_status_to_err(&out->hdr); | |
391 | goto out_free; | |
392 | } | |
393 | ||
0a324f31 | 394 | num_claimed = be32_to_cpu(out->num_entries); |
e126ba97 EC |
395 | if (nclaimed) |
396 | *nclaimed = num_claimed; | |
397 | ||
398 | for (i = 0; i < num_claimed; i++) { | |
399 | addr = be64_to_cpu(out->pas[i]); | |
bf0bf77f | 400 | free_4k(dev, addr); |
e126ba97 EC |
401 | } |
402 | ||
403 | out_free: | |
479163f4 | 404 | kvfree(out); |
e126ba97 EC |
405 | return err; |
406 | } | |
407 | ||
408 | static void pages_work_handler(struct work_struct *work) | |
409 | { | |
410 | struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work); | |
411 | struct mlx5_core_dev *dev = req->dev; | |
412 | int err = 0; | |
413 | ||
414 | if (req->npages < 0) | |
415 | err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); | |
416 | else if (req->npages > 0) | |
417 | err = give_pages(dev, req->func_id, req->npages, 1); | |
418 | ||
419 | if (err) | |
1a91de28 JP |
420 | mlx5_core_warn(dev, "%s fail %d\n", |
421 | req->npages < 0 ? "reclaim" : "give", err); | |
e126ba97 EC |
422 | |
423 | kfree(req); | |
424 | } | |
425 | ||
426 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | |
0a324f31 | 427 | s32 npages) |
e126ba97 EC |
428 | { |
429 | struct mlx5_pages_req *req; | |
430 | ||
431 | req = kzalloc(sizeof(*req), GFP_ATOMIC); | |
432 | if (!req) { | |
433 | mlx5_core_warn(dev, "failed to allocate pages request\n"); | |
434 | return; | |
435 | } | |
436 | ||
437 | req->dev = dev; | |
438 | req->func_id = func_id; | |
439 | req->npages = npages; | |
440 | INIT_WORK(&req->work, pages_work_handler); | |
441 | queue_work(dev->priv.pg_wq, &req->work); | |
442 | } | |
443 | ||
/*
 * Query firmware for its boot- or init-stage page requirement and supply
 * the pages synchronously (no failure notification).  Returns 0 or a
 * negative errno.
 */
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}
459 | ||
enum {
	/* number of command mailbox blocks budgeted for a reclaim output */
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};
463 | ||
/*
 * Compute how many page addresses fit in one MANAGE_PAGES(TAKE) output
 * when it spans the inline output area plus MLX5_BLKS_FOR_RECLAIM_PAGES
 * mailbox data blocks — i.e. the largest reclaim batch worth requesting
 * per command.
 */
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       sizeof(struct mlx5_manage_pages_outbox)) /
	       FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);

	return ret;
}
476 | ||
/*
 * Reclaim every page still held by firmware (e.g. at teardown), batch by
 * batch.  The timeout is a sliding window: each batch that actually
 * returns pages re-arms a MAX_RECLAIM_TIME_MSECS deadline, so we only
 * give up after that long with no progress.  Returns 0 (also on timeout,
 * after a warning) or a negative errno from reclaim_pages().
 */
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(),
					    &nclaimed);
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}
			/* progress made: push the deadline out again */
			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	return 0;
}
508 | ||
/* Initialize the page-tracking structures (empty rbtree and free list). */
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
}
514 | ||
/* Counterpart of mlx5_pagealloc_init(); currently nothing to tear down
 * (pages are released via mlx5_reclaim_startup_pages()/free_4k()).
 */
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}
519 | ||
520 | int mlx5_pagealloc_start(struct mlx5_core_dev *dev) | |
521 | { | |
522 | dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); | |
523 | if (!dev->priv.pg_wq) | |
524 | return -ENOMEM; | |
525 | ||
526 | return 0; | |
527 | } | |
528 | ||
/* Flush and destroy the page-allocator workqueue created by
 * mlx5_pagealloc_start().
 */
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}