/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm-generic/kmap_types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
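
/* op_mod values for the MANAGE_PAGES command (driver gives pages,
 * firmware returns them, or the driver reports it cannot supply them),
 * followed by the op_mod values for QUERY_PAGES, which select the
 * firmware initialization stage the page count is queried for.
 */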
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

enum {
	MLX5_BOOT_PAGES		= 1,
	MLX5_INIT_PAGES		= 2,
	MLX5_POST_INIT_PAGES	= 3
};

struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	s32	npages;
	struct work_struct work;
};

struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u16			func_id;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

struct mlx5_query_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_query_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_pages;
};

struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_entries;
	__be64			pas[0];
};

struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be32			num_entries;
	u8			rsvd[4];
	__be64			pas[0];
};

enum {
	MAX_RECLAIM_TIME_MSECS	= 5000,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
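
/* Firmware pages are MLX5_ADAPTER_PAGE_SIZE (4K) units, which may be
 * smaller than the host PAGE_SIZE.  Each struct fw_page tracks one host
 * page: an rb-tree keyed by DMA address allows lookup on free, pages
 * that still have free 4K chunks sit on priv.free_list, and the bitmask
 * records which chunks are available.
 */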
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_right;
		else if (tfp->addr > addr)
			new = &parent->rb_left;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}
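
/* Look up the fw_page tracking structure by page-aligned DMA address. */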
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct fw_page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_right;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_left;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}
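
/* Query how many pages the firmware requests for the given
 * initialization stage (boot or init) and for which function.
 */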
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	struct mlx5_query_pages_inbox	in;
	struct mlx5_query_pages_outbox	out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
	in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);

	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	*npages = be32_to_cpu(out.num_pages);
	*func_id = be16_to_cpu(out.func_id);

	return 0;
}
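
/* Carve a free 4K chunk out of a previously allocated host page. */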
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct fw_page *fp;
	unsigned n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}
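
/* Return a 4K chunk to its owning host page; once all chunks are free,
 * unmap and release the host page itself.
 */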
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		list_add(&fwp->list, &dev->priv.free_list);
	}
}
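
/* Allocate a fresh host page, DMA-map it and add it to the tracking
 * tree and free list.
 */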
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct page *page;
	u64 addr;
	int err;

	page = alloc_page(GFP_HIGHUSER);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
	addr = dma_map_page(&dev->pdev->dev, page, 0,
			    PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto out_alloc;
	}
	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		goto out_mapping;
	}

	return 0;

out_mapping:
	dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

out_alloc:
	__free_page(page);

	return err;
}
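
/* Hand npages 4K pages to the firmware via MANAGE_PAGES.  On failure,
 * optionally notify the firmware that the request cannot be satisfied
 * and release everything allocated so far.
 */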
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	struct mlx5_manage_pages_inbox *nin;
	int inlen;
	u64 addr;
	int err;
	int i;

	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		return -ENOMEM;
	}
	memset(&out, 0, sizeof(out));

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_4k;

			goto retry;
		}
		in->pas[i] = cpu_to_be64(addr);
	}

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
	in->func_id = cpu_to_be16(func_id);
	in->num_entries = cpu_to_be32(npages);
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_alloc;
	}
	dev->priv.fw_pages += npages;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		if (err) {
			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
				       func_id, npages, out.hdr.status);
			goto out_alloc;
		}
	}

	mlx5_core_dbg(dev, "err %d\n", err);

	goto out_free;

out_alloc:
	if (notify_fail) {
		nin = kzalloc(sizeof(*nin), GFP_KERNEL);
		if (!nin) {
			mlx5_core_warn(dev, "allocation failed\n");
			goto out_4k;
		}
		memset(&out, 0, sizeof(out));
		nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
		nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
		if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
			mlx5_core_warn(dev, "page notify failed\n");
		kfree(nin);
	}

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
	mlx5_vfree(in);
	return err;
}
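
/* Take up to npages 4K pages back from the firmware via MANAGE_PAGES
 * and free each returned address.
 */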
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	struct mlx5_manage_pages_inbox   in;
	struct mlx5_manage_pages_outbox *out;
	int num_claimed;
	int outlen;
	u64 addr;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	memset(&in, 0, sizeof(in));
	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
	in.func_id = cpu_to_be16(func_id);
	in.num_entries = cpu_to_be32(npages);
	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}
	dev->priv.fw_pages -= npages;

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_free;
	}

	num_claimed = be32_to_cpu(out->num_entries);
	if (nclaimed)
		*nclaimed = num_claimed;

	for (i = 0; i < num_claimed; i++) {
		addr = be64_to_cpu(out->pas[i]);
		free_4k(dev, addr);
	}

out_free:
	mlx5_vfree(out);
	return err;
}
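
/* Worker: a negative request reclaims pages, a positive one gives
 * pages to the firmware.
 */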
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}
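
/* Called from the event handler on a firmware page request event;
 * defers the actual work to the page allocator workqueue.
 */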
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
}

int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}
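
/* Upper bound on pages reclaimable in one command: the immediate
 * mailbox output plus MLX5_BLKS_FOR_RECLAIM_PAGES protection blocks'
 * worth of page addresses.
 */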
enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       sizeof(struct mlx5_manage_pages_outbox)) /
	       FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);

	return ret;
}
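
/* Reclaim all pages from the firmware at teardown, resetting the
 * timeout whenever progress is made.
 */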
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(),
					    &nclaimed);
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}
			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	return 0;
}
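
/* Page allocator lifecycle: init/cleanup manage the tracking
 * structures, start/stop manage the request workqueue.
 */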
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}