net/mlx5: Enhance debug print in page allocation failure
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"

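/* op_mod values for the MANAGE_PAGES command */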
enum {
        MLX5_PAGES_CANT_GIVE    = 0,
        MLX5_PAGES_GIVE         = 1,
        MLX5_PAGES_TAKE         = 2
};

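/* Work item servicing a single firmware page request event */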
struct mlx5_pages_req {
        struct mlx5_core_dev *dev;
        u16     func_id;
        u8      ec_function;
        s32     npages;
        struct work_struct work;
        u8      release_all;
};

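/*
 * Tracks one host page given to firmware. The host page is carved into
 * MLX5_NUM_4K_IN_PAGE chunks of MLX5_ADAPTER_PAGE_SIZE; set bits in
 * 'bitmask' mark the chunks that are still free.
 */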
struct fw_page {
        struct rb_node          rb_node;
        u64                     addr;
        struct page            *page;
        u32                     function;
        unsigned long           bitmask;
        struct list_head        list;
        unsigned int            free_count;
};

enum {
        MLX5_MAX_RECLAIM_TIME_MILI      = 5000,
        MLX5_NUM_4K_IN_PAGE             = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

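/* Pack function_id and the ec_function flag into a single xarray key */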
static u32 get_function(u16 func_id, bool ec_function)
{
        return (u32)func_id | (ec_function << 16);
}

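/* Look up, or lazily allocate, the page rb_root for this function */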
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
        struct rb_root *root;
        int err;

        root = xa_load(&dev->priv.page_root_xa, function);
        if (root)
                return root;

        root = kzalloc(sizeof(*root), GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);

        err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
        if (err) {
                kfree(root);
                return ERR_PTR(err);
        }

        *root = RB_ROOT;

        return root;
}

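/*
 * Insert a newly allocated host page into the per-function tree, keyed
 * by DMA address, and put it on the free list with all 4K chunks free.
 */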
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
        struct rb_node *parent = NULL;
        struct rb_root *root;
        struct rb_node **new;
        struct fw_page *nfp;
        struct fw_page *tfp;
        int i;

        root = page_root_per_function(dev, function);
        if (IS_ERR(root))
                return PTR_ERR(root);

        new = &root->rb_node;

        while (*new) {
                parent = *new;
                tfp = rb_entry(parent, struct fw_page, rb_node);
                if (tfp->addr < addr)
                        new = &parent->rb_right;
                else if (tfp->addr > addr)
                        new = &parent->rb_left;
                else
                        return -EEXIST;
        }

        nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
        if (!nfp)
                return -ENOMEM;

        nfp->addr = addr;
        nfp->page = page;
        nfp->function = function;
        nfp->free_count = MLX5_NUM_4K_IN_PAGE;
        for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
                set_bit(i, &nfp->bitmask);

        rb_link_node(&nfp->rb_node, parent, new);
        rb_insert_color(&nfp->rb_node, root);
        list_add(&nfp->list, &dev->priv.free_list);

        return 0;
}

static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
                                    u32 function)
{
        struct fw_page *result = NULL;
        struct rb_root *root;
        struct rb_node *tmp;
        struct fw_page *tfp;

        root = xa_load(&dev->priv.page_root_xa, function);
        if (WARN_ON_ONCE(!root))
                return NULL;

        tmp = root->rb_node;

        while (tmp) {
                tfp = rb_entry(tmp, struct fw_page, rb_node);
                if (tfp->addr < addr) {
                        tmp = tmp->rb_right;
                } else if (tfp->addr > addr) {
                        tmp = tmp->rb_left;
                } else {
                        result = tfp;
                        break;
                }
        }

        return result;
}

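/* Query how many pages firmware needs for the boot or init stage */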
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                                s32 *npages, int boot)
{
        u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
        u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
        int err;

        MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
        MLX5_SET(query_pages_in, in, op_mod, boot ?
                 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
                 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
        MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

        err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
        if (err)
                return err;

        *npages = MLX5_GET(query_pages_out, out, num_pages);
        *func_id = MLX5_GET(query_pages_out, out, function_id);

        return err;
}

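/*
 * Hand out one free 4K chunk belonging to 'function': take the first
 * matching fw_page on the free list, claim the first free chunk in its
 * bitmask, and drop the fw_page from the free list once its last chunk
 * is taken.
 */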
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
        struct fw_page *fp = NULL;
        struct fw_page *iter;
        unsigned n;

        list_for_each_entry(iter, &dev->priv.free_list, list) {
                if (iter->function != function)
                        continue;
                fp = iter;
                break;
        }

        if (list_empty(&dev->priv.free_list) || !fp)
                return -ENOMEM;

        n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
        if (n >= MLX5_NUM_4K_IN_PAGE) {
                mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
                               fp->addr, n, fp->bitmask,  MLX5_NUM_4K_IN_PAGE);
                return -ENOENT;
        }
        clear_bit(n, &fp->bitmask);
        fp->free_count--;
        if (!fp->free_count)
                list_del(&fp->list);

        *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

        return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

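/* Unlink a fw_page from the tree, DMA-unmap it and release the host page */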
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
                     bool in_free_list)
{
        struct rb_root *root;

        root = xa_load(&dev->priv.page_root_xa, fwp->function);
        if (WARN_ON_ONCE(!root))
                return;

        rb_erase(&fwp->rb_node, root);
        if (in_free_list)
                list_del(&fwp->list);
        dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
                       PAGE_SIZE, DMA_BIDIRECTIONAL);
        __free_page(fwp->page);
        kfree(fwp);
}

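/*
 * Return one 4K chunk at 'addr'. The owning fw_page goes back on the
 * free list when its first chunk is freed, and is released entirely
 * once all of its chunks are free again.
 */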
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
        struct fw_page *fwp;
        int n;

        fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
        if (!fwp) {
                mlx5_core_warn_rl(dev, "page not found\n");
                return;
        }
        n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
        fwp->free_count++;
        set_bit(n, &fwp->bitmask);
        if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
                free_fwp(dev, fwp, fwp->free_count != 1);
        else if (fwp->free_count == 1)
                list_add(&fwp->list, &dev->priv.free_list);
}

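/*
 * Allocate a host page near the device's NUMA node, DMA-map it and add
 * it to the tracking tree. If the mapping lands at bus address 0, which
 * firmware cannot use, the page is mapped again while the zero mapping
 * is held; the zero mapping is released on the way out.
 */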
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
        struct device *device = mlx5_core_dma_dev(dev);
        int nid = dev_to_node(device);
        struct page *page;
        u64 zero_addr = 1;
        u64 addr;
        int err;

        page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
        if (!page) {
                mlx5_core_warn(dev, "failed to allocate page\n");
                return -ENOMEM;
        }
map:
        addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(device, addr)) {
                mlx5_core_warn(dev, "failed dma mapping page\n");
                err = -ENOMEM;
                goto err_mapping;
        }

        /* Firmware doesn't support page with physical address 0 */
        if (addr == 0) {
                zero_addr = addr;
                goto map;
        }

        err = insert_page(dev, addr, page, function);
        if (err) {
                mlx5_core_err(dev, "failed to track allocated page\n");
                dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
        }

err_mapping:
        if (err)
                __free_page(page);

        if (zero_addr == 0)
                dma_unmap_page(device, zero_addr, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);

        return err;
}

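/* Tell firmware we cannot satisfy its page request (MLX5_PAGES_CANT_GIVE) */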
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
                             bool ec_function)
{
        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
        int err;

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

        err = mlx5_cmd_exec_in(dev, manage_pages, in);
        if (err)
                mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
                               func_id, err);
}

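/*
 * Allocate 'npages' 4K chunks and post them to firmware with
 * MANAGE_PAGES(MLX5_PAGES_GIVE). On failure the already-allocated
 * chunks are returned and, for FW-triggered requests, the failure is
 * reported back via MLX5_PAGES_CANT_GIVE.
 */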
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                      int event, bool ec_function)
{
        u32 function = get_function(func_id, ec_function);
        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
        int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
        int notify_fail = event;
        u64 addr;
        int err;
        u32 *in;
        int i;

        inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
                goto out_free;
        }

        for (i = 0; i < npages; i++) {
retry:
                err = alloc_4k(dev, &addr, function);
                if (err) {
                        if (err == -ENOMEM)
                                err = alloc_system_page(dev, function);
                        if (err) {
                                dev->priv.fw_pages_alloc_failed += (npages - i);
                                goto out_4k;
                        }

                        goto retry;
                }
                MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
        }

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

        err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
        if (err == -EREMOTEIO) {
                notify_fail = 0;
                /* if triggered by FW and failed by FW ignore */
                if (event) {
                        err = 0;
                        goto out_dropped;
                }
        }
        err = mlx5_cmd_check(dev, err, in, out);
        if (err) {
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
                               func_id, npages, err);
                goto out_dropped;
        }

        dev->priv.fw_pages += npages;
        if (func_id)
                dev->priv.vfs_pages += npages;
        else if (mlx5_core_is_ecpf(dev) && !ec_function)
                dev->priv.host_pf_pages += npages;

        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
                      npages, ec_function, func_id, err);

        kvfree(in);
        return 0;

out_dropped:
        dev->priv.give_pages_dropped += npages;
out_4k:
        for (i--; i >= 0; i--)
                free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
        kvfree(in);
        if (notify_fail)
                page_notify_fail(dev, func_id, ec_function);
        return err;
}

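/*
 * Drop every page tracked for this function without talking to
 * firmware; used when FW asks the driver to release all of its pages.
 */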
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
                              bool ec_function)
{
        u32 function = get_function(func_id, ec_function);
        struct rb_root *root;
        struct rb_node *p;
        int npages = 0;

        root = xa_load(&dev->priv.page_root_xa, function);
        if (WARN_ON_ONCE(!root))
                return;

        p = rb_first(root);
        while (p) {
                struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

                p = rb_next(p);
                npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
                free_fwp(dev, fwp, fwp->free_count);
        }

        dev->priv.fw_pages -= npages;
        if (func_id)
                dev->priv.vfs_pages -= npages;
        else if (mlx5_core_is_ecpf(dev) && !ec_function)
                dev->priv.host_pf_pages -= npages;

        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
                      npages, ec_function, func_id);
}

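/*
 * Fill 'out' with the addresses of up to 'npages' in-use 4K chunks of
 * 'fwp' (the clear bits of its bitmask), starting at 'index'.
 */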
static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
                                     u32 npages)
{
        u32 pages_set = 0;
        unsigned int n;

        for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
                MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
                                 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
                pages_set++;

                if (!--npages)
                        break;
        }

        return pages_set;
}

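/*
 * Issue the reclaim command or, if the command interface is down,
 * synthesize the firmware reply by walking the page tree so the driver
 * can still take its pages back.
 */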
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
                             u32 *in, int in_size, u32 *out, int out_size)
{
        struct rb_root *root;
        struct fw_page *fwp;
        struct rb_node *p;
        bool ec_function;
        u32 func_id;
        u32 npages;
        u32 i = 0;

        if (!mlx5_cmd_is_down(dev))
                return mlx5_cmd_do(dev, in, in_size, out, out_size);

        /* No hard feelings, we want our pages back! */
        npages = MLX5_GET(manage_pages_in, in, input_num_entries);
        func_id = MLX5_GET(manage_pages_in, in, function_id);
        ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

        root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
        if (WARN_ON_ONCE(!root))
                return -EEXIST;

        p = rb_first(root);
        while (p && i < npages) {
                fwp = rb_entry(p, struct fw_page, rb_node);
                p = rb_next(p);

                i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
        }

        MLX5_SET(manage_pages_out, out, output_num_entries, i);
        return 0;
}

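/*
 * Ask firmware to return up to 'npages' pages for 'func_id' via
 * MANAGE_PAGES(MLX5_PAGES_TAKE) and free each returned 4K chunk.
 */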
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                         int *nclaimed, bool event, bool ec_function)
{
        u32 function = get_function(func_id, ec_function);
        int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
        int num_claimed;
        u32 *out;
        int err;
        int i;

        if (nclaimed)
                *nclaimed = 0;

        outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

        mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
                      func_id, npages, outlen);
        err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
        if (err) {
                npages = MLX5_GET(manage_pages_in, in, input_num_entries);
                dev->priv.reclaim_pages_discard += npages;
        }
        /* if triggered by FW event and failed by FW then ignore */
        if (event && err == -EREMOTEIO) {
                err = 0;
                goto out_free;
        }

        err = mlx5_cmd_check(dev, err, in, out);
        if (err) {
                mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
                goto out_free;
        }

        num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
        if (num_claimed > npages) {
                mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
                               num_claimed, npages);
                err = -EINVAL;
                goto out_free;
        }

        for (i = 0; i < num_claimed; i++)
                free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

        if (nclaimed)
                *nclaimed = num_claimed;

        dev->priv.fw_pages -= num_claimed;
        if (func_id)
                dev->priv.vfs_pages -= num_claimed;
        else if (mlx5_core_is_ecpf(dev) && !ec_function)
                dev->priv.host_pf_pages -= num_claimed;

out_free:
        kvfree(out);
        return err;
}

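/* Service a queued page request: release all, reclaim, or give pages */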
static void pages_work_handler(struct work_struct *work)
{
        struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
        struct mlx5_core_dev *dev = req->dev;
        int err = 0;

        if (req->release_all)
                release_all_pages(dev, req->func_id, req->ec_function);
        else if (req->npages < 0)
                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
                                    true, req->ec_function);
        else if (req->npages > 0)
                err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

        if (err)
                mlx5_core_warn(dev, "%s fail %d\n",
                               req->npages < 0 ? "reclaim" : "give", err);

        kfree(req);
}

enum {
        EC_FUNCTION_MASK = 0x8000,
        RELEASE_ALL_PAGES_MASK = 0x4000,
};

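/* PAGE_REQUEST EQE handler: decode the event and queue a work item */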
static int req_pages_handler(struct notifier_block *nb,
                             unsigned long type, void *data)
{
        struct mlx5_pages_req *req;
        struct mlx5_core_dev *dev;
        struct mlx5_priv *priv;
        struct mlx5_eqe *eqe;
        bool ec_function;
        bool release_all;
        u16 func_id;
        s32 npages;

        priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
        dev  = container_of(priv, struct mlx5_core_dev, priv);
        eqe  = data;

        func_id = be16_to_cpu(eqe->data.req_pages.func_id);
        npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
        ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
        release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
                      RELEASE_ALL_PAGES_MASK;
        mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
                      func_id, npages, release_all);
        req = kzalloc(sizeof(*req), GFP_ATOMIC);
        if (!req) {
                mlx5_core_warn(dev, "failed to allocate pages request\n");
                return NOTIFY_DONE;
        }

        req->dev = dev;
        req->func_id = func_id;
        req->npages = npages;
        req->ec_function = ec_function;
        req->release_all = release_all;
        INIT_WORK(&req->work, pages_work_handler);
        queue_work(dev->priv.pg_wq, &req->work);
        return NOTIFY_OK;
}

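/* Satisfy firmware's startup (boot or init stage) page requirement */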
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
        u16 func_id;
        s32 npages;
        int err;

        err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
        if (err)
                return err;

        mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
                      npages, boot ? "boot" : "init", func_id);

        return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
        MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

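/*
 * Number of page addresses that fit in a reclaim command reply built
 * from MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks.
 */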
static int optimal_reclaimed_pages(void)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_layout *lay;
        int ret;

        ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
               MLX5_ST_SZ_BYTES(manage_pages_out)) /
               MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

        return ret;
}

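/*
 * Reclaim pages for one function until its tree is empty; the timeout
 * is rearmed whenever firmware makes progress.
 */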
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
                                   struct rb_root *root, u16 func_id)
{
        u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
        unsigned long end = jiffies + recl_pages_to_jiffies;

        while (!RB_EMPTY_ROOT(root)) {
                int nclaimed;
                int err;

                err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
                                    &nclaimed, false, mlx5_core_is_ecpf(dev));
                if (err) {
                        mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
                                       err, func_id);
                        return err;
                }

                if (nclaimed)
                        end = jiffies + recl_pages_to_jiffies;

                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
                        break;
                }
        }

        return 0;
}

int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
        struct rb_root *root;
        unsigned long id;
        void *entry;

        xa_for_each(&dev->priv.page_root_xa, id, entry) {
                root = entry;
                mlx5_reclaim_root_pages(dev, root, id);
                xa_erase(&dev->priv.page_root_xa, id);
                kfree(root);
        }

        WARN_ON(!xa_empty(&dev->priv.page_root_xa));

        WARN(dev->priv.fw_pages,
             "FW pages counter is %d after reclaiming all pages\n",
             dev->priv.fw_pages);
        WARN(dev->priv.vfs_pages,
             "VFs FW pages counter is %d after reclaiming all pages\n",
             dev->priv.vfs_pages);
        WARN(dev->priv.host_pf_pages,
             "External host PF FW pages counter is %d after reclaiming all pages\n",
             dev->priv.host_pf_pages);

        return 0;
}

int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
        INIT_LIST_HEAD(&dev->priv.free_list);
        dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
        if (!dev->priv.pg_wq)
                return -ENOMEM;

        xa_init(&dev->priv.page_root_xa);
        mlx5_pages_debugfs_init(dev);

        return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
        mlx5_pages_debugfs_cleanup(dev);
        xa_destroy(&dev->priv.page_root_xa);
        destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
        MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
        mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
        mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
        flush_workqueue(dev->priv.pg_wq);
}

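/*
 * Sleep until *pages drops to zero, extending the deadline whenever
 * progress is observed.
 */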
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
        u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
        unsigned long end = jiffies + recl_vf_pages_to_jiffies;
        int prev_pages = *pages;

        /* In case of internal error we will free the pages manually later */
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                mlx5_core_warn(dev, "Skipping wait for vf pages stage");
                return 0;
        }

        mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
        while (*pages) {
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
                        return -ETIMEDOUT;
                }
                if (*pages < prev_pages) {
                        end = jiffies + recl_vf_pages_to_jiffies;
                        prev_pages = *pages;
                }
                msleep(50);
        }

        mlx5_core_dbg(dev, "All pages received\n");
        return 0;
}