drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"

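/*
 * Firmware page allocator.  The device firmware borrows host memory in
 * MLX5_ADAPTER_PAGE_SIZE (4K) units: the driver hands pages over with
 * MANAGE_PAGES(GIVE), takes them back with MANAGE_PAGES(TAKE), and
 * services asynchronous PAGE_REQUEST events from the pg_wq workqueue.
 * The op_mod values below select the MANAGE_PAGES operation.
 */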
enum {
        MLX5_PAGES_CANT_GIVE    = 0,
        MLX5_PAGES_GIVE         = 1,
        MLX5_PAGES_TAKE         = 2
};

struct mlx5_pages_req {
        struct mlx5_core_dev *dev;
        u16     func_id;
        u8      ec_function;
        s32     npages;
        struct work_struct work;
        u8      release_all;
};

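/*
 * One host page handed to firmware.  The page is split into
 * MLX5_NUM_4K_IN_PAGE chunks of MLX5_ADAPTER_PAGE_SIZE; @bitmask marks
 * which chunks are free and @free_count how many.  The backing page is
 * unmapped and released only once every chunk has been reclaimed.
 */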
struct fw_page {
        struct rb_node          rb_node;
        u64                     addr;
        struct page            *page;
        u32                     function;
        unsigned long           bitmask;
        struct list_head        list;
        unsigned int            free_count;
};

enum {
        MLX5_MAX_RECLAIM_TIME_MILI      = 5000,
        MLX5_NUM_4K_IN_PAGE             = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

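/* Key into page_root_xa: func_id in the low 16 bits, the EC bit above them. */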
static u32 get_function(u16 func_id, bool ec_function)
{
        return (u32)func_id | (ec_function << 16);
}

static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
{
        if (!func_id)
                return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;

        if (func_id <= max(mlx5_core_max_vfs(dev), mlx5_core_max_ec_vfs(dev))) {
                if (ec_function)
                        return MLX5_EC_VF;
                else
                        return MLX5_VF;
        }
        return MLX5_SF;
}

static u32 mlx5_get_ec_function(u32 function)
{
        return function >> 16;
}

static u32 mlx5_get_func_id(u32 function)
{
        return function & 0xffff;
}

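/*
 * Find, or lazily create, the red-black tree that tracks the pages given
 * to one function.  Trees live in page_root_xa, indexed by get_function().
 */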
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
        struct rb_root *root;
        int err;

        root = xa_load(&dev->priv.page_root_xa, function);
        if (root)
                return root;

        root = kzalloc(sizeof(*root), GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);

        err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
        if (err) {
                kfree(root);
                return ERR_PTR(err);
        }

        *root = RB_ROOT;

        return root;
}

static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
        struct rb_node *parent = NULL;
        struct rb_root *root;
        struct rb_node **new;
        struct fw_page *nfp;
        struct fw_page *tfp;
        int i;

        root = page_root_per_function(dev, function);
        if (IS_ERR(root))
                return PTR_ERR(root);

        new = &root->rb_node;

        while (*new) {
                parent = *new;
                tfp = rb_entry(parent, struct fw_page, rb_node);
                if (tfp->addr < addr)
                        new = &parent->rb_left;
                else if (tfp->addr > addr)
                        new = &parent->rb_right;
                else
                        return -EEXIST;
        }

        nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
        if (!nfp)
                return -ENOMEM;

        nfp->addr = addr;
        nfp->page = page;
        nfp->function = function;
        nfp->free_count = MLX5_NUM_4K_IN_PAGE;
        for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
                set_bit(i, &nfp->bitmask);

        rb_link_node(&nfp->rb_node, parent, new);
        rb_insert_color(&nfp->rb_node, root);
        list_add(&nfp->list, &dev->priv.free_list);

        return 0;
}

static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
                                    u32 function)
{
        struct fw_page *result = NULL;
        struct rb_root *root;
        struct rb_node *tmp;
        struct fw_page *tfp;

        root = xa_load(&dev->priv.page_root_xa, function);
        if (WARN_ON_ONCE(!root))
                return NULL;

        tmp = root->rb_node;

        while (tmp) {
                tfp = rb_entry(tmp, struct fw_page, rb_node);
                if (tfp->addr < addr) {
                        tmp = tmp->rb_left;
                } else if (tfp->addr > addr) {
                        tmp = tmp->rb_right;
                } else {
                        result = tfp;
                        break;
                }
        }

        return result;
}

static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                                s32 *npages, int boot)
{
        u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
        u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
        int err;

        MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
        MLX5_SET(query_pages_in, in, op_mod, boot ?
                 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
                 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
        MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

        err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
        if (err)
                return err;

        *npages = MLX5_GET(query_pages_out, out, num_pages);
        *func_id = MLX5_GET(query_pages_out, out, function_id);

        return err;
}

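/*
 * Carve one free 4K chunk for @function out of the free list.  Returns
 * -ENOMEM when no tracked page has a free chunk, in which case the
 * caller falls back to alloc_system_page().
 */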
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
        struct fw_page *fp = NULL;
        struct fw_page *iter;
        unsigned n;

        list_for_each_entry(iter, &dev->priv.free_list, list) {
                if (iter->function != function)
                        continue;
                fp = iter;
        }

        if (list_empty(&dev->priv.free_list) || !fp)
                return -ENOMEM;

        n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
        if (n >= MLX5_NUM_4K_IN_PAGE) {
                mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
                               fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
                return -ENOENT;
        }
        clear_bit(n, &fp->bitmask);
        fp->free_count--;
        if (!fp->free_count)
                list_del(&fp->list);

        *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

        return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
                     bool in_free_list)
{
        struct rb_root *root;

        root = xa_load(&dev->priv.page_root_xa, fwp->function);
        if (WARN_ON_ONCE(!root))
                return;

        rb_erase(&fwp->rb_node, root);
        if (in_free_list)
                list_del(&fwp->list);
        dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
                       PAGE_SIZE, DMA_BIDIRECTIONAL);
        __free_page(fwp->page);
        kfree(fwp);
}

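/*
 * Return one 4K chunk to its owning fw_page.  The page is put on the
 * free list when its first chunk comes back, and unmapped and freed via
 * free_fwp() once all MLX5_NUM_4K_IN_PAGE chunks are free.
 */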
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
        struct fw_page *fwp;
        int n;

        fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
        if (!fwp) {
                mlx5_core_warn_rl(dev, "page not found\n");
                return;
        }
        n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
        fwp->free_count++;
        set_bit(n, &fwp->bitmask);
        if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
                free_fwp(dev, fwp, fwp->free_count != 1);
        else if (fwp->free_count == 1)
                list_add(&fwp->list, &dev->priv.free_list);
}

static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
        struct device *device = mlx5_core_dma_dev(dev);
        int nid = dev_to_node(device);
        struct page *page;
        u64 zero_addr = 1;
        u64 addr;
        int err;

        page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
        if (!page) {
                mlx5_core_warn(dev, "failed to allocate page\n");
                return -ENOMEM;
        }
map:
        addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(device, addr)) {
                mlx5_core_warn(dev, "failed dma mapping page\n");
                err = -ENOMEM;
                goto err_mapping;
        }

        /* Firmware doesn't support page with physical address 0 */
        if (addr == 0) {
                zero_addr = addr;
                goto map;
        }

        err = insert_page(dev, addr, page, function);
        if (err) {
                mlx5_core_err(dev, "failed to track allocated page\n");
                dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
        }

err_mapping:
        if (err)
                __free_page(page);

        if (zero_addr == 0)
                dma_unmap_page(device, zero_addr, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);

        return err;
}

static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
                             bool ec_function)
{
        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
        int err;

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

        err = mlx5_cmd_exec_in(dev, manage_pages, in);
        if (err)
                mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
                               func_id, err);
}

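/*
 * Give @npages pages to firmware: collect 4K chunks, allocating fresh
 * system pages as needed, and post them with MANAGE_PAGES(GIVE).  On
 * failure the chunks are freed and, when servicing a firmware-initiated
 * request, the firmware is notified that pages cannot be given.
 */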
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                      int event, bool ec_function)
{
        u32 function = get_function(func_id, ec_function);
        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
        int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
        int notify_fail = event;
        u16 func_type;
        u64 addr;
        int err;
        u32 *in;
        int i;

        inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
                goto out_free;
        }

        for (i = 0; i < npages; i++) {
retry:
                err = alloc_4k(dev, &addr, function);
                if (err) {
                        if (err == -ENOMEM)
                                err = alloc_system_page(dev, function);
                        if (err) {
                                dev->priv.fw_pages_alloc_failed += (npages - i);
                                goto out_4k;
                        }

                        goto retry;
                }
                MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
        }

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

        err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
        if (err == -EREMOTEIO) {
                notify_fail = 0;
                /* if triggered by FW and failed by FW ignore */
                if (event) {
                        err = 0;
                        goto out_dropped;
                }
        }
        err = mlx5_cmd_check(dev, err, in, out);
        if (err) {
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
                               func_id, npages, err);
                goto out_dropped;
        }

        func_type = func_id_to_type(dev, func_id, ec_function);
        dev->priv.page_counters[func_type] += npages;
        dev->priv.fw_pages += npages;

        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
                      npages, ec_function, func_id, err);

        kvfree(in);
        return 0;

out_dropped:
        dev->priv.give_pages_dropped += npages;
out_4k:
        for (i--; i >= 0; i--)
                free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
        kvfree(in);
        if (notify_fail)
                page_notify_fail(dev, func_id, ec_function);
        return err;
}

static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
                              bool ec_function)
{
        u32 function = get_function(func_id, ec_function);
        struct rb_root *root;
        struct rb_node *p;
        int npages = 0;
        u16 func_type;

        root = xa_load(&dev->priv.page_root_xa, function);
        if (WARN_ON_ONCE(!root))
                return;

        p = rb_first(root);
        while (p) {
                struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

                p = rb_next(p);
                npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
                free_fwp(dev, fwp, fwp->free_count);
        }

        func_type = func_id_to_type(dev, func_id, ec_function);
        dev->priv.page_counters[func_type] -= npages;
        dev->priv.fw_pages -= npages;

        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
                      npages, ec_function, func_id);
}

static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
                                     u32 npages)
{
        u32 pages_set = 0;
        unsigned int n;

        for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
                MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
                                 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
                pages_set++;

                if (!--npages)
                        break;
        }

        return pages_set;
}

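/*
 * Issue MANAGE_PAGES(TAKE).  If the command interface is down the
 * firmware cannot answer, so the reply is synthesized locally from the
 * driver's own page tree and the pages are reclaimed anyway.
 */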
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
                             u32 *in, int in_size, u32 *out, int out_size)
{
        struct rb_root *root;
        struct fw_page *fwp;
        struct rb_node *p;
        bool ec_function;
        u32 func_id;
        u32 npages;
        u32 i = 0;

        if (!mlx5_cmd_is_down(dev))
                return mlx5_cmd_do(dev, in, in_size, out, out_size);

        /* No hard feelings, we want our pages back! */
        npages = MLX5_GET(manage_pages_in, in, input_num_entries);
        func_id = MLX5_GET(manage_pages_in, in, function_id);
        ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

        root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
        if (WARN_ON_ONCE(!root))
                return -EEXIST;

        p = rb_first(root);
        while (p && i < npages) {
                fwp = rb_entry(p, struct fw_page, rb_node);
                p = rb_next(p);

                i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
        }

        MLX5_SET(manage_pages_out, out, output_num_entries, i);
        return 0;
}

static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                         int *nclaimed, bool event, bool ec_function)
{
        u32 function = get_function(func_id, ec_function);
        int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
        int num_claimed;
        u16 func_type;
        u32 *out;
        int err;
        int i;

        if (nclaimed)
                *nclaimed = 0;

        outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);
        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

        mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
                      func_id, npages, outlen);
        err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
        if (err) {
                npages = MLX5_GET(manage_pages_in, in, input_num_entries);
                dev->priv.reclaim_pages_discard += npages;
        }
        /* if triggered by FW event and failed by FW then ignore */
        if (event && err == -EREMOTEIO) {
                err = 0;
                goto out_free;
        }

        err = mlx5_cmd_check(dev, err, in, out);
        if (err) {
                mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
                goto out_free;
        }

        num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
        if (num_claimed > npages) {
                mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
                               num_claimed, npages);
                err = -EINVAL;
                goto out_free;
        }

        for (i = 0; i < num_claimed; i++)
                free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

        if (nclaimed)
                *nclaimed = num_claimed;

        func_type = func_id_to_type(dev, func_id, ec_function);
        dev->priv.page_counters[func_type] -= num_claimed;
        dev->priv.fw_pages -= num_claimed;

out_free:
        kvfree(out);
        return err;
}

static void pages_work_handler(struct work_struct *work)
{
        struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
        struct mlx5_core_dev *dev = req->dev;
        int err = 0;

        if (req->release_all)
                release_all_pages(dev, req->func_id, req->ec_function);
        else if (req->npages < 0)
                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
                                    true, req->ec_function);
        else if (req->npages > 0)
                err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

        if (err)
                mlx5_core_warn(dev, "%s fail %d\n",
                               req->npages < 0 ? "reclaim" : "give", err);

        kfree(req);
}

enum {
        EC_FUNCTION_MASK = 0x8000,
        RELEASE_ALL_PAGES_MASK = 0x4000,
};

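/*
 * PAGE_REQUEST EQE notifier.  Runs in atomic context, so the actual
 * give/reclaim/release work is deferred to the pg_wq workqueue.
 */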
static int req_pages_handler(struct notifier_block *nb,
                             unsigned long type, void *data)
{
        struct mlx5_pages_req *req;
        struct mlx5_core_dev *dev;
        struct mlx5_priv *priv;
        struct mlx5_eqe *eqe;
        bool ec_function;
        bool release_all;
        u16 func_id;
        s32 npages;

        priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
        dev = container_of(priv, struct mlx5_core_dev, priv);
        eqe = data;

        func_id = be16_to_cpu(eqe->data.req_pages.func_id);
        npages = be32_to_cpu(eqe->data.req_pages.num_pages);
        ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
        release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
                      RELEASE_ALL_PAGES_MASK;
        mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
                      func_id, npages, release_all);
        req = kzalloc(sizeof(*req), GFP_ATOMIC);
        if (!req) {
                mlx5_core_warn(dev, "failed to allocate pages request\n");
                return NOTIFY_DONE;
        }

        req->dev = dev;
        req->func_id = func_id;
        req->npages = npages;
        req->ec_function = ec_function;
        req->release_all = release_all;
        INIT_WORK(&req->work, pages_work_handler);
        queue_work(dev->priv.pg_wq, &req->work);
        return NOTIFY_OK;
}

int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
        u16 func_id;
        s32 npages;
        int err;

        err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
        if (err)
                return err;

        mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
                      npages, boot ? "boot" : "init", func_id);

        return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
        MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

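/*
 * Number of 4K page addresses that fit in one reclaim reply: the inline
 * command output plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks, minus
 * the fixed manage_pages_out header.
 */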
static int optimal_reclaimed_pages(void)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_layout *lay;
        int ret;

        ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
               MLX5_ST_SZ_BYTES(manage_pages_out)) /
               MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

        return ret;
}

static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
                                   struct rb_root *root, u32 function)
{
        u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
        unsigned long end = jiffies + recl_pages_to_jiffies;

        while (!RB_EMPTY_ROOT(root)) {
                u32 ec_function = mlx5_get_ec_function(function);
                u32 function_id = mlx5_get_func_id(function);
                int nclaimed;
                int err;

                err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
                                    &nclaimed, false, ec_function);
                if (err) {
                        mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
                                       err, function_id, ec_function);
                        return err;
                }

                if (nclaimed)
                        end = jiffies + recl_pages_to_jiffies;

                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
                        break;
                }
        }

        return 0;
}

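/*
 * Reclaim every page from every function and tear down the per-function
 * trees.  A non-zero counter after this means pages were never returned
 * by firmware.
 */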
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
        struct rb_root *root;
        unsigned long id;
        void *entry;

        xa_for_each(&dev->priv.page_root_xa, id, entry) {
                root = entry;
                mlx5_reclaim_root_pages(dev, root, id);
                xa_erase(&dev->priv.page_root_xa, id);
                kfree(root);
        }

        WARN_ON(!xa_empty(&dev->priv.page_root_xa));

        WARN(dev->priv.fw_pages,
             "FW pages counter is %d after reclaiming all pages\n",
             dev->priv.fw_pages);
        WARN(dev->priv.page_counters[MLX5_VF],
             "VFs FW pages counter is %d after reclaiming all pages\n",
             dev->priv.page_counters[MLX5_VF]);
        WARN(dev->priv.page_counters[MLX5_HOST_PF],
             "External host PF FW pages counter is %d after reclaiming all pages\n",
             dev->priv.page_counters[MLX5_HOST_PF]);
        WARN(dev->priv.page_counters[MLX5_EC_VF],
             "EC VFs FW pages counter is %d after reclaiming all pages\n",
             dev->priv.page_counters[MLX5_EC_VF]);

        return 0;
}

int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
        INIT_LIST_HEAD(&dev->priv.free_list);
        dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
        if (!dev->priv.pg_wq)
                return -ENOMEM;

        xa_init(&dev->priv.page_root_xa);
        mlx5_pages_debugfs_init(dev);

        return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
        mlx5_pages_debugfs_cleanup(dev);
        xa_destroy(&dev->priv.page_root_xa);
        destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
        MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
        mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
        mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
        flush_workqueue(dev->priv.pg_wq);
}

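/*
 * Poll until @pages drops to zero (e.g. while VFs return their pages on
 * teardown).  The timeout is rearmed whenever progress is observed.
 */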
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
        u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
        unsigned long end = jiffies + recl_vf_pages_to_jiffies;
        int prev_pages = *pages;

        /* In case of internal error we will free the pages manually later */
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                mlx5_core_warn(dev, "Skipping wait for vf pages stage");
                return 0;
        }

        mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
        while (*pages) {
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
                        return -ETIMEDOUT;
                }
                if (*pages < prev_pages) {
                        end = jiffies + recl_vf_pages_to_jiffies;
                        prev_pages = *pages;
                }
                msleep(50);
        }

        mlx5_core_dbg(dev, "All pages received\n");
        return 0;
}