treewide: kzalloc() -> kcalloc()
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / fpga / ipsec.c
CommitLineData
bebb23e6
IT
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
d6c4f029 34#include <linux/rhashtable.h>
bebb23e6 35#include <linux/mlx5/driver.h>
05564d0a
AY
36#include <linux/mlx5/fs_helpers.h>
37#include <linux/mlx5/fs.h>
38#include <linux/rbtree.h>
bebb23e6
IT
39
40#include "mlx5_core.h"
05564d0a 41#include "fs_cmd.h"
bebb23e6
IT
42#include "fpga/ipsec.h"
43#include "fpga/sdk.h"
44#include "fpga/core.h"
45
581fddde
YK
/* Lifecycle of one in-flight FPGA IPSec command context. */
enum mlx5_fpga_ipsec_cmd_status {
	MLX5_FPGA_IPSEC_CMD_PENDING,	/* posted, no response yet */
	MLX5_FPGA_IPSEC_CMD_SEND_FAIL,	/* QP send failed; no response will come */
	MLX5_FPGA_IPSEC_CMD_COMPLETE,	/* response received and recorded */
};
51
d6c4f029 52struct mlx5_fpga_ipsec_cmd_context {
bebb23e6 53 struct mlx5_fpga_dma_buf buf;
581fddde
YK
54 enum mlx5_fpga_ipsec_cmd_status status;
55 struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
bebb23e6
IT
56 int status_code;
57 struct completion complete;
58 struct mlx5_fpga_device *dev;
59 struct list_head list; /* Item in pending_cmds */
581fddde 60 u8 command[0];
bebb23e6
IT
61};
62
d6c4f029
AY
struct mlx5_fpga_esp_xfrm;

/* One hardware SA plus its hash linkage.  @hw_sa is both the blob sent to
 * the FPGA and the rhashtable key (see rhash_sa below). */
struct mlx5_fpga_ipsec_sa_ctx {
	struct rhash_head hash;
	struct mlx5_ifc_fpga_ipsec_sa hw_sa;
	struct mlx5_core_dev *dev;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;	/* owning xfrm, set on bind */
};

/* FPGA-side wrapper of an accel xfrm.  Several flow rules may share one
 * SA context; @num_rules counts them and @lock guards the binding. */
struct mlx5_fpga_esp_xfrm {
	unsigned int num_rules;
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mutex lock; /* xfrm lock */
	struct mlx5_accel_esp_xfrm accel_xfrm;
};
78
05564d0a
AY
/* rb-tree node mapping a flow-table entry (keyed by its fs_fte pointer)
 * to the SA context created for it. */
struct mlx5_fpga_ipsec_rule {
	struct rb_node node;
	struct fs_fte *fte;	/* tree key */
	struct mlx5_fpga_ipsec_sa_ctx *ctx;
};
84
d6c4f029
AY
/* Hash-table parameters for the SA dedup table: the entire hw_sa blob is
 * the key, so two SAs collide only when every hardware field matches. */
static const struct rhashtable_params rhash_sa = {
	.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
	.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
	.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};
92
/* Per-device FPGA IPSec state, hung off mlx5_fpga_device. */
struct mlx5_fpga_ipsec {
	struct mlx5_fpga_device *fdev;
	struct list_head pending_cmds;	/* commands awaiting a response, FIFO */
	spinlock_t pending_cmds_lock; /* Protects pending_cmds */
	u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];	/* cached extended caps */
	struct mlx5_fpga_conn *conn;	/* SBU connection used for commands */

	struct notifier_block fs_notifier_ingress_bypass;
	struct notifier_block fs_notifier_egress;

	/* Map hardware SA --> SA context
	 * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx)
	 * We will use this hash to avoid SAs duplication in fpga which
	 * aren't allowed
	 */
	struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
	struct mutex sa_hash_lock;

	/* Tree holding all rules for this fpga device
	 * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
	 */
	struct rb_root rules_rb;
	struct mutex rules_rb_lock; /* rules lock */
};
117
118static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
119{
120 if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
121 return false;
122
123 if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
124 MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
125 return false;
126
127 if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
128 MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
129 return false;
130
131 return true;
132}
133
134static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
135 struct mlx5_fpga_device *fdev,
136 struct mlx5_fpga_dma_buf *buf,
137 u8 status)
138{
d6c4f029 139 struct mlx5_fpga_ipsec_cmd_context *context;
bebb23e6
IT
140
141 if (status) {
d6c4f029 142 context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
bebb23e6
IT
143 buf);
144 mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
145 status);
581fddde 146 context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
bebb23e6
IT
147 complete(&context->complete);
148 }
149}
150
581fddde
YK
151static inline
152int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
bebb23e6
IT
153{
154 switch (syndrome) {
581fddde 155 case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
bebb23e6 156 return 0;
581fddde 157 case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
bebb23e6 158 return -EEXIST;
581fddde 159 case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
bebb23e6 160 return -EINVAL;
581fddde 161 case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
bebb23e6
IT
162 return -EIO;
163 }
164 return -EIO;
165}
166
/* Receive callback for FPGA IPSec responses.
 *
 * Responses carry no command identifier, so each one is matched to the
 * OLDEST entry on pending_cmds (FIFO ordering is established by posting
 * the send under pending_cmds_lock in mlx5_fpga_ipsec_cmd_exec()).
 * Records the syndrome-derived errno and the raw response in the command
 * context, then wakes the waiter.
 */
static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
	struct mlx5_fpga_ipsec_cmd_context *context;
	enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
	struct mlx5_fpga_device *fdev = cb_arg;
	unsigned long flags;

	/* runt frame: can't even contain a response header — drop it */
	if (buf->sg[0].size < sizeof(*resp)) {
		mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
			       buf->sg[0].size, sizeof(*resp));
		return;
	}

	mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
		      ntohl(resp->syndrome));

	/* pop the oldest pending command under the lock */
	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
					   struct mlx5_fpga_ipsec_cmd_context,
					   list);
	if (context)
		list_del(&context->list);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (!context) {
		mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
		return;
	}
	mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);

	syndrome = ntohl(resp->syndrome);
	context->status_code = syndrome_to_errno(syndrome);
	context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
	memcpy(&context->resp, resp, sizeof(*resp));

	if (context->status_code)
		mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
			       syndrome);

	complete(&context->complete);
}
209
581fddde
YK
/* Allocate a command context, copy @cmd into it and post it to the FPGA.
 *
 * Returns the context (to be passed to mlx5_fpga_ipsec_cmd_wait() and then
 * freed by the caller) or an ERR_PTR.  @cmd_size must be a multiple of 4.
 *
 * The send is performed while holding pending_cmds_lock so that the order
 * of pending_cmds matches the order commands go out on the wire — the
 * receive path relies on this to match responses FIFO-style.
 * GFP_ATOMIC: presumably callers may be in atomic context — TODO confirm.
 */
static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
				      const void *cmd, int cmd_size)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned long flags;
	int res;

	if (!fdev || !fdev->ipsec)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd_size & 3)
		return ERR_PTR(-EINVAL);

	/* one allocation: bookkeeping + trailing command payload */
	context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
	if (!context)
		return ERR_PTR(-ENOMEM);

	context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
	context->dev = fdev;
	context->buf.complete = mlx5_fpga_ipsec_send_complete;
	init_completion(&context->complete);
	memcpy(&context->command, cmd, cmd_size);
	context->buf.sg[0].size = cmd_size;
	context->buf.sg[0].data = &context->command;

	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
	if (!res)
		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (res) {
		mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
		kfree(context);
		return ERR_PTR(res);
	}

	/* Context will be freed by wait func after completion */
	return context;
}
251
/* Wait (with timeout) for the response to a command posted by
 * mlx5_fpga_ipsec_cmd_exec() and fold the outcome into an errno.
 * Does NOT free @ctx — the caller owns it.
 */
static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
	struct mlx5_fpga_ipsec_cmd_context *context = ctx;
	unsigned long timeout =
		msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
	int res;

	res = wait_for_completion_timeout(&context->complete, timeout);
	if (!res) {
		mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
		return -ETIMEDOUT;
	}

	/* COMPLETE means a response arrived: report its syndrome-derived
	 * status.  Any other state (e.g. SEND_FAIL) is a transport error. */
	if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
		res = context->status_code;
	else
		res = -EIO;

	return res;
}
272
d6c4f029 273static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
581fddde 274{
d6c4f029
AY
275 if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
276 return true;
277 return false;
581fddde
YK
278}
279
d6c4f029
AY
/* Send an add/delete SA command (@opcode) carrying @hw_sa to the FPGA and
 * wait for the result.
 *
 * Side effect: writes the opcode into hw_sa->ipsec_sa_v1.cmd; callers reset
 * that field to 0 afterwards.  Uses the full v2 command size only when the
 * device supports it, otherwise just the v1 prefix.
 * Returns 0 on success; -EIO also covers a response whose sw_sa_handle does
 * not echo the one we sent.
 */
static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
					struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
					int opcode)
{
	struct mlx5_core_dev *dev = fdev->mdev;
	struct mlx5_ifc_fpga_ipsec_sa *sa;
	struct mlx5_fpga_ipsec_cmd_context *cmd_context;
	size_t sa_cmd_size;
	int err;

	hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
	if (is_v2_sadb_supported(fdev->ipsec))
		sa_cmd_size = sizeof(*hw_sa);
	else
		sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);

	cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
			mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
	if (IS_ERR(cmd_context))
		return PTR_ERR(cmd_context);

	err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
	if (err)
		goto out;

	/* sanity: the response must reference the SA we just sent */
	sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
	if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
		mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
			      ntohl(sa->ipsec_sa_v1.sw_sa_handle),
			      ntohl(cmd_context->resp.sw_sa_handle));
		err = -EIO;
	}

out:
	kfree(cmd_context);
	return err;
}
317
318u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
319{
320 struct mlx5_fpga_device *fdev = mdev->fpga;
321 u32 ret = 0;
322
af9fe19d 323 if (mlx5_fpga_is_ipsec_device(mdev)) {
1d2005e2 324 ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
af9fe19d
AY
325 ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
326 } else {
bebb23e6 327 return ret;
af9fe19d 328 }
bebb23e6
IT
329
330 if (!fdev->ipsec)
331 return ret;
332
333 if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
1d2005e2 334 ret |= MLX5_ACCEL_IPSEC_CAP_ESP;
bebb23e6
IT
335
336 if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
1d2005e2 337 ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;
bebb23e6
IT
338
339 if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
1d2005e2 340 ret |= MLX5_ACCEL_IPSEC_CAP_LSO;
bebb23e6 341
788a8210 342 if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
1d2005e2 343 ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
788a8210 344
cb010083
AY
345 if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
346 ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
347 ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
348 }
349
bebb23e6
IT
350 return ret;
351}
352
353unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
354{
355 struct mlx5_fpga_device *fdev = mdev->fpga;
356
357 if (!fdev || !fdev->ipsec)
358 return 0;
359
360 return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
361 number_of_ipsec_counters);
362}
363
/* Read up to @counters_count 64-bit IPSec counters from FPGA memory into
 * @counters.  Returns 0 on success (a no-op success if the device has no
 * IPSec state) or a negative errno.
 *
 * Each counter occupies two big-endian 32-bit words in device memory:
 * low word first, then high word — hence the 2*count staging buffer and
 * the per-counter reassembly below.
 */
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
				  unsigned int counters_count)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned int i;
	__be32 *data;
	u32 count;
	u64 addr;
	int ret;

	if (!fdev || !fdev->ipsec)
		return 0;

	/* counter block address, split across two 32-bit cap fields */
	addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_low) +
	       ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_high) << 32);

	count = mlx5_fpga_ipsec_counters_count(mdev);

	data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
				 MLX5_FPGA_ACCESS_TYPE_DONTCARE);
	if (ret < 0) {
		mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
			      ret);
		goto out;
	}
	ret = 0;

	/* copy out no more than the caller asked for */
	if (count > counters_count)
		count = counters_count;

	/* Each counter is low word, then high. But each word is big-endian */
	for (i = 0; i < count; i++)
		counters[i] = (u64)ntohl(data[i * 2]) |
			      ((u64)ntohl(data[i * 2 + 1]) << 32);

out:
	kfree(data);	/* kfree(NULL) is a no-op on the -ENOMEM path */
	return ret;
}
411
788a8210
YK
/* Send a SET_CAP command enabling @flags in the FPGA and verify the device
 * acknowledged every requested flag.  Returns 0 or a negative errno.
 * NOTE(review): cmd.flags is __be32 and is printed raw with %08x — the
 * logged value is big-endian on little-endian hosts; confirm if intended.
 */
static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
	int err;

	cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
	cmd.flags = htonl(flags);
	context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
	if (IS_ERR(context)) {
		err = PTR_ERR(context);
		goto out;
	}

	err = mlx5_fpga_ipsec_cmd_wait(context);
	if (err)
		goto out;

	/* all requested capability bits must be echoed back as set */
	if ((context->resp.flags & cmd.flags) != cmd.flags) {
		mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
			      cmd.flags,
			      context->resp.flags);
		err = -EIO;
	}

out:
	return err;
}
440
441static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
442{
443 u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
444 u32 flags = 0;
445
1d2005e2 446 if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
788a8210
YK
447 flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;
448
449 return mlx5_fpga_ipsec_set_caps(mdev, flags);
450}
451
d6c4f029
AY
452static void
453mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
454 const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
455 struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
456{
457 const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
458
459 /* key */
460 memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
461 aes_gcm->key_len / 8);
462 /* Duplicate 128 bit key twice according to HW layout */
463 if (aes_gcm->key_len == 128)
464 memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
465 aes_gcm->aes_key, aes_gcm->key_len / 8);
466
467 /* salt and seq_iv */
468 memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
469 sizeof(aes_gcm->seq_iv));
470 memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
471 sizeof(aes_gcm->salt));
472
cb010083
AY
473 /* esn */
474 if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
475 hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
476 hw_sa->ipsec_sa_v1.flags |=
477 (xfrm_attrs->flags &
478 MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
479 MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
480 hw_sa->esn = htonl(xfrm_attrs->esn);
481 } else {
482 hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
483 hw_sa->ipsec_sa_v1.flags &=
484 ~(xfrm_attrs->flags &
485 MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
486 MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
487 hw_sa->esn = 0;
488 }
489
d6c4f029
AY
490 /* rx handle */
491 hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);
492
493 /* enc mode */
494 switch (aes_gcm->key_len) {
495 case 128:
496 hw_sa->ipsec_sa_v1.enc_mode =
497 MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
498 break;
499 case 256:
500 hw_sa->ipsec_sa_v1.enc_mode =
501 MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
502 break;
503 }
504
505 /* flags */
506 hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
507 MLX5_FPGA_IPSEC_SA_SPI_EN |
508 MLX5_FPGA_IPSEC_SA_IP_ESP;
509
510 if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
511 hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
512 else
513 hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
514}
515
/* Build a complete hardware SA: transform fields via
 * mlx5_fpga_ipsec_build_hw_xfrm(), then the flow-identifying fields
 * (addresses, SPI, address family).  @saddr/@daddr are always 16 bytes;
 * for IPv4 the caller places the address in the last word.
 */
static void
mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
			    struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			    const __be32 saddr[4],
			    const __be32 daddr[4],
			    const __be32 spi, bool is_ipv6,
			    struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);

	/* IPs */
	memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
	memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));

	/* SPI */
	hw_sa->ipsec_sa_v1.spi = spi;

	/* flags */
	if (is_ipv6)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
}
537
05564d0a
AY
538static bool is_full_mask(const void *p, size_t len)
539{
540 WARN_ON(len % 4);
541
542 return !memchr_inv(p, 0xff, len);
543}
544
/* The FPGA can only offload exact-match rules: source IP, destination IP
 * and ESP SPI must all be fully masked.  Checks the v4 or v6 address
 * fields depending on what the flow actually matches on.
 */
static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
				    const u32 *match_c,
				    const u32 *match_v)
{
	const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
						 match_c,
						 misc_parameters);
	const void *headers_c = MLX5_ADDR_OF(fte_match_param,
					     match_c,
					     outer_headers);
	const void *headers_v = MLX5_ADDR_OF(fte_match_param,
					     match_v,
					     outer_headers);

	if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
		const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv4_layout.ipv4);
		const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

		if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)) ||
		    !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)))
			return false;
	} else {
		const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv6_layout.ipv6);
		const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv6_layout.ipv6);

		if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)) ||
		    !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)))
			return false;
	}

	/* the ESP SPI mask must also be all-ones */
	if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
				       outer_esp_spi),
			  MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
		return false;

	return true;
}
594
/* Decide whether a flow described by (@match_criteria_enable, @match_c,
 * @match_v) is eligible for FPGA IPSec offload: it must be a plain outer
 * IPv4/IPv6 flow (no UDP/TCP/VXLAN match), the device must advertise the
 * needed capabilities, and the IP/SPI masks must be exact
 * (validate_fpga_full_mask()).
 */
static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
				    u8 match_criteria_enable,
				    const u32 *match_c,
				    const u32 *match_v)
{
	u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
	bool ipv6_flow;

	ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);

	/* must match on outer headers, be non-UDP/TCP/VXLAN, and be v4 or v6 */
	if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
	    mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
	    mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
	    mlx5_fs_is_vxlan_flow(match_c) ||
	    !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
	      ipv6_flow))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
	    mlx5_fs_is_outer_ipsec_flow(match_c))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
	    ipv6_flow)
		return false;

	if (!validate_fpga_full_mask(dev, match_c, match_v))
		return false;

	return true;
}
629
/* Egress offload is stricter than ingress: on top of
 * mlx5_is_fpga_ipsec_rule(), the rule must not match on MACs, must use
 * only outer-header/misc criteria, may only ENCRYPT/ALLOW, and must not
 * carry a flow tag.
 */
static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
					   u8 match_criteria_enable,
					   const u32 *match_c,
					   const u32 *match_v,
					   struct mlx5_flow_act *flow_act)
{
	const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   outer_headers);
	bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
		       MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
	bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
		       MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
	int ret;

	ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
				      match_v);
	if (!ret)
		return ret;

	if (is_dmac || is_smac ||
	    (match_criteria_enable &
	     ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
	    (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
	    flow_act->has_flow_tag)
		return false;

	return true;
}
658
d6c4f029
AY
/* Create (or reuse) the SA context for @accel_xfrm with the given
 * addresses and SPI.
 *
 * If the xfrm is already bound to an SA, the new candidate must be
 * byte-identical to it (same IPs/SPI/keys) and the existing context is
 * reused with its refcount (num_rules) bumped.  Otherwise the candidate is
 * inserted into the device-wide dedup hash (insertion failure means some
 * OTHER xfrm already owns an identical hardware SA — not allowed) and the
 * ADD_SA command is sent to the FPGA.
 *
 * Lock order: fpga_xfrm->lock outer, fipsec->sa_hash_lock inner.
 * Returns the sa_ctx or an ERR_PTR; on any failure the candidate is freed.
 */
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
				    struct mlx5_accel_esp_xfrm *accel_xfrm,
				    const __be32 saddr[4],
				    const __be32 daddr[4],
				    const __be32 spi, bool is_ipv6)
{
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(accel_xfrm, typeof(*fpga_xfrm),
				     accel_xfrm);
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode, err;
	void *context;

	/* alloc SA */
	sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
	if (!sa_ctx)
		return ERR_PTR(-ENOMEM);

	sa_ctx->dev = mdev;

	/* build candidate SA */
	mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
				    saddr, daddr, spi, is_ipv6,
				    &sa_ctx->hw_sa);

	mutex_lock(&fpga_xfrm->lock);

	if (fpga_xfrm->sa_ctx) {        /* multiple rules for same accel_xfrm */
		/* all rules must be with same IPs and SPI */
		if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
			   sizeof(sa_ctx->hw_sa))) {
			context = ERR_PTR(-EINVAL);
			goto exists;
		}

		++fpga_xfrm->num_rules;
		context = fpga_xfrm->sa_ctx;
		goto exists;
	}

	/* This is unbounded fpga_xfrm, try to add to hash */
	mutex_lock(&fipsec->sa_hash_lock);

	err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
					    rhash_sa);
	if (err) {
		/* Can't bound different accel_xfrm to already existing sa_ctx.
		 * This is because we can't support multiple keymats for
		 * same IPs and SPI
		 */
		context = ERR_PTR(-EEXIST);
		goto unlock_hash;
	}

	/* Bound accel_xfrm to sa_ctx */
	opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;	/* cmd field is not part of the key */
	if (err) {
		context = ERR_PTR(err);
		goto delete_hash;
	}

	mutex_unlock(&fipsec->sa_hash_lock);

	++fpga_xfrm->num_rules;
	fpga_xfrm->sa_ctx = sa_ctx;
	sa_ctx->fpga_xfrm = fpga_xfrm;

	mutex_unlock(&fpga_xfrm->lock);

	return sa_ctx;

delete_hash:
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
unlock_hash:
	mutex_unlock(&fipsec->sa_hash_lock);

exists:
	mutex_unlock(&fpga_xfrm->lock);
	kfree(sa_ctx);
	return context;
}
747
05564d0a
AY
748static void *
749mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
750 struct fs_fte *fte,
751 bool is_egress)
752{
753 struct mlx5_accel_esp_xfrm *accel_xfrm;
754 __be32 saddr[4], daddr[4], spi;
755 struct mlx5_flow_group *fg;
756 bool is_ipv6 = false;
757
758 fs_get_obj(fg, fte->node.parent);
759 /* validate */
760 if (is_egress &&
761 !mlx5_is_fpga_egress_ipsec_rule(mdev,
762 fg->mask.match_criteria_enable,
763 fg->mask.match_criteria,
764 fte->val,
765 &fte->action))
766 return ERR_PTR(-EINVAL);
767 else if (!mlx5_is_fpga_ipsec_rule(mdev,
768 fg->mask.match_criteria_enable,
769 fg->mask.match_criteria,
770 fte->val))
771 return ERR_PTR(-EINVAL);
772
773 /* get xfrm context */
774 accel_xfrm =
775 (struct mlx5_accel_esp_xfrm *)fte->action.esp_id;
776
777 /* IPs */
778 if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
779 fte->val)) {
780 memcpy(&saddr[3],
781 MLX5_ADDR_OF(fte_match_set_lyr_2_4,
782 fte->val,
783 src_ipv4_src_ipv6.ipv4_layout.ipv4),
784 sizeof(saddr[3]));
785 memcpy(&daddr[3],
786 MLX5_ADDR_OF(fte_match_set_lyr_2_4,
787 fte->val,
788 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
789 sizeof(daddr[3]));
790 } else {
791 memcpy(saddr,
792 MLX5_ADDR_OF(fte_match_param,
793 fte->val,
794 outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
795 sizeof(saddr));
796 memcpy(daddr,
797 MLX5_ADDR_OF(fte_match_param,
798 fte->val,
799 outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
800 sizeof(daddr));
801 is_ipv6 = true;
802 }
803
804 /* SPI */
805 spi = MLX5_GET_BE(typeof(spi),
806 fte_match_param, fte->val,
807 misc_parameters.outer_esp_spi);
808
809 /* create */
810 return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
811 saddr, daddr,
812 spi, is_ipv6);
813}
814
d6c4f029
AY
815static void
816mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
817{
818 struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
819 struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
820 int opcode = is_v2_sadb_supported(fdev->ipsec) ?
821 MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
822 MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
823 int err;
824
825 err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
826 sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
827 if (err) {
828 WARN_ON(err);
829 return;
830 }
831
832 mutex_lock(&fipsec->sa_hash_lock);
833 WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
834 rhash_sa));
835 mutex_unlock(&fipsec->sa_hash_lock);
836}
837
/* Drop one rule's reference on an SA context; the last reference releases
 * the hardware SA and unbinds it from the xfrm.
 * NOTE(review): the sa_ctx itself is not freed here — presumably freed by
 * the caller / xfrm teardown path outside this chunk; confirm.
 */
void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;

	mutex_lock(&fpga_xfrm->lock);
	if (!--fpga_xfrm->num_rules) {
		mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
		fpga_xfrm->sa_ctx = NULL;
	}
	mutex_unlock(&fpga_xfrm->lock);
}
850
05564d0a
AY
851static inline struct mlx5_fpga_ipsec_rule *
852_rule_search(struct rb_root *root, struct fs_fte *fte)
853{
854 struct rb_node *node = root->rb_node;
855
856 while (node) {
857 struct mlx5_fpga_ipsec_rule *rule =
858 container_of(node, struct mlx5_fpga_ipsec_rule,
859 node);
860
861 if (rule->fte < fte)
862 node = node->rb_left;
863 else if (rule->fte > fte)
864 node = node->rb_right;
865 else
866 return rule;
867 }
868 return NULL;
869}
870
/* Locked wrapper around _rule_search(). */
static struct mlx5_fpga_ipsec_rule *
rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
{
	struct mlx5_fpga_ipsec_rule *rule;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rule = _rule_search(&ipsec_dev->rules_rb, fte);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return rule;
}
882
/* Insert @rule into the tree ordered by fte pointer (smaller keys to the
 * left).  Returns -EEXIST if an entry with the same fte is already present.
 * Caller holds rules_rb_lock.
 */
static inline int _rule_insert(struct rb_root *root,
			       struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct mlx5_fpga_ipsec_rule *this =
				container_of(*new, struct mlx5_fpga_ipsec_rule,
					     node);

		parent = *new;
		if (rule->fte < this->fte)
			new = &((*new)->rb_left);
		else if (rule->fte > this->fte)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&rule->node, parent, new);
	rb_insert_color(&rule->node, root);

	return 0;
}
909
/* Locked wrapper around _rule_insert(). */
static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
		       struct mlx5_fpga_ipsec_rule *rule)
{
	int ret;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	ret = _rule_insert(&ipsec_dev->rules_rb, rule);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return ret;
}
921
/* Unlink @rule from the tree (takes the lock itself, despite the leading
 * underscore); does not free it. */
static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
				struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_root *root = &ipsec_dev->rules_rb;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rb_erase(&rule->node, root);
	mutex_unlock(&ipsec_dev->rules_rb_lock);
}
931
/* Unlink @rule from the tree and free it. */
static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
			struct mlx5_fpga_ipsec_rule *rule)
{
	_rule_delete(ipsec_dev, rule);
	kfree(rule);
}
938
/* Fields temporarily stripped from an fte before passing it to firmware
 * (see modify_spec_mailbox()) so they can be put back afterwards
 * (restore_spec_mailbox()). */
struct mailbox_mod {
	uintptr_t saved_esp_id;
	u32 saved_action;			/* saved ENCRYPT/DECRYPT bits */
	u32 saved_outer_esp_spi_value;
};
944
/* Undo modify_spec_mailbox(): put the saved SPI match value, crypto action
 * bits and esp_id back into @fte. */
static void restore_spec_mailbox(struct fs_fte *fte,
				 struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
		 mbox_mod->saved_outer_esp_spi_value);
	fte->action.action |= mbox_mod->saved_action;
	fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
}
957
/* Strip the FPGA-only parts of @fte before it is sent to firmware: save
 * then clear the esp_id and the ENCRYPT/DECRYPT action bits (firmware does
 * not understand them), and — when the NIC cannot match on outer_esp_spi —
 * also zero the SPI match value.  The saved state goes into @mbox_mod for
 * restore_spec_mailbox().
 */
static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
				struct fs_fte *fte,
				struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	mbox_mod->saved_esp_id = fte->action.esp_id;
	mbox_mod->saved_action = fte->action.action &
			(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
			 MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	mbox_mod->saved_outer_esp_spi_value =
			MLX5_GET(fte_match_set_misc, misc_params_v,
				 outer_esp_spi);

	fte->action.esp_id = 0;
	fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	if (!MLX5_CAP_FLOWTABLE(mdev,
				flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
}
981
982static enum fs_flow_table_type egress_to_fs_ft(bool egress)
983{
984 return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
985}
986
/* Create a flow group, working around NICs that cannot match on
 * outer_esp_spi: the SPI mask (and, if it was the only misc criterion, the
 * misc-parameters criteria bit) is temporarily cleared from @in around the
 * firmware call and restored before returning, so the caller's buffer is
 * unchanged.
 */
static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   unsigned int *group_id,
					   bool is_egress)
{
	int (*create_flow_group)(struct mlx5_core_dev *dev,
				 struct mlx5_flow_table *ft, u32 *in,
				 unsigned int *group_id) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
	char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
					   match_criteria.misc_parameters);
	u32 saved_outer_esp_spi_mask;
	u8 match_criteria_enable;
	int ret;

	/* NIC can match on SPI natively: nothing to hide */
	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		return create_flow_group(dev, ft, in, group_id);

	match_criteria_enable =
		MLX5_GET(create_flow_group_in, in, match_criteria_enable);
	saved_outer_esp_spi_mask =
		MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
	if (!match_criteria_enable || !saved_outer_esp_spi_mask)
		return create_flow_group(dev, ft, in, group_id);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);

	/* if SPI was the only misc criterion, drop the misc bit entirely */
	if (!(*misc_params_c) &&
	    !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
		MLX5_SET(create_flow_group_in, in, match_criteria_enable,
			 match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);

	ret = create_flow_group(dev, ft, in, group_id);

	/* restore the caller's buffer */
	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
	MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);

	return ret;
}
1028
/* Create a flow-table entry.  Plain (non-crypto) entries go straight to
 * the default command.  Crypto entries additionally get an SA context
 * created/bound, are tracked in the rules rb-tree, and are sanitized for
 * firmware via modify_spec_mailbox()/restore_spec_mailbox() around the
 * create call.  On firmware failure all of that is rolled back.
 */
static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *fg,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*create_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_group *fg,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;	/* non-zero esp_id marks a crypto rule */
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return create_fte(dev, ft, fg, fte);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
	if (IS_ERR(rule->ctx)) {
		int err = PTR_ERR(rule->ctx);
		kfree(rule);
		return err;
	}

	rule->fte = fte;
	WARN_ON(rule_insert(fipsec, rule));	/* duplicate fte would be a driver bug */

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = create_fte(dev, ft, fg, fte);
	restore_spec_mailbox(fte, &mbox_mod);
	if (ret) {
		/* roll back: untrack the rule and drop the SA reference */
		_rule_delete(fipsec, rule);
		mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
		kfree(rule);
	}

	return ret;
}
1078
1079static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
1080 struct mlx5_flow_table *ft,
1081 unsigned int group_id,
1082 int modify_mask,
1083 struct fs_fte *fte,
1084 bool is_egress)
1085{
1086 int (*update_fte)(struct mlx5_core_dev *dev,
1087 struct mlx5_flow_table *ft,
1088 unsigned int group_id,
1089 int modify_mask,
1090 struct fs_fte *fte) =
1091 mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
1092 bool is_esp = fte->action.esp_id;
1093 struct mailbox_mod mbox_mod;
1094 int ret;
1095
1096 if (!is_esp ||
1097 !(fte->action.action &
1098 (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
1099 MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
1100 return update_fte(dev, ft, group_id, modify_mask, fte);
1101
1102 modify_spec_mailbox(dev, fte, &mbox_mod);
1103 ret = update_fte(dev, ft, group_id, modify_mask, fte);
1104 restore_spec_mailbox(fte, &mbox_mod);
1105
1106 return ret;
1107}
1108
/*
 * delete_fte hook for FPGA-IPSec tables.
 *
 * For ESP encrypt/decrypt FTEs, release the FPGA SA context and the
 * tracked software rule, then remove the FTE from the device with the
 * flow spec temporarily rewritten (mirroring create).
 *
 * Returns 0 on success, -ENOENT if no tracked rule matches the FTE, or
 * the firmware command's error.
 */
static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte,
				    bool is_egress)
{
	/* Default firmware command for the table type of this direction. */
	int (*delete_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	/* Non-ESP entries go straight to firmware. */
	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return delete_fte(dev, ft, fte);

	rule = rule_search(fipsec, fte);
	if (!rule)
		return -ENOENT;

	/* NOTE(review): software state is torn down before the device
	 * delete; a delete_fte failure is reported but not rolled back.
	 */
	mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
	rule_delete(fipsec, rule);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = delete_fte(dev, ft, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}
1144
1145static int
1146mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev,
1147 struct mlx5_flow_table *ft,
1148 u32 *in,
1149 unsigned int *group_id)
1150{
1151 return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true);
1152}
1153
1154static int
1155mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev,
1156 struct mlx5_flow_table *ft,
1157 struct mlx5_flow_group *fg,
1158 struct fs_fte *fte)
1159{
1160 return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true);
1161}
1162
1163static int
1164mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev,
1165 struct mlx5_flow_table *ft,
1166 unsigned int group_id,
1167 int modify_mask,
1168 struct fs_fte *fte)
1169{
1170 return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
1171 true);
1172}
1173
1174static int
1175mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev,
1176 struct mlx5_flow_table *ft,
1177 struct fs_fte *fte)
1178{
1179 return fpga_ipsec_fs_delete_fte(dev, ft, fte, true);
1180}
1181
1182static int
1183mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev,
1184 struct mlx5_flow_table *ft,
1185 u32 *in,
1186 unsigned int *group_id)
1187{
1188 return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false);
1189}
1190
1191static int
1192mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev,
1193 struct mlx5_flow_table *ft,
1194 struct mlx5_flow_group *fg,
1195 struct fs_fte *fte)
1196{
1197 return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false);
1198}
1199
1200static int
1201mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev,
1202 struct mlx5_flow_table *ft,
1203 unsigned int group_id,
1204 int modify_mask,
1205 struct fs_fte *fte)
1206{
1207 return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
1208 false);
1209}
1210
1211static int
1212mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev,
1213 struct mlx5_flow_table *ft,
1214 struct fs_fte *fte)
1215{
1216 return fpga_ipsec_fs_delete_fte(dev, ft, fte, false);
1217}
1218
/* Command tables populated once by mlx5_fpga_ipsec_build_fs_cmds(). */
static struct mlx5_flow_cmds fpga_ipsec_ingress;
static struct mlx5_flow_cmds fpga_ipsec_egress;
1221
1222const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
1223{
1224 switch (type) {
1225 case FS_FT_NIC_RX:
1226 return &fpga_ipsec_ingress;
1227 case FS_FT_NIC_TX:
1228 return &fpga_ipsec_egress;
1229 default:
1230 WARN_ON(true);
1231 return NULL;
1232 }
1233}
1234
bebb23e6
IT
/*
 * Bring up the FPGA IPSec subsystem for @mdev: query SBU capabilities,
 * create the command QP connection, initialize the SA hash table and the
 * rules rb-tree, then enable the supported extended capabilities.
 *
 * Returns 0 on success (also when the device has no IPSec FPGA) or a
 * negative errno; on failure all partially-built state is torn down and
 * fdev->ipsec is left NULL.
 */
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn *conn;
	int err;

	/* Nothing to do on devices without the IPSec SBU. */
	if (!mlx5_fpga_is_ipsec_device(mdev))
		return 0;

	fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
	if (!fdev->ipsec)
		return -ENOMEM;

	fdev->ipsec->fdev = fdev;

	/* Read the SBU's extended IPSec capabilities into ipsec->caps. */
	err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
				     fdev->ipsec->caps);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
			      err);
		goto error;
	}

	INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
	spin_lock_init(&fdev->ipsec->pending_cmds_lock);

	/* QP connection over which SA commands are sent to the FPGA. */
	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_ipsec_recv;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
			      err);
		goto error;
	}
	fdev->ipsec->conn = conn;

	/* Hash of installed SAs, keyed per the rhash_sa parameters. */
	err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
	if (err)
		goto err_destroy_conn;
	mutex_init(&fdev->ipsec->sa_hash_lock);

	/* rb-tree of offloaded rules used by the fs command wrappers. */
	fdev->ipsec->rules_rb = RB_ROOT;
	mutex_init(&fdev->ipsec->rules_rb_lock);

	err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
			      err);
		goto err_destroy_hash;
	}

	return 0;

err_destroy_hash:
	rhashtable_destroy(&fdev->ipsec->sa_hash);

err_destroy_conn:
	mlx5_fpga_sbu_conn_destroy(conn);

error:
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
	return err;
}
1303
05564d0a
AY
/*
 * Free every offloaded rule in @root: unlink the node, release its FPGA
 * SA context and free it. The postorder-safe iterator permits erasing
 * nodes while walking the tree.
 */
static void destroy_rules_rb(struct rb_root *root)
{
	struct mlx5_fpga_ipsec_rule *r, *tmp;

	rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
		rb_erase(&r->node, root);
		mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
		kfree(r);
	}
}
1314
bebb23e6
IT
/*
 * Tear down the FPGA IPSec subsystem for @mdev, undoing
 * mlx5_fpga_ipsec_init() in reverse order: rules rb-tree, SA hash,
 * command connection, then the ipsec context itself.
 */
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	/* Init was a no-op on such devices; nothing to release. */
	if (!mlx5_fpga_is_ipsec_device(mdev))
		return;

	destroy_rules_rb(&fdev->ipsec->rules_rb);
	rhashtable_destroy(&fdev->ipsec->sa_hash);

	mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
}
d6c4f029 1329
05564d0a
AY
1330void mlx5_fpga_ipsec_build_fs_cmds(void)
1331{
1332 /* ingress */
1333 fpga_ipsec_ingress.create_flow_table =
1334 mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
1335 fpga_ipsec_ingress.destroy_flow_table =
1336 mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
1337 fpga_ipsec_ingress.modify_flow_table =
1338 mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
1339 fpga_ipsec_ingress.create_flow_group =
1340 mlx5_fpga_ipsec_fs_create_flow_group_ingress;
1341 fpga_ipsec_ingress.destroy_flow_group =
1342 mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
1343 fpga_ipsec_ingress.create_fte =
1344 mlx5_fpga_ipsec_fs_create_fte_ingress;
1345 fpga_ipsec_ingress.update_fte =
1346 mlx5_fpga_ipsec_fs_update_fte_ingress;
1347 fpga_ipsec_ingress.delete_fte =
1348 mlx5_fpga_ipsec_fs_delete_fte_ingress;
1349 fpga_ipsec_ingress.update_root_ft =
1350 mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;
1351
1352 /* egress */
1353 fpga_ipsec_egress.create_flow_table =
1354 mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
1355 fpga_ipsec_egress.destroy_flow_table =
1356 mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
1357 fpga_ipsec_egress.modify_flow_table =
1358 mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
1359 fpga_ipsec_egress.create_flow_group =
1360 mlx5_fpga_ipsec_fs_create_flow_group_egress;
1361 fpga_ipsec_egress.destroy_flow_group =
1362 mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
1363 fpga_ipsec_egress.create_fte =
1364 mlx5_fpga_ipsec_fs_create_fte_egress;
1365 fpga_ipsec_egress.update_fte =
1366 mlx5_fpga_ipsec_fs_update_fte_egress;
1367 fpga_ipsec_egress.delete_fte =
1368 mlx5_fpga_ipsec_fs_delete_fte_egress;
1369 fpga_ipsec_egress.update_root_ft =
1370 mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
1371}
1372
d6c4f029
AY
1373static int
1374mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
1375 const struct mlx5_accel_esp_xfrm_attrs *attrs)
1376{
1377 if (attrs->tfc_pad) {
1378 mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
1379 return -EOPNOTSUPP;
1380 }
1381
1382 if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
1383 mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
1384 return -EOPNOTSUPP;
1385 }
1386
1387 if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
1388 mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
1389 return -EOPNOTSUPP;
1390 }
1391
1392 if (attrs->keymat.aes_gcm.iv_algo !=
1393 MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
1394 mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
1395 return -EOPNOTSUPP;
1396 }
1397
1398 if (attrs->keymat.aes_gcm.icv_len != 128) {
1399 mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
1400 return -EOPNOTSUPP;
1401 }
1402
1403 if (attrs->keymat.aes_gcm.key_len != 128 &&
1404 attrs->keymat.aes_gcm.key_len != 256) {
1405 mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
1406 return -EOPNOTSUPP;
1407 }
1408
1409 if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
1410 (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
1411 v2_command))) {
1412 mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
1413 return -EOPNOTSUPP;
1414 }
1415
1416 return 0;
1417}
1418
1419struct mlx5_accel_esp_xfrm *
1420mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
1421 const struct mlx5_accel_esp_xfrm_attrs *attrs,
1422 u32 flags)
1423{
1424 struct mlx5_fpga_esp_xfrm *fpga_xfrm;
1425
1426 if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
1427 mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
1428 return ERR_PTR(-EINVAL);
1429 }
1430
1431 if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
1432 mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
1433 return ERR_PTR(-EOPNOTSUPP);
1434 }
1435
1436 fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
1437 if (!fpga_xfrm)
1438 return ERR_PTR(-ENOMEM);
1439
1440 mutex_init(&fpga_xfrm->lock);
1441 memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
1442 sizeof(fpga_xfrm->accel_xfrm.attrs));
1443
1444 return &fpga_xfrm->accel_xfrm;
1445}
1446
1447void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
1448{
1449 struct mlx5_fpga_esp_xfrm *fpga_xfrm =
1450 container_of(xfrm, struct mlx5_fpga_esp_xfrm,
1451 accel_xfrm);
1452 /* assuming no sa_ctx are connected to this xfrm_ctx */
1453 kfree(fpga_xfrm);
1454}
05564d0a
AY
1455
1456int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
1457 const struct mlx5_accel_esp_xfrm_attrs *attrs)
1458{
1459 struct mlx5_core_dev *mdev = xfrm->mdev;
1460 struct mlx5_fpga_device *fdev = mdev->fpga;
1461 struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
1462 struct mlx5_fpga_esp_xfrm *fpga_xfrm;
1463 struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
1464
1465 int err = 0;
1466
1467 if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
1468 return 0;
1469
1470 if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
1471 mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
1472 return -EOPNOTSUPP;
1473 }
1474
1475 if (is_v2_sadb_supported(fipsec)) {
1476 mlx5_core_warn(mdev, "Modify esp is not supported\n");
1477 return -EOPNOTSUPP;
1478 }
1479
1480 fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);
1481
1482 mutex_lock(&fpga_xfrm->lock);
1483
1484 if (!fpga_xfrm->sa_ctx)
1485 /* Unbounded xfrm, chane only sw attrs */
1486 goto change_sw_xfrm_attrs;
1487
1488 /* copy original hw sa */
1489 memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
1490 mutex_lock(&fipsec->sa_hash_lock);
1491 /* remove original hw sa from hash */
1492 WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
1493 &fpga_xfrm->sa_ctx->hash, rhash_sa));
1494 /* update hw_sa with new xfrm attrs*/
1495 mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
1496 &fpga_xfrm->sa_ctx->hw_sa);
1497 /* try to insert new hw_sa to hash */
1498 err = rhashtable_insert_fast(&fipsec->sa_hash,
1499 &fpga_xfrm->sa_ctx->hash, rhash_sa);
1500 if (err)
1501 goto rollback_sa;
1502
1503 /* modify device with new hw_sa */
1504 err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
1505 MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
1506 fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
1507 if (err)
1508 WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
1509 &fpga_xfrm->sa_ctx->hash,
1510 rhash_sa));
1511rollback_sa:
1512 if (err) {
1513 /* return original hw_sa to hash */
1514 memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
1515 sizeof(org_hw_sa));
1516 WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
1517 &fpga_xfrm->sa_ctx->hash,
1518 rhash_sa));
1519 }
1520 mutex_unlock(&fipsec->sa_hash_lock);
1521
1522change_sw_xfrm_attrs:
1523 if (!err)
1524 memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
1525 mutex_unlock(&fpga_xfrm->lock);
1526 return err;
1527}