net/mlx5: Add flow-steering commands for FPGA IPSec implementation
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_accel / ipsec.c
CommitLineData
547eede0
IT
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <crypto/internal/geniv.h>
35#include <crypto/aead.h>
36#include <linux/inetdevice.h>
37#include <linux/netdevice.h>
38#include <linux/module.h>
39
40#include "en.h"
41#include "accel/ipsec.h"
42#include "en_accel/ipsec.h"
899a59d3 43#include "en_accel/ipsec_rxtx.h"
547eede0
IT
44
/* Driver-private state kept per offloaded xfrm SA. Allocated in
 * xdo_dev_state_add and stashed in x->xso.offload_handle.
 */
struct mlx5e_ipsec_sa_entry {
	struct hlist_node hlist; /* Item in SADB_RX hashtable */
	unsigned int handle; /* Handle in SADB_RX */
	struct xfrm_state *x; /* Back-pointer to the stack's xfrm state */
	struct mlx5e_ipsec *ipsec; /* Owning per-netdev IPSec context */
	struct mlx5_accel_esp_xfrm *xfrm; /* Accel-layer xfrm object */
	void *hw_context; /* HW (FPGA) SA context, NULL if not created */
};
53
/* Resolve a SADB_RX handle (as reported by HW on RX) to its xfrm state.
 * Returns the state with a reference held (caller must xfrm_state_put()),
 * or NULL if no SA is currently registered under @handle.
 */
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
					      unsigned int handle)
{
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
		if (sa_entry->handle == handle) {
			ret = sa_entry->x;
			/* Take the reference inside the RCU read side so the
			 * state cannot be freed between lookup and hold.
			 */
			xfrm_state_hold(ret);
			break;
		}
	rcu_read_unlock();

	return ret;
}
71
/* Allocate a unique handle for @sa_entry and publish it in the SADB_RX
 * hashtable so RX traffic can be matched to this SA even before the
 * add-SA completion arrives. Returns 0 or a negative errno.
 */
static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	unsigned long flags;
	int ret;

	/* Handles start at 1; 0 appears to be reserved as "no SA" —
	 * NOTE(review): confirm against the HW metadata format.
	 */
	ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
	sa_entry->handle = ret;
	hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);

	return 0;
}
89
/* Unpublish @sa_entry from the SADB_RX hashtable. RCU readers may still
 * observe it until a grace period passes; the handle itself is released
 * later by mlx5e_ipsec_sadb_rx_free().
 */
static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	unsigned long flags;

	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
	hash_del_rcu(&sa_entry->hlist);
	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}
99
/* Release the IDA handle of an SA previously removed from the hashtable.
 * Safe without an explicit synchronize_rcu() here because the xfrm layer
 * guarantees a grace period between the del and free callbacks.
 */
static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

	/* xfrm already doing sync rcu between del and free callbacks */

	ida_simple_remove(&ipsec->halloc, sa_entry->handle);
}
108
/* Translate a validated xfrm state into the accel-layer ESP xfrm
 * attributes (AES-GCM key material, salt/IV, ICV length, RX handle,
 * encrypt/decrypt action and transport/tunnel flag). @attrs is fully
 * overwritten.
 */
static void
mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
				   struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key: alg_key_len is in bits; the last 4 bytes of the RFC 4106
	 * key blob are the salt, not part of the AES key.
	 */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv: seq_iv comes from the seqiv geniv context —
	 * NOTE(review): assumes ivsize == sizeof(aes_gcm->seq_iv); the
	 * validator only admits "seqiv", confirm for other geniv algs.
	 */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	/* iv len */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	/* rx handle */
	attrs->sa_handle = sa_entry->handle;

	/* algo type */
	attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;

	/* action */
	attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
			MLX5_ACCEL_ESP_ACTION_ENCRYPT :
			MLX5_ACCEL_ESP_ACTION_DECRYPT;
	/* flags */
	attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
			MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
			MLX5_ACCEL_ESP_FLAGS_TUNNEL;
}
155
156static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
157{
158 struct net_device *netdev = x->xso.dev;
159 struct mlx5e_priv *priv;
160
161 priv = netdev_priv(netdev);
162
163 if (x->props.aalgo != SADB_AALG_NONE) {
164 netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
165 return -EINVAL;
166 }
167 if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
168 netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
169 return -EINVAL;
170 }
171 if (x->props.calgo != SADB_X_CALG_NONE) {
172 netdev_info(netdev, "Cannot offload compressed xfrm states\n");
173 return -EINVAL;
174 }
175 if (x->props.flags & XFRM_STATE_ESN) {
176 netdev_info(netdev, "Cannot offload ESN xfrm states\n");
177 return -EINVAL;
178 }
179 if (x->props.family != AF_INET &&
180 x->props.family != AF_INET6) {
181 netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
182 return -EINVAL;
183 }
184 if (x->props.mode != XFRM_MODE_TRANSPORT &&
185 x->props.mode != XFRM_MODE_TUNNEL) {
186 dev_info(&netdev->dev, "Only transport and tunnel xfrm states may be offloaded\n");
187 return -EINVAL;
188 }
189 if (x->id.proto != IPPROTO_ESP) {
190 netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
191 return -EINVAL;
192 }
193 if (x->encap) {
194 netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
195 return -EINVAL;
196 }
197 if (!x->aead) {
198 netdev_info(netdev, "Cannot offload xfrm states without aead\n");
199 return -EINVAL;
200 }
201 if (x->aead->alg_icv_len != 128) {
202 netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
203 return -EINVAL;
204 }
205 if ((x->aead->alg_key_len != 128 + 32) &&
206 (x->aead->alg_key_len != 256 + 32)) {
207 netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
208 return -EINVAL;
209 }
210 if (x->tfcpad) {
211 netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
212 return -EINVAL;
213 }
214 if (!x->geniv) {
215 netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
216 return -EINVAL;
217 }
218 if (strcmp(x->geniv, "seqiv")) {
219 netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
220 return -EINVAL;
221 }
222 if (x->props.family == AF_INET6 &&
1d2005e2
AY
223 !(mlx5_accel_ipsec_device_caps(priv->mdev) &
224 MLX5_ACCEL_IPSEC_CAP_IPV6)) {
547eede0
IT
225 netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
226 return -EINVAL;
227 }
228 return 0;
229}
230
/* xdo_dev_state_add callback: validate @x, allocate driver state, and
 * create the accel xfrm object plus the HW SA context. On success the
 * sa_entry pointer is stored in x->xso.offload_handle. Errors unwind in
 * strict reverse order of construction via the goto ladder below.
 */
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.dev;
	struct mlx5_accel_esp_xfrm_attrs attrs;
	struct mlx5e_priv *priv;
	__be32 saddr[4] = {0}, daddr[4] = {0}, spi;
	bool is_ipv6 = false;
	int err;

	priv = netdev_priv(netdev);

	err = mlx5e_xfrm_validate_state(x);
	if (err)
		return err;

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		err = -ENOMEM;
		goto out;
	}

	sa_entry->x = x;
	sa_entry->ipsec = priv->ipsec;

	/* Add the SA to handle processed incoming packets before the add SA
	 * completion was received
	 */
	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
		err = mlx5e_ipsec_sadb_rx_add(sa_entry);
		if (err) {
			netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
			goto err_entry;
		}
	}

	/* create xfrm */
	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
	sa_entry->xfrm =
		mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
					   MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
	if (IS_ERR(sa_entry->xfrm)) {
		err = PTR_ERR(sa_entry->xfrm);
		goto err_sadb_rx;
	}

	/* create hw context; IPv4 addresses go in the last 32-bit word of
	 * the 4-word (IPv6-sized) address buffers
	 */
	if (x->props.family == AF_INET) {
		saddr[3] = x->props.saddr.a4;
		daddr[3] = x->id.daddr.a4;
	} else {
		memcpy(saddr, x->props.saddr.a6, sizeof(saddr));
		memcpy(daddr, x->id.daddr.a6, sizeof(daddr));
		is_ipv6 = true;
	}
	spi = x->id.spi;
	sa_entry->hw_context =
		mlx5_accel_esp_create_hw_context(priv->mdev,
						 sa_entry->xfrm,
						 saddr, daddr, spi,
						 is_ipv6);
	if (IS_ERR(sa_entry->hw_context)) {
		err = PTR_ERR(sa_entry->hw_context);
		goto err_xfrm;
	}

	x->xso.offload_handle = (unsigned long)sa_entry;
	goto out;

err_xfrm:
	mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
err_sadb_rx:
	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
		mlx5e_ipsec_sadb_rx_del(sa_entry);
		mlx5e_ipsec_sadb_rx_free(sa_entry);
	}
err_entry:
	kfree(sa_entry);
out:
	return err;
}
312
/* xdo_dev_state_delete callback: stop matching RX traffic to this SA by
 * removing it from the SADB_RX hashtable. Resources are released later
 * in the free callback, after xfrm's RCU grace period.
 */
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
	WARN_ON(sa_entry->x != x);

	if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
		mlx5e_ipsec_sadb_rx_del(sa_entry);
}
326
/* xdo_dev_state_free callback: tear down the HW context and accel xfrm
 * (in that order), release the RX handle, and free the driver state.
 */
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
	WARN_ON(sa_entry->x != x);

	/* hw_context may be unset if add_state failed mid-way; the xfrm
	 * object only exists when the hw context does on this path
	 */
	if (sa_entry->hw_context) {
		mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
		mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
	}

	if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
		mlx5e_ipsec_sadb_rx_free(sa_entry);

	kfree(sa_entry);
}
347
348int mlx5e_ipsec_init(struct mlx5e_priv *priv)
349{
350 struct mlx5e_ipsec *ipsec = NULL;
351
352 if (!MLX5_IPSEC_DEV(priv->mdev)) {
353 netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
354 return 0;
355 }
356
357 ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
358 if (!ipsec)
359 return -ENOMEM;
360
361 hash_init(ipsec->sadb_rx);
362 spin_lock_init(&ipsec->sadb_rx_lock);
363 ida_init(&ipsec->halloc);
364 ipsec->en_priv = priv;
365 ipsec->en_priv->ipsec = ipsec;
788a8210 366 ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
1d2005e2 367 MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
547eede0
IT
368 netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
369 return 0;
370}
371
372void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
373{
374 struct mlx5e_ipsec *ipsec = priv->ipsec;
375
376 if (!ipsec)
377 return;
378
379 ida_destroy(&ipsec->halloc);
380 kfree(ipsec);
381 priv->ipsec = NULL;
382}
383
2ac9cfe7
IT
384static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
385{
386 if (x->props.family == AF_INET) {
387 /* Offload with IPv4 options is not supported yet */
388 if (ip_hdr(skb)->ihl > 5)
389 return false;
390 } else {
391 /* Offload with IPv6 extension headers is not support yet */
392 if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
393 return false;
394 }
395
396 return true;
397}
398
/* xfrm device offload callbacks registered on the netdev when ESP
 * acceleration is available.
 */
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
};
405
/* Advertise IPSec offload features on the netdev, gated progressively by
 * device capabilities: ESP+SWP enables NETIF_F_HW_ESP, swp_csum adds TX
 * checksum, and LSO+swp_lso adds ESP GSO. Each missing capability stops
 * feature escalation with a debug message.
 */
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!priv->ipsec)
		return;

	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
	    !MLX5_CAP_ETH(mdev, swp)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
		return;
	}

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
	    !MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}