Commit | Line | Data |
---|---|---|
547eede0 IT |
1 | /* |
2 | * Copyright (c) 2017 Mellanox Technologies. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | * | |
32 | */ | |
33 | ||
34 | #include <crypto/internal/geniv.h> | |
35 | #include <crypto/aead.h> | |
36 | #include <linux/inetdevice.h> | |
37 | #include <linux/netdevice.h> | |
547eede0 IT |
38 | |
39 | #include "en.h" | |
c6e3b421 LR |
40 | #include "ipsec.h" |
41 | #include "ipsec_rxtx.h" | |
547eede0 | 42 | |
75ef3f55 AY |
43 | static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x) |
44 | { | |
021a429b | 45 | return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; |
75ef3f55 AY |
46 | } |
47 | ||
a5b8ca94 LR |
48 | static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x) |
49 | { | |
50 | return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle; | |
51 | } | |
52 | ||
cb010083 AY |
/* Refresh the cached ESN (extended sequence number) tracking state for an SA.
 *
 * Computes the bottom of the replay window from the software replay state and
 * derives the high ESN bits via xfrm_replay_seqhi(). The "overlap" flag flips
 * when the window bottom crosses MLX5E_IPSEC_ESN_SCOPE_MID in either
 * direction.
 *
 * Returns true when the overlap flag changed, i.e. the hardware context needs
 * to be updated with the new ESN attributes; false otherwise (including for
 * non-ESN states, for which the trigger is cleared).
 */
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_replay_state_esn *replay_esn;
	u32 seq_bottom = 0;
	u8 overlap;

	if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
		/* Not an ESN state - nothing to track. */
		sa_entry->esn_state.trigger = 0;
		return false;
	}

	replay_esn = sa_entry->x->replay_esn;
	/* Bottom of the replay window; stays 0 until at least one full
	 * window of sequence numbers has been consumed.
	 */
	if (replay_esn->seq >= replay_esn->replay_window)
		seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;

	overlap = sa_entry->esn_state.overlap;

	sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
						    htonl(seq_bottom));

	sa_entry->esn_state.trigger = 1;
	/* Toggle overlap when the window bottom crosses the mid-scope point;
	 * a toggle means the caller must push new attrs to hardware.
	 */
	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}
85 | ||
1ed78fc0 LR |
/* Translate XFRM packet lifetime limits into the form the hardware expects.
 *
 * The hard limit is passed through unchanged; the soft limit is rewritten as
 * a countdown threshold (see comment below). When no soft limit is set
 * (XFRM_INF), only the hard limit is programmed.
 */
static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
				    struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;

	attrs->hard_packet_limit = x->lft.hard_packet_limit;
	if (x->lft.soft_packet_limit == XFRM_INF)
		return;

	/* Hardware decrements hard_packet_limit counter through
	 * the operation, and fires an event when soft_packet_limit
	 * is reached. It means that we need to substitute the numbers
	 * in order to properly count the soft limit.
	 *
	 * As an example:
	 * XFRM user sets soft limit to 2 and hard limit to 9 and
	 * expects to see a soft event after 2 packets and a hard event
	 * after 9 packets. In our case, the hard limit will be set
	 * to 9 and the soft limit comparator to 7, so the user gets the
	 * soft event after 2 packets.
	 */
	attrs->soft_packet_limit =
		x->lft.hard_packet_limit - x->lft.soft_packet_limit;
}
110 | ||
cee137a6 LR |
/* Translate a software xfrm_state into the driver's hardware-facing
 * attribute structure: AES-GCM key material, salt and implicit IV, ESN
 * state, direction, SPI, addresses, offload type, reqid and lifetime
 * limits. The state is assumed to have passed mlx5e_xfrm_validate_state()
 * (AES-GCM AEAD with 4-byte trailing salt).
 */
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key: alg_key_len is in bits; the key blob carries the AES key
	 * followed by the salt.
	 */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv: the generator IV lives in the seqiv context */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */

	/* ICV length (bits) */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	/* esn: only meaningful when update_esn_state() armed the trigger */
	if (sa_entry->esn_state.trigger) {
		attrs->esn_trigger = true;
		attrs->esn = sa_entry->esn_state.esn;
		attrs->esn_overlap = sa_entry->esn_state.overlap;
		attrs->replay_window = x->replay_esn->replay_window;
	}

	attrs->dir = x->xso.dir;
	/* spi: stored big-endian in the state */
	attrs->spi = be32_to_cpu(x->id.spi);

	/* source and destination IPs; a6 is a union member, so this copies
	 * both the IPv4 and IPv6 representations correctly.
	 */
	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
	attrs->family = x->props.family;
	attrs->type = x->xso.type;
	attrs->reqid = x->props.reqid;

	mlx5e_ipsec_init_limits(sa_entry, attrs);
}
164 | ||
902812b8 LR |
/* Validate that an xfrm_state can be offloaded to this device.
 *
 * Checks the generic constraints first (AES-GCM-only ESP, no auth/comp
 * algorithms, no encapsulation, seqiv generator), then per-offload-type
 * constraints: CRYPTO allows transport and tunnel mode; PACKET offload is
 * transport-only and additionally restricts replay window sizes, requires a
 * reqid, and supports packet-count (not byte-count) lifetime limits.
 *
 * Returns 0 when offloadable, -EINVAL otherwise with an extack message
 * describing the first failed constraint.
 */
static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
				     struct xfrm_state *x,
				     struct netlink_ext_ack *extack)
{
	if (x->props.aalgo != SADB_AALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		NL_SET_ERR_MSG_MOD(extack, "Only AES-GCM-ICV16 xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm states may be offloaded");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->encap) {
		NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state may not be offloaded");
		return -EINVAL;
	}
	if (!x->aead) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 128bit");
		return -EINVAL;
	}
	/* Key blob is AES key (128 or 256 bits) plus 32 bits of salt. */
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
		return -EINVAL;
	}
	if (x->tfcpad) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
		return -EINVAL;
	}
	/* build_accel_xfrm_attrs() reads the IV from the seqiv context, so
	 * only that generator is supported.
	 */
	if (!x->geniv) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
		return -EINVAL;
	}
	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
			NL_SET_ERR_MSG_MOD(extack, "Crypto offload is not supported");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT &&
		    x->props.mode != XFRM_MODE_TUNNEL) {
			NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
			return -EINVAL;
		}
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (!(mlx5_ipsec_device_caps(mdev) &
		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT) {
			NL_SET_ERR_MSG_MOD(extack, "Only transport xfrm states may be offloaded in packet mode");
			return -EINVAL;
		}

		/* Hardware supports only these replay window sizes. */
		if (x->replay_esn && x->replay_esn->replay_window != 32 &&
		    x->replay_esn->replay_window != 64 &&
		    x->replay_esn->replay_window != 128 &&
		    x->replay_esn->replay_window != 256) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported replay window size");
			return -EINVAL;
		}

		if (!x->props.reqid) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot offload without reqid");
			return -EINVAL;
		}

		if (x->lft.hard_byte_limit != XFRM_INF ||
		    x->lft.soft_byte_limit != XFRM_INF) {
			NL_SET_ERR_MSG_MOD(extack, "Device doesn't support limits in bytes");
			return -EINVAL;
		}

		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
		    x->lft.hard_packet_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
			return -EINVAL;
		}
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}
	return 0;
}
281 | ||
c674df97 LR |
/* Deferred-work handler: push updated ESN attributes to the hardware
 * context. Queued from mlx5e_xfrm_advance_esn_state(), which runs in a
 * context where the firmware command cannot be issued directly.
 */
static void _update_xfrm_state(struct work_struct *work)
{
	struct mlx5e_ipsec_modify_state_work *modify_work =
		container_of(work, struct mlx5e_ipsec_modify_state_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = container_of(
		modify_work, struct mlx5e_ipsec_sa_entry, modify_work);

	mlx5_accel_esp_modify_xfrm(sa_entry, &modify_work->attrs);
}
291 | ||
7681a4f5 LR |
292 | static int mlx5e_xfrm_add_state(struct xfrm_state *x, |
293 | struct netlink_ext_ack *extack) | |
547eede0 IT |
294 | { |
295 | struct mlx5e_ipsec_sa_entry *sa_entry = NULL; | |
bdfd2d1f | 296 | struct net_device *netdev = x->xso.real_dev; |
403b383a | 297 | struct mlx5e_ipsec *ipsec; |
547eede0 | 298 | struct mlx5e_priv *priv; |
547eede0 IT |
299 | int err; |
300 | ||
301 | priv = netdev_priv(netdev); | |
021a429b LR |
302 | if (!priv->ipsec) |
303 | return -EOPNOTSUPP; | |
547eede0 | 304 | |
403b383a | 305 | ipsec = priv->ipsec; |
902812b8 | 306 | err = mlx5e_xfrm_validate_state(priv->mdev, x, extack); |
547eede0 IT |
307 | if (err) |
308 | return err; | |
309 | ||
310 | sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL); | |
902812b8 LR |
311 | if (!sa_entry) |
312 | return -ENOMEM; | |
547eede0 IT |
313 | |
314 | sa_entry->x = x; | |
403b383a | 315 | sa_entry->ipsec = ipsec; |
547eede0 | 316 | |
cb010083 AY |
317 | /* check esn */ |
318 | mlx5e_ipsec_update_esn_state(sa_entry); | |
319 | ||
b73e6728 | 320 | mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs); |
d6c4f029 | 321 | /* create hw context */ |
b73e6728 LR |
322 | err = mlx5_ipsec_create_sa_ctx(sa_entry); |
323 | if (err) | |
d6c4f029 | 324 | goto err_xfrm; |
547eede0 | 325 | |
c7049ca6 | 326 | err = mlx5e_accel_ipsec_fs_add_rule(sa_entry); |
5e466345 HN |
327 | if (err) |
328 | goto err_hw_ctx; | |
329 | ||
403b383a LR |
330 | /* We use *_bh() variant because xfrm_timer_handler(), which runs |
331 | * in softirq context, can reach our state delete logic and we need | |
332 | * xa_erase_bh() there. | |
333 | */ | |
334 | err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry, | |
335 | GFP_KERNEL); | |
336 | if (err) | |
337 | goto err_add_rule; | |
338 | ||
339 | if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) | |
7dfee4b1 RS |
340 | sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ? |
341 | mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv; | |
7dfee4b1 | 342 | |
c674df97 | 343 | INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state); |
547eede0 | 344 | x->xso.offload_handle = (unsigned long)sa_entry; |
403b383a | 345 | return 0; |
547eede0 | 346 | |
5e466345 | 347 | err_add_rule: |
c7049ca6 | 348 | mlx5e_accel_ipsec_fs_del_rule(sa_entry); |
7dfee4b1 | 349 | err_hw_ctx: |
b73e6728 | 350 | mlx5_ipsec_free_sa_ctx(sa_entry); |
d6c4f029 | 351 | err_xfrm: |
547eede0 | 352 | kfree(sa_entry); |
902812b8 | 353 | NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy"); |
547eede0 IT |
354 | return err; |
355 | } | |
356 | ||
/* .xdo_dev_state_delete callback: unhook the SA from the SADB lookup table.
 * The hardware context and steering rule are released later in
 * mlx5e_xfrm_free_state(). Uses the _bh variant because this can run from
 * xfrm_timer_handler() in softirq context (see mlx5e_xfrm_add_state()).
 */
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *old;

	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
	WARN_ON(old != sa_entry);
}
366 | ||
/* .xdo_dev_state_free callback: release everything mlx5e_xfrm_add_state()
 * set up. Cancels any in-flight ESN modify work first so it cannot touch
 * the hardware context after it is freed, then tears down in reverse order
 * of creation.
 */
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);

	cancel_work_sync(&sa_entry->modify_work.work);
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	mlx5_ipsec_free_sa_ctx(sa_entry);
	kfree(sa_entry);
}
376 | ||
953d7715 | 377 | void mlx5e_ipsec_init(struct mlx5e_priv *priv) |
547eede0 | 378 | { |
021a429b | 379 | struct mlx5e_ipsec *ipsec; |
953d7715 | 380 | int ret = -ENOMEM; |
547eede0 | 381 | |
2451da08 | 382 | if (!mlx5_ipsec_device_caps(priv->mdev)) { |
547eede0 | 383 | netdev_dbg(priv->netdev, "Not an IPSec offload device\n"); |
953d7715 | 384 | return; |
547eede0 IT |
385 | } |
386 | ||
387 | ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); | |
388 | if (!ipsec) | |
953d7715 | 389 | return; |
547eede0 | 390 | |
403b383a | 391 | xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC); |
9af1968e | 392 | ipsec->mdev = priv->mdev; |
cb010083 AY |
393 | ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0, |
394 | priv->netdev->name); | |
953d7715 | 395 | if (!ipsec->wq) |
021a429b | 396 | goto err_wq; |
5e466345 | 397 | |
8518d05b LR |
398 | if (mlx5_ipsec_device_caps(priv->mdev) & |
399 | MLX5_IPSEC_CAP_PACKET_OFFLOAD) { | |
400 | ret = mlx5e_ipsec_aso_init(ipsec); | |
401 | if (ret) | |
402 | goto err_aso; | |
403 | } | |
404 | ||
021a429b LR |
405 | ret = mlx5e_accel_ipsec_fs_init(ipsec); |
406 | if (ret) | |
407 | goto err_fs_init; | |
408 | ||
c7049ca6 | 409 | ipsec->fs = priv->fs; |
5589b8f1 | 410 | priv->ipsec = ipsec; |
547eede0 | 411 | netdev_dbg(priv->netdev, "IPSec attached to netdevice\n"); |
953d7715 | 412 | return; |
021a429b LR |
413 | |
414 | err_fs_init: | |
8518d05b LR |
415 | if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD) |
416 | mlx5e_ipsec_aso_cleanup(ipsec); | |
417 | err_aso: | |
021a429b LR |
418 | destroy_workqueue(ipsec->wq); |
419 | err_wq: | |
420 | kfree(ipsec); | |
953d7715 LR |
421 | mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret); |
422 | return; | |
547eede0 IT |
423 | } |
424 | ||
/* Detach IPsec offload from a netdevice: reverse of mlx5e_ipsec_init().
 * Safe to call when init failed or was skipped (priv->ipsec == NULL).
 */
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(ipsec);
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);
	destroy_workqueue(ipsec->wq);
	kfree(ipsec);
	priv->ipsec = NULL;
}
439 | ||
2ac9cfe7 IT |
/* .xdo_dev_offload_ok callback: per-packet check whether this skb can be
 * handled by the offload path. Rejects IPv4 packets with options and IPv6
 * packets with extension headers.
 */
static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}
454 | ||
cb010083 AY |
/* .xdo_dev_state_advance_esn callback: called by the xfrm stack when the
 * ESN sequence number advances. If the ESN overlap flag flipped, rebuild
 * the hardware attributes and queue deferred work to push them to the
 * device (this callback's context cannot issue the firmware command
 * directly - see _update_xfrm_state()).
 */
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_modify_state_work *modify_work =
		&sa_entry->modify_work;
	bool need_update;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
	queue_work(sa_entry->ipsec->wq, &modify_work->work);
}
469 | ||
1ed78fc0 LR |
/* .xdo_dev_state_update_curlft callback: refresh the software view of the
 * state's current packet count from the hardware ASO counter. Only
 * meaningful when packet lifetime limits were actually programmed.
 */
static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	int err;

	lockdep_assert_held(&x->lock);

	if (sa_entry->attrs.soft_packet_limit == XFRM_INF)
		/* Limits are not configured, as soft limit
		 * must be lower than hard limit.
		 */
		return;

	/* Best effort: on query failure keep the previous counters. */
	err = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (err)
		return;

	mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
}
489 | ||
1bb70c5a LR |
/* Validate that an xfrm_policy can be offloaded to this device.
 *
 * Only main-table policies with a single template, an in/out direction
 * (no forward), a non-zero reqid and packet-offload type are accepted.
 *
 * Returns 0 when offloadable, -EINVAL otherwise with an extack message.
 */
static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x,
				      struct netlink_ext_ack *extack)
{
	if (x->type != XFRM_POLICY_TYPE_MAIN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
		return -EINVAL;
	}

	/* Please pay attention that we support only one template */
	if (x->xfrm_nr > 1) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload more than one template");
		return -EINVAL;
	}

	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload forward policy");
		return -EINVAL;
	}

	/* reqid is what links the offloaded policy to its offloaded state */
	if (!x->xfrm_vec[0].reqid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload policy without reqid");
		return -EINVAL;
	}

	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}

	return 0;
}
522 | ||
/* Translate an xfrm_policy into the driver's hardware-facing policy
 * attributes: selector addresses/family, direction, action and reqid.
 * Assumes the policy passed mlx5e_xfrm_validate_policy() (single template,
 * packet-offload type).
 */
static void
mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
				  struct mlx5_accel_pol_xfrm_attrs *attrs)
{
	struct xfrm_policy *x = pol_entry->x;
	struct xfrm_selector *sel;

	sel = &x->selector;
	memset(attrs, 0, sizeof(*attrs));

	/* a6 is a union member, so this covers both IPv4 and IPv6. */
	memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
	attrs->family = sel->family;
	attrs->dir = x->xdo.dir;
	attrs->action = x->action;
	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
	attrs->reqid = x->xfrm_vec[0].reqid;
}
541 | ||
3089386d LR |
/* .xdo_dev_policy_add callback: offload a new xfrm policy to the device.
 *
 * Validates the policy, allocates a driver policy entry, builds the
 * hardware attributes and installs the flow-steering rule. On success the
 * entry pointer is stored in x->xdo.offload_handle (see
 * to_ipsec_pol_entry()).
 *
 * Returns 0 on success or a negative errno with an extack message.
 */
static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
				 struct netlink_ext_ack *extack)
{
	struct net_device *netdev = x->xdo.real_dev;
	struct mlx5e_ipsec_pol_entry *pol_entry;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet offload");
		return -EOPNOTSUPP;
	}

	err = mlx5e_xfrm_validate_policy(x, extack);
	if (err)
		return err;

	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
	if (!pol_entry)
		return -ENOMEM;

	pol_entry->x = x;
	pol_entry->ipsec = priv->ipsec;

	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
	if (err)
		goto err_fs;

	x->xdo.offload_handle = (unsigned long)pol_entry;
	return 0;

err_fs:
	kfree(pol_entry);
	NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
	return err;
}
580 | ||
/* .xdo_dev_policy_free callback: undo mlx5e_xfrm_add_policy() - remove the
 * steering rule and release the driver policy entry.
 */
static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
{
	struct mlx5e_ipsec_pol_entry *entry;

	entry = to_ipsec_pol_entry(x);
	mlx5e_accel_ipsec_fs_del_pol(entry);
	kfree(entry);
}
588 | ||
547eede0 IT |
/* Callback table for crypto-only offload: no policy or lifetime hooks. */
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};
596 | ||
a5b8ca94 LR |
/* Callback table for full packet offload: the crypto-only set plus policy
 * management and hardware lifetime accounting.
 */
static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,

	.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
	.xdo_dev_policy_add	= mlx5e_xfrm_add_policy,
	.xdo_dev_policy_free	= mlx5e_xfrm_free_policy,
};
608 | ||
547eede0 IT |
/* Advertise IPsec offload features on the netdevice according to device
 * capabilities: pick the crypto-only or packet-offload callback table, then
 * progressively enable ESP, ESP TX checksum (needs swp_csum) and ESP GSO
 * (needs swp_lso) feature flags, stopping at the first missing capability.
 */
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!mlx5_ipsec_device_caps(mdev))
		return;

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
	else
		netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;

	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}