Commit | Line | Data |
---|---|---|
547eede0 IT |
1 | /* |
2 | * Copyright (c) 2017 Mellanox Technologies. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | * | |
32 | */ | |
33 | ||
34 | #include <crypto/internal/geniv.h> | |
35 | #include <crypto/aead.h> | |
36 | #include <linux/inetdevice.h> | |
37 | #include <linux/netdevice.h> | |
4c24272b | 38 | #include <net/netevent.h> |
547eede0 IT |
39 | |
40 | #include "en.h" | |
8efd7b17 | 41 | #include "eswitch.h" |
c6e3b421 LR |
42 | #include "ipsec.h" |
43 | #include "ipsec_rxtx.h" | |
f5c5abc4 | 44 | #include "en_rep.h" |
547eede0 | 45 | |
b2f7b01d | 46 | #define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000) |
4c24272b | 47 | #define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1 |
b2f7b01d | 48 | |
75ef3f55 AY |
49 | static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x) |
50 | { | |
021a429b | 51 | return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; |
75ef3f55 AY |
52 | } |
53 | ||
a5b8ca94 LR |
54 | static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x) |
55 | { | |
56 | return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle; | |
57 | } | |
58 | ||
/* Delayed-work handler that enforces SA soft/hard lifetime limits in
 * software. Periodically (every MLX5_IPSEC_RESCHED) asks the XFRM core to
 * check expiration; when the state reaches XFRM_STATE_EXPIRED, marks the SA
 * to drop traffic and updates the steering rules, then stops rescheduling.
 */
static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
{
	struct mlx5e_ipsec_dwork *dwork =
		container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
	struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
	struct xfrm_state *x = sa_entry->x;

	/* Already switched to drop on a previous run; nothing to do and no
	 * need to reschedule.
	 */
	if (sa_entry->attrs.drop)
		return;

	spin_lock_bh(&x->lock);
	xfrm_state_check_expire(x);
	if (x->km.state == XFRM_STATE_EXPIRED) {
		sa_entry->attrs.drop = true;
		/* Drop x->lock before touching hardware/steering state. */
		spin_unlock_bh(&x->lock);

		mlx5e_accel_ipsec_fs_modify(sa_entry);
		return;
	}
	spin_unlock_bh(&x->lock);

	queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
			   MLX5_IPSEC_RESCHED);
}
83 | ||
cb010083 AY |
/* Sample the current ESN (extended sequence number) state from the XFRM
 * state and refresh the cached esn/esn_msb/overlap values in
 * sa_entry->esn_state.
 *
 * Returns true when the overlap flag flipped, i.e. the sequence-number
 * window crossed MLX5E_IPSEC_ESN_SCOPE_MID and hardware must be updated
 * with the new ESN state; false otherwise.
 */
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	u32 seq_bottom = 0;
	u32 esn, esn_msb;
	u8 overlap;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_PACKET:
		/* Packet offload tracks RX and TX sequence numbers in
		 * separate fields of the replay state.
		 */
		switch (x->xso.dir) {
		case XFRM_DEV_OFFLOAD_IN:
			esn = x->replay_esn->seq;
			esn_msb = x->replay_esn->seq_hi;
			break;
		case XFRM_DEV_OFFLOAD_OUT:
			esn = x->replay_esn->oseq;
			esn_msb = x->replay_esn->oseq_hi;
			break;
		default:
			WARN_ON(true);
			return false;
		}
		break;
	case XFRM_DEV_OFFLOAD_CRYPTO:
		/* Already parsed by XFRM core */
		esn = x->replay_esn->seq;
		break;
	default:
		WARN_ON(true);
		return false;
	}

	overlap = sa_entry->esn_state.overlap;

	/* Bottom edge of the replay window; stays 0 while esn is still
	 * inside the first window.
	 */
	if (esn >= x->replay_esn->replay_window)
		seq_bottom = esn - x->replay_esn->replay_window + 1;

	/* For crypto offload the MSB is derived from the window bottom
	 * rather than read from the state.
	 */
	if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
		esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));

	if (sa_entry->esn_state.esn_msb)
		sa_entry->esn_state.esn = esn;
	else
		/* According to RFC4303, section "3.3.3. Sequence Number Generation",
		 * the first packet sent using a given SA will contain a sequence
		 * number of 1.
		 */
		sa_entry->esn_state.esn = max_t(u32, esn, 1);
	sa_entry->esn_state.esn_msb = esn_msb;

	/* Flip the overlap flag whenever the window bottom crosses the
	 * mid-scope boundary, in either direction.
	 */
	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}
145 | ||
1ed78fc0 LR |
/* Translate the XFRM packet lifetime limits into the hardware counter
 * representation: a 32-bit down counter plus a number of 2^31 "rounds"
 * (for the hard limit) and an interrupt value (for the soft limit).
 * Leaves the limits untouched when the soft limit is XFRM_INF.
 */
static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
				    struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	s64 start_value, n;

	attrs->lft.hard_packet_limit = x->lft.hard_packet_limit;
	attrs->lft.soft_packet_limit = x->lft.soft_packet_limit;
	if (x->lft.soft_packet_limit == XFRM_INF)
		return;

	/* Compute hard limit initial value and number of rounds.
	 *
	 * The counting pattern of hardware counter goes:
	 *         value -> 2^31-1
	 *      2^31 | (2^31-1) -> 2^31-1
	 *      2^31 | (2^31-1) -> 2^31-1
	 *      [..]
	 *      2^31 | (2^31-1) -> 0
	 *
	 * The pattern is created by using an ASO operation to atomically set
	 * bit 31 after the down counter clears bit 31. This is effectively an
	 * atomic addition of 2**31 to the counter.
	 *
	 * We wish to configure the counter, within the above pattern, so that
	 * when it reaches 0, it has hit the hard limit. This is defined by this
	 * system of equations:
	 *
	 *      hard_limit == start_value + n * 2^31
	 *      n >= 0
	 *      start_value < 2^32, start_value >= 0
	 *
	 * These equations are not single-solution, there are often two choices:
	 *      hard_limit == start_value + n * 2^31
	 *      hard_limit == (start_value+2^31) + (n-1) * 2^31
	 *
	 * The algorithm selects the solution that keeps the counter value
	 * above 2^31 until the final iteration.
	 */

	/* Start by estimating n and compute start_value */
	n = attrs->lft.hard_packet_limit / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* Choose the best of the two solutions: */
	if (n >= 1)
		n -= 1;

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* The best solution means: when there are multiple iterations we must
	 * start above 2^31 and count down to 2**31 to get the interrupt.
	 */
	attrs->lft.hard_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_hard = (u64)n;

	/* Compute soft limit initial value and number of rounds.
	 *
	 * The soft_limit is achieved by adjusting the counter's
	 * interrupt_value. This is embedded in the counting pattern created by
	 * hard packet calculations above.
	 *
	 * We wish to compute the interrupt_value for the soft_limit. This is
	 * defined by this system of equations:
	 *
	 *      soft_limit == start_value - soft_value + n * 2^31
	 *      n >= 0
	 *      soft_value < 2^32, soft_value >= 0
	 *      for n == 0 start_value > soft_value
	 *
	 * As with compute_hard_n_value() the equations are not single-solution.
	 * The algorithm selects the solution that has:
	 *      2^30 <= soft_limit < 2^31 + 2^30
	 * for the interior iterations, which guarantees a large guard band
	 * around the counter hard limit and next interrupt.
	 */

	/* Start by estimating n and compute soft_value */
	n = (x->lft.soft_packet_limit - attrs->lft.hard_packet_limit) / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
		      x->lft.soft_packet_limit;

	/* Compare against constraints and adjust n */
	if (n < 0)
		n = 0;
	else if (start_value >= BIT_ULL(32))
		n -= 1;
	else if (start_value < 0)
		n += 1;

	/* Choose the best of the two solutions: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;
	if (n != attrs->lft.numb_rounds_hard && start_value < BIT_ULL(30))
		n += 1;

	/* Note that the upper limit of soft_value happens naturally because we
	 * always select the lowest soft_value.
	 */

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;

	/* The best solution means: when there are multiple iterations we must
	 * not fall below 2^30 as that would get too close to the false
	 * hard_limit and when we reach an interior iteration for soft_limit it
	 * has to be far away from 2**32-1 which is the counter reset point
	 * after the +2^31 to accommodate latency.
	 */
	attrs->lft.soft_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_soft = (u64)n;
}
258 | ||
37a417ca LR |
/* For tunnel-mode packet offload, fill in the source and destination MAC
 * addresses of the encapsulating Ethernet header. The local MAC is queried
 * from the device; the peer MAC is resolved via the neighbour table. If the
 * neighbour is not yet known, a resolution is kicked off and the SA is
 * temporarily marked to drop traffic until a netevent supplies the address
 * (see mlx5e_ipsec_handle_netdev_event()).
 *
 * NOTE(review): the lookup always uses arp_tbl with the IPv4 address
 * (saddr.a4/daddr.a4) regardless of attrs->family — confirm IPv6 tunnel
 * handling is covered elsewhere.
 */
static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
				  struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct xfrm_state *x = sa_entry->x;
	struct net_device *netdev;
	struct neighbour *n;
	u8 addr[ETH_ALEN];
	const void *pkey;
	u8 *dst, *src;

	/* Only tunnel-mode packet offload carries its own L2 header. */
	if (attrs->mode != XFRM_MODE_TUNNEL ||
	    attrs->type != XFRM_DEV_OFFLOAD_PACKET)
		return;

	netdev = x->xso.real_dev;

	mlx5_query_mac_address(mdev, addr);
	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		/* RX: local MAC is the destination, peer is the source. */
		src = attrs->dmac;
		dst = attrs->smac;
		pkey = &attrs->saddr.a4;
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		/* TX: local MAC is the source, peer is the destination. */
		src = attrs->smac;
		dst = attrs->dmac;
		pkey = &attrs->daddr.a4;
		break;
	default:
		return;
	}

	ether_addr_copy(src, addr);
	n = neigh_lookup(&arp_tbl, pkey, netdev);
	if (!n) {
		n = neigh_create(&arp_tbl, pkey, netdev);
		if (IS_ERR(n))
			return;
		/* Trigger resolution and drop traffic until the peer MAC
		 * is known.
		 */
		neigh_event_send(n, NULL);
		attrs->drop = true;
	} else {
		neigh_ha_snapshot(addr, n, netdev);
		ether_addr_copy(dst, addr);
	}
	neigh_release(n);
}
306 | ||
cee137a6 LR |
/* Translate an XFRM state into the mlx5 accel attribute structure consumed
 * by the crypto/packet offload layers: AES-GCM key material, direction,
 * ESN/replay configuration, SPI, addresses, selectors, lifetime limits,
 * tunnel MACs and optional UDP encapsulation ports.
 *
 * For ESN states, sa_entry->esn_state must already be initialized (see
 * mlx5e_ipsec_update_esn_state()).
 */
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */

	/* iv len */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	attrs->dir = x->xso.dir;

	/* esn */
	if (x->props.flags & XFRM_STATE_ESN) {
		attrs->replay_esn.trigger = true;
		attrs->replay_esn.esn = sa_entry->esn_state.esn;
		attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
		attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
		/* Replay window is only meaningful for ingress SAs. */
		if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
			goto skip_replay_window;

		switch (x->replay_esn->replay_window) {
		case 32:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
			break;
		case 64:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
			break;
		case 128:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
			break;
		case 256:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
			break;
		default:
			/* Unsupported sizes are rejected earlier by
			 * mlx5e_xfrm_validate_state().
			 */
			WARN_ON(true);
			return;
		}
	}

skip_replay_window:
	/* spi */
	attrs->spi = be32_to_cpu(x->id.spi);

	/* source , destination ips */
	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
	attrs->family = x->props.family;
	attrs->type = x->xso.type;
	attrs->reqid = x->props.reqid;
	attrs->upspec.dport = ntohs(x->sel.dport);
	attrs->upspec.dport_mask = ntohs(x->sel.dport_mask);
	attrs->upspec.sport = ntohs(x->sel.sport);
	attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
	attrs->upspec.proto = x->sel.proto;
	attrs->mode = x->props.mode;

	mlx5e_ipsec_init_limits(sa_entry, attrs);
	mlx5e_ipsec_init_macs(sa_entry, attrs);

	/* ESP-in-UDP encapsulation ports (validated earlier). */
	if (x->encap) {
		attrs->encap = true;
		attrs->sport = x->encap->encap_sport;
		attrs->dport = x->encap->encap_dport;
	}
}
399 | ||
902812b8 LR |
/* Validate that an XFRM state can be offloaded by this device.
 *
 * Checks algorithm constraints (AES-GCM-ICV16 only, no auth/compression),
 * address family, protocol, encapsulation, mode, and per-offload-type
 * device capabilities. Returns 0 when offloadable, -EINVAL otherwise with
 * a human-readable reason in @extack.
 */
static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
				     struct xfrm_state *x,
				     struct netlink_ext_ack *extack)
{
	if (x->props.aalgo != SADB_AALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		NL_SET_ERR_MSG_MOD(extack, "Only AES-GCM-ICV16 xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm states may be offloaded");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state may be offloaded");
		return -EINVAL;
	}
	/* ESP-in-UDP encapsulation: requires device capability and is only
	 * supported for transport-mode packet offload.
	 */
	if (x->encap) {
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
			return -EINVAL;
		}

		if (x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation other than UDP is not supported");
			return -EINVAL;
		}

		if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in packet offload mode only");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in transport mode only");
			return -EINVAL;
		}
	}
	if (!x->aead) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 128bit");
		return -EINVAL;
	}
	/* Key length includes a 32-bit salt: AES-128 or AES-256 only. */
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
		return -EINVAL;
	}
	if (x->tfcpad) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
		return -EINVAL;
	}
	if (!x->geniv) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
		return -EINVAL;
	}

	if (x->sel.proto != IPPROTO_IP && x->sel.proto != IPPROTO_UDP &&
	    x->sel.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

	if (x->props.mode != XFRM_MODE_TRANSPORT && x->props.mode != XFRM_MODE_TUNNEL) {
		NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
		return -EINVAL;
	}

	/* Per-offload-type capability and configuration checks. */
	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
			NL_SET_ERR_MSG_MOD(extack, "Crypto offload is not supported");
			return -EINVAL;
		}

		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (!(mlx5_ipsec_device_caps(mdev) &
		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
			return -EINVAL;
		}

		if (x->props.mode == XFRM_MODE_TUNNEL &&
		    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported for tunnel mode");
			return -EINVAL;
		}

		/* Hardware ASO supports a fixed set of replay window sizes
		 * (RX only).
		 */
		if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
		    x->replay_esn->replay_window != 32 &&
		    x->replay_esn->replay_window != 64 &&
		    x->replay_esn->replay_window != 128 &&
		    x->replay_esn->replay_window != 256) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported replay window size");
			return -EINVAL;
		}

		if (!x->props.reqid) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot offload without reqid");
			return -EINVAL;
		}

		if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit &&
		    x->lft.hard_byte_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard byte limit must be greater than soft one");
			return -EINVAL;
		}

		if (!x->lft.soft_byte_limit || !x->lft.hard_byte_limit) {
			NL_SET_ERR_MSG_MOD(extack, "Soft/hard byte limits can't be 0");
			return -EINVAL;
		}

		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
		    x->lft.hard_packet_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
			return -EINVAL;
		}

		if (!x->lft.soft_packet_limit || !x->lft.hard_packet_limit) {
			NL_SET_ERR_MSG_MOD(extack, "Soft/hard packet limits can't be 0");
			return -EINVAL;
		}
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}
	return 0;
}
553 | ||
4562116f | 554 | static void mlx5e_ipsec_modify_state(struct work_struct *_work) |
c674df97 | 555 | { |
4562116f LR |
556 | struct mlx5e_ipsec_work *work = |
557 | container_of(_work, struct mlx5e_ipsec_work, work); | |
558 | struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry; | |
559 | struct mlx5_accel_esp_xfrm_attrs *attrs; | |
c674df97 | 560 | |
4562116f LR |
561 | attrs = &((struct mlx5e_ipsec_sa_entry *)work->data)->attrs; |
562 | ||
563 | mlx5_accel_esp_modify_xfrm(sa_entry, attrs); | |
c674df97 LR |
564 | } |
565 | ||
f4979e26 LR |
566 | static void mlx5e_ipsec_set_esn_ops(struct mlx5e_ipsec_sa_entry *sa_entry) |
567 | { | |
568 | struct xfrm_state *x = sa_entry->x; | |
569 | ||
570 | if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO || | |
571 | x->xso.dir != XFRM_DEV_OFFLOAD_OUT) | |
572 | return; | |
573 | ||
574 | if (x->props.flags & XFRM_STATE_ESN) { | |
575 | sa_entry->set_iv_op = mlx5e_ipsec_set_iv_esn; | |
576 | return; | |
577 | } | |
578 | ||
579 | sa_entry->set_iv_op = mlx5e_ipsec_set_iv; | |
580 | } | |
581 | ||
4c24272b LR |
582 | static void mlx5e_ipsec_handle_netdev_event(struct work_struct *_work) |
583 | { | |
584 | struct mlx5e_ipsec_work *work = | |
585 | container_of(_work, struct mlx5e_ipsec_work, work); | |
586 | struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry; | |
587 | struct mlx5e_ipsec_netevent_data *data = work->data; | |
588 | struct mlx5_accel_esp_xfrm_attrs *attrs; | |
589 | ||
590 | attrs = &sa_entry->attrs; | |
591 | ||
592 | switch (attrs->dir) { | |
593 | case XFRM_DEV_OFFLOAD_IN: | |
594 | ether_addr_copy(attrs->smac, data->addr); | |
595 | break; | |
596 | case XFRM_DEV_OFFLOAD_OUT: | |
597 | ether_addr_copy(attrs->dmac, data->addr); | |
598 | break; | |
599 | default: | |
600 | WARN_ON_ONCE(true); | |
601 | } | |
602 | attrs->drop = false; | |
603 | mlx5e_accel_ipsec_fs_modify(sa_entry); | |
604 | } | |
605 | ||
4562116f LR |
/* Allocate the per-SA work item (and its payload) when the offload type
 * needs one: ESN maintenance for crypto offload, netdev/neighbour events
 * for tunnel-mode packet offload. Returns 0 (possibly without allocating
 * anything) on success, -ENOMEM on allocation failure.
 */
static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	struct mlx5e_ipsec_work *work;
	void *data = NULL;

	/* First pass: decide whether this SA needs a work item at all. */
	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(x->props.flags & XFRM_STATE_ESN))
			return 0;
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (x->props.mode != XFRM_MODE_TUNNEL)
			return 0;
		break;
	default:
		break;
	}

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	/* Second pass: allocate the type-specific payload and bind the
	 * matching handler.
	 */
	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		/* Payload is a full sa_entry used as an attrs snapshot. */
		data = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_modify_state);
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		data = kzalloc(sizeof(struct mlx5e_ipsec_netevent_data),
			       GFP_KERNEL);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_handle_netdev_event);
		break;
	default:
		break;
	}

	work->data = data;
	work->sa_entry = sa_entry;
	sa_entry->work = work;
	return 0;

free_work:
	kfree(work);
	return -ENOMEM;
}
658 | ||
b2f7b01d LR |
659 | static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry) |
660 | { | |
661 | struct xfrm_state *x = sa_entry->x; | |
662 | struct mlx5e_ipsec_dwork *dwork; | |
663 | ||
664 | if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) | |
665 | return 0; | |
666 | ||
b2f7b01d | 667 | if (x->lft.soft_packet_limit == XFRM_INF && |
627aa139 LR |
668 | x->lft.hard_packet_limit == XFRM_INF && |
669 | x->lft.soft_byte_limit == XFRM_INF && | |
670 | x->lft.hard_byte_limit == XFRM_INF) | |
b2f7b01d LR |
671 | return 0; |
672 | ||
673 | dwork = kzalloc(sizeof(*dwork), GFP_KERNEL); | |
674 | if (!dwork) | |
675 | return -ENOMEM; | |
676 | ||
677 | dwork->sa_entry = sa_entry; | |
627aa139 | 678 | INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_sw_limits); |
b2f7b01d LR |
679 | sa_entry->dwork = dwork; |
680 | return 0; | |
681 | } | |
682 | ||
7681a4f5 LR |
/* xfrmdev_ops .xdo_dev_state_add callback: offload a new SA to hardware.
 *
 * Allocates the driver SA entry, validates the state, builds the accel
 * attributes, creates the HW crypto context and steering rules, and
 * registers the SA in the sadb xarray. Temporary SAs originating from the
 * acquire flow get only the entry allocation. On failure, everything
 * acquired so far is unwound via the goto chain in reverse order.
 */
static int mlx5e_xfrm_add_state(struct xfrm_state *x,
				struct netlink_ext_ack *extack)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_ipsec *ipsec;
	struct mlx5e_priv *priv;
	gfp_t gfp;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec)
		return -EOPNOTSUPP;

	ipsec = priv->ipsec;
	/* Acquire-flow SAs are added from atomic context. */
	gfp = (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) ? GFP_ATOMIC : GFP_KERNEL;
	sa_entry = kzalloc(sizeof(*sa_entry), gfp);
	if (!sa_entry)
		return -ENOMEM;

	sa_entry->x = x;
	sa_entry->ipsec = ipsec;
	/* Check if this SA is originated from acquire flow temporary SA */
	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto out;

	err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
	if (err)
		goto err_xfrm;

	/* Prevent eswitch mode changes while IPsec offload is active. */
	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
		err = -EBUSY;
		goto err_xfrm;
	}

	/* check esn */
	if (x->props.flags & XFRM_STATE_ESN)
		mlx5e_ipsec_update_esn_state(sa_entry);

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);

	err = mlx5_ipsec_create_work(sa_entry);
	if (err)
		goto unblock_ipsec;

	err = mlx5e_ipsec_create_dwork(sa_entry);
	if (err)
		goto release_work;

	/* create hw context */
	err = mlx5_ipsec_create_sa_ctx(sa_entry);
	if (err)
		goto release_dwork;

	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
	if (err)
		goto err_hw_ctx;

	/* Tunnel support may have been disabled at FS level even when the
	 * device capability is present (e.g. conflicting encap settings).
	 */
	if (x->props.mode == XFRM_MODE_TUNNEL &&
	    x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
	    !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
		err = -EINVAL;
		goto err_add_rule;
	}

	/* We use *_bh() variant because xfrm_timer_handler(), which runs
	 * in softirq context, can reach our state delete logic and we need
	 * xa_erase_bh() there.
	 */
	err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
			   GFP_KERNEL);
	if (err)
		goto err_add_rule;

	mlx5e_ipsec_set_esn_ops(sa_entry);

	if (sa_entry->dwork)
		queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
				   MLX5_IPSEC_RESCHED);

	/* Mark tunnel SAs so the netevent notifier can find them. */
	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
	    x->props.mode == XFRM_MODE_TUNNEL)
		xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
			    MLX5E_IPSEC_TUNNEL_SA);

out:
	x->xso.offload_handle = (unsigned long)sa_entry;
	return 0;

err_add_rule:
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
	mlx5_ipsec_free_sa_ctx(sa_entry);
release_dwork:
	kfree(sa_entry->dwork);
release_work:
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
unblock_ipsec:
	mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
	kfree(sa_entry);
	NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
	return err;
}
790 | ||
/* xfrmdev_ops .xdo_dev_state_delete callback: unregister the SA from the
 * sadb xarray. Hardware resources are released later in
 * mlx5e_xfrm_free_state(). Acquire-flow temporary SAs were never
 * registered and are skipped.
 */
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *old;

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	/* _bh variant: this can race with xfrm_timer_handler() in softirq
	 * context (see comment at the xa_insert_bh() in add_state).
	 */
	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
	WARN_ON(old != sa_entry);

	if (attrs->mode == XFRM_MODE_TUNNEL &&
	    attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		/* Make sure that no ARP requests are running in parallel */
		flush_workqueue(ipsec->wq);

}
810 | ||
/* xdo_dev_state_free callback: release everything allocated for an
 * offloaded SA. Ordering is deliberate: pending work is cancelled
 * before the steering rule and HW context are destroyed, and only
 * then are the work structures freed.
 */
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

	/* ACQ states never got HW resources; only the entry itself. */
	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto sa_entry_free;

	/* Stop ESN/netevent work before tearing down what it touches. */
	if (sa_entry->work)
		cancel_work_sync(&sa_entry->work->work);

	if (sa_entry->dwork)
		cancel_delayed_work_sync(&sa_entry->dwork->dwork);

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	mlx5_ipsec_free_sa_ctx(sa_entry);
	kfree(sa_entry->dwork);
	/* work->data (shadow entry / netevent data) is a separate
	 * allocation owned by the work item.
	 */
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
	mlx5_eswitch_unblock_ipsec(ipsec->mdev);
sa_entry_free:
	kfree(sa_entry);
}
835 | ||
4c24272b LR |
836 | static int mlx5e_ipsec_netevent_event(struct notifier_block *nb, |
837 | unsigned long event, void *ptr) | |
838 | { | |
839 | struct mlx5_accel_esp_xfrm_attrs *attrs; | |
840 | struct mlx5e_ipsec_netevent_data *data; | |
841 | struct mlx5e_ipsec_sa_entry *sa_entry; | |
842 | struct mlx5e_ipsec *ipsec; | |
843 | struct neighbour *n = ptr; | |
844 | struct net_device *netdev; | |
845 | struct xfrm_state *x; | |
846 | unsigned long idx; | |
847 | ||
848 | if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID)) | |
849 | return NOTIFY_DONE; | |
850 | ||
851 | ipsec = container_of(nb, struct mlx5e_ipsec, netevent_nb); | |
852 | xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) { | |
853 | attrs = &sa_entry->attrs; | |
854 | ||
855 | if (attrs->family == AF_INET) { | |
856 | if (!neigh_key_eq32(n, &attrs->saddr.a4) && | |
857 | !neigh_key_eq32(n, &attrs->daddr.a4)) | |
858 | continue; | |
859 | } else { | |
860 | if (!neigh_key_eq128(n, &attrs->saddr.a4) && | |
861 | !neigh_key_eq128(n, &attrs->daddr.a4)) | |
862 | continue; | |
863 | } | |
864 | ||
865 | x = sa_entry->x; | |
866 | netdev = x->xso.real_dev; | |
867 | data = sa_entry->work->data; | |
868 | ||
869 | neigh_ha_snapshot(data->addr, n, netdev); | |
870 | queue_work(ipsec->wq, &sa_entry->work->work); | |
871 | } | |
872 | ||
873 | return NOTIFY_DONE; | |
874 | } | |
875 | ||
/* Attach IPsec offload support to a netdevice. Allocates the mlx5e_ipsec
 * context, its workqueue, optional ASO and netevent-notifier resources
 * (both capability-gated), and the steering tables. On any failure the
 * goto ladder unwinds in exact reverse order and the device simply comes
 * up without IPsec offload (the function returns void; errors are only
 * logged).
 */
void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec;
	int ret = -ENOMEM;

	if (!mlx5_ipsec_device_caps(priv->mdev)) {
		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		return;

	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
	ipsec->mdev = priv->mdev;
	init_completion(&ipsec->comp);
	ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
				    priv->netdev->name);
	if (!ipsec->wq)
		goto err_wq;

	/* ASO is only needed (and only initialized) for packet offload. */
	if (mlx5_ipsec_device_caps(priv->mdev) &
	    MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
		ret = mlx5e_ipsec_aso_init(ipsec);
		if (ret)
			goto err_aso;
	}

	/* Tunnel mode needs neighbour updates to keep tunnel MACs fresh. */
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL) {
		ipsec->netevent_nb.notifier_call = mlx5e_ipsec_netevent_event;
		ret = register_netevent_notifier(&ipsec->netevent_nb);
		if (ret)
			goto clear_aso;
	}

	ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
	ret = mlx5e_accel_ipsec_fs_init(ipsec, &priv->devcom);
	if (ret)
		goto err_fs_init;

	ipsec->fs = priv->fs;
	/* Publishing priv->ipsec is the last step: nothing else may see a
	 * half-initialized context.
	 */
	priv->ipsec = ipsec;
	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
	return;

err_fs_init:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
		unregister_netevent_notifier(&ipsec->netevent_nb);
clear_aso:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);
err_aso:
	destroy_workqueue(ipsec->wq);
err_wq:
	kfree(ipsec);
	mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
	return;
}
935 | ||
/* Detach IPsec offload from a netdevice: tear down everything
 * mlx5e_ipsec_init() set up, in reverse order. Safe to call when init
 * failed or never ran (priv->ipsec == NULL). The notifier_call and aso
 * pointers double as "was this initialized?" flags, since both are
 * capability-gated at init time.
 */
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(ipsec);
	if (ipsec->netevent_nb.notifier_call) {
		unregister_netevent_notifier(&ipsec->netevent_nb);
		ipsec->netevent_nb.notifier_call = NULL;
	}
	if (ipsec->aso)
		mlx5e_ipsec_aso_cleanup(ipsec);
	/* Workqueue last-but-one: all producers are gone by now. */
	destroy_workqueue(ipsec->wq);
	kfree(ipsec);
	priv->ipsec = NULL;
}
954 | ||
2ac9cfe7 IT |
955 | static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) |
956 | { | |
957 | if (x->props.family == AF_INET) { | |
958 | /* Offload with IPv4 options is not supported yet */ | |
959 | if (ip_hdr(skb)->ihl > 5) | |
960 | return false; | |
961 | } else { | |
962 | /* Offload with IPv6 extension headers is not support yet */ | |
963 | if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) | |
964 | return false; | |
965 | } | |
966 | ||
967 | return true; | |
968 | } | |
969 | ||
cb010083 AY |
/* xdo_dev_state_advance_esn callback: when the SW ESN state has moved
 * far enough to require a HW update, snapshot the new attributes into
 * the preallocated shadow entry and let the workqueue push them to the
 * device. Runs in a context where sleeping/allocating is not allowed,
 * hence the preallocated work->data buffer.
 */
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_work *work = sa_entry->work;
	struct mlx5e_ipsec_sa_entry *sa_entry_shadow;
	bool need_update;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

	/* Rebuild the shadow copy from scratch so the worker sees a
	 * consistent attrs snapshot.
	 */
	sa_entry_shadow = work->data;
	memset(sa_entry_shadow, 0x00, sizeof(*sa_entry_shadow));
	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry_shadow->attrs);
	queue_work(sa_entry->ipsec->wq, &work->work);
}
986 | ||
1ed78fc0 LR |
/* xdo_dev_state_update_curlft callback: fold the HW flow counter's
 * cached packet/byte counts into the SA's current lifetime statistics.
 * mlx5_fc_query_cached() returns the delta since the previous query, so
 * a simple += accumulates correctly.
 */
static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	u64 packets, bytes, lastuse;

	/* Caller must hold either the state lock or the netns xfrm config
	 * mutex; curlft updates are not otherwise serialized.
	 */
	lockdep_assert(lockdep_is_held(&x->lock) ||
		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));

	/* ACQ states have no steering rule and thus no counter. */
	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
	x->curlft.packets += packets;
	x->curlft.bytes += bytes;
}
1003 | ||
fa5aa2f8 PB |
/* Validate that an xfrm policy can be offloaded by this device.
 * Returns 0 when offloadable, -EINVAL otherwise, with the specific
 * reason reported to userspace via extack.
 */
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
				      struct xfrm_policy *x,
				      struct netlink_ext_ack *extack)
{
	struct xfrm_selector *sel = &x->selector;

	if (x->type != XFRM_POLICY_TYPE_MAIN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
		return -EINVAL;
	}

	/* Please pay attention that we support only one template */
	if (x->xfrm_nr > 1) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload more than one template");
		return -EINVAL;
	}

	/* Only plain in/out directions; forward policies are rejected. */
	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload forward policy");
		return -EINVAL;
	}

	/* With reqid 0 the policy needs at least one selector field set,
	 * otherwise it cannot be matched to a state in HW.
	 */
	if (!x->xfrm_vec[0].reqid && sel->proto == IPPROTO_IP &&
	    addr6_all_zero(sel->saddr.a6) && addr6_all_zero(sel->daddr.a6)) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported policy with reqid 0 without at least one of upper protocol or ip addr(s) different than 0");
		return -EINVAL;
	}

	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}

	/* Upper-protocol match is limited to TCP/UDP (or unset). */
	if (x->selector.proto != IPPROTO_IP &&
	    x->selector.proto != IPPROTO_UDP &&
	    x->selector.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

	if (x->priority) {
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
			NL_SET_ERR_MSG_MOD(extack, "Device does not support policy priority");
			return -EINVAL;
		}

		/* U32_MAX is reserved and cannot be expressed in HW. */
		if (x->priority == U32_MAX) {
			NL_SET_ERR_MSG_MOD(extack, "Device does not support requested policy priority");
			return -EINVAL;
		}
	}

	if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
		NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
		return -EINVAL;
	}

	return 0;
}
1065 | ||
1066 | static void | |
1067 | mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry, | |
1068 | struct mlx5_accel_pol_xfrm_attrs *attrs) | |
1069 | { | |
1070 | struct xfrm_policy *x = pol_entry->x; | |
1071 | struct xfrm_selector *sel; | |
1072 | ||
1073 | sel = &x->selector; | |
1074 | memset(attrs, 0, sizeof(*attrs)); | |
1075 | ||
1076 | memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr)); | |
1077 | memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr)); | |
1078 | attrs->family = sel->family; | |
1079 | attrs->dir = x->xdo.dir; | |
1080 | attrs->action = x->action; | |
1081 | attrs->type = XFRM_DEV_OFFLOAD_PACKET; | |
67212396 | 1082 | attrs->reqid = x->xfrm_vec[0].reqid; |
a7385187 RS |
1083 | attrs->upspec.dport = ntohs(sel->dport); |
1084 | attrs->upspec.dport_mask = ntohs(sel->dport_mask); | |
1085 | attrs->upspec.sport = ntohs(sel->sport); | |
1086 | attrs->upspec.sport_mask = ntohs(sel->sport_mask); | |
1087 | attrs->upspec.proto = sel->proto; | |
fa5aa2f8 | 1088 | attrs->prio = x->priority; |
a5b8ca94 LR |
1089 | } |
1090 | ||
3089386d LR |
/* xdo_dev_policy_add callback: validate and install a packet-offload
 * policy in HW steering. On success, the pol_entry pointer is stashed
 * in x->xdo.offload_handle for the delete/free callbacks. The eswitch
 * ipsec block taken here is released either in the error path or in
 * mlx5e_xfrm_del_policy().
 */
static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
				 struct netlink_ext_ack *extack)
{
	struct net_device *netdev = x->xdo.real_dev;
	struct mlx5e_ipsec_pol_entry *pol_entry;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet offload");
		return -EOPNOTSUPP;
	}

	err = mlx5e_xfrm_validate_policy(priv->mdev, x, extack);
	if (err)
		return err;

	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
	if (!pol_entry)
		return -ENOMEM;

	pol_entry->x = x;
	pol_entry->ipsec = priv->ipsec;

	/* IPsec offload and certain eswitch modes are mutually
	 * exclusive; fail if the eswitch refuses the block.
	 */
	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
		err = -EBUSY;
		goto ipsec_busy;
	}

	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
	if (err)
		goto err_fs;

	x->xdo.offload_handle = (unsigned long)pol_entry;
	return 0;

err_fs:
	mlx5_eswitch_unblock_ipsec(priv->mdev);
ipsec_busy:
	kfree(pol_entry);
	NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
	return err;
}
1136 | ||
cf5bb023 | 1137 | static void mlx5e_xfrm_del_policy(struct xfrm_policy *x) |
a5b8ca94 LR |
1138 | { |
1139 | struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x); | |
1140 | ||
1141 | mlx5e_accel_ipsec_fs_del_pol(pol_entry); | |
8efd7b17 | 1142 | mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev); |
cf5bb023 LR |
1143 | } |
1144 | ||
/* xdo_dev_policy_free callback: release the driver-side policy entry.
 * All HW state was already torn down in mlx5e_xfrm_del_policy().
 */
static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
{
	kfree(to_ipsec_pol_entry(x));
}
1151 | ||
547eede0 IT |
/* xfrm device offload ops installed on the netdevice by
 * mlx5e_ipsec_build_netdev(). State ops cover both crypto and packet
 * offload; the policy and lifetime ops are only exercised for packet
 * offload.
 */
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,

	.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
	.xdo_dev_policy_add	= mlx5e_xfrm_add_policy,
	.xdo_dev_policy_delete	= mlx5e_xfrm_del_policy,
	.xdo_dev_policy_free	= mlx5e_xfrm_free_policy,
};
1164 | ||
547eede0 IT |
/* Advertise IPsec offload features on the netdevice, progressively:
 * basic ESP offload is always set when the device has any IPsec caps;
 * checksum and then GSO features are added only if the corresponding
 * software-parser HW capabilities exist, returning early at the first
 * missing capability.
 */
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!mlx5_ipsec_device_caps(mdev))
		return;

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");

	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}