// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_app.h"
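/* One entry per distinct match mask currently offloaded. The firmware
 * identifies masks by a small id; ref_cnt tracks how many flows share it.
 */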
struct nfp_mask_id_table {
	struct hlist_node link;
	u32 hash_key;
	u32 ref_cnt;
	u8 mask_id;
};
struct nfp_fl_flow_table_cmp_arg {
	struct net_device *netdev;
	unsigned long cookie;
};
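/* Push a freed stats context id onto the free-list ring so it can be
 * handed out again by nfp_get_stats_entry().
 */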
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	/* Check if buffer is full. */
	if (!CIRC_SPACE(ring->head, ring->tail,
			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
			NFP_FL_STATS_ELEM_RS + 1))
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}
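/* Allocate a stats context id for a new flow. Never-used ids are handed
 * out first; after that, ids are recycled from the free-list ring.
 */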
static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;
	/* Check for unallocated entries first. */
	if (priv->stats_ids.init_unalloc > 0) {
		*stats_context_id = priv->stats_ids.init_unalloc - 1;
		priv->stats_ids.init_unalloc--;
		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev)
{
	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
	struct nfp_flower_priv *priv = app->priv;

	flower_cmp_arg.netdev = netdev;
	flower_cmp_arg.cookie = tc_flower_cookie;

	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
				      nfp_flower_table_params);
}
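/* Handle a stats update control message from the firmware. The message
 * payload is an array of stats frames, each carrying packet/byte deltas
 * for one stats context id.
 */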
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}
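/* Return a mask id to the free-list ring and record the release time so
 * that nfp_mask_alloc() can avoid handing the id out again too soon.
 */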
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
	/* Checking if buffer is full. */
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}
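/* Allocate a mask id. Never-used ids are preferred; a recycled id is only
 * reused once NFP_FL_MASK_REUSE_TIME_NS has passed since its release,
 * giving the firmware time to retire the old mask.
 */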
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	ktime_t reuse_timeout;
	struct circ_buf *ring;
	u8 temp_id, freed_id;

	ring = &priv->mask_ids.mask_id_free_list;
	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
	/* Checking for unallocated entries first. */
	if (priv->mask_ids.init_unallocated > 0) {
		*mask_id = priv->mask_ids.init_unallocated;
		priv->mask_ids.init_unallocated--;
		return 0;
	}

	/* Checking if buffer is empty. */
	if (ring->head == ring->tail)
		goto err_not_found;

	memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
	*mask_id = temp_id;

	reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
				     NFP_FL_MASK_REUSE_TIME_NS);

	if (ktime_before(ktime_get(), reuse_timeout))
		goto err_not_found;

	memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	return 0;

err_not_found:
	*mask_id = freed_id;
	return -ENOENT;
}
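/* Allocate a mask id for a new mask and add it to the mask table, keyed by
 * the jhash of the mask data. Returns the new mask id or a negative errno.
 */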
static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;
	u8 mask_id;

	if (nfp_mask_alloc(app, &mask_id))
		return -ENOENT;

	mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
	if (!mask_entry) {
		nfp_release_mask_id(app, mask_id);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&mask_entry->link);
	mask_entry->mask_id = mask_id;
	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
	mask_entry->hash_key = hash_key;
	mask_entry->ref_cnt = 1;
	hash_add(priv->mask_table, &mask_entry->link, hash_key);

	return mask_id;
}
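/* Look up a mask entry by the jhash of its mask data. Note that only the
 * hash is compared, not the mask data itself.
 */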
static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;

	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

	hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
		if (mask_entry->hash_key == hash_key)
			return mask_entry;

	return NULL;
}
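/* Find an existing mask entry and take a reference on it. Returns the mask
 * id (promoted to int) or -ENOENT if the mask is not in the table.
 */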
static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return -ENOENT;

	mask_entry->ref_cnt++;

	/* Casting u8 to int for later use. */
	return mask_entry->mask_id;
}
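/* Ensure a mask id exists for the given mask data, allocating one if
 * needed. Sets NFP_FL_META_FLAG_MANAGE_MASK when this flow must inform the
 * firmware about the new mask. Returns false if no id could be obtained.
 */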
static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
		   u8 *meta_flags, u8 *mask_id)
{
	int id;

	id = nfp_find_in_mask_table(app, mask_data, mask_len);
	if (id < 0) {
		id = nfp_add_mask_table(app, mask_data, mask_len);
		if (id < 0)
			return false;
		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}
	*mask_id = id;

	return true;
}
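/* Drop one reference on the mask entry for the given mask data, freeing
 * the entry and releasing its id when the last user goes away. Returns
 * false if the mask is unknown.
 */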
static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
		      u8 *meta_flags, u8 *mask_id)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return false;

	if (meta_flags)
		*meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;

	*mask_id = mask_entry->mask_id;
	mask_entry->ref_cnt--;
	if (!mask_entry->ref_cnt) {
		hash_del(&mask_entry->link);
		nfp_release_mask_id(app, *mask_id);
		kfree(mask_entry);
		if (meta_flags)
			*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}

	return true;
}
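/* Fill in the metadata section of a flow payload: stats context, host
 * cookie, flow version and mask id. Fails with -EEXIST if a flow with the
 * same cookie is already offloaded on this netdev.
 */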
int nfp_compile_flow_metadata(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *check_entry;
	u8 new_mask_id;
	u32 stats_cxt;

	if (nfp_get_stats_entry(app, &stats_cxt))
		return -ENOENT;

	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
	nfp_flow->ingress_dev = netdev;

	new_mask_id = 0;
	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
				nfp_flow->meta.mask_len,
				&nfp_flow->meta.flags, &new_mask_id)) {
		if (nfp_release_stats_entry(app, stats_cxt))
			return -EINVAL;
		return -ENOENT;
	}

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
	priv->stats[stats_cxt].pkts = 0;
	priv->stats[stats_cxt].bytes = 0;
	priv->stats[stats_cxt].used = jiffies;

	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (check_entry) {
		if (nfp_release_stats_entry(app, stats_cxt))
			return -EINVAL;

		if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
					   nfp_flow->meta.mask_len,
					   NULL, &new_mask_id))
			return -EINVAL;

		return -EEXIST;
	}

	return 0;
}
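/* Refresh metadata for a flow that is being replaced: drop the old mask
 * reference, bump the flow version and release the old stats context id.
 */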
int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	u8 new_mask_id = 0;
	u32 temp_ctx_id;

	nfp_check_mask_remove(app, nfp_flow->mask_data,
			      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
			      &new_mask_id);

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

	/* Release the stats ctx id. */
	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	return nfp_release_stats_entry(app, temp_ctx_id);
}
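/* rhashtable callbacks: flows are keyed on (tc flower cookie, ingress
 * netdev). Only the cookie is hashed; the compare callback also checks the
 * ingress device.
 */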
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
			    const void *obj)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
	const struct nfp_fl_payload *flow_entry = obj;

	if (flow_entry->ingress_dev == cmp_arg->netdev)
		return flow_entry->tc_flower_cookie != cmp_arg->cookie;

	return 1;
}
static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *flower_entry = data;

	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
		      seed);
}
static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

	return jhash2((u32 *)&cmp_arg->cookie,
		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}
const struct rhashtable_params nfp_flower_table_params = {
	.head_offset		= offsetof(struct nfp_fl_payload, fl_node),
	.hashfn			= nfp_fl_key_hashfn,
	.obj_cmpfn		= nfp_fl_obj_cmpfn,
	.obj_hashfn		= nfp_fl_obj_hashfn,
	.automatic_shrinking	= true,
};
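/* Allocate the mask id pool, the stats id pool and the stats array, and
 * initialise the flow table. Everything allocated here is undone either on
 * the error paths below or in nfp_flower_metadata_cleanup().
 */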
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_flow_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask ids. */
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = host_ctx_count;

	priv->stats = kvmalloc_array(priv->stats_ring_size,
				     sizeof(struct nfp_fl_stats), GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}
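/* Tear down everything set up by nfp_flower_metadata_init(). */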
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv)
		return;

	rhashtable_free_and_destroy(&priv->flow_table,
				    nfp_check_rhashtable_empty, NULL);
	kvfree(priv->stats);
	kfree(priv->mask_ids.mask_id_free_list.buf);
	kfree(priv->mask_ids.last_used);
	vfree(priv->stats_ids.free_list.buf);
}