/* Copyright (c) 2014 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <brcmu_utils.h>

#include "core.h"
#include "debug.h"
#include "bus.h"
#include "proto.h"
#include "flowring.h"
#include "msgbuf.h"
#include "common.h"

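/* Per-ring queue depth watermarks used for host flow control: transmission
 * from the stack is blocked once a ring's backlog exceeds the high watermark
 * and resumed when it drains below the low watermark.
 */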
#define BRCMF_FLOWRING_HIGH             1024
#define BRCMF_FLOWRING_LOW              (BRCMF_FLOWRING_HIGH - 256)
#define BRCMF_FLOWRING_INVALID_IFIDX    0xff

#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)

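/* Map 802.1d priority to a TX fifo; priorities sharing an access category
 * map to the same fifo, ordered from background (0) up to voice (3).
 */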
static const u8 brcmf_flowring_prio2fifo[] = {
        1,
        0,
        0,
        1,
        2,
        2,
        3,
        3
};


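/* Check whether @mac belongs to a peer on the TDLS peer list. */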
static bool
brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN])
{
        struct brcmf_flowring_tdls_entry *search;

        search = flow->tdls_entry;

        while (search) {
                if (memcmp(search->mac, mac, ETH_ALEN) == 0)
                        return true;
                search = search->next;
        }

        return false;
}


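/* Look up the flowring id for destination @da, priority @prio and interface
 * @ifidx. For interfaces using indirect addressing (STA) only the fifo and
 * interface index form the hash key, unless @da is an active TDLS peer;
 * otherwise the key also includes the destination MAC, with all multicast
 * traffic collapsed onto the broadcast address and fifo 0. Returns
 * BRCMF_FLOWRING_INVALID_ID when no matching ring exists.
 */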
u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
                          u8 prio, u8 ifidx)
{
        struct brcmf_flowring_hash *hash;
        u8 hash_idx;
        u32 i;
        bool found;
        bool sta;
        u8 fifo;
        u8 *mac;

        fifo = brcmf_flowring_prio2fifo[prio];
        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
        mac = da;
        if ((!sta) && (is_multicast_ether_addr(da))) {
                mac = (u8 *)ALLFFMAC;
                fifo = 0;
        }
        if ((sta) && (flow->tdls_active) &&
            (brcmf_flowring_is_tdls_mac(flow, da))) {
                sta = false;
        }
        hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
                         BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
        found = false;
        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) &&
                    (hash[hash_idx].fifo == fifo) &&
                    (hash[hash_idx].ifidx == ifidx)) {
                        found = true;
                        break;
                }
                hash_idx++;
        }
        if (found)
                return hash[hash_idx].flowid;

        return BRCMF_FLOWRING_INVALID_ID;
}


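/* Allocate a flowring for the (@da, @prio, @ifidx) tuple using the same
 * hashing rules as brcmf_flowring_lookup(). The new ring starts out in the
 * RING_CLOSED state. Returns the flowring id, BRCMF_FLOWRING_INVALID_ID when
 * no free hash entry is available, or -ENOMEM on allocation failure.
 */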
u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
                          u8 prio, u8 ifidx)
{
        struct brcmf_flowring_ring *ring;
        struct brcmf_flowring_hash *hash;
        u8 hash_idx;
        u32 i;
        bool found;
        u8 fifo;
        bool sta;
        u8 *mac;

        fifo = brcmf_flowring_prio2fifo[prio];
        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
        mac = da;
        if ((!sta) && (is_multicast_ether_addr(da))) {
                mac = (u8 *)ALLFFMAC;
                fifo = 0;
        }
        if ((sta) && (flow->tdls_active) &&
            (brcmf_flowring_is_tdls_mac(flow, da))) {
                sta = false;
        }
        hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
                         BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
        found = false;
        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
                    (is_zero_ether_addr(hash[hash_idx].mac))) {
                        found = true;
                        break;
                }
                hash_idx++;
        }
        if (found) {
                for (i = 0; i < flow->nrofrings; i++) {
                        if (flow->rings[i] == NULL)
                                break;
                }
                if (i == flow->nrofrings)
                        return -ENOMEM;

                ring = kzalloc(sizeof(*ring), GFP_ATOMIC);
                if (!ring)
                        return -ENOMEM;

                memcpy(hash[hash_idx].mac, mac, ETH_ALEN);
                hash[hash_idx].fifo = fifo;
                hash[hash_idx].ifidx = ifidx;
                hash[hash_idx].flowid = i;

                ring->hash_id = hash_idx;
                ring->status = RING_CLOSED;
                skb_queue_head_init(&ring->skblist);
                flow->rings[i] = ring;

                return i;
        }
        return BRCMF_FLOWRING_INVALID_ID;
}


u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        return flow->hash[ring->hash_id].fifo;
}


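/* Propagate flow-control state for the interface that owns @flowid. The
 * netif queues are only stopped or woken when the interface's overall
 * blocked state actually changes, i.e. when no other open ring on the same
 * interface is already blocked.
 */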
static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
                                 bool blocked)
{
        struct brcmf_flowring_ring *ring;
        struct brcmf_bus *bus_if;
        struct brcmf_pub *drvr;
        struct brcmf_if *ifp;
        bool currently_blocked;
        int i;
        u8 ifidx;
        unsigned long flags;

        spin_lock_irqsave(&flow->block_lock, flags);

        ring = flow->rings[flowid];
        ifidx = brcmf_flowring_ifidx_get(flow, flowid);

        currently_blocked = false;
        for (i = 0; i < flow->nrofrings; i++) {
                if (flow->rings[i]) {
                        ring = flow->rings[i];
                        if ((ring->status == RING_OPEN) &&
                            (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
                                if (ring->blocked) {
                                        currently_blocked = true;
                                        break;
                                }
                        }
                }
        }
        ring->blocked = blocked;
        if (currently_blocked == blocked) {
                spin_unlock_irqrestore(&flow->block_lock, flags);
                return;
        }

        bus_if = dev_get_drvdata(flow->dev);
        drvr = bus_if->drvr;
        ifp = drvr->iflist[ifidx];
        brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);

        spin_unlock_irqrestore(&flow->block_lock, flags);
}


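/* Tear down @flowid: lift flow control for the owning interface, release the
 * ring's hash entry and free any queued packets along with the ring itself.
 */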
void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;
        u8 hash_idx;
        struct sk_buff *skb;

        ring = flow->rings[flowid];
        if (!ring)
                return;
        brcmf_flowring_block(flow, flowid, false);
        hash_idx = ring->hash_id;
        flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
        eth_zero_addr(flow->hash[hash_idx].mac);
        flow->rings[flowid] = NULL;

        skb = skb_dequeue(&ring->skblist);
        while (skb) {
                brcmu_pkt_buf_free_skb(skb);
                skb = skb_dequeue(&ring->skblist);
        }

        kfree(ring);
}


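/* Queue @skb on @flowid and apply back-pressure once the backlog exceeds
 * BRCMF_FLOWRING_HIGH. Returns the resulting queue length.
 */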
u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
                           struct sk_buff *skb)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        skb_queue_tail(&ring->skblist, skb);

        if (!ring->blocked &&
            (skb_queue_len(&ring->skblist) > BRCMF_FLOWRING_HIGH)) {
                brcmf_flowring_block(flow, flowid, true);
                brcmf_dbg(MSGBUF, "Flowcontrol: BLOCK for ring %d\n", flowid);
                /* Re-check the queue length to work around a possible race
                 * with the dequeue path. Taking a lock on every enqueue and
                 * dequeue would be undesirable, so this second check simply
                 * unblocks again if the queue drained in the meantime.
                 */
                if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
                        brcmf_flowring_block(flow, flowid, false);
        }
        return skb_queue_len(&ring->skblist);
}


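/* Take the next packet from an open ring, releasing back-pressure once the
 * backlog drops below BRCMF_FLOWRING_LOW. Returns NULL if the ring is not
 * open or is empty.
 */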
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;
        struct sk_buff *skb;

        ring = flow->rings[flowid];
        if (ring->status != RING_OPEN)
                return NULL;

        skb = skb_dequeue(&ring->skblist);

        if (ring->blocked &&
            (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)) {
                brcmf_flowring_block(flow, flowid, false);
                brcmf_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid);
        }

        return skb;
}


void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
                             struct sk_buff *skb)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        skb_queue_head(&ring->skblist, skb);
}


u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];
        if (!ring)
                return 0;

        if (ring->status != RING_OPEN)
                return 0;

        return skb_queue_len(&ring->skblist);
}


void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];
        if (!ring) {
                brcmf_err("Ring NULL for flowid %d\n", flowid);
                return;
        }

        ring->status = RING_OPEN;
}


u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;
        u8 hash_idx;

        ring = flow->rings[flowid];
        hash_idx = ring->hash_id;

        return flow->hash[hash_idx].ifidx;
}


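/* Allocate the flowring bookkeeping for @nrofrings rings on behalf of the
 * bus device @dev. All interfaces start in indirect address mode and every
 * hash entry is marked unused. Returns NULL on allocation failure.
 */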
struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings)
{
        struct brcmf_flowring *flow;
        u32 i;

        flow = kzalloc(sizeof(*flow), GFP_KERNEL);
        if (flow) {
                flow->dev = dev;
                flow->nrofrings = nrofrings;
                spin_lock_init(&flow->block_lock);
                for (i = 0; i < ARRAY_SIZE(flow->addr_mode); i++)
                        flow->addr_mode[i] = ADDR_INDIRECT;
                for (i = 0; i < ARRAY_SIZE(flow->hash); i++)
                        flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
                flow->rings = kcalloc(nrofrings, sizeof(*flow->rings),
                                      GFP_KERNEL);
                if (!flow->rings) {
                        kfree(flow);
                        flow = NULL;
                }
        }

        return flow;
}


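/* Undo brcmf_flowring_attach(): request deletion of all remaining rings
 * through msgbuf, then free the TDLS peer list and the flowring state.
 */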
void brcmf_flowring_detach(struct brcmf_flowring *flow)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_flowring_tdls_entry *search;
        struct brcmf_flowring_tdls_entry *remove;
        u8 flowid;

        for (flowid = 0; flowid < flow->nrofrings; flowid++) {
                if (flow->rings[flowid])
                        brcmf_msgbuf_delete_flowring(drvr, flowid);
        }

        search = flow->tdls_entry;
        while (search) {
                remove = search;
                search = search->next;
                kfree(remove);
        }
        kfree(flow->rings);
        kfree(flow);
}


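/* Switch @ifidx to a new protocol address mode. Any open rings that were
 * created for the interface under the old mode are closed so that future
 * traffic re-creates them with the proper hashing.
 */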
void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
                                        enum proto_addr_mode addr_mode)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        u32 i;
        u8 flowid;

        if (flow->addr_mode[ifidx] != addr_mode) {
                for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
                        if (flow->hash[i].ifidx == ifidx) {
                                flowid = flow->hash[i].flowid;
                                if (flow->rings[flowid]->status != RING_OPEN)
                                        continue;
                                flow->rings[flowid]->status = RING_CLOSING;
                                brcmf_msgbuf_delete_flowring(drvr, flowid);
                        }
                }
                flow->addr_mode[ifidx] = addr_mode;
        }
}


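/* A peer has gone away: close all rings carrying traffic to @peer on @ifidx
 * and, if the peer was a TDLS peer, unlink it from the TDLS list.
 */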
void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
                                u8 peer[ETH_ALEN])
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_flowring_hash *hash;
        struct brcmf_flowring_tdls_entry *prev;
        struct brcmf_flowring_tdls_entry *search;
        u32 i;
        u8 flowid;
        bool sta;

        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);

        search = flow->tdls_entry;
        prev = NULL;
        while (search) {
                if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
                        sta = false;
                        break;
                }
                prev = search;
                search = search->next;
        }

        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
                    (hash[i].ifidx == ifidx)) {
                        flowid = flow->hash[i].flowid;
                        if (flow->rings[flowid]->status == RING_OPEN) {
                                flow->rings[flowid]->status = RING_CLOSING;
                                brcmf_msgbuf_delete_flowring(drvr, flowid);
                        }
                }
        }

        if (search) {
                if (prev)
                        prev->next = search->next;
                else
                        flow->tdls_entry = search->next;
                kfree(search);
                if (flow->tdls_entry == NULL)
                        flow->tdls_active = false;
        }
}


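/* Register @peer as a TDLS peer so that its traffic gets dedicated per-MAC
 * flowrings. Duplicate entries are ignored.
 */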
void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
                                  u8 peer[ETH_ALEN])
{
        struct brcmf_flowring_tdls_entry *tdls_entry;
        struct brcmf_flowring_tdls_entry *search;

        tdls_entry = kzalloc(sizeof(*tdls_entry), GFP_ATOMIC);
        if (tdls_entry == NULL)
                return;

        memcpy(tdls_entry->mac, peer, ETH_ALEN);
        tdls_entry->next = NULL;
        if (flow->tdls_entry == NULL) {
                flow->tdls_entry = tdls_entry;
        } else {
                search = flow->tdls_entry;
                if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
                        /* peer already listed, drop the duplicate entry */
                        kfree(tdls_entry);
                        return;
                }
                while (search->next) {
                        search = search->next;
                        if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
                                /* peer already listed, drop the duplicate */
                                kfree(tdls_entry);
                                return;
                        }
                }
                search->next = tdls_entry;
        }

        flow->tdls_active = true;
}