/*
 * Intel Wireless WiMAX Connection 2400m
 * Handle incoming traffic and deliver it to the control or data planes
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Use skb_clone(), break up processing in chunks
 *  - Split transport/device specific
 *  - Make buffer size dynamic to exert less memory pressure
 *  - RX reorder support
 *
 * This handles the RX path.
 *
 * We receive an RX message from the bus-specific driver, which
 * contains one or more payloads that have potentially different
 * destinations (data or control paths).
 *
 * So we just take that payload from the transport-specific code in
 * the form of an skb, break it up in chunks (a cloned skb each in the
 * case of network packets) and pass it to netdev or to the
 * command/ack handler (and from there to the WiMAX stack).
 *
 * PROTOCOL FORMAT
 *
 * The format of the buffer is:
 *
 *   HEADER                (struct i2400m_msg_hdr)
 *   PAYLOAD DESCRIPTOR 0  (struct i2400m_pld)
 *   PAYLOAD DESCRIPTOR 1
 *   ...
 *   PAYLOAD DESCRIPTOR N
 *   PAYLOAD 0             (raw bytes)
 *   PAYLOAD 1
 *   ...
 *   PAYLOAD N
 *
 * See tx.c for a deeper description of the alignment requirements
 * and other fun facts about this format.
 *
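 * As a rough orientation, this is how a parser walks such a buffer;
 * it mirrors what i2400m_rx() does below ('buf' is a hypothetical
 * pointer to the start of the message, error checks omitted):
 *
 *   struct i2400m_msg_hdr *hdr = (void *) buf;
 *   unsigned i, num_pls = le16_to_cpu(hdr->num_pls);
 *   size_t itr = ALIGN(sizeof(*hdr) + num_pls * sizeof(hdr->pld[0]),
 *                      I2400M_PL_ALIGN);
 *   for (i = 0; i < num_pls; i++) {
 *       size_t pl_size = i2400m_pld_size(&hdr->pld[i]);
 *       ...payload i is the pl_size bytes at buf + itr...
 *       itr += ALIGN(pl_size, I2400M_PL_ALIGN);
 *   }
 *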
 * DATA PACKETS
 *
 * In firmwares <= v1.3, data packets have no header for RX, but they
 * do for TX (currently unused).
 *
 * In firmware >= 1.4, RX packets have an extended header (16
 * bytes). This header conveys information for management of host
 * reordering of packets (the device offloads storage of the packets
 * for reordering to the host). Read below for more information.
 *
 * The header is used as dummy space to emulate an ethernet header and
 * thus be able to act as an ethernet device without having to reallocate.
 *
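 * For illustration, the layout of such an extended data payload looks
 * like this (sizes assuming the 16-byte struct i2400m_pl_edata_hdr):
 *
 *   | edata header (16 bytes) | IP packet ...................... |
 *                             ^
 *   i2400m_rx_edata() pulls the skb up to this point; since ETH_HLEN
 *   fits in the header's space (there is a BUILD_BUG_ON check for
 *   that), the fake ethernet header can later be pushed back into it
 *   without copying the packet around.
 *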
 * DATA RX REORDERING
 *
 * Starting in firmware v1.4, the device can deliver packets with
 * special reordering information; this allows it to more effectively
 * do packet management when some frames were lost in the radio
 * traffic.
 *
 * Thus, for RX packets that come out of order, the device gives the
 * driver enough information to queue them properly and then, at some
 * point, the signal to deliver the whole (or part) of the queued
 * packets to the networking stack. There are 16 such queues.
 *
 * This only happens when a packet comes in with the "need reorder"
 * flag set in the RX header. When that bit is set, the following
 * operations might be indicated:
 *
 *  - reset queue: send all queued packets to the OS
 *
 *  - queue: queue a packet
 *
 *  - update ws: update the queue's window start and deliver queued
 *    packets that meet the criteria
 *
 *  - queue & update ws: queue a packet, update the window start and
 *    deliver queued packets that meet the criteria
 *
 * (delivery criteria: the packet's [normalized] sequence number is
 * lower than the new [normalized] window start).
 *
 * See the i2400m_roq_*() functions for details.
 *
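 * A worked example of the delivery criteria, with hypothetical
 * numbers for illustration: say a queue has window start ws = 2040
 * and holds a packet with sn = 10, which normalizes to
 * nsn = (10 - 2040) mod 2048 = 18. If an "update ws" for sn = 2046
 * then arrives, the new normalized window start is
 * (2046 - 2040) mod 2048 = 6; only queued packets with nsn < 6 are
 * delivered, so the sn = 10 packet stays queued waiting for the gap
 * to be filled.
 *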
 * ROADMAP
 *
 * i2400m_rx
 *   i2400m_rx_msg_hdr_check
 *   i2400m_rx_pl_descr_check
 *   i2400m_rx_payload
 *     i2400m_net_rx
 *     i2400m_rx_edata
 *       i2400m_net_erx
 *       i2400m_roq_reset
 *         i2400m_net_erx
 *       i2400m_roq_queue
 *         __i2400m_roq_queue
 *       i2400m_roq_update_ws
 *         __i2400m_roq_update_ws
 *           i2400m_net_erx
 *       i2400m_roq_queue_update_ws
 *         __i2400m_roq_queue
 *         __i2400m_roq_update_ws
 *           i2400m_net_erx
 *     i2400m_rx_ctl
 *       i2400m_msg_size_check
 *       i2400m_report_hook_work [in a workqueue]
 *         i2400m_report_hook
 *       wimax_msg_to_user
 *       i2400m_rx_ctl_ack
 *         wimax_msg_to_user_alloc
 *     i2400m_rx_trace
 *       i2400m_msg_size_check
 *       wimax_msg
 */
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include "i2400m.h"


#define D_SUBMODULE rx
#include "debug-levels.h"

struct i2400m_report_hook_args {
	struct sk_buff *skb_rx;
	const struct i2400m_l3l4_hdr *l3l4_hdr;
	size_t size;
};


/*
 * Execute i2400m_report_hook in a workqueue
 *
 * Unpacks arguments from the deferred call, executes it and then
 * drops the references.
 *
 * Obvious NOTE: References are needed because we are a separate
 * thread; otherwise the buffer changes under us because it is
 * released by the original caller.
 */
static
void i2400m_report_hook_work(struct work_struct *ws)
{
	struct i2400m_work *iw =
		container_of(ws, struct i2400m_work, ws);
	struct i2400m_report_hook_args *args = (void *) iw->pl;
	if (iw->i2400m->ready)
		i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
	kfree_skb(args->skb_rx);
	i2400m_put(iw->i2400m);
	kfree(iw);
}


/*
 * Process an ack to a command
 *
 * @i2400m: device descriptor
 * @payload: pointer to message
 * @size: size of the message
 *
 * Pass the acknowledgment (in an skb) to the thread that is waiting
 * for it in i2400m->msg_completion.
 *
 * We need to coordinate properly with the thread waiting for the
 * ack. Check if it is waiting or if it is gone. We release the
 * spinlock to avoid allocating in atomic context (yeah, we could use
 * GFP_ATOMIC, but this is not so speed critical).
 */
static
void i2400m_rx_ctl_ack(struct i2400m *i2400m,
		       const void *payload, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	unsigned long flags;
	struct sk_buff *ack_skb;

	/* Anyone waiting for an answer? */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
		dev_err(dev, "Huh? reply to command with no waiters\n");
		goto error_no_waiter;
	}
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);

	ack_skb = wimax_msg_alloc(wimax_dev, NULL, payload, size, GFP_KERNEL);

	/* Check the waiter didn't time out waiting for the answer... */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
		d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
		goto error_waiter_cancelled;
	}
	if (ack_skb == NULL) {
		dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
		i2400m->ack_skb = ERR_PTR(-ENOMEM);
	} else
		i2400m->ack_skb = ack_skb;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	complete(&i2400m->msg_completion);
	return;

error_waiter_cancelled:
	kfree_skb(ack_skb);
error_no_waiter:
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	return;
}

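/*
 * For orientation, the waiter side (i2400m_msg_to_dev()) roughly
 * follows this pattern -- a sketch reconstructed from the protocol
 * described above, not verbatim code:
 *
 *	set i2400m->ack_skb = ERR_PTR(-EINPROGRESS)    [under rx_lock]
 *	send the command to the device
 *	wait_for_completion(&i2400m->msg_completion)
 *	take i2400m->ack_skb (an skb or an ERR_PTR)    [under rx_lock]
 */
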

/*
 * Receive and process a control payload
 *
 * @i2400m: device descriptor
 * @skb_rx: skb that contains the payload (for reference counting)
 * @payload: pointer to message
 * @size: size of the message
 *
 * There are two types of control RX messages: reports (asynchronous,
 * like your every day interrupts) and 'acks' (responses to a command,
 * get or set request).
 *
 * If it is a report, we run hooks on it (to extract information for
 * things we need to do in the driver) and then pass it over to the
 * WiMAX stack to send it to user space.
 *
 * NOTE: report processing is done in a workqueue specific to the
 * generic driver, to avoid deadlocks in the system.
 *
 * If it is not a report, it is an ack to a previously executed
 * command, set or get, so wake up whoever is waiting for it from
 * i2400m_msg_to_dev(). i2400m_rx_ctl_ack() takes care of that.
 *
 * Note that the sizes we pass to other functions from here are the
 * sizes of the _l3l4_hdr + payload, not full buffer sizes, as we have
 * verified in _msg_size_check() that they are congruent.
 *
 * For reports: we can't clone the original skb where the data is
 * because we need to send this up via netlink; netlink has to add
 * headers and we can't overwrite what's preceding the payload...as
 * it is another message. So we just dup them.
 */
static
void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   const void *payload, size_t size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
	unsigned msg_type;

	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
	if (result < 0) {
		dev_err(dev, "HW BUG? device sent a bad message: %d\n",
			result);
		goto error_check;
	}
	msg_type = le16_to_cpu(l3l4_hdr->type);
	d_printf(1, dev, "%s 0x%04x: %zu bytes\n",
		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
		 msg_type, size);
	d_dump(2, dev, l3l4_hdr, size);
	if (msg_type & I2400M_MT_REPORT_MASK) {
		/* These hooks have to be run serialized; as well, the
		 * handling might force the execution of commands, and
		 * that might cause reentrancy issues with
		 * bus-specific subdrivers and workqueues. So we run
		 * it in a separate workqueue. */
		struct i2400m_report_hook_args args = {
			.skb_rx = skb_rx,
			.l3l4_hdr = l3l4_hdr,
			.size = size
		};
		if (unlikely(i2400m->ready == 0))	/* only send if up */
			return;
		skb_get(skb_rx);
		i2400m_queue_work(i2400m, i2400m_report_hook_work,
				  GFP_KERNEL, &args, sizeof(args));
		if (unlikely(i2400m->trace_msg_from_user))
			wimax_msg(&i2400m->wimax_dev, "echo",
				  l3l4_hdr, size, GFP_KERNEL);
		result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
				   GFP_KERNEL);
		if (result < 0)
			dev_err(dev, "error sending report to userspace: %d\n",
				result);
	} else		/* an ack to a CMD, GET or SET */
		i2400m_rx_ctl_ack(i2400m, payload, size);
error_check:
	return;
}


/*
 * Receive and send up a trace
 *
 * @i2400m: device descriptor
 * @payload: pointer to trace message inside the skb
 * @size: size of the message
 *
 * The i2400m might produce trace information (diagnostics) and we
 * send it through a different kernel-to-user pipe (to avoid
 * clogging it).
 *
 * As in i2400m_rx_ctl(), we can't clone the original skb where the
 * data is because we need to send this up via netlink; netlink has to
 * add headers and we can't overwrite what's preceding the
 * payload...as it is another message. So we just dup them.
 */
static
void i2400m_rx_trace(struct i2400m *i2400m,
		     const void *payload, size_t size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
	unsigned msg_type;

	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
	if (result < 0) {
		dev_err(dev, "HW BUG? device sent a bad trace message: %d\n",
			result);
		goto error_check;
	}
	msg_type = le16_to_cpu(l3l4_hdr->type);
	d_printf(1, dev, "Trace %s 0x%04x: %zu bytes\n",
		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
		 msg_type, size);
	d_dump(2, dev, l3l4_hdr, size);
	if (unlikely(i2400m->ready == 0))	/* only send if up */
		return;
	result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
	if (result < 0)
		dev_err(dev, "error sending trace to userspace: %d\n",
			result);
error_check:
	return;
}


/*
 * Reorder queue data stored on skb->cb while the skb is queued in the
 * reorder queues.
 */
struct i2400m_roq_data {
	unsigned sn;		/* Sequence number for the skb */
	enum i2400m_cs cs;	/* packet type for the skb */
};


/*
 * ReOrder Queue
 *
 * @ws: Window Start; sequence number where the current window start
 *     is for this queue
 * @queue: the skb queue itself
 * @log: circular ring buffer used to log information about the
 *     reorder process in this queue that can be displayed in case of
 *     error to help diagnose it.
 *
 * This is the head for a list of skbs. The skb->cb member of each skb
 * queued here contains a 'struct i2400m_roq_data' where we store the
 * sequence number (sn) and the cs (packet type) coming from the RX
 * payload header from the device.
 */
struct i2400m_roq
{
	unsigned ws;
	struct sk_buff_head queue;
	struct i2400m_roq_log *log;
};


static
void __i2400m_roq_init(struct i2400m_roq *roq)
{
	roq->ws = 0;
	skb_queue_head_init(&roq->queue);
}


static
unsigned __i2400m_roq_index(struct i2400m *i2400m, struct i2400m_roq *roq)
{
	return ((unsigned long) roq - (unsigned long) i2400m->rx_roq)
		/ sizeof(*roq);
}


/*
 * Normalize a sequence number based on the queue's window start
 *
 * nsn = (sn - ws) % 2048
 *
 * Note that if @sn < @roq->ws, we still need a positive number; the
 * sign of % with negative operands is implementation specific, so we
 * normalize it by adding 2048 to bring it back to positive.
 */
static
unsigned __i2400m_roq_nsn(struct i2400m_roq *roq, unsigned sn)
{
	int r;
	r = ((int) sn - (int) roq->ws) % 2048;
	if (r < 0)
		r += 2048;
	return r;
}


/*
 * Circular buffer to keep the last N reorder operations
 *
 * In case something fails, dump them to try to come up with what
 * happened.
 */
enum {
	I2400M_ROQ_LOG_LENGTH = 32,
};

struct i2400m_roq_log {
	struct i2400m_roq_log_entry {
		enum i2400m_ro_type type;
		unsigned ws, count, sn, nsn, new_ws;
	} entry[I2400M_ROQ_LOG_LENGTH];
	unsigned in, out;
};


/* Print a log entry */
static
void i2400m_roq_log_entry_print(struct i2400m *i2400m, unsigned index,
				unsigned e_index,
				struct i2400m_roq_log_entry *e)
{
	struct device *dev = i2400m_dev(i2400m);

	switch(e->type) {
	case I2400M_RO_TYPE_RESET:
		dev_err(dev, "q#%d reset ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	case I2400M_RO_TYPE_PACKET:
		dev_err(dev, "q#%d queue ws %u cnt %u sn %u/%u\n",
			index, e->ws, e->count, e->sn, e->nsn);
		break;
	case I2400M_RO_TYPE_WS:
		dev_err(dev, "q#%d update_ws ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	case I2400M_RO_TYPE_PACKET_WS:
		dev_err(dev, "q#%d queue_update_ws ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	default:
		dev_err(dev, "q#%d BUG? entry %u - unknown type %u\n",
			index, e_index, e->type);
		break;
	}
}


static
void i2400m_roq_log_add(struct i2400m *i2400m,
			struct i2400m_roq *roq, enum i2400m_ro_type type,
			unsigned ws, unsigned count, unsigned sn,
			unsigned nsn, unsigned new_ws)
{
	struct i2400m_roq_log_entry *e;
	unsigned cnt_idx;
	int index = __i2400m_roq_index(i2400m, roq);

	/* if we run out of space, we drop the oldest entry */
	if (roq->log->in - roq->log->out == I2400M_ROQ_LOG_LENGTH)
		roq->log->out++;
	cnt_idx = roq->log->in++ % I2400M_ROQ_LOG_LENGTH;
	e = &roq->log->entry[cnt_idx];

	e->type = type;
	e->ws = ws;
	e->count = count;
	e->sn = sn;
	e->nsn = nsn;
	e->new_ws = new_ws;

	if (d_test(1))
		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
}
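
/*
 * Note on the in/out counters above: they only ever increase and rely
 * on unsigned wraparound, so the ring is full exactly when
 * in - out == I2400M_ROQ_LOG_LENGTH. For example (hypothetical
 * numbers), with in = 40 and out = 8 the 32 slots hold entries 8..39
 * and the next add pushes out the oldest one.
 */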

/* Dump all the entries in the FIFO and reinitialize it */
static
void i2400m_roq_log_dump(struct i2400m *i2400m, struct i2400m_roq *roq)
{
	unsigned cnt, cnt_idx;
	struct i2400m_roq_log_entry *e;
	int index = __i2400m_roq_index(i2400m, roq);

	BUG_ON(roq->log->out > roq->log->in);
	for (cnt = roq->log->out; cnt < roq->log->in; cnt++) {
		cnt_idx = cnt % I2400M_ROQ_LOG_LENGTH;
		e = &roq->log->entry[cnt_idx];
		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
		memset(e, 0, sizeof(*e));
	}
	roq->log->in = roq->log->out = 0;
}


/*
 * Backbone for the queuing of an skb (by normalized sequence number)
 *
 * @i2400m: device descriptor
 * @roq: reorder queue where to add
 * @skb: the skb to add
 * @sn: the sequence number of the skb
 * @nsn: the normalized sequence number of the skb (pre-computed by the
 *     caller from the @sn and @roq->ws).
 *
 * We try first a couple of quick cases:
 *
 *  - the queue is empty
 *  - the skb would be appended to the queue
 *
 * These will be the most common operations.
 *
 * If these fail, then we have to do a sorted insertion in the queue,
 * which is the slowest path.
 *
 * We don't have to acquire a reference count as we are going to own it.
 */
static
void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
			struct sk_buff *skb, unsigned sn, unsigned nsn)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb_itr;
	struct i2400m_roq_data *roq_data_itr, *roq_data;
	unsigned nsn_itr;

	d_fnstart(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %u)\n",
		  i2400m, roq, skb, sn, nsn);

	roq_data = (struct i2400m_roq_data *) &skb->cb;
	BUILD_BUG_ON(sizeof(*roq_data) > sizeof(skb->cb));
	roq_data->sn = sn;
	d_printf(3, dev, "ERX: roq %p [ws %u] nsn %d sn %u\n",
		 roq, roq->ws, nsn, roq_data->sn);

	/* Queues will be empty on not-so-bad environments, so try
	 * that first */
	if (skb_queue_empty(&roq->queue)) {
		d_printf(2, dev, "ERX: roq %p - first one\n", roq);
		__skb_queue_head(&roq->queue, skb);
		goto out;
	}
	/* Now try append, as most of the operations will be that */
	skb_itr = skb_peek_tail(&roq->queue);
	roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
	nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
	/* NSN bounds assumed correct (checked when it was queued) */
	if (nsn >= nsn_itr) {
		d_printf(2, dev, "ERX: roq %p - appended after %p "
			 "(nsn %d sn %u)\n",
			 roq, skb_itr, nsn_itr, roq_data_itr->sn);
		__skb_queue_tail(&roq->queue, skb);
		goto out;
	}
	/* None of the fast path options worked. Iterate to find the
	 * right spot where to insert the packet; we know the queue is
	 * not empty, so we are not the first ones; we also know we
	 * are not going to be the last ones. The list is sorted, so
	 * we have to insert before the first entry with an nsn_itr
	 * greater than our nsn. */
	skb_queue_walk(&roq->queue, skb_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
		/* NSN bounds assumed correct (checked when it was queued) */
		if (nsn_itr > nsn) {
			d_printf(2, dev, "ERX: roq %p - queued before %p "
				 "(nsn %d sn %u)\n", roq, skb_itr, nsn_itr,
				 roq_data_itr->sn);
			__skb_queue_before(&roq->queue, skb_itr, skb);
			goto out;
		}
	}
	/* If we get here, that is VERY bad -- print info to help
	 * diagnose and crash it */
	dev_err(dev, "SW BUG? failed to insert packet\n");
	dev_err(dev, "ERX: roq %p [ws %u] skb %p nsn %d sn %u\n",
		roq, roq->ws, skb, nsn, roq_data->sn);
	skb_queue_walk(&roq->queue, skb_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
		/* NSN bounds assumed correct (checked when it was queued) */
		dev_err(dev, "ERX: roq %p skb_itr %p nsn %d sn %u\n",
			roq, skb_itr, nsn_itr, roq_data_itr->sn);
	}
	BUG();
out:
	d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
		i2400m, roq, skb, sn, nsn);
	return;
}
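
/*
 * Example of the sorted-insert slow path above, with hypothetical
 * numbers for illustration: if the queue holds packets with nsn 2, 5
 * and 9, a new packet with nsn 7 fails both the empty-queue and the
 * append fast paths and gets inserted before the nsn 9 entry, keeping
 * the queue sorted by normalized sequence number.
 */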


/*
 * Backbone for the update window start operation
 *
 * @i2400m: device descriptor
 * @roq: Reorder queue
 * @sn: New sequence number
 *
 * Updates the window start of a queue; when doing so, it must deliver
 * to the networking stack all the queued skb's whose normalized
 * sequence number is lower than the new normalized window start.
 */
static
unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
				unsigned sn)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb_itr, *tmp_itr;
	struct i2400m_roq_data *roq_data_itr;
	unsigned new_nws, nsn_itr;

	new_nws = __i2400m_roq_nsn(roq, sn);
	if (unlikely(new_nws >= 1024) && d_test(1)) {
		dev_err(dev, "SW BUG? __update_ws new_nws %u (sn %u ws %u)\n",
			new_nws, sn, roq->ws);
		WARN_ON(1);
		i2400m_roq_log_dump(i2400m, roq);
	}
	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
		/* NSN bounds assumed correct (checked when it was queued) */
		if (nsn_itr < new_nws) {
			d_printf(2, dev, "ERX: roq %p - release skb %p "
				 "(nsn %u/%u new nws %u)\n",
				 roq, skb_itr, nsn_itr, roq_data_itr->sn,
				 new_nws);
			__skb_unlink(skb_itr, &roq->queue);
			i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
		}
		else
			break;	/* rest of packets all nsn_itr > nws */
	}
	roq->ws = sn;
	return new_nws;
}

/*
 * Reset a queue
 *
 * @i2400m: device descriptor
 * @roq: reorder queue to reset
 *
 * Deliver all the packets and reset the window-start to zero. Name is
 * kind of misleading.
 */
static
void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb_itr, *tmp_itr;
	struct i2400m_roq_data *roq_data_itr;

	d_fnstart(2, dev, "(i2400m %p roq %p)\n", i2400m, roq);
	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_RESET,
			   roq->ws, skb_queue_len(&roq->queue),
			   ~0, ~0, 0);
	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		d_printf(2, dev, "ERX: roq %p - release skb %p (sn %u)\n",
			 roq, skb_itr, roq_data_itr->sn);
		__skb_unlink(skb_itr, &roq->queue);
		i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
	}
	roq->ws = 0;
	d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
	return;
}


/*
 * Queue a packet
 *
 * @i2400m: device descriptor
 * @roq: reorder queue where to add the packet
 * @skb: containing the packet data
 * @lbn: Last block number of the packet in @skb
 *
 * The hardware is asking the driver to queue a packet for later
 * delivery to the networking stack.
 */
static
void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
		      struct sk_buff *skb, unsigned lbn)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned nsn, len;

	d_fnstart(2, dev, "(i2400m %p roq %p skb %p lbn %u)\n",
		  i2400m, roq, skb, lbn);
	len = skb_queue_len(&roq->queue);
	nsn = __i2400m_roq_nsn(roq, lbn);
	if (unlikely(nsn >= 1024)) {
		dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
			nsn, lbn, roq->ws);
		i2400m_roq_log_dump(i2400m, roq);
		i2400m->bus_reset(i2400m, I2400M_RT_WARM);
	} else {
		__i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
				   roq->ws, len, lbn, nsn, ~0);
	}
	d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
		i2400m, roq, skb, lbn);
	return;
}


/*
 * Update the window start in a reorder queue and deliver all skbs
 * with a lower window start
 *
 * @i2400m: device descriptor
 * @roq: Reorder queue
 * @sn: New sequence number
 */
static
void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
			  unsigned sn)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned old_ws, nsn, len;

	d_fnstart(2, dev, "(i2400m %p roq %p sn %u)\n", i2400m, roq, sn);
	old_ws = roq->ws;
	len = skb_queue_len(&roq->queue);
	nsn = __i2400m_roq_update_ws(i2400m, roq, sn);
	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
			   old_ws, len, sn, nsn, roq->ws);
	d_fnend(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
	return;
}


/*
 * Queue a packet and update the window start
 *
 * @i2400m: device descriptor
 * @roq: reorder queue where to add the packet
 * @skb: containing the packet data
 * @sn: sequence number of the packet in @skb
 *
 * Note that unlike i2400m_roq_update_ws(), which sets the new window
 * start to @sn, in here we'll set it to @sn + 1.
 */
static
void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
				struct sk_buff *skb, unsigned sn)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned nsn, old_ws, len;

	d_fnstart(2, dev, "(i2400m %p roq %p skb %p sn %u)\n",
		  i2400m, roq, skb, sn);
	len = skb_queue_len(&roq->queue);
	nsn = __i2400m_roq_nsn(roq, sn);
	old_ws = roq->ws;
	if (unlikely(nsn >= 1024)) {
		dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
			nsn, sn, roq->ws);
		i2400m_roq_log_dump(i2400m, roq);
		i2400m->bus_reset(i2400m, I2400M_RT_WARM);
	} else {
		/* if the queue is empty, don't bother as we'd queue
		 * it and immediately unqueue it -- just deliver it */
		if (len == 0) {
			struct i2400m_roq_data *roq_data;
			roq_data = (struct i2400m_roq_data *) &skb->cb;
			i2400m_net_erx(i2400m, skb, roq_data->cs);
		}
		else
			__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
		__i2400m_roq_update_ws(i2400m, roq, sn + 1);
		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
				   old_ws, len, sn, nsn, roq->ws);
	}
	d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
		i2400m, roq, skb, sn);
	return;
}


/*
 * Receive and send up an extended data packet
 *
 * @i2400m: device descriptor
 * @skb_rx: skb that contains the extended data packet
 * @single_last: 1 if the payload is the only one or the last one of
 *     the skb.
 * @payload: pointer to the packet's data inside the skb
 * @size: size of the payload
 *
 * Starting in v1.4 of the i2400m's firmware, the device can send data
 * packets to the host in an extended format; this includes a 16 byte
 * header (struct i2400m_pl_edata_hdr). Using this header's space we
 * can fake ethernet headers for ethernet device emulation without
 * having to copy packets around.
 *
 * This function handles said path: it passes over to the networking
 * stack a data packet that might have reordering requirements.
 *
 * It needs to decide if the skb in which the packet is contained can
 * be reused or if it needs to be cloned. Then the skb has to be
 * trimmed at the edges so that it starts at the space for the eth
 * header, and then it is passed to i2400m_net_erx() for the stack
 * (either directly or through the reorder queues).
 *
 * Assumes the caller has verified the sanity of the payload (size,
 * etc) already.
 */
static
void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
		     unsigned single_last, const void *payload, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_pl_edata_hdr *hdr = payload;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct sk_buff *skb;
	enum i2400m_cs cs;
	u32 reorder;
	unsigned ro_needed, ro_type, ro_cin, ro_sn;
	struct i2400m_roq *roq;
	struct i2400m_roq_data *roq_data;

	BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));

	d_fnstart(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
		  "size %zu)\n", i2400m, skb_rx, single_last, payload, size);
	if (size < sizeof(*hdr)) {
		dev_err(dev, "ERX: HW BUG? message with short header (%zu "
			"vs %zu bytes expected)\n", size, sizeof(*hdr));
		goto error;
	}

	if (single_last) {
		skb = skb_get(skb_rx);
		d_printf(3, dev, "ERX: skb %p reusing\n", skb);
	} else {
		skb = skb_clone(skb_rx, GFP_KERNEL);
		if (skb == NULL) {
			dev_err(dev, "ERX: no memory to clone skb\n");
			net_dev->stats.rx_dropped++;
			goto error_skb_clone;
		}
		d_printf(3, dev, "ERX: skb %p cloned from %p\n", skb, skb_rx);
	}
	/* now we have to pull and trim so that the skb points to the
	 * beginning of the IP packet; the netdev part will add the
	 * ethernet header as needed - we know there is enough space
	 * because of the BUILD_BUG_ON() check above. */
	skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data);
	skb_trim(skb, (void *) skb_end_pointer(skb) - payload - sizeof(*hdr));

	reorder = le32_to_cpu(hdr->reorder);
	ro_needed = reorder & I2400M_RO_NEEDED;
	cs = hdr->cs;
	if (ro_needed) {
		ro_type = (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE;
		ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
		ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;

		roq = &i2400m->rx_roq[ro_cin];
		roq_data = (struct i2400m_roq_data *) &skb->cb;
		roq_data->sn = ro_sn;
		roq_data->cs = cs;
		d_printf(2, dev, "ERX: reorder needed: "
			 "type %u cin %u [ws %u] sn %u/%u len %zuB\n",
			 ro_type, ro_cin, roq->ws, ro_sn,
			 __i2400m_roq_nsn(roq, ro_sn), size);
		d_dump(2, dev, payload, size);
		switch(ro_type) {
		case I2400M_RO_TYPE_RESET:
			i2400m_roq_reset(i2400m, roq);
			kfree_skb(skb);	/* no data here */
			break;
		case I2400M_RO_TYPE_PACKET:
			i2400m_roq_queue(i2400m, roq, skb, ro_sn);
			break;
		case I2400M_RO_TYPE_WS:
			i2400m_roq_update_ws(i2400m, roq, ro_sn);
			kfree_skb(skb);	/* no data here */
			break;
		case I2400M_RO_TYPE_PACKET_WS:
			i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn);
			break;
		default:
			dev_err(dev, "HW BUG? unknown reorder type %u\n",
				ro_type);
		}
	}
	else
		i2400m_net_erx(i2400m, skb, cs);
error_skb_clone:
error:
	d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
		"size %zu) = void\n", i2400m, skb_rx, single_last, payload,
		size);
	return;
}


/*
 * Act on a received payload
 *
 * @i2400m: device instance
 * @skb_rx: skb where the transaction was received
 * @single_last: 1 if this is the only payload or the last one (so the
 *     skb can be reused instead of cloned).
 * @pld: payload descriptor
 * @payload: payload data
 *
 * Upon reception of a payload, look at its guts in the payload
 * descriptor and decide what to do with it. If it is a single payload
 * skb or if the last skb is a data packet, the skb will be referenced
 * and modified (so it doesn't have to be cloned).
 */
static
void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
		       unsigned single_last, const struct i2400m_pld *pld,
		       const void *payload)
{
	struct device *dev = i2400m_dev(i2400m);
	size_t pl_size = i2400m_pld_size(pld);
	enum i2400m_pt pl_type = i2400m_pld_type(pld);

	d_printf(7, dev, "RX: received payload type %u, %zu bytes\n",
		 pl_type, pl_size);
	d_dump(8, dev, payload, pl_size);

	switch (pl_type) {
	case I2400M_PT_DATA:
		d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size);
		i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size);
		break;
	case I2400M_PT_CTRL:
		i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
		break;
	case I2400M_PT_TRACE:
		i2400m_rx_trace(i2400m, payload, pl_size);
		break;
	case I2400M_PT_EDATA:
		d_printf(3, dev, "ERX: data payload %zu bytes\n", pl_size);
		i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size);
		break;
	default:	/* Anything else shouldn't come to the host */
		if (printk_ratelimit())
			dev_err(dev, "RX: HW BUG? unexpected payload type "
				"%u\n", pl_type);
	}
}


/*
 * Check a received transaction's message header
 *
 * @i2400m: device descriptor
 * @msg_hdr: message header
 * @buf_size: size of the received buffer
 *
 * Check that the declarations done by an RX buffer message header are
 * sane and consistent with the amount of data that was received.
 */
static
int i2400m_rx_msg_hdr_check(struct i2400m *i2400m,
			    const struct i2400m_msg_hdr *msg_hdr,
			    size_t buf_size)
{
	int result = -EIO;
	struct device *dev = i2400m_dev(i2400m);
	if (buf_size < sizeof(*msg_hdr)) {
		dev_err(dev, "RX: HW BUG? message with short header (%zu "
			"vs %zu bytes expected)\n", buf_size, sizeof(*msg_hdr));
		goto error;
	}
	if (msg_hdr->barker != cpu_to_le32(I2400M_D2H_MSG_BARKER)) {
		dev_err(dev, "RX: HW BUG? message received with unknown "
			"barker 0x%08x (buf_size %zu bytes)\n",
			le32_to_cpu(msg_hdr->barker), buf_size);
		goto error;
	}
	if (msg_hdr->num_pls == 0) {
		dev_err(dev, "RX: HW BUG? zero payload packets in message\n");
		goto error;
	}
	if (le16_to_cpu(msg_hdr->num_pls) > I2400M_MAX_PLS_IN_MSG) {
		dev_err(dev, "RX: HW BUG? message contains more payload "
			"than maximum; ignoring.\n");
		goto error;
	}
	result = 0;
error:
	return result;
}


/*
 * Check a payload descriptor against the received data
 *
 * @i2400m: device descriptor
 * @pld: payload descriptor
 * @pl_itr: offset (in bytes) in the received buffer where the payload
 *     is located
 * @buf_size: size of the received buffer
 *
 * Given a payload descriptor (part of an RX buffer), check that it is
 * sane and that the data it declares fits in the buffer.
 */
static
int i2400m_rx_pl_descr_check(struct i2400m *i2400m,
			     const struct i2400m_pld *pld,
			     size_t pl_itr, size_t buf_size)
{
	int result = -EIO;
	struct device *dev = i2400m_dev(i2400m);
	size_t pl_size = i2400m_pld_size(pld);
	enum i2400m_pt pl_type = i2400m_pld_type(pld);

	if (pl_size > i2400m->bus_pl_size_max) {
		dev_err(dev, "RX: HW BUG? payload @%zu: size %zu is "
			"bigger than maximum %zu; ignoring message\n",
			pl_itr, pl_size, i2400m->bus_pl_size_max);
		goto error;
	}
	if (pl_itr + pl_size > buf_size) {	/* enough? */
		dev_err(dev, "RX: HW BUG? payload @%zu: size %zu "
			"goes beyond the received buffer "
			"size (%zu bytes); ignoring message\n",
			pl_itr, pl_size, buf_size);
		goto error;
	}
	if (pl_type >= I2400M_PT_ILLEGAL) {
		dev_err(dev, "RX: HW BUG? illegal payload type %u; "
			"ignoring message\n", pl_type);
		goto error;
	}
	result = 0;
error:
	return result;
}


/**
 * i2400m_rx - Receive a buffer of data from the device
 *
 * @i2400m: device descriptor
 * @skb: skbuff where the data has been received
 *
 * Parse in a buffer of data that contains an RX message sent from the
 * device. See the file header for the format. Run all checks on the
 * buffer header, then run over each payload's descriptors, verify
 * their consistency and act on each payload's contents. If
 * everything is successful, update the device's statistics.
 *
 * Note: You need to set the skb to contain only the length of the
 * received buffer; for that, use skb_trim(skb, RECEIVED_SIZE).
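 *
 * A minimal caller sketch (hypothetical variable names, for
 * illustration only -- not lifted from a real bus driver):
 *
 *	skb_put(skb, read_size);	(raw bytes from the bus)
 *	skb_trim(skb, received_size);	(exact RX message length)
 *	result = i2400m_rx(i2400m, skb);
 *	if (result < 0)
 *		kfree_skb(skb);		(on error, we still own it)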
 *
 * Returns:
 *
 *     0 if ok, < 0 errno on error
 *
 * If ok, this function now owns the skb and the caller DOESN'T have
 * to run kfree_skb() on it. However, on error, the caller still owns
 * the skb and it is responsible for releasing it.
 */
int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
{
	int i, result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_msg_hdr *msg_hdr;
	size_t pl_itr, pl_size, skb_len;
	unsigned long flags;
	unsigned num_pls, single_last;

	skb_len = skb->len;
	d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
		  i2400m, skb, skb_len);
	result = -EIO;
	msg_hdr = (void *) skb->data;
	result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len);
	if (result < 0)
		goto error_msg_hdr_check;
	result = -EIO;
	num_pls = le16_to_cpu(msg_hdr->num_pls);
	pl_itr = sizeof(*msg_hdr) +	/* Check payload descriptor(s) */
		num_pls * sizeof(msg_hdr->pld[0]);
	pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
	if (pl_itr > skb->len) {	/* got all the payload descriptors? */
		dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
			"%u payload descriptors (%zu each, total %zu)\n",
			skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
		goto error_pl_descr_short;
	}
	/* Walk each payload--check we really got it */
	for (i = 0; i < num_pls; i++) {
		/* work around old gcc warnings */
		pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
		result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
						  pl_itr, skb->len);
		if (result < 0)
			goto error_pl_descr_check;
		single_last = num_pls == 1 || i == num_pls - 1;
		i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i],
				  skb->data + pl_itr);
		pl_itr += ALIGN(pl_size, I2400M_PL_ALIGN);
		cond_resched();		/* Don't monopolize */
	}
	kfree_skb(skb);
	/* Update device statistics; use the skb_len snapshot taken at
	 * entry, as the skb has been freed by now */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400m->rx_pl_num += i;
	if (i > i2400m->rx_pl_max)
		i2400m->rx_pl_max = i;
	if (i < i2400m->rx_pl_min)
		i2400m->rx_pl_min = i;
	i2400m->rx_num++;
	i2400m->rx_size_acc += skb_len;
	if (skb_len < i2400m->rx_size_min)
		i2400m->rx_size_min = skb_len;
	if (skb_len > i2400m->rx_size_max)
		i2400m->rx_size_max = skb_len;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
error_pl_descr_check:
error_pl_descr_short:
error_msg_hdr_check:
	d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n",
		i2400m, skb, skb_len, result);
	return result;
}
EXPORT_SYMBOL_GPL(i2400m_rx);


/*
 * Initialize the RX queue and infrastructure
 *
 * This sets up all the RX reordering infrastructure, which will not
 * be used if reordering is not enabled or if the firmware does not
 * support it. The device is told to do reordering in
 * i2400m_dev_initialize(), where it also looks at the value of the
 * i2400m->rx_reorder switch before taking a decision.
 *
 * Note we allocate the roq queues in one chunk and the actual logging
 * support in another one, and then we set up the pointers from the
 * former to the latter.
 */
int i2400m_rx_setup(struct i2400m *i2400m)
{
	int result = 0;
	struct device *dev = i2400m_dev(i2400m);

	i2400m->rx_reorder = i2400m_rx_reorder_disabled ? 0 : 1;
	if (i2400m->rx_reorder) {
		unsigned itr;
		size_t size;
		struct i2400m_roq_log *rd;

		result = -ENOMEM;

		size = sizeof(i2400m->rx_roq[0]) * (I2400M_RO_CIN + 1);
		i2400m->rx_roq = kzalloc(size, GFP_KERNEL);
		if (i2400m->rx_roq == NULL) {
			dev_err(dev, "RX: cannot allocate %zu bytes for "
				"reorder queues\n", size);
			goto error_roq_alloc;
		}

		size = sizeof(*i2400m->rx_roq[0].log) * (I2400M_RO_CIN + 1);
		rd = kzalloc(size, GFP_KERNEL);
		if (rd == NULL) {
			dev_err(dev, "RX: cannot allocate %zu bytes for "
				"reorder queues log areas\n", size);
			result = -ENOMEM;
			goto error_roq_log_alloc;
		}

		for (itr = 0; itr < I2400M_RO_CIN + 1; itr++) {
			__i2400m_roq_init(&i2400m->rx_roq[itr]);
			i2400m->rx_roq[itr].log = &rd[itr];
		}
	}
	return 0;

error_roq_log_alloc:
	kfree(i2400m->rx_roq);
error_roq_alloc:
	return result;
}


/* Tear down the RX queue and infrastructure */
void i2400m_rx_release(struct i2400m *i2400m)
{
	if (i2400m->rx_reorder) {
		unsigned itr;
		for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
			__skb_queue_purge(&i2400m->rx_roq[itr].queue);
		kfree(i2400m->rx_roq[0].log);
		kfree(i2400m->rx_roq);
	}
}