mISDN: Reduce RX buffer allocation for transparent data
[linux-block.git] drivers/isdn/mISDN/hwchannel.c
/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/mISDNhw.h>

static void
dchannel_bh(struct work_struct *ws)
{
	struct dchannel *dch = container_of(ws, struct dchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
		while ((skb = skb_dequeue(&dch->rqueue))) {
			if (likely(dch->dev.D.peer)) {
				err = dch->dev.D.recv(dch->dev.D.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
	if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
		if (dch->phfunc)
			dch->phfunc(dch);
	}
}

static void
bchannel_bh(struct work_struct *ws)
{
	struct bchannel *bch = container_of(ws, struct bchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
		while ((skb = skb_dequeue(&bch->rqueue))) {
			bch->rcount--;
			if (likely(bch->ch.peer)) {
				err = bch->ch.recv(bch->ch.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
}

int
mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
{
	test_and_set_bit(FLG_HDLC, &ch->Flags);
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	ch->phfunc = phf;
	skb_queue_head_init(&ch->squeue);
	skb_queue_head_init(&ch->rqueue);
	INIT_LIST_HEAD(&ch->dev.bchannels);
	INIT_WORK(&ch->workq, dchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initdchannel);

int
mISDN_initbchannel(struct bchannel *ch, int maxlen)
{
	ch->Flags = 0;
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	skb_queue_head_init(&ch->rqueue);
	ch->rcount = 0;
	ch->next_skb = NULL;
	INIT_WORK(&ch->workq, bchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initbchannel);

int
mISDN_freedchannel(struct dchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	skb_queue_purge(&ch->squeue);
	skb_queue_purge(&ch->rqueue);
	flush_work_sync(&ch->workq);
	return 0;
}
EXPORT_SYMBOL(mISDN_freedchannel);

void
mISDN_clear_bchannel(struct bchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	ch->tx_idx = 0;
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	if (ch->next_skb) {
		dev_kfree_skb(ch->next_skb);
		ch->next_skb = NULL;
	}
	test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
	test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
	test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
}
EXPORT_SYMBOL(mISDN_clear_bchannel);

int
mISDN_freebchannel(struct bchannel *ch)
{
	mISDN_clear_bchannel(ch);
	skb_queue_purge(&ch->rqueue);
	ch->rcount = 0;
	flush_work_sync(&ch->workq);
	return 0;
}
EXPORT_SYMBOL(mISDN_freebchannel);

static inline u_int
get_sapi_tei(u_char *p)
{
	u_int sapi, tei;

	sapi = *p >> 2;
	tei = p[1] >> 1;
	return sapi | (tei << 8);
}

void
recv_Dchannel(struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(dch->rx_skb);
		dch->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(dch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = get_sapi_tei(dch->rx_skb->data);
	skb_queue_tail(&dch->rqueue, dch->rx_skb);
	dch->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel);

void
recv_Echannel(struct dchannel *ech, struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(ech->rx_skb);
		ech->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(ech->rx_skb);
	hh->prim = PH_DATA_E_IND;
	hh->id = get_sapi_tei(ech->rx_skb->data);
	skb_queue_tail(&dch->rqueue, ech->rx_skb);
	ech->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Echannel);

void
recv_Bchannel(struct bchannel *bch, unsigned int id)
{
	struct mISDNhead *hh;

	/* if allocation failed, upper functions may still call us */
	if (unlikely(!bch->rx_skb))
		return;
	if (unlikely(!bch->rx_skb->len)) {
		/* we have no data to deliver - this may happen after
		 * recovery from overflow or a too small allocation.
		 * We need to free the buffer here */
		dev_kfree_skb(bch->rx_skb);
		bch->rx_skb = NULL;
	} else {
		hh = mISDN_HEAD_P(bch->rx_skb);
		hh->prim = PH_DATA_IND;
		hh->id = id;
		if (bch->rcount >= 64) {
			printk(KERN_WARNING
			       "B%d receive queue overflow - flushing!\n",
			       bch->nr);
			skb_queue_purge(&bch->rqueue);
		}
		bch->rcount++;
		skb_queue_tail(&bch->rqueue, bch->rx_skb);
		bch->rx_skb = NULL;
		schedule_event(bch, FLG_RECVQUEUE);
	}
}
EXPORT_SYMBOL(recv_Bchannel);

void
recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
{
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel_skb);

void
recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
		       "flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel_skb);

static void
confirm_Dsend(struct dchannel *dch)
{
	struct sk_buff *skb;

	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
			       0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		       mISDN_HEAD_ID(dch->tx_skb));
		return;
	}
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}

int
get_next_dframe(struct dchannel *dch)
{
	dch->tx_idx = 0;
	dch->tx_skb = skb_dequeue(&dch->squeue);
	if (dch->tx_skb) {
		confirm_Dsend(dch);
		return 1;
	}
	dch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_dframe);

static void
confirm_Bsend(struct bchannel *bch)
{
	struct sk_buff *skb;

	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
		       "flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
			       0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		       mISDN_HEAD_ID(bch->tx_skb));
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}

int
get_next_bframe(struct bchannel *bch)
{
	bch->tx_idx = 0;
	if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
		bch->tx_skb = bch->next_skb;
		if (bch->tx_skb) {
			bch->next_skb = NULL;
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			/* confirm immediately to allow next data */
			confirm_Bsend(bch);
			return 1;
		} else {
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			printk(KERN_WARNING "B TX_NEXT without skb\n");
		}
	}
	bch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_bframe);

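/*
 * Usage sketch (illustration only, not part of this file): a hypothetical
 * driver's B-channel TX-complete handler could advance to the next queued
 * frame roughly like this; my_start_tx() stands in for the driver-specific
 * FIFO fill routine and is an assumption.
 *
 *	static void my_bch_tx_complete(struct bchannel *bch)
 *	{
 *		if (bch->tx_skb) {
 *			dev_kfree_skb(bch->tx_skb);
 *			bch->tx_skb = NULL;
 *		}
 *		if (get_next_bframe(bch))
 *			my_start_tx(bch);
 *	}
 *
 * On a return value of 1, bch->tx_skb holds the next frame and a PH_DATA_CNF
 * has already been queued by confirm_Bsend(); on 0 the channel is marked idle.
 */
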
void
queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
{
	struct mISDNhead *hh;

	if (!skb) {
		_queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
	} else {
		if (ch->peer) {
			hh = mISDN_HEAD_P(skb);
			hh->prim = pr;
			hh->id = id;
			if (!ch->recv(ch->peer, skb))
				return;
		}
		dev_kfree_skb(skb);
	}
}
EXPORT_SYMBOL(queue_ch_frame);

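/*
 * Usage sketch (illustration only): with skb == NULL the primitive is built
 * via _queue_data(); with a data skb the frame goes straight to the peer's
 * recv callback and is freed here only when no peer is attached or delivery
 * fails. A driver can, for example, report channel activation with an empty
 * frame:
 *
 *	queue_ch_frame(&bch->ch, PH_ACTIVATE_IND, MISDN_ID_ANY, NULL);
 *
 * (MISDN_ID_ANY as the id is a typical choice for such indications.)
 */
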
int
dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
{
	/* check size */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		       __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		skb_queue_tail(&ch->squeue, skb);
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(dchannel_senddata);

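/*
 * Usage sketch (illustration only, not part of this file): a hypothetical
 * driver's PH_DATA_REQ handler might look roughly like this, starting the
 * transfer itself only when 1 is returned. my_fill_dfifo() is an assumed
 * helper, and the hardware locking required around dchannel_senddata() is
 * omitted for brevity.
 *
 *	static int my_d_send(struct dchannel *dch, struct sk_buff *skb)
 *	{
 *		unsigned int id = mISDN_HEAD_ID(skb);
 *		int ret;
 *
 *		ret = dchannel_senddata(dch, skb);
 *		if (ret > 0) {
 *			my_fill_dfifo(dch);
 *			queue_ch_frame(&dch->dev.D, PH_DATA_CNF, id, NULL);
 *			ret = 0;
 *		}
 *		return ret;
 *	}
 *
 * A return of 0 means the frame was queued behind the current tx_skb and is
 * picked up later via get_next_dframe(), which also generates its
 * PH_DATA_CNF. bchannel_senddata() below follows the same convention.
 */
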
int
bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
{
	/* check size */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		       __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	/* check for pending next_skb */
	if (ch->next_skb) {
		printk(KERN_WARNING
		       "%s: next_skb exists ERROR (skb->len=%d next_skb->len=%d)\n",
		       __func__, skb->len, ch->next_skb->len);
		return -EBUSY;
	}
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
		ch->next_skb = skb;
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		confirm_Bsend(ch);
		return 1;
	}
}
EXPORT_SYMBOL(bchannel_senddata);

/* The function allocates a new receive skb on demand, sized for the
 * requirements of the current protocol. It returns the tailroom of the
 * receive skb or a negative error code.
 */
int
bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
{
	int len;

	if (bch->rx_skb) {
		len = skb_tailroom(bch->rx_skb);
		if (len < reqlen) {
			pr_warning("B%d no space for %d (only %d) bytes\n",
				   bch->nr, reqlen, len);
			if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
				/* send what we have now and try a new buffer */
				recv_Bchannel(bch, 0);
			} else {
				/* on HDLC we have to drop too big frames */
				return -EMSGSIZE;
			}
		} else {
			return len;
		}
	}
	if (unlikely(reqlen > bch->maxlen))
		return -EMSGSIZE;
	if (test_bit(FLG_TRANSPARENT, &bch->Flags))
		len = reqlen;
	else /* with HDLC we do not know the length yet */
		len = bch->maxlen;
	bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!bch->rx_skb) {
		pr_warning("B%d receive no memory for %d bytes\n",
			   bch->nr, len);
		len = -ENOMEM;
	}
	return len;
}
EXPORT_SYMBOL(bchannel_get_rxbuf);
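
/*
 * Usage sketch (illustration only, not part of this file): a hypothetical
 * driver's receive interrupt might use bchannel_get_rxbuf() like this when
 * "count" bytes are waiting in the controller FIFO; my_read_fifo() is an
 * assumed driver-specific helper.
 *
 *	static void my_bch_rx(struct bchannel *bch, int count)
 *	{
 *		if (bchannel_get_rxbuf(bch, count) < 0) {
 *			pr_warning("B%d: no rx buffer for %d bytes\n",
 *				   bch->nr, count);
 *			return;
 *		}
 *		my_read_fifo(bch->hw, skb_put(bch->rx_skb, count), count);
 *		if (test_bit(FLG_TRANSPARENT, &bch->Flags))
 *			recv_Bchannel(bch, 0);
 *	}
 *
 * Transparent data is delivered after every fill, while an HDLC driver
 * would call recv_Bchannel() only once the controller signals the end of a
 * frame. Because a transparent buffer is allocated with only reqlen bytes,
 * this helper implements the smaller allocations referred to by the
 * "Reduce RX buffer allocation for transparent data" change above.
 */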