3 * Author Karsten Keil <kkeil@novell.com>
5 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #include <linux/gfp.h>
19 #include <linux/module.h>
20 #include <linux/mISDNhw.h>
/*
 * dchannel_bh - workqueue bottom half for a D-channel.
 * Drains the channel's receive queue and hands each skb to the attached
 * peer via dch->dev.D.recv(); also reacts to a pending PH-state-change
 * flag (FLG_PHCHANGE).
 * NOTE(review): the embedded original line numbers are non-contiguous,
 * so local declarations, braces and the error/cleanup paths of this
 * function are missing from this chunk — do not edit logic here without
 * the complete file.
 */
23 dchannel_bh(struct work_struct *ws)
25 struct dchannel *dch = container_of(ws, struct dchannel, workq);
/* FLG_RECVQUEUE is test-and-cleared atomically: the flag marks "queue
 * has data", set by the receive helpers below before scheduling us. */
29 if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
30 while ((skb = skb_dequeue(&dch->rqueue))) {
/* Only deliver when an upper-layer peer is attached; the else branch
 * (presumably freeing the skb) is missing from this extraction. */
31 if (likely(dch->dev.D.peer)) {
32 err = dch->dev.D.recv(dch->dev.D.peer, skb);
/* PH state change handling — body missing from this chunk. */
39 if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
/*
 * bchannel_bh - workqueue bottom half for a B-channel.
 * Mirrors dchannel_bh(): drains bch->rqueue and passes each skb to the
 * attached peer via bch->ch.recv().
 * NOTE(review): interior lines (declarations, else branch, braces) are
 * missing from this extraction.
 */
46 bchannel_bh(struct work_struct *ws)
48 struct bchannel *bch = container_of(ws, struct bchannel, workq);
52 if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
53 while ((skb = skb_dequeue(&bch->rqueue))) {
55 if (likely(bch->ch.peer)) {
56 err = bch->ch.recv(bch->ch.peer, skb);
/*
 * mISDN_initdchannel - initialize a D-channel structure.
 * Sets the HDLC mode flag, initializes the send/receive skb queues, the
 * per-device B-channel list and the bottom-half work item.
 * @maxlen and @phf: presumably the maximum frame length and the
 * PH-change callback — their assignments are among the lines missing
 * from this extraction; confirm against the full file.
 */
66 mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
68 test_and_set_bit(FLG_HDLC, &ch->Flags);
75 skb_queue_head_init(&ch->squeue);
76 skb_queue_head_init(&ch->rqueue);
77 INIT_LIST_HEAD(&ch->dev.bchannels);
78 INIT_WORK(&ch->workq, dchannel_bh);
81 EXPORT_SYMBOL(mISDN_initdchannel);
/*
 * mISDN_initbchannel - initialize a B-channel structure.
 * Records the initial/next min and max frame lengths (the "init_" copies
 * are what mISDN_clear_bchannel() later restores), initializes the
 * receive queue and the bottom-half work item.
 * NOTE(review): assignments of ch->minlen / ch->maxlen themselves appear
 * to be among the lines dropped by this extraction.
 */
84 mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
85 unsigned short minlen)
89 ch->next_minlen = minlen;
90 ch->init_minlen = minlen;
92 ch->next_maxlen = maxlen;
93 ch->init_maxlen = maxlen;
98 skb_queue_head_init(&ch->rqueue);
101 INIT_WORK(&ch->workq, bchannel_bh);
104 EXPORT_SYMBOL(mISDN_initbchannel);
/*
 * mISDN_freedchannel - release all resources held by a D-channel.
 * Frees any pending tx/rx skbs, purges both queues and waits for the
 * bottom-half work item to finish so no handler runs on freed state.
 * NOTE(review): the guards around the dev_kfree_skb() calls (and the
 * NULLing of the pointers) are missing from this extraction.
 */
107 mISDN_freedchannel(struct dchannel *ch)
110 dev_kfree_skb(ch->tx_skb);
114 dev_kfree_skb(ch->rx_skb);
117 skb_queue_purge(&ch->squeue);
118 skb_queue_purge(&ch->rqueue);
/* Must run after the purges so a concurrently scheduled dchannel_bh
 * cannot requeue work on freed skbs. */
119 flush_work_sync(&ch->workq);
122 EXPORT_SYMBOL(mISDN_freedchannel);
/*
 * mISDN_clear_bchannel - reset a B-channel to its post-init state.
 * Frees any in-flight skbs (tx, rx, queued-next), clears the transfer
 * related flags and restores the min/max frame lengths captured by
 * mISDN_initbchannel(). Does NOT purge ch->rqueue or stop the workq —
 * that is mISDN_freebchannel()'s job.
 */
125 mISDN_clear_bchannel(struct bchannel *ch)
128 dev_kfree_skb(ch->tx_skb);
133 dev_kfree_skb(ch->rx_skb);
137 dev_kfree_skb(ch->next_skb);
/* Return values intentionally ignored: these are plain flag clears. */
140 test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
141 test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
142 test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
143 test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags);
144 test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags);
145 test_and_clear_bit(FLG_RX_OFF, &ch->Flags);
/* Roll back any MISDN_CTRL_RX_BUFFER resizing to the init values. */
147 ch->minlen = ch->init_minlen;
148 ch->next_minlen = ch->init_minlen;
149 ch->maxlen = ch->init_maxlen;
150 ch->next_maxlen = ch->init_maxlen;
152 EXPORT_SYMBOL(mISDN_clear_bchannel);
/*
 * mISDN_freebchannel - release all resources held by a B-channel.
 * Clears the channel state, purges the receive queue and waits for the
 * bottom-half work item to complete.
 */
155 mISDN_freebchannel(struct bchannel *ch)
157 mISDN_clear_bchannel(ch);
158 skb_queue_purge(&ch->rqueue);
160 flush_work_sync(&ch->workq);
163 EXPORT_SYMBOL(mISDN_freebchannel);
/*
 * mISDN_ctrl_bchannel - generic control-request dispatcher for B-channels.
 * Handles the operations common to all hardware drivers:
 *   MISDN_CTRL_GETOP      - report which ops this helper supports
 *   MISDN_CTRL_FILL_EMPTY - enable/disable fill-byte transmission
 *   MISDN_CTRL_RX_OFF     - toggle receive-drop mode, report drop count
 *   MISDN_CTRL_RX_BUFFER  - resize the rx buffer limits (takes effect on
 *                           the next buffer allocation via next_*len)
 * NOTE(review): break statements, the if/else around the set/clear pairs
 * and the return path are missing from this extraction.
 */
166 mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
171 case MISDN_CTRL_GETOP:
172 cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
175 case MISDN_CTRL_FILL_EMPTY:
/* cq->p2 carries the fill byte pattern (low 8 bits). */
177 memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
178 test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
180 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
183 case MISDN_CTRL_RX_OFF:
184 /* read back dropped byte count */
185 cq->p2 = bch->dropcnt;
187 test_and_set_bit(FLG_RX_OFF, &bch->Flags);
189 test_and_clear_bit(FLG_RX_OFF, &bch->Flags);
192 case MISDN_CTRL_RX_BUFFER:
/* IGNORE sentinel leaves the current limit untouched. */
193 if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
194 bch->next_maxlen = cq->p2;
195 if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
196 bch->next_minlen = cq->p1;
197 /* we return the old values */
198 cq->p1 = bch->minlen;
199 cq->p2 = bch->maxlen;
202 pr_info("mISDN unhandled control %x operation\n", cq->op);
208 EXPORT_SYMBOL(mISDN_ctrl_bchannel);
/*
 * get_sapi_tei - extract SAPI and TEI from the first two bytes of a
 * D-channel frame and pack them as (tei << 8) | sapi.
 * NOTE(review): the lines computing sapi/tei from *p are missing from
 * this extraction; only the signature and return are visible.
 */
211 get_sapi_tei(u_char *p)
217 return sapi | (tei << 8);
/*
 * recv_Dchannel - queue a received D-channel frame for the upper layer.
 * Frames shorter than 2 bytes (no room for SAPI/TEI) are dropped.
 * Otherwise the mISDN header is stamped PH_DATA_IND with the packed
 * SAPI/TEI as id, the skb is queued on dch->rqueue and dchannel_bh is
 * scheduled via FLG_RECVQUEUE.
 * NOTE(review): the rx_skb reset/return lines are missing here.
 */
221 recv_Dchannel(struct dchannel *dch)
223 struct mISDNhead *hh;
225 if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
226 dev_kfree_skb(dch->rx_skb);
230 hh = mISDN_HEAD_P(dch->rx_skb);
231 hh->prim = PH_DATA_IND;
232 hh->id = get_sapi_tei(dch->rx_skb->data);
233 skb_queue_tail(&dch->rqueue, dch->rx_skb);
235 schedule_event(dch, FLG_RECVQUEUE);
237 EXPORT_SYMBOL(recv_Dchannel);
/*
 * recv_Echannel - queue a received E-channel (echo) frame.
 * Same validation as recv_Dchannel, but the frame is stamped
 * PH_DATA_E_IND and — note — queued on the *D-channel's* rqueue and
 * delivered through the D-channel's bottom half, not the E-channel's.
 */
240 recv_Echannel(struct dchannel *ech, struct dchannel *dch)
242 struct mISDNhead *hh;
244 if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
245 dev_kfree_skb(ech->rx_skb);
249 hh = mISDN_HEAD_P(ech->rx_skb);
250 hh->prim = PH_DATA_E_IND;
251 hh->id = get_sapi_tei(ech->rx_skb->data);
252 skb_queue_tail(&dch->rqueue, ech->rx_skb);
254 schedule_event(dch, FLG_RECVQUEUE);
256 EXPORT_SYMBOL(recv_Echannel);
/*
 * recv_Bchannel - queue the current B-channel receive buffer upward.
 * @id:    value for the mISDN header id field (assignment line missing
 *         from this extraction — presumably hh->id = id; confirm).
 * @force: deliver a transparent-mode buffer even if it has not yet
 *         reached bch->minlen.
 * Empty or missing rx_skb is tolerated (drivers may call us after a
 * failed allocation or recovery). The queue is capped at 64 entries;
 * on overflow the whole rqueue is flushed.
 */
259 recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
261 struct mISDNhead *hh;
263 /* if allocation did fail upper functions still may call us */
264 if (unlikely(!bch->rx_skb))
266 if (unlikely(!bch->rx_skb->len)) {
267 /* we have no data to send - this may happen after recovery
268 * from overflow or too small allocation.
269 * We need to free the buffer here */
270 dev_kfree_skb(bch->rx_skb);
/* Transparent mode accumulates until minlen unless forced. */
273 if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
274 (bch->rx_skb->len < bch->minlen) && !force)
276 hh = mISDN_HEAD_P(bch->rx_skb);
277 hh->prim = PH_DATA_IND;
279 if (bch->rcount >= 64) {
281 "B%d receive queue overflow - flushing!\n",
283 skb_queue_purge(&bch->rqueue);
286 skb_queue_tail(&bch->rqueue, bch->rx_skb);
288 schedule_event(bch, FLG_RECVQUEUE);
291 EXPORT_SYMBOL(recv_Bchannel);
/*
 * recv_Dchannel_skb - queue an already-prepared skb on a D-channel's
 * receive queue and schedule the bottom half. The caller is expected to
 * have filled the mISDN header; ownership of @skb passes to the queue.
 */
294 recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
296 skb_queue_tail(&dch->rqueue, skb);
297 schedule_event(dch, FLG_RECVQUEUE);
299 EXPORT_SYMBOL(recv_Dchannel_skb);
/*
 * recv_Bchannel_skb - queue an already-prepared skb on a B-channel's
 * receive queue, flushing the queue first if it has grown to 64
 * entries. Ownership of @skb passes to the queue.
 * NOTE(review): the rcount bookkeeping lines are missing from this
 * extraction.
 */
302 recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
304 if (bch->rcount >= 64) {
305 printk(KERN_WARNING "B-channel %p receive queue overflow, "
307 skb_queue_purge(&bch->rqueue);
311 skb_queue_tail(&bch->rqueue, skb);
312 schedule_event(bch, FLG_RECVQUEUE);
314 EXPORT_SYMBOL(recv_Bchannel_skb);
/*
 * confirm_Dsend - generate a PH_DATA_CNF for the just-sent tx_skb.
 * Allocates a zero-payload confirmation skb carrying the original
 * frame's id and queues it upward via the receive path. GFP_ATOMIC
 * because this runs from IRQ/bottom-half context; on allocation failure
 * only a message is logged (the missing return line is presumably
 * between lines 325 and 328 of the full file).
 */
317 confirm_Dsend(struct dchannel *dch)
321 skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
322 0, NULL, GFP_ATOMIC);
324 printk(KERN_ERR "%s: no skb id %x\n", __func__,
325 mISDN_HEAD_ID(dch->tx_skb));
328 skb_queue_tail(&dch->rqueue, skb);
329 schedule_event(dch, FLG_RECVQUEUE);
/*
 * get_next_dframe - fetch the next pending D-channel transmit frame
 * from the send queue into dch->tx_skb; clears FLG_TX_BUSY when the
 * queue is empty. (The branch structure between these lines is missing
 * from this extraction.)
 */
333 get_next_dframe(struct dchannel *dch)
336 dch->tx_skb = skb_dequeue(&dch->squeue);
342 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
345 EXPORT_SYMBOL(get_next_dframe);
/*
 * confirm_Bsend - generate a PH_DATA_CNF for the just-sent B-channel
 * tx_skb. Like confirm_Dsend, but additionally enforces the 64-entry
 * receive-queue cap before queueing the confirmation.
 * NOTE(review): rcount bookkeeping and the early-return after the
 * failed allocation are among the missing lines.
 */
348 confirm_Bsend(struct bchannel *bch)
352 if (bch->rcount >= 64) {
353 printk(KERN_WARNING "B-channel %p receive queue overflow, "
355 skb_queue_purge(&bch->rqueue);
358 skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
359 0, NULL, GFP_ATOMIC);
361 printk(KERN_ERR "%s: no skb id %x\n", __func__,
362 mISDN_HEAD_ID(bch->tx_skb));
366 skb_queue_tail(&bch->rqueue, skb);
367 schedule_event(bch, FLG_RECVQUEUE);
/*
 * get_next_bframe - promote the queued next_skb to the active tx_skb.
 * If FLG_TX_NEXT is set but next_skb is NULL that is an inconsistency:
 * the flag is cleared and a warning is printed. With nothing pending,
 * FLG_TX_BUSY is cleared. The confirmation mentioned in the comment
 * (immediate confirm to allow more data) is among the lines missing
 * from this extraction.
 */
371 get_next_bframe(struct bchannel *bch)
374 if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
375 bch->tx_skb = bch->next_skb;
377 bch->next_skb = NULL;
378 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
379 /* confirm imediately to allow next data */
/* Inconsistent state: TX_NEXT set without a queued skb. */
383 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
384 printk(KERN_WARNING "B TX_NEXT without skb\n");
388 test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
391 EXPORT_SYMBOL(get_next_bframe);
/*
 * queue_ch_frame - deliver a primitive to a channel's peer.
 * With no skb, a fresh zero-payload message is built via _queue_data();
 * otherwise the existing skb's header is stamped (the prim/id
 * assignments are missing from this extraction) and handed to the
 * peer's recv(). A zero return from recv() means the peer consumed the
 * skb; the failure path is among the missing lines.
 */
394 queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
396 struct mISDNhead *hh;
399 _queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
402 hh = mISDN_HEAD_P(skb);
405 if (!ch->recv(ch->peer, skb))
411 EXPORT_SYMBOL(queue_ch_frame);
/*
 * dchannel_senddata - validate and stage a D-channel transmit frame.
 * Rejects frames that are too small or exceed ch->maxlen. If a
 * transmission is already in flight (FLG_TX_BUSY was set), the skb is
 * appended to the software send queue; otherwise it becomes the active
 * tx_skb (that branch is missing from this extraction). Caller must
 * hold the hardware lock, per the comment.
 */
414 dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
418 printk(KERN_WARNING "%s: skb too small\n", __func__);
421 if (skb->len > ch->maxlen) {
422 printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
423 __func__, skb->len, ch->maxlen);
426 /* HW lock must be obtained */
427 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
428 skb_queue_tail(&ch->squeue, skb);
437 EXPORT_SYMBOL(dchannel_senddata);
/*
 * bchannel_senddata - validate and stage a B-channel transmit frame.
 * Same size checks as dchannel_senddata. B-channels have no send queue:
 * at most one frame may be pending in ch->next_skb, so a still-set
 * next_skb is reported as an error. If FLG_TX_BUSY is already set the
 * skb is parked as next_skb via FLG_TX_NEXT (the assignment itself is
 * among the missing lines). Caller must hold the hardware lock.
 */
440 bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
445 printk(KERN_WARNING "%s: skb too small\n", __func__);
448 if (skb->len > ch->maxlen) {
449 printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
450 __func__, skb->len, ch->maxlen);
453 /* HW lock must be obtained */
454 /* check for pending next_skb */
457 "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
458 __func__, skb->len, ch->next_skb->len);
461 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
462 test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
473 EXPORT_SYMBOL(bchannel_senddata);
475 /* The function allocates a new receive skb on demand with a size for the
476 * requirements of the current protocol. It returns the tailroom of the
477 * receive skb or an error.
/*
 * bchannel_get_rxbuf - ensure bch->rx_skb can hold @reqlen more bytes.
 * If a buffer exists but its tailroom is short: in transparent mode the
 * accumulated data is flushed upward (forced recv_Bchannel) and a new
 * buffer is tried; in HDLC mode an oversized frame must be dropped.
 * Before allocating, pending MISDN_CTRL_RX_BUFFER resizes are applied
 * (next_minlen/next_maxlen -> minlen/maxlen). Transparent buffers are
 * sized 2*minlen capped at maxlen; for HDLC the length is unknown in
 * advance (the len assignment for that branch is among the missing
 * lines). Allocation uses GFP_ATOMIC — IRQ context expected.
 * NOTE(review): return statements and several branch lines are missing
 * from this extraction.
 */
480 bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
485 len = skb_tailroom(bch->rx_skb);
487 pr_warning("B%d no space for %d (only %d) bytes\n",
488 bch->nr, reqlen, len);
489 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
490 /* send what we have now and try a new buffer */
491 recv_Bchannel(bch, 0, true);
493 /* on HDLC we have to drop too big frames */
500 /* update current min/max length first */
501 if (unlikely(bch->maxlen != bch->next_maxlen))
502 bch->maxlen = bch->next_maxlen;
503 if (unlikely(bch->minlen != bch->next_minlen))
504 bch->minlen = bch->next_minlen;
505 if (unlikely(reqlen > bch->maxlen))
507 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
508 if (reqlen >= bch->minlen) {
511 len = 2 * bch->minlen;
512 if (len > bch->maxlen)
516 /* with HDLC we do not know the length yet */
519 bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
521 pr_warning("B%d receive no memory for %d bytes\n",
527 EXPORT_SYMBOL(bchannel_get_rxbuf);