1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
3                     Author: Peter Wang  <pwang@iphase.com>            
4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>           
6                                Version: 1.0                           
7 *******************************************************************************
8       
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18       
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
20       was originally written by Monalisa Agrawal at UNH. Now this driver 
21       supports a variety of variants of the Interphase ATM PCI (i)Chip adapter 
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
23       in terms of PHY type, the size of control memory and the size of 
24       packet memory. The following are the change log and history:
25      
26           Bugfix Mona's UBR driver.
27           Modify the basic memory allocation and DMA logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32           Add the flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to the board with 512K control memory.
34           Add the support of all the variants of the Interphase ATM PCI 
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/module.h>  
44 #include <linux/kernel.h>  
45 #include <linux/mm.h>  
46 #include <linux/pci.h>  
47 #include <linux/errno.h>  
48 #include <linux/atm.h>  
49 #include <linux/atmdev.h>  
50 #include <linux/sonet.h>  
51 #include <linux/skbuff.h>  
52 #include <linux/time.h>  
53 #include <linux/delay.h>  
54 #include <linux/uio.h>  
55 #include <linux/init.h>  
56 #include <linux/interrupt.h>
57 #include <linux/wait.h>
58 #include <linux/slab.h>
59 #include <asm/io.h>  
60 #include <linux/atomic.h>
61 #include <linux/uaccess.h>  
62 #include <asm/string.h>  
63 #include <asm/byteorder.h>  
64 #include <linux/vmalloc.h>
65 #include <linux/jiffies.h>
66 #include "iphase.h"               
67 #include "suni.h"                 
68 #define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
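/* swap_byte_order() swaps the two bytes of a 16-bit value, e.g.
 * swap_byte_order(0x1234) == 0x3412 (a local equivalent of swab16()); it is
 * used, for example, when picking up the AAL5 CPCS trailer length in
 * rx_dle_intr() below. */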
69
70 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
71
72 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
73 static void desc_dbg(IADEV *iadev);
74
75 static IADEV *ia_dev[8];
76 static struct atm_dev *_ia_dev[8];
77 static int iadev_count;
78 static void ia_led_timer(struct timer_list *unused);
79 static DEFINE_TIMER(ia_timer, ia_led_timer);
80 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
81 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
82 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
83             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
84
85 module_param(IA_TX_BUF, int, 0);
86 module_param(IA_TX_BUF_SZ, int, 0);
87 module_param(IA_RX_BUF, int, 0);
88 module_param(IA_RX_BUF_SZ, int, 0);
89 module_param(IADebugFlag, uint, 0644);
90
91 MODULE_LICENSE("GPL");
92
93 /**************************** IA_LIB **********************************/
94
95 static void ia_init_rtn_q (IARTN_Q *que) 
96 {
97    que->next = NULL; 
98    que->tail = NULL; 
99 }
100
101 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
102 {
103    data->next = NULL;
104    if (que->next == NULL) 
105       que->next = que->tail = data;
106    else {
107       data->next = que->next;
108       que->next = data;
109    } 
110    return;
111 }
112
113 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
114    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
115    if (!entry)
116       return -ENOMEM;
117    entry->data = data;
118    entry->next = NULL;
119    if (que->next == NULL) 
120       que->next = que->tail = entry;
121    else {
122       que->tail->next = entry;
123       que->tail = que->tail->next;
124    }      
125    return 1;
126 }
127
128 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
129    IARTN_Q *tmpdata;
130    if (que->next == NULL)
131       return NULL;
132    tmpdata = que->next;
133    if ( que->next == que->tail)  
134       que->next = que->tail = NULL;
135    else 
136       que->next = que->next->next;
137    return tmpdata;
138 }
139
140 static void ia_hack_tcq(IADEV *dev) {
141
142   u_short               desc1;
143   u_short               tcq_wr;
144   struct ia_vcc         *iavcc_r = NULL; 
145
146   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
147   while (dev->host_tcq_wr != tcq_wr) {
148      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
149      if (!desc1) ;
150      else if (!dev->desc_tbl[desc1 -1].timestamp) {
151         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
152         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
153      }                                 
154      else if (dev->desc_tbl[desc1 -1].timestamp) {
155         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
156            printk("IA: Fatal err in get_desc\n");
157            continue;
158         }
159         iavcc_r->vc_desc_cnt--;
160         dev->desc_tbl[desc1 -1].timestamp = 0;
161         IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
162                                    dev->desc_tbl[desc1 -1].txskb, desc1);)
163         if (iavcc_r->pcr < dev->rate_limit) {
164            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
165            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
166               printk("ia_hack_tcq: No memory available\n");
167         } 
168         dev->desc_tbl[desc1 -1].iavcc = NULL;
169         dev->desc_tbl[desc1 -1].txskb = NULL;
170      }
171      dev->host_tcq_wr += 2;
172      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
173         dev->host_tcq_wr = dev->ffL.tcq_st;
174   }
175 } /* ia_hack_tcq */
176
177 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
178   u_short               desc_num, i;
179   struct sk_buff        *skb;
180   struct ia_vcc         *iavcc_r = NULL; 
181   unsigned long delta;
182   static unsigned long timer = 0;
183   int ltimeout;
184
185   ia_hack_tcq (dev);
186   if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
187      timer = jiffies; 
188      i=0;
189      while (i < dev->num_tx_desc) {
190         if (!dev->desc_tbl[i].timestamp) {
191            i++;
192            continue;
193         }
194         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
195         delta = jiffies - dev->desc_tbl[i].timestamp;
196         if (delta >= ltimeout) {
197            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
198            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
199               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
200            else 
201               dev->ffL.tcq_rd -= 2;
202            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
203            if (!(skb = dev->desc_tbl[i].txskb) || 
204                           !(iavcc_r = dev->desc_tbl[i].iavcc))
205               printk("Fatal err, desc table vcc or skb is NULL\n");
206            else 
207               iavcc_r->vc_desc_cnt--;
208            dev->desc_tbl[i].timestamp = 0;
209            dev->desc_tbl[i].iavcc = NULL;
210            dev->desc_tbl[i].txskb = NULL;
211         }
212         i++;
213      } /* while */
214   }
215   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
216      return 0xFFFF;
217     
218   /* Get the next available descriptor number from TCQ */
219   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
220
221   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
222      dev->ffL.tcq_rd += 2;
223      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
224         dev->ffL.tcq_rd = dev->ffL.tcq_st;
225      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
226         return 0xFFFF; 
227      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
228   }
229
230   /* get system time */
231   dev->desc_tbl[desc_num -1].timestamp = jiffies;
232   return desc_num;
233 }
234
235 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
236   u_char                foundLockUp;
237   vcstatus_t            *vcstatus;
238   u_short               *shd_tbl;
239   u_short               tempCellSlot, tempFract;
240   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
241   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
242   u_int  i;
243
244   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
245      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
246      vcstatus->cnt++;
247      foundLockUp = 0;
248      if( vcstatus->cnt == 0x05 ) {
249         abr_vc += vcc->vci;
250         eabr_vc += vcc->vci;
251         if( eabr_vc->last_desc ) {
252            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
253               /* Wait for 10 Micro sec */
254               udelay(10);
255               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
256                  foundLockUp = 1;
257            }
258            else {
259               tempCellSlot = abr_vc->last_cell_slot;
260               tempFract    = abr_vc->fraction;
261               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
262                          && (tempFract == dev->testTable[vcc->vci]->fract))
263                  foundLockUp = 1;                   
264               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
265               dev->testTable[vcc->vci]->fract = tempFract; 
266            }        
267         } /* last descriptor */            
268         vcstatus->cnt = 0;      
269      } /* vcstatus->cnt */
270         
271      if (foundLockUp) {
272         IF_ABR(printk("LOCK UP found\n");) 
273         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
274         /* Wait for 10 Micro sec */
275         udelay(10); 
276         abr_vc->status &= 0xFFF8;
277         abr_vc->status |= 0x0001;  /* state is idle */
278         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
279         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
280         if (i < dev->num_vc)
281            shd_tbl[i] = vcc->vci;
282         else
283            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
284         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
285         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
286         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
287         vcstatus->cnt = 0;
288      } /* foundLockUp */
289
290   } /* if an ABR VC */
291
292
293 }
294  
295 /*
296 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
297 **
298 **  +----+----+------------------+-------------------------------+
299 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
300 **  +----+----+------------------+-------------------------------+
301 ** 
302 **    R = reserved (written as 0)
303 **    NZ = 0 if 0 cells/sec; 1 otherwise
304 **
305 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
306 */
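/*
** Worked example (added for illustration): for a rate equal to ATM_OC3_PCR
** (353207 cells/sec, the default LineRate used below), the most significant
** bit of the rate is bit 18, so eeeee = 18 and the 9-bit mantissa is
** (353207 >> 9) & 0x1ff = 177.  The encoding is therefore
** 0x4000 | (18 << 9) | 177 = 0x64b1, which decodes back to
** (512 + 177) * 2^(18 - 9) = 352768 cells/sec; the small error comes from
** truncating the low-order bits of the mantissa.
*/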
307 static u16
308 cellrate_to_float(u32 cr)
309 {
310
311 #define NZ              0x4000
312 #define M_BITS          9               /* Number of bits in mantissa */
313 #define E_BITS          5               /* Number of bits in exponent */
314 #define M_MASK          0x1ff           
315 #define E_MASK          0x1f
316   u16   flot;
317   u32   tmp = cr & 0x00ffffff;
318   int   i   = 0;
319   if (cr == 0)
320      return 0;
321   while (tmp != 1) {
322      tmp >>= 1;
323      i++;
324   }
325   if (i == M_BITS)
326      flot = NZ | (i << M_BITS) | (cr & M_MASK);
327   else if (i < M_BITS)
328      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
329   else
330      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
331   return flot;
332 }
333
334 #if 0
335 /*
336 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
337 */
338 static u32
339 float_to_cellrate(u16 rate)
340 {
341   u32   exp, mantissa, cps;
342   if ((rate & NZ) == 0)
343      return 0;
344   exp = (rate >> M_BITS) & E_MASK;
345   mantissa = rate & M_MASK;
346   if (exp == 0)
347      return 1;
348   cps = (1 << M_BITS) | mantissa;
349   if (exp == M_BITS)
350      cps = cps;
351   else if (exp > M_BITS)
352      cps <<= (exp - M_BITS);
353   else
354      cps >>= (M_BITS - exp);
355   return cps;
356 }
357 #endif 
358
359 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
360   srv_p->class_type = ATM_ABR;
361   srv_p->pcr        = dev->LineRate;
362   srv_p->mcr        = 0;
363   srv_p->icr        = 0x055cb7;
364   srv_p->tbe        = 0xffffff;
365   srv_p->frtt       = 0x3a;
366   srv_p->rif        = 0xf;
367   srv_p->rdf        = 0xb;
368   srv_p->nrm        = 0x4;
369   srv_p->trm        = 0x7;
370   srv_p->cdf        = 0x3;
371   srv_p->adtf       = 50;
372 }
373
374 static int
375 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
376                                                 struct atm_vcc *vcc, u8 flag)
377 {
378   f_vc_abr_entry  *f_abr_vc;
379   r_vc_abr_entry  *r_abr_vc;
380   u32           icr;
381   u8            trm, nrm, crm;
382   u16           adtf, air, *ptr16;      
383   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
384   f_abr_vc += vcc->vci;       
385   switch (flag) {
386      case 1: /* FFRED initialization */
387 #if 0  /* sanity check */
388        if (srv_p->pcr == 0)
389           return INVALID_PCR;
390        if (srv_p->pcr > dev->LineRate)
391           srv_p->pcr = dev->LineRate;
392        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
393           return MCR_UNAVAILABLE;
394        if (srv_p->mcr > srv_p->pcr)
395           return INVALID_MCR;
396        if (!(srv_p->icr))
397           srv_p->icr = srv_p->pcr;
398        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
399           return INVALID_ICR;
400        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
401           return INVALID_TBE;
402        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
403           return INVALID_FRTT;
404        if (srv_p->nrm > MAX_NRM)
405           return INVALID_NRM;
406        if (srv_p->trm > MAX_TRM)
407           return INVALID_TRM;
408        if (srv_p->adtf > MAX_ADTF)
409           return INVALID_ADTF;
410        else if (srv_p->adtf == 0)
411           srv_p->adtf = 1;
412        if (srv_p->cdf > MAX_CDF)
413           return INVALID_CDF;
414        if (srv_p->rif > MAX_RIF)
415           return INVALID_RIF;
416        if (srv_p->rdf > MAX_RDF)
417           return INVALID_RDF;
418 #endif
419        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
420        f_abr_vc->f_vc_type = ABR;
421        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
422                                   /* i.e 2**n = 2 << (n-1) */
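                                  /* Worked example: with the init_abr_vc()
                                   * default srv_p->nrm = 0x4, nrm = 2 << 4 =
                                   * 32, i.e. at most 2^(4+1) cells per
                                   * forward RM cell (assuming the standard
                                   * ABR Nrm semantics). */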
423        f_abr_vc->f_nrm = nrm << 8 | nrm;
424        trm = 100000/(2 << (16 - srv_p->trm));
425        if ( trm == 0) trm = 1;
426        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
427        crm = srv_p->tbe / nrm;
428        if (crm == 0) crm = 1;
429        f_abr_vc->f_crm = crm & 0xff;
430        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
431        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
432                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
433                                 (1000000/(srv_p->frtt/srv_p->tbe)));
434        f_abr_vc->f_icr = cellrate_to_float(icr);
435        adtf = (10000 * srv_p->adtf)/8192;
436        if (adtf == 0) adtf = 1; 
437        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
438        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
439        f_abr_vc->f_acr = f_abr_vc->f_icr;
440        f_abr_vc->f_status = 0x0042;
441        break;
442     case 0: /* RFRED initialization */  
443        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
444        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
445        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
446        r_abr_vc += vcc->vci;
447        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
448        air = srv_p->pcr << (15 - srv_p->rif);
449        if (air == 0) air = 1;
450        r_abr_vc->r_air = cellrate_to_float(air);
451        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
452        dev->sum_mcr        += srv_p->mcr;
453        dev->n_abr++;
454        break;
455     default:
456        break;
457   }
458   return        0;
459 }
460 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
461    u32 rateLow=0, rateHigh, rate;
462    int entries;
463    struct ia_vcc *ia_vcc;
464
465    int   idealSlot =0, testSlot, toBeAssigned, inc;
466    u32   spacing;
467    u16  *SchedTbl, *TstSchedTbl;
468    u16  cbrVC, vcIndex;
469    u32   fracSlot    = 0;
470    u32   sp_mod      = 0;
471    u32   sp_mod2     = 0;
472
473    /* IpAdjustTrafficParams */
474    if (vcc->qos.txtp.max_pcr <= 0) {
475       IF_ERR(printk("PCR for CBR not defined\n");)
476       return -1;
477    }
478    rate = vcc->qos.txtp.max_pcr;
479    entries = rate / dev->Granularity;
480    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
481                                 entries, rate, dev->Granularity);)
482    if (entries < 1)
483       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
484    rateLow  =  entries * dev->Granularity;
485    rateHigh = (entries + 1) * dev->Granularity;
486    if (3*(rate - rateLow) > (rateHigh - rate))
487       entries++;
488    if (entries > dev->CbrRemEntries) {
489       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
490       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
491                                        entries, dev->CbrRemEntries);)
492       return -EBUSY;
493    }   
494
495    ia_vcc = INPH_IA_VCC(vcc);
496    ia_vcc->NumCbrEntry = entries; 
497    dev->sum_mcr += entries * dev->Granularity; 
498    /* IaFFrednInsertCbrSched */
499    // Starting at an arbitrary location, place the entries into the table
500    // as smoothly as possible
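   // Worked example (hypothetical numbers): with CbrTotEntries = 1000 and
   // entries = 7, spacing = 142 and sp_mod = 6; starting from slot 0 the loop
   // below would land on slots 0, 142, 284, 427, 570, 713 and 856, i.e. the
   // remainder is spread out so the VC is paced at roughly one slot every
   // 1000/7 = ~142.9 entries.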
501    cbrVC   = 0;
502    spacing = dev->CbrTotEntries / entries;
503    sp_mod  = dev->CbrTotEntries % entries; // get modulo
504    toBeAssigned = entries;
505    fracSlot = 0;
506    vcIndex  = vcc->vci;
507    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
508    while (toBeAssigned)
509    {
510       // If this is the first time, start the table loading for this connection
511       // as close to entryPoint as possible.
512       if (toBeAssigned == entries)
513       {
514          idealSlot = dev->CbrEntryPt;
515          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
516          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
517             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
518       } else {
519          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
520          // in the table that would be  smoothest
521          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
522          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
523       }
524       if (idealSlot >= (int)dev->CbrTotEntries) 
525          idealSlot -= dev->CbrTotEntries;  
526       // Continuously check around this ideal value until a null
527       // location is encountered.
528       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
529       inc = 0;
530       testSlot = idealSlot;
531       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
532       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
533                                 testSlot, TstSchedTbl,toBeAssigned);)
534       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
535       while (cbrVC)  // If another VC at this location, we have to keep looking
536       {
537           inc++;
538           testSlot = idealSlot - inc;
539           if (testSlot < 0) { // Wrap if necessary
540              testSlot += dev->CbrTotEntries;
541              IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
542                                                        SchedTbl,testSlot);)
543           }
544           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
545           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
546           if (!cbrVC)
547              break;
548           testSlot = idealSlot + inc;
549           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
550              testSlot -= dev->CbrTotEntries;
551              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
552              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
553                                             testSlot, toBeAssigned);)
554           } 
555           // set table index and read in value
556           TstSchedTbl = (u16*)(SchedTbl + testSlot);
557           IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
558                           TstSchedTbl,cbrVC,inc);)
559           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
560        } /* while */
561        // Move this VCI number into this location of the CBR Sched table.
562        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
563        dev->CbrRemEntries--;
564        toBeAssigned--;
565    } /* while */ 
566
567    /* IaFFrednCbrEnable */
568    dev->NumEnabledCBR++;
569    if (dev->NumEnabledCBR == 1) {
570        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
571        IF_CBR(printk("CBR is enabled\n");)
572    }
573    return 0;
574 }
575 static void ia_cbrVc_close (struct atm_vcc *vcc) {
576    IADEV *iadev;
577    u16 *SchedTbl, NullVci = 0;
578    u32 i, NumFound;
579
580    iadev = INPH_IA_DEV(vcc->dev);
581    iadev->NumEnabledCBR--;
582    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
583    if (iadev->NumEnabledCBR == 0) {
584       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
585       IF_CBR (printk("CBR support disabled\n");)
586    }
587    NumFound = 0;
588    for (i=0; i < iadev->CbrTotEntries; i++)
589    {
590       if (*SchedTbl == vcc->vci) {
591          iadev->CbrRemEntries++;
592          *SchedTbl = NullVci;
593          IF_CBR(NumFound++;)
594       }
595       SchedTbl++;   
596    } 
597    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
598 }
599
600 static int ia_avail_descs(IADEV *iadev) {
601    int tmp = 0;
602    ia_hack_tcq(iadev);
603    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
604       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
605    else
606       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
607                    iadev->ffL.tcq_st) / 2;
608    return tmp;
609 }    
610
611 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
612
613 static int ia_que_tx (IADEV *iadev) { 
614    struct sk_buff *skb;
615    int num_desc;
616    struct atm_vcc *vcc;
617    num_desc = ia_avail_descs(iadev);
618
619    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
620       if (!(vcc = ATM_SKB(skb)->vcc)) {
621          dev_kfree_skb_any(skb);
622          printk("ia_que_tx: Null vcc\n");
623          break;
624       }
625       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
626          dev_kfree_skb_any(skb);
627          printk("Free the SKB on closed vci %d \n", vcc->vci);
628          break;
629       }
630       if (ia_pkt_tx (vcc, skb)) {
631          skb_queue_head(&iadev->tx_backlog, skb);
632       }
633       num_desc--;
634    }
635    return 0;
636 }
637
638 static void ia_tx_poll (IADEV *iadev) {
639    struct atm_vcc *vcc = NULL;
640    struct sk_buff *skb = NULL, *skb1 = NULL;
641    struct ia_vcc *iavcc;
642    IARTN_Q *  rtne;
643
644    ia_hack_tcq(iadev);
645    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
646        skb = rtne->data.txskb;
647        if (!skb) {
648            printk("ia_tx_poll: skb is null\n");
649            goto out;
650        }
651        vcc = ATM_SKB(skb)->vcc;
652        if (!vcc) {
653            printk("ia_tx_poll: vcc is null\n");
654            dev_kfree_skb_any(skb);
655            goto out;
656        }
657
658        iavcc = INPH_IA_VCC(vcc);
659        if (!iavcc) {
660            printk("ia_tx_poll: iavcc is null\n");
661            dev_kfree_skb_any(skb);
662            goto out;
663        }
664
665        skb1 = skb_dequeue(&iavcc->txing_skb);
666        while (skb1 && (skb1 != skb)) {
667           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
668              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
669           }
670           IF_ERR(printk("Releasing an SKB that does not match\n");)
671           if ((vcc->pop) && (skb1->len != 0))
672           {
673              vcc->pop(vcc, skb1);
674              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
675                                                           (long)skb1);)
676           }
677           else 
678              dev_kfree_skb_any(skb1);
679           skb1 = skb_dequeue(&iavcc->txing_skb);
680        }                                                        
681        if (!skb1) {
682           IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
683           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
684           break;
685        }
686        if ((vcc->pop) && (skb->len != 0))
687        {
688           vcc->pop(vcc, skb);
689           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
690        }
691        else 
692           dev_kfree_skb_any(skb);
693        kfree(rtne);
694     }
695     ia_que_tx(iadev);
696 out:
697     return;
698 }
699 #if 0
700 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
701 {
702         u32     t;
703         int     i;
704         /*
705          * Issue a command to enable writes to the NOVRAM
706          */
707         NVRAM_CMD (EXTEND + EWEN);
708         NVRAM_CLR_CE;
709         /*
710          * issue the write command
711          */
712         NVRAM_CMD(IAWRITE + addr);
713         /* 
714          * Send the data, starting with D15, then D14, and so on for 16 bits
715          */
716         for (i=15; i>=0; i--) {
717                 NVRAM_CLKOUT (val & 0x8000);
718                 val <<= 1;
719         }
720         NVRAM_CLR_CE;
721         CFG_OR(NVCE);
722         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
723         while (!(t & NVDO))
724                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
725
726         NVRAM_CLR_CE;
727         /*
728          * disable writes again
729          */
730         NVRAM_CMD(EXTEND + EWDS)
731         NVRAM_CLR_CE;
732         CFG_AND(~NVDI);
733 }
734 #endif
735
736 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
737 {
738         u_short val;
739         u32     t;
740         int     i;
741         /*
742          * Read the first bit that was clocked with the falling edge of the
743          * last command data clock
744          */
745         NVRAM_CMD(IAREAD + addr);
746         /*
747          * Now read the rest of the bits, the next bit read is D14, then D13,
748          * and so on.
749          */
750         val = 0;
751         for (i=15; i>=0; i--) {
752                 NVRAM_CLKIN(t);
753                 val |= (t << i);
754         }
755         NVRAM_CLR_CE;
756         CFG_AND(~NVDI);
757         return val;
758 }
759
760 static void ia_hw_type(IADEV *iadev) {
761    u_short memType = ia_eeprom_get(iadev, 25);   
762    iadev->memType = memType;
763    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
764       iadev->num_tx_desc = IA_TX_BUF;
765       iadev->tx_buf_sz = IA_TX_BUF_SZ;
766       iadev->num_rx_desc = IA_RX_BUF;
767       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
768    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
769       if (IA_TX_BUF == DFL_TX_BUFFERS)
770         iadev->num_tx_desc = IA_TX_BUF / 2;
771       else 
772         iadev->num_tx_desc = IA_TX_BUF;
773       iadev->tx_buf_sz = IA_TX_BUF_SZ;
774       if (IA_RX_BUF == DFL_RX_BUFFERS)
775         iadev->num_rx_desc = IA_RX_BUF / 2;
776       else
777         iadev->num_rx_desc = IA_RX_BUF;
778       iadev->rx_buf_sz = IA_RX_BUF_SZ;
779    }
780    else {
781       if (IA_TX_BUF == DFL_TX_BUFFERS) 
782         iadev->num_tx_desc = IA_TX_BUF / 8;
783       else
784         iadev->num_tx_desc = IA_TX_BUF;
785       iadev->tx_buf_sz = IA_TX_BUF_SZ;
786       if (IA_RX_BUF == DFL_RX_BUFFERS)
787         iadev->num_rx_desc = IA_RX_BUF / 8;
788       else
789         iadev->num_rx_desc = IA_RX_BUF;
790       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
791    } 
792    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
793    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
794          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
795          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
796
797 #if 0
798    if ((memType & FE_MASK) == FE_SINGLE_MODE)
799       iadev->phy_type = PHY_OC3C_S;
800    else if ((memType & FE_MASK) == FE_UTP_OPTION)
801       iadev->phy_type = PHY_UTP155;
802    else
803      iadev->phy_type = PHY_OC3C_M;
804 #endif
805    
806    iadev->phy_type = memType & FE_MASK;
807    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
808                                          memType,iadev->phy_type);)
809    if (iadev->phy_type == FE_25MBIT_PHY) 
810       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
811    else if (iadev->phy_type == FE_DS3_PHY)
812       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
813    else if (iadev->phy_type == FE_E3_PHY) 
814       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
815    else
816        iadev->LineRate = (u32)(ATM_OC3_PCR);
817    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
818
819 }
820
821 static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
822 {
823         return readl(ia->phy + (reg >> 2));
824 }
825
826 static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
827 {
828         writel(val, ia->phy + (reg >> 2));
829 }
830
831 static void ia_frontend_intr(struct iadev_priv *iadev)
832 {
833         u32 status;
834
835         if (iadev->phy_type & FE_25MBIT_PHY) {
836                 status = ia_phy_read32(iadev, MB25_INTR_STATUS);
837                 iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
838         } else if (iadev->phy_type & FE_DS3_PHY) {
839                 ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
840                 status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
841                 iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
842         } else if (iadev->phy_type & FE_E3_PHY) {
843                 ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
844                 status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
845                 iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
846         } else {
847                 status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
848                 iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
849         }
850
851         printk(KERN_INFO "IA: SUNI carrier %s\n",
852                 iadev->carrier_detect ? "detected" : "lost signal");
853 }
854
855 static void ia_mb25_init(struct iadev_priv *iadev)
856 {
857 #if 0
858    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
859 #endif
860         ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
861         ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
862
863         iadev->carrier_detect =
864                 (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
865 }
866
867 struct ia_reg {
868         u16 reg;
869         u16 val;
870 };
871
872 static void ia_phy_write(struct iadev_priv *iadev,
873                          const struct ia_reg *regs, int len)
874 {
875         while (len--) {
876                 ia_phy_write32(iadev, regs->reg, regs->val);
877                 regs++;
878         }
879 }
880
881 static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
882 {
883         static const struct ia_reg suni_ds3_init[] = {
884                 { SUNI_DS3_FRM_INTR_ENBL,       0x17 },
885                 { SUNI_DS3_FRM_CFG,             0x01 },
886                 { SUNI_DS3_TRAN_CFG,            0x01 },
887                 { SUNI_CONFIG,                  0 },
888                 { SUNI_SPLR_CFG,                0 },
889                 { SUNI_SPLT_CFG,                0 }
890         };
891         u32 status;
892
893         status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
894         iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
895
896         ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
897 }
898
899 static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
900 {
901         static const struct ia_reg suni_e3_init[] = {
902                 { SUNI_E3_FRM_FRAM_OPTIONS,             0x04 },
903                 { SUNI_E3_FRM_MAINT_OPTIONS,            0x20 },
904                 { SUNI_E3_FRM_FRAM_INTR_ENBL,           0x1d },
905                 { SUNI_E3_FRM_MAINT_INTR_ENBL,          0x30 },
906                 { SUNI_E3_TRAN_STAT_DIAG_OPTIONS,       0 },
907                 { SUNI_E3_TRAN_FRAM_OPTIONS,            0x01 },
908                 { SUNI_CONFIG,                          SUNI_PM7345_E3ENBL },
909                 { SUNI_SPLR_CFG,                        0x41 },
910                 { SUNI_SPLT_CFG,                        0x41 }
911         };
912         u32 status;
913
914         status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
915         iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
916         ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
917 }
918
919 static void ia_suni_pm7345_init(struct iadev_priv *iadev)
920 {
921         static const struct ia_reg suni_init[] = {
922                 /* Enable RSOP loss of signal interrupt. */
923                 { SUNI_INTR_ENBL,               0x28 },
924                 /* Clear error counters. */
925                 { SUNI_ID_RESET,                0 },
926                 /* Clear "PMCTST" in master test register. */
927                 { SUNI_MASTER_TEST,             0 },
928
929                 { SUNI_RXCP_CTRL,               0x2c },
930                 { SUNI_RXCP_FCTRL,              0x81 },
931
932                 { SUNI_RXCP_IDLE_PAT_H1,        0 },
933                 { SUNI_RXCP_IDLE_PAT_H2,        0 },
934                 { SUNI_RXCP_IDLE_PAT_H3,        0 },
935                 { SUNI_RXCP_IDLE_PAT_H4,        0x01 },
936
937                 { SUNI_RXCP_IDLE_MASK_H1,       0xff },
938                 { SUNI_RXCP_IDLE_MASK_H2,       0xff },
939                 { SUNI_RXCP_IDLE_MASK_H3,       0xff },
940                 { SUNI_RXCP_IDLE_MASK_H4,       0xfe },
941
942                 { SUNI_RXCP_CELL_PAT_H1,        0 },
943                 { SUNI_RXCP_CELL_PAT_H2,        0 },
944                 { SUNI_RXCP_CELL_PAT_H3,        0 },
945                 { SUNI_RXCP_CELL_PAT_H4,        0x01 },
946
947                 { SUNI_RXCP_CELL_MASK_H1,       0xff },
948                 { SUNI_RXCP_CELL_MASK_H2,       0xff },
949                 { SUNI_RXCP_CELL_MASK_H3,       0xff },
950                 { SUNI_RXCP_CELL_MASK_H4,       0xff },
951
952                 { SUNI_TXCP_CTRL,               0xa4 },
953                 { SUNI_TXCP_INTR_EN_STS,        0x10 },
954                 { SUNI_TXCP_IDLE_PAT_H5,        0x55 }
955         };
956
957         if (iadev->phy_type & FE_DS3_PHY)
958                 ia_suni_pm7345_init_ds3(iadev);
959         else
960                 ia_suni_pm7345_init_e3(iadev);
961
962         ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
963
964         ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
965                 ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
966                   SUNI_PM7345_DLB | SUNI_PM7345_PLB));
967 #ifdef __SNMP__
968    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
969 #endif /* __SNMP__ */
970    return;
971 }
972
973
974 /***************************** IA_LIB END *****************************/
975     
976 #ifdef CONFIG_ATM_IA_DEBUG
977 static int tcnter = 0;
978 static void xdump( u_char*  cp, int  length, char*  prefix )
979 {
980     int col, count;
981     u_char prntBuf[120];
982     u_char*  pBuf = prntBuf;
983     count = 0;
984     while(count < length){
985         pBuf += sprintf( pBuf, "%s", prefix );
986         for(col = 0;count + col < length && col < 16; col++){
987             if (col != 0 && (col % 4) == 0)
988                 pBuf += sprintf( pBuf, " " );
989             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
990         }
991         while(col++ < 16){      /* pad end of buffer with blanks */
992             if ((col % 4) == 0)
993                 pBuf += sprintf( pBuf, " " );
994             pBuf += sprintf( pBuf, "   " );
995         }
996         pBuf += sprintf( pBuf, "  " );
997         for(col = 0;count + col < length && col < 16; col++){
998             if (isprint((int)cp[count + col]))
999                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
1000             else
1001                 pBuf += sprintf( pBuf, "." );
1002         }
1003         printk("%s\n", prntBuf);
1004         count += col;
1005         pBuf = prntBuf;
1006     }
1007
1008 }  /* close xdump(... */
1009 #endif /* CONFIG_ATM_IA_DEBUG */
1010
1011   
1012 static struct atm_dev *ia_boards = NULL;  
1013   
1014 #define ACTUAL_RAM_BASE \
1015         RAM_BASE*((iadev->mem)/(128 * 1024))  
1016 #define ACTUAL_SEG_RAM_BASE \
1017         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1018 #define ACTUAL_REASS_RAM_BASE \
1019         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
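/* Worked example (illustrative only): if iadev->mem == 1024 * 1024, the scale
   factor (iadev->mem)/(128 * 1024) is 8, so ACTUAL_SEG_RAM_BASE evaluates to
   IPHASE5575_FRAG_CONTROL_RAM_BASE * 8. */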
1020   
1021   
1022 /*-- some utilities and memory allocation stuff will come here -------------*/  
1023   
1024 static void desc_dbg(IADEV *iadev) {
1025
1026   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1027   u32 i;
1028   void __iomem *tmp;
1029   // regval = readl((u32)ia_cmds->maddr);
1030   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1031   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1032                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1033                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1034   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1035                    iadev->ffL.tcq_rd);
1036   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1037   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1038   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1039   i = 0;
1040   while (tcq_st_ptr != tcq_ed_ptr) {
1041       tmp = iadev->seg_ram+tcq_st_ptr;
1042       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1043       tcq_st_ptr += 2;
1044   }
1045   for(i=0; i <iadev->num_tx_desc; i++)
1046       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1047 }
1048   
1049   
1050 /*----------------------------- Receiving side stuff --------------------------*/  
1051  
1052 static void rx_excp_rcvd(struct atm_dev *dev)  
1053 {  
1054 #if 0 /* closing the receiving side will cause too many excp int */  
1055   IADEV *iadev;  
1056   u_short state;  
1057   u_short excpq_rd_ptr;  
1058   //u_short *ptr;  
1059   int vci, error = 1;  
1060   iadev = INPH_IA_DEV(dev);  
1061   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1062   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1063   { printk("state = %x \n", state); 
1064         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1065  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1066         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1067             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1068         // TODO: update exception stat
1069         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1070         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1071         // pwang_test
1072         excpq_rd_ptr += 4;  
1073         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1074             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1075         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1076         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1077   }  
1078 #endif
1079 }  
1080   
1081 static void free_desc(struct atm_dev *dev, int desc)  
1082 {  
1083         IADEV *iadev;  
1084         iadev = INPH_IA_DEV(dev);  
1085         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1086         iadev->rfL.fdq_wr +=2;
1087         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1088                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1089         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1090 }  
1091   
1092   
1093 static int rx_pkt(struct atm_dev *dev)  
1094 {  
1095         IADEV *iadev;  
1096         struct atm_vcc *vcc;  
1097         unsigned short status;  
1098         struct rx_buf_desc __iomem *buf_desc_ptr;  
1099         int desc;   
1100         struct dle* wr_ptr;  
1101         int len;  
1102         struct sk_buff *skb;  
1103         u_int buf_addr, dma_addr;  
1104
1105         iadev = INPH_IA_DEV(dev);  
1106         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1107         {  
1108             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1109             return -EINVAL;  
1110         }  
1111         /* mask 1st 3 bits to get the actual descno. */  
1112         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1113         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1114                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1115               printk(" pcq_wr_ptr = 0x%x\n",
1116                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1117         /* update the read pointer - maybe we should do this at the end */  
1118         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1119                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1120         else  
1121                 iadev->rfL.pcq_rd += 2;
1122         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1123   
1124         /* get the buffer desc entry.  
1125                 update stuff. - doesn't seem to be any update necessary  
1126         */  
1127         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1128         /* make the ptr point to the corresponding buffer desc entry */  
1129         buf_desc_ptr += desc;     
1130         if (!desc || (desc > iadev->num_rx_desc) || 
1131                       ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
1132             free_desc(dev, desc);
1133             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1134             return -1;
1135         }
1136         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1137         if (!vcc)  
1138         {      
1139                 free_desc(dev, desc); 
1140                 printk("IA: null vcc, drop PDU\n");  
1141                 return -1;  
1142         }  
1143           
1144   
1145         /* might want to check the status bits for errors */  
1146         status = (u_short) (buf_desc_ptr->desc_mode);  
1147         if (status & (RX_CER | RX_PTE | RX_OFL))  
1148         {  
1149                 atomic_inc(&vcc->stats->rx_err);
1150                 IF_ERR(printk("IA: bad packet, dropping it");)  
1151                 if (status & RX_CER) { 
1152                     IF_ERR(printk(" cause: packet CRC error\n");)
1153                 }
1154                 else if (status & RX_PTE) {
1155                     IF_ERR(printk(" cause: packet time out\n");)
1156                 }
1157                 else {
1158                     IF_ERR(printk(" cause: buffer overflow\n");)
1159                 }
1160                 goto out_free_desc;
1161         }  
1162   
1163         /*  
1164                 build DLE.        
1165         */  
1166   
1167         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1168         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1169         len = dma_addr - buf_addr;  
1170         if (len > iadev->rx_buf_sz) {
1171            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1172            atomic_inc(&vcc->stats->rx_err);
1173            goto out_free_desc;
1174         }
1175                   
1176         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1177            if (vcc->vci < 32)
1178               printk("Drop control packets\n");
1179            goto out_free_desc;
1180         }
1181         skb_put(skb,len);  
1182         // pwang_test
1183         ATM_SKB(skb)->vcc = vcc;
1184         ATM_DESC(skb) = desc;        
1185         skb_queue_tail(&iadev->rx_dma_q, skb);  
1186
1187         /* Build the DLE structure */  
1188         wr_ptr = iadev->rx_dle_q.write;  
1189         wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
1190                                               len, DMA_FROM_DEVICE);
1191         wr_ptr->local_pkt_addr = buf_addr;  
1192         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1193         wr_ptr->mode = DMA_INT_ENABLE;  
1194   
1195         /* should take care of wrap around here too. */  
1196         if(++wr_ptr == iadev->rx_dle_q.end)
1197              wr_ptr = iadev->rx_dle_q.start;
1198         iadev->rx_dle_q.write = wr_ptr;  
1199         udelay(1);  
1200         /* Increment transaction counter */  
1201         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1202 out:    return 0;  
1203 out_free_desc:
1204         free_desc(dev, desc);
1205         goto out;
1206 }  
1207   
1208 static void rx_intr(struct atm_dev *dev)  
1209 {  
1210   IADEV *iadev;  
1211   u_short status;  
1212   u_short state, i;  
1213   
1214   iadev = INPH_IA_DEV(dev);  
1215   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1216   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1217   if (status & RX_PKT_RCVD)  
1218   {  
1219         /* do something */  
1220         /* Basically recvd an interrupt for receiving a packet.  
1221         A descriptor would have been written to the packet complete   
1222         queue. Get all the descriptors and set up dma to move the   
1223         packets till the packet complete queue is empty..  
1224         */  
1225         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1226         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1227         while(!(state & PCQ_EMPTY))  
1228         {  
1229              rx_pkt(dev);  
1230              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1231         }  
1232         iadev->rxing = 1;
1233   }  
1234   if (status & RX_FREEQ_EMPT)  
1235   {   
1236      if (iadev->rxing) {
1237         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1238         iadev->rx_tmp_jif = jiffies; 
1239         iadev->rxing = 0;
1240      } 
1241      else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1242                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1243         for (i = 1; i <= iadev->num_rx_desc; i++)
1244                free_desc(dev, i);
1245         printk("Test logic RUN!!!!\n");
1246         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1247         iadev->rxing = 1;
1248      }
1249      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1250   }  
1251
1252   if (status & RX_EXCP_RCVD)  
1253   {  
1254         /* probably need to handle the exception queue also. */  
1255         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1256         rx_excp_rcvd(dev);  
1257   }  
1258
1259
1260   if (status & RX_RAW_RCVD)  
1261   {  
1262         /* need to handle the raw incoming cells. This depends on   
1263         whether we have programmed to receive the raw cells or not.  
1264         Else ignore. */  
1265         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1266   }  
1267 }  
1268   
1269   
1270 static void rx_dle_intr(struct atm_dev *dev)  
1271 {  
1272   IADEV *iadev;  
1273   struct atm_vcc *vcc;   
1274   struct sk_buff *skb;  
1275   int desc;  
1276   u_short state;   
1277   struct dle *dle, *cur_dle;  
1278   u_int dle_lp;  
1279   int len;
1280   iadev = INPH_IA_DEV(dev);  
1281  
1282   /* free all the dles done, that is just update our own dle read pointer   
1283         - do we really need to do this. Think not. */  
1284   /* DMA is done, just get all the receive buffers from the rx dma queue  
1285         and push them up to the higher layer protocol. Also free the desc  
1286         associated with the buffer. */  
1287   dle = iadev->rx_dle_q.read;  
1288   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1289   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1290   while(dle != cur_dle)  
1291   {  
1292       /* free the DMAed skb */  
1293       skb = skb_dequeue(&iadev->rx_dma_q);  
1294       if (!skb)  
1295          goto INCR_DLE;
1296       desc = ATM_DESC(skb);
1297       free_desc(dev, desc);  
1298                
1299       if (!(len = skb->len))
1300       {  
1301           printk("rx_dle_intr: skb len 0\n");  
1302           dev_kfree_skb_any(skb);  
1303       }  
1304       else  
1305       {  
1306           struct cpcs_trailer *trailer;
1307           u_short length;
1308           struct ia_vcc *ia_vcc;
1309
1310           dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
1311                            len, DMA_FROM_DEVICE);
1312           /* no VCC related housekeeping done as yet. lets see */  
1313           vcc = ATM_SKB(skb)->vcc;
1314           if (!vcc) {
1315               printk("IA: null vcc\n");  
1316               dev_kfree_skb_any(skb);
1317               goto INCR_DLE;
1318           }
1319           ia_vcc = INPH_IA_VCC(vcc);
1320           if (ia_vcc == NULL)
1321           {
1322              atomic_inc(&vcc->stats->rx_err);
1323              atm_return(vcc, skb->truesize);
1324              dev_kfree_skb_any(skb);
1325              goto INCR_DLE;
1326            }
1327           // get real pkt length  pwang_test
1328           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1329                                  skb->len - sizeof(*trailer));
1330           length = swap_byte_order(trailer->length);
1331           if ((length > iadev->rx_buf_sz) || (length > 
1332                               (skb->len - sizeof(struct cpcs_trailer))))
1333           {
1334              atomic_inc(&vcc->stats->rx_err);
1335              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1336                                                             length, skb->len);)
1337              atm_return(vcc, skb->truesize);
1338              dev_kfree_skb_any(skb);
1339              goto INCR_DLE;
1340           }
1341           skb_trim(skb, length);
1342           
1343           /* Display the packet */  
1344           IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1345           xdump(skb->data, skb->len, "RX: ");
1346           printk("\n");)
1347
1348           IF_RX(printk("rx_dle_intr: skb push");)  
1349           vcc->push(vcc,skb);  
1350           atomic_inc(&vcc->stats->rx);
1351           iadev->rx_pkt_cnt++;
1352       }  
1353 INCR_DLE:
1354       if (++dle == iadev->rx_dle_q.end)  
1355           dle = iadev->rx_dle_q.start;  
1356   }  
1357   iadev->rx_dle_q.read = dle;  
1358   
1359   /* if the interrupts are masked because there were no free desc available,  
1360                 unmask them now. */ 
1361   if (!iadev->rxing) {
1362      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1363      if (!(state & FREEQ_EMPTY)) {
1364         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1365         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1366                                       iadev->reass_reg+REASS_MASK_REG);
1367         iadev->rxing++; 
1368      }
1369   }
1370 }  
1371   
1372   
1373 static int open_rx(struct atm_vcc *vcc)  
1374 {  
1375         IADEV *iadev;  
1376         u_short __iomem *vc_table;  
1377         u_short __iomem *reass_ptr;  
1378         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1379
1380         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1381         iadev = INPH_IA_DEV(vcc->dev);  
1382         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1383            if (iadev->phy_type & FE_25MBIT_PHY) {
1384                printk("IA: ABR not supported\n");
1385                return -EINVAL; 
1386            }
1387         }
1388         /* Make only this VCI in the vc table valid and let all   
1389                 others be invalid entries */  
1390         vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1391         vc_table += vcc->vci;
1392         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1393
1394         *vc_table = vcc->vci << 6;
1395         /* Also keep a list of open rx vcs so that we can attach them with  
1396                 incoming PDUs later. */  
1397         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1398                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1399         {  
1400                 srv_cls_param_t srv_p;
1401                 init_abr_vc(iadev, &srv_p);
1402                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1403         } 
1404         else {  /* for UBR  later may need to add CBR logic */
1405                 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1406                 reass_ptr += vcc->vci;
1407                 *reass_ptr = NO_AAL5_PKT;
1408         }
1409         
1410         if (iadev->rx_open[vcc->vci])  
1411                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1412                         vcc->dev->number, vcc->vci);  
1413         iadev->rx_open[vcc->vci] = vcc;  
1414         return 0;  
1415 }  
1416   
1417 static int rx_init(struct atm_dev *dev)  
1418 {  
1419         IADEV *iadev;  
1420         struct rx_buf_desc __iomem *buf_desc_ptr;  
1421         unsigned long rx_pkt_start = 0;  
1422         void *dle_addr;  
1423         struct abr_vc_table  *abr_vc_table; 
1424         u16 *vc_table;  
1425         u16 *reass_table;  
1426         int i,j, vcsize_sel;  
1427         u_short freeq_st_adr;  
1428         u_short *freeq_start;  
1429   
1430         iadev = INPH_IA_DEV(dev);  
1431   //    spin_lock_init(&iadev->rx_lock); 
1432   
1433         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1434         dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1435                                       &iadev->rx_dle_dma, GFP_KERNEL);
1436         if (!dle_addr)  {  
1437                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1438                 goto err_out;
1439         }
1440         iadev->rx_dle_q.start = (struct dle *)dle_addr;
1441         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1442         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1443         iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1444         /* the end of the dle q points to the entry after the last  
1445         DLE that can be used. */  
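        /* A minimal sketch of how the ring is walked (this is the idiom used
           in rx_dle_intr() above and tx_dle_intr() below): the read pointer
           chases the hardware list pointer and wraps one entry past the last
           usable DLE:

                if (++dle == iadev->rx_dle_q.end)
                        dle = iadev->rx_dle_q.start;
        */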
1446   
1447         /* write the upper 20 bits of the start address to rx list address register */  
1448         /* We know this is 32bit bus addressed so the following is safe */
1449         writel(iadev->rx_dle_dma & 0xfffff000,
1450                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1451         IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1452                       iadev->dma+IPHASE5575_TX_LIST_ADDR,
1453                       readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
1454         printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1455                       iadev->dma+IPHASE5575_RX_LIST_ADDR,
1456                       readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
1457   
1458         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1459         writew(0, iadev->reass_reg+MODE_REG);  
1460         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1461   
1462         /* Receive side control memory map  
1463            -------------------------------  
1464   
1465                 Buffer descr    0x0000 (736 - 23K)  
1466                 VP Table        0x5c00 (256 - 512)  
1467                 Except q        0x5e00 (128 - 512)  
1468                 Free buffer q   0x6000 (1K - 2K)  
1469                 Packet comp q   0x6800 (1K - 2K)  
1470                 Reass Table     0x7000 (1K - 2K)  
1471                 VC Table        0x7800 (1K - 2K)  
1472                 ABR VC Table    0x8000 (1K - 32K)  
1473         */  
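        /* Note: the offsets in the map above correspond to the 1K-VC board.
           Every table base below is multiplied by iadev->memSize, which
           ia_init() sets to 1 (256KB PCI map, 1K VCs) or 4 (1MB map, 4K VCs),
           so the same layout scales up for the larger control memory. */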
1474           
1475         /* Base address for Buffer Descriptor Table */  
1476         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1477         /* Set the buffer size register */  
1478         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1479   
1480         /* Initialize each entry in the Buffer Descriptor Table */  
1481         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1482         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1483         memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1484         buf_desc_ptr++;  
1485         rx_pkt_start = iadev->rx_pkt_ram;  
1486         for(i=1; i<=iadev->num_rx_desc; i++)  
1487         {  
1488                 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1489                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1490                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1491                 buf_desc_ptr++;           
1492                 rx_pkt_start += iadev->rx_buf_sz;  
1493         }  
1494         IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1495         i = FREE_BUF_DESC_Q*iadev->memSize; 
1496         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1497         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1498         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1499                                          iadev->reass_reg+FREEQ_ED_ADR);
1500         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1501         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1502                                         iadev->reass_reg+FREEQ_WR_PTR);    
1503         /* Fill the FREEQ with all the free descriptors. */  
1504         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1505         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1506         for(i=1; i<=iadev->num_rx_desc; i++)  
1507         {  
1508                 *freeq_start = (u_short)i;  
1509                 freeq_start++;  
1510         }  
1511         IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1512         /* Packet Complete Queue */
1513         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1514         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1515         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1516         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1517         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1518
1519         /* Exception Queue */
1520         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1521         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1522         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1523                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1524         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1525         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1526  
1527         /* Load local copy of FREEQ and PCQ ptrs */
1528         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1529         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1530         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1531         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1532         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1533         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1534         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1535         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1536         
1537         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1538               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1539               iadev->rfL.pcq_wr);)                
1540         /* just for check - no VP TBL */  
1541         /* VP Table */  
1542         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1543         /* initialize VP Table for invalid VPIs  
1544                 - I guess we can write all 1s or 0x000f in the entire memory  
1545                   space or something similar.  
1546         */  
1547   
1548         /* This seems to work and looks right to me too !!! */  
1549         i =  REASS_TABLE * iadev->memSize;
1550         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1551         /* initialize each Reassembly table entry to NO_AAL5_PKT (no packet in progress) */  
1552         reass_table = (u16 *)(iadev->reass_ram+i);  
1553         j = REASS_TABLE_SZ * iadev->memSize;
1554         for(i=0; i < j; i++)  
1555                 *reass_table++ = NO_AAL5_PKT;  
1556        i = 8*1024;
1557        vcsize_sel =  0;
1558        while (i != iadev->num_vc) {
1559           i /= 2;
1560           vcsize_sel++;
1561        }
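       /* vcsize_sel ends up as log2(8192 / num_vc): on a 1K-VC board the
          8192 start value is halved three times (8192 -> 4096 -> 2048 ->
          1024), giving vcsize_sel = 3; on a 4K-VC board it is 1.  It is
          packed into the low 3 bits of VC_LKUP_BASE below, next to the
          table base address. */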
1562        i = RX_VC_TABLE * iadev->memSize;
1563        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1564        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1565         j = RX_VC_TABLE_SZ * iadev->memSize;
1566         for(i = 0; i < j; i++)  
1567         {  
1568                 /* shift the reassembly pointer by 3 + lower 3 bits of   
1569                 vc_lkup_base register (=3 for 1K VCs) and the last byte   
1570                 is those low 3 bits.   
1571                 Shall program this later.  
1572                 */  
1573                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1574                 vc_table++;  
1575         }  
1576         /* ABR VC table */
1577         i =  ABR_VC_TABLE * iadev->memSize;
1578         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1579                    
1580         i = ABR_VC_TABLE * iadev->memSize;
1581         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1582         j = REASS_TABLE_SZ * iadev->memSize;
1583         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1584         for(i = 0; i < j; i++) {                
1585                 abr_vc_table->rdf = 0x0003;
1586                 abr_vc_table->air = 0x5eb1;
1587                 abr_vc_table++;         
1588         }  
1589
1590         /* Initialize other registers */  
1591   
1592         /* VP Filter Register set for VC Reassembly only */  
1593         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1594         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1595         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1596
1597         /* Packet Timeout Count  related Registers : 
1598            Set packet timeout to occur in about 3 seconds
1599            Set Packet Aging Interval count register to overflow in about 4 us
1600         */  
1601         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1602
1603         i = (j >> 6) & 0xFF;
1604         j += 2 * (j - 1);
1605         i |= ((j << 2) & 0xFF00);
1606         writew(i, iadev->reass_reg+TMOUT_RANGE);
1607
1608         /* initiate the desc_tble */
1609         for(i=0; i<iadev->num_tx_desc;i++)
1610             iadev->desc_tbl[i].timestamp = 0;
1611
1612         /* to clear the interrupt status register - read it */  
1613         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1614   
1615         /* Mask Register - clear it */  
1616         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1617   
1618         skb_queue_head_init(&iadev->rx_dma_q);  
1619         iadev->rx_free_desc_qhead = NULL;   
1620
1621         iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
1622         if (!iadev->rx_open) {
1623                 printk(KERN_ERR DEV_LABEL "(itf %d): couldn't allocate rx_open table\n",
1624                 dev->number);  
1625                 goto err_free_dle;
1626         }  
1627
1628         iadev->rxing = 1;
1629         iadev->rx_pkt_cnt = 0;
1630         /* Mode Register */  
1631         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1632         return 0;  
1633
1634 err_free_dle:
1635         dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1636                           iadev->rx_dle_dma);
1637 err_out:
1638         return -ENOMEM;
1639 }  
1640   
1641
1642 /*  
1643         The memory map suggested in appendix A and the coding for it.   
1644         Keeping it around just in case we change our mind later.  
1645   
1646                 Buffer descr    0x0000 (128 - 4K)  
1647                 UBR sched       0x1000 (1K - 4K)  
1648                 UBR Wait q      0x2000 (1K - 4K)  
1649                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1650                                         (128 - 256) each  
1651                 extended VC     0x4000 (1K - 8K)  
1652                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1653                 CBR sched       0x7000 (as needed)  
1654                 VC table        0x8000 (1K - 32K)  
1655 */  
1656   
1657 static void tx_intr(struct atm_dev *dev)  
1658 {  
1659         IADEV *iadev;  
1660         unsigned short status;  
1661         unsigned long flags;
1662
1663         iadev = INPH_IA_DEV(dev);  
1664   
1665         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1666         if (status & TRANSMIT_DONE){
1667
1668            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1669            spin_lock_irqsave(&iadev->tx_lock, flags);
1670            ia_tx_poll(iadev);
1671            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1672            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1673            if (iadev->close_pending)  
1674                wake_up(&iadev->close_wait);
1675         }         
1676         if (status & TCQ_NOT_EMPTY)  
1677         {  
1678             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1679         }  
1680 }  
1681   
1682 static void tx_dle_intr(struct atm_dev *dev)
1683 {
1684         IADEV *iadev;
1685         struct dle *dle, *cur_dle; 
1686         struct sk_buff *skb;
1687         struct atm_vcc *vcc;
1688         struct ia_vcc  *iavcc;
1689         u_int dle_lp;
1690         unsigned long flags;
1691
1692         iadev = INPH_IA_DEV(dev);
1693         spin_lock_irqsave(&iadev->tx_lock, flags);   
1694         dle = iadev->tx_dle_q.read;
1695         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1696                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1697         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1698         while (dle != cur_dle)
1699         {
1700             /* free the DMAed skb */ 
1701             skb = skb_dequeue(&iadev->tx_dma_q); 
1702             if (!skb) break;
1703
1704             /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1705             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1706                 dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
1707                                  DMA_TO_DEVICE);
1708             }
1709             vcc = ATM_SKB(skb)->vcc;
1710             if (!vcc) {
1711                   printk("tx_dle_intr: vcc is null\n");
1712                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1713                   dev_kfree_skb_any(skb);
1714
1715                   return;
1716             }
1717             iavcc = INPH_IA_VCC(vcc);
1718             if (!iavcc) {
1719                   printk("tx_dle_intr: iavcc is null\n");
1720                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1721                   dev_kfree_skb_any(skb);
1722                   return;
1723             }
1724             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1725                if ((vcc->pop) && (skb->len != 0))
1726                {     
1727                  vcc->pop(vcc, skb);
1728                } 
1729                else {
1730                  dev_kfree_skb_any(skb);
1731                }
1732             }
1733             else { /* Hold the rate-limited skb for flow control */
1734                IA_SKB_STATE(skb) |= IA_DLED;
1735                skb_queue_tail(&iavcc->txing_skb, skb);
1736             }
1737             IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
1738             if (++dle == iadev->tx_dle_q.end)
1739                  dle = iadev->tx_dle_q.start;
1740         }
1741         iadev->tx_dle_q.read = dle;
1742         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1743 }
1744   
1745 static int open_tx(struct atm_vcc *vcc)  
1746 {  
1747         struct ia_vcc *ia_vcc;  
1748         IADEV *iadev;  
1749         struct main_vc *vc;  
1750         struct ext_vc *evc;  
1751         int ret;
1752         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1753         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1754         iadev = INPH_IA_DEV(vcc->dev);  
1755         
1756         if (iadev->phy_type & FE_25MBIT_PHY) {
1757            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1758                printk("IA:  ABR not supported\n");
1759                return -EINVAL; 
1760            }
1761           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1762                printk("IA:  CBR not supported\n");
1763                return -EINVAL; 
1764           }
1765         }
1766         ia_vcc =  INPH_IA_VCC(vcc);
1767         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1768         if (vcc->qos.txtp.max_sdu > 
1769                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1770            printk("IA:  requested SDU size %d exceeds the configured buffer size %d\n",
1771                   vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
1772            vcc->dev_data = NULL;
1773            kfree(ia_vcc);
1774            return -EINVAL; 
1775         }
1776         ia_vcc->vc_desc_cnt = 0;
1777         ia_vcc->txing = 1;
1778
1779         /* find pcr */
1780         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1781            vcc->qos.txtp.pcr = iadev->LineRate;
1782         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1783            vcc->qos.txtp.pcr = iadev->LineRate;
1784         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1785            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1786         if (vcc->qos.txtp.pcr > iadev->LineRate)
1787              vcc->qos.txtp.pcr = iadev->LineRate;
1788         ia_vcc->pcr = vcc->qos.txtp.pcr;
1789
1790         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1791         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1792         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1793         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
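        /* ltimeout is the per-VC descriptor drain timeout in jiffies, scaled
           roughly inversely with the cell rate: fast VCs (above LineRate/6)
           wait only HZ/10, mid-rate VCs a full second, and very slow VCs
           (pcr <= 170) up to 16 seconds. */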
1794         if (ia_vcc->pcr < iadev->rate_limit)
1795            skb_queue_head_init (&ia_vcc->txing_skb);
1796         if (ia_vcc->pcr < iadev->rate_limit) {
1797            struct sock *sk = sk_atm(vcc);
1798
1799            if (vcc->qos.txtp.max_sdu != 0) {
1800                if (ia_vcc->pcr > 60000)
1801                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1802                else if (ia_vcc->pcr > 2000)
1803                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1804                else
1805                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1806            }
1807            else
1808              sk->sk_sndbuf = 24576;
1809         }
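        /* For rate-limited VCs the socket send buffer is deliberately kept
           small (a few max_sdu's, or 24KB when max_sdu is unspecified) so
           the ATM socket layer throttles the sender instead of letting
           frames pile up in the driver's backlog. */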
1810            
1811         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1812         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1813         vc += vcc->vci;  
1814         evc += vcc->vci;  
1815         memset((caddr_t)vc, 0, sizeof(*vc));  
1816         memset((caddr_t)evc, 0, sizeof(*evc));  
1817           
1818         /* store the most significant 4 bits of vci as the last 4 bits   
1819                 of first part of atm header.  
1820            store the last 12 bits of vci as first 12 bits of the second  
1821                 part of the atm header.  
1822         */  
1823         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1824         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
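        /* Worked example (hypothetical VCI 0x1234): atm_hdr1 = 0x1 (the top
           4 bits of the VCI) and atm_hdr2 = 0x2340 (the low 12 bits shifted
           left by 4), matching the split described in the comment above. */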
1825  
1826         /* check the following for different traffic classes */  
1827         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1828         {  
1829                 vc->type = UBR;  
1830                 vc->status = CRC_APPEND;
1831                 vc->acr = cellrate_to_float(iadev->LineRate);  
1832                 if (vcc->qos.txtp.pcr > 0) 
1833                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1834                 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", 
1835                                              vcc->qos.txtp.max_pcr,vc->acr);)
1836         }  
1837         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1838         {       srv_cls_param_t srv_p;
1839                 IF_ABR(printk("Tx ABR VCC\n");)  
1840                 init_abr_vc(iadev, &srv_p);
1841                 if (vcc->qos.txtp.pcr > 0) 
1842                    srv_p.pcr = vcc->qos.txtp.pcr;
1843                 if (vcc->qos.txtp.min_pcr > 0) {
1844                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1845                    if (tmpsum > iadev->LineRate)
1846                        return -EBUSY;
1847                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1848                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1849                 } 
1850                 else srv_p.mcr = 0;
1851                 if (vcc->qos.txtp.icr)
1852                    srv_p.icr = vcc->qos.txtp.icr;
1853                 if (vcc->qos.txtp.tbe)
1854                    srv_p.tbe = vcc->qos.txtp.tbe;
1855                 if (vcc->qos.txtp.frtt)
1856                    srv_p.frtt = vcc->qos.txtp.frtt;
1857                 if (vcc->qos.txtp.rif)
1858                    srv_p.rif = vcc->qos.txtp.rif;
1859                 if (vcc->qos.txtp.rdf)
1860                    srv_p.rdf = vcc->qos.txtp.rdf;
1861                 if (vcc->qos.txtp.nrm_pres)
1862                    srv_p.nrm = vcc->qos.txtp.nrm;
1863                 if (vcc->qos.txtp.trm_pres)
1864                    srv_p.trm = vcc->qos.txtp.trm;
1865                 if (vcc->qos.txtp.adtf_pres)
1866                    srv_p.adtf = vcc->qos.txtp.adtf;
1867                 if (vcc->qos.txtp.cdf_pres)
1868                    srv_p.cdf = vcc->qos.txtp.cdf;    
1869                 if (srv_p.icr > srv_p.pcr)
1870                    srv_p.icr = srv_p.pcr;    
1871                 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n", 
1872                                                       srv_p.pcr, srv_p.mcr);)
1873                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1874         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1875                 if (iadev->phy_type & FE_25MBIT_PHY) {
1876                     printk("IA:  CBR not supported\n");
1877                     return -EINVAL; 
1878                 }
1879                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1880                    IF_CBR(printk("PCR is not available\n");)
1881                    return -1;
1882                 }
1883                 vc->type = CBR;
1884                 vc->status = CRC_APPEND;
1885                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1886                     return ret;
1887                 }
1888         } else {
1889                 printk("iadev:  traffic classes other than UBR, ABR and CBR are not supported\n");
1890         }
1891         
1892         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1893         IF_EVENT(printk("ia open_tx returning \n");)  
1894         return 0;  
1895 }  
1896   
1897   
1898 static int tx_init(struct atm_dev *dev)  
1899 {  
1900         IADEV *iadev;  
1901         struct tx_buf_desc *buf_desc_ptr;
1902         unsigned int tx_pkt_start;  
1903         void *dle_addr;  
1904         int i;  
1905         u_short tcq_st_adr;  
1906         u_short *tcq_start;  
1907         u_short prq_st_adr;  
1908         u_short *prq_start;  
1909         struct main_vc *vc;  
1910         struct ext_vc *evc;   
1911         u_short tmp16;
1912         u32 vcsize_sel;
1913  
1914         iadev = INPH_IA_DEV(dev);  
1915         spin_lock_init(&iadev->tx_lock);
1916  
1917         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1918                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1919
1920         /* Allocate 4k (boundary aligned) bytes */
1921         dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1922                                       &iadev->tx_dle_dma, GFP_KERNEL);
1923         if (!dle_addr)  {
1924                 printk(KERN_ERR DEV_LABEL " can't allocate DLEs\n");
1925                 goto err_out;
1926         }
1927         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1928         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1929         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1930         iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1931
1932         /* write the upper 20 bits of the start address to tx list address register */  
1933         writel(iadev->tx_dle_dma & 0xfffff000,
1934                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1935         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1936         writew(0, iadev->seg_reg+MODE_REG_0);  
1937         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1938         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1939         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1940         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1941   
1942         /*  
1943            Transmit side control memory map  
1944            --------------------------------    
1945          Buffer descr   0x0000 (128 - 4K)  
1946          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1947                                         (512 - 1K) each  
1948                                         TCQ - 4K, PRQ - 5K  
1949          CBR Table      0x1800 (as needed) - 6K  
1950          UBR Table      0x3000 (1K - 4K) - 12K  
1951          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1952          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1953                                 ABR Tbl - 20K, ABR Wq - 22K   
1954          extended VC    0x6000 (1K - 8K) - 24K  
1955          VC Table       0x8000 (1K - 32K) - 32K  
1956           
1957         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1958         and Wait q, which can be allotted later.  
1959         */  
1960      
1961         /* Buffer Descriptor Table Base address */  
1962         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1963   
1964         /* initialize each entry in the buffer descriptor table */  
1965         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1966         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1967         buf_desc_ptr++;  
1968         tx_pkt_start = TX_PACKET_RAM;  
1969         for(i=1; i<=iadev->num_tx_desc; i++)  
1970         {  
1971                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1972                 buf_desc_ptr->desc_mode = AAL5;  
1973                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1974                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1975                 buf_desc_ptr++;           
1976                 tx_pkt_start += iadev->tx_buf_sz;  
1977         }  
1978         iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
1979                                       sizeof(*iadev->tx_buf),
1980                                       GFP_KERNEL);
1981         if (!iadev->tx_buf) {
1982             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1983             goto err_free_dle;
1984         }
1985         for (i= 0; i< iadev->num_tx_desc; i++)
1986         {
1987             struct cpcs_trailer *cpcs;
1988  
1989             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1990             if(!cpcs) {                
1991                 printk(KERN_ERR DEV_LABEL " couldn't get free page\n"); 
1992                 goto err_free_tx_bufs;
1993             }
1994             iadev->tx_buf[i].cpcs = cpcs;
1995             iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
1996                                                        cpcs,
1997                                                        sizeof(*cpcs),
1998                                                        DMA_TO_DEVICE);
1999         }
2000         iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
2001                                         sizeof(*iadev->desc_tbl),
2002                                         GFP_KERNEL);
2003         if (!iadev->desc_tbl) {
2004                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
2005                 goto err_free_all_tx_bufs;
2006         }
2007   
2008         /* Communication Queues base address */  
2009         i = TX_COMP_Q * iadev->memSize;
2010         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
2011   
2012         /* Transmit Complete Queue */  
2013         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
2014         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
2015         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
2016         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
2017         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2018                                               iadev->seg_reg+TCQ_ED_ADR); 
2019         /* Fill the TCQ with all the free descriptors. */  
2020         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
2021         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
2022         for(i=1; i<=iadev->num_tx_desc; i++)  
2023         {  
2024                 *tcq_start = (u_short)i;  
2025                 tcq_start++;  
2026         }  
2027   
2028         /* Packet Ready Queue */  
2029         i = PKT_RDY_Q * iadev->memSize; 
2030         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2031         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2032                                               iadev->seg_reg+PRQ_ED_ADR);
2033         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2034         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2035          
2036         /* Load local copy of PRQ and TCQ ptrs */
2037         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2038         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2039         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2040
2041         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2042         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2043         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2044
2045         /* Just for safety initializing the queue to have desc 1 always */  
2046         /* Fill the PRQ with all the free descriptors. */  
2047         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2048         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2049         for(i=1; i<=iadev->num_tx_desc; i++)  
2050         {  
2051                 *prq_start = (u_short)0;        /* desc 1 in all entries */  
2052                 prq_start++;  
2053         }  
2054         /* CBR Table */  
2055         IF_INIT(printk("Start CBR Init\n");)
2056 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2057         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2058 #else /* Charlie's logic is wrong ? */
2059         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2060         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2061         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2062 #endif
2063
2064         IF_INIT(printk("value in register = 0x%x\n",
2065                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2066         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2067         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2068         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2069                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2070         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2071         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2072         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2073         IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2074                iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2075         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2076           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2077           readw(iadev->seg_reg+CBR_TAB_END+1));)
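        /* CBR_TAB_BEG/CBR_TAB_END hold 16-bit word offsets into segmentation
           RAM (byte offset >> 1).  The schedule spans iadev->num_vc * 3
           two-byte entries, i.e. num_vc * 6 bytes, which is why the end
           pointer is derived from CBR_SCHED_TABLE*memSize + num_vc*6 - 2. */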
2078
2079         /* Initialize the CBR Scheduling Table */
2080         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2081                                                           0, iadev->num_vc*6); 
2082         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2083         iadev->CbrEntryPt = 0;
2084         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2085         iadev->NumEnabledCBR = 0;
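        /* Granularity is the cell rate represented by one CBR schedule slot
           (MAX_ATM_155 spread over CbrTotEntries slots).  A rough sketch of
           how a CBR VC's slot count would follow from it, assuming
           ia_cbr_setup() divides the requested rate by this value:

                entries = vcc->qos.txtp.max_pcr / iadev->Granularity;

           (illustrative only - ia_cbr_setup() does the actual rounding and
           slot placement). */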
2086
2087         /* UBR scheduling Table and wait queue */  
2088         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2089                 - SCHEDSZ is 1K (# of entries).  
2090                 - UBR Table size is 4K  
2091                 - UBR wait queue is 4K  
2092            since the table and wait queues are contiguous, all the bytes   
2093            can be initialized by one memset.
2094         */  
2095         
2096         vcsize_sel = 0;
2097         i = 8*1024;
2098         while (i != iadev->num_vc) {
2099           i /= 2;
2100           vcsize_sel++;
2101         }
2102  
2103         i = MAIN_VC_TABLE * iadev->memSize;
2104         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2105         i =  EXT_VC_TABLE * iadev->memSize;
2106         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2107         i = UBR_SCHED_TABLE * iadev->memSize;
2108         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2109         i = UBR_WAIT_Q * iadev->memSize; 
2110         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2111         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2112                                                        0, iadev->num_vc*8);
2113         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2114         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2115                 - SCHEDSZ is 1K (# of entries).  
2116                 - ABR Table size is 2K  
2117                 - ABR wait queue is 2K  
2118            since the table and wait queues are contiguous, all the bytes   
2119            can be initialized by one memset.
2120         */  
2121         i = ABR_SCHED_TABLE * iadev->memSize;
2122         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2123         i = ABR_WAIT_Q * iadev->memSize;
2124         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2125  
2126         i = ABR_SCHED_TABLE*iadev->memSize;
2127         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2128         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2129         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2130         iadev->testTable = kmalloc_array(iadev->num_vc,
2131                                          sizeof(*iadev->testTable),
2132                                          GFP_KERNEL);
2133         if (!iadev->testTable) {
2134            printk(KERN_ERR DEV_LABEL " couldn't allocate testTable\n");
2135            goto err_free_desc_tbl;
2136         }
2137         for(i=0; i<iadev->num_vc; i++)  
2138         {  
2139                 memset((caddr_t)vc, 0, sizeof(*vc));  
2140                 memset((caddr_t)evc, 0, sizeof(*evc));  
2141                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2142                                                 GFP_KERNEL);
2143                 if (!iadev->testTable[i])
2144                         goto err_free_test_tables;
2145                 iadev->testTable[i]->lastTime = 0;
2146                 iadev->testTable[i]->fract = 0;
2147                 iadev->testTable[i]->vc_status = VC_UBR;
2148                 vc++;  
2149                 evc++;  
2150         }  
2151   
2152         /* Other Initialization */  
2153           
2154         /* Max Rate Register */  
2155         if (iadev->phy_type & FE_25MBIT_PHY) {
2156            writew(RATE25, iadev->seg_reg+MAXRATE);  
2157            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2158         }
2159         else {
2160            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2161            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2162         }
2163         /* Set Idle Header Registers to be sure */  
2164         writew(0, iadev->seg_reg+IDLEHEADHI);  
2165         writew(0, iadev->seg_reg+IDLEHEADLO);  
2166   
2167         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2168         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2169
2170         iadev->close_pending = 0;
2171         init_waitqueue_head(&iadev->close_wait);
2172         init_waitqueue_head(&iadev->timeout_wait);
2173         skb_queue_head_init(&iadev->tx_dma_q);  
2174         ia_init_rtn_q(&iadev->tx_return_q);  
2175
2176         /* RM Cell Protocol ID and Message Type */  
2177         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2178         skb_queue_head_init (&iadev->tx_backlog);
2179   
2180         /* Mode Register 1 */  
2181         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2182   
2183         /* Mode Register 0 */  
2184         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2185   
2186         /* Interrupt Status Register - read to clear */  
2187         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2188   
2189         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2190         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2191         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2192         iadev->tx_pkt_cnt = 0;
2193         iadev->rate_limit = iadev->LineRate / 3;
2194   
2195         return 0;
2196
2197 err_free_test_tables:
2198         while (--i >= 0)
2199                 kfree(iadev->testTable[i]);
2200         kfree(iadev->testTable);
2201 err_free_desc_tbl:
2202         kfree(iadev->desc_tbl);
2203 err_free_all_tx_bufs:
2204         i = iadev->num_tx_desc;
2205 err_free_tx_bufs:
2206         while (--i >= 0) {
2207                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2208
2209                 dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2210                                  sizeof(*desc->cpcs), DMA_TO_DEVICE);
2211                 kfree(desc->cpcs);
2212         }
2213         kfree(iadev->tx_buf);
2214 err_free_dle:
2215         dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2216                           iadev->tx_dle_dma);
2217 err_out:
2218         return -ENOMEM;
2219 }   
2220    
2221 static irqreturn_t ia_int(int irq, void *dev_id)  
2222 {  
2223    struct atm_dev *dev;  
2224    IADEV *iadev;  
2225    unsigned int status;  
2226    int handled = 0;
2227
2228    dev = dev_id;  
2229    iadev = INPH_IA_DEV(dev);  
2230    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2231    { 
2232         handled = 1;
2233         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2234         if (status & STAT_REASSINT)  
2235         {  
2236            /* reassembly (receive) interrupt */  
2237            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2238            rx_intr(dev);  
2239         }  
2240         if (status & STAT_DLERINT)  
2241         {  
2242            /* Clear this bit by writing a 1 to it. */  
2243            writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2244            rx_dle_intr(dev);  
2245         }  
2246         if (status & STAT_SEGINT)  
2247         {  
2248            /* segmentation (transmit) interrupt */ 
2249            IF_EVENT(printk("IA: tx_intr \n");) 
2250            tx_intr(dev);  
2251         }  
2252         if (status & STAT_DLETINT)  
2253         {  
2254            writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2255            tx_dle_intr(dev);  
2256         }  
2257         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2258         {  
2259            if (status & STAT_FEINT) 
2260                ia_frontend_intr(iadev);
2261         }  
2262    }
2263    return IRQ_RETVAL(handled);
2264 }  
2265           
2266           
2267           
2268 /*----------------------------- entries --------------------------------*/  
2269 static int get_esi(struct atm_dev *dev)  
2270 {  
2271         IADEV *iadev;  
2272         int i;  
2273         u32 mac1;  
2274         u16 mac2;  
2275           
2276         iadev = INPH_IA_DEV(dev);  
2277         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2278                                 iadev->reg+IPHASE5575_MAC1)));  
2279         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2280         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2281         for (i=0; i<MAC1_LEN; i++)  
2282                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2283           
2284         for (i=0; i<MAC2_LEN; i++)  
2285                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
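        /* Example (hypothetical register contents, assuming MAC1_LEN is 4
           and MAC2_LEN is 2): mac1 = 0x00204806 and mac2 = 0x1234 yield an
           ESI of 00:20:48:06:12:34 - mac1 supplies the leading bytes, mac2
           the trailing two. */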
2286         return 0;  
2287 }  
2288           
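/*
 * reset_sar: save the first 64 dwords of PCI configuration space, hit the
 * external reset register, then write the configuration back (the reset
 * apparently clears it) so the card stays addressable afterwards.
 */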
2289 static int reset_sar(struct atm_dev *dev)  
2290 {  
2291         IADEV *iadev;  
2292         int i, error = 1;  
2293         unsigned int pci[64];  
2294           
2295         iadev = INPH_IA_DEV(dev);  
2296         for(i=0; i<64; i++)  
2297           if ((error = pci_read_config_dword(iadev->pci,  
2298                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2299               return error;  
2300         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2301         for(i=0; i<64; i++)  
2302           if ((error = pci_write_config_dword(iadev->pci,  
2303                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2304             return error;  
2305         udelay(5);  
2306         return 0;  
2307 }  
2308           
2309           
2310 static int ia_init(struct atm_dev *dev)
2311 {  
2312         IADEV *iadev;  
2313         unsigned long real_base;
2314         void __iomem *base;
2315         unsigned short command;  
2316         int error, i; 
2317           
2318         /* The device has been identified and registered. Now we read   
2319            necessary configuration info like memory base address,   
2320            interrupt number etc */  
2321           
2322         IF_INIT(printk(">ia_init\n");)  
2323         dev->ci_range.vpi_bits = 0;  
2324         dev->ci_range.vci_bits = NR_VCI_LD;  
2325
2326         iadev = INPH_IA_DEV(dev);  
2327         real_base = pci_resource_start (iadev->pci, 0);
2328         iadev->irq = iadev->pci->irq;
2329                   
2330         error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2331         if (error) {
2332                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2333                                 dev->number,error);  
2334                 return -EINVAL;  
2335         }  
2336         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2337                         dev->number, iadev->pci->revision, real_base, iadev->irq);)
2338           
2339         /* find mapping size of board */  
2340           
2341         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2342
2343         if (iadev->pci_map_size == 0x100000){
2344           iadev->num_vc = 4096;
2345           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2346           iadev->memSize = 4;
2347         }
2348         else if (iadev->pci_map_size == 0x40000) {
2349           iadev->num_vc = 1024;
2350           iadev->memSize = 1;
2351         }
2352         else {
2353            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2354            return -EINVAL;
2355         }
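        /* In practice: a 1MB BAR identifies the 4K-VC board (memSize scale
           factor 4), a 256KB (0x40000) BAR the 1K-VC board (memSize 1);
           memSize is what rx_init()/tx_init() use to scale every
           control-memory offset. */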
2356         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2357           
2358         /* enable bus mastering */
2359         pci_set_master(iadev->pci);
2360
2361         /*  
2362          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2363          */  
2364         udelay(10);  
2365           
2366         /* mapping the physical address to a virtual address in address space */  
2367         base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2368           
2369         if (!base)  
2370         {  
2371                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2372                             dev->number);  
2373                 return -ENOMEM;
2374         }  
2375         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2376                         dev->number, iadev->pci->revision, base, iadev->irq);)
2377           
2378         /* filling the iphase dev structure */  
2379         iadev->mem = iadev->pci_map_size /2;  
2380         iadev->real_base = real_base;  
2381         iadev->base = base;  
2382                   
2383         /* Bus Interface Control Registers */  
2384         iadev->reg = base + REG_BASE;
2385         /* Segmentation Control Registers */  
2386         iadev->seg_reg = base + SEG_BASE;
2387         /* Reassembly Control Registers */  
2388         iadev->reass_reg = base + REASS_BASE;  
2389         /* Front end/ DMA control registers */  
2390         iadev->phy = base + PHY_BASE;  
2391         iadev->dma = base + PHY_BASE;  
2392         /* RAM - Segmentation RAm and Reassembly RAM */  
2393         iadev->ram = base + ACTUAL_RAM_BASE;  
2394         iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2395         iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2396   
2397         /* lets print out the above */  
2398         IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2399           iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2400           iadev->phy, iadev->ram, iadev->seg_ram, 
2401           iadev->reass_ram);) 
2402           
2403         /* lets try reading the MAC address */  
2404         error = get_esi(dev);  
2405         if (error) {
2406           iounmap(iadev->base);
2407           return error;  
2408         }
2409         printk("IA: ");
2410         for (i=0; i < ESI_LEN; i++)  
2411                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2412         printk("\n");  
2413   
2414         /* reset SAR */  
2415         if (reset_sar(dev)) {
2416            iounmap(iadev->base);
2417            printk("IA: reset SAR failed, please try again\n");
2418            return 1;
2419         }
2420         return 0;  
2421 }  
2422
2423 static void ia_update_stats(IADEV *iadev) {
2424     if (!iadev->carrier_detect)
2425         return;
2426     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2427     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2428     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2429     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2430     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2431     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2432     return;
2433 }
2434   
2435 static void ia_led_timer(struct timer_list *unused) {
2436         unsigned long flags;
2437         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2438         u_char i;
2439         static u32 ctrl_reg; 
2440         for (i = 0; i < iadev_count; i++) {
2441            if (ia_dev[i]) {
2442               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2443               if (blinking[i] == 0) {
2444                  blinking[i]++;
2445                  ctrl_reg &= (~CTRL_LED);
2446                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2447                  ia_update_stats(ia_dev[i]);
2448               }
2449               else {
2450                  blinking[i] = 0;
2451                  ctrl_reg |= CTRL_LED;
2452                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2453                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2454                  if (ia_dev[i]->close_pending)  
2455                     wake_up(&ia_dev[i]->close_wait);
2456                  ia_tx_poll(ia_dev[i]);
2457                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2458               }
2459            }
2460         }
2461         mod_timer(&ia_timer, jiffies + HZ / 4);
2462         return;
2463 }
2464
2465 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2466         unsigned long addr)  
2467 {  
2468         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2469 }  
2470   
2471 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2472 {  
2473         return readl(INPH_IA_DEV(dev)->phy+addr);  
2474 }  
2475
2476 static void ia_free_tx(IADEV *iadev)
2477 {
2478         int i;
2479
2480         kfree(iadev->desc_tbl);
2481         for (i = 0; i < iadev->num_vc; i++)
2482                 kfree(iadev->testTable[i]);
2483         kfree(iadev->testTable);
2484         for (i = 0; i < iadev->num_tx_desc; i++) {
2485                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2486
2487                 dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2488                                  sizeof(*desc->cpcs), DMA_TO_DEVICE);
2489                 kfree(desc->cpcs);
2490         }
2491         kfree(iadev->tx_buf);
2492         dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2493                           iadev->tx_dle_dma);
2494 }
2495
2496 static void ia_free_rx(IADEV *iadev)
2497 {
2498         kfree(iadev->rx_open);
2499         dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2500                           iadev->rx_dle_dma);
2501 }
2502
2503 static int ia_start(struct atm_dev *dev)
2504 {  
2505         IADEV *iadev;  
2506         int error;  
2507         unsigned char phy;  
2508         u32 ctrl_reg;  
2509         IF_EVENT(printk(">ia_start\n");)  
2510         iadev = INPH_IA_DEV(dev);  
2511         if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2512                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2513                     dev->number, iadev->irq);  
2514                 error = -EAGAIN;
2515                 goto err_out;
2516         }  
2517         /* @@@ should release IRQ on error */  
2518         /* enabling memory + master */  
2519         if ((error = pci_write_config_word(iadev->pci,   
2520                                 PCI_COMMAND,   
2521                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2522         {  
2523                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2524                     "master (0x%x)\n",dev->number, error);  
2525                 error = -EIO;  
2526                 goto err_free_irq;
2527         }  
2528         udelay(10);  
2529   
2530         /* Maybe we should reset the front end, initialize Bus Interface Control   
2531                 Registers and see. */  
2532   
2533         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2534                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2535         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2536         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2537                         | CTRL_B8  
2538                         | CTRL_B16  
2539                         | CTRL_B32  
2540                         | CTRL_B48  
2541                         | CTRL_B64  
2542                         | CTRL_B128  
2543                         | CTRL_ERRMASK  
2544                         | CTRL_DLETMASK         /* should be removed later */  
2545                         | CTRL_DLERMASK  
2546                         | CTRL_SEGMASK  
2547                         | CTRL_REASSMASK          
2548                         | CTRL_FEMASK  
2549                         | CTRL_CSPREEMPT;  
2550   
2551        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2552   
2553         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2554                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2555            printk("Bus status reg after init: %08x\n", 
2556                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2557     
2558         ia_hw_type(iadev); 
2559         error = tx_init(dev);  
2560         if (error)
2561                 goto err_free_irq;
2562         error = rx_init(dev);  
2563         if (error)
2564                 goto err_free_tx;
2565   
2566         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2567         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2568         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2569                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2570         phy = 0; /* resolve compiler complaint */
2571         IF_INIT ( 
2572         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2573                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2574         else  
2575                 printk("IA: utopia,rev.%0x\n",phy);) 
2576
2577         if (iadev->phy_type &  FE_25MBIT_PHY)
2578            ia_mb25_init(iadev);
2579         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2580            ia_suni_pm7345_init(iadev);
2581         else {
2582                 error = suni_init(dev);
2583                 if (error)
2584                         goto err_free_rx;
2585                 if (dev->phy->start) {
2586                         error = dev->phy->start(dev);
2587                         if (error)
2588                                 goto err_free_rx;
2589                 }
2590                 /* Get iadev->carrier_detect status */
2591                 ia_frontend_intr(iadev);
2592         }
2593         return 0;
2594
2595 err_free_rx:
2596         ia_free_rx(iadev);
2597 err_free_tx:
2598         ia_free_tx(iadev);
2599 err_free_irq:
2600         free_irq(iadev->irq, dev);  
2601 err_out:
2602         return error;
2603 }  
2604   
2605 static void ia_close(struct atm_vcc *vcc)  
2606 {
2607         DEFINE_WAIT(wait);
2608         u16 *vc_table;
2609         IADEV *iadev;
2610         struct ia_vcc *ia_vcc;
2611         struct sk_buff *skb = NULL;
2612         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2613         unsigned long closetime, flags;
2614
2615         iadev = INPH_IA_DEV(vcc->dev);
2616         ia_vcc = INPH_IA_VCC(vcc);
2617         if (!ia_vcc) return;  
2618
2619         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2620                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2621         clear_bit(ATM_VF_READY,&vcc->flags);
2622         skb_queue_head_init (&tmp_tx_backlog);
2623         skb_queue_head_init (&tmp_vcc_backlog); 
2624         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2625            iadev->close_pending++;
2626            prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2627            schedule_timeout(msecs_to_jiffies(500));
2628            finish_wait(&iadev->timeout_wait, &wait);
2629            spin_lock_irqsave(&iadev->tx_lock, flags); 
2630            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2631               if (ATM_SKB(skb)->vcc == vcc){ 
2632                  if (vcc->pop) vcc->pop(vcc, skb);
2633                  else dev_kfree_skb_any(skb);
2634               }
2635               else 
2636                  skb_queue_tail(&tmp_tx_backlog, skb);
2637            } 
2638            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2639              skb_queue_tail(&iadev->tx_backlog, skb);
2640            IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2641            closetime = 300000 / ia_vcc->pcr;
2642            if (closetime == 0)
2643               closetime = 1;
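           /* closetime is the drain timeout in jiffies, scaled inversely
              with the VC's cell rate so slow VCs get proportionally longer
              to flush their outstanding descriptors (e.g. pcr 300000 -> 1
              jiffy, pcr 3000 -> 100 jiffies). */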
2644            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2645            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2646            spin_lock_irqsave(&iadev->tx_lock, flags);
2647            iadev->close_pending--;
2648            iadev->testTable[vcc->vci]->lastTime = 0;
2649            iadev->testTable[vcc->vci]->fract = 0; 
2650            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2651            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2652               if (vcc->qos.txtp.min_pcr > 0)
2653                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2654            }
2655            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2656               ia_vcc = INPH_IA_VCC(vcc); 
2657               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2658               ia_cbrVc_close (vcc);
2659            }
2660            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2661         }
2662         
2663         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2664            // reset reass table
2665            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2666            vc_table += vcc->vci; 
2667            *vc_table = NO_AAL5_PKT;
2668            // reset vc table
2669            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2670            vc_table += vcc->vci;
2671            *vc_table = (vcc->vci << 6) | 15;
2672            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2673               struct abr_vc_table __iomem *abr_vc_table = 
2674                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2675               abr_vc_table +=  vcc->vci;
2676               abr_vc_table->rdf = 0x0003;
2677               abr_vc_table->air = 0x5eb1;
2678            }                                 
2679            // Drain the packets
2680            rx_dle_intr(vcc->dev); 
2681            iadev->rx_open[vcc->vci] = NULL;
2682         }
2683         kfree(INPH_IA_VCC(vcc));  
2684         ia_vcc = NULL;
2685         vcc->dev_data = NULL;
2686         clear_bit(ATM_VF_ADDR,&vcc->flags);
2687         return;        
2688 }  
2689   
2690 static int ia_open(struct atm_vcc *vcc)
2691 {  
2692         struct ia_vcc *ia_vcc;  
2693         int error;  
2694         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2695         {  
2696                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2697                 vcc->dev_data = NULL;
2698         }  
2699         if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2700         {  
2701                 IF_EVENT(printk("iphase open: unspec part\n");)  
2702                 set_bit(ATM_VF_ADDR,&vcc->flags);
2703         }  
2704         if (vcc->qos.aal != ATM_AAL5)  
2705                 return -EINVAL;  
2706         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2707                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2708   
2709         /* Device dependent initialization */  
2710         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2711         if (!ia_vcc) return -ENOMEM;  
2712         vcc->dev_data = ia_vcc;
2713   
2714         if ((error = open_rx(vcc)))  
2715         {  
2716                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2717                 ia_close(vcc);  
2718                 return error;  
2719         }  
2720   
2721         if ((error = open_tx(vcc)))  
2722         {  
2723                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2724                 ia_close(vcc);  
2725                 return error;  
2726         }  
2727   
2728         set_bit(ATM_VF_READY,&vcc->flags);
2729
2730 #if 0
2731         {
2732            static u8 first = 1; 
2733            if (first) {
2734               ia_timer.expires = jiffies + 3*HZ;
2735               add_timer(&ia_timer);
2736               first = 0;
2737            }           
2738         }
2739 #endif
2740         IF_EVENT(printk("ia open returning\n");)  
2741         return 0;  
2742 }  
2743   
2744 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2745 {  
2746         IF_EVENT(printk(">ia_change_qos\n");)  
2747         return 0;  
2748 }  
2749   
2750 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2751 {  
2752    IA_CMDBUF ia_cmds;
2753    IADEV *iadev;
2754    int i, board;
2755    u16 __user *tmps;
2756    IF_EVENT(printk(">ia_ioctl\n");)  
2757    if (cmd != IA_CMD) {
2758       if (!dev->phy->ioctl) return -EINVAL;
2759       return dev->phy->ioctl(dev,cmd,arg);
2760    }
2761    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
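   /* ia_cmds.status carries the requested board index on input;
      out-of-range values fall back to board 0 */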
2762    board = ia_cmds.status;
2763    if ((board < 0) || (board > iadev_count))
2764          board = 0;    
2765    iadev = ia_dev[board];
2766    switch (ia_cmds.cmd) {
2767    case MEMDUMP:
2768    {
2769         switch (ia_cmds.sub_cmd) {
2770           case MEMDUMP_SEGREG:
2771              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2772              tmps = (u16 __user *)ia_cmds.buf;
2773              for(i=0; i<0x80; i+=2, tmps++)
2774                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2775              ia_cmds.status = 0;
2776              ia_cmds.len = 0x80;
2777              break;
2778           case MEMDUMP_REASSREG:
2779              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2780              tmps = (u16 __user *)ia_cmds.buf;
2781              for(i=0; i<0x80; i+=2, tmps++)
2782                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2783              ia_cmds.status = 0;
2784              ia_cmds.len = 0x80;
2785              break;
2786           case MEMDUMP_FFL:
2787           {  
2788              ia_regs_t       *regs_local;
2789              ffredn_t        *ffL;
2790              rfredn_t        *rfL;
2791                      
2792              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2793              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2794              if (!regs_local) return -ENOMEM;
2795              ffL = &regs_local->ffredn;
2796              rfL = &regs_local->rfredn;
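             /* Each readl() below returns 32 bits, but only the low 16 bits
                of every register are kept (masked with 0xffff). */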
2797              /* Copy real rfred registers into the local copy */
2798              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2799                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2800              /* Copy real ffred registers into the local copy */
2801              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2802                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2803
2804              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2805                 kfree(regs_local);
2806                 return -EFAULT;
2807              }
2808              kfree(regs_local);
2809              printk("Board %d registers dumped\n", board);
2810              ia_cmds.status = 0;                  
2811          }      
2812              break;        
2813          case READ_REG:
2814          {  
2815              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2816              desc_dbg(iadev); 
2817              ia_cmds.status = 0; 
2818          }
2819              break;
2820          case 0x6:
2821          {  
2822              ia_cmds.status = 0; 
2823              printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
2824              printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
2825          }
2826              break;
2827          case 0x8:
2828          {
2829              struct k_sonet_stats *stats;
2830              stats = &PRIV(_ia_dev[board])->sonet_stats;
2831              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2832              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2833              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2834              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2835              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2836              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2837              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2838              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2839              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2840          }
2841             ia_cmds.status = 0;
2842             break;
2843          case 0x9:
2844             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2845             for (i = 1; i <= iadev->num_rx_desc; i++)
2846                free_desc(_ia_dev[board], i);
2847             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2848                                             iadev->reass_reg+REASS_MASK_REG);
2849             iadev->rxing = 1;
2850             
2851             ia_cmds.status = 0;
2852             break;
2853
2854          case 0xb:
2855             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2856             ia_frontend_intr(iadev);
2857             break;
2858          case 0xa:
2859             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2860          {  
2861              ia_cmds.status = 0; 
2862              IADebugFlag = ia_cmds.maddr;
2863              printk("New debug option loaded\n");
2864          }
2865              break;
2866          default:
2867              ia_cmds.status = 0;
2868              break;
2869       } 
2870    }
2871       break;
2872    default:
2873       break;
2874
2875    }    
2876    return 0;  
2877 }  
2878   
2879 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2880         void __user *optval, int optlen)  
2881 {  
2882         IF_EVENT(printk(">ia_getsockopt\n");)  
2883         return -EINVAL;  
2884 }  
2885   
2886 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2887         void __user *optval, unsigned int optlen)  
2888 {  
2889         IF_EVENT(printk(">ia_setsockopt\n");)  
2890         return -EINVAL;  
2891 }  
2892   
2893 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2894         IADEV *iadev;
2895         struct dle *wr_ptr;
2896         struct tx_buf_desc __iomem *buf_desc_ptr;
2897         int desc;
2898         int comp_code;
2899         int total_len;
2900         struct cpcs_trailer *trailer;
2901         struct ia_vcc *iavcc;
2902
2903         iadev = INPH_IA_DEV(vcc->dev);  
2904         iavcc = INPH_IA_VCC(vcc);
2905         if (!iavcc->txing) {
2906            printk("discard packet on closed VC\n");
2907            if (vcc->pop)
2908                 vcc->pop(vcc, skb);
2909            else
2910                 dev_kfree_skb_any(skb);
2911            return 0;
2912         }
2913
2914         if (skb->len > iadev->tx_buf_sz - 8) {
2915            printk("Transmit size exceeds tx buffer size\n");
2916            if (vcc->pop)
2917                  vcc->pop(vcc, skb);
2918            else
2919                  dev_kfree_skb_any(skb);
2920           return 0;
2921         }
2922         if ((unsigned long)skb->data & 3) {
2923            printk("Misaligned SKB\n");
2924            if (vcc->pop)
2925                  vcc->pop(vcc, skb);
2926            else
2927                  dev_kfree_skb_any(skb);
2928            return 0;
2929         }       
2930         /* Get a descriptor number from our free descriptor queue  
2931            The descriptor number is taken from the TCQ, which is used
2932            here as a free-descriptor queue: the TCQ is initialized with
2933            all the descriptors and is therefore initially full.
2934         */
2935         desc = get_desc (iadev, iavcc);
2936         if (desc == 0xffff) 
2937             return 1;
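        /* get_desc() packs a completion code in bits 15:13 and the
           descriptor number in bits 12:0. */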
2938         comp_code = desc >> 13;  
2939         desc &= 0x1fff;  
2940   
2941         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2942         {  
2943                 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
2944                 atomic_inc(&vcc->stats->tx);
2945                 if (vcc->pop)   
2946                     vcc->pop(vcc, skb);   
2947                 else  
2948                     dev_kfree_skb_any(skb);
2949                 return 0;   /* return SUCCESS */
2950         }  
2951   
2952         if (comp_code)  
2953         {  
2954             IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n", 
2955                                                             desc, comp_code);)  
2956         }  
2957        
2958         /* remember the desc and vcc mapping */
2959         iavcc->vc_desc_cnt++;
2960         iadev->desc_tbl[desc-1].iavcc = iavcc;
2961         iadev->desc_tbl[desc-1].txskb = skb;
2962         IA_SKB_STATE(skb) = 0;
2963
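        /* Consume one 16-bit TCQ entry: advance the read pointer, wrap at the
           end of the queue and publish the new value to the segmentation
           unit's TCQ_RD_PTR register. */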
2964         iadev->ffL.tcq_rd += 2;
2965         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2966                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2967         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2968   
2969         /* Put the descriptor number in the packet ready queue  
2970                 and put the updated write pointer in the DLE field   
2971         */   
2972         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2973
2974         iadev->ffL.prq_wr += 2;
2975         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2976                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2977           
2978         /* Figure out the exact length of the packet plus the padding required
2979            to make it a multiple of 48 bytes (one AAL5 cell payload per cell). */
2980         total_len = skb->len + sizeof(struct cpcs_trailer);  
2981         total_len = ((total_len + 47) / 48) * 48;
2982         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
2983  
2984         /* Put the packet in a tx buffer */   
2985         trailer = iadev->tx_buf[desc-1].cpcs;
2986         IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2987                   skb, skb->data, skb->len, desc);)
2988         trailer->control = 0; 
2989         /* the length field is stored byte-swapped (big endian) for the adapter */ 
2990         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2991         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2992
2993         /* Display the packet */  
2994         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2995                                                         skb->len, tcnter++);  
2996         xdump(skb->data, skb->len, "TX: ");
2997         printk("\n");)
2998
2999         /* Build the buffer descriptor */  
3000         buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
3001         buf_desc_ptr += desc;   /* points to the corresponding entry */  
3002         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
3003         /* Oddly, p.115 of the user's guide describes this as a read-only register */
3004         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3005         buf_desc_ptr->vc_index = vcc->vci;
3006         buf_desc_ptr->bytes = total_len;  
3007
3008         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
3009            clear_lockup (vcc, iadev);
3010
3011         /* Build the DLE structure */  
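        /* Each transmit uses two DLEs: this one DMAs the skb data to the
           adapter's packet buffer; the second (built below) DMAs the CPCS
           trailer and enables the DMA completion interrupt (DMA_INT_ENABLE). */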
3012         wr_ptr = iadev->tx_dle_q.write;  
3013         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
3014         wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
3015                                               skb->len, DMA_TO_DEVICE);
3016         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
3017                                                   buf_desc_ptr->buf_start_lo;  
3018         /* wr_ptr->bytes = swap_byte_order(total_len); did not appear to make a difference */
3019         wr_ptr->bytes = skb->len;  
3020
3021         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
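        /* (bytes >> 2) == 0xb matches lengths 0x2c-0x2f; pad them up to 0x30 */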
3022         if ((wr_ptr->bytes >> 2) == 0xb)
3023            wr_ptr->bytes = 0x30;
3024
3025         wr_ptr->mode = TX_DLE_PSI; 
3026         wr_ptr->prq_wr_ptr_data = 0;
3027   
3028         /* end is not to be used for the DLE q */  
3029         if (++wr_ptr == iadev->tx_dle_q.end)  
3030                 wr_ptr = iadev->tx_dle_q.start;  
3031         
3032         /* Build trailer dle */
3033         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3034         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3035           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3036
3037         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3038         wr_ptr->mode = DMA_INT_ENABLE; 
3039         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3040         
3041         /* end is not to be used for the DLE q */
3042         if (++wr_ptr == iadev->tx_dle_q.end)  
3043                 wr_ptr = iadev->tx_dle_q.start;
3044
3045         iadev->tx_dle_q.write = wr_ptr;  
3046         ATM_DESC(skb) = vcc->vci;
3047         skb_queue_tail(&iadev->tx_dma_q, skb);
3048
3049         atomic_inc(&vcc->stats->tx);
3050         iadev->tx_pkt_cnt++;
3051         /* Increment transaction counter */  
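        /* the value 2 presumably corresponds to the two DLEs queued above */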
3052         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3053         
3054 #if 0        
3055         /* add flow control logic */ 
3056         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3057           if (iavcc->vc_desc_cnt > 10) {
3058              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3059             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3060               iavcc->flow_inc = -1;
3061               iavcc->saved_tx_quota = vcc->tx_quota;
3062            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3063              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3064              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3065               iavcc->flow_inc = 0;
3066            }
3067         }
3068 #endif
3069         IF_TX(printk("ia send done\n");)  
3070         return 0;  
3071 }  
3072
3073 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3074 {
3075         IADEV *iadev; 
3076         unsigned long flags;
3077
3078         iadev = INPH_IA_DEV(vcc->dev);
3079         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3080         {
3081             if (!skb)
3082                 printk(KERN_CRIT "null skb in ia_send\n");
3083             else dev_kfree_skb_any(skb);
3084             return -EINVAL;
3085         }                         
3086         spin_lock_irqsave(&iadev->tx_lock, flags); 
3087         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3088             dev_kfree_skb_any(skb);
3089             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3090             return -EINVAL; 
3091         }
3092         ATM_SKB(skb)->vcc = vcc;
3093  
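        /* Preserve ordering: if a backlog already exists queue behind it,
           otherwise attempt an immediate transmit and fall back to the
           backlog when ia_pkt_tx() cannot take the packet. */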
3094         if (skb_peek(&iadev->tx_backlog)) {
3095            skb_queue_tail(&iadev->tx_backlog, skb);
3096         }
3097         else {
3098            if (ia_pkt_tx (vcc, skb)) {
3099               skb_queue_tail(&iadev->tx_backlog, skb);
3100            }
3101         }
3102         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3103         return 0;
3104
3105 }
3106
3107 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3108 {
3109   int   left = *pos, n;   
3110   char  *tmpPtr;
3111   IADEV *iadev = INPH_IA_DEV(dev);
3112   if(!left--) {
3113      if (iadev->phy_type == FE_25MBIT_PHY) {
3114        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3115        return n;
3116      }
3117      if (iadev->phy_type == FE_DS3_PHY)
3118         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3119      else if (iadev->phy_type == FE_E3_PHY)
3120         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3121      else if (iadev->phy_type == FE_UTP_OPTION)
3122          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3123      else
3124         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3125      tmpPtr = page + n;
3126      if (iadev->pci_map_size == 0x40000)
3127         n += sprintf(tmpPtr, "-1KVC-");
3128      else
3129         n += sprintf(tmpPtr, "-4KVC-");  
3130      tmpPtr = page + n; 
3131      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3132         n += sprintf(tmpPtr, "1M  \n");
3133      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3134         n += sprintf(tmpPtr, "512K\n");
3135      else
3136        n += sprintf(tmpPtr, "128K\n");
3137      return n;
3138   }
3139   if (!left) {
3140      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3141                            "  Size of Tx Buffer  :  %u\n"
3142                            "  Number of Rx Buffer:  %u\n"
3143                            "  Size of Rx Buffer  :  %u\n"
3144                            "  Packets Received   :  %u\n"
3145                            "  Packets Transmitted:  %u\n"
3146                            "  Cells Received     :  %u\n"
3147                            "  Cells Transmitted  :  %u\n"
3148                            "  Board Dropped Cells:  %u\n"
3149                            "  Board Dropped Pkts :  %u\n",
3150                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3151                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3152                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3153                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3154                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3155   }
3156   return 0;
3157 }
3158   
3159 static const struct atmdev_ops ops = {  
3160         .open           = ia_open,  
3161         .close          = ia_close,  
3162         .ioctl          = ia_ioctl,  
3163         .getsockopt     = ia_getsockopt,  
3164         .setsockopt     = ia_setsockopt,  
3165         .send           = ia_send,  
3166         .phy_put        = ia_phy_put,  
3167         .phy_get        = ia_phy_get,  
3168         .change_qos     = ia_change_qos,  
3169         .proc_read      = ia_proc_read,
3170         .owner          = THIS_MODULE,
3171 };  
3172           
3173 static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3174 {  
3175         struct atm_dev *dev;  
3176         IADEV *iadev;  
3177         int ret;
3178
3179         iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3180         if (!iadev) {
3181                 ret = -ENOMEM;
3182                 goto err_out;
3183         }
3184
3185         iadev->pci = pdev;
3186
3187         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3188                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3189         if (pci_enable_device(pdev)) {
3190                 ret = -ENODEV;
3191                 goto err_out_free_iadev;
3192         }
3193         dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3194         if (!dev) {
3195                 ret = -ENOMEM;
3196                 goto err_out_disable_dev;
3197         }
3198         dev->dev_data = iadev;
3199         IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3200         IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3201                 iadev->LineRate);)
3202
3203         pci_set_drvdata(pdev, dev);
3204
3205         ia_dev[iadev_count] = iadev;
3206         _ia_dev[iadev_count] = dev;
3207         iadev_count++;
3208         if (ia_init(dev) || ia_start(dev)) {  
3209                 IF_INIT(printk("IA register failed!\n");)
3210                 iadev_count--;
3211                 ia_dev[iadev_count] = NULL;
3212                 _ia_dev[iadev_count] = NULL;
3213                 ret = -EINVAL;
3214                 goto err_out_deregister_dev;
3215         }
3216         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3217
3218         iadev->next_board = ia_boards;  
3219         ia_boards = dev;  
3220
3221         return 0;
3222
3223 err_out_deregister_dev:
3224         atm_dev_deregister(dev);  
3225 err_out_disable_dev:
3226         pci_disable_device(pdev);
3227 err_out_free_iadev:
3228         kfree(iadev);
3229 err_out:
3230         return ret;
3231 }
3232
3233 static void ia_remove_one(struct pci_dev *pdev)
3234 {
3235         struct atm_dev *dev = pci_get_drvdata(pdev);
3236         IADEV *iadev = INPH_IA_DEV(dev);
3237
3238         /* Disable phy interrupts */
3239         ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3240                                    SUNI_RSOP_CIE);
3241         udelay(1);
3242
3243         if (dev->phy && dev->phy->stop)
3244                 dev->phy->stop(dev);
3245
3246         /* De-register device */  
3247         free_irq(iadev->irq, dev);
3248         iadev_count--;
3249         ia_dev[iadev_count] = NULL;
3250         _ia_dev[iadev_count] = NULL;
3251         IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3252         atm_dev_deregister(dev);
3253
3254         iounmap(iadev->base);  
3255         pci_disable_device(pdev);
3256
3257         ia_free_rx(iadev);
3258         ia_free_tx(iadev);
3259
3260         kfree(iadev);
3261 }
3262
3263 static const struct pci_device_id ia_pci_tbl[] = {
3264         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3265         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3266         { 0,}
3267 };
3268 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3269
3270 static struct pci_driver ia_driver = {
3271         .name =         DEV_LABEL,
3272         .id_table =     ia_pci_tbl,
3273         .probe =        ia_init_one,
3274         .remove =       ia_remove_one,
3275 };
3276
3277 static int __init ia_module_init(void)
3278 {
3279         int ret;
3280
3281         ret = pci_register_driver(&ia_driver);
3282         if (ret >= 0) {
3283                 ia_timer.expires = jiffies + 3*HZ;
3284                 add_timer(&ia_timer); 
3285         } else
3286                 printk(KERN_ERR DEV_LABEL ": pci_register_driver failed\n");
3287         return ret;
3288 }
3289
3290 static void __exit ia_module_exit(void)
3291 {
3292         pci_unregister_driver(&ia_driver);
3293
3294         del_timer(&ia_timer);
3295 }
3296
3297 module_init(ia_module_init);
3298 module_exit(ia_module_exit);