1 /* Cypress WestBridge OMAP3430 Kernel Hal source file (cyashalomap_kernel.c)
2 ## ===========================
3 ## Copyright (C) 2010 Cypress Semiconductor
5 ## This program is free software; you can redistribute it and/or
6 ## modify it under the terms of the GNU General Public License
7 ## as published by the Free Software Foundation; either version 2
8 ## of the License, or (at your option) any later version.
10 ## This program is distributed in the hope that it will be useful,
11 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ## GNU General Public License for more details.
15 ## You should have received a copy of the GNU General Public License
16 ## along with this program; if not, write to the Free Software
17 ## Foundation, Inc., 51 Franklin Street, Fifth Floor,
18 ## Boston, MA 02110-1301, USA.
19 ## ===========================
22 #ifdef CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL
25 #include <linux/ioport.h>
26 #include <linux/timer.h>
27 #include <linux/gpio.h>
28 #include <linux/interrupt.h>
29 #include <linux/delay.h>
30 #include <linux/scatterlist.h>
32 #include <linux/irq.h>
33 #include <linux/slab.h>
34 #include <linux/sched.h>
35 /* include seems broken moving for patch submission
36 * #include <mach/mux.h>
37 * #include <mach/gpmc.h>
38 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h>
39 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h>
40 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h>
41 * #include <linux/westbridge/cyaserr.h>
42 * #include <linux/westbridge/cyasregs.h>
43 * #include <linux/westbridge/cyasdma.h>
44 * #include <linux/westbridge/cyasintr.h>
46 #include <linux/../../arch/arm/plat-omap/include/plat/mux.h>
47 #include <linux/../../arch/arm/plat-omap/include/plat/gpmc.h>
48 #include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h"
49 #include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h"
50 #include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h"
51 #include "../../../include/linux/westbridge/cyaserr.h"
52 #include "../../../include/linux/westbridge/cyasregs.h"
53 #include "../../../include/linux/westbridge/cyasdma.h"
54 #include "../../../include/linux/westbridge/cyasintr.h"
56 #define HAL_REV "1.1.0"
59 * uncomment to enable 16bit pnand interface
61 #define PNAND_16BIT_MODE
64 * selects one of 3 versions of pnand_lbd_read()
65 * PNAND_LBD_READ_NO_PFE - original 8/16 bit code
66 * reads through the gpmc CONTROLLER REGISTERS
67 * ENABLE_GPMC_PF_ENGINE - USES GPMC PFE FIFO reads, in 8 bit mode,
68 * same speed as the above
69 * PFE_LBD_READ_V2 - slightly different, performance same as above
71 #define PNAND_LBD_READ_NO_PFE
72 /* #define ENABLE_GPMC_PF_ENGINE */
73 /* #define PFE_LBD_READ_V2 */
76 * westbridge astoria ISR options to limit number of
77 * back to back DMA transfers per ISR interrupt
79 #define MAX_DRQ_LOOPS_IN_ISR 4
82 * debug prints enabling
83 *#define DBGPRN_ENABLED
84 *#define DBGPRN_DMA_SETUP_RD
85 *#define DBGPRN_DMA_SETUP_WR
90 * For performance reasons, we handle storage endpoint transfers up to 4 KB
91 * within the HAL itself.
93 #define CYASSTORAGE_WRITE_EP_NUM (4)
94 #define CYASSTORAGE_READ_EP_NUM (8)
97 * size of DMA packet HAL can accept from Storage API
98 * HAL will fragment it into smaller chunks that the P port can accept
100 #define CYASSTORAGE_MAX_XFER_SIZE (2*32768)
103 * P port MAX DMA packet size according to interface/ep configuration
105 #define HAL_DMA_PKT_SZ 512
/*
 * True when ep is one of the storage endpoints (2, 4, 6, 8) handled
 * directly by this HAL (see CYASSTORAGE_*_EP_NUM above).
 */
107 #define is_storage_e_p(ep) (((ep) == 2) || ((ep) == 4) || \
108 ((ep) == 6) || ((ep) == 8))
111 * persistent, stores current GPMC interface cfg mode
/* non-zero => GPMC/astoria bus currently in 16 bit mode
 * (set in cy_as_hal_gpmc_enable_16bit_bus) */
113 static uint8_t pnand_16bit;
116 * keep processing new WB DRQ in ISR until all handled (performance feature)
118 #define PROCESS_MULTIPLE_DRQ_IN_ISR (1)
122 * ASTORIA PNAND IF COMMANDS, CASDO - READ, CASDI - WRITE
/*
 * pNAND command bytes written to the GPMC NAND command register.
 * RDPAGE_B1/B2 frame a page read, PGMPAGE_B1/B2 frame a page program
 * (see p_nand_lbd_read()/p_nand_lbd_write() below).
 */
126 #define RDPAGE_B1 0x00
127 #define RDPAGE_B2 0x30
128 #define PGMPAGE_B1 0x80
129 #define PGMPAGE_B2 0x10
132 * The type of DMA operation, per endpoint
/*
 * Per-endpoint DMA operation kind.
 * NOTE(review): the enumerator list (original lines 135-137) is not
 * visible in this excerpt.
 */
134 typedef enum cy_as_hal_dma_type {
138 } cy_as_hal_dma_type;
142 * SG list helpers defined in scatterlist.h
143 #define sg_is_chain(sg) ((sg)->page_link & 0x01)
144 #define sg_is_last(sg) ((sg)->page_link & 0x02)
145 #define sg_chain_ptr(sg) \
146 ((struct scatterlist *) ((sg)->page_link & ~0x03))
/*
 * Per-endpoint soft-DMA bookkeeping; one instance per WB endpoint in
 * the module-level end_points[] array. Field semantics are described
 * in the embedded comment block below. Some member declarations from
 * the original file are not visible in this excerpt.
 */
148 typedef struct cy_as_hal_endpoint_dma {
149 cy_bool buffer_valid;
153 * sg_list_enabled - if true use, r/w DMA transfers use sg list,
154 * FALSE use pointer to a buffer
155 * sg_p - pointer to the owner's sg list, of there is such
157 * dma_xfer_sz - size of the next dma xfer on P port
158 * seg_xfer_cnt - counts xfered bytes for in current sg_list
160 * req_xfer_cnt - total number of bytes transferred so far in
162 * req_length - total request length
164 bool sg_list_enabled;
165 struct scatterlist *sg_p;
166 uint16_t dma_xfer_sz;
167 uint32_t seg_xfer_cnt;
168 uint16_t req_xfer_cnt;
170 cy_as_hal_dma_type type;
172 } cy_as_hal_endpoint_dma;
175 * The list of OMAP devices (should be one)
/* head of the list of OMAP device objects (normally one entry) */
177 static cy_as_omap_dev_kernel *m_omap_list_p;
180 * The callback to call after DMA operations are complete
182 static cy_as_hal_dma_complete_callback callback;
185 * Pending data size for the endpoints
187 static cy_as_hal_endpoint_dma end_points[16];
190 * Forward declaration
192 static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p);
/* ISR sequence counter, used only in debug prints */
194 static uint16_t intr_sequence_num;
/* NOTE(review): purpose not visible in this excerpt — confirm usage */
195 static uint8_t intr__enable;
/* VMA of the OMAP pad-mux (PADCONF) register block, set in
 * cy_as_hal_processor_hw_init() */
198 static u32 iomux_vma;
202 * gpmc I/O registers VMA
204 static u32 gpmc_base;
207 * gpmc data VMA associated with CS4 (ASTORIA CS on GPMC)
209 static u32 gpmc_data_vma;
/* VMAs of the GPMC NAND data/command/address registers for our CS */
210 static u32 ndata_reg_vma;
211 static u32 ncmd_reg_vma;
212 static u32 naddr_reg_vma;
/*
 * Forward declarations for the pNAND block transfer and Astoria
 * register access primitives defined later in this file.
 */
217 static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff);
218 static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff);
219 static inline u16 __attribute__((always_inline))
220 ast_p_nand_casdo_read(u8 reg_addr8);
221 static inline void __attribute__((always_inline))
222 ast_p_nand_casdi_write(u8 reg_addr8, u16 data);
225 * prints given number of omap registers
/*
 * Dump 'count' consecutive 32-bit registers starting at virtual
 * address virt_base, labelled "<name_prefix>_<name_base+i>".
 * Debug helper only; reads via __raw_readl on virtual addresses.
 */
227 static void cy_as_hal_print_omap_regs(char *name_prefix,
228 u8 name_base, u32 virt_base, u16 count)
230 u32 reg_val, reg_addr;
232 cy_as_hal_print_message(KERN_INFO "\n");
233 for (i = 0; i < count; i++) {
/* registers are 32 bits / 4 bytes apart */
235 reg_addr = virt_base + (i*4);
236 /* use virtual addresses here*/
237 reg_val = __raw_readl(reg_addr);
238 cy_as_hal_print_message(KERN_INFO "%s_%d[%8.8x]=%8.8x\n",
239 name_prefix, name_base+i,
245 * setMUX function for a pad + additional pad flags
/*
 * Program one OMAP pad-mux (PADCONF) register from the
 * omap_mux_pin_cfg[] table entry at pad_func_index, and return the
 * previous 16-bit pad value so callers can restore it.
 * On first call, sanity-checks the table's END_OF_TABLE sentinel.
 */
247 static u16 omap_cfg_reg_L(u32 pad_func_index)
/* one-shot guard: the table check runs only on the first call */
249 static u8 sanity_check = 1;
252 u16 cur_val, wr_val, rdback_val;
255 * do sanity check on the omap_mux_pin_cfg[] table
257 cy_as_hal_print_message(KERN_INFO" OMAP pins user_pad cfg ");
/* verify the terminator entry literally spells "END..." */
259 if ((omap_mux_pin_cfg[END_OF_TABLE].name[0] == 'E') &&
260 (omap_mux_pin_cfg[END_OF_TABLE].name[1] == 'N') &&
261 (omap_mux_pin_cfg[END_OF_TABLE].name[2] == 'D')) {
263 cy_as_hal_print_message(KERN_INFO
266 cy_as_hal_print_message(KERN_WARNING
267 "table is bad, fix it");
276 * get virtual address to the PADCNF_REG
278 reg_vma = (u32)iomux_vma + omap_mux_pin_cfg[pad_func_index].offset;
281 * add additional USER PU/PD/EN flags
283 wr_val = omap_mux_pin_cfg[pad_func_index].mux_val;
/* read old value before overwriting (returned to the caller) */
284 cur_val = IORD16(reg_vma);
287 * PADCFG regs 16 bit long, packed into 32 bit regs,
288 * can also be accessed as u16
290 IOWR16(reg_vma, wr_val);
/* read back to verify the write took effect */
291 rdback_val = IORD16(reg_vma);
294 * in case if the caller wants to save the old value
299 #define BLKSZ_4K 0x1000
302 * switch GPMC DATA bus mode
/*
 * Switch the GPMC NAND data bus between 8-bit and 16-bit mode for
 * the Astoria chip select. The CS is disabled (CSVALID cleared)
 * around the CONFIG1 reprogramming and re-enabled afterwards; the
 * resulting mode is latched in the module-level pnand_16bit flag.
 */
304 void cy_as_hal_gpmc_enable_16bit_bus(bool dbus16_enabled)
309 * disable gpmc CS4 operation 1st
311 tmp32 = gpmc_cs_read_reg(AST_GPMC_CS,
312 GPMC_CS_CONFIG7) & ~GPMC_CONFIG7_CSVALID;
313 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);
316 * GPMC NAND data bus can be 8 or 16 bit wide
318 if (dbus16_enabled) {
319 DBGPRN("enabling 16 bit bus\n");
320 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
321 (GPMC_CONFIG1_DEVICETYPE(2) |
322 GPMC_CONFIG1_WAIT_PIN_SEL(2) |
323 GPMC_CONFIG1_DEVICESIZE_16)
/* 8-bit mode: same CONFIG1 value minus DEVICESIZE_16 */
326 DBGPRN(KERN_INFO "enabling 8 bit bus\n");
327 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
328 (GPMC_CONFIG1_DEVICETYPE(2) |
329 GPMC_CONFIG1_WAIT_PIN_SEL(2))
334 * re-enable astoria CS operation on GPMC
336 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
337 (tmp32 | GPMC_CONFIG7_CSVALID));
/* remember the current bus width for the pNAND access routines */
342 pnand_16bit = dbus16_enabled;
/*
 * One-time GPMC setup for the Astoria chip select:
 *  - ioremap the GPMC register block and compute the NAND
 *    cmd/addr/data register VMAs,
 *  - request the CS from the GPMC driver and map its data window,
 *  - program CONFIG1 (NAND device type, wait pin), bus timings,
 *    and the CS base/mask region (CONFIG7),
 *  - default the bus to 8-bit mode and fix up WAIT2 polarity by
 *    writing GPMC_CONFIG directly (no kernel API for it).
 * Error paths unwind via release_mem_region()/gpmc_cs_free().
 */
345 static int cy_as_hal_gpmc_init(void)
349 struct gpmc_timings timings;
351 gpmc_base = (u32)ioremap_nocache(OMAP34XX_GPMC_BASE, BLKSZ_4K);
352 DBGPRN(KERN_INFO "kernel has gpmc_base=%x , val@ the base=%x",
353 gpmc_base, __raw_readl(gpmc_base)
357 * these globals are full VMAs off the gpmc_base above
359 ncmd_reg_vma = GPMC_VMA(AST_GPMC_NAND_CMD);
360 naddr_reg_vma = GPMC_VMA(AST_GPMC_NAND_ADDR);
361 ndata_reg_vma = GPMC_VMA(AST_GPMC_NAND_DATA);
364 * request GPMC CS for ASTORIA request
366 if (gpmc_cs_request(AST_GPMC_CS, SZ_16M, (void *)&csa_phy) < 0) {
367 cy_as_hal_print_message(KERN_ERR "error failed to request"
368 "ncs4 for ASTORIA\n");
371 DBGPRN(KERN_INFO "got phy_addr:%x for "
372 "GPMC CS%d GPMC_CFGREG7[CS4]\n",
373 csa_phy, AST_GPMC_CS);
377 * request VM region for 4K addr space for chip select 4 phy address
378 * technically we don't need it for NAND devices, but do it anyway
379 * so that data read/write bus cycle can be triggered by reading
380 * or writing this mem region
382 if (!request_mem_region(csa_phy, BLKSZ_4K, "AST_OMAP_HAL")) {
384 cy_as_hal_print_message(KERN_ERR "error MEM region "
385 "request for phy_addr:%x failed\n",
391 * REMAP mem region associated with our CS
393 gpmc_data_vma = (u32)ioremap_nocache(csa_phy, BLKSZ_4K);
394 if (!gpmc_data_vma) {
396 cy_as_hal_print_message(KERN_ERR "error- ioremap()"
397 "for phy_addr:%x failed", csa_phy);
399 goto out_release_mem_region;
401 cy_as_hal_print_message(KERN_INFO "ioremap(%x) returned vma=%x\n",
402 csa_phy, gpmc_data_vma);
/* NAND device (DEVICETYPE=2), WAIT2 as the wait pin */
404 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
405 (GPMC_CONFIG1_DEVICETYPE(2) |
406 GPMC_CONFIG1_WAIT_PIN_SEL(2)));
408 memset(&timings, 0, sizeof(timings));
/* chip-select assertion window */
411 timings.cs_on = WB_GPMC_CS_t_o_n;
412 timings.cs_wr_off = WB_GPMC_BUSCYC_t;
413 timings.cs_rd_off = WB_GPMC_BUSCYC_t;
/* ADV (address valid) timing */
416 timings.adv_on = WB_GPMC_ADV_t_o_n;
417 timings.adv_rd_off = WB_GPMC_BUSCYC_t;
418 timings.adv_wr_off = WB_GPMC_BUSCYC_t;
/* read strobe (OE) and read access/cycle times */
421 timings.oe_on = WB_GPMC_OE_t_o_n;
422 timings.oe_off = WB_GPMC_OE_t_o_f_f;
423 timings.access = WB_GPMC_RD_t_a_c_c;
424 timings.rd_cycle = WB_GPMC_BUSCYC_t;
/* write strobe (WE) and write access/cycle times */
427 timings.we_on = WB_GPMC_WE_t_o_n;
428 timings.we_off = WB_GPMC_WE_t_o_f_f;
429 timings.wr_access = WB_GPMC_WR_t_a_c_c;
430 timings.wr_cycle = WB_GPMC_BUSCYC_t;
432 timings.page_burst_access = WB_GPMC_BUSCYC_t;
433 timings.wr_data_mux_bus = WB_GPMC_BUSCYC_t;
434 gpmc_cs_set_timings(AST_GPMC_CS, &timings);
/* debug: dump CONFIG1..CONFIG7 for our CS */
436 cy_as_hal_print_omap_regs("GPMC_CONFIG", 1,
437 GPMC_VMA(GPMC_CFG_REG(1, AST_GPMC_CS)), 7);
440 * DISABLE cs4, NOTE GPMC REG7 is already configured
441 * at this point by gpmc_cs_request
443 tmp32 = gpmc_cs_read_reg(AST_GPMC_CS, GPMC_CS_CONFIG7) &
444 ~GPMC_CONFIG7_CSVALID;
445 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);
448 * PROGRAM chip select Region, (see OMAP3430 TRM PAGE 1088)
450 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
451 (AS_CS_MASK | AS_CS_BADDR));
454 * by default configure GPMC into 8 bit mode
455 * (to match astoria default mode)
457 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
458 (GPMC_CONFIG1_DEVICETYPE(2) |
459 GPMC_CONFIG1_WAIT_PIN_SEL(2)));
462 * ENABLE astoria cs operation on GPMC
464 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
465 (tmp32 | GPMC_CONFIG7_CSVALID));
468 * No method currently exists to write this register through GPMC APIs
469 * need to change WAIT2 polarity
471 tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
/* NOTE(review): 0x40 flips WAIT2 polarity per the comment above —
 * confirm against the OMAP3430 TRM GPMC_CONFIG bit layout */
472 tmp32 = tmp32 | NAND_FORCE_POSTED_WRITE_B | 0x40;
473 IOWR32(GPMC_VMA(GPMC_CONFIG_REG), tmp32);
475 tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
476 cy_as_hal_print_message("GPMC_CONFIG_REG=0x%x\n", tmp32);
/* error unwind labels */
480 out_release_mem_region:
481 release_mem_region(csa_phy, BLKSZ_4K);
484 gpmc_cs_free(AST_GPMC_CS);
490 * west bridge astoria ISR (Interrupt handler)
/*
 * Astoria interrupt handler (registered with IRQF_SHARED in
 * cy_as_hal_configure_interrupts()).
 * Flow: read P0_INTR_REG, save and clear the interrupt mask so WB
 * stops signalling while we work, service back-to-back DRQ events
 * (bounded by MAX_DRQ_LOOPS_IN_ISR to limit time spent in the ISR),
 * forward MCU/MBOX/PM/PLL events to the API core, then restore the
 * saved mask.
 */
492 static irqreturn_t cy_astoria_int_handler(int irq,
493 void *dev_id, struct pt_regs *regs)
495 cy_as_omap_dev_kernel *dev_p;
496 uint16_t read_val = 0;
497 uint16_t mask_val = 0;
500 * debug stuff, counts number of loops per one intr trigger
502 uint16_t drq_loop_cnt = 0;
/* non-DRQ interrupt sources forwarded to cy_as_intr_service_interrupt */
507 const uint16_t sentinel = (CY_AS_MEM_P0_INTR_REG_MCUINT |
508 CY_AS_MEM_P0_INTR_REG_MBINT |
509 CY_AS_MEM_P0_INTR_REG_PMINT |
510 CY_AS_MEM_P0_INTR_REG_PLLLOCKINT);
513 * sample IRQ pin level (just for statistics)
515 irq_pin = __gpio_get_value(AST_INT);
518 * this one just for debugging
523 * astoria device handle
528 * read Astoria intr register
530 read_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
531 CY_AS_MEM_P0_INTR_REG);
534 * save current mask value
536 mask_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
537 CY_AS_MEM_P0_INT_MASK_REG);
539 DBGPRN("<1>HAL__intr__enter:_seq:%d, P0_INTR_REG:%x\n",
540 intr_sequence_num, read_val);
543 * Disable WB interrupt signal generation while we are in ISR
545 cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
546 CY_AS_MEM_P0_INT_MASK_REG, 0x0000);
549 * this is a DRQ Interrupt
551 if (read_val & CY_AS_MEM_P0_INTR_REG_DRQINT) {
555 * handle DRQ interrupt
559 cy_handle_d_r_q_interrupt(dev_p);
562 * spending too much time in ISR may impact
563 * average system performance
565 if (drq_loop_cnt >= MAX_DRQ_LOOPS_IN_ISR)
569 * Keep processing if there is another DRQ int flag
571 } while (cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
572 CY_AS_MEM_P0_INTR_REG) &
573 CY_AS_MEM_P0_INTR_REG_DRQINT);
/* hand all non-DRQ sources to the WestBridge API core */
576 if (read_val & sentinel)
577 cy_as_intr_service_interrupt((cy_as_hal_device_tag)dev_p);
579 DBGPRN("<1>_hal:_intr__exit seq:%d, mask=%4.4x,"
580 "int_pin:%d DRQ_jobs:%d\n",
587 * re-enable WB hw interrupts
589 cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
590 CY_AS_MEM_P0_INT_MASK_REG, mask_val);
/*
 * Hook cy_astoria_int_handler to the AST_INT GPIO interrupt:
 * level-low triggered, shared line. dev_p is passed as the shared
 * IRQ cookie (must be non-NULL for IRQF_SHARED registration).
 */
595 static int cy_as_hal_configure_interrupts(void *dev_p)
598 int irq_pin = AST_INT;
/* Astoria INT# is active low */
600 irq_set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW);
603 * for shared IRQS must provide non NULL device ptr
604 * otherwise the int won't register
606 result = request_irq(OMAP_GPIO_IRQ(irq_pin),
607 (irq_handler_t)cy_astoria_int_handler,
608 IRQF_SHARED, "AST_INT#", dev_p);
612 * OMAP_GPIO_IRQ(irq_pin) - omap logical IRQ number
613 * assigned to this interrupt
614 * OMAP_GPIO_BIT(AST_INT, GPIO_IRQENABLE1) - print status
615 * of AST_INT GPIO IRQ_ENABLE FLAG
617 cy_as_hal_print_message(KERN_INFO"AST_INT omap_pin:"
618 "%d assigned IRQ #%d IRQEN1=%d\n",
620 OMAP_GPIO_IRQ(irq_pin),
621 OMAP_GPIO_BIT(AST_INT, GPIO_IRQENABLE1)
/* request_irq failure path */
624 cy_as_hal_print_message("cyasomaphal: interrupt "
625 "failed to register\n");
627 cy_as_hal_print_message(KERN_WARNING
628 "ASTORIA: can't get assigned IRQ"
629 "%i for INT#\n", OMAP_GPIO_IRQ(irq_pin));
636 * initialize OMAP pads/pins to user defined functions
/*
 * Walk the NULL-name-terminated pad_cfg_tab table and, for each
 * entry: claim the GPIO, program its pad mux via omap_cfg_reg_L(),
 * set the drive level, then the direction. Entries whose
 * gpio_request() fails are reported and left with valid == 0 so
 * cy_as_hal_release_user_pads() skips them.
 */
638 static void cy_as_hal_init_user_pads(user_pad_cfg_t *pad_cfg_tab)
641 * browse through the table and initialize the pins
646 while (pad_cfg_tab->name != NULL) {
648 if (gpio_request(pad_cfg_tab->pin_num, NULL) == 0) {
/* mark as acquired for later gpio_free() */
650 pad_cfg_tab->valid = 1;
651 mux_val = omap_cfg_reg_L(pad_cfg_tab->mux_func);
654 * always set drv level before changing out direction
656 __gpio_set_value(pad_cfg_tab->pin_num,
660 * "0" - OUT, "1", input omap_set_gpio_direction
661 * (pad_cfg_tab->pin_num, pad_cfg_tab->dir);
663 if (pad_cfg_tab->dir)
664 gpio_direction_input(pad_cfg_tab->pin_num);
666 gpio_direction_output(pad_cfg_tab->pin_num,
/* read back the pin level for the diagnostic print below */
670 in_level = __gpio_get_value(pad_cfg_tab->pin_num);
672 cy_as_hal_print_message(KERN_INFO "configured %s to "
673 "OMAP pad_%d, DIR=%d "
676 pad_cfg_tab->pin_num,
683 * get the pad_mux value to check on the pin_function
685 cy_as_hal_print_message(KERN_INFO "couldn't cfg pin %d"
686 "for signal %s, its already taken\n",
687 pad_cfg_tab->pin_num,
691 tmp16 = *(u16 *)PADCFG_VMA
692 (omap_mux_pin_cfg[pad_cfg_tab->mux_func].offset);
694 cy_as_hal_print_message(KERN_INFO "GPIO_%d(PAD_CFG=%x,OE=%d"
695 "DOUT=%d, DIN=%d IRQEN=%d)\n\n",
696 pad_cfg_tab->pin_num, tmp16,
697 OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_OE),
698 OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_OUT),
699 OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_IN),
700 OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_IRQENABLE1)
704 * next pad_cfg descriptor
709 cy_as_hal_print_message(KERN_INFO"pads configured\n");
714 * release gpios taken by the module
/*
 * Free every GPIO the module successfully acquired in
 * cy_as_hal_init_user_pads(); entries with valid == 0 were never
 * requested and are only reported.
 */
716 static void cy_as_hal_release_user_pads(user_pad_cfg_t *pad_cfg_tab)
718 while (pad_cfg_tab->name != NULL) {
720 if (pad_cfg_tab->valid) {
721 gpio_free(pad_cfg_tab->pin_num);
722 pad_cfg_tab->valid = 0;
723 cy_as_hal_print_message(KERN_INFO "GPIO_%d "
724 "released from %s\n",
725 pad_cfg_tab->pin_num,
728 cy_as_hal_print_message(KERN_INFO "no release "
729 "for %s, GPIO_%d, wasn't acquired\n",
731 pad_cfg_tab->pin_num);
/*
 * Force the GPMC nCS4 pad mux to its chip-select function; the Zoom
 * board otherwise claims this pin for another purpose.
 */
737 void cy_as_hal_config_c_s_mux(void)
740 * FORCE the GPMC CS4 pin (it is in use by the zoom system)
742 omap_cfg_reg_L(T8_OMAP3430_GPMC_n_c_s4);
744 EXPORT_SYMBOL(cy_as_hal_config_c_s_mux);
/*
 * Top-level OMAP3430 hardware bring-up for the WestBridge HAL:
 * map the PADCONF and GPIO banks, free/claim the Astoria-connected
 * pads, initialize the GPMC (cy_as_hal_gpmc_init), and force the
 * CS4 mux. Returns the VMA of the GPMC data window (gpmc_data_vma).
 */
749 uint32_t cy_as_hal_processor_hw_init(void)
753 cy_as_hal_print_message(KERN_INFO "init OMAP3430 hw...\n");
/* map the system control PADCONF block for omap_cfg_reg_L() */
755 iomux_vma = (u32)ioremap_nocache(
756 (u32)CTLPADCONF_BASE_ADDR, CTLPADCONF_SIZE);
757 cy_as_hal_print_message(KERN_INFO "PADCONF_VMA=%x val=%x\n",
758 iomux_vma, IORD32(iomux_vma));
/* map all 6 OMAP3 GPIO banks listed in gpio_vma_tab[] */
763 for (i = 0; i < 6; i++) {
764 gpio_vma_tab[i].virt_addr = (u32)ioremap_nocache(
765 gpio_vma_tab[i].phy_addr,
766 gpio_vma_tab[i].size);
768 cy_as_hal_print_message(KERN_INFO "%s virt_addr=%x\n",
769 gpio_vma_tab[i].name,
770 (u32)gpio_vma_tab[i].virt_addr);
774 * force OMAP_GPIO_126 to released state,
775 * will be configured to drive reset
777 gpio_free(AST_RESET);
780 *same thing with AStoria CS pin
785 * initialize all the OMAP pads connected to astoria
787 cy_as_hal_init_user_pads(user_pad_cfg);
789 err = cy_as_hal_gpmc_init();
791 cy_as_hal_print_message(KERN_INFO"gpmc init failed:%d", err);
793 cy_as_hal_config_c_s_mux();
/* callers use this VMA to touch the Astoria data window */
795 return gpmc_data_vma;
797 EXPORT_SYMBOL(cy_as_hal_processor_hw_init);
/*
 * Undo cy_as_hal_processor_hw_init(): unmap the GPMC data window,
 * release the CS memory region and GPMC chip select, free the
 * Astoria IRQ, and return all user pads to the kernel.
 */
799 void cy_as_hal_omap_hardware_deinit(cy_as_omap_dev_kernel *dev_p)
802 * free omap hw resources
804 if (gpmc_data_vma != 0)
805 iounmap((void *)gpmc_data_vma);
808 release_mem_region(csa_phy, BLKSZ_4K);
810 gpmc_cs_free(AST_GPMC_CS);
/* dev_p must match the cookie passed to request_irq() */
812 free_irq(OMAP_GPIO_IRQ(AST_INT), dev_p);
814 cy_as_hal_release_user_pads(user_pad_cfg);
818 * These are the functions that are not part of the
819 * HAL layer, but are required to be called for this HAL
823 * Called On AstDevice LKM exit
/*
 * HAL shutdown, called on module exit. Validates the HAL tag
 * signature, masks all WestBridge interrupts, stops the worker
 * thread (flag + completion handshake), tears down the hardware,
 * unlinks the device from m_omap_list_p, and frees it.
 */
825 int stop_o_m_a_p_kernel(const char *pgm, cy_as_hal_device_tag tag)
827 cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;
830 * TODO: Need to disable WB interrupt handler 1st
835 cy_as_hal_print_message("<1>_stopping OMAP34xx HAL layer object\n");
/* reject tags that do not carry the HAL signature */
836 if (dev_p->m_sig != CY_AS_OMAP_KERNEL_HAL_SIG) {
837 cy_as_hal_print_message("<1>%s: %s: bad HAL tag\n",
/* mask all WB interrupt sources before teardown */
845 cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
846 CY_AS_MEM_P0_INT_MASK_REG, 0x0000);
/* signal the worker thread to exit and wait for it */
849 if (dev_p->thread_flag == 0) {
850 dev_p->thread_flag = 1;
851 wait_for_completion(&dev_p->thread_complete);
852 cy_as_hal_print_message("cyasomaphal:"
853 "done cleaning thread\n");
854 cy_as_hal_destroy_sleep_channel(&dev_p->thread_sc);
858 cy_as_hal_omap_hardware_deinit(dev_p);
/* unlink from the device list (only head removal visible here) */
863 if (m_omap_list_p == dev_p)
864 m_omap_list_p = dev_p->m_next_p;
866 cy_as_hal_free(dev_p);
868 cy_as_hal_print_message(KERN_INFO"OMAP_kernel_hal stopped\n");
/*
 * Register the Astoria IRQ handler and unmask only the DRQ and
 * mailbox interrupt sources in the WB P0 interrupt mask register.
 */
872 int omap_start_intr(cy_as_hal_device_tag tag)
874 cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;
876 const uint16_t mask = CY_AS_MEM_P0_INTR_REG_DRQINT |
877 CY_AS_MEM_P0_INTR_REG_MBINT;
880 * register for interrupts
882 ret = cy_as_hal_configure_interrupts(dev_p);
885 * enable only MBox & DRQ interrupts for now
887 cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
888 CY_AS_MEM_P0_INT_MASK_REG, mask);
894 * Below are the functions that communicate with the WestBridge device.
895 * These are system dependent and must be defined by the HAL layer
896 * for a given system.
900 * GPMC NAND command+addr write phase
/*
 * Issue the pNAND command + 5-byte address phase (CMD, CA0, CA1,
 * RA0, RA1, RA2) on the GPMC bus. In 8-bit mode the 4 middle
 * address bytes go out as one 32-bit write (GPMC serializes them
 * LSB first); in 16-bit mode each byte must be written separately
 * because the GPMC does not serialize wider writes onto [d7..d0].
 */
902 static inline void nand_cmd_n_addr(u8 cmdb1, u16 col_addr, u32 row_addr)
905 * byte order on the bus <cmd> <CA0,CA1,RA0,RA1, RA2>
907 u32 tmpa32 = ((row_addr << 16) | col_addr);
908 u8 RA2 = (u8)(row_addr >> 16);
912 * GPMC PNAND 8bit BUS
917 IOWR8(ncmd_reg_vma, cmdb1);
920 *pnand bus: <CA0,CA1,RA0,RA1>
922 IOWR32(naddr_reg_vma, tmpa32);
925 * <RA2> , always zero
927 IOWR8(naddr_reg_vma, RA2);
931 * GPMC PNAND 16bit BUS , in 16 bit mode CMD
932 * and ADDR sent on [d7..d0]
934 uint8_t CA0, CA1, RA0, RA1;
935 CA0 = tmpa32 & 0x000000ff;
936 CA1 = (tmpa32 >> 8) & 0x000000ff;
937 RA0 = (tmpa32 >> 16) & 0x000000ff;
938 RA1 = (tmpa32 >> 24) & 0x000000ff;
941 * can't use 32 bit writes here omap will not serialize
942 * them to lower half in 16 bit mode
946 *pnand bus: <CMD1, CA0,CA1,RA0,RA1, RA2 (always zero)>
948 IOWR8(ncmd_reg_vma, cmdb1);
949 IOWR8(naddr_reg_vma, CA0);
950 IOWR8(naddr_reg_vma, CA1);
951 IOWR8(naddr_reg_vma, RA0);
952 IOWR8(naddr_reg_vma, RA1);
953 IOWR8(naddr_reg_vma, RA2);
958 * spin until r/b goes high
/*
 * Wait for the pNAND Ready/Busy (R/B#) line to go high.
 * NOTE(review): the active polling code is commented out in the
 * original (see the TODO below); as visible here the function does
 * not actually spin — confirm against the full source.
 */
960 inline int wait_rn_b_high(void)
965 * TODO: note R/b may go low here, need to spin until high
966 * while (omap_get_gpio_datain(AST_RnB) == 0) {
969 * if (OMAP_GPIO_BIT(AST_RnB, GPIO_DATA_IN) == 0) {
971 * while (OMAP_GPIO_BIT(AST_RnB, GPIO_DATA_IN) == 0) {
974 * printk("<1>RnB=0!:%d\n",w_spins);
/*
 * Compiled only with ENABLE_GPMC_PF_ENGINE: pNAND large block read
 * assisted by the GPMC prefetch engine (PFE). Marked broken/not
 * tested by the author. Flow: reset + program the PFE for 'count'
 * bytes, issue the read page command/address, start the engine,
 * then drain the PFE FIFO with 32-bit reads of the CS data window,
 * finishing with a byte-count-dependent tail drain once COUNTVAL
 * reaches zero.
 */
980 #ifdef ENABLE_GPMC_PF_ENGINE
981 /* #define PFE_READ_DEBUG
982 * PNAND block read with OMAP PFE enabled
983 * status: Not tested, NW, broken , etc
985 static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
990 uint8_t bytes_in_fifo;
993 #ifdef PFE_READ_DEBUG
995 uint16_t bytes_read = 0;
999 * configure the prefetch engine
1002 uint32_t pfe_status;
1005 * DISABLE GPMC CS4 operation 1st, this is
1006 * in case engine may already be enabled
1008 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x0);
1009 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
1010 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);
1012 #ifdef PFE_READ_DEBUG
/* debug-only: verify CONFIG1/CONFIG2 writes stuck */
1013 tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG1));
1014 if (tmp32 != GPMC_PREFETCH_CONFIG1_VAL) {
1015 printk(KERN_INFO "<1> prefetch is CONFIG1 read val:%8.8x, != VAL written:%8.8x\n",
1016 tmp32, GPMC_PREFETCH_CONFIG1_VAL);
1017 tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
1018 printk(KERN_INFO "<1> GPMC_PREFETCH_STATUS : %8.8x\n", tmp32);
1024 tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG2));
1025 if (tmp32 != (count))
1026 printk(KERN_INFO "<1> GPMC_PREFETCH_CONFIG2 read val:%d, "
1027 "!= VAL written:%d\n", tmp32, count);
1031 * ISSUE PNAND CMD+ADDR, note gpmc puts 32b words
1032 * on the bus least sig. byte 1st
1034 nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
1036 IOWR8(ncmd_reg_vma, RDPAGE_B2);
1039 * start the prefetch engine
1041 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);
1047 * GPMC PFE service loop
1051 * spin until PFE fetched some
1052 * PNAND bus words in the FIFO
1054 pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
/* STATUS[30:24] = FIFO byte pointer */
1055 bytes_in_fifo = (pfe_status >> 24) & 0x7f;
1056 } while (bytes_in_fifo == 0);
1058 /* whole 32 bit words in fifo */
1059 w32cnt = bytes_in_fifo >> 2;
1063 *NOTE: FIFO_PTR indicates number of NAND bus words bytes
1064 * already received in the FIFO and available to be read
1065 * by DMA or MPU whether COUNTVAL indicates number of BUS
1066 * words yet to be read from PNAND bus words
1068 printk(KERN_ERR "<1> got PF_STATUS:%8.8x FIFO_PTR:%d, COUNTVAL:%d, w32cnt:%d\n",
1069 pfe_status, bytes_in_fifo,
1070 (pfe_status & 0x3fff), w32cnt);
/* bulk drain: one 32-bit read per whole FIFO word */
1074 *ptr32++ = IORD32(gpmc_data_vma);
/* STATUS[13:0] == 0 -> engine has fetched all 'count' bytes */
1076 if ((pfe_status & 0x3fff) == 0) {
1078 * PFE engine done, there still may be data leftover
1079 * in the FIFO re-read FIFO BYTE counter (check for
1080 * leftovers from 32 bit read accesses above)
1082 bytes_in_fifo = (IORD32(
1083 GPMC_VMA(GPMC_PREFETCH_STATUS)) >> 24) & 0x7f;
1086 * NOTE we may still have one word left in the fifo
/* tail drain: pick access width by leftover byte count */
1090 switch (bytes_in_fifo) {
1094 * nothing to do we already read the
1095 * FIFO out with 32 bit accesses
1100 * this only possible
1101 * for 8 bit pNAND only
1103 *ptr8 = IORD8(gpmc_data_vma);
1108 * this one can occur in either modes
1110 *(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
1115 * this only possible for 8 bit pNAND only
1117 *(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
1119 *ptr8 = IORD8(gpmc_data_vma);
1124 * shouldn't happen, but has been seen
1127 *ptr32 = IORD32(gpmc_data_vma);
1131 printk(KERN_ERR"<1>_error: PFE FIFO bytes leftover is not read:%d\n",
1136 * read is completed, get out of the while(1) loop
/*
 * Compiled only with PFE_LBD_READ_V2: alternative prefetch-engine
 * assisted read. Issues the read command first, then programs and
 * starts the PFE, drains the FIFO in 32-bit words while COUNTVAL
 * (STATUS[13:0]) is non-zero, and byte-drains whatever remains.
 */
1144 #ifdef PFE_LBD_READ_V2
1146 * PFE engine assisted reads with the 64 byte blocks
1148 static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
1154 uint32_t pfe_status;
1157 * ISSUE PNAND CMD+ADDR
1158 * note gpmc puts 32b words on the bus least sig. byte 1st
1160 nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
1161 IOWR8(ncmd_reg_vma, RDPAGE_B2);
1165 * count - OMAP number of bytes to access on pnand bus
1168 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
1169 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);
1170 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);
1175 pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
/* FIFO byte count / 4 -> whole 32-bit words available */
1176 rd_cnt = pfe_status >> (24+2);
1179 *ptr32++ = IORD32(gpmc_data_vma);
1181 } while (pfe_status & 0x3fff);
1184 * read out the leftover
1187 rd_cnt = (IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS)) >> 24) & 0x7f;
1190 *ptr8++ = IORD8(gpmc_data_vma);
/*
 * Default build (PNAND_LBD_READ_NO_PFE): pNAND large block read
 * through the GPMC NAND data register, no prefetch engine.
 * Reads count bytes as whole 32-bit words plus a 0..3 byte
 * remainder handled with 16-bit accesses (an even number of bus
 * bytes is always read).
 */
1194 #ifdef PNAND_LBD_READ_NO_PFE
1196 * Endpoint buffer read w/o OMAP GPMC Prefetch Engine
1197 * the original working code, works at max speed for 8 bit xfers
1198 * for 16 bit the bus diagram has gaps
1200 static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
1207 DBGPRN("<1> %s(): NO_PFE\n", __func__);
1210 /* number of whole 32 bit words in the transfer */
1211 w32cnt = count >> 2;
1213 /* remainder, in bytes(0..3) */
1214 remainder = count & 03;
1217 * note gpmc puts 32b words on the bus least sig. byte 1st
1219 nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
1220 IOWR8(ncmd_reg_vma, RDPAGE_B2);
1223 * read data by 32 bit chunks
1226 *ptr32++ = IORD32(ndata_reg_vma);
1229 * now do the remainder(it can be 0, 1, 2 or 3)
1230 * same code for both 8 & 16 bit bus
1231 * do 1 or 2 MORE words
1233 ptr16 = (uint16_t *)ptr32;
1235 switch (remainder) {
1237 /* read one 16 bit word
1238 * IN 8 BIT WE NEED TO READ even number of bytes
1241 *ptr16 = IORD16(ndata_reg_vma);
1245 * for 3 bytes read 2 16 bit words
1247 *ptr16++ = IORD16(ndata_reg_vma);
1248 *ptr16 = IORD16(ndata_reg_vma);
1260 * uses LBD mode to write N bytes into astoria
1261 * Status: Working, however there are 150ns idle
1262 * time after every 2 (16 bit) or 4 (8 bit) bus cycles
/*
 * pNAND large block write: issue the program-page command/address,
 * stream the buffer out as 32-bit words plus a 0..3 byte remainder
 * (written as one or two 16-bit words), then latch the data with
 * the PGMPAGE_B2 command byte.
 */
1264 static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff)
/* split count into whole 32-bit words + 0..3 byte remainder */
1272 remainder = count & 03;
1273 w32cnt = count >> 2;
1278 * send: CMDB1, CA0,CA1,RA0,RA1,RA2
1280 nand_cmd_n_addr(PGMPAGE_B1, col_addr, row_addr);
1283 * blast the data out in 32bit chunks
1286 IOWR32(ndata_reg_vma, *ptr32++);
1289 * do the remainder if there is one
1290 * same handling for both 8 & 16 bit pnand: mode
1292 ptr16 = (uint16_t *)ptr32; /* do 1 or 2 words */
1294 switch (remainder) {
1297 * write one 16 bit word
1300 IOWR16(ndata_reg_vma, *ptr16);
1305 * for 3 bytes write 2 16 bit words
1307 IOWR16(ndata_reg_vma, *ptr16++);
1308 IOWR16(ndata_reg_vma, *ptr16);
1317 * finally issue a PGM cmd
1319 IOWR8(ncmd_reg_vma, PGMPAGE_B2);
1323 * write Astoria register
/*
 * Write one 16-bit Astoria register via the pNAND CASDI sequence
 * (cmd 0x85, address 0x0c<reg>, then the data word). Runs with
 * local interrupts disabled; the static usage counter only detects
 * (and logs) concurrent entry from another CPU/thread — it does not
 * provide real mutual exclusion.
 */
1325 static inline void ast_p_nand_casdi_write(u8 reg_addr8, u16 data)
1327 unsigned long flags;
1330 * throw an error if called from multiple threads
1332 static atomic_t rdreg_usage_cnt = { 0 };
1335 * disable interrupts
1337 local_irq_save(flags);
/* diagnostic only: report re-entrancy, do not block */
1339 if (atomic_read(&rdreg_usage_cnt) != 0) {
1340 cy_as_hal_print_message(KERN_ERR "cy_as_omap_hal:"
1341 "* cy_as_hal_write_register usage:%d\n",
1342 atomic_read(&rdreg_usage_cnt));
1345 atomic_inc(&rdreg_usage_cnt);
1348 * 2 flavors of GPMC -> PNAND access
1352 * 16 BIT gpmc NAND mode
/* 16-bit mode: cmd/addr bytes go out one at a time on [d7..d0] */
1358 IOWR8(ncmd_reg_vma, 0x85);
1359 IOWR8(naddr_reg_vma, reg_addr8);
1360 IOWR8(naddr_reg_vma, 0x0c);
1363 * this should be sent on the 16 bit bus
1365 IOWR16(ndata_reg_vma, data);
1368 * 8 bit nand mode GPMC will automatically
1369 * serialize 16bit or 32 bit writes into
1370 * 8 bit ones to the lower 8 bit in LE order
1372 addr16 = 0x0c00 | reg_addr8;
1377 IOWR8(ncmd_reg_vma, 0x85);
1378 IOWR16(naddr_reg_vma, addr16);
1379 IOWR16(ndata_reg_vma, data);
1383 * re-enable interrupts
1385 atomic_dec(&rdreg_usage_cnt);
1386 local_irq_restore(flags);
1391 * read astoria register via pNAND interface
/*
 * Read one 16-bit Astoria register via the pNAND CASDO sequence
 * (cmd 0x05, address 0x0c<reg>, cmd 0xE0, then a 16-bit data read).
 * Same interrupt-off + re-entrancy-logging scheme as
 * ast_p_nand_casdi_write(); returns the register value.
 */
1393 static inline u16 ast_p_nand_casdo_read(u8 reg_addr8)
1397 unsigned long flags;
1399 * throw an error if called from multiple threads
1401 static atomic_t wrreg_usage_cnt = { 0 };
1404 * disable interrupts
1406 local_irq_save(flags);
1408 if (atomic_read(&wrreg_usage_cnt) != 0) {
1410 * if it gets here ( from other threads), this function needs
1411 * need spin_lock_irq save() protection
1413 cy_as_hal_print_message(KERN_ERR"cy_as_omap_hal: "
1414 "cy_as_hal_write_register usage:%d\n",
1415 atomic_read(&wrreg_usage_cnt));
1417 atomic_inc(&wrreg_usage_cnt);
1420 * 2 flavors of GPMC -> PNAND access
1424 * 16 BIT gpmc NAND mode
/* 16-bit mode: cmd/addr bytes written individually on [d7..d0] */
1428 IOWR8(ncmd_reg_vma, 0x05);
1429 IOWR8(naddr_reg_vma, reg_addr8);
1430 IOWR8(naddr_reg_vma, 0x0c);
1431 IOWR8(ncmd_reg_vma, 0x00E0);
1436 * much faster through the gPMC Register space
1438 data = IORD16(ndata_reg_vma);
1441 * 8 BIT gpmc NAND mode
1442 * CMD1, CA1, CA2, CMD2
1444 addr16 = 0x0c00 | reg_addr8;
1445 IOWR8(ncmd_reg_vma, 0x05);
1446 IOWR16(naddr_reg_vma, addr16);
1447 IOWR8(ncmd_reg_vma, 0xE0);
1449 data = IORD16(ndata_reg_vma);
1453 * re-enable interrupts
1455 atomic_dec(&wrreg_usage_cnt);
1456 local_irq_restore(flags);
1463 * This function must be defined to write a register within the WestBridge
1464 * device. The addr value is the address of the register to write with
1465 * respect to the base address of the WestBridge device.
/*
 * HAL API: write a 16-bit WestBridge register at offset 'addr'
 * (relative to the device base). Thin wrapper over the pNAND CASDI
 * write sequence; 'tag' is unused here.
 */
1467 void cy_as_hal_write_register(
1468 cy_as_hal_device_tag tag,
1469 uint16_t addr, uint16_t data)
1471 ast_p_nand_casdi_write((u8)addr, data);
1475 * This function must be defined to read a register from the WestBridge
1476 * device. The addr value is the address of the register to read with
1477 * respect to the base address of the WestBridge device.
/*
 * HAL API: read a 16-bit WestBridge register at offset 'addr'
 * (relative to the device base) via the pNAND CASDO sequence;
 * 'tag' is unused here.
 */
1479 uint16_t cy_as_hal_read_register(cy_as_hal_device_tag tag, uint16_t addr)
1484 * READ ASTORIA REGISTER USING CASDO
1486 data = ast_p_nand_casdo_read((u8)addr);
1492 * preps Ep pointers & data counters for next packet
1493 * (fragment of the request) xfer returns true if
1494 * there is a next transfer, and false if all bytes in
1495 * current request have been xfered
/*
 * Advance endpoint 'ep' bookkeeping to its next DMA fragment.
 * Returns true if another transfer is needed, false once the whole
 * request has been transferred.
 * Non-SG endpoints: compare req_xfer_cnt against req_length and set
 * dma_xfer_sz to HAL_DMA_PKT_SZ or the smaller final chunk.
 * SG endpoints: when the current scatterlist segment is consumed,
 * either finish (last segment) or step to the next segment via
 * sg_next()/sg_virt(); SG fragments are always HAL_DMA_PKT_SZ.
 */
1497 static inline bool prep_for_next_xfer(cy_as_hal_device_tag tag, uint8_t ep)
1500 if (!end_points[ep].sg_list_enabled) {
1502 * no further transfers for non storage EPs
1503 * (like EP2 during firmware download, done
1504 * in 64 byte chunks)
1506 if (end_points[ep].req_xfer_cnt >= end_points[ep].req_length) {
1507 DBGPRN("<1> %s():RQ sz:%d non-_sg EP:%d completed\n",
1508 __func__, end_points[ep].req_length, ep);
1511 * no more transfers, we are done with the request
1517 * calculate size of the next DMA xfer, corner
1518 * case for non-storage EPs where transfer size
1519 * is not equal N * HAL_DMA_PKT_SZ xfers
1521 if ((end_points[ep].req_length - end_points[ep].req_xfer_cnt)
1522 >= HAL_DMA_PKT_SZ) {
1523 end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
1526 * that would be the last chunk less
1527 * than P-port max size
1529 end_points[ep].dma_xfer_sz = end_points[ep].req_length -
1530 end_points[ep].req_xfer_cnt;
1537 * for SG_list assisted dma xfers
1538 * are we done with current SG ?
1540 if (end_points[ep].seg_xfer_cnt == end_points[ep].sg_p->length) {
1542 * was it the Last SG segment on the list ?
1544 if (sg_is_last(end_points[ep].sg_p)) {
1545 DBGPRN("<1> %s: EP:%d completed,"
1546 "%d bytes xfered\n",
1549 end_points[ep].req_xfer_cnt
1555 * There are more SG segments in current
1556 * request's sg list setup new segment
1559 end_points[ep].seg_xfer_cnt = 0;
1560 end_points[ep].sg_p = sg_next(end_points[ep].sg_p);
1561 /* set data pointer for next DMA sg transfer*/
1562 end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
1563 DBGPRN("<1> %s new SG:_va:%p\n\n",
1564 __func__, end_points[ep].data_p);
1570 * for sg list xfers it will always be 512 or 1024
1572 end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
1575 * next transfer is required
1582 * Astoria DMA read request, APP_CPU reads from WB ep buffer
1584 static void cy_service_e_p_dma_read_request(
1585 cy_as_omap_dev_kernel *dev_p, uint8_t ep)
1587 cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
1590 uint16_t col_addr = 0x0000;
1591 uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
1592 uint16_t ep_dma_reg = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1595 * get the XFER size frtom WB eP DMA REGISTER
1597 v = cy_as_hal_read_register(tag, ep_dma_reg);
1600 * amount of data in EP buff in bytes
1602 size = v & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK;
1605 * memory pointer for this DMA packet xfer (sub_segment)
1607 dptr = end_points[ep].data_p;
1609 DBGPRN("<1>HAL:_svc_dma_read on EP_%d sz:%d, intr_seq:%d, dptr:%p\n",
1616 cy_as_hal_assert(size != 0);
1620 * the actual WB-->OMAP memory "soft" DMA xfer
1622 p_nand_lbd_read(col_addr, row_addr, size, dptr);
1626 * clear DMAVALID bit indicating that the data has been read
1628 cy_as_hal_write_register(tag, ep_dma_reg, 0);
1630 end_points[ep].seg_xfer_cnt += size;
1631 end_points[ep].req_xfer_cnt += size;
1634 * pre-advance data pointer (if it's outside sg
1635 * list it will be reset anyway
1637 end_points[ep].data_p += size;
1639 if (prep_for_next_xfer(tag, ep)) {
1641 * we have more data to read in this request,
1642 * setup next dma packet due tell WB how much
1643 * data we are going to xfer next
1645 v = end_points[ep].dma_xfer_sz/*HAL_DMA_PKT_SZ*/ |
1646 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
1647 cy_as_hal_write_register(tag, ep_dma_reg, v);
1649 end_points[ep].pending = cy_false;
1650 end_points[ep].type = cy_as_hal_none;
1651 end_points[ep].buffer_valid = cy_false;
1654 * notify the API that we are done with rq on this EP
1657 DBGPRN("<1>trigg rd_dma completion cb: xfer_sz:%d\n",
1658 end_points[ep].req_xfer_cnt);
1660 end_points[ep].req_xfer_cnt,
1661 CY_AS_ERROR_SUCCESS);
1667 * omap_cpu needs to transfer data to ASTORIA EP buffer
1669 static void cy_service_e_p_dma_write_request(
1670 cy_as_omap_dev_kernel *dev_p, uint8_t ep)
1675 uint16_t col_addr = 0x0000;
1676 uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
1679 cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
1681 * note: size here its the size of the dma transfer could be
1682 * anything > 0 && < P_PORT packet size
1684 size = end_points[ep].dma_xfer_sz;
1685 dptr = end_points[ep].data_p;
1688 * perform the soft DMA transfer, soft in this case
1691 p_nand_lbd_write(col_addr, row_addr, size, dptr);
1693 end_points[ep].seg_xfer_cnt += size;
1694 end_points[ep].req_xfer_cnt += size;
1696 * pre-advance data pointer
1697 * (if it's outside sg list it will be reset anyway)
1699 end_points[ep].data_p += size;
1702 * now clear DMAVAL bit to indicate we are done
1703 * transferring data and that the data can now be
1704 * sent via USB to the USB host, sent to storage,
1705 * or used internally.
1708 addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1709 cy_as_hal_write_register(tag, addr, size);
1712 * finally, tell the USB subsystem that the
1713 * data is gone and we can accept the
1714 * next request if one exists.
1716 if (prep_for_next_xfer(tag, ep)) {
1718 * There is more data to go. Re-init the WestBridge DMA side
1720 v = end_points[ep].dma_xfer_sz |
1721 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
1722 cy_as_hal_write_register(tag, addr, v);
1725 end_points[ep].pending = cy_false;
1726 end_points[ep].type = cy_as_hal_none;
1727 end_points[ep].buffer_valid = cy_false;
1730 * notify the API that we are done with rq on this EP
1734 * this callback will wake up the process that might be
1735 * sleeping on the EP which data is being transferred
1738 end_points[ep].req_xfer_cnt,
1739 CY_AS_ERROR_SUCCESS);
1745 * HANDLE DRQINT from Astoria (called in AS_Intr context
1747 static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p)
1750 static uint8_t service_ep = 2;
1753 * We've got DRQ INT, read DRQ STATUS Register */
1754 v = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
1758 #ifndef WESTBRIDGE_NDEBUG
1759 cy_as_hal_print_message("stray DRQ interrupt detected\n");
1765 * Now, pick a given DMA request to handle, for now, we just
1766 * go round robin. Each bit position in the service_mask
1767 * represents an endpoint from EP2 to EP15. We rotate through
1768 * each of the endpoints to find one that needs to be serviced.
1770 while ((v & (1 << service_ep)) == 0) {
1772 if (service_ep == 15)
1778 if (end_points[service_ep].type == cy_as_hal_write) {
1780 * handle DMA WRITE REQUEST: app_cpu will
1781 * write data into astoria EP buffer
1783 cy_service_e_p_dma_write_request(dev_p, service_ep);
1784 } else if (end_points[service_ep].type == cy_as_hal_read) {
1786 * handle DMA READ REQUEST: cpu will
1787 * read EP buffer from Astoria
1789 cy_service_e_p_dma_read_request(dev_p, service_ep);
1791 #ifndef WESTBRIDGE_NDEBUG
1793 cy_as_hal_print_message("cyashalomap:interrupt,"
1794 " w/o pending DMA job,"
1795 "-check DRQ_MASK logic\n");
1799 * Now bump the EP ahead, so other endpoints get
1800 * a shot before the one we just serviced
1802 if (end_points[service_ep].type == cy_as_hal_none) {
1803 if (service_ep == 15)
1811 void cy_as_hal_dma_cancel_request(cy_as_hal_device_tag tag, uint8_t ep)
1813 DBGPRN("cy_as_hal_dma_cancel_request on ep:%d", ep);
1814 if (end_points[ep].pending)
1815 cy_as_hal_write_register(tag,
1816 CY_AS_MEM_P0_EP2_DMA_REG + ep - 2, 0);
1818 end_points[ep].buffer_valid = cy_false;
1819 end_points[ep].type = cy_as_hal_none;
 * enables/disables SG-list assisted DMA xfers for the given EP.
 * sg_list assisted XFERS can use physical addresses of mem pages in case the
 * xfer is performed by a h/w DMA controller rather than the CPU on the P port
1827 void cy_as_hal_set_ep_dma_mode(uint8_t ep, bool sg_xfer_enabled)
1829 end_points[ep].sg_list_enabled = sg_xfer_enabled;
1830 DBGPRN("<1> EP:%d sg_list assisted DMA mode set to = %d\n",
1831 ep, end_points[ep].sg_list_enabled);
1833 EXPORT_SYMBOL(cy_as_hal_set_ep_dma_mode);
1836 * This function must be defined to transfer a block of data to
1837 * the WestBridge device. This function can use the burst write
1838 * (DMA) capabilities of WestBridge to do this, or it can just copy
1839 * the data using writes.
1841 void cy_as_hal_dma_setup_write(cy_as_hal_device_tag tag,
1842 uint8_t ep, void *buf,
1843 uint32_t size, uint16_t maxsize)
1849 * Note: "size" is the actual request size
1850 * "maxsize" - is the P port fragment size
1851 * No EP0 or EP1 traffic should get here
1853 cy_as_hal_assert(ep != 0 && ep != 1);
1856 * If this asserts, we have an ordering problem. Another DMA request
1857 * is coming down before the previous one has completed.
1859 cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
1860 end_points[ep].buffer_valid = cy_true;
1861 end_points[ep].type = cy_as_hal_write;
1862 end_points[ep].pending = cy_true;
1865 * total length of the request
1867 end_points[ep].req_length = size;
1869 if (size >= maxsize) {
1871 * set xfer size for very 1st DMA xfer operation
1872 * port max packet size ( typically 512 or 1024)
1874 end_points[ep].dma_xfer_sz = maxsize;
1877 * smaller xfers for non-storage EPs
1879 end_points[ep].dma_xfer_sz = size;
1883 * check the EP transfer mode uses sg_list rather then a memory buffer
1884 * block devices pass it to the HAL, so the hAL could get to the real
1885 * physical address for each segment and set up a DMA controller
1886 * hardware ( if there is one)
1888 if (end_points[ep].sg_list_enabled) {
1890 * buf - pointer to the SG list
1891 * data_p - data pointer to the 1st DMA segment
1892 * seg_xfer_cnt - keeps track of N of bytes sent in current
1894 * req_xfer_cnt - keeps track of the total N of bytes
1895 * transferred for the request
1897 end_points[ep].sg_p = buf;
1898 end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
1899 end_points[ep].seg_xfer_cnt = 0;
1900 end_points[ep].req_xfer_cnt = 0;
1902 #ifdef DBGPRN_DMA_SETUP_WR
1903 DBGPRN("cyasomaphal:%s: EP:%d, buf:%p, buf_va:%p,"
1904 "req_sz:%d, maxsz:%d\n",
1908 end_points[ep].data_p,
1915 * setup XFER for non sg_list assisted EPs
1918 #ifdef DBGPRN_DMA_SETUP_WR
1919 DBGPRN("<1>%s non storage or sz < 512:"
1920 "EP:%d, sz:%d\n", __func__, ep, size);
1923 end_points[ep].sg_p = NULL;
1926 * must be a VMA of a membuf in kernel space
1928 end_points[ep].data_p = buf;
1931 * will keep track No of bytes xferred for the request
1933 end_points[ep].req_xfer_cnt = 0;
1937 * Tell WB we are ready to send data on the given endpoint
1939 v = (end_points[ep].dma_xfer_sz & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK)
1940 | CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
1942 addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1944 cy_as_hal_write_register(tag, addr, v);
1948 * This function must be defined to transfer a block of data from
1949 * the WestBridge device. This function can use the burst read
1950 * (DMA) capabilities of WestBridge to do this, or it can just
1951 * copy the data using reads.
1953 void cy_as_hal_dma_setup_read(cy_as_hal_device_tag tag,
1954 uint8_t ep, void *buf,
1955 uint32_t size, uint16_t maxsize)
1961 * Note: "size" is the actual request size
1962 * "maxsize" - is the P port fragment size
1963 * No EP0 or EP1 traffic should get here
1965 cy_as_hal_assert(ep != 0 && ep != 1);
1968 * If this asserts, we have an ordering problem.
1969 * Another DMA request is coming down before the
1970 * previous one has completed. we should not get
1971 * new requests if current is still in process
1974 cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
1976 end_points[ep].buffer_valid = cy_true;
1977 end_points[ep].type = cy_as_hal_read;
1978 end_points[ep].pending = cy_true;
1979 end_points[ep].req_xfer_cnt = 0;
1980 end_points[ep].req_length = size;
1982 if (size >= maxsize) {
1984 * set xfer size for very 1st DMA xfer operation
1985 * port max packet size ( typically 512 or 1024)
1987 end_points[ep].dma_xfer_sz = maxsize;
1990 * so that we could handle small xfers on in case
1991 * of non-storage EPs
1993 end_points[ep].dma_xfer_sz = size;
1996 addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1998 if (end_points[ep].sg_list_enabled) {
2000 * Handle sg-list assisted EPs
2001 * seg_xfer_cnt - keeps track of N of sent packets
2002 * buf - pointer to the SG list
2003 * data_p - data pointer for the 1st DMA segment
2005 end_points[ep].seg_xfer_cnt = 0;
2006 end_points[ep].sg_p = buf;
2007 end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
2009 #ifdef DBGPRN_DMA_SETUP_RD
2010 DBGPRN("cyasomaphal:DMA_setup_read sg_list EP:%d, "
2011 "buf:%p, buf_va:%p, req_sz:%d, maxsz:%d\n",
2014 end_points[ep].data_p,
2018 v = (end_points[ep].dma_xfer_sz &
2019 CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
2020 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
2021 cy_as_hal_write_register(tag, addr, v);
2024 * Non sg list EP passed void *buf rather then scatterlist *sg
2026 #ifdef DBGPRN_DMA_SETUP_RD
2027 DBGPRN("%s:non-sg_list EP:%d,"
2028 "RQ_sz:%d, maxsz:%d\n",
2029 __func__, ep, size, maxsize);
2032 end_points[ep].sg_p = NULL;
2035 * must be a VMA of a membuf in kernel space
2037 end_points[ep].data_p = buf;
2040 * Program the EP DMA register for Storage endpoints only.
2042 if (is_storage_e_p(ep)) {
2043 v = (end_points[ep].dma_xfer_sz &
2044 CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
2045 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
2046 cy_as_hal_write_register(tag, addr, v);
2052 * This function must be defined to allow the WB API to
2053 * register a callback function that is called when a
2054 * DMA transfer is complete.
2056 void cy_as_hal_dma_register_callback(cy_as_hal_device_tag tag,
2057 cy_as_hal_dma_complete_callback cb)
2059 DBGPRN("<1>\n%s: WB API has registered a dma_complete callback:%x\n",
2060 __func__, (uint32_t)cb);
2065 * This function must be defined to return the maximum size of
2066 * DMA request that can be handled on the given endpoint. The
2067 * return value should be the maximum size in bytes that the DMA
2068 * module can handle.
2070 uint32_t cy_as_hal_dma_max_request_size(cy_as_hal_device_tag tag,
2071 cy_as_end_point_number_t ep)
2074 * Storage reads and writes are always done in 512 byte blocks.
2075 * So, we do the count handling within the HAL, and save on
2076 * some of the data transfer delay.
2078 if ((ep == CYASSTORAGE_READ_EP_NUM) ||
2079 (ep == CYASSTORAGE_WRITE_EP_NUM)) {
2080 /* max DMA request size HAL can handle by itself */
2081 return CYASSTORAGE_MAX_XFER_SIZE;
2084 * For the USB - Processor endpoints, the maximum transfer
2085 * size depends on the speed of USB operation. So, we use
2086 * the following constant to indicate to the API that
2087 * splitting of the data into chunks less that or equal to
2088 * the max transfer size should be handled internally.
2091 /* DEFINED AS 0xffffffff in cyasdma.h */
2092 return CY_AS_DMA_MAX_SIZE_HW_SIZE;
2097 * This function must be defined to set the state of the WAKEUP pin
2098 * on the WestBridge device. Generally this is done via a GPIO of
2101 cy_bool cy_as_hal_set_wakeup_pin(cy_as_hal_device_tag tag, cy_bool state)
2104 * Not supported as of now.
2109 void cy_as_hal_pll_lock_loss_handler(cy_as_hal_device_tag tag)
2111 cy_as_hal_print_message("error: astoria PLL lock is lost\n");
2112 cy_as_hal_print_message("please check the input voltage levels");
2113 cy_as_hal_print_message("and clock, and restart the system\n");
2117 * Below are the functions that must be defined to provide the basic
2118 * operating system services required by the API.
2122 * This function is required by the API to allocate memory.
2123 * This function is expected to work exactly like malloc().
2125 void *cy_as_hal_alloc(uint32_t cnt)
2127 return kmalloc(cnt, GFP_ATOMIC);
 * This function is required by the API to free memory allocated
 * with CyAsHalAlloc(). This function is expected to work exactly
void cy_as_hal_free(void *mem_p)
{
	kfree(mem_p);
}
2141 * Allocator that can be used in interrupt context.
2142 * We have to ensure that the kmalloc call does not
2143 * sleep in this case.
2145 void *cy_as_hal_c_b_alloc(uint32_t cnt)
2147 return kmalloc(cnt, GFP_ATOMIC);
2151 * This function is required to set a block of memory to a
2152 * specific value. This function is expected to work exactly
/*
 * Fill cnt bytes at ptr with value; thin wrapper over memset(),
 * as required by the API.
 */
void cy_as_hal_mem_set(void *ptr, uint8_t value, uint32_t cnt)
{
	memset(ptr, value, cnt);
}
2161 * This function is expected to create a sleep channel.
2162 * The data structure that represents the sleep channel object
2163 * sleep channel (which is Linux "wait_queue_head_t wq" for this particular HAL)
 * passed as a pointer, and allocated by the caller
2165 * (typically as a local var on the stack) "Create" word should read as
2166 * "SleepOn", this func doesn't actually create anything
2168 cy_bool cy_as_hal_create_sleep_channel(cy_as_hal_sleep_channel *channel)
2170 init_waitqueue_head(&channel->wq);
2175 * for this particular HAL it doesn't actually destroy anything
2176 * since no actual sleep object is created in CreateSleepChannel()
2177 * sleep channel is given by the pointer in the argument.
2179 cy_bool cy_as_hal_destroy_sleep_channel(cy_as_hal_sleep_channel *channel)
2185 * platform specific wakeable Sleep implementation
2187 cy_bool cy_as_hal_sleep_on(cy_as_hal_sleep_channel *channel, uint32_t ms)
2189 wait_event_interruptible_timeout(channel->wq, 0, ((ms * HZ)/1000));
2194 * wakes up the process waiting on the CHANNEL
2196 cy_bool cy_as_hal_wake(cy_as_hal_sleep_channel *channel)
2198 wake_up_interruptible_all(&channel->wq);
2202 uint32_t cy_as_hal_disable_interrupts()
2204 if (0 == intr__enable)
2211 void cy_as_hal_enable_interrupts(uint32_t val)
2214 if (0 == intr__enable)
 * Sleep at least 150 ns, cpu dependent
2221 void cy_as_hal_sleep150(void)
2226 for (i = 0; i < 1000; i++)
2230 void cy_as_hal_sleep(uint32_t ms)
2232 cy_as_hal_sleep_channel channel;
2234 cy_as_hal_create_sleep_channel(&channel);
2235 cy_as_hal_sleep_on(&channel, ms);
2236 cy_as_hal_destroy_sleep_channel(&channel);
2239 cy_bool cy_as_hal_is_polling()
/*
 * Free memory obtained from cy_as_hal_c_b_alloc().
 */
void cy_as_hal_c_b_free(void *ptr)
{
	cy_as_hal_free(ptr);
}
2250 * suppose to reinstate the astoria registers
2251 * that may be clobbered in sleep mode
2253 void cy_as_hal_init_dev_registers(cy_as_hal_device_tag tag,
2254 cy_bool is_standby_wakeup)
2256 /* specific to SPI, no implementation required */
2258 (void) is_standby_wakeup;
2261 void cy_as_hal_read_regs_before_standby(cy_as_hal_device_tag tag)
2263 /* specific to SPI, no implementation required */
2267 cy_bool cy_as_hal_sync_device_clocks(cy_as_hal_device_tag tag)
2270 * we are in asynchronous mode. so no need to handle this
2276 * init OMAP h/w resources
2278 int start_o_m_a_p_kernel(const char *pgm,
2279 cy_as_hal_device_tag *tag, cy_bool debug)
2281 cy_as_omap_dev_kernel *dev_p;
2287 * No debug mode support through argument as of now
2291 DBGPRN(KERN_INFO"starting OMAP34xx HAL...\n");
2294 * Initialize the HAL level endpoint DMA data.
2296 for (i = 0; i < sizeof(end_points)/sizeof(end_points[0]); i++) {
2297 end_points[i].data_p = 0;
2298 end_points[i].pending = cy_false;
2299 end_points[i].size = 0;
2300 end_points[i].type = cy_as_hal_none;
2301 end_points[i].sg_list_enabled = cy_false;
2304 * by default the DMA transfers to/from the E_ps don't
2305 * use sg_list that implies that the upper devices like
2306 * blockdevice have to enable it for the E_ps in their
2307 * initialization code
2312 * allocate memory for OMAP HAL
2314 dev_p = (cy_as_omap_dev_kernel *)cy_as_hal_alloc(
2315 sizeof(cy_as_omap_dev_kernel));
2317 cy_as_hal_print_message("out of memory allocating OMAP"
2318 "device structure\n");
2322 dev_p->m_sig = CY_AS_OMAP_KERNEL_HAL_SIG;
2325 * initialize OMAP hardware and StartOMAPKernelall gpio pins
2327 dev_p->m_addr_base = (void *)cy_as_hal_processor_hw_init();
2330 * Now perform a hard reset of the device to have
2331 * the new settings take effect
2333 __gpio_set_value(AST_WAKEUP, 1);
2336 * do Astoria h/w reset
2338 DBGPRN(KERN_INFO"-_-_pulse -> westbridge RST pin\n");
2341 * NEGATIVE PULSE on RST pin
2343 __gpio_set_value(AST_RESET, 0);
2345 __gpio_set_value(AST_RESET, 1);
2349 * note AFTER reset PNAND interface is 8 bit mode
2350 * so if gpmc Is configured in 8 bit mode upper half will be FF
2352 pncfg_reg = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);
2354 #ifdef PNAND_16BIT_MODE
2357 * switch to 16 bit mode, force NON-LNA LBD mode, 3 RA addr bytes
2359 ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0001);
2362 * now in order to continue to talk to astoria
2363 * sw OMAP GPMC into 16 bit mode as well
2365 cy_as_hal_gpmc_enable_16bit_bus(cy_true);
2367 /* Astoria and GPMC are already in 8 bit mode, just initialize PNAND_CFG */
2368 ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0000);
2372 * NOTE: if you want to capture bus activity on the LA,
2373 * don't use printks in between the activities you want to capture.
2374 * prinks may take milliseconds, and the data of interest
2375 * will fall outside the LA capture window/buffer
2377 data16[0] = ast_p_nand_casdo_read(CY_AS_MEM_CM_WB_CFG_ID);
2378 data16[1] = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);
2380 if (data16[0] != 0xA200) {
2382 * astoria device is not found
2384 printk(KERN_ERR "ERROR: astoria device is not found, CY_AS_MEM_CM_WB_CFG_ID ");
2385 printk(KERN_ERR "read returned:%4.4X: CY_AS_MEM_PNAND_CFG:%4.4x !\n",
2386 data16[0], data16[0]);
2390 cy_as_hal_print_message(KERN_INFO" register access CASDO test:"
2391 "\n CY_AS_MEM_CM_WB_CFG_ID:%4.4x\n"
2392 "PNAND_CFG after RST:%4.4x\n "
2393 "CY_AS_MEM_PNAND_CFG"
2394 "after cfg_wr:%4.4x\n\n",
2395 data16[0], pncfg_reg, data16[1]);
2397 dev_p->thread_flag = 1;
2398 spin_lock_init(&int_lock);
2399 dev_p->m_next_p = m_omap_list_p;
2401 m_omap_list_p = dev_p;
2404 cy_as_hal_configure_interrupts((void *)dev_p);
2406 cy_as_hal_print_message(KERN_INFO"OMAP3430__hal started tag:%p"
2407 ", kernel HZ:%d\n", dev_p, HZ);
2410 *make processor to storage endpoints SG assisted by default
2412 cy_as_hal_set_ep_dma_mode(4, true);
2413 cy_as_hal_set_ep_dma_mode(8, true);
2418 * there's been a NAND bus access error or
2419 * astoria device is not connected
2423 * at this point hal tag hasn't been set yet
2424 * so the device will not call omap_stop
2426 cy_as_hal_omap_hardware_deinit(dev_p);
2427 cy_as_hal_free(dev_p);
2433 * Some compilers do not like empty C files, so if the OMAP hal is not being
2434 * compiled, we compile this single function. We do this so that for a
2435 * given target HAL there are not multiple sources for the HAL functions.
2437 void my_o_m_a_p_kernel_hal_dummy_function(void)