1/*
2 * drivers/spi/amba-pl022.c
3 *
4 * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
5 *
6 * Copyright (C) 2008-2009 ST-Ericsson AB
7 * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
8 *
9 * Author: Linus Walleij <linus.walleij@stericsson.com>
10 *
11 * Initial version inspired by:
12 * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
13 * Initial adaptation to PL022 by:
14 * Sachin Verma <sachin.verma@st.com>
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 */
26
27/*
28 * TODO:
29 * - add timeout on polled transfers
30 * - add generic DMA framework support
31 */
32
33#include <linux/init.h>
34#include <linux/module.h>
35#include <linux/device.h>
36#include <linux/ioport.h>
37#include <linux/errno.h>
38#include <linux/interrupt.h>
39#include <linux/spi/spi.h>
40#include <linux/workqueue.h>
42#include <linux/delay.h>
43#include <linux/clk.h>
44#include <linux/err.h>
45#include <linux/amba/bus.h>
46#include <linux/amba/pl022.h>
47#include <linux/io.h>
49
50/*
51 * This macro is used to define some register default values.
52 * reg is masked with mask, then OR:ed with an (again masked)
53 * val shifted sb steps to the left.
54 */
55#define SSP_WRITE_BITS(reg, val, mask, sb) \
56 ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
57
58/*
59 * This macro is also used to define some default values.
60 * It will just shift val by sb steps to the left and mask
61 * the result with mask.
62 */
63#define GEN_MASK_BITS(val, mask, sb) \
64 (((val)<<(sb)) & (mask))
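/*
 * For example (purely illustrative), setting a 12-bit data size in the
 * DSS field of CR0 with the macros above:
 *
 *   SSP_WRITE_BITS(chip->cr0, SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0);
 *
 * expands to chip->cr0 = (chip->cr0 & ~0x1F) | ((SSP_DATA_BITS_12 << 0) & 0x1F),
 * i.e. only the masked bit field is replaced and the other CR0 bits are kept.
 */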
65
66#define DRIVE_TX 0
67#define DO_NOT_DRIVE_TX 1
68
69#define DO_NOT_QUEUE_DMA 0
70#define QUEUE_DMA 1
71
72#define RX_TRANSFER 1
73#define TX_TRANSFER 2
74
75/*
76 * Macros to access SSP Registers with their offsets
77 */
78#define SSP_CR0(r) (r + 0x000)
79#define SSP_CR1(r) (r + 0x004)
80#define SSP_DR(r) (r + 0x008)
81#define SSP_SR(r) (r + 0x00C)
82#define SSP_CPSR(r) (r + 0x010)
83#define SSP_IMSC(r) (r + 0x014)
84#define SSP_RIS(r) (r + 0x018)
85#define SSP_MIS(r) (r + 0x01C)
86#define SSP_ICR(r) (r + 0x020)
87#define SSP_DMACR(r) (r + 0x024)
88#define SSP_ITCR(r) (r + 0x080)
89#define SSP_ITIP(r) (r + 0x084)
90#define SSP_ITOP(r) (r + 0x088)
91#define SSP_TDR(r) (r + 0x08C)
92
93#define SSP_PID0(r) (r + 0xFE0)
94#define SSP_PID1(r) (r + 0xFE4)
95#define SSP_PID2(r) (r + 0xFE8)
96#define SSP_PID3(r) (r + 0xFEC)
97
98#define SSP_CID0(r) (r + 0xFF0)
99#define SSP_CID1(r) (r + 0xFF4)
100#define SSP_CID2(r) (r + 0xFF8)
101#define SSP_CID3(r) (r + 0xFFC)
102
103/*
104 * SSP Control Register 0 - SSP_CR0
105 */
106#define SSP_CR0_MASK_DSS (0x1FUL << 0)
107#define SSP_CR0_MASK_HALFDUP (0x1UL << 5)
108#define SSP_CR0_MASK_SPO (0x1UL << 6)
109#define SSP_CR0_MASK_SPH (0x1UL << 7)
110#define SSP_CR0_MASK_SCR (0xFFUL << 8)
111#define SSP_CR0_MASK_CSS (0x1FUL << 16)
112#define SSP_CR0_MASK_FRF (0x3UL << 21)
113
114/*
115 * SSP Control Register 1 - SSP_CR1
116 */
117#define SSP_CR1_MASK_LBM (0x1UL << 0)
118#define SSP_CR1_MASK_SSE (0x1UL << 1)
119#define SSP_CR1_MASK_MS (0x1UL << 2)
120#define SSP_CR1_MASK_SOD (0x1UL << 3)
121#define SSP_CR1_MASK_RENDN (0x1UL << 4)
122#define SSP_CR1_MASK_TENDN (0x1UL << 5)
123#define SSP_CR1_MASK_MWAIT (0x1UL << 6)
124#define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7)
125#define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10)
126
127/*
128 * SSP Data Register - SSP_DR
129 */
130#define SSP_DR_MASK_DATA 0xFFFFFFFF
131
132/*
133 * SSP Status Register - SSP_SR
134 */
135#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
136#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
137#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
138#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
139#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
140
141/*
142 * SSP Clock Prescale Register - SSP_CPSR
143 */
144#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
145
146/*
147 * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
148 */
149#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
150#define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */
151#define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */
152#define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */
153
154/*
155 * SSP Raw Interrupt Status Register - SSP_RIS
156 */
157/* Receive Overrun Raw Interrupt status */
158#define SSP_RIS_MASK_RORRIS (0x1UL << 0)
159/* Receive Timeout Raw Interrupt status */
160#define SSP_RIS_MASK_RTRIS (0x1UL << 1)
161/* Receive FIFO Raw Interrupt status */
162#define SSP_RIS_MASK_RXRIS (0x1UL << 2)
163/* Transmit FIFO Raw Interrupt status */
164#define SSP_RIS_MASK_TXRIS (0x1UL << 3)
165
166/*
167 * SSP Masked Interrupt Status Register - SSP_MIS
168 */
169/* Receive Overrun Masked Interrupt status */
170#define SSP_MIS_MASK_RORMIS (0x1UL << 0)
171/* Receive Timeout Masked Interrupt status */
172#define SSP_MIS_MASK_RTMIS (0x1UL << 1)
173/* Receive FIFO Masked Interrupt status */
174#define SSP_MIS_MASK_RXMIS (0x1UL << 2)
175/* Transmit FIFO Masked Interrupt status */
176#define SSP_MIS_MASK_TXMIS (0x1UL << 3)
177
178/*
179 * SSP Interrupt Clear Register - SSP_ICR
180 */
181/* Receive Overrun Raw Clear Interrupt bit */
182#define SSP_ICR_MASK_RORIC (0x1UL << 0)
183/* Receive Timeout Clear Interrupt bit */
184#define SSP_ICR_MASK_RTIC (0x1UL << 1)
185
186/*
187 * SSP DMA Control Register - SSP_DMACR
188 */
189/* Receive DMA Enable bit */
190#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
191/* Transmit DMA Enable bit */
192#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
193
194/*
195 * SSP Integration Test control Register - SSP_ITCR
196 */
197#define SSP_ITCR_MASK_ITEN (0x1UL << 0)
198#define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
199
200/*
201 * SSP Integration Test Input Register - SSP_ITIP
202 */
203#define ITIP_MASK_SSPRXD (0x1UL << 0)
204#define ITIP_MASK_SSPFSSIN (0x1UL << 1)
205#define ITIP_MASK_SSPCLKIN (0x1UL << 2)
206#define ITIP_MASK_RXDMAC (0x1UL << 3)
207#define ITIP_MASK_TXDMAC (0x1UL << 4)
208#define ITIP_MASK_SSPTXDIN (0x1UL << 5)
209
210/*
211 * SSP Integration Test output Register - SSP_ITOP
212 */
213#define ITOP_MASK_SSPTXD (0x1UL << 0)
214#define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
215#define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
216#define ITOP_MASK_SSPOEn (0x1UL << 3)
217#define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
218#define ITOP_MASK_RORINTR (0x1UL << 5)
219#define ITOP_MASK_RTINTR (0x1UL << 6)
220#define ITOP_MASK_RXINTR (0x1UL << 7)
221#define ITOP_MASK_TXINTR (0x1UL << 8)
222#define ITOP_MASK_INTR (0x1UL << 9)
223#define ITOP_MASK_RXDMABREQ (0x1UL << 10)
224#define ITOP_MASK_RXDMASREQ (0x1UL << 11)
225#define ITOP_MASK_TXDMABREQ (0x1UL << 12)
226#define ITOP_MASK_TXDMASREQ (0x1UL << 13)
227
228/*
229 * SSP Test Data Register - SSP_TDR
230 */
231#define TDR_MASK_TESTDATA (0xFFFFFFFF)
232
233/*
234 * Message State
235 * we use the spi_message.state (void *) pointer to
236 * hold a single state value, that's why all this
237 * (void *) casting is done here.
238 */
239#define STATE_START ((void *) 0)
240#define STATE_RUNNING ((void *) 1)
241#define STATE_DONE ((void *) 2)
242#define STATE_ERROR ((void *) -1)
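/*
 * Typical lifetime of a message: STATE_START when queued, STATE_RUNNING
 * while its transfers are pumped, then STATE_DONE (or STATE_ERROR on any
 * failure); see pump_messages(), pump_transfers() and next_transfer() below.
 */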
243
244/*
245 * Queue State
246 */
247#define QUEUE_RUNNING (0)
248#define QUEUE_STOPPED (1)
249/*
250 * SSP State - Whether Enabled or Disabled
251 */
252#define SSP_DISABLED (0)
253#define SSP_ENABLED (1)
254
255/*
256 * SSP DMA State - Whether DMA Enabled or Disabled
257 */
258#define SSP_DMA_DISABLED (0)
259#define SSP_DMA_ENABLED (1)
260
261/*
262 * SSP Clock Defaults
263 */
264#define NMDK_SSP_DEFAULT_CLKRATE 0x2
265#define NMDK_SSP_DEFAULT_PRESCALE 0x40
266
267/*
268 * SSP Clock Parameter ranges
269 */
270#define CPSDVR_MIN 0x02
271#define CPSDVR_MAX 0xFE
272#define SCR_MIN 0x00
273#define SCR_MAX 0xFF
274
275/*
276 * SSP Interrupt related Macros
277 */
278#define DEFAULT_SSP_REG_IMSC 0x0UL
279#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
280#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)
281
282#define CLEAR_ALL_INTERRUPTS 0x3
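/* 0x3 == SSP_ICR_MASK_RORIC | SSP_ICR_MASK_RTIC, i.e. both clearable sources */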
283
284
285/*
286 * The type of reading going on on this chip
287 */
288enum ssp_reading {
289 READING_NULL,
290 READING_U8,
291 READING_U16,
292 READING_U32
293};
294
295/*
296 * The type of writing going on on this chip
297 */
298enum ssp_writing {
299 WRITING_NULL,
300 WRITING_U8,
301 WRITING_U16,
302 WRITING_U32
303};
304
305/**
306 * struct vendor_data - vendor-specific config parameters
307 * for PL022 derivatives
308 * @fifodepth: depth of FIFOs (both)
309 * @max_bpw: maximum number of bits per word
310 * @unidir: supports unidirectional transfers
311 */
312struct vendor_data {
313 int fifodepth;
314 int max_bpw;
315 bool unidir;
316};
317
318/**
319 * struct pl022 - This is the private SSP driver data structure
320 * @adev: AMBA device model hookup
321 * @phybase: The physical memory where the SSP device resides
322 * @virtbase: The virtual memory where the SSP is mapped
323 * @master: SPI framework hookup
324 * @master_info: controller-specific data from machine setup
325 * @clk: pointer to the clock for this SSP block
326 * @pump_messages: Work struct for scheduling work to the workqueue
327 * @queue_lock: spinlock to synchronise access to driver data
328 * @workqueue: a workqueue on which any spi_message request is queued
329 * @busy: workqueue is busy
330 * @run: workqueue is running
331 * @pump_transfers: Tasklet used in Interrupt Transfer mode
332 * @cur_msg: Pointer to current spi_message being processed
333 * @cur_transfer: Pointer to current spi_transfer
334 * @cur_chip: pointer to current client's chip (assigned from controller_state)
335 * @tx: current position in TX buffer to be read
336 * @tx_end: end position in TX buffer to be read
337 * @rx: current position in RX buffer to be written
338 * @rx_end: end position in RX buffer to be written
339 * @read: the type of read currently going on
340 * @write: the type of write currently going on
341 */
342struct pl022 {
343 struct amba_device *adev;
344 struct vendor_data *vendor;
345 resource_size_t phybase;
346 void __iomem *virtbase;
347 struct clk *clk;
348 struct spi_master *master;
349 struct pl022_ssp_controller *master_info;
350 /* Driver message queue */
351 struct workqueue_struct *workqueue;
352 struct work_struct pump_messages;
353 spinlock_t queue_lock;
354 struct list_head queue;
355 int busy;
356 int run;
357 /* Message transfer pump */
358 struct tasklet_struct pump_transfers;
359 struct spi_message *cur_msg;
360 struct spi_transfer *cur_transfer;
361 struct chip_data *cur_chip;
362 void *tx;
363 void *tx_end;
364 void *rx;
365 void *rx_end;
366 enum ssp_reading read;
367 enum ssp_writing write;
368};
369
370/**
371 * struct chip_data - To maintain runtime state of SSP for each client chip
372 * @cr0: Value of control register CR0 of SSP
373 * @cr1: Value of control register CR1 of SSP
374 * @dmacr: Value of DMA control Register of SSP
375 * @cpsr: Value of Clock prescale register
376 * @n_bytes: how many bytes (a power of 2) required for a given data width of the client
377 * @enable_dma: Whether to enable DMA or not
378 * @write: function ptr to be used to write when doing xfer for this chip
379 * @read: function ptr to be used to read when doing xfer for this chip
380 * @cs_control: chip select callback provided by chip
381 * @xfer_type: polling/interrupt/DMA
382 *
383 * Runtime state of the SSP controller, maintained per chip.
384 * This is set up according to the current message being served.
385 */
386struct chip_data {
387 u16 cr0;
388 u16 cr1;
389 u16 dmacr;
390 u16 cpsr;
391 u8 n_bytes;
392 u8 enable_dma:1;
393 enum ssp_reading read;
394 enum ssp_writing write;
395 void (*cs_control) (u32 command);
396 int xfer_type;
397};
398
399/**
400 * null_cs_control - Dummy chip select function
401 * @command: select/deselect the chip
402 *
403 * If no chip select function is provided by the client, this is used
404 * as a dummy chip select.
405 */
406static void null_cs_control(u32 command)
407{
408 pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
409}
410
411/**
412 * giveback - current spi_message is over, schedule next message and call
413 * callback of this message. Assumes that caller already
414 * set message->status; dma and pio irqs are blocked
415 * @pl022: SSP driver private data structure
416 */
417static void giveback(struct pl022 *pl022)
418{
419 struct spi_transfer *last_transfer;
420 unsigned long flags;
421 struct spi_message *msg;
422 void (*curr_cs_control) (u32 command);
423
424 /*
425 * This local reference to the chip select function
426 * is needed because we set curr_chip to NULL
427 * as a step toward terminating the message.
428 */
429 curr_cs_control = pl022->cur_chip->cs_control;
430 spin_lock_irqsave(&pl022->queue_lock, flags);
431 msg = pl022->cur_msg;
432 pl022->cur_msg = NULL;
433 pl022->cur_transfer = NULL;
434 pl022->cur_chip = NULL;
435 queue_work(pl022->workqueue, &pl022->pump_messages);
436 spin_unlock_irqrestore(&pl022->queue_lock, flags);
437
438 last_transfer = list_entry(msg->transfers.prev,
439 struct spi_transfer,
440 transfer_list);
441
442 /* Delay if requested before any change in chip select */
443 if (last_transfer->delay_usecs)
444 /*
445 * FIXME: This runs in interrupt context.
446 * Is this really smart?
447 */
448 udelay(last_transfer->delay_usecs);
449
450 /*
451 * Drop chip select UNLESS cs_change is true or we are returning
452 * a message with an error, or next message is for another chip
453 */
454 if (!last_transfer->cs_change)
455 curr_cs_control(SSP_CHIP_DESELECT);
456 else {
457 struct spi_message *next_msg;
458
459 /* Holding of cs was hinted, but we need to make sure
460 * the next message is for the same chip. Don't waste
461 * time with the following tests unless this was hinted.
462 *
463 * We cannot postpone this until pump_messages, because
464 * after calling msg->complete (below) the driver that
465 * sent the current message could be unloaded, which
466 * could invalidate the cs_control() callback...
467 */
468
469 /* get a pointer to the next message, if any */
470 spin_lock_irqsave(&pl022->queue_lock, flags);
471 if (list_empty(&pl022->queue))
472 next_msg = NULL;
473 else
474 next_msg = list_entry(pl022->queue.next,
475 struct spi_message, queue);
476 spin_unlock_irqrestore(&pl022->queue_lock, flags);
477
478 /* see if the next and current messages point
479 * to the same chip
480 */
481 if (next_msg && next_msg->spi != msg->spi)
482 next_msg = NULL;
483 if (!next_msg || msg->state == STATE_ERROR)
484 curr_cs_control(SSP_CHIP_DESELECT);
485 }
486 msg->state = NULL;
487 if (msg->complete)
488 msg->complete(msg->context);
489 /* This message is completed, so let's turn off the clock! */
490 clk_disable(pl022->clk);
491}
492
493/**
494 * flush - flush the FIFO to reach a clean state
495 * @pl022: SSP driver private data structure
496 */
497static int flush(struct pl022 *pl022)
498{
499 unsigned long limit = loops_per_jiffy << 1;
500
501 dev_dbg(&pl022->adev->dev, "flush\n");
502 do {
503 while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
504 readw(SSP_DR(pl022->virtbase));
505 } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
506 return limit;
507}
508
509/**
510 * restore_state - Load configuration of current chip
511 * @pl022: SSP driver private data structure
512 */
513static void restore_state(struct pl022 *pl022)
514{
515 struct chip_data *chip = pl022->cur_chip;
516
517 writew(chip->cr0, SSP_CR0(pl022->virtbase));
518 writew(chip->cr1, SSP_CR1(pl022->virtbase));
519 writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
520 writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
521 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
522 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
523}
524
525/**
526 * load_ssp_default_config - Load default configuration for SSP
527 * @pl022: SSP driver private data structure
528 */
529
530/*
531 * Default SSP Register Values
532 */
533#define DEFAULT_SSP_REG_CR0 ( \
534 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
535 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
536 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
537 GEN_MASK_BITS(SSP_CLK_FALLING_EDGE, SSP_CR0_MASK_SPH, 7) | \
538 GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
539 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \
540 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
541)
542
543#define DEFAULT_SSP_REG_CR1 ( \
544 GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
545 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
546 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
547 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
548 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \
549 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \
550 GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\
551 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \
552 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \
553)
554
555#define DEFAULT_SSP_REG_CPSR ( \
556 GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
557)
558
559#define DEFAULT_SSP_REG_DMACR (\
560 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
561 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
562)
563
564
565static void load_ssp_default_config(struct pl022 *pl022)
566{
567 writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
568 writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
569 writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
570 writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
571 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
572 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
573}
574
575/**
576 * This will write to TX and read from RX according to the parameters
577 * set in pl022.
578 */
579static void readwriter(struct pl022 *pl022)
580{
581
582 /*
583 * The FIFO depth differs between primecell variants.
584 * I believe filling in too much in the FIFO might cause
585 * errors in 8bit wide transfers on ARM variants (just 8 words
586 * FIFO, means only 8x8 = 64 bits in FIFO) at least.
587 *
588 * FIXME: currently we have no logic to account for this.
589 * perhaps there is even something broken in HW regarding
590 * 8bit transfers (it doesn't fail on 16bit) so this needs
591 * more investigation...
592 */
593 dev_dbg(&pl022->adev->dev,
594 "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
595 __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
596
597 /* Read as much as you can */
598 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
599 && (pl022->rx < pl022->rx_end)) {
600 switch (pl022->read) {
601 case READING_NULL:
602 readw(SSP_DR(pl022->virtbase));
603 break;
604 case READING_U8:
605 *(u8 *) (pl022->rx) =
606 readw(SSP_DR(pl022->virtbase)) & 0xFFU;
607 break;
608 case READING_U16:
609 *(u16 *) (pl022->rx) =
610 (u16) readw(SSP_DR(pl022->virtbase));
611 break;
612 case READING_U32:
613 *(u32 *) (pl022->rx) =
614 readl(SSP_DR(pl022->virtbase));
615 break;
616 }
617 pl022->rx += (pl022->cur_chip->n_bytes);
618 }
619 /*
620 * Write as much as you can, while keeping an eye on the RX FIFO!
621 */
622 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
623 && (pl022->tx < pl022->tx_end)) {
624 switch (pl022->write) {
625 case WRITING_NULL:
626 writew(0x0, SSP_DR(pl022->virtbase));
627 break;
628 case WRITING_U8:
629 writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
630 break;
631 case WRITING_U16:
632 writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
633 break;
634 case WRITING_U32:
635 writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
636 break;
637 }
638 pl022->tx += (pl022->cur_chip->n_bytes);
639 /*
640 * This inner reader takes care of things appearing in the RX
641 * FIFO as we're transmitting. This will happen a lot since the
642 * clock starts running when you put things into the TX FIFO,
643 * and then things are continuously clocked into the RX FIFO.
644 */
645 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
646 && (pl022->rx < pl022->rx_end)) {
647 switch (pl022->read) {
648 case READING_NULL:
649 readw(SSP_DR(pl022->virtbase));
650 break;
651 case READING_U8:
652 *(u8 *) (pl022->rx) =
653 readw(SSP_DR(pl022->virtbase)) & 0xFFU;
654 break;
655 case READING_U16:
656 *(u16 *) (pl022->rx) =
657 (u16) readw(SSP_DR(pl022->virtbase));
658 break;
659 case READING_U32:
660 *(u32 *) (pl022->rx) =
661 readl(SSP_DR(pl022->virtbase));
662 break;
663 }
664 pl022->rx += (pl022->cur_chip->n_bytes);
665 }
666 }
667 /*
668 * When we exit here the TX FIFO should be full and the RX FIFO
669 * should be empty
670 */
671}
672
673
674/**
675 * next_transfer - Move to the Next transfer in the current spi message
676 * @pl022: SSP driver private data structure
677 *
678 * This function moves through the linked list of spi transfers in the
679 * current spi message and returns with the state of the current spi
680 * message, i.e. whether its last transfer is done (STATE_DONE) or the
681 * next transfer is ready (STATE_RUNNING)
682 */
683static void *next_transfer(struct pl022 *pl022)
684{
685 struct spi_message *msg = pl022->cur_msg;
686 struct spi_transfer *trans = pl022->cur_transfer;
687
688 /* Move to next transfer */
689 if (trans->transfer_list.next != &msg->transfers) {
690 pl022->cur_transfer =
691 list_entry(trans->transfer_list.next,
692 struct spi_transfer, transfer_list);
693 return STATE_RUNNING;
694 }
695 return STATE_DONE;
696}
697/**
698 * pl022_interrupt_handler - Interrupt handler for SSP controller
699 *
700 * This function handles interrupts generated for an interrupt based transfer.
701 * If a receive overrun (ROR) interrupt occurs then we disable the SSP, flag the
702 * current message's state as STATE_ERROR and schedule the tasklet
703 * pump_transfers which will do the postprocessing of the current message by
704 * calling giveback(). Otherwise it reads data from the RX FIFO until there is no
705 * more data, and writes data into the TX FIFO while it is not full. If we complete
706 * the transfer we move to the next transfer and schedule the tasklet.
707 */
708static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
709{
710 struct pl022 *pl022 = dev_id;
711 struct spi_message *msg = pl022->cur_msg;
712 u16 irq_status = 0;
713 u16 flag = 0;
714
715 if (unlikely(!msg)) {
716 dev_err(&pl022->adev->dev,
717 "bad message state in interrupt handler");
718 /* Never fail */
719 return IRQ_HANDLED;
720 }
721
722 /* Read the Interrupt Status Register */
723 irq_status = readw(SSP_MIS(pl022->virtbase));
724
725 if (unlikely(!irq_status))
726 return IRQ_NONE;
727
728 /* This handles the error code interrupts */
729 if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
730 /*
731 * Overrun interrupt - bail out since our Data has been
732 * corrupted
733 */
734 dev_err(&pl022->adev->dev,
735 "FIFO overrun\n");
736 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
737 dev_err(&pl022->adev->dev,
738 "RXFIFO is full\n");
739 if (!(readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF))
740 dev_err(&pl022->adev->dev,
741 "TXFIFO is full\n");
742
743 /*
744 * Disable and clear interrupts, disable SSP,
745 * mark message with bad status so it can be
746 * retried.
747 */
748 writew(DISABLE_ALL_INTERRUPTS,
749 SSP_IMSC(pl022->virtbase));
750 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
751 writew((readw(SSP_CR1(pl022->virtbase)) &
752 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
753 msg->state = STATE_ERROR;
754
755 /* Schedule message queue handler */
756 tasklet_schedule(&pl022->pump_transfers);
757 return IRQ_HANDLED;
758 }
759
760 readwriter(pl022);
761
762 if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
763 flag = 1;
764 /* Disable Transmit interrupt */
765 writew(readw(SSP_IMSC(pl022->virtbase)) &
766 (~SSP_IMSC_MASK_TXIM),
767 SSP_IMSC(pl022->virtbase));
768 }
769
770 /*
771 * Since all transactions must write as much as shall be read,
772 * we can conclude the entire transaction once RX is complete.
773 * At this point, all TX will always be finished.
774 */
775 if (pl022->rx >= pl022->rx_end) {
776 writew(DISABLE_ALL_INTERRUPTS,
777 SSP_IMSC(pl022->virtbase));
778 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
779 if (unlikely(pl022->rx > pl022->rx_end)) {
780 dev_warn(&pl022->adev->dev, "read %u surplus "
781 "bytes (did you request an odd "
782 "number of bytes on a 16bit bus?)\n",
783 (u32) (pl022->rx - pl022->rx_end));
784 }
785 /* Update total bytes transferred */
786 msg->actual_length += pl022->cur_transfer->len;
787 if (pl022->cur_transfer->cs_change)
788 pl022->cur_chip->
789 cs_control(SSP_CHIP_DESELECT);
790 /* Move to next transfer */
791 msg->state = next_transfer(pl022);
792 tasklet_schedule(&pl022->pump_transfers);
793 return IRQ_HANDLED;
794 }
795
796 return IRQ_HANDLED;
797}
798
799/**
800 * This sets up the pointers to memory for the next transfer to
801 * send out on the SPI bus.
802 */
803static int set_up_next_transfer(struct pl022 *pl022,
804 struct spi_transfer *transfer)
805{
806 int residue;
807
808 /* Sanity check the message for this bus width */
809 residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
810 if (unlikely(residue != 0)) {
811 dev_err(&pl022->adev->dev,
812 "message of %u bytes to transmit but the current "
813 "chip bus has a data width of %u bytes!\n",
814 pl022->cur_transfer->len,
815 pl022->cur_chip->n_bytes);
816 dev_err(&pl022->adev->dev, "skipping this message\n");
817 return -EIO;
818 }
819 pl022->tx = (void *)transfer->tx_buf;
820 pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
821 pl022->rx = (void *)transfer->rx_buf;
822 pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
823 pl022->write =
824 pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
825 pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
826 return 0;
827}
828
829/**
830 * pump_transfers - Tasklet function which schedules next interrupt transfer
831 * when running in interrupt transfer mode.
832 * @data: SSP driver private data structure
833 *
834 */
835static void pump_transfers(unsigned long data)
836{
837 struct pl022 *pl022 = (struct pl022 *) data;
838 struct spi_message *message = NULL;
839 struct spi_transfer *transfer = NULL;
840 struct spi_transfer *previous = NULL;
841
842 /* Get current state information */
843 message = pl022->cur_msg;
844 transfer = pl022->cur_transfer;
845
846 /* Handle for abort */
847 if (message->state == STATE_ERROR) {
848 message->status = -EIO;
849 giveback(pl022);
850 return;
851 }
852
853 /* Handle end of message */
854 if (message->state == STATE_DONE) {
855 message->status = 0;
856 giveback(pl022);
857 return;
858 }
859
860 /* Delay if requested at end of transfer before CS change */
861 if (message->state == STATE_RUNNING) {
862 previous = list_entry(transfer->transfer_list.prev,
863 struct spi_transfer,
864 transfer_list);
865 if (previous->delay_usecs)
866 /*
867 * FIXME: This runs in interrupt context.
868 * Is this really smart?
869 */
870 udelay(previous->delay_usecs);
871
872 /* Drop chip select only if cs_change is requested */
873 if (previous->cs_change)
874 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
875 } else {
876 /* STATE_START */
877 message->state = STATE_RUNNING;
878 }
879
880 if (set_up_next_transfer(pl022, transfer)) {
881 message->state = STATE_ERROR;
882 message->status = -EIO;
883 giveback(pl022);
884 return;
885 }
886 /* Flush the FIFOs and let's go! */
887 flush(pl022);
888 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
889}
890
891/**
892 * NOT IMPLEMENTED
893 * configure_dma - It configures the DMA pipes for DMA transfers
894 * @data: SSP driver's private data structure
895 *
896 */
897static int configure_dma(void *data)
898{
899 struct pl022 *pl022 = data;
900 dev_dbg(&pl022->adev->dev, "configure DMA\n");
901 return -ENOTSUPP;
902}
903
904/**
905 * do_dma_transfer - It handles transfers of the current message
906 * if it is DMA xfer.
907 * NOT FULLY IMPLEMENTED
908 * @data: SSP driver's private data structure
909 */
910static void do_dma_transfer(void *data)
911{
912 struct pl022 *pl022 = data;
913
914 if (configure_dma(data)) {
915 dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n");
916 goto err_config_dma;
917 }
918
919 /* TODO: Implement DMA setup of pipes here */
920
921 /* Enable target chip, set up transfer */
922 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
923 if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
924 /* Error path */
925 pl022->cur_msg->state = STATE_ERROR;
926 pl022->cur_msg->status = -EIO;
927 giveback(pl022);
928 return;
929 }
930 /* Enable SSP */
931 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
932 SSP_CR1(pl022->virtbase));
933
934 /* TODO: Enable the DMA transfer here */
935 return;
936
937 err_config_dma:
938 pl022->cur_msg->state = STATE_ERROR;
939 pl022->cur_msg->status = -EIO;
940 giveback(pl022);
941 return;
942}
943
944static void do_interrupt_transfer(void *data)
945{
946 struct pl022 *pl022 = data;
947
948 /* Enable target chip */
949 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
950 if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
951 /* Error path */
952 pl022->cur_msg->state = STATE_ERROR;
953 pl022->cur_msg->status = -EIO;
954 giveback(pl022);
955 return;
956 }
957 /* Enable SSP, turn on interrupts */
958 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
959 SSP_CR1(pl022->virtbase));
960 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
961}
962
963static void do_polling_transfer(void *data)
964{
965 struct pl022 *pl022 = data;
966 struct spi_message *message = NULL;
967 struct spi_transfer *transfer = NULL;
968 struct spi_transfer *previous = NULL;
969 struct chip_data *chip;
970
971 chip = pl022->cur_chip;
972 message = pl022->cur_msg;
973
974 while (message->state != STATE_DONE) {
975 /* Handle for abort */
976 if (message->state == STATE_ERROR)
977 break;
978 transfer = pl022->cur_transfer;
979
980 /* Delay if requested at end of transfer */
981 if (message->state == STATE_RUNNING) {
982 previous =
983 list_entry(transfer->transfer_list.prev,
984 struct spi_transfer, transfer_list);
985 if (previous->delay_usecs)
986 udelay(previous->delay_usecs);
987 if (previous->cs_change)
988 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
989 } else {
990 /* STATE_START */
991 message->state = STATE_RUNNING;
992 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
993 }
994
995 /* Configuration Changing Per Transfer */
996 if (set_up_next_transfer(pl022, transfer)) {
997 /* Error path */
998 message->state = STATE_ERROR;
999 break;
1000 }
1001 /* Flush FIFOs and enable SSP */
1002 flush(pl022);
1003 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
1004 SSP_CR1(pl022->virtbase));
1005
1006 dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... \n");
1007 /* FIXME: insert a timeout so we don't hang here indefinately */
1008 while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
1009 readwriter(pl022);
1010
1011 /* Update total bytes transferred */
1012 message->actual_length += pl022->cur_transfer->len;
1013 if (pl022->cur_transfer->cs_change)
1014 pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
1015 /* Move to next transfer */
1016 message->state = next_transfer(pl022);
1017 }
1018
1019 /* Handle end of message */
1020 if (message->state == STATE_DONE)
1021 message->status = 0;
1022 else
1023 message->status = -EIO;
1024
1025 giveback(pl022);
1026 return;
1027}
1028
1029/**
1030 * pump_messages - Workqueue function which processes spi message queue
1031 * @data: pointer to private data of SSP driver
1032 *
1033 * This function checks if there is any spi message in the queue that
1034 * needs processing and delegates control to the appropriate function,
1035 * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer(),
1036 * based on the kind of transfer
1037 *
1038 */
1039static void pump_messages(struct work_struct *work)
1040{
1041 struct pl022 *pl022 =
1042 container_of(work, struct pl022, pump_messages);
1043 unsigned long flags;
1044
1045 /* Lock queue and check for queue work */
1046 spin_lock_irqsave(&pl022->queue_lock, flags);
1047 if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
1048 pl022->busy = 0;
1049 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1050 return;
1051 }
1052 /* Make sure we are not already running a message */
1053 if (pl022->cur_msg) {
1054 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1055 return;
1056 }
1057 /* Extract head of queue */
1058 pl022->cur_msg =
1059 list_entry(pl022->queue.next, struct spi_message, queue);
1060
1061 list_del_init(&pl022->cur_msg->queue);
1062 pl022->busy = 1;
1063 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1064
1065 /* Initial message state */
1066 pl022->cur_msg->state = STATE_START;
1067 pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
1068 struct spi_transfer,
1069 transfer_list);
1070
1071 /* Setup the SPI using the per chip configuration */
1072 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
1073 /*
1074 * We enable the clock here, then the clock will be disabled when
1075 * giveback() is called in each method (poll/interrupt/DMA)
1076 */
1077 clk_enable(pl022->clk);
1078 restore_state(pl022);
1079 flush(pl022);
1080
1081 if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
1082 do_polling_transfer(pl022);
1083 else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER)
1084 do_interrupt_transfer(pl022);
1085 else
1086 do_dma_transfer(pl022);
1087}
1088
1089
1090static int __init init_queue(struct pl022 *pl022)
1091{
1092 INIT_LIST_HEAD(&pl022->queue);
1093 spin_lock_init(&pl022->queue_lock);
1094
1095 pl022->run = QUEUE_STOPPED;
1096 pl022->busy = 0;
1097
1098 tasklet_init(&pl022->pump_transfers,
1099 pump_transfers, (unsigned long)pl022);
1100
1101 INIT_WORK(&pl022->pump_messages, pump_messages);
1102 pl022->workqueue = create_singlethread_workqueue(
1103 dev_name(pl022->master->dev.parent));
1104 if (pl022->workqueue == NULL)
1105 return -EBUSY;
1106
1107 return 0;
1108}
1109
1110
1111static int start_queue(struct pl022 *pl022)
1112{
1113 unsigned long flags;
1114
1115 spin_lock_irqsave(&pl022->queue_lock, flags);
1116
1117 if (pl022->run == QUEUE_RUNNING || pl022->busy) {
1118 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1119 return -EBUSY;
1120 }
1121
1122 pl022->run = QUEUE_RUNNING;
1123 pl022->cur_msg = NULL;
1124 pl022->cur_transfer = NULL;
1125 pl022->cur_chip = NULL;
1126 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1127
1128 queue_work(pl022->workqueue, &pl022->pump_messages);
1129
1130 return 0;
1131}
1132
1133
1134static int stop_queue(struct pl022 *pl022)
1135{
1136 unsigned long flags;
1137 unsigned limit = 500;
1138 int status = 0;
1139
1140 spin_lock_irqsave(&pl022->queue_lock, flags);
1141
1142 /* This is a bit lame, but is optimized for the common execution path.
1143 * A wait_queue on the pl022->busy could be used, but then the common
1144 * execution path (pump_messages) would be required to call wake_up or
1145 * friends on every SPI message. Do this instead */
1146 pl022->run = QUEUE_STOPPED;
1147 while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
1148 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1149 msleep(10);
1150 spin_lock_irqsave(&pl022->queue_lock, flags);
1151 }
1152
1153 if (!list_empty(&pl022->queue) || pl022->busy)
1154 status = -EBUSY;
1155
1156 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1157
1158 return status;
1159}
1160
1161static int destroy_queue(struct pl022 *pl022)
1162{
1163 int status;
1164
1165 status = stop_queue(pl022);
1166 /* we are unloading the module or failing to load (only two calls
1167 * to this routine), and neither call can handle a return value.
1168 * However, destroy_workqueue calls flush_workqueue, and that will
1169 * block until all work is done. If the reason that stop_queue
1170 * timed out is that the work will never finish, then it does no
1171 * good to call destroy_workqueue, so return anyway. */
1172 if (status != 0)
1173 return status;
1174
1175 destroy_workqueue(pl022->workqueue);
1176
1177 return 0;
1178}
1179
1180static int verify_controller_parameters(struct pl022 *pl022,
1181 struct pl022_config_chip *chip_info)
1182{
1183 if ((chip_info->lbm != LOOPBACK_ENABLED)
1184 && (chip_info->lbm != LOOPBACK_DISABLED)) {
1185 dev_err(chip_info->dev,
1186 "loopback Mode is configured incorrectly\n");
1187 return -EINVAL;
1188 }
1189 if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
1190 || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
1191 dev_err(chip_info->dev,
1192 "interface is configured incorrectly\n");
1193 return -EINVAL;
1194 }
1195 if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
1196 (!pl022->vendor->unidir)) {
1197 dev_err(chip_info->dev,
1198 "unidirectional mode not supported in this "
1199 "hardware version\n");
1200 return -EINVAL;
1201 }
1202 if ((chip_info->hierarchy != SSP_MASTER)
1203 && (chip_info->hierarchy != SSP_SLAVE)) {
1204 dev_err(chip_info->dev,
1205 "hierarchy is configured incorrectly\n");
1206 return -EINVAL;
1207 }
1208 if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN)
1209 || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) {
1210 dev_err(chip_info->dev,
1211 "cpsdvsr is configured incorrectly\n");
1212 return -EINVAL;
1213 }
1214 if ((chip_info->endian_rx != SSP_RX_MSB)
1215 && (chip_info->endian_rx != SSP_RX_LSB)) {
1216 dev_err(chip_info->dev,
1217 "RX FIFO endianess is configured incorrectly\n");
1218 return -EINVAL;
1219 }
1220 if ((chip_info->endian_tx != SSP_TX_MSB)
1221 && (chip_info->endian_tx != SSP_TX_LSB)) {
1222 dev_err(chip_info->dev,
1223 "TX FIFO endianess is configured incorrectly\n");
1224 return -EINVAL;
1225 }
1226 if ((chip_info->data_size < SSP_DATA_BITS_4)
1227 || (chip_info->data_size > SSP_DATA_BITS_32)) {
1228 dev_err(chip_info->dev,
1229 "DATA Size is configured incorrectly\n");
1230 return -EINVAL;
1231 }
1232 if ((chip_info->com_mode != INTERRUPT_TRANSFER)
1233 && (chip_info->com_mode != DMA_TRANSFER)
1234 && (chip_info->com_mode != POLLING_TRANSFER)) {
1235 dev_err(chip_info->dev,
1236 "Communication mode is configured incorrectly\n");
1237 return -EINVAL;
1238 }
1239 if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
1240 || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
1241 dev_err(chip_info->dev,
1242 "RX FIFO Trigger Level is configured incorrectly\n");
1243 return -EINVAL;
1244 }
1245 if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
1246 || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
1247 dev_err(chip_info->dev,
1248 "TX FIFO Trigger Level is configured incorrectly\n");
1249 return -EINVAL;
1250 }
1251 if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
1252 if ((chip_info->clk_phase != SSP_CLK_RISING_EDGE)
1253 && (chip_info->clk_phase != SSP_CLK_FALLING_EDGE)) {
1254 dev_err(chip_info->dev,
1255 "Clock Phase is configured incorrectly\n");
1256 return -EINVAL;
1257 }
1258 if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW)
1259 && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) {
1260 dev_err(chip_info->dev,
1261 "Clock Polarity is configured incorrectly\n");
1262 return -EINVAL;
1263 }
1264 }
1265 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
1266 if ((chip_info->ctrl_len < SSP_BITS_4)
1267 || (chip_info->ctrl_len > SSP_BITS_32)) {
1268 dev_err(chip_info->dev,
1269 "CTRL LEN is configured incorrectly\n");
1270 return -EINVAL;
1271 }
1272 if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
1273 && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
1274 dev_err(chip_info->dev,
1275 "Wait State is configured incorrectly\n");
1276 return -EINVAL;
1277 }
1278 if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1279 && (chip_info->duplex !=
1280 SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
1281 dev_err(chip_info->dev,
1282 "DUPLEX is configured incorrectly\n");
1283 return -EINVAL;
1284 }
1285 }
1286 if (chip_info->cs_control == NULL) {
1287 dev_warn(chip_info->dev,
1288 "Chip Select Function is NULL for this chip\n");
1289 chip_info->cs_control = null_cs_control;
1290 }
1291 return 0;
1292}
1293
1294/**
1295 * pl022_transfer - transfer function registered to SPI master framework
1296 * @spi: spi device which is requesting transfer
1297 * @msg: spi message which is to be handled; it is queued to the driver queue
1298 *
1299 * This function is registered to the SPI framework for this SPI master
1300 * controller. It will queue the spi_message in the queue of driver if
1301 * the queue is not stopped and return.
1302 */
1303static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
1304{
1305 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1306 unsigned long flags;
1307
1308 spin_lock_irqsave(&pl022->queue_lock, flags);
1309
1310 if (pl022->run == QUEUE_STOPPED) {
1311 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1312 return -ESHUTDOWN;
1313 }
1314 msg->actual_length = 0;
1315 msg->status = -EINPROGRESS;
1316 msg->state = STATE_START;
1317
1318 list_add_tail(&msg->queue, &pl022->queue);
1319 if (pl022->run == QUEUE_RUNNING && !pl022->busy)
1320 queue_work(pl022->workqueue, &pl022->pump_messages);
1321
1322 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1323 return 0;
1324}
1325
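/*
 * The effective SSP transmit clock is rate / (cpsdvsr * (1 + scr)), with
 * cpsdvsr an even prescale divisor in [CPSDVR_MIN..CPSDVR_MAX] and scr in
 * [SCR_MIN..SCR_MAX]. Purely illustrative example, assuming a 48 MHz SSP
 * clock: a requested 8 MHz yields cpsdvsr = 2, scr = 2, since
 * 48 MHz / (2 * (1 + 2)) = 8 MHz.
 */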
1326static int calculate_effective_freq(struct pl022 *pl022,
1327 int freq,
1328 struct ssp_clock_params *clk_freq)
1329{
1330 /* Let's calculate the frequency parameters */
1331 u16 cpsdvsr = 2;
1332 u16 scr = 0;
1333 bool freq_found = false;
1334 u32 rate;
1335 u32 max_tclk;
1336 u32 min_tclk;
1337
1338 rate = clk_get_rate(pl022->clk);
1339 /* cpsdvsr = 2 & scr = 0 */
1340 max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN)));
1341 /* cpsdvsr = 254 & scr = 255 */
1342 min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX)));
1343
1344 if ((freq <= max_tclk) && (freq >= min_tclk)) {
1345 while (cpsdvsr <= CPSDVR_MAX && !freq_found) {
1346 while (scr <= SCR_MAX && !freq_found) {
1347 if ((rate /
1348 (cpsdvsr * (1 + scr))) > freq)
1349 scr += 1;
1350 else {
1351 /*
1352 * This bool is made true when
1353 * effective frequency >=
1354 * target frequency is found
1355 */
1356 freq_found = true;
1357 if ((rate /
1358 (cpsdvsr * (1 + scr))) != freq) {
1359 if (scr == SCR_MIN) {
1360 cpsdvsr -= 2;
1361 scr = SCR_MAX;
1362 } else
1363 scr -= 1;
1364 }
1365 }
1366 }
1367 if (!freq_found) {
1368 cpsdvsr += 2;
1369 scr = SCR_MIN;
1370 }
1371 }
1372 if (cpsdvsr != 0) {
1373 dev_dbg(&pl022->adev->dev,
1374 "SSP Effective Frequency is %u\n",
1375 (rate / (cpsdvsr * (1 + scr))));
1376 clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF);
1377 clk_freq->scr = (u8) (scr & 0xFF);
1378 dev_dbg(&pl022->adev->dev,
1379 "SSP cpsdvsr = %d, scr = %d\n",
1380 clk_freq->cpsdvsr, clk_freq->scr);
1381 }
1382 } else {
1383 dev_err(&pl022->adev->dev,
1384 "controller data is incorrect: out of range frequency");
1385 return -EINVAL;
1386 }
1387 return 0;
1388}
1389
1390/**
1391 * NOT IMPLEMENTED
1392 * process_dma_info - Processes the DMA info provided by client drivers
1393 * @chip_info: chip info provided by client device
1394 * @chip: Runtime state maintained by the SSP controller for each spi device
1395 *
1396 * This function processes and stores DMA config provided by client driver
1397 * into the runtime state maintained by the SSP controller driver
1398 */
1399static int process_dma_info(struct pl022_config_chip *chip_info,
1400 struct chip_data *chip)
1401{
1402 dev_err(chip_info->dev,
1403 "cannot process DMA info, DMA not implemented!\n");
1404 return -ENOTSUPP;
1405}
1406
1407/**
1408 * pl022_setup - setup function registered to SPI master framework
1409 * @spi: spi device which is requesting setup
1410 *
1411 * This function is registered to the SPI framework for this SPI master
1412 * controller. If this is the first time setup is called for this device,
1413 * this function will initialize the runtime state for this chip and save
1414 * the same in the device structure. Else it will update the runtime info
1415 * with the updated chip info. Nothing is really being written to the
1416 * controller hardware here; that is not done until the actual transfers
1417 * commence.
1418 */
1419
1420/* FIXME: JUST GUESSING the spi->mode bits understood by this driver */
1421#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
1422 | SPI_LSB_FIRST | SPI_LOOP)
1423
1424static int pl022_setup(struct spi_device *spi)
1425{
1426 struct pl022_config_chip *chip_info;
1427 struct chip_data *chip;
1428 int status = 0;
1429 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1430
1431 if (spi->mode & ~MODEBITS) {
1432 dev_dbg(&spi->dev, "unsupported mode bits %x\n",
1433 spi->mode & ~MODEBITS);
1434 return -EINVAL;
1435 }
1436
1437 if (!spi->max_speed_hz)
1438 return -EINVAL;
1439
1440 /* Get controller_state if one is supplied */
1441 chip = spi_get_ctldata(spi);
1442
1443 if (chip == NULL) {
1444 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1445 if (!chip) {
1446 dev_err(&spi->dev,
1447 "cannot allocate controller state\n");
1448 return -ENOMEM;
1449 }
1450 dev_dbg(&spi->dev,
1451 "allocated memory for controller's runtime state\n");
1452 }
1453
1454 /* Get controller data if one is supplied */
1455 chip_info = spi->controller_data;
1456
1457 if (chip_info == NULL) {
1458 /* spi_board_info.controller_data is not supplied */
1459 dev_dbg(&spi->dev,
1460 "using default controller_data settings\n");
1461
1462 chip_info =
1463 kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL);
1464
1465 if (!chip_info) {
1466 dev_err(&spi->dev,
1467 "cannot allocate controller data\n");
1468 status = -ENOMEM;
1469 goto err_first_setup;
1470 }
1471
1472 dev_dbg(&spi->dev, "allocated memory for controller data\n");
1473
1474 /* Pointer back to the SPI device */
1475 chip_info->dev = &spi->dev;
1476 /*
1477 * Set controller data default values:
1478 * Polling is supported by default
1479 */
1480 chip_info->lbm = LOOPBACK_DISABLED;
1481 chip_info->com_mode = POLLING_TRANSFER;
1482 chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI;
1483 chip_info->hierarchy = SSP_SLAVE;
1484 chip_info->slave_tx_disable = DO_NOT_DRIVE_TX;
1485 chip_info->endian_tx = SSP_TX_LSB;
1486 chip_info->endian_rx = SSP_RX_LSB;
1487 chip_info->data_size = SSP_DATA_BITS_12;
1488 chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
1489 chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
1490 chip_info->clk_phase = SSP_CLK_FALLING_EDGE;
1491 chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
1492 chip_info->ctrl_len = SSP_BITS_8;
1493 chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
1494 chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX;
1495 chip_info->cs_control = null_cs_control;
1496 } else {
1497 dev_dbg(&spi->dev,
1498 "using user supplied controller_data settings\n");
1499 }
1500
1501 /*
1502 * We can override with custom divisors, else we use the board
1503 * frequency setting
1504 */
1505 if ((0 == chip_info->clk_freq.cpsdvsr)
1506 && (0 == chip_info->clk_freq.scr)) {
1507 status = calculate_effective_freq(pl022,
1508 spi->max_speed_hz,
1509 &chip_info->clk_freq);
1510 if (status < 0)
1511 goto err_config_params;
1512 } else {
1513 if ((chip_info->clk_freq.cpsdvsr % 2) != 0)
1514 chip_info->clk_freq.cpsdvsr =
1515 chip_info->clk_freq.cpsdvsr - 1;
1516 }
1517 status = verify_controller_parameters(pl022, chip_info);
1518 if (status) {
1519 dev_err(&spi->dev, "controller data is incorrect");
1520 goto err_config_params;
1521 }
1522 /* Now set controller state based on controller data */
1523 chip->xfer_type = chip_info->com_mode;
1524 chip->cs_control = chip_info->cs_control;
1525
1526 if (chip_info->data_size <= 8) {
1527 dev_dbg(&spi->dev, "1 <= n <=8 bits per word\n");
1528 chip->n_bytes = 1;
1529 chip->read = READING_U8;
1530 chip->write = WRITING_U8;
1531 } else if (chip_info->data_size <= 16) {
1532 dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
1533 chip->n_bytes = 2;
1534 chip->read = READING_U16;
1535 chip->write = WRITING_U16;
1536 } else {
1537 if (pl022->vendor->max_bpw >= 32) {
1538 dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
1539 chip->n_bytes = 4;
1540 chip->read = READING_U32;
1541 chip->write = WRITING_U32;
1542 } else {
1543 dev_err(&spi->dev,
1544 "illegal data size for this controller!\n");
1545 dev_err(&spi->dev,
1546 "a standard pl022 can only handle "
1547 "1 <= n <= 16 bit words\n");
1548 goto err_config_params;
1549 }
1550 }
1551
1552 /* Now Initialize all register settings required for this chip */
1553 chip->cr0 = 0;
1554 chip->cr1 = 0;
1555 chip->dmacr = 0;
1556 chip->cpsr = 0;
1557 if ((chip_info->com_mode == DMA_TRANSFER)
1558 && ((pl022->master_info)->enable_dma)) {
1559 chip->enable_dma = 1;
1560 dev_dbg(&spi->dev, "DMA mode set in controller state\n");
1561 status = process_dma_info(chip_info, chip);
1562 if (status < 0)
1563 goto err_config_params;
1564 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1565 SSP_DMACR_MASK_RXDMAE, 0);
1566 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1567 SSP_DMACR_MASK_TXDMAE, 1);
1568 } else {
1569 chip->enable_dma = 0;
1570 dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
1571 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1572 SSP_DMACR_MASK_RXDMAE, 0);
1573 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1574 SSP_DMACR_MASK_TXDMAE, 1);
1575 }
1576
1577 chip->cpsr = chip_info->clk_freq.cpsdvsr;
1578
1579 SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0);
1580 SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5);
1581 SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
1582 SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
1583 SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
1584 SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16);
1585 SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21);
1586 SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
1587 SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
1588 SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
1589 SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
1590 SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4);
1591 SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5);
1592 SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6);
1593 SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7);
1594 SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10);
1595
1596 /* Save controller_state */
1597 spi_set_ctldata(spi, chip);
1598 return status;
1599 err_config_params:
1600 err_first_setup:
1601 kfree(chip);
1602 return status;
1603}
1604
1605/**
1606 * pl022_cleanup - cleanup function registered to SPI master framework
1607 * @spi: spi device which is requesting cleanup
1608 *
1609 * This function is registered to the SPI framework for this SPI master
1610 * controller. It will free the runtime state of chip.
1611 */
1612static void pl022_cleanup(struct spi_device *spi)
1613{
1614 struct chip_data *chip = spi_get_ctldata(spi);
1615
1616 spi_set_ctldata(spi, NULL);
1617 kfree(chip);
1618}
1619
1620
1621static int __init
1622pl022_probe(struct amba_device *adev, struct amba_id *id)
1623{
1624 struct device *dev = &adev->dev;
1625 struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
1626 struct spi_master *master;
1627 struct pl022 *pl022 = NULL; /*Data for this driver */
1628 int status = 0;
1629
1630 dev_info(&adev->dev,
1631 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
1632 if (platform_info == NULL) {
1633 dev_err(&adev->dev, "probe - no platform data supplied\n");
1634 status = -ENODEV;
1635 goto err_no_pdata;
1636 }
1637
1638 /* Allocate master with space for data */
1639 master = spi_alloc_master(dev, sizeof(struct pl022));
1640 if (master == NULL) {
1641 dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
1642 status = -ENOMEM;
1643 goto err_no_master;
1644 }
1645
1646 pl022 = spi_master_get_devdata(master);
1647 pl022->master = master;
1648 pl022->master_info = platform_info;
1649 pl022->adev = adev;
1650 pl022->vendor = id->data;
1651
1652 /*
1653 * Bus number which has been assigned to this SSP controller
1654 * on this board
1655 */
1656 master->bus_num = platform_info->bus_id;
1657 master->num_chipselect = platform_info->num_chipselect;
1658 master->cleanup = pl022_cleanup;
1659 master->setup = pl022_setup;
1660 master->transfer = pl022_transfer;
1661
1662 dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
1663
1664 status = amba_request_regions(adev, NULL);
1665 if (status)
1666 goto err_no_ioregion;
1667
1668 pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
1669 if (pl022->virtbase == NULL) {
1670 status = -ENOMEM;
1671 goto err_no_ioremap;
1672 }
1673 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
1674 adev->res.start, pl022->virtbase);
1675
1676 pl022->clk = clk_get(&adev->dev, NULL);
1677 if (IS_ERR(pl022->clk)) {
1678 status = PTR_ERR(pl022->clk);
1679 dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
1680 goto err_no_clk;
1681 }
1682
1683 /* Disable SSP */
1684 clk_enable(pl022->clk);
1685 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
1686 SSP_CR1(pl022->virtbase));
1687 load_ssp_default_config(pl022);
1688 clk_disable(pl022->clk);
1689
1690 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
1691 pl022);
1692 if (status < 0) {
1693 dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
1694 goto err_no_irq;
1695 }
1696 /* Initialize and start queue */
1697 status = init_queue(pl022);
1698 if (status != 0) {
1699 dev_err(&adev->dev, "probe - problem initializing queue\n");
1700 goto err_init_queue;
1701 }
1702 status = start_queue(pl022);
1703 if (status != 0) {
1704 dev_err(&adev->dev, "probe - problem starting queue\n");
1705 goto err_start_queue;
1706 }
1707 /* Register with the SPI framework */
1708 amba_set_drvdata(adev, pl022);
1709 status = spi_register_master(master);
1710 if (status != 0) {
1711 dev_err(&adev->dev,
1712 "probe - problem registering spi master\n");
1713 goto err_spi_register;
1714 }
1715 dev_dbg(dev, "probe succeded\n");
1716 return 0;
1717
1718 err_spi_register:
1719 err_start_queue:
1720 err_init_queue:
1721 destroy_queue(pl022);
1722 free_irq(adev->irq[0], pl022);
1723 err_no_irq:
1724 clk_put(pl022->clk);
1725 err_no_clk:
1726 iounmap(pl022->virtbase);
1727 err_no_ioremap:
1728 amba_release_regions(adev);
1729 err_no_ioregion:
1730 spi_master_put(master);
1731 err_no_master:
1732 err_no_pdata:
1733 return status;
1734}
1735
1736static int __exit
1737pl022_remove(struct amba_device *adev)
1738{
1739 struct pl022 *pl022 = amba_get_drvdata(adev);
1740 int status = 0;
1741 if (!pl022)
1742 return 0;
1743
1744 /* Remove the queue */
1745 status = destroy_queue(pl022);
1746 if (status != 0) {
1747 dev_err(&adev->dev,
1748 "queue remove failed (%d)\n", status);
1749 return status;
1750 }
1751 load_ssp_default_config(pl022);
1752 free_irq(adev->irq[0], pl022);
1753 clk_disable(pl022->clk);
1754 clk_put(pl022->clk);
1755 iounmap(pl022->virtbase);
1756 amba_release_regions(adev);
1757 tasklet_disable(&pl022->pump_transfers);
1758 spi_unregister_master(pl022->master);
1759 spi_master_put(pl022->master);
1760 amba_set_drvdata(adev, NULL);
1761 dev_dbg(&adev->dev, "remove succeded\n");
1762 return 0;
1763}
1764
1765#ifdef CONFIG_PM
1766static int pl022_suspend(struct amba_device *adev, pm_message_t state)
1767{
1768 struct pl022 *pl022 = amba_get_drvdata(adev);
1769 int status = 0;
1770
1771 status = stop_queue(pl022);
1772 if (status) {
1773 dev_warn(&adev->dev, "suspend cannot stop queue\n");
1774 return status;
1775 }
1776
1777 clk_enable(pl022->clk);
1778 load_ssp_default_config(pl022);
1779 clk_disable(pl022->clk);
1780 dev_dbg(&adev->dev, "suspended\n");
1781 return 0;
1782}
1783
1784static int pl022_resume(struct amba_device *adev)
1785{
1786 struct pl022 *pl022 = amba_get_drvdata(adev);
1787 int status = 0;
1788
1789 /* Start the queue running */
1790 status = start_queue(pl022);
1791 if (status)
1792 dev_err(&adev->dev, "problem starting queue (%d)\n", status);
1793 else
1794 dev_dbg(&adev->dev, "resumed\n");
1795
1796 return status;
1797}
1798#else
1799#define pl022_suspend NULL
1800#define pl022_resume NULL
1801#endif /* CONFIG_PM */
1802
1803static struct vendor_data vendor_arm = {
1804 .fifodepth = 8,
1805 .max_bpw = 16,
1806 .unidir = false,
1807};
1808
1809
1810static struct vendor_data vendor_st = {
1811 .fifodepth = 32,
1812 .max_bpw = 32,
1813 .unidir = false,
1814};
1815
1816static struct amba_id pl022_ids[] = {
1817 {
1818 /*
1819 * ARM PL022 variant, this has a 16bit wide
1820 * and 8 locations deep TX/RX FIFO
1821 */
1822 .id = 0x00041022,
1823 .mask = 0x000fffff,
1824 .data = &vendor_arm,
1825 },
1826 {
1827 /*
1828 * ST Micro derivative, this has 32bit wide
1829 * and 32 locations deep TX/RX FIFO
1830 */
1831 .id = 0x00108022,
1832 .mask = 0xffffffff,
1833 .data = &vendor_st,
1834 },
1835 { 0, 0 },
1836};
1837
1838static struct amba_driver pl022_driver = {
1839 .drv = {
1840 .name = "ssp-pl022",
1841 },
1842 .id_table = pl022_ids,
1843 .probe = pl022_probe,
1844 .remove = __exit_p(pl022_remove),
1845 .suspend = pl022_suspend,
1846 .resume = pl022_resume,
1847};
1848
1849
1850static int __init pl022_init(void)
1851{
1852 return amba_driver_register(&pl022_driver);
1853}
1854
1855module_init(pl022_init);
1856
1857static void __exit pl022_exit(void)
1858{
1859 amba_driver_unregister(&pl022_driver);
1860}
1861
1862module_exit(pl022_exit);
1863
1864MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
1865MODULE_DESCRIPTION("PL022 SSP Controller Driver");
1866MODULE_LICENSE("GPL");