22: Newly-added code has been compiled with `gcc -W'. This will generate
lots of noise, but is good for finding bugs like "warning: comparison
between signed and unsigned" (see the example below).
+
+23: Tested after it has been merged into the -mm patchset to make sure
+ that it still works with all of the other queued patches and various
+ changes in the VM, VFS, and other subsystems.
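
For illustration, a minimal (hypothetical) function that triggers the
signed/unsigned warning mentioned in point 22:

#include <stddef.h>

int count_zeros(const int *arr, size_t len)
{
	int i, n = 0;

	/* gcc -W warns here: i is signed, len is unsigned, and a
	 * negative i would be converted to a very large unsigned
	 * value before the comparison */
	for (i = 0; i < len; i++)
		if (arr[i] == 0)
			n++;
	return n;
}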
Introduction
------------
- The S3C2410 supports a low-power suspend mode, where the SDRAM is kept
+ The S3C24XX supports a low-power suspend mode, where the SDRAM is kept
in Self-Refresh mode, and all but the essential peripheral blocks are
powered down. For more information on how this works, please look
- at the S3C2410 datasheets from Samsung.
+ at the relevant CPU datasheet from Samsung.
Requirements
Note, the original method of adding a late_initcall() is wrong,
and will end up initialising all compiled machines' pm init!
+ The following is an example of code used for testing wakeup from
+ a falling edge on IRQ_EINT0:
+
+
+static irqreturn_t button_irq(int irq, void *pw)
+{
+ return IRQ_HANDLED;
+}
+
+static void __init machine_init(void)
+{
+ ...
+
+ request_irq(IRQ_EINT0, button_irq, IRQF_TRIGGER_FALLING,
+ "button-irq-eint0", NULL);
+
+ enable_irq_wake(IRQ_EINT0);
+
+ s3c2410_pm_init();
+}
+
Debugging
---------
care should be taken that any external clock sources that the UARTs
rely on are still enabled at that point.
+ 3) If any debugging is placed in the resume path, then it must have the
+ relevant clocks and peripherals set up before use (i.e. by the bootloader).
+
+ For example, if you transmit a character from the UART, the baud
+ rate and UART controls must be set up beforehand.
+
Configuration
-------------
Allows the entire memory to be checksummed before and after the
suspend to see if there has been any corruption of the contents.
+ Note, the time to calculate the CRC is dependent on the CPU speed
+ and the size of memory. For a 64MByte RAM area on a 200MHz
+ S3C2410, this can take approximately 4 seconds to complete.
+
This support requires the CRC32 function to be enabled.
--- /dev/null
+CPU load
+--------
+
+Linux exports various bits of information via `/proc/stat' and
+`/proc/uptime' that userland tools, such as top(1), use to calculate
+the average time the system spent in a particular state, for example:
+
+ $ iostat
+ Linux 2.6.18.3-exp (linmac) 02/20/2007
+
+ avg-cpu:  %user   %nice %system %iowait  %steal   %idle
+           10.01    0.00    2.92    5.44    0.00   81.63
+
+ ...
+
+Here the system thinks that, over the default sampling period, it
+spent 10.01% of the time doing work in user space, 2.92% in the
+kernel, and was idle 81.63% of the time.
+
+In most cases the `/proc/stat' information reflects reality quite
+closely; however, due to the nature of how/when the kernel collects
+this data, it sometimes cannot be trusted at all.
+
+So how is this information collected? Whenever a timer interrupt is
+signalled, the kernel looks at what kind of task was running at that
+moment and increments the counter that corresponds to that task's
+kind/state. The problem with this is that the system could have
+switched between various states multiple times between two timer
+interrupts, yet the counter is incremented only for the last state.
+
+
+Example
+-------
+
+If we imagine a system with one task that periodically burns cycles
+in the following manner:
+
+ time line between two timer interrupts
+|--------------------------------------|
+ ^                                    ^
+ |_ something begins working          |
+                                      |_ something goes to sleep
+                                         (only to be awakened quite soon)
+
+In the above situation the system will be 0% loaded according to
+`/proc/stat' (since the timer interrupt will always happen when the
+system is executing the idle handler), but in reality the load is
+closer to 99%.
+
+One can imagine many more situations where this behavior of the kernel
+will lead to quite erratic information inside `/proc/stat'.
+
+
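+The following program demonstrates the effect. It first calibrates how
+many busy-loop iterations fit between two SIGALRM deliveries, then burns
+cycles for roughly two thirds of each interval and sleeps in sigwait()
+for the rest, so it is almost always asleep at the moment the timer
+interrupt samples the system, while still keeping the CPU heavily loaded:
+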
+/* gcc -o hog smallhog.c */
+#include <time.h>
+#include <limits.h>
+#include <signal.h>
+#include <sys/time.h>
+#define HIST 10
+
+static volatile sig_atomic_t stop;
+
+static void sighandler (int signr)
+{
+ (void) signr;
+ stop = 1;
+}
+static unsigned long hog (unsigned long niters)
+{
+ stop = 0;
+ while (!stop && --niters);
+ return niters;
+}
+int main (void)
+{
+ int i;
+ struct itimerval it = { .it_interval = { .tv_sec = 0, .tv_usec = 1 },
+ .it_value = { .tv_sec = 0, .tv_usec = 1 } };
+ sigset_t set;
+ unsigned long v[HIST];
+ double tmp = 0.0;
+ unsigned long n;
+ signal (SIGALRM, &sighandler);
+ setitimer (ITIMER_REAL, &it, NULL);
+
+ hog (ULONG_MAX);
+ for (i = 0; i < HIST; ++i) v[i] = ULONG_MAX - hog (ULONG_MAX);
+ for (i = 0; i < HIST; ++i) tmp += v[i];
+ tmp /= HIST;
+ n = tmp - (tmp / 3.0);
+
+ sigemptyset (&set);
+ sigaddset (&set, SIGALRM);
+
+ for (;;) {
+ hog (n);
+ sigwait (&set, &i);
+ }
+ return 0;
+}
+
+
+References
+----------
+
+http://lkml.org/lkml/2007/2/12/6
+Documentation/filesystems/proc.txt (1.8)
+
+
+Thanks
+------
+
+Con Kolivas, Pavel Machek
Who: Richard Purdie <rpurdie@rpsys.net>
---------------------------
+
+What: Wireless extensions over netlink (CONFIG_NET_WIRELESS_RTNETLINK)
+When: with the merge of wireless-dev, 2.6.22 or later
+Why: The option/code is
+ * not enabled on most kernels
+ * not required by any userspace tools (except an experimental one,
+ and even there only for some parts, others use ioctl)
+ * pointless since wext is no longer evolving and the ioctl
+ interface needs to be kept
+Who: Johannes Berg <johannes@sipsolutions.net>
+
+---------------------------
2.11 /proc/sys/fs/mqueue - POSIX message queues filesystem
2.12 /proc/<pid>/oom_adj - Adjust the oom-killer score
2.13 /proc/<pid>/oom_score - Display current oom-killer score
+ 2.14 /proc/<pid>/io - Display the IO accounting fields
------------------------------------------------------------------------------
Preface
command to write value into these files, thereby changing the default settings
of the kernel.
------------------------------------------------------------------------------
+
+2.14 /proc/<pid>/io - Display the IO accounting fields
+-------------------------------------------------------
+
+This file contains IO statistics for each running process.
+
+Example
+-------
+
+test:/tmp # dd if=/dev/zero of=/tmp/test.dat &
+[1] 3828
+
+test:/tmp # cat /proc/3828/io
+rchar: 323934931
+wchar: 323929600
+syscr: 632687
+syscw: 632675
+read_bytes: 0
+write_bytes: 323932160
+cancelled_write_bytes: 0
+
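+The same counters can also be read programmatically. The following is a
+minimal sketch (illustrative only, not a kernel interface) that dumps
+all fields for a given pid, defaulting to the calling process:
+
+/* gcc -o procio procio.c ; usage: ./procio [pid] */
+#include <stdio.h>
+
+int main(int argc, char **argv)
+{
+	char path[64], key[32];
+	unsigned long long val;
+	FILE *f;
+
+	snprintf(path, sizeof(path), "/proc/%s/io",
+		 argc > 1 ? argv[1] : "self");
+	f = fopen(path, "r");
+	if (!f) {
+		perror(path);
+		return 1;
+	}
+	/* each line of the file is "<field>: <value>" */
+	while (fscanf(f, "%31[^:]: %llu\n", key, &val) == 2)
+		printf("%s = %llu\n", key, val);
+	fclose(f);
+	return 0;
+}
+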
+
+Description
+-----------
+
+rchar
+-----
+
+I/O counter: chars read
+The number of bytes which this task has caused to be read from storage. This
+is simply the sum of bytes which this process passed to read() and pread().
+It includes things like tty IO and it is unaffected by whether or not actual
+physical disk IO was required (the read might have been satisfied from
+pagecache).
+
+
+wchar
+-----
+
+I/O counter: chars written
+The number of bytes which this task has caused, or shall cause, to be written
+to disk. Similar caveats apply here as with rchar.
+
+
+syscr
+-----
+
+I/O counter: read syscalls
+Attempt to count the number of read I/O operations, i.e. syscalls like read()
+and pread().
+
+
+syscw
+-----
+
+I/O counter: write syscalls
+Attempt to count the number of write I/O operations, i.e. syscalls like
+write() and pwrite().
+
+
+read_bytes
+----------
+
+I/O counter: bytes read
+Attempt to count the number of bytes which this process really did cause to
+be fetched from the storage layer. Done at the submit_bio() level, so it is
+accurate for block-backed filesystems. <please add status regarding NFS and
+CIFS at a later time>
+
+
+write_bytes
+-----------
+
+I/O counter: bytes written
+Attempt to count the number of bytes which this process caused to be sent to
+the storage layer. This is done at page-dirtying time.
+
+
+cancelled_write_bytes
+---------------------
+
+The big inaccuracy here is truncate. If a process writes 1MB to a file and
+then deletes the file, it will in fact perform no writeout. But it will have
+been accounted as having caused 1MB of write.
+In other words: The number of bytes which this process caused to not happen,
+by truncating pagecache. A task can cause "negative" IO too. If this task
+truncates some dirty pagecache, some IO which another task has been accounted
+for (in its write_bytes) will not be happening. We _could_ just subtract that
+from the truncating task's write_bytes, but there is information loss in doing
+that.
+
+
+Note
+----
+
+In its current implementation, this is a bit racy on 32-bit machines: if
+process A reads process B's /proc/pid/io while process B is updating one of
+those 64-bit counters, process A could see an intermediate result.
+
+
+More information about this can be found within the taskstats documentation in
+Documentation/accounting.
+
+------------------------------------------------------------------------------
"hdx=remap63" : remap the drive: add 63 to all sector numbers
(for DM OnTrack)
-
+
+ "idex=noautotune" : driver will NOT attempt to tune interface speed
+
"hdx=autotune" : driver will attempt to tune interface speed
to the fastest PIO mode supported,
if possible for this drive only.
"idex=base,ctl" : specify both base and ctl
"idex=base,ctl,irq" : specify base, ctl, and irq number
-
- "idex=autotune" : driver will attempt to tune interface speed
- to the fastest PIO mode supported,
- for all drives on this interface.
- Not fully supported by all chipset types,
- and quite likely to cause trouble with
- older/odd IDE drives.
-
- "idex=noautotune" : driver will NOT attempt to tune interface speed
- This is the default for most chipsets,
- except the cmd640.
"idex=serialize" : do not overlap operations on idex. Please note
that you will have to specify this option for
to the first ATA interface found on the particular host, and the defaults for
the base,ctl ports must not be altered.
- "ide0=dtc2278" : probe/support DTC2278 interface
- "ide0=ht6560b" : probe/support HT6560B interface
"ide0=cmd640_vlb" : *REQUIRED* for VLB cards with the CMD640 chip
(not for PCI -- automatically detected)
- "ide0=qd65xx" : probe/support qd65xx interface
- "ide0=ali14xx" : probe/support ali14xx chipsets (ALI M1439/M1443/M1445)
- "ide0=umc8672" : probe/support umc8672 chipsets
"ide=doubler" : probe/support IDE doublers on Amiga
Everything else is rejected with a "BAD OPTION" message.
+For legacy IDE VLB host drivers (ali14xx/dtc2278/ht6560b/qd65xx/umc8672)
+you need to explicitly enable probing by using the "probe" kernel parameter,
+e.g. to enable probing for ALI M14xx chipsets (ali14xx host driver) use:
+
+* "ali14xx.probe" boot option when ali14xx driver is built-in the kernel
+
+* "probe" module parameter when ali14xx driver is compiled as module
+ ("modprobe ali14xx probe")
+
================================================================================
IDE ATAPI streaming tape driver
Documentation/scsi/.
SELINUX SELinux support is enabled.
SERIAL Serial support is enabled.
+ SH SuperH architecture is enabled.
SMP The kernel is an SMP kernel.
SPARC Sparc architecture is enabled.
SWSUSP Software suspend is enabled.
dtc3181e= [HW,SCSI]
- earlyprintk= [IA-32,X86-64]
+ earlyprintk= [IA-32,X86-64,SH]
earlyprintk=vga
earlyprintk=serial[,ttySn[,baudrate]]
stifb= [HW]
Format: bpp:<bpp1>[:<bpp2>[:<bpp3>...]]
+ sunrpc.pool_mode=
+ [NFS]
+ Control how the NFS server code allocates CPUs to
+ service thread pools. Depending on how many NICs
+ you have and where their interrupts are bound, this
+ option will affect which CPUs will do NFS serving.
+ Note: this parameter cannot be changed while the
+ NFS server is running.
+
+ auto the server chooses an appropriate mode
+ automatically using heuristics
+ global a single global pool contains all CPUs
+ percpu one pool for each CPU
+ pernode one pool for each NUMA node (equivalent
+ to global on non-NUMA machines)
+
swiotlb= [IA-64] Number of I/O TLB slabs
switches= [HW,M68k]
usbhid.mousepoll=
[USBHID] The interval at which mice are to be polled.
- vdso= [IA-32]
+ vdso= [IA-32,SH]
vdso=1: enable VDSO (default)
vdso=0: disable VDSO mapping
More congestion control algorithms may be available as modules,
but not loaded.
+tcp_base_mss - INTEGER
+ The initial value of search_low to be used by Packetization Layer
+ Path MTU Discovery (MTU probing). If MTU probing is enabled,
+ this is the initial MSS used by the connection.
+
tcp_congestion_control - STRING
Set the congestion control algorithm to be used for new
connections. The algorithm "reno" is always available, but
Defaults are calculated at boot time from amount of available
memory.
+tcp_moderate_rcvbuf - BOOLEAN
+ If set, TCP performs receive buffer autotuning, attempting to
+ automatically size the buffer (no greater than tcp_rmem[2]) to
+ match the size required by the path for full throughput. Enabled by
+ default.
+
+tcp_mtu_probing - INTEGER
+ Controls TCP Packetization-Layer Path MTU Discovery. Takes three
+ values:
+ 0 - Disabled
+ 1 - Disabled by default, enabled when an ICMP black hole is detected
+ 2 - Always enabled, use initial MSS of tcp_base_mss.
+
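+	A minimal sketch (illustrative only; equivalent to
+	"sysctl -w net.ipv4.tcp_mtu_probing=2") that enables mode 2,
+	which starts probing from tcp_base_mss:
+
+	/* enable always-on TCP Packetization-Layer PMTU discovery */
+	#include <stdio.h>
+
+	int main(void)
+	{
+		FILE *f = fopen("/proc/sys/net/ipv4/tcp_mtu_probing", "w");
+
+		if (!f)
+			return 1;
+		fprintf(f, "2\n");
+		return fclose(f) ? 1 : 0;
+	}
+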
+tcp_no_metrics_save - BOOLEAN
+ By default, TCP saves various connection metrics in the route cache
+ when the connection closes, so that connections established in the
+ near future can use these to set initial conditions. Usually, this
+ increases overall performance, but may sometimes cause performance
+ degradation. If set, TCP will not cache metrics on closing
+ connections.
+
tcp_orphan_retries - INTEGER
How many times to retry before killing TCP connection, closed
by our side. Default value 7 corresponds to ~50sec-16min
mpu_port - 0x300,0x310,0x320,0x330 = legacy port,
1 = integrated PCI port,
0 = disable (default)
- fm_port - 0x388 (default), 0 = disable (default)
+ fm_port - 0x388 = legacy port,
+ 1 = integrated PCI port (default),
+ 0 = disable
soft_ac3 - Software-conversion of raw SPDIF packets (model 033 only)
(default = 1)
joystick_port - Joystick port address (0 = disable, 1 = auto-detect)
can be adjusted. Appearing only when compiled with
$CONFIG_SND_DEBUG=y
- STAC9200/9205/9220/9221/9254
+ STAC9200/9205/9254
+ ref Reference board
+
+ STAC9220/9221
ref Reference board
3stack D945 3stack
5stack D945 5stack + SPDIF
+ macmini Intel Mac Mini
+ macbook Intel Mac Book
+ macbook-pro Intel Mac Book Pro
STAC9202/9250/9251
ref Reference board, base config
FRAMEBUFFER LAYER
P: Antonino Daplas
-M: adaplas@pol.net
+M: adaplas@gmail.com
L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
W: http://linux-fbdev.sourceforge.net/
S: Maintained
P: Jiri Kosina
M: jkosina@suse.cz
L: linux-input@atrey.karlin.mff.cuni.cz
+T: git kernel.org:/pub/scm/linux/kernel/git/jikos/hid.git
S: Maintained
HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
INTEL 810/815 FRAMEBUFFER DRIVER
P: Antonino Daplas
-M: adaplas@pol.net
+M: adaplas@gmail.com
L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
S: Maintained
W: http://www.melware.de
S: Maintained
-JOURNALLING FLASH FILE SYSTEM (JFFS)
-P: Axis Communications AB
-M: jffs-dev@axis.com
-L: jffs-dev@axis.com
-W: http://www.developer.axis.com/software/jffs/
-S: Maintained
-
JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
P: David Woodhouse
M: dwmw2@infradead.org
NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER
P: Antonino Daplas
-M: adaplas@pol.net
+M: adaplas@gmail.com
L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
S: Maintained
S: Maintained
PARALLEL PORT SUPPORT
-P: Phil Blundell
-M: philb@gnu.org
-P: Tim Waugh
-M: tim@cyberelk.net
-P: David Campbell
-P: Andrea Arcangeli
-M: andrea@suse.de
L: linux-parport@lists.infradead.org
-W: http://people.redhat.com/twaugh/parport/
-S: Maintained
+S: Orphan
PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
P: Tim Waugh
S3 SAVAGE FRAMEBUFFER DRIVER
P: Antonino Daplas
-M: adaplas@pol.net
+M: adaplas@gmail.com
L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
S: Maintained
S: Supported
SPIDERNET NETWORK DRIVER for CELL
-P: Jim Lewis
-M: jim@jklewis.com
+P: Linas Vepstas
+M: linas@austin.ibm.com
L: netdev@vger.kernel.org
S: Supported
P: Jiri Kosina
M: jkosina@suse.cz
L: linux-usb-devel@lists.sourceforge.net
+T: git kernel.org:/pub/scm/linux/kernel/git/jikos/hid.git
S: Maintained
USB HUB DRIVER
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 21
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
NAME = Homicidal Dwarf Hamster
# *DOCUMENTATION*
config SYS_SUPPORTS_APM_EMULATION
bool
+config GENERIC_GPIO
+ bool
+ default n
+
config GENERIC_TIME
bool
default n
config ARCH_AT91
bool "Atmel AT91"
+ select GENERIC_GPIO
help
This enables support for systems based on the Atmel AT91RM9200
and AT91SAM9xxx processors.
config ARCH_EBSA110
bool "EBSA-110"
select ISA
+ select NO_IOPORT
help
This is an evaluation board for the StrongARM processor available
from Digital. It has limited hardware on-board, including an
bool "PXA2xx-based"
depends on MMU
select ARCH_MTD_XIP
+ select GENERIC_GPIO
select GENERIC_TIME
help
Support for Intel's PXA2XX processor line.
select ISA
select ARCH_DISCONTIGMEM_ENABLE
select ARCH_MTD_XIP
+ select GENERIC_GPIO
help
Support for StrongARM 11x0 based boards.
config ARCH_S3C2410
bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443"
+ select GENERIC_GPIO
help
Samsung S3C2410X CPU based systems, such as the Simtec Electronics
BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
config ARCH_OMAP
bool "TI OMAP"
+ select GENERIC_GPIO
help
Support for TI's OMAP platform (OMAP1 and OMAP2).
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
static struct spi_board_info nokia770_spi_board_info[] __initdata = {
[0] = {
- .modalias = "lcd_lph8923",
+ .modalias = "lcd_mipid",
.bus_num = 2,
.chip_select = 3,
.max_speed_hz = 12000000,
}
if (clk->flags & CLOCK_NO_IDLE_PARENT)
- if (!cpu_is_omap24xx())
- omap1_clk_deny_idle(clk->parent);
+ omap1_clk_deny_idle(clk->parent);
}
ret = clk->enable(clk);
if (likely(clk->parent)) {
omap1_clk_disable(clk->parent);
if (clk->flags & CLOCK_NO_IDLE_PARENT)
- if (!cpu_is_omap24xx())
- omap1_clk_allow_idle(clk->parent);
+ omap1_clk_allow_idle(clk->parent);
}
}
}
if (unlikely(clk->enable_reg == 0)) {
printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
clk->name);
- return 0;
+ return -EINVAL;
}
if (clk->flags & ENABLE_REG_32BIT) {
int crystal_type = 0; /* Default 12 MHz */
u32 reg;
+#ifdef CONFIG_DEBUG_LL
+ /* Resets some clocks that may be left on from bootloader,
+ * but leaves serial clocks on.
+ */
+ omap_writel(0x3 << 29, MOD_CONF_CTRL_0);
+#endif
+
/* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
reg = omap_readw(SOFT_REQ_REG) & (1 << 4);
omap_writew(reg, SOFT_REQ_REG);
- omap_writew(0, SOFT_REQ_REG2);
+ if (!cpu_is_omap15xx())
+ omap_writew(0, SOFT_REQ_REG2);
clk_init(&omap1_clk_functions);
info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config);
if (info != NULL) {
- if (!cpu_is_omap1510())
+ if (!cpu_is_omap15xx())
crystal_type = info->system_clock_type;
}
if (cpu_is_omap730())
omap_unmask_irq(INT_730_IH2_IRQ);
- else if (cpu_is_omap1510())
+ else if (cpu_is_omap15xx())
omap_unmask_irq(INT_1510_IH2_IRQ);
else if (cpu_is_omap16xx())
omap_unmask_irq(INT_1610_IH2_IRQ);
tps65010_set_led(LED1, OFF);
}
- omap_writew(0xffff, ULPD_SOFT_DISABLE_REQ_REG);
+ if (!cpu_is_omap15xx())
+ omap_writew(0xffff, ULPD_SOFT_DISABLE_REQ_REG);
/*
* Step 1: turn off interrupts (FIXME: NOTE: already disabled)
MPUI1610_RESTORE(OMAP_IH2_3_MIR);
}
- omap_writew(0, ULPD_SOFT_DISABLE_REQ_REG);
+ if (!cpu_is_omap15xx())
+ omap_writew(0, ULPD_SOFT_DISABLE_REQ_REG);
/*
* Reenable interrupts
static int __init omap_pm_init(void)
{
+ int error;
+
printk("Power Management for TI OMAP.\n");
/*
omap_pm_init_proc();
#endif
- subsys_create_file(&power_subsys, &sleep_while_idle_attr);
+ error = subsys_create_file(&power_subsys, &sleep_while_idle_attr);
+ if (error)
+ printk(KERN_ERR "subsys_create_file failed: %d\n", error);
if (cpu_is_omap16xx()) {
/* configure LOW_PWR pin */
/*
* linux/arch/arm/mach-omap1/serial.c
*
- * OMAP1 CPU identification code
+ * OMAP1 serial support.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
omap_serial_outp(p, UART_OMAP_SCR, 0x08); /* TX watermark */
omap_serial_outp(p, UART_OMAP_MDR1, 0x00); /* enable UART */
- if (!cpu_is_omap1510()) {
+ if (!cpu_is_omap15xx()) {
omap_serial_outp(p, UART_OMAP_SYSC, 0x01);
while (!(omap_serial_in(p, UART_OMAP_SYSC) & 0x01));
}
serial_platform_data[1].irq = INT_730_UART_MODEM_IRDA_2;
}
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap15xx()) {
serial_platform_data[0].uartclk = OMAP1510_BASE_BAUD * 16;
serial_platform_data[1].uartclk = OMAP1510_BASE_BAUD * 16;
serial_platform_data[2].uartclk = OMAP1510_BASE_BAUD * 16;
printk("Could not get uart1_ck\n");
else {
clk_enable(uart1_ck);
- if (cpu_is_omap1510())
+ if (cpu_is_omap15xx())
clk_set_rate(uart1_ck, 12000000);
}
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap15xx()) {
omap_cfg_reg(UART1_TX);
omap_cfg_reg(UART1_RTS);
if (machine_is_omap_innovator()) {
printk("Could not get uart2_ck\n");
else {
clk_enable(uart2_ck);
- if (cpu_is_omap1510())
+ if (cpu_is_omap15xx())
clk_set_rate(uart2_ck, 12000000);
else
clk_set_rate(uart2_ck, 48000000);
}
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap15xx()) {
omap_cfg_reg(UART2_TX);
omap_cfg_reg(UART2_RTS);
if (machine_is_omap_innovator()) {
printk("Could not get uart3_ck\n");
else {
clk_enable(uart3_ck);
- if (cpu_is_omap1510())
+ if (cpu_is_omap15xx())
clk_set_rate(uart3_ck, 12000000);
}
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap15xx()) {
omap_cfg_reg(UART3_TX);
omap_cfg_reg(UART3_RX);
}
#include "prcm-regs.h"
#include <asm/io.h>
-#include <asm/delay.h>
static unsigned int row_gpios[6] = { 88, 89, 124, 11, 6, 96 };
static unsigned int col_gpios[7] = { 90, 91, 100, 36, 12, 97, 98 };
return err;
}
-static void set_trans_mode(void *data)
+static void set_trans_mode(struct work_struct *work)
{
- int *mode = data;
+ struct omap_irda_config *irda_config =
+ container_of(work, struct omap_irda_config, gpio_expa.work);
+ int mode = irda_config->mode;
unsigned char expa;
int err = 0;
expa &= ~0x01;
- if (!(*mode & IR_SIRMODE)) { /* MIR/FIR */
+ if (!(mode & IR_SIRMODE)) { /* MIR/FIR */
expa |= 0x01;
}
{
struct omap_irda_config *irda_config = dev->platform_data;
+ irda_config->mode = mode;
cancel_delayed_work(&irda_config->gpio_expa);
- PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-#error this is not permitted - mode is an argument variable
+ PREPARE_DELAYED_WORK(&irda_config->gpio_expa, set_trans_mode);
schedule_delayed_work(&irda_config->gpio_expa, 0);
return 0;
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/irq.h>
#include <asm/mach/time.h>
#include <asm/arch/dmtimer.h>
BUG_ON(gptimer == NULL);
omap_dm_timer_set_source(gptimer, OMAP_TIMER_SRC_SYS_CLK);
- tick_period = clk_get_rate(omap_dm_timer_get_fclk(gptimer)) / 100;
+ tick_period = clk_get_rate(omap_dm_timer_get_fclk(gptimer)) / HZ;
tick_period -= 1;
setup_irq(omap_dm_timer_get_irq(gptimer), &omap2_gp_timer_irq);
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <asm/apm-emulation.h>
+#include <linux/apm-emulation.h>
+
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/hardware.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <asm/apm-emulation.h>
+#include <linux/apm-emulation.h>
+
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/hardware.h>
struct op_arm_model_spec *spec = NULL;
int ret = -ENODEV;
+ ops->backtrace = arm_backtrace;
+
#ifdef CONFIG_CPU_XSCALE
spec = &op_xscale_spec;
#endif
ops->start = op_arm_start;
ops->stop = op_arm_stop;
ops->cpu_type = op_arm_model->name;
- ops->backtrace = arm_backtrace;
printk(KERN_INFO "oprofile: using %s\n", spec->name);
}
omap_enable_channel_irq(free_ch);
/* Clear the CSR register and IRQ status register */
OMAP_DMA_CSR_REG(free_ch) = OMAP2_DMA_CSR_CLEAR_MASK;
- omap_writel(~0x0, OMAP_DMA4_IRQSTATUS_L0);
+ omap_writel(1 << free_ch, OMAP_DMA4_IRQSTATUS_L0);
}
*dma_ch_out = free_ch;
/* Clear the CSR register and IRQ status register */
OMAP_DMA_CSR_REG(lch) = OMAP2_DMA_CSR_CLEAR_MASK;
-
- val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
- val |= 1 << lch;
- omap_writel(val, OMAP_DMA4_IRQSTATUS_L0);
+ omap_writel(1 << lch, OMAP_DMA4_IRQSTATUS_L0);
/* Disable all DMA interrupts for the channel. */
OMAP_DMA_CICR_REG(lch) = 0;
static int omap2_dma_handle_ch(int ch)
{
u32 status = OMAP_DMA_CSR_REG(ch);
- u32 val;
if (!status)
return 0;
dma_chan[ch].dev_id);
OMAP_DMA_CSR_REG(ch) = OMAP2_DMA_CSR_CLEAR_MASK;
-
- val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
- /* ch in this function is from 0-31 while in register it is 1-32 */
- val = 1 << (ch);
- omap_writel(val, OMAP_DMA4_IRQSTATUS_L0);
+ omap_writel(1 << ch, OMAP_DMA4_IRQSTATUS_L0);
if (likely(dma_chan[ch].callback != NULL))
dma_chan[ch].callback(ch, status, dma_chan[ch].data);
{ .phys_base = 0xfffb2c00, .irq = INT_1610_GPTIMER4 },
{ .phys_base = 0xfffb3400, .irq = INT_1610_GPTIMER5 },
{ .phys_base = 0xfffb3c00, .irq = INT_1610_GPTIMER6 },
- { .phys_base = 0xfffb4400, .irq = INT_1610_GPTIMER7 },
- { .phys_base = 0xfffb4c00, .irq = INT_1610_GPTIMER8 },
+ { .phys_base = 0xfffb7400, .irq = INT_1610_GPTIMER7 },
+ { .phys_base = 0xfffbd400, .irq = INT_1610_GPTIMER8 },
};
#elif defined(CONFIG_ARCH_OMAP2)
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
BUG();
+
+ return 0;
}
#endif
};
static struct irq_chip mpuio_irq_chip = {
- .name = "MPUIO",
- .ack = mpuio_ack_irq,
- .mask = mpuio_mask_irq,
- .unmask = mpuio_unmask_irq
+ .name = "MPUIO",
+ .ack = mpuio_ack_irq,
+ .mask = mpuio_mask_irq,
+ .unmask = mpuio_unmask_irq,
+ .set_type = gpio_irq_type,
};
static int initialized;
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/delay.h>
-#include <asm/delay.h>
#include <asm/io.h>
#include <asm/irq.h>
}
/* Check for pull up or pull down selection on 1610 */
- if (!cpu_is_omap1510()) {
+ if (!cpu_is_omap15xx()) {
if (cfg->pu_pd_reg && cfg->pull_val) {
spin_lock_irqsave(&mux_spin_lock, flags);
pu_pd_orig = omap_readl(cfg->pu_pd_reg);
printk(" %s (0x%08x) = 0x%08x -> 0x%08x\n",
cfg->mux_reg_name, cfg->mux_reg, reg_orig, reg);
- if (!cpu_is_omap1510()) {
+ if (!cpu_is_omap15xx()) {
if (cfg->pu_pd_reg && cfg->pull_val) {
printk(" %s (0x%08x) = 0x%08x -> 0x%08x\n",
cfg->pu_pd_name, cfg->pu_pd_reg,
going to sleep. The blocks are then checked on resume for any
errors.
+ Note, this can take several seconds depending on memory size
+ and CPU speed.
+
+ See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
+
config S3C2410_PM_CHECK_CHUNKSIZE
int "S3C2410 PM Suspend CRC Chunksize (KiB)"
depends on ARCH_S3C2410 && PM && S3C2410_PM_CHECK
the CRC data block will take more memory, but will identify any
faults with better precision.
+ See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
+
config S3C2410_LOWLEVEL_UART_PORT
int "S3C2410 UART to use for low-level messages"
default 0
zero_fp
get_scno
-#ifdef CONFIG_ALIGNMENT_TRAP
- ldr ip, __cr_alignment
- ldr ip, [ip]
- mcr p15, 0, ip, c1, c0 @ update control register
-#endif
enable_irqs ip
str r4, [sp, #-S_OFF]! @ push fifth arg
b ret_slow_syscall
.align 5
-#ifdef CONFIG_ALIGNMENT_TRAP
- .type __cr_alignment, #object
-__cr_alignment:
- .word cr_alignment
-#endif
.type sys_call_table, #object
ENTRY(sys_call_table)
config UID16
bool
+config GENERIC_GPIO
+ bool
+ default y
+
config GENERIC_HARDIRQS
bool
default y
__mtdr(DBGREG_DC, dc);
ti = current_thread_info();
- ti->flags |= _TIF_BREAKPOINT;
+ set_ti_thread_flag(ti, TIF_BREAKPOINT);
/* The TLB miss handlers don't check thread flags */
if ((regs->pc >= (unsigned long)&itlb_miss)
* single step.
*/
if ((regs->sr & MODE_MASK) != MODE_SUPERVISOR)
- ti->flags |= TIF_SINGLE_STEP;
+ set_ti_thread_flag(ti, TIF_SINGLE_STEP);
} else {
panic("Unable to handle debug trap at pc = %08lx\n",
regs->pc);
return;
}
+static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
+{
+ return (p > (unsigned long)tinfo)
+ && (p < (unsigned long)tinfo + THREAD_SIZE - 3);
+}
+
#ifdef CONFIG_FRAME_POINTER
static inline void __show_trace(struct task_struct *tsk, unsigned long *sp,
struct pt_regs *regs)
{
- unsigned long __user *fp;
- unsigned long __user *last_fp = NULL;
-
- if (regs) {
- fp = (unsigned long __user *)regs->r7;
- } else if (tsk == current) {
- register unsigned long __user *real_fp __asm__("r7");
- fp = real_fp;
- } else {
- fp = (unsigned long __user *)tsk->thread.cpu_context.r7;
- }
+ unsigned long lr, fp;
+ struct thread_info *tinfo;
+
+ tinfo = (struct thread_info *)
+ ((unsigned long)sp & ~(THREAD_SIZE - 1));
+
+ if (regs)
+ fp = regs->r7;
+ else if (tsk == current)
+ asm("mov %0, r7" : "=r"(fp));
+ else
+ fp = tsk->thread.cpu_context.r7;
/*
- * Walk the stack until (a) we get an exception, (b) the frame
- * pointer becomes zero, or (c) the frame pointer gets stuck
- * at the same value.
+ * Walk the stack as long as the frame pointer (a) is within
+ * the kernel stack of the task, and (b) it doesn't move
+ * downwards.
*/
- while (fp && fp != last_fp) {
- unsigned long lr, new_fp = 0;
-
- last_fp = fp;
- if (__get_user(lr, fp))
- break;
- if (fp && __get_user(new_fp, fp + 1))
- break;
- fp = (unsigned long __user *)new_fp;
+ while (valid_stack_ptr(tinfo, fp)) {
+ unsigned long new_fp;
+ lr = *(unsigned long *)fp;
printk(" [<%08lx>] ", lr);
print_symbol("%s\n", lr);
+
+ new_fp = *(unsigned long *)(fp + 4);
+ if (new_fp <= fp)
+ break;
+ fp = new_fp;
}
printk("\n");
}
DEFINE_DEV(atmel_spi, 1);
DEV_CLK(spi_clk, atmel_spi1, pba, 1);
-static void
+static void __init
at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b,
unsigned int n, const u8 *pins)
{
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
if (vma->vm_flags & VM_EXEC) {
- void *v = kmap(page);
+ void *v = page_address(page);
__flush_icache_range((unsigned long)v, (unsigned long)v + PAGE_SIZE);
- kunmap(v);
}
}
return DMA_MEMORY_IO;
free1_out:
- kfree(dev->dma_mem->bitmap);
+ kfree(dev->dma_mem);
out:
return 0;
}
/* distribute the allocatable pages across the various zones and pass them to the allocator
*/
- zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
- zones_size[ZONE_NORMAL] = 0;
+ zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
#ifdef CONFIG_HIGHMEM
zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
#endif
config VMI
bool "VMI Paravirt-ops support"
- depends on PARAVIRT && !NO_HZ
- default y
+ depends on PARAVIRT
help
- VMI provides a paravirtualized interface to multiple hypervisors
- include VMware ESX server and Xen by connecting to a ROM module
+ VMI provides a paravirtualized interface to the VMware ESX server
+ (it could be used by other hypervisors in theory too, but is not
+ at the moment), by linking the kernel to a GPL-ed ROM module
provided by the hypervisor.
config ACPI_SRAT
config COMPAT_VDSO
bool "Compat VDSO support"
default y
- depends on !PARAVIRT
help
Map the VDSO to the predictable old-style address too.
---help---
config KTIME_SCALAR
bool
default y
-
-config NO_IDLE_HZ
- bool
- depends on PARAVIRT
- default y
- help
- Switches the regular HZ timer off when the system is going idle.
- This helps a hypervisor detect that the Linux system is idle,
- reducing the overhead of idle systems.
/* No broadcast on UP ! */
if (num_possible_cpus() == 1)
return;
- } else
- lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
+ } else {
+ /*
+ * If nmi_watchdog is set to IO_APIC, we need the
+ * PIT/HPET going. Otherwise register lapic as a dummy
+ * device.
+ */
+ if (nmi_watchdog != NMI_IO_APIC)
+ lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
+ }
/* Setup the lapic or request the broadcast */
setup_APIC_timer();
return 0;
}
-static const struct cpufreq_driver nforce2_driver = {
+static struct cpufreq_driver nforce2_driver = {
.name = "nforce2",
.verify = nforce2_verify,
.target = nforce2_target,
NULL,
};
-static const struct cpufreq_driver eps_driver = {
+static struct cpufreq_driver eps_driver = {
.verify = eps_verify,
.target = eps_target,
.init = eps_cpu_init,
};
-static const struct cpufreq_driver elanfreq_driver = {
+static struct cpufreq_driver elanfreq_driver = {
.get = elanfreq_get_cpu_frequency,
.verify = elanfreq_verify,
.target = elanfreq_target,
* cpufreq_gx_init:
* MediaGX/Geode GX initialize cpufreq driver
*/
-static const struct cpufreq_driver gx_suspmod_driver = {
+static struct cpufreq_driver gx_suspmod_driver = {
.get = gx_get_cpuspeed,
.verify = cpufreq_gx_verify,
.target = cpufreq_gx_target,
NULL,
};
-static const struct cpufreq_driver longhaul_driver = {
+static struct cpufreq_driver longhaul_driver = {
.verify = longhaul_verify,
.target = longhaul_target,
.get = longhaul_get,
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longrun", msg)
-static const struct cpufreq_driver longrun_driver;
+static struct cpufreq_driver longrun_driver;
/**
* longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
}
-static const struct cpufreq_driver longrun_driver = {
+static struct cpufreq_driver longrun_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = longrun_verify_policy,
.setpolicy = longrun_set_policy,
NULL,
};
-static const struct cpufreq_driver powernow_k6_driver = {
+static struct cpufreq_driver powernow_k6_driver = {
.verify = powernow_k6_verify,
.target = powernow_k6_target,
.init = powernow_k6_cpu_init,
NULL,
};
-static const struct cpufreq_driver powernow_driver = {
+static struct cpufreq_driver powernow_driver = {
.verify = powernow_verify,
.target = powernow_target,
.get = powernow_get,
NULL,
};
-static const struct cpufreq_driver cpufreq_amd64_driver = {
+static struct cpufreq_driver cpufreq_amd64_driver = {
.verify = powernowk8_verify,
.target = powernowk8_target,
.init = powernowk8_cpu_init,
};
-static const struct cpufreq_driver sc520_freq_driver = {
+static struct cpufreq_driver sc520_freq_driver = {
.get = sc520_freq_get_cpu_frequency,
.verify = sc520_freq_verify,
.target = sc520_freq_target,
};
-static const struct cpufreq_driver speedstep_driver = {
+static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-ich",
.verify = speedstep_verify,
.target = speedstep_target,
NULL,
};
-static const struct cpufreq_driver speedstep_driver = {
+static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-smi",
.verify = speedstep_verify,
.target = speedstep_target,
return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0);
}
+/*
+ * Clock source related code
+ */
+static cycle_t read_hpet(void)
+{
+ return (cycle_t)hpet_readl(HPET_COUNTER);
+}
+
+static struct clocksource clocksource_hpet = {
+ .name = "hpet",
+ .rating = 250,
+ .read = read_hpet,
+ .mask = HPET_MASK,
+ .shift = HPET_SHIFT,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
/*
* Try to setup the HPET timer
*/
{
unsigned long id;
uint64_t hpet_freq;
+ u64 tmp;
if (!is_hpet_capable())
return 0;
/* Start the counter */
hpet_start_counter();
+ /* Initialize and register HPET clocksource
+ *
+ * hpet period is in femto seconds per cycle
+ * so we need to convert this to ns/cyc units
+ * approximated by mult/2^shift
+ *
+ * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
+ * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
+ * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
+ * (fsec/cyc << shift)/1000000 = mult
+ * (hpet_period << shift)/FSEC_PER_NSEC = mult
+ */
+ tmp = (u64)hpet_period << HPET_SHIFT;
+ do_div(tmp, FSEC_PER_NSEC);
+ clocksource_hpet.mult = (u32)tmp;
+
+ clocksource_register(&clocksource_hpet);
+
+
if (id & HPET_ID_LEGSUP) {
hpet_enable_int();
hpet_reserve_platform_timers(id);
return 0;
}
-/*
- * Clock source related code
- */
-static cycle_t read_hpet(void)
-{
- return (cycle_t)hpet_readl(HPET_COUNTER);
-}
-
-static struct clocksource clocksource_hpet = {
- .name = "hpet",
- .rating = 250,
- .read = read_hpet,
- .mask = HPET_MASK,
- .shift = HPET_SHIFT,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init init_hpet_clocksource(void)
-{
- u64 tmp;
-
- if (!hpet_virt_address)
- return -ENODEV;
-
- /*
- * hpet period is in femto seconds per cycle
- * so we need to convert this to ns/cyc units
- * aproximated by mult/2^shift
- *
- * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
- * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
- * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
- * (fsec/cyc << shift)/1000000 = mult
- * (hpet_period << shift)/FSEC_PER_NSEC = mult
- */
- tmp = (u64)hpet_period << HPET_SHIFT;
- do_div(tmp, FSEC_PER_NSEC);
- clocksource_hpet.mult = (u32)tmp;
-
- return clocksource_register(&clocksource_hpet);
-}
-
-module_init(init_hpet_clocksource);
#ifdef CONFIG_HPET_EMULATE_RTC
clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
return clocksource_register(&clocksource_pit);
}
-module_init(init_pit_clocksource);
+arch_initcall(init_pit_clocksource);
}
spin_lock_irqsave(&ioapic_lock, flags);
__ioapic_write_entry(apic, pin, entry);
- irq_desc[irq].affinity = TARGET_CPUS;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
}
spin_lock_irqsave(&ioapic_lock, flags);
__ioapic_write_entry(ioapic, pin, entry);
- irq_desc[irq].affinity = TARGET_CPUS;
spin_unlock_irqrestore(&ioapic_lock, flags);
return 0;
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
+#include <asm/timer.h>
/* nop stub */
static void native_nop(void)
.memory_setup = machine_specific_memory_setup,
.get_wallclock = native_get_wallclock,
.set_wallclock = native_set_wallclock,
- .time_init = time_init_hook,
+ .time_init = hpet_time_init,
.init_IRQ = native_init_IRQ,
.cpuid = native_cpuid,
.write_msr = native_write_msr,
.read_tsc = native_read_tsc,
.read_pmc = native_read_pmc,
+ .get_scheduled_cycles = native_read_tsc,
+ .get_cpu_khz = native_calculate_cpu_khz,
.load_tr_desc = native_load_tr_desc,
.set_ldt = native_set_ldt,
.load_gdt = native_load_gdt,
.set_iopl_mask = native_set_iopl_mask,
.io_delay = native_io_delay,
- .const_udelay = __const_udelay,
#ifdef CONFIG_X86_LOCAL_APIC
.apic_write = native_apic_write,
.flush_tlb_kernel = native_flush_tlb_global,
.flush_tlb_single = native_flush_tlb_single,
+ .map_pt_hook = (void *)native_nop,
+
.alloc_pt = (void *)native_nop,
.alloc_pd = (void *)native_nop,
.alloc_pd_clone = (void *)native_nop,
return DMA_MEMORY_IO;
free1_out:
- kfree(dev->dma_mem->bitmap);
+ kfree(dev->dma_mem);
out:
if (mem_base)
iounmap(mem_base);
conswitchp = &dummy_con;
#endif
#endif
- tsc_init();
}
* Dave Jones : Report invalid combinations of Athlon CPUs.
* Rusty Russell : Hacked into shape for new "hotplug" boot process. */
-
-/* SMP boot always wants to use real time delay to allow sufficient time for
- * the APs to come online */
-#define USE_REAL_TIME_DELAY
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
+#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
int __cpuinit __cpu_up(unsigned int cpu)
{
+ unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
- int ret=0;
+ int ret = 0;
/*
* We do warm boot only on cpus that had booted earlier
/* In case one didn't come up */
if (!cpu_isset(cpu, cpu_callin_map)) {
printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
- local_irq_enable();
return -EIO;
}
- local_irq_enable();
-
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* Unleash the CPU! */
cpu_set(cpu, smp_commenced_mask);
/*
- * Check TSC synchronization with the AP:
+ * Check TSC synchronization with the AP (keep irqs disabled
+ * while doing so):
*/
+ local_irq_save(flags);
check_tsc_sync_source(cpu);
+ local_irq_restore(flags);
- while (!cpu_isset(cpu, cpu_online_map))
+ while (!cpu_isset(cpu, cpu_online_map)) {
cpu_relax();
+ touch_nmi_watchdog();
+ }
#ifdef CONFIG_X86_GENERICARCH
if (num_online_cpus() > 8 && genapic == &apic_default)
extern void (*late_time_init)(void);
/* Duplicate of time_init() below, with hpet_enable part added */
-static void __init hpet_time_init(void)
+void __init hpet_time_init(void)
{
if (!hpet_enable())
setup_pit_timer();
- do_time_init();
+ time_init_hook();
}
+/*
+ * This is called directly from init code; we must delay timer setup in the
+ * HPET case as we can't make the decision to turn on HPET this early in the
+ * boot process.
+ *
+ * The chosen time_init function will usually be hpet_time_init, above, but
+ * in the case of virtual hardware, an alternative function may be substituted.
+ */
void __init time_init(void)
{
- late_time_init = hpet_time_init;
+ tsc_init();
+ late_time_init = choose_time_init();
}
#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
+#include <asm/timer.h>
#include "mach_timer.h"
* an extra value to store the TSC freq
*/
unsigned int tsc_khz;
-unsigned long long (*custom_sched_clock)(void);
int tsc_disable;
{
unsigned long long this_offset;
- if (unlikely(custom_sched_clock))
- return (*custom_sched_clock)();
-
/*
* Fall back to jiffies if there's no TSC available:
*/
return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
/* read the Time Stamp Counter: */
- rdtscll(this_offset);
+ get_scheduled_cycles(this_offset);
/* return the value in ns */
return cycles_2_ns(this_offset);
}
-static unsigned long calculate_cpu_khz(void)
+unsigned long native_calculate_cpu_khz(void)
{
unsigned long long start, end;
unsigned long count;
EXPORT_SYMBOL(recalibrate_cpu_khz);
-void __init tsc_init(void)
-{
- if (!cpu_has_tsc || tsc_disable)
- goto out_no_tsc;
-
- cpu_khz = calculate_cpu_khz();
- tsc_khz = cpu_khz;
-
- if (!cpu_khz)
- goto out_no_tsc;
-
- printk("Detected %lu.%03lu MHz processor.\n",
- (unsigned long)cpu_khz / 1000,
- (unsigned long)cpu_khz % 1000);
-
- set_cyc2ns_scale(cpu_khz);
- use_tsc_delay();
- return;
-
-out_no_tsc:
- /*
- * Set the tsc_disable flag if there's no TSC support, this
- * makes it a fast flag for the kernel to see whether it
- * should be using the TSC.
- */
- tsc_disable = 1;
-}
-
#ifdef CONFIG_CPU_FREQ
/*
static inline void check_geode_tsc_reliable(void) { }
#endif
-static int __init init_tsc_clocksource(void)
+
+void __init tsc_init(void)
{
+ if (!cpu_has_tsc || tsc_disable)
+ goto out_no_tsc;
- if (cpu_has_tsc && tsc_khz && !tsc_disable) {
- /* check blacklist */
- dmi_check_system(bad_tsc_dmi_table);
+ cpu_khz = calculate_cpu_khz();
+ tsc_khz = cpu_khz;
- unsynchronized_tsc();
- check_geode_tsc_reliable();
- current_tsc_khz = tsc_khz;
- clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
- clocksource_tsc.shift);
- /* lower the rating if we already know its unstable: */
- if (check_tsc_unstable()) {
- clocksource_tsc.rating = 0;
- clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
- }
+ if (!cpu_khz)
+ goto out_no_tsc;
+
+ printk("Detected %lu.%03lu MHz processor.\n",
+ (unsigned long)cpu_khz / 1000,
+ (unsigned long)cpu_khz % 1000);
+
+ set_cyc2ns_scale(cpu_khz);
+ use_tsc_delay();
- return clocksource_register(&clocksource_tsc);
+ /* Check and install the TSC clocksource */
+ dmi_check_system(bad_tsc_dmi_table);
+
+ unsynchronized_tsc();
+ check_geode_tsc_reliable();
+ current_tsc_khz = tsc_khz;
+ clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
+ clocksource_tsc.shift);
+ /* lower the rating if we already know its unstable: */
+ if (check_tsc_unstable()) {
+ clocksource_tsc.rating = 0;
+ clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
}
+ clocksource_register(&clocksource_tsc);
- return 0;
-}
+ return;
-module_init(init_tsc_clocksource);
+out_no_tsc:
+ /*
+ * Set the tsc_disable flag if there's no TSC support, this
+ * makes it a fast flag for the kernel to see whether it
+ * should be using the TSC.
+ */
+ tsc_disable = 1;
+}
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
+#include <asm/kmap_types.h>
/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
static struct vrom_header *vmi_rom;
static int license_gplok;
-static int disable_nodelay;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
+static int disable_noidle;
+static int disable_vmi_timer;
/* Cached VMI operations */
struct {
}
/* For NO_IDLE_HZ, we stop the clock when halting the kernel */
-#ifdef CONFIG_NO_IDLE_HZ
static fastcall void vmi_safe_halt(void)
{
int idle = vmi_stop_hz_timer();
local_irq_enable();
}
}
-#endif
#ifdef CONFIG_DEBUG_PAGE_TYPE
#define vmi_check_page_type(p,t) do { } while (0)
#endif
+static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn)
+{
+ /*
+ * Internally, the VMI ROM must map virtual addresses to physical
+ * addresses for processing MMU updates. By the time MMU updates
+ * are issued, this information is typically already lost.
+ * Fortunately, the VMI provides a cache of mapping slots for active
+ * page tables.
+ *
+ * We use slot zero for the linear mapping of physical memory, and
+ * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
+ *
+ * args: SLOT VA COUNT PFN
+ */
+ BUG_ON(type != KM_PTE0 && type != KM_PTE1);
+ vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn);
+}
+
static void vmi_allocate_pt(u32 pfn)
{
vmi_set_page_type(pfn, VMI_PAGE_L1);
#endif
#ifdef CONFIG_SMP
-struct vmi_ap_state ap;
extern void setup_pda(void);
-static void __init /* XXX cpu hotplug */
+static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
unsigned long start_esp)
{
+ struct vmi_ap_state ap;
+
/* Default everything to zero. This is fine for most GPRs. */
memset(&ap, 0, sizeof(struct vmi_ap_state));
/* Protected mode, paging, AM, WP, NE, MP. */
ap.cr0 = 0x80050023;
ap.cr4 = mmu_cr4_features;
- vmi_ops.set_initial_ap_state(__pa(&ap), phys_apicid);
+ vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
#endif
void vmi_bringup(void)
{
/* We must establish the lowmem mapping for MMU ops to work */
- if (vmi_rom)
+ if (vmi_ops.set_linear_mapping)
vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
}
/*
- * Return a pointer to the VMI function or a NOP stub
+ * Return a pointer to a VMI function or NULL if unimplemented
*/
static void *vmi_get_function(int vmicall)
{
if (rel->type == VMI_RELOCATION_CALL_REL)
return (void *)rel->eip;
else
- return (void *)vmi_nop;
+ return NULL;
}
/*
* Helper macro for making the VMI paravirt-ops fill code readable.
- * For unimplemented operations, fall back to default.
+ * For unimplemented operations, fall back to default, unless nop
+ * is returned by the ROM.
*/
#define para_fill(opname, vmicall) \
do { \
if (rel->type != VMI_RELOCATION_NONE) { \
BUG_ON(rel->type != VMI_RELOCATION_CALL_REL); \
paravirt_ops.opname = (void *)rel->eip; \
+ } else if (rel->type == VMI_RELOCATION_NOP) \
+ paravirt_ops.opname = (void *)vmi_nop; \
+} while (0)
+
+/*
+ * Helper macro for making the VMI paravirt-ops fill code readable.
+ * For cached operations which do not match the VMI ROM ABI and must
+ * go through a translation stub. Ignore NOPs, since it is not clear
+ * a NOP VMI function corresponds to a NOP paravirt-op when the
+ * functions are not in 1-1 correspondence.
+ */
+#define para_wrap(opname, wrapper, cache, vmicall) \
+do { \
+ reloc = call_vrom_long_func(vmi_rom, get_reloc, \
+ VMI_CALL_##vmicall); \
+ BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \
+ if (rel->type == VMI_RELOCATION_CALL_REL) { \
+ paravirt_ops.opname = wrapper; \
+ vmi_ops.cache = (void *)rel->eip; \
} \
} while (0)
+
/*
* Activate the VMI interface and switch into paravirtualized mode
*/
* rdpmc is not yet used in Linux
*/
- /* CPUID is special, so very special */
- reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CPUID);
- if (rel->type != VMI_RELOCATION_NONE) {
- BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
- vmi_ops.cpuid = (void *)rel->eip;
- paravirt_ops.cpuid = vmi_cpuid;
- }
+ /* CPUID is special, so very special it gets wrapped like a present */
+ para_wrap(cpuid, vmi_cpuid, cpuid, CPUID);
para_fill(clts, CLTS);
para_fill(get_debugreg, GetDR);
para_fill(restore_fl, SetInterruptMask);
para_fill(irq_disable, DisableInterrupts);
para_fill(irq_enable, EnableInterrupts);
+
/* irq_save_disable !!! sheer pain */
patch_offset(&irq_save_disable_callout[IRQ_PATCH_INT_MASK],
(char *)paravirt_ops.save_fl);
patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
(char *)paravirt_ops.irq_disable);
-#ifndef CONFIG_NO_IDLE_HZ
- para_fill(safe_halt, Halt);
-#else
- vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
- paravirt_ops.safe_halt = vmi_safe_halt;
-#endif
+
para_fill(wbinvd, WBINVD);
+ para_fill(read_tsc, RDTSC);
+
+ /* The following we emulate with trap and emulate for now */
/* paravirt_ops.read_msr = vmi_rdmsr */
/* paravirt_ops.write_msr = vmi_wrmsr */
- para_fill(read_tsc, RDTSC);
/* paravirt_ops.rdpmc = vmi_rdpmc */
- /* TR interface doesn't pass TR value */
- reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetTR);
- if (rel->type != VMI_RELOCATION_NONE) {
- BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
- vmi_ops.set_tr = (void *)rel->eip;
- paravirt_ops.load_tr_desc = vmi_set_tr;
- }
+ /* TR interface doesn't pass TR value, wrap */
+ para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR);
/* LDT is special, too */
- reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetLDT);
- if (rel->type != VMI_RELOCATION_NONE) {
- BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
- vmi_ops._set_ldt = (void *)rel->eip;
- paravirt_ops.set_ldt = vmi_set_ldt;
- }
+ para_wrap(set_ldt, vmi_set_ldt, _set_ldt, SetLDT);
para_fill(load_gdt, SetGDT);
para_fill(load_idt, SetIDT);
para_fill(write_ldt_entry, WriteLDTEntry);
para_fill(write_gdt_entry, WriteGDTEntry);
para_fill(write_idt_entry, WriteIDTEntry);
- reloc = call_vrom_long_func(vmi_rom, get_reloc,
- VMI_CALL_UpdateKernelStack);
- if (rel->type != VMI_RELOCATION_NONE) {
- BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
- vmi_ops.set_kernel_stack = (void *)rel->eip;
- paravirt_ops.load_esp0 = vmi_load_esp0;
- }
-
+ para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
para_fill(set_iopl_mask, SetIOPLMask);
- paravirt_ops.io_delay = (void *)vmi_nop;
- if (!disable_nodelay) {
- paravirt_ops.const_udelay = (void *)vmi_nop;
- }
-
+ para_fill(io_delay, IODelay);
para_fill(set_lazy_mode, SetLazyMode);
- reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_FlushTLB);
- if (rel->type != VMI_RELOCATION_NONE) {
- vmi_ops.flush_tlb = (void *)rel->eip;
- paravirt_ops.flush_tlb_user = vmi_flush_tlb_user;
- paravirt_ops.flush_tlb_kernel = vmi_flush_tlb_kernel;
- }
+ /* user and kernel flush are just handled with different flags to FlushTLB */
+ para_wrap(flush_tlb_user, vmi_flush_tlb_user, flush_tlb, FlushTLB);
+ para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, flush_tlb, FlushTLB);
para_fill(flush_tlb_single, InvalPage);
/*
vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif
- vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
- vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
- vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
- paravirt_ops.alloc_pt = vmi_allocate_pt;
- paravirt_ops.alloc_pd = vmi_allocate_pd;
- paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
- paravirt_ops.release_pt = vmi_release_pt;
- paravirt_ops.release_pd = vmi_release_pd;
- paravirt_ops.set_pte = vmi_set_pte;
- paravirt_ops.set_pte_at = vmi_set_pte_at;
- paravirt_ops.set_pmd = vmi_set_pmd;
- paravirt_ops.pte_update = vmi_update_pte;
- paravirt_ops.pte_update_defer = vmi_update_pte_defer;
+ if (vmi_ops.set_pte) {
+ paravirt_ops.set_pte = vmi_set_pte;
+ paravirt_ops.set_pte_at = vmi_set_pte_at;
+ paravirt_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
- paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
- paravirt_ops.set_pte_present = vmi_set_pte_present;
- paravirt_ops.set_pud = vmi_set_pud;
- paravirt_ops.pte_clear = vmi_pte_clear;
- paravirt_ops.pmd_clear = vmi_pmd_clear;
+ paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
+ paravirt_ops.set_pte_present = vmi_set_pte_present;
+ paravirt_ops.set_pud = vmi_set_pud;
+ paravirt_ops.pte_clear = vmi_pte_clear;
+ paravirt_ops.pmd_clear = vmi_pmd_clear;
#endif
+ }
+
+ if (vmi_ops.update_pte) {
+ paravirt_ops.pte_update = vmi_update_pte;
+ paravirt_ops.pte_update_defer = vmi_update_pte_defer;
+ }
+
+ vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
+ if (vmi_ops.allocate_page) {
+ paravirt_ops.alloc_pt = vmi_allocate_pt;
+ paravirt_ops.alloc_pd = vmi_allocate_pd;
+ paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
+ }
+
+ vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
+ if (vmi_ops.release_page) {
+ paravirt_ops.release_pt = vmi_release_pt;
+ paravirt_ops.release_pd = vmi_release_pd;
+ }
+ para_wrap(map_pt_hook, vmi_map_pt_hook, set_linear_mapping,
+ SetLinearMapping);
+
/*
* These MUST always be patched. Don't support indirect jumps
* through these operations, as the VMI interface may use either
paravirt_ops.iret = (void *)0xbadbab0;
#ifdef CONFIG_SMP
- paravirt_ops.startup_ipi_hook = vmi_startup_ipi_hook;
- vmi_ops.set_initial_ap_state = vmi_get_function(VMI_CALL_SetInitialAPState);
+ para_wrap(startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
- paravirt_ops.apic_read = vmi_get_function(VMI_CALL_APICRead);
- paravirt_ops.apic_write = vmi_get_function(VMI_CALL_APICWrite);
- paravirt_ops.apic_write_atomic = vmi_get_function(VMI_CALL_APICWrite);
+ para_fill(apic_read, APICRead);
+ para_fill(apic_write, APICWrite);
+ para_fill(apic_write_atomic, APICWrite);
#endif
/*
* Check for VMI timer functionality by probing for a cycle frequency method
*/
reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
- if (rel->type != VMI_RELOCATION_NONE) {
+ if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
vmi_timer_ops.get_cycle_counter =
vmi_get_function(VMI_CALL_GetCycleCounter);
paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm;
paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm;
#endif
- custom_sched_clock = vmi_sched_clock;
+ paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
+ paravirt_ops.get_cpu_khz = vmi_cpu_khz;
+
+ /* We have true wallclock functions; disable CMOS clock sync */
+ no_sync_cmos_clock = 1;
+ } else {
+ disable_noidle = 1;
+ disable_vmi_timer = 1;
}
+ /* No idle HZ mode only works if VMI timer and no idle is enabled */
+ if (disable_noidle || disable_vmi_timer)
+ para_fill(safe_halt, Halt);
+ else
+ para_wrap(safe_halt, vmi_safe_halt, halt, Halt);
+
/*
* Alternative instruction rewriting doesn't happen soon enough
* to convert VMI_IRET to a call instead of a jump; so we have
local_irq_save(flags);
activate_vmi();
-#ifdef CONFIG_SMP
+
+#ifdef CONFIG_X86_IO_APIC
+ /* This is virtual hardware; timer routing is wired correctly */
no_timer_check = 1;
#endif
local_irq_restore(flags & X86_EFLAGS_IF);
if (!arg)
return -EINVAL;
- if (!strcmp(arg, "disable_nodelay"))
- disable_nodelay = 1;
- else if (!strcmp(arg, "disable_pge")) {
+ if (!strcmp(arg, "disable_pge")) {
clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
disable_pge = 1;
} else if (!strcmp(arg, "disable_pse")) {
} else if (!strcmp(arg, "disable_mtrr")) {
clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
disable_mtrr = 1;
- }
+ } else if (!strcmp(arg, "disable_timer")) {
+ disable_vmi_timer = 1;
+ disable_noidle = 1;
+ } else if (!strcmp(arg, "disable_noidle"))
+ disable_noidle = 1;
return 0;
}
static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id);
static struct irqaction vmi_timer_irq = {
- vmi_timer_interrupt,
- SA_INTERRUPT,
- CPU_MASK_NONE,
- "VMI-alarm",
- NULL,
- NULL
+ .handler = vmi_timer_interrupt,
+ .flags = IRQF_DISABLED,
+ .mask = CPU_MASK_NONE,
+ .name = "VMI-alarm",
};
/* Alarm rate */
ts->tv_sec = wallclock;
}
-static void update_xtime_from_wallclock(void)
-{
- struct timespec ts;
- vmi_get_wallclock_ts(&ts);
- do_settimeofday(&ts);
-}
-
unsigned long vmi_get_wallclock(void)
{
struct timespec ts;
return -1;
}
-unsigned long long vmi_sched_clock(void)
+unsigned long long vmi_get_sched_cycles(void)
{
return read_available_cycles();
}
+unsigned long vmi_cpu_khz(void)
+{
+ unsigned long long khz;
+
+ khz = vmi_timer_ops.get_cycle_frequency();
+ (void)do_div(khz, 1000);
+ return khz;
+}
+
void __init vmi_time_init(void)
{
unsigned long long cycles_per_sec, cycles_per_msec;
set_intr_gate(LOCAL_TIMER_VECTOR, apic_vmi_timer_interrupt);
#endif
- no_sync_cmos_clock = 1;
-
- vmi_get_wallclock_ts(&xtime);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
-
real_cycles_accounted_system = read_real_cycles();
- update_xtime_from_wallclock();
per_cpu(process_times_cycles_accounted_cpu, 0) = read_available_cycles();
cycles_per_sec = vmi_timer_ops.get_cycle_frequency();
-
cycles_per_jiffy = cycles_per_sec;
(void)do_div(cycles_per_jiffy, HZ);
cycles_per_alarm = cycles_per_sec;
(void)do_div(cycles_per_alarm, alarm_hz);
cycles_per_msec = cycles_per_sec;
(void)do_div(cycles_per_msec, 1000);
- cpu_khz = cycles_per_msec;
printk(KERN_WARNING "VMI timer cycles/sec = %llu ; cycles/jiffy = %llu ;"
"cycles/alarm = %llu\n", cycles_per_sec, cycles_per_jiffy,
/* Initialize the time accounting variables for an AP on an SMP system.
* Also, set the local alarm for the AP. */
-void __init vmi_timer_setup_secondary_alarm(void)
+void __devinit vmi_timer_setup_secondary_alarm(void)
{
int cpu = smp_processor_id();
cycles_not_accounted = cur_real_cycles - real_cycles_accounted_system;
while (cycles_not_accounted >= cycles_per_jiffy) {
- /* systems wide jiffies and wallclock. */
+ /* systems wide jiffies. */
do_timer(1);
cycles_not_accounted -= cycles_per_jiffy;
real_cycles_accounted_system += cycles_per_jiffy;
}
- if (vmi_timer_ops.wallclock_updated())
- update_xtime_from_wallclock();
-
write_sequnlock(&xtime_lock);
}
unsigned long seq, next;
unsigned long long real_cycles_expiry;
int cpu = smp_processor_id();
- int idle;
BUG_ON(!irqs_disabled());
if (sysctl_hz_timer != 0)
cpu_set(cpu, nohz_cpu_mask);
smp_mb();
+
if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
- (next = next_timer_interrupt(), time_before_eq(next, jiffies))) {
+ (next = next_timer_interrupt(),
+ time_before_eq(next, jiffies + HZ/CONFIG_VMI_ALARM_HZ))) {
cpu_clear(cpu, nohz_cpu_mask);
- next = jiffies;
- idle = 0;
- } else
- idle = 1;
+ return 0;
+ }
/* Convert jiffies to the real cycle counter. */
do {
} while (read_seqretry(&xtime_lock, seq));
/* This cpu is going idle. Disable the periodic alarm. */
- if (idle) {
- vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE);
- per_cpu(idle_start_jiffies, cpu) = jiffies;
- }
-
+ vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE);
+ per_cpu(idle_start_jiffies, cpu) = jiffies;
/* Set the real time alarm to expire at the next event. */
vmi_timer_ops.set_alarm(
- VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL,
- real_cycles_expiry, 0);
-
- return idle;
+ VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL,
+ real_cycles_expiry, 0);
+ return 1;
}
static void vmi_reenable_hz_timer(int cpu)
struct getdents32_callback buf;
int error;
+ error = -EFAULT;
+ if (!access_ok(VERIFY_WRITE, dirent, count))
+ goto out;
+
error = -EBADF;
file = fget(fd);
if (!file)
error = buf.error;
lastdirent = buf.previous;
if (lastdirent) {
- error = -EINVAL;
if (put_user(file->f_pos, &lastdirent->d_off))
- goto out_putf;
- error = count - buf.count;
+ error = -EFAULT;
+ else
+ error = count - buf.count;
}
out_putf:
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
BLANK();
* Skip non-WB memory and ignore empty memory ranges.
*/
#include <linux/module.h>
+#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
if (!is_memory_available(md))
continue;
+#ifdef CONFIG_CRASH_DUMP
+ /* saved_max_pfn should ignore max_addr= command line arg */
+ if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
+ saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
+#endif
/*
* Round ends inward to granule boundaries
* Give trimmings to uncached allocator
return ~0UL;
}
#endif
+
+#ifdef CONFIG_PROC_VMCORE
+/* Locate the size of the EFI memory descriptor at a certain address */
+unsigned long
+vmcore_find_descriptor_size (unsigned long address)
+{
+ void *efi_map_start, *efi_map_end, *p;
+ efi_memory_desc_t *md;
+ u64 efi_desc_size;
+ unsigned long ret = 0;
+
+ efi_map_start = __va(ia64_boot_param->efi_memmap);
+ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+ efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+ md = p;
+ if (efi_wb(md) && md->type == EFI_LOADER_DATA
+ && md->phys_addr == address) {
+ ret = efi_md_size(md);
+ break;
+ }
+ }
+
+ if (ret == 0)
+ printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");
+
+ return ret;
+}
+#endif
* probably broke it along the way... ;-)
* 13-Jul-04 clameter Implement fsys_clock_gettime and revise fsys_gettimeofday to make
* it capable of using memory based clocks without falling back to C code.
+ * 08-Feb-07 Fenghua Yu Implement fsys_getcpu.
+ *
*/
#include <asm/asmmacro.h>
#endif
END(fsys_rt_sigprocmask)
+/*
+ * fsys_getcpu doesn't use the third parameter in this implementation. It reads
+ * current_thread_info()->cpu and the corresponding node in cpu_to_node_map.
+ */
+ENTRY(fsys_getcpu)
+ .prologue
+ .altrp b6
+ .body
+ ;;
+ add r2=TI_FLAGS+IA64_TASK_SIZE,r16
+ tnat.nz p6,p0 = r32 // guard against NaT argument
+ add r3=TI_CPU+IA64_TASK_SIZE,r16
+ ;;
+ ld4 r3=[r3] // M r3 = thread_info->cpu
+ ld4 r2=[r2] // M r2 = thread_info->flags
+(p6) br.cond.spnt.few .fail_einval // B
+ ;;
+ tnat.nz p7,p0 = r33 // I guard against NaT argument
+(p7) br.cond.spnt.few .fail_einval // B
+#ifdef CONFIG_NUMA
+ movl r17=cpu_to_node_map
+ ;;
+EX(.fail_efault, probe.w.fault r32, 3) // M This takes 5 cycles
+EX(.fail_efault, probe.w.fault r33, 3) // M This takes 5 cycles
+ shladd r18=r3,1,r17
+ ;;
+ ld2 r20=[r18] // r20 = cpu_to_node_map[cpu]
+ and r2 = TIF_ALLWORK_MASK,r2
+ ;;
+ cmp.ne p8,p0=0,r2
+(p8) br.spnt.many fsys_fallback_syscall
+ ;;
+ ;;
+EX(.fail_efault, st4 [r32] = r3)
+EX(.fail_efault, st2 [r33] = r20)
+ mov r8=0
+ ;;
+#else
+EX(.fail_efault, probe.w.fault r32, 3) // M This takes 5 cycles
+EX(.fail_efault, probe.w.fault r33, 3) // M This takes 5 cycles
+ and r2 = TIF_ALLWORK_MASK,r2
+ ;;
+ cmp.ne p8,p0=0,r2
+(p8) br.spnt.many fsys_fallback_syscall
+ ;;
+EX(.fail_efault, st4 [r32] = r3)
+EX(.fail_efault, st2 [r33] = r0)
+ mov r8=0
+ ;;
+#endif
+ FSYS_RETURN
+END(fsys_getcpu)
+
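A quick way to exercise the new fast path from user space is to invoke getcpu
directly through syscall(2); a minimal sketch, assuming SYS_getcpu is defined
in the installed headers:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned int cpu = 0, node = 0;

	/* the third (cache) argument is unused by this implementation */
	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
		printf("cpu %u, node %u\n", cpu, node);
	return 0;
}
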
ENTRY(fsys_fallback_syscall)
.prologue
.altrp b6
data8 0 // timer_delete
data8 0 // clock_settime
data8 fsys_clock_gettime // clock_gettime
+ data8 0 // clock_getres // 1255
+ data8 0 // clock_nanosleep
+ data8 0 // fstatfs64
+ data8 0 // statfs64
+ data8 0 // mbind
+ data8 0 // get_mempolicy // 1260
+ data8 0 // set_mempolicy
+ data8 0 // mq_open
+ data8 0 // mq_unlink
+ data8 0 // mq_timedsend
+ data8 0 // mq_timedreceive // 1265
+ data8 0 // mq_notify
+ data8 0 // mq_getsetattr
+ data8 0 // kexec_load
+ data8 0 // vserver
+ data8 0 // waitid // 1270
+ data8 0 // add_key
+ data8 0 // request_key
+ data8 0 // keyctl
+ data8 0 // ioprio_set
+ data8 0 // ioprio_get // 1275
+ data8 0 // move_pages
+ data8 0 // inotify_init
+ data8 0 // inotify_add_watch
+ data8 0 // inotify_rm_watch
+ data8 0 // migrate_pages // 1280
+ data8 0 // openat
+ data8 0 // mkdirat
+ data8 0 // mknodat
+ data8 0 // fchownat
+ data8 0 // futimesat // 1285
+ data8 0 // newfstatat
+ data8 0 // unlinkat
+ data8 0 // renameat
+ data8 0 // linkat
+ data8 0 // symlinkat // 1290
+ data8 0 // readlinkat
+ data8 0 // fchmodat
+ data8 0 // faccessat
+ data8 0
+ data8 0 // 1295
+ data8 0 // unshare
+ data8 0 // splice
+ data8 0 // set_robust_list
+ data8 0 // get_robust_list
+ data8 0 // sync_file_range // 1300
+ data8 0 // tee
+ data8 0 // vmsplice
+ data8 0
+ data8 fsys_getcpu // getcpu // 1304
// fill in zeros for the remaining entries
.zero:
#define iosapic_disable_level_irq mask_irq
#define iosapic_ack_level_irq nop
-struct hw_interrupt_type irq_type_iosapic_level = {
+struct irq_chip irq_type_iosapic_level = {
.name = "IO-SAPIC-level",
.startup = iosapic_startup_level_irq,
.shutdown = iosapic_shutdown_level_irq,
.disable = iosapic_disable_level_irq,
.ack = iosapic_ack_level_irq,
.end = iosapic_end_level_irq,
+ .mask = mask_irq,
+ .unmask = unmask_irq,
.set_affinity = iosapic_set_affinity
};
#define iosapic_disable_edge_irq nop
#define iosapic_end_edge_irq nop
-struct hw_interrupt_type irq_type_iosapic_edge = {
+struct irq_chip irq_type_iosapic_edge = {
.name = "IO-SAPIC-edge",
.startup = iosapic_startup_edge_irq,
.shutdown = iosapic_disable_edge_irq,
.disable = iosapic_disable_edge_irq,
.ack = iosapic_ack_edge_irq,
.end = iosapic_end_edge_irq,
+ .mask = mask_irq,
+ .unmask = unmask_irq,
.set_affinity = iosapic_set_affinity
};
* allocate a sampling buffer and remaps it into the user address space of the task
*/
static int
-pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
+pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma = NULL;
* partially initialize the vma for the sampling buffer
*/
vma->vm_mm = mm;
+ vma->vm_file = filp;
vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
goto error;
}
+ get_file(filp);
+
/*
* now insert the vma in the vm list for the process, must be
* done with mmap lock held
}
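
The point of pairing vma->vm_file with get_file() above is reference counting:
a vma that publishes a struct file pointer must own a reference, because the
generic unmap path drops one with fput(). As a sketch, using a hypothetical
helper name:

/* Hypothetical helper, not the perfmon code itself. */
static void attach_file_to_vma(struct vm_area_struct *vma, struct file *filp)
{
	vma->vm_file = filp;
	get_file(filp);	/* balanced by fput() when the vma is torn down */
}
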
static int
-pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
+pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
unsigned int cpu, pfarg_context_t *arg)
{
pfm_buffer_fmt_t *fmt = NULL;
/*
* buffer is always remapped into the caller's address space
*/
- ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
+ ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
if (ret) goto error;
/* keep track of user address of buffer */
* does the user want to sample?
*/
if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
- ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
+ ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
if (ret) goto buffer_error;
}
.name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
-extern void efi_initialize_iomem_resources(struct resource *,
- struct resource *);
extern char _text[], _end[], _etext[];
unsigned long ia64_max_cacheline_size;
}
#endif
+#ifdef CONFIG_PROC_VMCORE
+ if (reserve_elfcorehdr(&rsvd_region[n].start,
+ &rsvd_region[n].end) == 0)
+ n++;
+#endif
+
efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
n++;
return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);
+
+int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
+{
+ unsigned long length;
+
+ /* We get the address using the kernel command line,
+ * but the size is extracted from the EFI tables.
+ * Both address and size are required for reservation
+ * to work properly.
+ */
+
+ if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
+ return -EINVAL;
+
+ if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
+ elfcorehdr_addr = ELFCORE_ADDR_MAX;
+ return -EINVAL;
+ }
+
+ *start = (unsigned long)__va(elfcorehdr_addr);
+ *end = *start + length;
+ return 0;
+}
+
#endif /* CONFIG_PROC_VMCORE */
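
Putting these hunks together: the capture kernel receives only the header
address on its command line (typically appended by kexec-tools), e.g. a
hypothetical

	elfcorehdr=0x40000000

and reserve_elfcorehdr() recovers the missing size by matching that address
against an EFI_LOADER_DATA descriptor through vmcore_find_descriptor_size().
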
void __init
checksum.o clear_page.o csum_partial_copy.o \
clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
flush.o ip_fast_csum.o do_csum.o \
- memset.o strlen.o
+ memset.o strlen.o xor.o
lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
lib-$(CONFIG_PERFMON) += carta_random.o
-lib-$(CONFIG_MD_RAID456) += xor.o
AFLAGS___divdi3.o =
AFLAGS___udivdi3.o = -DUNSIGNED
find_initrd();
-#ifdef CONFIG_CRASH_DUMP
- /* If we are doing a crash dump, we still need to know the real mem
- * size before original memory map is reset. */
- saved_max_pfn = max_pfn;
-#endif
}
#ifdef CONFIG_SMP
max_pfn = max_low_pfn;
find_initrd();
-
-#ifdef CONFIG_CRASH_DUMP
- /* If we are doing a crash dump, we still need to know the real mem
- * size before original memory map is reset. */
- saved_max_pfn = max_pfn;
-#endif
}
#ifdef CONFIG_SMP
(void)sn_retarget_vector(sn_irq_info, nasid, slice);
}
-struct hw_interrupt_type irq_type_sn = {
+static void
+sn_mask_irq(unsigned int irq)
+{
+}
+
+static void
+sn_unmask_irq(unsigned int irq)
+{
+}
+
+struct irq_chip irq_type_sn = {
.name = "SN hub",
.startup = sn_startup_irq,
.shutdown = sn_shutdown_irq,
.disable = sn_disable_irq,
.ack = sn_ack_irq,
.end = sn_end_irq,
+ .mask = sn_mask_irq,
+ .unmask = sn_unmask_irq,
.set_affinity = sn_set_affinity_irq
};
{
}
-void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *));
+void (*mach_sched_init) (irq_handler_t handler);
void (*mach_tick)( void );
/* machine dependent keyboard functions */
int (*mach_keyb_init) (void);
/* machine dependent timer functions */
unsigned long (*mach_gettimeoffset) (void);
void (*mach_gettod) (int*, int*, int*, int*, int*, int*);
-int (*mach_hwclk) (int, struct hwclk_time*);
+int (*mach_hwclk) (int, struct rtc_time*);
int (*mach_set_clock_mmss) (unsigned long);
void (*mach_mksound)( unsigned int count, unsigned int ticks );
void (*mach_reset)( void );
/* The number of spurious interrupts */
volatile unsigned int num_spurious;
-unsigned int local_bh_count[NR_CPUS];
unsigned int local_irq_count[NR_CPUS];
static irqreturn_t default_irq_handler(int irq, void *ptr)
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
+#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/irq.h>
asmlinkage void trap45(void);
asmlinkage void trap46(void);
asmlinkage void trap47(void);
-asmlinkage irqreturn_t bad_interrupt(int, void *, struct pt_regs *);
+asmlinkage irqreturn_t bad_interrupt(int, void *);
asmlinkage irqreturn_t inthandler(void);
asmlinkage irqreturn_t inthandler1(void);
asmlinkage irqreturn_t inthandler2(void);
int request_irq(
unsigned int irq,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ irq_handler_t handler,
unsigned long flags,
const char *devname,
void *dev_id)
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/pgtable.h>
/***************************************************************************/
-void m68328_timer_init(irqreturn_t (*timer_routine) (int, void *, struct pt_regs *))
+void m68328_timer_init(irq_handler_t timer_routine)
{
/* disable timer 1 */
TCTL = 0;
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/console.h>
+#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/system.h>
extern void config_M68360_irq(void);
-void BSP_sched_init(void (*timer_routine)(int, void *, struct pt_regs *))
+void BSP_sched_init(irq_handler_t timer_routine)
{
unsigned char prescaler;
unsigned short tgcr_save;
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/console.h>
+#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/system.h>
/***************************************************************************/
-void m68328_timer_init(irqreturn_t (*timer_routine) (int, void *, struct pt_regs *));
+void m68328_timer_init(irq_handler_t timer_routine);
void m68328_timer_tick(void);
unsigned long m68328_timer_gettimeoffset(void);
void m68328_timer_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec);
#include <linux/console.h>
#include <linux/kd.h>
#include <linux/netdevice.h>
+#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/system.h>
/***************************************************************************/
-void m68328_timer_init(irqreturn_t (*timer_routine) (int, void *, struct pt_regs *));
+void m68328_timer_init(irq_handler_t timer_routine);
void m68328_timer_tick(void);
unsigned long m68328_timer_gettimeoffset(void);
void m68328_timer_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec);
select SYS_SUPPORTS_LITTLE_ENDIAN
config BASLER_EXCITE
- bool "Basler eXcite smart camera support"
+ bool "Basler eXcite smart camera"
select DMA_COHERENT
select HW_HAS_PCI
select IRQ_CPU
select SYS_SUPPORTS_KGDB
help
The eXcite is a smart camera platform manufactured by
- Basler Vision Technologies AG
+ Basler Vision Technologies AG.
config BASLER_EXCITE_PROTOTYPE
bool "Support for pre-release units"
select IRQ_CPU
select MIPS_GT64111
select SYS_HAS_CPU_NEVADA
+ select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
select SYS_SUPPORTS_LITTLE_ENDIAN
bool "DECstations"
select BOOT_ELF32
select DMA_NONCOHERENT
- select EARLY_PRINTK
+ select SYS_HAS_EARLY_PRINTK
select IRQ_CPU
select SYS_HAS_CPU_R3000
select SYS_HAS_CPU_R4X00
config LASAT
bool "LASAT Networks platforms"
select DMA_NONCOHERENT
+ select SYS_HAS_EARLY_PRINTK
select HW_HAS_PCI
select MIPS_GT64120
select MIPS_NILE4
bool "MIPS Atlas board"
select BOOT_ELF32
select DMA_NONCOHERENT
+ select SYS_HAS_EARLY_PRINTK
select IRQ_CPU
select HW_HAS_PCI
select MIPS_BOARDS_GEN
select SYS_HAS_CPU_MIPS64_R1
select SYS_HAS_CPU_NEVADA
select SYS_HAS_CPU_RM7000
+ select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
depends on EXPERIMENTAL
select IRQ_CPU
select DMA_NONCOHERENT
+ select SYS_HAS_EARLY_PRINTK
select MIPS_BOARDS_GEN
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
config MIPS_SIM
bool 'MIPS simulator (MIPSsim)'
select DMA_NONCOHERENT
+ select SYS_HAS_EARLY_PRINTK
select IRQ_CPU
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
+ select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
select RM7000_CPU_SCACHE
select SWAP_IO_SPACE
select SYS_HAS_CPU_RM9000
+ select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select IRQ_CPU_RM9K
select SWAP_IO_SPACE
select SYS_HAS_CPU_RM9000
+ select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SWAP_IO_SPACE
select SYS_HAS_CPU_R4X00
select SYS_HAS_CPU_R5000
+ select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select ARC64
select BOOT_ELF64
select DMA_IP27
- select EARLY_PRINTK
+ select SYS_HAS_EARLY_PRINTK
select HW_HAS_PCI
select NR_CPUS_DEFAULT_64
select PCI_DOMAINS
select SYS_HAS_CPU_R5000
select SYS_HAS_CPU_R10000
select R5000_CPU_SCACHE
+ select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
select SYS_SUPPORTS_BIG_ENDIAN
source "arch/mips/tx4938/Kconfig"
source "arch/mips/vr41xx/Kconfig"
source "arch/mips/philips/pnx8550/common/Kconfig"
-source "arch/mips/cobalt/Kconfig"
endmenu
bool
config EARLY_PRINTK
+ bool "Early printk" if EMBEDDED && DEBUG_KERNEL
+ depends on SYS_HAS_EARLY_PRINTK
+ default y
+ help
+ This option enables special console drivers which allow the kernel
+ to print messages very early in the bootup process.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation
+ it is not recommended because it looks ugly on some machines and
+ doesn't cooperate with an X server. You should normally say N here,
+ unless you want to debug such a crash.
+
+config SYS_HAS_EARLY_PRINTK
bool
config GENERIC_ISA_DMA
select DMA_NONCOHERENT
select HW_HAS_PCI
select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select GENERIC_HARDIRQS_NO__DO_IRQ
select SYS_SUPPORTS_KGDB
config ARC_CONSOLE
bool "ARC console support"
- depends on SGI_IP22 || SNI_RM
+ depends on SGI_IP22 || (SNI_RM && CPU_LITTLE_ENDIAN)
config ARC_MEMORY
bool
source "kernel/Kconfig.preempt"
-config RTC_DS1742
- bool "DS1742 BRAM/RTC support"
- depends on TOSHIBA_JMR3927 || TOSHIBA_RBTX4927
-
config MIPS_INSANE_LARGE
bool "Support for large 64-bit configurations"
depends on CPU_R10000 && 64BIT
This option will slow down process creation somewhat.
+config SMTC_IDLE_HOOK_DEBUG
+ bool "Enable additional debug checks before going into CPU idle loop"
+ depends on DEBUG_KERNEL && MIPS_MT_SMTC
+ help
+ This option enables additional debug checks before going into the
+ CPU idle loop. For details on these checks, see
+ arch/mips/kernel/smtc.c. This debugging option results in significant
+ overhead and should be disabled in production kernels.
+
config KGDB
bool "Remote GDB kernel debugging"
depends on DEBUG_KERNEL && SYS_SUPPORTS_KGDB
ArcWrite(1, &it, 1, &cnt);
bc_enable();
}
-
-char prom_getchar(void)
-{
- ULONG cnt;
- CHAR c;
-
- bc_disable();
- ArcRead(0, &c, 1, &cnt);
- bc_enable();
-
- return c;
-}
-
-void prom_printf(char *fmt, ...)
-{
- va_list args;
- char ppbuf[1024];
- char *bptr;
-
- va_start(args, fmt);
- vsprintf(ppbuf, fmt, args);
-
- bptr = ppbuf;
-
- while (*bptr != 0) {
- if (*bptr == '\n')
- prom_putchar('\r');
-
- prom_putchar(*bptr++);
- }
- va_end(args);
-}
void __init prom_init(void)
{
PSYSTEM_PARAMETER_BLOCK pb = PROMBLOCK;
+
romvec = ROMVECTOR;
+
prom_argc = fw_arg0;
_prom_argv = (LONG *) fw_arg1;
_prom_envp = (LONG *) fw_arg2;
if (pb->magic != 0x53435241) {
- prom_printf("Aieee, bad prom vector magic %08lx\n", pb->magic);
+ printk(KERN_CRIT "Aieee, bad prom vector magic %08lx\n",
+ (unsigned long) pb->magic);
while(1)
;
}
prom_meminit();
#ifdef DEBUG_PROM_INIT
- prom_printf("Press a key to reboot\n");
- prom_getchar();
+ pr_info("Press a key to reboot\n");
+ ArcRead(0, &c, 1, &cnt);
ArcEnterInteractiveMode();
#endif
}
#ifdef DEBUG
int i = 0;
- prom_printf("ARCS MEMORY DESCRIPTOR dump:\n");
+ printk("ARCS MEMORY DESCRIPTOR dump:\n");
p = ArcGetMemoryDescriptor(PROM_NULL_MDESC);
while(p) {
- prom_printf("[%d,%p]: base<%08lx> pages<%08lx> type<%s>\n",
- i, p, p->base, p->pages, mtypes(p->type));
+ printk("[%d,%p]: base<%08lx> pages<%08lx> type<%s>\n",
+ i, p, p->base, p->pages, mtypes(p->type));
p = ArcGetMemoryDescriptor(p);
i++;
}
static void __init
dump_component(pcomponent *p)
{
- prom_printf("[%p]:class<%s>type<%s>flags<%s>ver<%d>rev<%d>",
- p, classes[p->class], types[p->type],
- iflags[p->iflags], p->vers, p->rev);
- prom_printf("key<%08lx>\n\tamask<%08lx>cdsize<%d>ilen<%d>iname<%s>\n",
- p->key, p->amask, (int)p->cdsize, (int)p->ilen, p->iname);
+ printk("[%p]:class<%s>type<%s>flags<%s>ver<%d>rev<%d>",
+ p, classes[p->class], types[p->type],
+ iflags[p->iflags], p->vers, p->rev);
+ printk("key<%08lx>\n\tamask<%08lx>cdsize<%d>ilen<%d>iname<%s>\n",
+ p->key, p->amask, (int)p->cdsize, (int)p->ilen, p->iname);
}
static void __init
#include <asm/pgtable.h>
#include <asm/mach-au1x00/au1000.h>
+extern int (*board_pci_idsel)(unsigned int devsel, int assert);
+int mtx1_pci_idsel(unsigned int devsel, int assert);
+
void board_reset (void)
{
/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
#endif
// initialize sys_pinfunc:
- // disable second ethernet port (SYS_PF_NI2)
- // set U3/GPIO23 to GPIO23 (SYS_PF_U3)
- au_writel( SYS_PF_NI2 | SYS_PF_U3, SYS_PINFUNC );
+ au_writel( SYS_PF_NI2, SYS_PINFUNC );
// initialize GPIO
au_writel( 0xFFFFFFFF, SYS_TRIOUTCLR );
au_writel( 0x00000001, SYS_OUTPUTCLR ); // set M66EN (PCI 66MHz) to OFF
au_writel( 0x00000008, SYS_OUTPUTSET ); // set PCI CLKRUN# to OFF
+ au_writel( 0x00000002, SYS_OUTPUTSET ); // set EXT_IO3 ON
au_writel( 0x00000020, SYS_OUTPUTCLR ); // set eth PHY TX_ER to OFF
// enable LED and set it to green
au_writel( au_readl(GPIO2_DIR) | 0x1800, GPIO2_DIR );
au_writel( 0x18000800, GPIO2_OUTPUT );
+ board_pci_idsel = mtx1_pci_idsel;
+
printk("4G Systems MTX-1 Board\n");
}
+
+int
+mtx1_pci_idsel(unsigned int devsel, int assert)
+{
+#define MTX_IDSEL_ONLY_0_AND_3 0
+#if MTX_IDSEL_ONLY_0_AND_3
+ if (devsel != 0 && devsel != 3) {
+ printk("*** not 0 or 3\n");
+ return 0;
+ }
+#endif
+
+ if (assert && devsel != 0) {
+ // suppress signal to cardbus
+ au_writel( 0x00000002, SYS_OUTPUTCLR ); // set EXT_IO3 OFF
+ }
+ else {
+ au_writel( 0x00000002, SYS_OUTPUTSET ); // set EXT_IO3 ON
+ }
+ au_sync_udelay(1);
+ return 1;
+}
+
#include <asm/mach-au1x00/au1000.h>
char irq_tab_alchemy[][5] __initdata = {
- [0] = { -1, INTA, INTB, INTX, INTX}, /* IDSEL 00 - AdapterA-Slot0 (top) */
+ [0] = { -1, INTA, INTA, INTX, INTX}, /* IDSEL 00 - AdapterA-Slot0 (top) */
[1] = { -1, INTB, INTA, INTX, INTX}, /* IDSEL 01 - AdapterA-Slot1 (bottom) */
[2] = { -1, INTC, INTD, INTX, INTX}, /* IDSEL 02 - AdapterB-Slot0 (top) */
[3] = { -1, INTD, INTC, INTX, INTX}, /* IDSEL 03 - AdapterB-Slot1 (bottom) */
up.irq = TITAN_IRQ;
up.uartclk = TITAN_UART_CLK;
up.regshift = 0;
- up.iotype = UPIO_MEM32;
+ up.iotype = UPIO_RM9000;
up.type = PORT_RM9000;
up.flags = UPF_SHARE_IRQ;
up.line = 0;
+++ /dev/null
-config EARLY_PRINTK
- bool "Early console support"
- depends on MIPS_COBALT
- help
- Provide early console support by direct access to the
- on board UART. The UART must have been previously
- initialised by the boot loader.
obj-$(CONFIG_EARLY_PRINTK) += console.o
obj-$(CONFIG_MTD_PHYSMAP) += mtd.o
-
-EXTRA_AFLAGS := $(CFLAGS)
#include <asm/addrspace.h>
#include <asm/mach-cobalt/cobalt.h>
-static void putchar(int c)
+void prom_putchar(char c)
{
- if(c == '\n')
- putchar('\r');
-
while(!(COBALT_UART[UART_LSR] & UART_LSR_THRE))
;
COBALT_UART[UART_TX] = c;
}
-
-static void cons_write(struct console *c, const char *s, unsigned n)
-{
- while(n-- && *s)
- putchar(*s++);
-}
-
-static struct console cons_info =
-{
- .name = "uart",
- .write = cons_write,
- .flags = CON_PRINTBUFFER | CON_BOOT,
- .index = -1,
-};
-
-void __init cobalt_early_console(void)
-{
- register_console(&cons_info);
-
- printk("Cobalt: early console registered\n");
-}
-
-void __init disable_early_printk(void)
-{
- unregister_console(&cons_info);
-}
.flags = IORESOURCE_IO
};
-static struct resource cobalt_io_resources[] = {
- {
+/*
+ * Cobalt doesn't have PS/2 keyboard/mouse interfaces, so the
+ * keyboard controller is never used.
+ * The PCI-ISA bridge DMA controller is never used either.
+ */
+static struct resource cobalt_reserved_resources[] = {
+ { /* dma1 */
.start = 0x00,
.end = 0x1f,
- .name = "dma1",
- .flags = IORESOURCE_BUSY
- }, {
- .start = 0x40,
- .end = 0x5f,
- .name = "timer",
- .flags = IORESOURCE_BUSY
- }, {
+ .name = "reserved",
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO,
+ },
+ { /* keyboard */
.start = 0x60,
.end = 0x6f,
- .name = "keyboard",
- .flags = IORESOURCE_BUSY
- }, {
+ .name = "reserved",
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO,
+ },
+ { /* dma page reg */
.start = 0x80,
.end = 0x8f,
- .name = "dma page reg",
- .flags = IORESOURCE_BUSY
- }, {
+ .name = "reserved",
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO,
+ },
+ { /* dma2 */
.start = 0xc0,
.end = 0xdf,
- .name = "dma2",
- .flags = IORESOURCE_BUSY
+ .name = "reserved",
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO,
},
};
-#define COBALT_IO_RESOURCES (sizeof(cobalt_io_resources)/sizeof(struct resource))
-
static struct pci_controller cobalt_pci_controller = {
.pci_ops = >64111_pci_ops,
.mem_resource = &cobalt_mem_resource,
/* I/O port resource must include LCD/buttons */
ioport_resource.end = 0x0fffffff;
- /* request I/O space for devices used on all i[345]86 PCs */
- for (i = 0; i < COBALT_IO_RESOURCES; i++)
- request_resource(&ioport_resource, cobalt_io_resources + i);
+ /* These resources have been reserved by the VIA SuperI/O chip. */
+ for (i = 0; i < ARRAY_SIZE(cobalt_reserved_resources); i++)
+ request_resource(&ioport_resource, cobalt_reserved_resources + i);
/* Read the cobalt id register out of the PCI config space */
PCI_CFG_SET(devfn, (VIA_COBALT_BRD_ID_REG & ~0x3));
#endif
if (cobalt_board_id > COBALT_BRD_ID_RAQ1) {
-#ifdef CONFIG_EARLY_PRINTK
- cobalt_early_console();
-#endif
-
#ifdef CONFIG_SERIAL_8250
uart.line = 0;
uart.type = PORT_UNKNOWN;
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
-CONFIG_RTC_DS1742=y
# CONFIG_KEXEC is not set
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
#
# Real Time Clock
#
-# CONFIG_RTC_CLASS is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+
+#
+# RTC drivers
+#
+# CONFIG_RTC_DRV_DS1553 is not set
+CONFIG_RTC_DRV_DS1742=y
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_TEST is not set
+# CONFIG_RTC_DRV_V3020 is not set
#
# DMA Engine support
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.20
-# Tue Feb 20 21:47:41 2007
+# Linux kernel version: 2.6.21-rc1
+# Thu Feb 22 10:38:09 2007
#
CONFIG_MIPS=y
# CONFIG_HWMON is not set
# CONFIG_HWMON_VID is not set
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_SM501=y
+
#
# Multimedia devices
#
#
# Graphics support
#
-# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_BACKLIGHT is not set
# CONFIG_FB_MODE_HELPERS is not set
# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
# CONFIG_FB_CIRRUS is not set
# CONFIG_FB_PM2 is not set
# CONFIG_FB_CYBER2000 is not set
# CONFIG_FB_KYRO is not set
# CONFIG_FB_3DFX is not set
# CONFIG_FB_VOODOO1 is not set
-CONFIG_FB_SMIVGX=y
+# CONFIG_FB_SMIVGX is not set
# CONFIG_FB_TRIDENT is not set
+CONFIG_FB_SM501=y
# CONFIG_FB_VIRTUAL is not set
#
# Logo configuration
#
# CONFIG_LOGO is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
# Sound
obj-$(CONFIG_RUNTIME_DEBUG) += debug.o
obj-$(CONFIG_KGDB) += kgdb_io.o
-
-EXTRA_AFLAGS := $(CFLAGS)
obj-$(CONFIG_PROM_CONSOLE) += promcon.o
obj-$(CONFIG_TC) += tc.o
obj-$(CONFIG_CPU_HAS_WB) += wbflush.o
-
-EXTRA_AFLAGS := $(CFLAGS)
lib-$(CONFIG_32BIT) += locore.o
lib-$(CONFIG_64BIT) += call_o32.o
-
-EXTRA_AFLAGS := $(CFLAGS)
#include <asm/dec/prom.h>
-static void __init prom_console_write(struct console *con, const char *s,
- unsigned int c)
+void prom_putchar(char c)
{
- static char sfmt[] __initdata = "%%%us";
- char fmt[13];
+ char s[2];
- snprintf(fmt, sizeof(fmt), sfmt, c);
- prom_printf(fmt, s);
-}
-
-static struct console promcons __initdata = {
- .name = "prom",
- .write = prom_console_write,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-static int promcons_output __initdata = 0;
-
-void __init register_prom_console(void)
-{
- if (!promcons_output) {
- promcons_output = 1;
- register_console(&promcons);
- }
-}
+ s[0] = c;
+ s[1] = '\0';
-void __init unregister_prom_console(void)
-{
- if (promcons_output) {
- unregister_console(&promcons);
- promcons_output = 0;
- }
+ prom_printf(s);
}
-
-void disable_early_printk(void)
- __attribute__((alias("unregister_prom_console")));
#include "dectypes.h"
-extern unsigned long mips_machgroup;
-extern unsigned long mips_machtype;
-
static const char *dec_system_strings[] = {
[MACH_DSUNKNOWN] "unknown DECstation",
[MACH_DS23100] "DECstation 2100/3100",
#
obj-y += irq.o promcon.o reset.o serialGT.o setup.o
-
-EXTRA_AFLAGS := $(CFLAGS)
}
}
-int prom_getchar(void)
-{
- return 0;
-}
-
static struct console sercons = {
.name = "ttyS",
.write = prom_console_write,
obj-y += irq.o prom.o reset.o setup.o
obj-$(CONFIG_KGDB) += dbg_io.o
-
-EXTRA_AFLAGS := $(CFLAGS)
#
obj-y += irq.o reset.o setup.o time.o pci.o
-
-EXTRA_AFLAGS := $(CFLAGS)
#
obj-y := irq.o jazzdma.o reset.o setup.o
-
-EXTRA_AFLAGS := $(CFLAGS)
# Makefile for the common code of TOSHIBA JMR-TX3927 board
#
-obj-y += prom.o puts.o rtc_ds1742.o
+obj-y += prom.o puts.o
+++ /dev/null
-/*
- * Copyright 2001 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ahennessy@mvista.com
- *
- * arch/mips/jmr3927/common/rtc_ds1742.c
- * Based on arch/mips/ddb5xxx/common/rtc_ds1386.c
- * low-level RTC hookups for s for Dallas 1742 chip.
- *
- * Copyright (C) 2000-2001 Toshiba Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-
-/*
- * This file exports a function, rtc_ds1386_init(), which expects an
- * uncached base address as the argument. It will set the two function
- * pointers expected by the MIPS generic timer code.
- */
-
-#include <linux/bcd.h>
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/rtc.h>
-#include <linux/ds1742rtc.h>
-
-#include <asm/time.h>
-#include <asm/addrspace.h>
-
-#include <asm/debug.h>
-
-#define EPOCH 2000
-
-static unsigned long rtc_base;
-
-static unsigned long
-rtc_ds1742_get_time(void)
-{
- unsigned int year, month, day, hour, minute, second;
- unsigned int century;
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
- rtc_write(RTC_READ, RTC_CONTROL);
- second = BCD2BIN(rtc_read(RTC_SECONDS) & RTC_SECONDS_MASK);
- minute = BCD2BIN(rtc_read(RTC_MINUTES));
- hour = BCD2BIN(rtc_read(RTC_HOURS));
- day = BCD2BIN(rtc_read(RTC_DATE));
- month = BCD2BIN(rtc_read(RTC_MONTH));
- year = BCD2BIN(rtc_read(RTC_YEAR));
- century = BCD2BIN(rtc_read(RTC_CENTURY) & RTC_CENTURY_MASK);
- rtc_write(0, RTC_CONTROL);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- year += century * 100;
-
- return mktime(year, month, day, hour, minute, second);
-}
-extern void to_tm(unsigned long tim, struct rtc_time * tm);
-
-static int
-rtc_ds1742_set_time(unsigned long t)
-{
- struct rtc_time tm;
- u8 year, month, day, hour, minute, second;
- u8 cmos_year, cmos_month, cmos_day, cmos_hour, cmos_minute, cmos_second;
- int cmos_century;
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
- rtc_write(RTC_READ, RTC_CONTROL);
- cmos_second = (u8)(rtc_read(RTC_SECONDS) & RTC_SECONDS_MASK);
- cmos_minute = (u8)rtc_read(RTC_MINUTES);
- cmos_hour = (u8)rtc_read(RTC_HOURS);
- cmos_day = (u8)rtc_read(RTC_DATE);
- cmos_month = (u8)rtc_read(RTC_MONTH);
- cmos_year = (u8)rtc_read(RTC_YEAR);
- cmos_century = rtc_read(RTC_CENTURY) & RTC_CENTURY_MASK;
-
- rtc_write(RTC_WRITE, RTC_CONTROL);
-
- /* convert */
- to_tm(t, &tm);
-
- /* check each field one by one */
- year = BIN2BCD(tm.tm_year - EPOCH);
- if (year != cmos_year) {
- rtc_write(year,RTC_YEAR);
- }
-
- month = BIN2BCD(tm.tm_mon);
- if (month != (cmos_month & 0x1f)) {
- rtc_write((month & 0x1f) | (cmos_month & ~0x1f),RTC_MONTH);
- }
-
- day = BIN2BCD(tm.tm_mday);
- if (day != cmos_day) {
-
- rtc_write(day, RTC_DATE);
- }
-
- if (cmos_hour & 0x40) {
- /* 12 hour format */
- hour = 0x40;
- if (tm.tm_hour > 12) {
- hour |= 0x20 | (BIN2BCD(hour-12) & 0x1f);
- } else {
- hour |= BIN2BCD(tm.tm_hour);
- }
- } else {
- /* 24 hour format */
- hour = BIN2BCD(tm.tm_hour) & 0x3f;
- }
- if (hour != cmos_hour) rtc_write(hour, RTC_HOURS);
-
- minute = BIN2BCD(tm.tm_min);
- if (minute != cmos_minute) {
- rtc_write(minute, RTC_MINUTES);
- }
-
- second = BIN2BCD(tm.tm_sec);
- if (second != cmos_second) {
- rtc_write(second & RTC_SECONDS_MASK,RTC_SECONDS);
- }
-
- /* RTC_CENTURY and RTC_CONTROL share same address... */
- rtc_write(cmos_century, RTC_CONTROL);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- return 0;
-}
-
-void
-rtc_ds1742_init(unsigned long base)
-{
- u8 cmos_second;
-
- /* remember the base */
- rtc_base = base;
- db_assert((rtc_base & 0xe0000000) == KSEG1);
-
- /* set the function pointers */
- rtc_mips_get_time = rtc_ds1742_get_time;
- rtc_mips_set_time = rtc_ds1742_set_time;
-
- /* clear oscillator stop bit */
- rtc_write(RTC_READ, RTC_CONTROL);
- cmos_second = (u8)(rtc_read(RTC_SECONDS) & RTC_SECONDS_MASK);
- rtc_write(RTC_WRITE, RTC_CONTROL);
- rtc_write(cmos_second, RTC_SECONDS); /* clear msb */
- rtc_write(0, RTC_CONTROL);
-}
obj-y += init.o irq.o setup.o
obj-$(CONFIG_RUNTIME_DEBUG) += debug.o
obj-$(CONFIG_KGDB) += kgdb_io.o
-
-EXTRA_AFLAGS := $(CFLAGS)
#include <linux/param.h> /* for HZ */
#include <linux/delay.h>
#include <linux/pm.h>
+#include <linux/platform_device.h>
#ifdef CONFIG_SERIAL_TXX9
#include <linux/tty.h>
#include <linux/serial.h>
static inline void do_reset(void)
{
-#ifdef CONFIG_TC35815
- extern void tc35815_killall(void);
- tc35815_killall();
-#endif
#if 1 /* Resetting PCI bus */
jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR);
jmr3927_ioc_reg_out(JMR3927_IOC_RESET_PCI, JMR3927_IOC_RESET_ADDR);
return jiffies * (JMR3927_TIMER_CLK / HZ) + jmr3927_tmrptr->trr;
}
-#define USE_RTC_DS1742
-#ifdef USE_RTC_DS1742
-extern void rtc_ds1742_init(unsigned long base);
-#endif
static void __init jmr3927_time_init(void)
{
clocksource_mips.read = jmr3927_hpt_read;
mips_hpt_frequency = JMR3927_TIMER_CLK;
-#ifdef USE_RTC_DS1742
- if (jmr3927_have_nvram()) {
- rtc_ds1742_init(JMR3927_IOC_NVRAMB_ADDR);
- }
-#endif
}
void __init plat_timer_setup(struct irqaction *irq)
printk("TX3927 D-Cache WriteBack (CWF) .\n");
}
}
+
+/* This trick makes rtc-ds1742 driver usable as is. */
+unsigned long __swizzle_addr_b(unsigned long port)
+{
+ if ((port & 0xffff0000) != JMR3927_IOC_NVRAMB_ADDR)
+ return port;
+ port = (port & 0xffff0000) | ((port & 0x7fff) << 1);
+#ifdef __BIG_ENDIAN
+ return port;
+#else
+ return port | 1;
+#endif
+}
+EXPORT_SYMBOL(__swizzle_addr_b);
+
+static int __init jmr3927_rtc_init(void)
+{
+ struct resource res = {
+ .start = JMR3927_IOC_NVRAMB_ADDR - IO_BASE,
+ .end = JMR3927_IOC_NVRAMB_ADDR - IO_BASE + 0x800 - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ struct platform_device *dev;
+ if (!jmr3927_have_nvram())
+ return -ENODEV;
+ dev = platform_device_register_simple("ds1742", -1, &res, 1);
+ return IS_ERR(dev) ? PTR_ERR(dev) : 0;
+}
+device_initcall(jmr3927_rtc_init);
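
To make the swizzle concrete: rtc-ds1742 computes plain byte offsets into its
ioremap()ed window, while on this board each NVRAM byte occupies two bus
addresses, so the offset is doubled and, on little-endian kernels, steered
onto the odd byte lane. With a hypothetical window base of 0xb4000000:

	__swizzle_addr_b(0xb4000000 + 3); /* base + 6 (BE) or base + 7 (LE) */
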
obj-$(CONFIG_I8253) += i8253.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
-
-EXTRA_AFLAGS := $(CFLAGS)
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2003, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * written by Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/console.h>
+#include <linux/init.h>
+
+extern void prom_putchar(char);
+
+static void early_console_write(struct console *con, const char *s, unsigned n)
+{
+ while (n-- && *s) {
+ if (*s == '\n')
+ prom_putchar('\r');
+ prom_putchar(*s);
+ s++;
+ }
+}
+
+static struct console early_console = {
+ .name = "early",
+ .write = early_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1
+};
+
+void __init setup_early_printk(void)
+{
+ register_console(&early_console);
+}
+
+void __init disable_early_printk(void)
+{
+ unregister_console(&early_console);
+}
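
With this generic driver in place, a platform only needs to select
SYS_HAS_EARLY_PRINTK and supply a blocking prom_putchar(); the Cobalt
conversion above is the model. A minimal sketch for a hypothetical
memory-mapped 8250-style UART (the MY_UART_BASE address is made up):

#include <linux/serial_reg.h>

#define MY_UART_BASE ((volatile unsigned char *)0xbfd003f8) /* hypothetical */

void prom_putchar(char c)
{
	/* busy-wait until the transmit holding register is empty */
	while (!(MY_UART_BASE[UART_LSR] & UART_LSR_THRE))
		;
	MY_UART_BASE[UART_TX] = c;
}
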
return error;
}
-asmlinkage long
-sysn32_waitid(int which, compat_pid_t pid,
- siginfo_t __user *uinfo, int options,
- struct compat_rusage __user *uru)
-{
- struct rusage ru;
- long ret;
- mm_segment_t old_fs = get_fs();
- int si_signo;
-
- if (!access_ok(VERIFY_WRITE, uinfo, sizeof(*uinfo)))
- return -EFAULT;
-
- set_fs (KERNEL_DS);
- ret = sys_waitid(which, pid, uinfo, options,
- uru ? (struct rusage __user *) &ru : NULL);
- set_fs (old_fs);
-
- if (__get_user(si_signo, &uinfo->si_signo))
- return -EFAULT;
- if (ret < 0 || si_signo == 0)
- return ret;
-
- if (uru)
- ret = put_compat_rusage(&ru, uru);
- return ret;
-}
-
#define RLIM_INFINITY32 0x7fffffff
#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
flags);
}
-/* Argument list sizes for sys_socketcall */
-#define AL(x) ((x) * sizeof(unsigned int))
-static unsigned char socketcall_nargs[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
- AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
- AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)};
-#undef AL
-
-/*
- * System call vectors.
- *
- * Argument checking cleaned up. Saved 20% in size.
- * This function doesn't need to set the kernel lock because
- * it is set by the callees.
- */
-
-asmlinkage long sys32_socketcall(int call, unsigned int __user *args32)
-{
- unsigned int a[6];
- unsigned int a0,a1;
- int err;
-
- extern asmlinkage long sys_socket(int family, int type, int protocol);
- extern asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
- extern asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen);
- extern asmlinkage long sys_listen(int fd, int backlog);
- extern asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen);
- extern asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len);
- extern asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len);
- extern asmlinkage long sys_socketpair(int family, int type, int protocol, int __user *usockvec);
- extern asmlinkage long sys_send(int fd, void __user * buff, size_t len, unsigned flags);
- extern asmlinkage long sys_sendto(int fd, void __user * buff, size_t len, unsigned flags,
- struct sockaddr __user *addr, int addr_len);
- extern asmlinkage long sys_recv(int fd, void __user * ubuf, size_t size, unsigned flags);
- extern asmlinkage long sys_recvfrom(int fd, void __user * ubuf, size_t size, unsigned flags,
- struct sockaddr __user *addr, int __user *addr_len);
- extern asmlinkage long sys_shutdown(int fd, int how);
- extern asmlinkage long sys_setsockopt(int fd, int level, int optname, char __user *optval, int optlen);
- extern asmlinkage long sys_getsockopt(int fd, int level, int optname, char __user *optval, int __user *optlen);
- extern asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
- extern asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned int flags);
-
-
- if(call<1||call>SYS_RECVMSG)
- return -EINVAL;
-
- /* copy_from_user should be SMP safe. */
- if (copy_from_user(a, args32, socketcall_nargs[call]))
- return -EFAULT;
-
- a0=a[0];
- a1=a[1];
-
- switch(call)
- {
- case SYS_SOCKET:
- err = sys_socket(a0,a1,a[2]);
- break;
- case SYS_BIND:
- err = sys_bind(a0,(struct sockaddr __user *)A(a1), a[2]);
- break;
- case SYS_CONNECT:
- err = sys_connect(a0, (struct sockaddr __user *)A(a1), a[2]);
- break;
- case SYS_LISTEN:
- err = sys_listen(a0,a1);
- break;
- case SYS_ACCEPT:
- err = sys_accept(a0,(struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
- break;
- case SYS_GETSOCKNAME:
- err = sys_getsockname(a0,(struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
- break;
- case SYS_GETPEERNAME:
- err = sys_getpeername(a0, (struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
- break;
- case SYS_SOCKETPAIR:
- err = sys_socketpair(a0,a1, a[2], (int __user *)A(a[3]));
- break;
- case SYS_SEND:
- err = sys_send(a0, (void __user *)A(a1), a[2], a[3]);
- break;
- case SYS_SENDTO:
- err = sys_sendto(a0,(void __user *)A(a1), a[2], a[3],
- (struct sockaddr __user *)A(a[4]), a[5]);
- break;
- case SYS_RECV:
- err = sys_recv(a0, (void __user *)A(a1), a[2], a[3]);
- break;
- case SYS_RECVFROM:
- err = sys_recvfrom(a0, (void __user *)A(a1), a[2], a[3],
- (struct sockaddr __user *)A(a[4]), (int __user *)A(a[5]));
- break;
- case SYS_SHUTDOWN:
- err = sys_shutdown(a0,a1);
- break;
- case SYS_SETSOCKOPT:
- err = sys_setsockopt(a0, a1, a[2], (char __user *)A(a[3]), a[4]);
- break;
- case SYS_GETSOCKOPT:
- err = sys_getsockopt(a0, a1, a[2], (char __user *)A(a[3]), (int __user *)A(a[4]));
- break;
- case SYS_SENDMSG:
- err = sys_sendmsg(a0, (struct msghdr __user *) A(a1), a[2]);
- break;
- case SYS_RECVMSG:
- err = sys_recvmsg(a0, (struct msghdr __user *) A(a1), a[2]);
- break;
- default:
- err = -EINVAL;
- break;
- }
- return err;
-}
-
-struct sigevent32 {
- u32 sigev_value;
- u32 sigev_signo;
- u32 sigev_notify;
- u32 payload[(64 / 4) - 3];
-};
-
-extern asmlinkage long
-sys_timer_create(clockid_t which_clock,
- struct sigevent __user *timer_event_spec,
- timer_t __user * created_timer_id);
-
-long
-sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *timer_id)
-{
- struct sigevent __user *p = NULL;
- if (se32) {
- struct sigevent se;
- p = compat_alloc_user_space(sizeof(struct sigevent));
- memset(&se, 0, sizeof(struct sigevent));
- if (get_user(se.sigev_value.sival_int, &se32->sigev_value) ||
- __get_user(se.sigev_signo, &se32->sigev_signo) ||
- __get_user(se.sigev_notify, &se32->sigev_notify) ||
- __copy_from_user(&se._sigev_un._pad, &se32->payload,
- sizeof(se32->payload)) ||
- copy_to_user(p, &se, sizeof(se)))
- return -EFAULT;
- }
- return sys_timer_create(clock, p, timer_id);
-}
-
save_static_function(sys32_clone);
__attribute_used__ noinline static int
_sys32_clone(nabi_no_regargs struct pt_regs regs)
return do_fork(clone_flags, newsp, ®s, 0,
parent_tidptr, child_tidptr);
}
-
-/*
- * Implement the event wait interface for the eventpoll file. It is the kernel
- * part of the user space epoll_pwait(2).
- */
-asmlinkage long compat_sys_epoll_pwait(int epfd,
- struct epoll_event __user *events, int maxevents, int timeout,
- const compat_sigset_t __user *sigmask, size_t sigsetsize)
-{
- int error;
- sigset_t ksigmask, sigsaved;
-
- /*
- * If the caller wants a certain signal mask to be set during the wait,
- * we apply it here.
- */
- if (sigmask) {
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
- if (!access_ok(VERIFY_READ, sigmask, sizeof(ksigmask)))
- return -EFAULT;
- if (__copy_conv_sigset_from_user(&ksigmask, sigmask))
- return -EFAULT;
- sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
- sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
- }
-
- error = sys_epoll_wait(epfd, events, maxevents, timeout);
-
- /*
- * If we changed the signal mask, we need to restore the original one.
- * In case we've got a signal while waiting, we do not restore the
- * signal mask yet, and we allow do_signal() to deliver the signal on
- * the way back to userspace, before the signal mask is restored.
- */
- if (sigmask) {
- if (error == -EINTR) {
- memcpy(¤t->saved_sigmask, &sigsaved,
- sizeof(sigsaved));
- set_thread_flag(TIF_RESTORE_SIGMASK);
- } else
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
- }
-
- return error;
-}
* Userspace access stuff.
*/
EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(__copy_user_inatomic);
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
EXPORT_SYMBOL(__strncpy_from_user_asm);
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched()) {
-#ifdef CONFIG_MIPS_MT_SMTC
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
extern void smtc_idle_loop_hook(void);
smtc_idle_loop_hook();
-#endif /* CONFIG_MIPS_MT_SMTC */
+#endif
if (cpu_wait)
(*cpu_wait)();
}
static struct chan_waitqueues {
wait_queue_head_t rt_queue;
wait_queue_head_t lx_queue;
- int in_open;
+ atomic_t in_open;
} channel_wqs[RTLX_CHANNELS];
static struct irqaction irq;
int rtlx_open(int index, int can_sleep)
{
- int ret;
- struct rtlx_channel *chan;
volatile struct rtlx_info **p;
+ struct rtlx_channel *chan;
+ enum rtlx_state state;
+ int ret = 0;
if (index >= RTLX_CHANNELS) {
printk(KERN_DEBUG "rtlx_open index out of range\n");
return -ENOSYS;
}
- if (channel_wqs[index].in_open) {
- printk(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
- return -EBUSY;
+ if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
+ printk(KERN_DEBUG "rtlx_open channel %d already opened\n",
+ index);
+ ret = -EBUSY;
+ goto out_fail;
}
- channel_wqs[index].in_open++;
-
if (rtlx == NULL) {
if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
if (can_sleep) {
- DECLARE_WAITQUEUE(wait, current);
-
- /* go to sleep */
- add_wait_queue(&channel_wqs[index].lx_queue, &wait);
-
- set_current_state(TASK_INTERRUPTIBLE);
- while ((p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
-
- /* back running */
+ __wait_event_interruptible(channel_wqs[index].lx_queue,
+ (p = vpe_get_shared(RTLX_TARG_VPE)),
+ ret);
+ if (ret)
+ goto out_fail;
} else {
- printk( KERN_DEBUG "No SP program loaded, and device "
+ printk(KERN_DEBUG "No SP program loaded, and device "
"opened with O_NONBLOCK\n");
- channel_wqs[index].in_open = 0;
- return -ENOSYS;
+ ret = -ENOSYS;
+ goto out_fail;
}
}
if (*p == NULL) {
if (can_sleep) {
- DECLARE_WAITQUEUE(wait, current);
-
- /* go to sleep */
- add_wait_queue(&channel_wqs[index].lx_queue, &wait);
-
- set_current_state(TASK_INTERRUPTIBLE);
- while (*p == NULL) {
- schedule();
-
- /* reset task state to interruptable otherwise
- we'll whizz round here like a very fast loopy
- thing. schedule() appears to return with state
- set to TASK_RUNNING.
-
- If the loaded SP program, for whatever reason,
- doesn't set up the shared structure *p will never
- become true. So whoever connected to either /dev/rt?
- or if it was kspd, will then take up rather a lot of
- processor cycles.
- */
-
- set_current_state(TASK_INTERRUPTIBLE);
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
-
- /* back running */
- }
- else {
+ __wait_event_interruptible(channel_wqs[index].lx_queue,
+ *p != NULL,
+ ret);
+ if (ret)
+ goto out_fail;
+ } else {
printk(" *vpe_get_shared is NULL. "
"Has an SP program been loaded?\n");
- channel_wqs[index].in_open = 0;
- return -ENOSYS;
+ ret = -ENOSYS;
+ goto out_fail;
}
}
if ((unsigned int)*p < KSEG0) {
printk(KERN_WARNING "vpe_get_shared returned an invalid pointer "
"maybe an error code %d\n", (int)*p);
- channel_wqs[index].in_open = 0;
- return -ENOSYS;
+ ret = -ENOSYS;
+ goto out_fail;
}
- if ((ret = rtlx_init(*p)) < 0) {
- channel_wqs[index].in_open = 0;
- return ret;
- }
+ if ((ret = rtlx_init(*p)) < 0)
+ goto out_ret;
}
chan = &rtlx->channel[index];
- if (chan->lx_state == RTLX_STATE_OPENED) {
- channel_wqs[index].in_open = 0;
- return -EBUSY;
- }
+ state = xchg(&chan->lx_state, RTLX_STATE_OPENED);
+ if (state == RTLX_STATE_OPENED) {
+ ret = -EBUSY;
+ goto out_fail;
+ }
- chan->lx_state = RTLX_STATE_OPENED;
- channel_wqs[index].in_open = 0;
- return 0;
+out_fail:
+ smp_mb();
+ atomic_dec(&channel_wqs[index].in_open);
+ smp_mb();
+
+out_ret:
+ return ret;
}
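
The recurring pattern in this conversion deserves a note:
__wait_event_interruptible(wq, condition, ret) sleeps until the condition
holds and sets ret to -ERESTARTSYS if a signal arrives first, replacing the
hand-rolled add_wait_queue()/schedule() loops removed above. Schematically,
with condition_holds() as a stand-in:

	int ret = 0;

	__wait_event_interruptible(channel_wqs[index].lx_queue,
				   condition_holds(),	/* stand-in */
				   ret);
	if (ret)
		return ret;	/* interrupted by a signal */
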
int rtlx_release(int index)
/* data available to read? */
if (chan->lx_read == chan->lx_write) {
if (can_sleep) {
- DECLARE_WAITQUEUE(wait, current);
-
- /* go to sleep */
- add_wait_queue(&channel_wqs[index].lx_queue, &wait);
-
- set_current_state(TASK_INTERRUPTIBLE);
- while (chan->lx_read == chan->lx_write) {
- schedule();
+ int ret = 0;
- set_current_state(TASK_INTERRUPTIBLE);
+ __wait_event_interruptible(channel_wqs[index].lx_queue,
+ chan->lx_read != chan->lx_write || sp_stopping,
+ ret);
+ if (ret)
+ return ret;
- if (sp_stopping) {
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
- return 0;
- }
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
-
- /* back running */
- }
- else
+ if (sp_stopping)
+ return 0;
+ } else
return 0;
}
{
int minor;
struct rtlx_channel *rt;
- DECLARE_WAITQUEUE(wait, current);
minor = iminor(file->f_path.dentry->d_inode);
rt = &rtlx->channel[minor];
/* any space left... */
if (!rtlx_write_poll(minor)) {
+ int ret = 0;
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- add_wait_queue(&channel_wqs[minor].rt_queue, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- while (!rtlx_write_poll(minor))
- schedule();
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&channel_wqs[minor].rt_queue, &wait);
+ __wait_event_interruptible(channel_wqs[minor].rt_queue,
+ rtlx_write_poll(minor),
+ ret);
+ if (ret)
+ return ret;
}
return rtlx_write(minor, (void *)buffer, count, 1);
for (i = 0; i < RTLX_CHANNELS; i++) {
init_waitqueue_head(&channel_wqs[i].rt_queue);
init_waitqueue_head(&channel_wqs[i].lx_queue);
- channel_wqs[i].in_open = 0;
+ atomic_set(&channel_wqs[i].in_open, 0);
dev = device_create(mt_class, NULL, MKDEV(major, i),
"%s%d", module_name, i);
sys sys_kexec_load 4
sys sys_getcpu 3
sys sys_epoll_pwait 6
+ sys sys_ioprio_set 3
+ sys sys_ioprio_get 2
.endm
/* We pre-compute the number of _instruction_ bytes needed to
PTR sys_get_robust_list
PTR sys_kexec_load /* 5270 */
PTR sys_getcpu
- PTR compat_sys_epoll_pwait
+ PTR sys_epoll_pwait
+ PTR sys_ioprio_set
+ PTR sys_ioprio_get
+ .size sys_call_table,.-sys_call_table
PTR compat_sys_statfs64
PTR compat_sys_fstatfs64
PTR sys_sendfile64
- PTR sys32_timer_create /* 6220 */
+ PTR compat_sys_timer_create /* 6220 */
PTR compat_sys_timer_settime
PTR compat_sys_timer_gettime
PTR sys_timer_getoverrun
PTR compat_sys_mq_notify
PTR compat_sys_mq_getsetattr
PTR sys_ni_syscall /* 6240, sys_vserver */
- PTR sysn32_waitid
+ PTR compat_sys_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list
PTR compat_sys_kexec_load
- PTR sys_getcpu
+ PTR sys_getcpu /* 6275 */
PTR compat_sys_epoll_pwait
+ PTR sys_ioprio_set
+ PTR sys_ioprio_get
+ .size sysn32_call_table,.-sysn32_call_table
PTR compat_sys_statfs
PTR compat_sys_fstatfs /* 4100 */
PTR sys_ni_syscall /* sys_ioperm */
- PTR sys32_socketcall
+ PTR compat_sys_socketcall
PTR sys_syslog
PTR compat_sys_setitimer
PTR compat_sys_getitimer /* 4105 */
PTR sys_fadvise64_64
PTR compat_sys_statfs64 /* 4255 */
PTR compat_sys_fstatfs64
- PTR sys32_timer_create
+ PTR compat_sys_timer_create
PTR compat_sys_timer_settime
PTR compat_sys_timer_gettime
PTR sys_timer_getoverrun /* 4260 */
PTR compat_sys_get_robust_list /* 4310 */
PTR compat_sys_kexec_load
PTR sys_getcpu
- PTR sys_epoll_pwait
+ PTR compat_sys_epoll_pwait
+ PTR sys_ioprio_set
+ PTR sys_ioprio_get /* 4315 */
.size sys_call_table,.-sys_call_table
{
cpu_probe();
prom_init();
+
+#ifdef CONFIG_EARLY_PRINTK
+ {
+ extern void setup_early_printk(void);
+
+ setup_early_printk();
+ }
+#endif
cpu_report();
#if defined(CONFIG_VT)
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
-/* Enable additional debug checks before going into CPU idle loop */
-#define SMTC_IDLE_HOOK_DEBUG
-
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
static int hang_trig = 0;
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */
printk("ASID mask value override to 0x%x\n", asidmask);
/* Temporary */
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
if (hang_trig)
printk("Logic Analyser Trigger on suspected TC hang\n");
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/* Put MVPE's into 'configuration state' */
write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
case SMTC_CLOCK_TICK:
/* Invoke Clock "Interrupt" */
ipi_timer_latch[dest_copy] = 0;
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
clock_hang_reported[dest_copy] = 0;
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
local_timer_interrupt(0, NULL);
break;
case LINUX_SMP_IPI:
void smtc_idle_loop_hook(void)
{
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
int im;
int flags;
int mtflags;
local_irq_restore(flags);
if (pdb_msg != &id_ho_db_msg[0])
printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/*
* Replay any accumulated deferred IPIs. If "Instant Replay"
goto sigbus;
pc = (unsigned int __user *) exception_epc(regs);
- if ((current->thread.mflags & MF_FIXADE) == 0)
+ if (user_mode(regs) && (current->thread.mflags & MF_FIXADE) == 0)
goto sigbus;
/*
static int vpe_open(struct inode *inode, struct file *filp)
{
int minor, ret;
+ enum vpe_state state;
struct vpe *v;
struct vpe_notifications *not;
return -ENODEV;
}
- if (v->state != VPE_STATE_UNUSED) {
+ state = xchg(&v->state, VPE_STATE_INUSE);
+ if (state != VPE_STATE_UNUSED) {
dvpe();
printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
cleanup_tc(get_tc(minor));
}
- // allocate it so when we get write ops we know it's expected.
- v->state = VPE_STATE_INUSE;
-
/* this of-course trashes what was there before... */
v->pbuffer = vmalloc(P_SIZE);
v->plen = P_SIZE;
sizeof(struct lasat_eeprom_struct) - 4);
if (crc != lasat_board_info.li_eeprom_info.crc32) {
- prom_printf("WARNING...\nWARNING...\nEEPROM CRC does not match calculated, attempting to soldier on...\n");
+ printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM CRC does "
+ "not match calculated, attempting to soldier on...\n");
}
- if (lasat_board_info.li_eeprom_info.version != LASAT_EEPROM_VERSION)
- {
- prom_printf("WARNING...\nWARNING...\nEEPROM version %d, wanted version %d, attempting to soldier on...\n",
+ if (lasat_board_info.li_eeprom_info.version != LASAT_EEPROM_VERSION) {
+ printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM version "
+ "%d, wanted version %d, attempting to soldier on...\n",
(unsigned int)lasat_board_info.li_eeprom_info.version,
LASAT_EEPROM_VERSION);
}
cfg1 = lasat_board_info.li_eeprom_info.cfg[1];
if ( LASAT_W0_DSCTYPE(cfg0) != 1) {
- prom_printf("WARNING...\nWARNING...\nInvalid configuration read from EEPROM, attempting to soldier on...");
+ printk(KERN_WARNING "WARNING...\nWARNING...\n"
+ "Invalid configuration read from EEPROM, attempting to "
+ "soldier on...");
}
/* We have a valid configuration */
#define PROM_PUTC_ADDR PROM_JUMP_TABLE_ENTRY(1)
#define PROM_MONITOR_ADDR PROM_JUMP_TABLE_ENTRY(2)
-static void null_prom_printf(const char * fmt, ...)
-{
-}
-
static void null_prom_display(const char *string, int pos, int clear)
{
}
}
/* these are functions provided by the bootloader */
-static void (* prom_putc)(char c) = null_prom_putc;
-void (* prom_printf)(const char * fmt, ...) = null_prom_printf;
+static void (* __prom_putc)(char c) = null_prom_putc;
+
+void prom_putchar(char c)
+{
+ __prom_putc(c);
+}
+
void (* prom_display)(const char *string, int pos, int clear) =
null_prom_display;
void (* prom_monitor)(void) = null_prom_monitor;
unsigned int lasat_ndelay_divider;
-#define PROM_PRINTFBUF_SIZE 256
-static char prom_printfbuf[PROM_PRINTFBUF_SIZE];
-
-static void real_prom_printf(const char * fmt, ...)
-{
- va_list ap;
- int len;
- char *c = prom_printfbuf;
- int i;
-
- va_start(ap, fmt);
- len = vsnprintf(prom_printfbuf, PROM_PRINTFBUF_SIZE, fmt, ap);
- va_end(ap);
-
- /* output overflowed the buffer */
- if (len < 0 || len > PROM_PRINTFBUF_SIZE)
- len = PROM_PRINTFBUF_SIZE;
-
- for (i=0; i < len; i++) {
- if (*c == '\n')
- prom_putc('\r');
- prom_putc(*c++);
- }
-}
-
static void setup_prom_vectors(void)
{
u32 version = *(u32 *)(RESET_VECTOR + 0x90);
if (version >= 307) {
prom_display = (void *)PROM_DISPLAY_ADDR;
- prom_putc = (void *)PROM_PUTC_ADDR;
- prom_printf = real_prom_printf;
+ __prom_putc = (void *)PROM_PUTC_ADDR;
prom_monitor = (void *)PROM_MONITOR_ADDR;
}
- prom_printf("prom vectors set up\n");
+ printk("prom vectors set up\n");
}
static struct at93c_defs at93c_defs[N_MACHTYPES] = {
setup_prom_vectors();
if (current_cpu_data.cputype == CPU_R5000) {
- prom_printf("LASAT 200 board\n");
+ printk("LASAT 200 board\n");
mips_machtype = MACH_LASAT_200;
lasat_ndelay_divider = LASAT_200_DIVIDER;
} else {
- prom_printf("LASAT 100 board\n");
+ printk("LASAT 100 board\n");
mips_machtype = MACH_LASAT_100;
lasat_ndelay_divider = LASAT_100_DIVIDER;
}
#define PROM_H
extern void (* prom_display)(const char *string, int pos, int clear);
extern void (* prom_monitor)(void);
-extern void (* prom_printf)(const char * fmt, ...);
#endif
void __init plat_timer_setup(struct irqaction *irq)
{
- write_c0_compare( read_c0_count() + mips_hpt_frequency / HZ);
change_c0_status(ST0_IM, IE_IRQ0 | IE_IRQ5);
}
/* Switch from prom exception handler to normal mode */
change_c0_status(ST0_BEV,0);
- prom_printf("Lasat specific initialization complete\n");
+ pr_info("Lasat specific initialization complete\n");
}
obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o
obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o
-
-EXTRA_AFLAGS := $(CFLAGS)
obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o
obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o
-
-EXTRA_AFLAGS := $(CFLAGS)
# Makefile for MIPS-specific library files..
#
-lib-y += csum_partial.o memcpy.o memcpy-inatomic.o memset.o promlib.o \
- strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+lib-y += csum_partial.o memcpy.o memcpy-inatomic.o memset.o strlen_user.o \
+ strncpy_user.o strnlen_user.o uncached.o
obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o
# libgcc-style stuff needed in the kernel
lib-y += ashldi3.o ashrdi3.o lshrdi3.o
-
-EXTRA_AFLAGS := $(CFLAGS)
+++ /dev/null
-#include <stdarg.h>
-#include <linux/kernel.h>
-
-extern void prom_putchar(char);
-
-void prom_printf(char *fmt, ...)
-{
- va_list args;
- char ppbuf[1024];
- char *bptr;
-
- va_start(args, fmt);
- vsprintf(ppbuf, fmt, args);
-
- bptr = ppbuf;
-
- while (*bptr != 0) {
- if (*bptr == '\n')
- prom_putchar('\r');
-
- prom_putchar(*bptr++);
- }
- va_end(args);
-}
# Makefile for the MIPS boards generic routines under Linux.
#
-obj-y := reset.o display.o init.o memory.o printf.o \
+obj-y := reset.o display.o init.o memory.o \
cmdline.o time.o
+
+obj-$(CONFIG_EARLY_PRINTK) += console.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_KGDB) += gdb_hook.o
-
-EXTRA_AFLAGS := $(CFLAGS)
--- /dev/null
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Putting things on the screen/serial line using YAMON's facilities.
+ */
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/serial_reg.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_MIPS_ATLAS
+#include <asm/mips-boards/atlas.h>
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define PORT(offset) (ATLAS_UART_REGS_BASE + ((offset)<<3))
+#else
+#define PORT(offset) (ATLAS_UART_REGS_BASE + 3 + ((offset)<<3))
+#endif
+
+#elif defined(CONFIG_MIPS_SEAD)
+
+#include <asm/mips-boards/sead.h>
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define PORT(offset) (SEAD_UART0_REGS_BASE + ((offset)<<3))
+#else
+#define PORT(offset) (SEAD_UART0_REGS_BASE + 3 + ((offset)<<3))
+#endif
+
+#else
+
+#define PORT(offset) (0x3f8 + (offset))
+
+#endif
+
+static inline unsigned int serial_in(int offset)
+{
+ return inb(PORT(offset));
+}
+
+static inline void serial_out(int offset, int value)
+{
+ outb(value, PORT(offset));
+}
+
+int prom_putchar(char c)
+{
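+ /* spin until the transmitter holding register is empty */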
+ while ((serial_in(UART_LSR) & UART_LSR_THRE) == 0)
+ ;
+
+ serial_out(UART_TX, c);
+
+ return 1;
+}
flow = 'r';
sprintf (console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
strcat (prom_getcmdline(), console_string);
- prom_printf("Config serial console:%s\n", console_string);
+ pr_info("Config serial console:%s\n", console_string);
}
}
#endif
generic_getDebugChar = rs_getDebugChar;
}
- prom_printf("KGDB: Using serial line /dev/ttyS%d at %d for session, "
- "please connect your debugger\n", line ? 1 : 0, speed);
+ pr_info("KGDB: Using serial line /dev/ttyS%d at %d for "
+ "session, please connect your debugger\n",
+ line ? 1 : 0, speed);
{
char *s;
void __init prom_init(void)
{
- u32 start, map, mask, data;
-
prom_argc = fw_arg0;
_prom_argv = (int *) fw_arg1;
_prom_envp = (int *) fw_arg2;
mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_MSC;
}
switch(mips_revision_corid) {
+ u32 start, map, mask, data;
+
case MIPS_REVISION_CORID_QED_RM5261:
case MIPS_REVISION_CORID_CORE_LV:
case MIPS_REVISION_CORID_CORE_FPGA:
board_nmi_handler_setup = mips_nmi_setup;
board_ejtag_handler_setup = mips_ejtag_setup;
- prom_printf("\nLINUX started...\n");
+ pr_info("\nLINUX started...\n");
prom_init_cmdline();
prom_meminit();
#ifdef CONFIG_SERIAL_8250_CONSOLE
/* otherwise look in the environment */
memsize_str = prom_getenv("memsize");
if (!memsize_str) {
- prom_printf("memsize not set in boot prom, set to default (32Mb)\n");
+ printk(KERN_WARNING
+ "memsize not set in boot prom, set to default (32Mb)\n");
physical_memsize = 0x02000000;
} else {
#ifdef DEBUG
- prom_printf("prom_memsize = %s\n", memsize_str);
+ pr_debug("prom_memsize = %s\n", memsize_str);
#endif
physical_memsize = simple_strtol(memsize_str, NULL, 0);
}
struct prom_pmemblock *p;
#ifdef DEBUG
- prom_printf("YAMON MEMORY DESCRIPTOR dump:\n");
+ pr_debug("YAMON MEMORY DESCRIPTOR dump:\n");
p = prom_getmdesc();
while (p->size) {
int i = 0;
- prom_printf("[%d,%p]: base<%08lx> size<%08lx> type<%s>\n",
- i, p, p->base, p->size, mtypes[p->type]);
+ pr_debug("[%d,%p]: base<%08lx> size<%08lx> type<%s>\n",
+ i, p, p->base, p->size, mtypes[p->type]);
p++;
i++;
}
+++ /dev/null
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Putting things on the screen/serial line using YAMONs facilities.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/spinlock.h>
-#include <asm/io.h>
-
-#ifdef CONFIG_MIPS_ATLAS
-#include <asm/mips-boards/atlas.h>
-
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-#define PORT(offset) (ATLAS_UART_REGS_BASE + ((offset)<<3))
-#else
-#define PORT(offset) (ATLAS_UART_REGS_BASE + 3 + ((offset)<<3))
-#endif
-
-#elif defined(CONFIG_MIPS_SEAD)
-
-#include <asm/mips-boards/sead.h>
-
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-#define PORT(offset) (SEAD_UART0_REGS_BASE + ((offset)<<3))
-#else
-#define PORT(offset) (SEAD_UART0_REGS_BASE + 3 + ((offset)<<3))
-#endif
-
-#else
-
-#define PORT(offset) (0x3f8 + (offset))
-
-#endif
-
-static inline unsigned int serial_in(int offset)
-{
- return inb(PORT(offset));
-}
-
-static inline void serial_out(int offset, int value)
-{
- outb(value, PORT(offset));
-}
-
-int prom_putchar(char c)
-{
- while ((serial_in(UART_LSR) & UART_LSR_THRE) == 0)
- ;
-
- serial_out(UART_TX, c);
-
- return 1;
-}
-
-char prom_getchar(void)
-{
- while (!(serial_in(UART_LSR) & UART_LSR_DR))
- ;
-
- return serial_in(UART_RX);
-}
-
irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
#endif
-
- /* to generate the first timer interrupt */
- write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
}
obj-y := malta_int.o malta_setup.o
obj-$(CONFIG_MTD) += malta_mtd.o
-obj-$(CONFIG_SMP) += malta_smp.o
+obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o
+++ /dev/null
-/*
- * Malta Platform-specific hooks for SMP operation
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-
-#include <asm/atomic.h>
-#include <asm/cpu.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/hardirq.h>
-#include <asm/mmu_context.h>
-#include <asm/smp.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/smtc_ipi.h>
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-/* VPE/SMP Prototype implements platform interfaces directly */
-#if !defined(CONFIG_MIPS_MT_SMP)
-
-/*
- * Cause the specified action to be performed on a targeted "CPU"
- */
-
-void core_send_ipi(int cpu, unsigned int action)
-{
-/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
-#ifdef CONFIG_MIPS_MT_SMTC
- smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
-#endif /* CONFIG_MIPS_MT_SMTC */
-}
-
-/*
- * Platform "CPU" startup hook
- */
-
-void prom_boot_secondary(int cpu, struct task_struct *idle)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
- smtc_boot_secondary(cpu, idle);
-#endif /* CONFIG_MIPS_MT_SMTC */
-}
-
-/*
- * Post-config but pre-boot cleanup entry point
- */
-
-void prom_init_secondary(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
- void smtc_init_secondary(void);
- int myvpe;
-
- /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
- myvpe = read_c0_tcbind() & TCBIND_CURVPE;
- if (myvpe != 0) {
- /* Ideally, this should be done only once per VPE, but... */
- clear_c0_status(STATUSF_IP2);
- set_c0_status(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP3
- | STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6
- | STATUSF_IP7);
- }
-
- smtc_init_secondary();
-#endif /* CONFIG_MIPS_MT_SMTC */
-}
-
-/*
- * Platform SMP pre-initialization
- *
- * As noted above, we can assume a single CPU for now
- * but it may be multithreaded.
- */
-
-void plat_smp_setup(void)
-{
- if (read_c0_config3() & (1<<2))
- mipsmt_build_cpu_map(0);
-}
-
-void __init plat_prepare_cpus(unsigned int max_cpus)
-{
- if (read_c0_config3() & (1<<2))
- mipsmt_prepare_cpus();
-}
-
-/*
- * SMP initialization finalization entry point
- */
-
-void prom_smp_finish(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
- smtc_smp_finish();
-#endif /* CONFIG_MIPS_MT_SMTC */
-}
-
-/*
- * Hook for after all CPUs are online
- */
-
-void prom_cpus_done(void)
-{
-}
-
-#endif /* CONFIG_MIPS32R2_MT_SMP */
--- /dev/null
+/*
+ * Malta Platform-specific hooks for SMP operation
+ */
+#include <linux/init.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/smtc.h>
+#include <asm/smtc_ipi.h>
+
+/* VPE/SMP Prototype implements platform interfaces directly */
+
+/*
+ * Cause the specified action to be performed on a targeted "CPU"
+ */
+
+void core_send_ipi(int cpu, unsigned int action)
+{
+ /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
+ smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
+}
+
+/*
+ * Platform "CPU" startup hook
+ */
+
+void prom_boot_secondary(int cpu, struct task_struct *idle)
+{
+ smtc_boot_secondary(cpu, idle);
+}
+
+/*
+ * Post-config but pre-boot cleanup entry point
+ */
+
+void prom_init_secondary(void)
+{
+ void smtc_init_secondary(void);
+ int myvpe;
+
+ /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
+ myvpe = read_c0_tcbind() & TCBIND_CURVPE;
+ if (myvpe != 0) {
+ /* Ideally, this should be done only once per VPE, but... */
+ clear_c0_status(STATUSF_IP2);
+ set_c0_status(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP3
+ | STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6
+ | STATUSF_IP7);
+ }
+
+ smtc_init_secondary();
+}
+
+/*
+ * Platform SMP pre-initialization
+ *
+ * As noted above, we can assume a single CPU for now
+ * but it may be multithreaded.
+ */
+
+void plat_smp_setup(void)
+{
+ if (read_c0_config3() & (1<<2))
+ mipsmt_build_cpu_map(0);
+}
+
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
+ if (read_c0_config3() & (1<<2))
+ mipsmt_prepare_cpus();
+}
+
+/*
+ * SMP initialization finalization entry point
+ */
+
+void prom_smp_finish(void)
+{
+ smtc_smp_finish();
+}
+
+/*
+ * Hook for after all CPUs are online
+ */
+
+void prom_cpus_done(void)
+{
+}
#
# Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+# Copyright (C) 2007 MIPS Technologies, Inc.
+# written by Ralf Baechle (ralf@linux-mips.org)
#
# This program is free software; you can distribute it and/or modify it
# under the terms of the GNU General Public License (Version 2) as
# 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
-obj-y := sim_setup.o sim_mem.o sim_time.o sim_printf.o sim_int.o sim_cmdline.o
+obj-y := sim_setup.o sim_mem.o sim_time.o sim_int.o sim_cmdline.o
+
+obj-$(CONFIG_EARLY_PRINTK) += sim_console.o
obj-$(CONFIG_SMP) += sim_smp.o
--- /dev/null
+/*
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * written by Ralf Baechle
+ */
+#include <linux/init.h>
+#include <linux/serial_reg.h>
+#include <asm/io.h>
+
+static inline unsigned int serial_in(int offset)
+{
+ return inb(0x3f8 + offset);
+}
+
+static inline void serial_out(int offset, int value)
+{
+ outb(value, 0x3f8 + offset);
+}
+
+void __init prom_putchar(char c)
+{
+ while ((serial_in(UART_LSR) & UART_LSR_THRE) == 0)
+ ;
+
+ serial_out(UART_TX, c);
+}
unsigned int memsize;
memsize = 0x02000000;
- prom_printf("Setting default memory size 0x%08x\n", memsize);
+ pr_info("Setting default memory size 0x%08x\n", memsize);
memset(mdesc, 0, sizeof(mdesc));
+++ /dev/null
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Putting things on the screen/serial line using YAMONs facilities.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/spinlock.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
-static inline unsigned int serial_in(int offset)
-{
- return inb(0x3f8 + offset);
-}
-
-static inline void serial_out(int offset, int value)
-{
- outb(value, 0x3f8 + offset);
-}
-
-int putPromChar(char c)
-{
- while ((serial_in(UART_LSR) & UART_LSR_THRE) == 0)
- ;
-
- serial_out(UART_TX, c);
-
- return 1;
-}
-
-char getPromChar(void)
-{
- while (!(serial_in(UART_LSR) & 1))
- ;
-
- return serial_in(UART_RX);
-}
-
-void prom_printf(char *fmt, ...)
-{
- va_list args;
- int l;
- char *p, *buf_end;
- char buf[1024];
-
- va_start(args, fmt);
- l = vsprintf(buf, fmt, args); /* hopefully i < sizeof(buf) */
- va_end(args);
-
- buf_end = buf + l;
-
- for (p = buf; p < buf_end; p++) {
- /* Crude cr/nl handling is better than none */
- if (*p == '\n')
- putPromChar('\r');
- putPromChar(*p);
- }
-}
serial_init();
board_time_init = sim_time_init;
- prom_printf("Linux started...\n");
+ pr_info("Linux started...\n");
#ifdef CONFIG_MIPS_MT_SMP
sanitize_tlb_entries();
{
set_io_port_base(0xbfd00000);
- prom_printf("\nLINUX started...\n");
+ pr_info("\nLINUX started...\n");
prom_init_cmdline();
prom_meminit();
}
s.timeout = 4;
if (early_serial_setup(&s) != 0) {
- prom_printf(KERN_ERR "Serial setup failed!\n");
+ printk(KERN_ERR "Serial setup failed!\n");
}
#endif
irq_desc[mips_cpu_timer_irq].flags |= IRQ_PER_CPU;
set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
#endif
-
- /* to generate the first timer interrupt */
- write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
}
obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
-
-EXTRA_AFLAGS := $(CFLAGS)
return;
tx39_blast_dcache();
- tx39_blast_icache();
}
static inline void tx39___flush_cache_all(void)
if (!cpu_has_dc_aliases)
return;
- if (cpu_context(smp_processor_id(), mm) != 0) {
- tx39_flush_cache_all();
- }
+ if (cpu_context(smp_processor_id(), mm) != 0)
+ tx39_blast_dcache();
}
static void tx39_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- int exec;
-
+ if (!cpu_has_dc_aliases)
+ return;
if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
return;
- exec = vma->vm_flags & VM_EXEC;
- if (cpu_has_dc_aliases || exec)
- tx39_blast_dcache();
- if (exec)
- tx39_blast_icache();
+ tx39_blast_dcache();
}
static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
static void local_tx39_flush_data_cache_page(void * addr)
{
- tx39_blast_dcache_page(addr);
+ tx39_blast_dcache_page((unsigned long)addr);
}
static void tx39_flush_data_cache_page(unsigned long addr)
static inline void breakout_errctl(unsigned int val)
{
if (val & CP0_ERRCTL_RECOVERABLE)
- prom_printf(" recoverable");
+ printk(" recoverable");
if (val & CP0_ERRCTL_DCACHE)
- prom_printf(" dcache");
+ printk(" dcache");
if (val & CP0_ERRCTL_ICACHE)
- prom_printf(" icache");
+ printk(" icache");
if (val & CP0_ERRCTL_MULTIBUS)
- prom_printf(" multiple-buserr");
- prom_printf("\n");
+ printk(" multiple-buserr");
+ printk("\n");
}
static inline void breakout_cerri(unsigned int val)
{
if (val & CP0_CERRI_TAG_PARITY)
- prom_printf(" tag-parity");
+ printk(" tag-parity");
if (val & CP0_CERRI_DATA_PARITY)
- prom_printf(" data-parity");
+ printk(" data-parity");
if (val & CP0_CERRI_EXTERNAL)
- prom_printf(" external");
- prom_printf("\n");
+ printk(" external");
+ printk("\n");
}
static inline void breakout_cerrd(unsigned int val)
{
switch (val & CP0_CERRD_CAUSES) {
case CP0_CERRD_LOAD:
- prom_printf(" load,");
+ printk(" load,");
break;
case CP0_CERRD_STORE:
- prom_printf(" store,");
+ printk(" store,");
break;
case CP0_CERRD_FILLWB:
- prom_printf(" fill/wb,");
+ printk(" fill/wb,");
break;
case CP0_CERRD_COHERENCY:
- prom_printf(" coherency,");
+ printk(" coherency,");
break;
case CP0_CERRD_DUPTAG:
- prom_printf(" duptags,");
+ printk(" duptags,");
break;
default:
- prom_printf(" NO CAUSE,");
+ printk(" NO CAUSE,");
break;
}
if (!(val & CP0_CERRD_TYPES))
- prom_printf(" NO TYPE");
+ printk(" NO TYPE");
else {
if (val & CP0_CERRD_MULTIPLE)
- prom_printf(" multi-err");
+ printk(" multi-err");
if (val & CP0_CERRD_TAG_STATE)
- prom_printf(" tag-state");
+ printk(" tag-state");
if (val & CP0_CERRD_TAG_ADDRESS)
- prom_printf(" tag-address");
+ printk(" tag-address");
if (val & CP0_CERRD_DATA_SBE)
- prom_printf(" data-SBE");
+ printk(" data-SBE");
if (val & CP0_CERRD_DATA_DBE)
- prom_printf(" data-DBE");
+ printk(" data-DBE");
if (val & CP0_CERRD_EXTERNAL)
- prom_printf(" external");
+ printk(" external");
}
- prom_printf("\n");
+ printk("\n");
}
#ifndef CONFIG_SIBYTE_BUS_WATCHER
l2_tag = in64(IO_SPACE_BASE | A_L2_ECC_TAG);
#endif
memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
- prom_printf("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
- prom_printf("\nLast recorded signature:\n");
- prom_printf("Request %02x from %d, answered by %d with Dcode %d\n",
+ printk("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
+ printk("\nLast recorded signature:\n");
+ printk("Request %02x from %d, answered by %d with Dcode %d\n",
(unsigned int)(G_SCD_BERR_TID(status) & 0x3f),
(int)(G_SCD_BERR_TID(status) >> 6),
(int)G_SCD_BERR_RID(status),
(int)G_SCD_BERR_DCODE(status));
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
- prom_printf("Last L2 tag w/ bad ECC: %016llx\n", l2_tag);
+ printk("Last L2 tag w/ bad ECC: %016llx\n", l2_tag);
#endif
} else {
- prom_printf("Bus watcher indicates no error\n");
+ printk("Bus watcher indicates no error\n");
}
}
#else
#else
csr_out32(M_SCD_TRACE_CFG_FREEZE, IO_SPACE_BASE | A_SCD_TRACE_CFG);
#endif
- prom_printf("Trace buffer frozen\n");
+ printk("Trace buffer frozen\n");
#endif
- prom_printf("Cache error exception on CPU %x:\n",
- (read_c0_prid() >> 25) & 0x7);
+ printk("Cache error exception on CPU %x:\n",
+ (read_c0_prid() >> 25) & 0x7);
__asm__ __volatile__ (
" .set push\n\t"
"=r" (dpahi), "=r" (dpalo), "=r" (eepc));
cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
- prom_printf(" c0_errorepc == %08x\n", eepc);
- prom_printf(" c0_errctl == %08x", errctl);
+ printk(" c0_errorepc == %08x\n", eepc);
+ printk(" c0_errctl == %08x", errctl);
breakout_errctl(errctl);
if (errctl & CP0_ERRCTL_ICACHE) {
- prom_printf(" c0_cerr_i == %08x", cerr_i);
+ printk(" c0_cerr_i == %08x", cerr_i);
breakout_cerri(cerr_i);
if (CP0_CERRI_IDX_VALID(cerr_i)) {
/* Check index of EPC, allowing for delay slot */
if (((eepc & SB1_CACHE_INDEX_MASK) != (cerr_i & SB1_CACHE_INDEX_MASK)) &&
((eepc & SB1_CACHE_INDEX_MASK) != ((cerr_i & SB1_CACHE_INDEX_MASK) - 4)))
- prom_printf(" cerr_i idx doesn't match eepc\n");
+ printk(" cerr_i idx doesn't match eepc\n");
else {
res = extract_ic(cerr_i & SB1_CACHE_INDEX_MASK,
(cerr_i & CP0_CERRI_DATA) != 0);
if (!(res & cerr_i))
- prom_printf("...didn't see indicated icache problem\n");
+ printk("...didn't see indicated icache problem\n");
}
}
}
if (errctl & CP0_ERRCTL_DCACHE) {
- prom_printf(" c0_cerr_d == %08x", cerr_d);
+ printk(" c0_cerr_d == %08x", cerr_d);
breakout_cerrd(cerr_d);
if (CP0_CERRD_DPA_VALID(cerr_d)) {
- prom_printf(" c0_cerr_dpa == %010llx\n", cerr_dpa);
+ printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
if (!CP0_CERRD_IDX_VALID(cerr_d)) {
res = extract_dc(cerr_dpa & SB1_CACHE_INDEX_MASK,
(cerr_d & CP0_CERRD_DATA) != 0);
if (!(res & cerr_d))
- prom_printf("...didn't see indicated dcache problem\n");
+ printk("...didn't see indicated dcache problem\n");
} else {
if ((cerr_dpa & SB1_CACHE_INDEX_MASK) != (cerr_d & SB1_CACHE_INDEX_MASK))
- prom_printf(" cerr_d idx doesn't match cerr_dpa\n");
+ printk(" cerr_d idx doesn't match cerr_dpa\n");
else {
res = extract_dc(cerr_d & SB1_CACHE_INDEX_MASK,
(cerr_d & CP0_CERRD_DATA) != 0);
if (!(res & cerr_d))
- prom_printf("...didn't see indicated problem\n");
+ printk("...didn't see indicated problem\n");
}
}
}
uint8_t lru;
int res = 0;
- prom_printf("Icache index 0x%04x ", addr);
+ printk("Icache index 0x%04x ", addr);
for (way = 0; way < 4; way++) {
/* Index-load-tag-I */
__asm__ __volatile__ (
taglo = ((unsigned long long)taglohi << 32) | taglolo;
if (way == 0) {
lru = (taghi >> 14) & 0xff;
- prom_printf("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
+ printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
((addr >> 5) & 0x3), /* bank */
((addr >> 7) & 0x3f), /* index */
(lru & 0x3),
if (valid) {
tlo_tmp = taglo & 0xfff3ff;
if (((taglo >> 10) & 1) ^ range_parity(tlo_tmp, 23, 0)) {
- prom_printf(" ** bad parity in VTag0/G/ASID\n");
+ printk(" ** bad parity in VTag0/G/ASID\n");
res |= CP0_CERRI_TAG_PARITY;
}
if (((taglo >> 11) & 1) ^ range_parity(taglo, 63, 24)) {
- prom_printf(" ** bad parity in R/VTag1\n");
+ printk(" ** bad parity in R/VTag1\n");
res |= CP0_CERRI_TAG_PARITY;
}
}
if (valid ^ ((taghi >> 27) & 1)) {
- prom_printf(" ** bad parity for valid bit\n");
+ printk(" ** bad parity for valid bit\n");
res |= CP0_CERRI_TAG_PARITY;
}
- prom_printf(" %d [VA %016llx] [Vld? %d] raw tags: %08X-%016llX\n",
+ printk(" %d [VA %016llx] [Vld? %d] raw tags: %08X-%016llX\n",
way, va, valid, taghi, taglo);
if (data) {
: "r" ((way << 13) | addr | (offset << 3)));
predecode = (datahi >> 8) & 0xff;
if (((datahi >> 16) & 1) != (uint32_t)range_parity(predecode, 7, 0)) {
- prom_printf(" ** bad parity in predecode\n");
+ printk(" ** bad parity in predecode\n");
res |= CP0_CERRI_DATA_PARITY;
}
/* XXXKW should/could check predecode bits themselves */
if (((datahi >> 4) & 0xf) ^ inst_parity(insta)) {
- prom_printf(" ** bad parity in instruction a\n");
+ printk(" ** bad parity in instruction a\n");
res |= CP0_CERRI_DATA_PARITY;
}
if ((datahi & 0xf) ^ inst_parity(instb)) {
- prom_printf(" ** bad parity in instruction b\n");
+ printk(" ** bad parity in instruction b\n");
res |= CP0_CERRI_DATA_PARITY;
}
- prom_printf(" %05X-%08X%08X", datahi, insta, instb);
+ printk(" %05X-%08X%08X", datahi, insta, instb);
}
- prom_printf("\n");
+ printk("\n");
}
}
return res;
uint8_t ecc, lru;
int res = 0;
- prom_printf("Dcache index 0x%04x ", addr);
+ printk("Dcache index 0x%04x ", addr);
for (way = 0; way < 4; way++) {
__asm__ __volatile__ (
" .set push\n\t"
pa = (taglo & 0xFFFFFFE000ULL) | addr;
if (way == 0) {
lru = (taghi >> 14) & 0xff;
- prom_printf("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
+ printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
((addr >> 11) & 0x2) | ((addr >> 5) & 1), /* bank */
((addr >> 6) & 0x3f), /* index */
(lru & 0x3),
}
state = (taghi >> 25) & 0x1f;
valid = DC_TAG_VALID(state);
- prom_printf(" %d [PA %010llx] [state %s (%02x)] raw tags: %08X-%016llX\n",
+ printk(" %d [PA %010llx] [state %s (%02x)] raw tags: %08X-%016llX\n",
way, pa, dc_state_str(state), state, taghi, taglo);
if (valid) {
if (((taglo >> 11) & 1) ^ range_parity(taglo, 39, 26)) {
- prom_printf(" ** bad parity in PTag1\n");
+ printk(" ** bad parity in PTag1\n");
res |= CP0_CERRD_TAG_ADDRESS;
}
if (((taglo >> 10) & 1) ^ range_parity(taglo, 25, 13)) {
- prom_printf(" ** bad parity in PTag0\n");
+ printk(" ** bad parity in PTag0\n");
res |= CP0_CERRD_TAG_ADDRESS;
}
} else {
}
res |= (bits == 1) ? CP0_CERRD_DATA_SBE : CP0_CERRD_DATA_DBE;
}
- prom_printf(" %02X-%016llX", datahi, datalo);
+ printk(" %02X-%016llX", datahi, datalo);
}
- prom_printf("\n");
+ printk("\n");
if (bad_ecc)
- prom_printf(" dwords w/ bad ECC: %d %d %d %d\n",
- !!(bad_ecc & 8), !!(bad_ecc & 4),
- !!(bad_ecc & 2), !!(bad_ecc & 1));
+ printk(" dwords w/ bad ECC: %d %d %d %d\n",
+ !!(bad_ecc & 8), !!(bad_ecc & 4),
+ !!(bad_ecc & 2), !!(bad_ecc & 1));
}
}
return res;
{
BUG_ON(direction == DMA_NONE);
- if (cpu_is_noncoherent_r10000(dev)) {
+ if (!plat_device_is_coherent(dev)) {
unsigned long addr;
- addr = plat_dma_addr_to_phys(dma_handle);
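+ /* convert the bus address back to a cached kernel virtual address */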
+ addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
__dma_sync(addr, size, direction);
}
}
{
BUG_ON(direction == DMA_NONE);
- if (cpu_is_noncoherent_r10000(dev)) {
+ if (!plat_device_is_coherent(dev)) {
unsigned long addr;
addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
- if (!plat_device_is_coherent(dev))
+ if (cpu_is_noncoherent_r10000(dev))
__dma_sync((unsigned long)page_address(sg->page),
sg->length, direction);
plat_unmap_dma_mem(sg->dma_address);
uart->iu_thr = c;
}
-char __init prom_getchar(void)
-{
- return 0;
-}
-
static void inline ja_console_probe(void)
{
struct uart_port up;
},
};
-static char eth0_mac_addr[ETH_ALEN];
-
static struct mv643xx_eth_platform_data eth0_pd = {
- .mac_addr = eth0_mac_addr,
+ .port_number = 0,
.tx_sram_addr = MV_SRAM_BASE_ETH0,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
},
};
-static char eth1_mac_addr[ETH_ALEN];
-
static struct mv643xx_eth_platform_data eth1_pd = {
- .mac_addr = eth1_mac_addr,
+ .port_number = 1,
.tx_sram_addr = MV_SRAM_BASE_ETH1,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
},
};
-static char eth2_mac_addr[ETH_ALEN];
-
static struct mv643xx_eth_platform_data eth2_pd = {
- .mac_addr = eth2_mac_addr,
+ .port_number = 2,
};
static struct platform_device eth2_device = {
int ret;
get_mac(mac);
- eth_mac_add(eth0_mac_addr, mac, 0);
- eth_mac_add(eth1_mac_addr, mac, 1);
- eth_mac_add(eth2_mac_addr, mac, 2);
+ eth_mac_add(eth0_pd.mac_addr, mac, 0);
+ eth_mac_add(eth1_pd.mac_addr, mac, 1);
+ eth_mac_add(eth2_pd.mac_addr, mac, 2);
ret = platform_add_devices(mv643xx_eth_pd_devs,
ARRAY_SIZE(mv643xx_eth_pd_devs));
},
};
-static char eth0_mac_addr[ETH_ALEN];
-
static struct mv643xx_eth_platform_data eth0_pd = {
- .mac_addr = eth0_mac_addr,
+ .port_number = 0,
.tx_sram_addr = MV_SRAM_BASE_ETH0,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
},
};
-static char eth1_mac_addr[ETH_ALEN];
-
static struct mv643xx_eth_platform_data eth1_pd = {
- .mac_addr = eth1_mac_addr,
+ .port_number = 1,
.tx_sram_addr = MV_SRAM_BASE_ETH1,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
},
};
-static char eth2_mac_addr[ETH_ALEN];
-
static struct mv643xx_eth_platform_data eth2_pd = {
- .mac_addr = eth2_mac_addr,
+ .port_number = 2,
};
static struct platform_device eth2_device = {
int ret;
get_mac(mac);
- eth_mac_add(eth0_mac_addr, mac, 0);
- eth_mac_add(eth1_mac_addr, mac, 1);
- eth_mac_add(eth2_mac_addr, mac, 2);
+ eth_mac_add(eth0_pd.mac_addr, mac, 0);
+ eth_mac_add(eth1_pd.mac_addr, mac, 1);
+ eth_mac_add(eth2_pd.mac_addr, mac, 2);
ret = platform_add_devices(mv643xx_eth_pd_devs,
ARRAY_SIZE(mv643xx_eth_pd_devs));
},
};
-static char eth0_mac_addr[ETH_ALEN];
-
static struct mv643xx_eth_platform_data eth0_pd = {
- .mac_addr = eth0_mac_addr,
+ .port_number = 0,
.tx_sram_addr = MV_SRAM_BASE_ETH0,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
},
};
-static char eth1_mac_addr[ETH_ALEN];
-
static struct mv643xx_eth_platform_data eth1_pd = {
- .mac_addr = eth1_mac_addr,
+ .port_number = 1,
.tx_sram_addr = MV_SRAM_BASE_ETH1,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
int ret;
get_mac(mac);
- eth_mac_add(eth0_mac_addr, mac, 0);
- eth_mac_add(eth1_mac_addr, mac, 1);
+ eth_mac_add(eth0_pd.mac_addr, mac, 0);
+ eth_mac_add(eth1_pd.mac_addr, mac, 1);
ret = platform_add_devices(mv643xx_eth_pd_devs,
ARRAY_SIZE(mv643xx_eth_pd_devs));
obj-y += irq.o gt-irq.o prom.o reset.o setup.o
obj-$(CONFIG_KGDB) += dbg_io.o
-
-EXTRA_AFLAGS := $(CFLAGS)
switch (current_cpu_data.cputype) {
case CPU_R10000:
counters = 2;
+ break;
case CPU_R12000:
case CPU_R14000:
counters = 4;
+ break;
default:
counters = __n_counters();
{
unsigned char irq = pin;
+ /* SMSC SLC90E66 IDE uses irq 14, 15 (default) */
+ if (dev->vendor == PCI_VENDOR_ID_EFAR &&
+ dev->device == PCI_DEVICE_ID_EFAR_SLC90E66_1)
+ return irq;
/* IRQ rotation (PICMG) */
irq--; /* 0-3 */
if (dev->bus->parent == NULL &&
{
return 0;
}
-
-int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
-{
- /* SMSC SLC90E66 IDE uses irq 14, 15 (default) */
- if (!(dev->vendor == PCI_VENDOR_ID_EFAR &&
- dev->device == PCI_DEVICE_ID_EFAR_SLC90E66_1))
- return pci_get_irq(dev, pin);
-
- dev->irq = irq;
-}
#include <asm/mipsregs.h>
#include <asm/sni.h>
+#include <irq.h>
+
/*
* PCIMT Shortcuts ...
*/
error = -1;
DBG("Au1x Master Abort\n");
} else if ((status >> 28) & 0xf) {
- DBG("PCI ERR detected: status %x\n", status);
+ DBG("PCI ERR detected: device %d, status %x\n", device, ((status >> 28) & 0xf));
+
+ /* clear errors */
+ au_writel(status & 0xf000ffff, Au1500_PCI_STATCMD);
+
*data = 0xffffffff;
error = -1;
}
return pcibios_plat_dev_init(dev);
}
-static void __init pcibios_fixup_device_resources(struct pci_dev *dev,
+static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
struct pci_bus *bus)
{
/* Update device resources. */
extern int pnx8550_console_port;
-/* used by prom_printf */
+/* used by early printk */
void prom_putchar(char c)
{
if (pnx8550_console_port != -1) {
#include <uart.h>
#include <nand.h>
-extern void prom_printf(char *fmt, ...);
-
extern void __init board_setup(void);
extern void pnx8550_machine_restart(char *);
extern void pnx8550_machine_halt(void);
extern struct resource iomem_resource;
extern void pnx8550_time_init(void);
extern void rs_kgdb_hook(int tty_no);
-extern void prom_printf(char *fmt, ...);
extern char *prom_getcmdline(void);
struct resource standard_io_resources[] = {
argptr += strlen("console=ttyS");
pnx8550_console_port = *argptr == '0' ? 0 : 1;
- /* We must initialize the UART (console) before prom_printf */
+ /* We must initialize the UART (console) before early printk */
/* Set LCR to 8-bit and BAUD to 38400 (no 5) */
ip3106_lcr(UART_BASE, pnx8550_console_port) =
PNX8XXX_UART_LCR_8BIT;
argptr += strlen("kgdb=ttyS");
line = *argptr == '0' ? 0 : 1;
rs_kgdb_hook(line);
- prom_printf("KGDB: Using ttyS%i for session, "
- "please connect your debugger\n", line ? 1 : 0);
+ pr_info("KGDB: Using ttyS%i for session, "
+ "please connect your debugger\n", line ? 1 : 0);
}
#endif
return;
while ((readb_outer_space(lsr) & 0x20) == 0);
writeb_outer_space(thr, c);
}
-
-char __init prom_getchar(void)
-{
- return 0;
-}
ip22-time.o ip22-nvram.o ip22-reset.o ip22-setup.o
obj-$(CONFIG_EISA) += ip22-eisa.o
-
-EXTRA_AFLAGS := $(CFLAGS)
obj-$(CONFIG_EARLY_PRINTK) += ip27-console.o
obj-$(CONFIG_KGDB) += ip27-dbgio.o
obj-$(CONFIG_SMP) += ip27-smp.o
-
-EXTRA_AFLAGS := $(CFLAGS)
* Copyright (C) 2001, 2002 Ralf Baechle
*/
#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/kdev_t.h>
-#include <linux/major.h>
-#include <linux/termios.h>
-#include <linux/sched.h>
-#include <linux/tty.h>
#include <asm/page.h>
#include <asm/semaphore.h>
return &ioc3->sregs.uarta;
}
-void prom_putchar(char c)
+void __init prom_putchar(char c)
{
struct ioc3_uartregs *uart = console_uart();
while ((uart->iu_lsr & 0x20) == 0);
uart->iu_thr = c;
}
-
-static void ioc3_console_write(struct console *con, const char *s, unsigned n)
-{
- while (n-- && *s) {
- if (*s == '\n')
- prom_putchar('\r');
- prom_putchar(*s);
- s++;
- }
-}
-
-static struct console ioc3_console = {
- .name = "ioc3",
- .write = ioc3_console_write,
- .flags = CON_PRINTBUFFER | CON_BOOT,
- .index = -1
-};
-
-__init void ip27_setup_console(void)
-{
- register_console(&ioc3_console);
-}
-
-void __init disable_early_printk(void)
-{
- unregister_console(&ioc3_console);
-}
ioc3->eier = 0;
}
-extern void ip27_setup_console(void);
extern void ip27_time_init(void);
extern void ip27_reboot_setup(void);
hubreg_t p, e, n_mode;
nasid_t nid;
- ip27_setup_console();
ip27_reboot_setup();
/*
obj-y += ip32-berr.o ip32-irq.o ip32-setup.o ip32-reset.o \
crime.o ip32-memory.o
-
-EXTRA_AFLAGS := $(CFLAGS)
config SIBYTE_CFE
bool "Booting from CFE"
depends on SIBYTE_SB1xxx_SOC
+ select SYS_HAS_EARLY_PRINTK
help
Make use of the CFE API for enumerating available memory,
controlling secondary CPUs, and possibly console output.
config SIBYTE_STANDALONE
bool
depends on SIBYTE_SB1xxx_SOC && !SIBYTE_CFE
+ select SYS_HAS_EARLY_PRINTK
default y
config SIBYTE_STANDALONE_RAM_SIZE
obj-y := setup.o irq.o time.o
obj-$(CONFIG_SMP) += smp.o
-
-EXTRA_AFLAGS := $(CFLAGS)
#ifdef CONFIG_GDB_CONSOLE
register_gdb_console();
#endif
- prom_printf("Waiting for GDB on UART port %d\n", kgdb_port);
+ printk("Waiting for GDB on UART port %d\n", kgdb_port);
set_debug_traps();
breakpoint();
}
break;
default:
- prom_printf("Unknown part type %x\n", part_type);
+ printk("Unknown part type %x\n", part_type);
ret = 1;
break;
}
pass_str = "B0 (pass2)";
break;
default:
- prom_printf("Unknown %s rev %x\n", soc_str, soc_pass);
+ printk("Unknown %s rev %x\n", soc_str, soc_pass);
periph_rev = 1;
pass_str = "Unknown Revision";
break;
soc_pass = G_SYS_REVISION(sys_rev);
if (sys_rev_decode()) {
- prom_printf("Restart after failure to identify SiByte chip\n");
+ printk("Restart after failure to identify SiByte chip\n");
machine_restart(NULL);
}
plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
zbbus_mhz = ((plldiv >> 1) * 50) + ((plldiv & 1) * 25);
- prom_printf("Broadcom SiByte %s %s @ %d MHz (SB-1A rev %d)\n",
+ printk("Broadcom SiByte %s %s @ %d MHz (SB-1A rev %d)\n",
soc_str, pass_str, zbbus_mhz * 2, sb1_pass);
- prom_printf("Board type: %s\n", get_system_type());
+ printk("Board type: %s\n", get_system_type());
}
goto fail;
}
initrd_end = initrd_start + initrd_size;
- prom_printf("Found initrd of %lx@%lx\n", initrd_size, initrd_start);
+ printk("Found initrd of %lx@%lx\n", initrd_size, initrd_start);
return 1;
fail:
- prom_printf("Bad initrd argument. Disabling initrd\n");
+ printk("Bad initrd argument. Disabling initrd\n");
initrd_start = 0;
initrd_end = 0;
return 1;
}
if (cfe_eptseal != CFE_EPTSEAL) {
/* too early for panic to do any good */
- prom_printf("CFE's entrypoint seal doesn't match. Spinning.");
+ printk("CFE's entrypoint seal doesn't match. Spinning.");
while (1) ;
}
cfe_init(cfe_handle, cfe_ept);
} else {
/* The loader should have set the command line */
/* too early for panic to do any good */
- prom_printf("LINUX_CMDLINE not defined in cfe.");
+ printk("LINUX_CMDLINE not defined in cfe.");
while (1) ;
}
}
obj-$(CONFIG_SIBYTE_TBPROF) += bcm1250_tbprof.o
obj-$(CONFIG_SIBYTE_STANDALONE) += prom.o
obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
-
-EXTRA_AFLAGS := $(CFLAGS)
ret = setup_bcm112x();
break;
default:
- prom_printf("Unknown SOC type %x\n", soc_type);
+ printk("Unknown SOC type %x\n", soc_type);
ret = 1;
break;
}
pass_str = "A0-A6";
war_pass = K_SYS_REVISION_BCM1250_PASS2;
} else {
- prom_printf("Unknown BCM1250 rev %x\n", soc_pass);
+ printk("Unknown BCM1250 rev %x\n", soc_pass);
ret = 1;
}
break;
pass_str = "A2";
break;
default:
- prom_printf("Unknown %s rev %x\n", soc_str, soc_pass);
+ printk("Unknown %s rev %x\n", soc_str, soc_pass);
ret = 1;
}
return ret;
soc_pass = G_SYS_REVISION(sys_rev);
if (sys_rev_decode()) {
- prom_printf("Restart after failure to identify SiByte chip\n");
+ printk("Restart after failure to identify SiByte chip\n");
machine_restart(NULL);
}
plldiv = G_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
zbbus_mhz = ((plldiv >> 1) * 50) + ((plldiv & 1) * 25);
- prom_printf("Broadcom SiByte %s %s @ %d MHz (SB1 rev %d)\n",
+ printk("Broadcom SiByte %s %s @ %d MHz (SB1 rev %d)\n",
soc_str, pass_str, zbbus_mhz * 2, sb1_pass);
- prom_printf("Board type: %s\n", get_system_type());
+ printk("Board type: %s\n", get_system_type());
switch (war_pass) {
case K_SYS_REVISION_BCM1250_PASS1:
#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS
- prom_printf("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
+ printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
"and the kernel doesn't have the proper "
"workarounds compiled in. @@@@\n");
bad_config = 1;
/* Pass 2 - easiest as default for now - so many numbers */
#if !defined(CONFIG_SB1_PASS_2_WORKAROUNDS) || \
!defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS)
- prom_printf("@@@@ This is a BCM1250 A3-A10 board, and the "
+ printk("@@@@ This is a BCM1250 A3-A10 board, and the "
"kernel doesn't have the proper workarounds "
"compiled in. @@@@\n");
bad_config = 1;
#endif
#ifdef CONFIG_CPU_HAS_PREFETCH
- prom_printf("@@@@ Prefetches may be enabled in this kernel, "
+ printk("@@@@ Prefetches may be enabled in this kernel, "
"but are buggy on this board. @@@@\n");
bad_config = 1;
#endif
break;
case K_SYS_REVISION_BCM1250_PASS2_2:
#ifndef CONFIG_SB1_PASS_2_WORKAROUNDS
- prom_printf("@@@@ This is a BCM1250 B1/B2. board, and the "
+ printk("@@@@ This is a BCM1250 B1/B2. board, and the "
"kernel doesn't have the proper workarounds "
"compiled in. @@@@\n");
bad_config = 1;
#endif
#if defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS) || \
!defined(CONFIG_CPU_HAS_PREFETCH)
- prom_printf("@@@@ This is a BCM1250 B1/B2, but the kernel is "
+ printk("@@@@ This is a BCM1250 B1/B2, but the kernel is "
"conservatively configured for an 'A' stepping. "
"@@@@\n");
#endif
break;
}
if (bad_config) {
- prom_printf("Invalid configuration for this chip.\n");
+ printk("Invalid configuration for this chip.\n");
machine_restart(NULL);
}
}
obj-y += irq.o reset.o setup.o ds1216.o a20r.o rm200.o pcimt.o pcit.o time.o
obj-$(CONFIG_CPU_BIG_ENDIAN) += sniprom.o
-
-EXTRA_AFLAGS := $(CFLAGS)
u32 pending = (read_c0_cause() & read_c0_status());
if (pending & C_IRQ5)
- do_IRQ (SNI_MIPS_IRQ_CPU_BASE + 7);
+ do_IRQ (MIPS_CPU_IRQ_BASE + 7);
else if (pending & C_IRQ4)
- do_IRQ (SNI_MIPS_IRQ_CPU_BASE + 6);
+ do_IRQ (MIPS_CPU_IRQ_BASE + 6);
else if (pending & C_IRQ3)
pcimt_hwint3();
else if (pending & C_IRQ1)
if (pending & C_IRQ1)
pcit_hwint1();
else if (pending & C_IRQ2)
- do_IRQ (SNI_MIPS_IRQ_CPU_BASE + 4);
+ do_IRQ (MIPS_CPU_IRQ_BASE + 4);
else if (pending & C_IRQ3)
- do_IRQ (SNI_MIPS_IRQ_CPU_BASE + 5);
+ do_IRQ (MIPS_CPU_IRQ_BASE + 5);
else if (pending & C_IRQ5)
- do_IRQ (SNI_MIPS_IRQ_CPU_BASE + 7);
+ do_IRQ (MIPS_CPU_IRQ_BASE + 7);
}
static void sni_pcit_hwint_cplus(void)
if (pending & C_IRQ0)
pcit_hwint0();
else if (pending & C_IRQ2)
- do_IRQ (SNI_MIPS_IRQ_CPU_BASE + 4);
+ do_IRQ (MIPS_CPU_IRQ_BASE + 4);
else if (pending & C_IRQ3)
- do_IRQ (SNI_MIPS_IRQ_CPU_BASE + 5);
+ do_IRQ (MIPS_CPU_IRQ_BASE + 5);
else if (pending & C_IRQ5)
- do_IRQ (SNI_MIPS_IRQ_CPU_BASE + 7);
+ do_IRQ (MIPS_CPU_IRQ_BASE + 7);
}
void __init sni_pcit_irq_init(void)
int irq;
if (pending & C_IRQ5)
- do_IRQ (SNI_MIPS_IRQ_CPU_BASE + 7);
+ do_IRQ (MIPS_CPU_IRQ_BASE + 7);
else if (pending & C_IRQ0) {
clear_c0_status (IE_IRQ0);
mask = *(volatile u8 *)SNI_RM200_INT_ENA_REG ^ 0x1f;
* Copyright (C) 2005-2006 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*/
+#define DEBUG
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#define PROM_ENTRY(x) (PROM_VEC + (x))
-#define DEBUG
-#ifdef DEBUG
-#define DBG_PRINTF(x...) prom_printf(x)
-#else
-#define DBG_PRINTF(x...)
-#endif
-
static int *(*__prom_putchar)(int) = (int *(*)(int))PROM_ENTRY(PROM_PUTCHAR);
+
+void prom_putchar(char c)
+{
+ __prom_putchar(c);
+}
+
static char *(*__prom_getenv)(char *) = (char *(*)(char *))PROM_ENTRY(PROM_GETENV);
static void (*__prom_get_memconf)(void *) = (void (*)(void *))PROM_ENTRY(PROM_GET_MEMCONF);
return __prom_getenv(s);
}
-void prom_printf(char *fmt, ...)
-{
- va_list args;
- char ppbuf[1024];
- char *bptr;
-
- va_start(args, fmt);
- vsprintf(ppbuf, fmt, args);
-
- bptr = ppbuf;
-
- while (*bptr != 0) {
- if (*bptr == '\n')
- __prom_putchar('\r');
-
- __prom_putchar(*bptr++);
- }
- va_end(args);
-}
-
void __init prom_free_prom_memory(void)
{
}
{
int i;
- prom_printf("SNI IDProm dump:\n");
+ pr_debug("SNI IDProm dump:\n");
for (i = 0; i < 256; i++) {
if (i%16 == 0)
- prom_printf("%04x ", i);
+ pr_debug("%04x ", i);
- prom_printf("%02x ", *(unsigned char *) (SNI_IDPROM_BASE + i));
+ printk("%02x ", *(unsigned char *) (SNI_IDPROM_BASE + i));
if (i % 16 == 15)
- prom_printf("\n");
+ printk("\n");
}
}
#endif
/* MemSIZE from prom in 16MByte chunks */
memsize = *((unsigned char *) SNI_IDPROM_MEMSIZE) * 16;
- DBG_PRINTF("IDProm memsize: %lu MByte\n", memsize);
+ pr_debug("IDProm memsize: %lu MByte\n", memsize);
/* get memory bank layout from prom */
__prom_get_memconf(&memconf);
- DBG_PRINTF("prom_get_mem_conf memory configuration:\n");
+ pr_debug("prom_get_mem_conf memory configuration:\n");
for (i = 0;i < 8 && memconf[i].size; i++) {
if (sni_brd_type == SNI_BRD_PCI_TOWER ||
sni_brd_type == SNI_BRD_PCI_TOWER_CPLUS) {
memconf[i].base -= 0x20000000;
}
}
- DBG_PRINTF("Bank%d: %08x @ %08x\n", i,
+ pr_debug("Bank%d: %08x @ %08x\n", i,
memconf[i].size, memconf[i].base);
add_memory_region(memconf[i].base, memconf[i].size, BOOT_MEM_RAM);
}
systype = "RM300-Exx";
break;
}
- DBG_PRINTF("Found SNI brdtype %02x name %s\n", sni_brd_type,systype);
+ pr_debug("Found SNI brdtype %02x name %s\n", sni_brd_type,systype);
#ifdef DEBUG
sni_idprom_dump();
void __init plat_timer_setup(struct irqaction *irq)
{
- u32 count;
- u32 c1;
- u32 c2;
-
setup_irq(TX4927_IRQ_CPU_TIMER, irq);
- /* to generate the first timer interrupt */
- c1 = read_c0_count();
- count = c1 + (mips_hpt_frequency / HZ);
- write_c0_compare(count);
- c2 = read_c0_count();
-
#ifdef CONFIG_TOSHIBA_RBTX4927
{
extern void toshiba_rbtx4927_timer_setup(struct irqaction
obj-y += toshiba_rbtx4927_prom.o
obj-y += toshiba_rbtx4927_setup.o
obj-y += toshiba_rbtx4927_irq.o
-
-EXTRA_AFLAGS := $(CFLAGS)
#include <asm/wbflush.h>
#include <linux/bootmem.h>
#include <linux/blkdev.h>
-#ifdef CONFIG_RTC_DS1742
-#include <linux/ds1742rtc.h>
-#endif
#ifdef CONFIG_TOSHIBA_FPCIB0
#include <asm/tx4927/smsc_fdc37m81x.h>
#endif
#include <linux/pci.h>
#include <linux/timex.h>
#include <linux/pm.h>
+#include <linux/platform_device.h>
#include <asm/bootinfo.h>
#include <asm/page.h>
#include <asm/time.h>
#include <linux/bootmem.h>
#include <linux/blkdev.h>
-#ifdef CONFIG_RTC_DS1742
-#include <linux/ds1742rtc.h>
-#endif
#ifdef CONFIG_TOSHIBA_FPCIB0
#include <asm/tx4927/smsc_fdc37m81x.h>
#endif
"+\n");
}
-#ifdef CONFIG_RTC_DS1742
-extern unsigned long rtc_ds1742_get_time(void);
-extern int rtc_ds1742_set_time(unsigned long);
-extern void rtc_ds1742_wait(void);
-#endif
-
void __init
toshiba_rbtx4927_time_init(void)
{
- u32 c1;
- u32 c2;
-
TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT, "-\n");
-#ifdef CONFIG_RTC_DS1742
-
- rtc_mips_get_time = rtc_ds1742_get_time;
- rtc_mips_set_time = rtc_ds1742_set_time;
-
- TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT,
- ":rtc_ds1742_init()-\n");
- rtc_ds1742_init(0xbc010000);
- TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT,
- ":rtc_ds1742_init()+\n");
-
- TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT,
- ":Calibrate mips_hpt_frequency-\n");
- rtc_ds1742_wait();
-
- /* get the count */
- c1 = read_c0_count();
-
- /* wait for the seconds to change again */
- rtc_ds1742_wait();
-
- /* get the count again */
- c2 = read_c0_count();
-
- TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT,
- ":Calibrate mips_hpt_frequency+\n");
- TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT,
- ":c1=%12u\n", c1);
- TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT,
- ":c2=%12u\n", c2);
-
- /* this diff is as close as we are going to get to counter ticks per sec */
- mips_hpt_frequency = abs(c2 - c1);
- TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT,
- ":f1=%12u\n", mips_hpt_frequency);
-
- /* round to 1/10th of a MHz */
- mips_hpt_frequency /= (100 * 1000);
- mips_hpt_frequency *= (100 * 1000);
- TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT,
- ":f2=%12u\n", mips_hpt_frequency);
-
- TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_INFO,
- ":mips_hpt_frequency=%uHz (%uMHz)\n",
- mips_hpt_frequency,
- mips_hpt_frequency / 1000000);
-#else
- mips_hpt_frequency = 100000000;
-#endif
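+ /* the CP0 count register on the TX4927 advances at half the CPU clock */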
+ mips_hpt_frequency = tx4927_cpu_clock / 2;
TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT, "+\n");
TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIMER_SETUP,
"+\n");
}
+
+static int __init toshiba_rbtx4927_rtc_init(void)
+{
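+ /* register the on-board DS1742 timekeeping NVRAM for the RTC class driver to claim */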
+ struct resource res = {
+ .start = 0x1c010000,
+ .end = 0x1c010000 + 0x800 - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ struct platform_device *dev =
+ platform_device_register_simple("ds1742", -1, &res, 1);
+ return IS_ERR(dev) ? PTR_ERR(dev) : 0;
+}
+device_initcall(toshiba_rbtx4927_rtc_init);
void __init plat_timer_setup(struct irqaction *irq)
{
- u32 count;
- u32 c1;
- u32 c2;
-
setup_irq(TX4938_IRQ_CPU_TIMER, irq);
-
- c1 = read_c0_count();
- count = c1 + (mips_hpt_frequency / HZ);
- write_c0_compare(count);
- c2 = read_c0_count();
}
#
obj-y += bcu.o cmu.o icu.o init.o irq.o pmu.o type.o
-
-EXTRA_AFLAGS := $(CFLAGS)
struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly;
+extern int update_cr16_clocksource(void); /* from time.c */
+
/*
** PARISC CPU driver - claim "device" and initialize CPU data structures.
**
}
#endif
+ /* If we've registered more than one cpu,
+ * we'll use the jiffies clocksource since cr16
+ * is not synchronized between CPUs.
+ */
+ update_cr16_clocksource();
+
return 0;
}
#include <asm/io.h>
#include <asm/setup.h>
-char __initdata command_line[COMMAND_LINE_SIZE] __read_mostly;
+char __initdata command_line[COMMAND_LINE_SIZE];
/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */
struct proc_dir_entry * proc_runway_root __read_mostly = NULL;
return get_cycles();
}
-static int cr16_update_callback(void);
-
static struct clocksource clocksource_cr16 = {
.name = "cr16",
.rating = 300,
.mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
.mult = 0, /* to be set */
.shift = 22,
- .update_callback = cr16_update_callback,
- .is_continuous = 1,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static int cr16_update_callback(void)
+#ifdef CONFIG_SMP
+int update_cr16_clocksource(void)
{
int change = 0;
/* since the cr16 cycle counters are not synchronized across CPUs,
we'll check if we should switch to a safe clocksource: */
if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
- clocksource_cr16.rating = 0;
- clocksource_reselect();
+ clocksource_change_rating(&clocksource_cr16, 0);
change = 1;
}
return change;
}
-
+#else
+int update_cr16_clocksource(void)
+{
+ return 0; /* no change */
+}
+#endif /*CONFIG_SMP*/
void __init start_cpu_itimer(void)
{
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;}
void pci_disable_msix(struct pci_dev *dev) {}
void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
-void disable_msi_mode(struct pci_dev *dev, int pos, int type) {}
void pci_no_msi(void) {}
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);
static struct mv643xx_eth_platform_data eth0_pd = {
+ .port_number = 0,
.tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0,
.tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
.tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
};
static struct mv643xx_eth_platform_data eth1_pd = {
+ .port_number = 1,
.tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1,
.tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
.tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
menu "PS3 Platform Options"
depends on PPC_PS3
+config PS3_ADVANCED
+ depends on PPC_PS3
+ bool "PS3 Advanced configuration options"
+ help
+ This gives you access to some advanced options for the PS3. The
+ defaults should be fine for most users, but these options may make
+ it possible to better control the kernel configuration if you know
+ what you are doing.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about these options.
+
+ Most users should say N to this question.
+
config PS3_HTAB_SIZE
depends on PPC_PS3
- int "PS3 Platform pagetable size"
+ int "PS3 Platform pagetable size" if PS3_ADVANCED
range 18 20
default 20
help
config PS3_VUART
depends on PPC_PS3
- bool "PS3 Virtual UART support"
+ bool "PS3 Virtual UART support" if PS3_ADVANCED
default y
help
Include support for the PS3 Virtual UART.
general, all users will say Y.
config PS3_PS3AV
- tristate "PS3 AV settings driver"
- depends on PPC_PS3
- select PS3_VUART
+ tristate "PS3 AV settings driver" if PS3_ADVANCED
+ depends on PS3_VUART
default y
help
Include support for the PS3 AV Settings driver.
general, all users will say Y or M.
config PS3_SYS_MANAGER
- bool "PS3 System Manager driver"
- select PS3_VUART
+ bool "PS3 System Manager driver" if PS3_ADVANCED
+ depends on PS3_VUART
default y
help
Include support for the PS3 System Manager.
},
};
-static struct mv643xx_eth_platform_data eth0_pd;
+static struct mv643xx_eth_platform_data eth0_pd = {
+ .port_number = 0,
+};
static struct platform_device eth0_device = {
.name = MV643XX_ETH_NAME,
},
};
-static struct mv643xx_eth_platform_data eth1_pd;
+static struct mv643xx_eth_platform_data eth1_pd = {
+ .port_number = 1,
+};
static struct platform_device eth1_device = {
.name = MV643XX_ETH_NAME,
},
};
-static struct mv643xx_eth_platform_data eth2_pd;
+static struct mv643xx_eth_platform_data eth2_pd = {
+ .port_number = 2,
+};
static struct platform_device eth2_device = {
.name = MV643XX_ETH_NAME,
Select this option, if you want to share the text segment of the
Linux kernel between different VM guests. This reduces memory
usage with lots of guests but greatly increases kernel size.
+ Also, if a kernel was IPL'ed from a shared segment, the kexec
+ system call will not work.
You should only select this option if you know what you are
doing and want to exploit this feature.
.long .Lduct # cr2: dispatchable unit control table
.long 0 # cr3: instruction authorization
.long 0 # cr4: instruction authorization
- .long 0xffffffff # cr5: primary-aste origin
+ .long .Lduct # cr5: primary-aste origin
.long 0 # cr6: I/O interrupts
.long 0 # cr7: secondary space segment table
.long 0 # cr8: access registers translation
.long 0 # cr13: home space segment table
.long 0xc0000000 # cr14: machine check handling off
.long 0 # cr15: linkage stack operations
-.Lduct: .long 0,0,0,0,0,0,0,0
- .long 0,0,0,0,0,0,0,0
.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
.Linittu: .long init_thread_union
.Lstartup_init:
.long startup_init
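+# The DUCT and the access lists it points to have architectural
+# alignment requirements, hence the .align directives below.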
+ .align 64
+.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+ .long 0,0,0,0,0,0,0,0
+ .align 128
+.Lduald:.rept 8
+ .long 0x80000000,0,0,0 # invalid access-list entries
+ .endr
.org 0x12000
.globl _ehead
.quad .Lduct # cr2: dispatchable unit control table
.quad 0 # cr3: instruction authorization
.quad 0 # cr4: instruction authorization
- .quad 0xffffffffffffffff # cr5: primary-aste origin
+ .quad .Lduct # cr5: primary-aste origin
.quad 0 # cr6: I/O interrupts
.quad 0 # cr7: secondary space segment table
.quad 0 # cr8: access registers translation
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
.quad 0 # cr15: linkage stack operations
-.Lduct: .long 0,0,0,0,0,0,0,0
- .long 0,0,0,0,0,0,0,0
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
.Lnop: .long 0x07000700
.Lparmaddr:
.quad PARMAREA
+ .align 64
+.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+ .long 0,0,0,0,0,0,0,0
+ .align 128
+.Lduald:.rept 8
+ .long 0x80000000,0,0,0 # invalid access-list entries
+ .endr
.org 0x12000
.globl _ehead
reset->fn();
}
-extern __u32 dump_prefix_page;
+u32 dump_prefix_page;
void s390_reset_system(void)
{
lc->panic_stack = S390_lowcore.panic_stack;
/* Save prefix page address for dump case */
- dump_prefix_page = (unsigned long) lc;
+ dump_prefix_page = (u32)(unsigned long) lc;
/* Disable prefixing */
set_prefix(0);
}
p = get_kprobe(addr);
- if (!p) {
- if (*addr != BREAKPOINT_INSTRUCTION) {
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
- * either a probepoint or a debugger breakpoint
- * at this address. In either case, no further
- * handling of this interrupt is appropriate.
- *
- */
- ret = 1;
- }
- /* Not one of ours: let kernel handle it */
+ if (!p)
+ /*
+ * No kprobe at this address. The fault has not been
+ * caused by a kprobe breakpoint. The race of breakpoint
+ * vs. kprobe remove does not exist because on s390 we
+ * use stop_machine_run to arm/disarm the breakpoints.
+ */
goto no_kprobe;
- }
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
set_current_kprobe(p, regs, kcb);
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/reset.h>
+#include <asm/ipl.h>
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
{
void *reboot_code_buffer;
+ /* Can't replace kernel image since it is read-only. */
+ if (ipl_flags & IPL_NSS_VALID)
+ return -ENOSYS;
+
/* We don't support anything but the default image type for now. */
if (image->type != KEXEC_TYPE_DEFAULT)
return -EINVAL;
#include <asm/lowcore.h>
+#
+# do_reipl_asm
+# Parameter: r2 = schid of reipl device
+#
.globl do_reipl_asm
do_reipl_asm: basr %r13,0
.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
stm %r0,%r15,__LC_GPREGS_SAVE_AREA
stctl %c0,%c15,__LC_CREGS_SAVE_AREA
stam %a0,%a15,__LC_AREGS_SAVE_AREA
- mvc __LC_PREFIX_SAVE_AREA(4),dump_prefix_page-.Lpg0(%r13)
+ l %r10,.Ldump_pfx-.Lpg0(%r13)
+ mvc __LC_PREFIX_SAVE_AREA(4),0(%r10)
stckc .Lclkcmp-.Lpg0(%r13)
mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
stpt __LC_CPU_TIMER_SAVE_AREA
st %r13, __LC_PSW_SAVE_AREA+4
-
lctl %c6,%c6,.Lall-.Lpg0(%r13)
lr %r1,%r2
mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
.align 8
.Lclkcmp: .quad 0x0000000000000000
.Lall: .long 0xff000000
+.Ldump_pfx: .long dump_prefix_page
.align 8
.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
.Lpcnew: .long 0x00080000,0x80000000+.Lecs
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
- .globl dump_prefix_page
-dump_prefix_page:
- .long 0x00000000
-
*/
#include <asm/lowcore.h>
+
+#
+# do_reipl_asm
+# Parameter: r2 = schid of reipl device
+#
+
.globl do_reipl_asm
do_reipl_asm: basr %r13,0
.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
- mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),dump_prefix_page-.Lpg0(%r13)
+ lg %r10,.Ldump_pfx-.Lpg0(%r13)
+ mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10)
stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1)
stckc .Lclkcmp-.Lpg0(%r13)
mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13)
.align 8
.Lclkcmp: .quad 0x0000000000000000
.Lall: .quad 0x00000000ff000000
+.Ldump_pfx: .quad dump_prefix_page
.Lregsave: .quad 0x0000000000000000
.align 16
/*
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
- .globl dump_prefix_page
-dump_prefix_page:
- .long 0x00000000
int cpu, local = 0;
/*
- * Can deadlock when interrupts are disabled or if in wrong context,
- * caller must disable preemption
+ * Can deadlock when interrupts are disabled or if in wrong context.
*/
- WARN_ON(irqs_disabled() || in_irq() || preemptible());
+ WARN_ON(irqs_disabled() || in_irq());
/*
* Check for local function call. We have to have the same call order
* Run a function on all other CPUs.
*
* You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. Must be called with preemption disabled.
- * You may call it from a bottom half.
+ * hardware interrupt handler. You may call it from a bottom half.
*/
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
int wait)
{
cpumask_t map;
+ preempt_disable();
map = cpu_online_map;
cpu_clear(smp_processor_id(), map);
__smp_call_function_map(func, info, nonatomic, wait, map);
+ preempt_enable();
return 0;
}
EXPORT_SYMBOL(smp_call_function);
* Run a function on one processor.
*
* You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. Must be called with preemption disabled.
- * You may call it from a bottom half.
+ * hardware interrupt handler. You may call it from a bottom half.
*/
int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
int wait, int cpu)
{
cpumask_t map = CPU_MASK_NONE;
+ preempt_disable();
cpu_set(cpu, map);
__smp_call_function_map(func, info, nonatomic, wait, map);
+ preempt_enable();
return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
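
Since the preempt_disable()/preempt_enable() pair now lives inside the
helpers, a process-context caller no longer has to disable preemption
itself. The following is a minimal, hypothetical usage sketch; the
callback and counter are invented for illustration, and only the
argument lists shown in the hunks above are assumed:

#include <linux/smp.h>
#include <asm/atomic.h>

static void count_remote_call(void *info)
{
	/* Runs on each target CPU; keep it short and non-blocking. */
	atomic_inc((atomic_t *) info);
}

static void example_caller(void)
{
	atomic_t hits = ATOMIC_INIT(0);

	/* nonatomic = 0, wait = 1: return after every CPU has run it. */
	smp_call_function(count_remote_call, &hits, 0, 1);

	/* Or run it on CPU 2 only, via the s390-specific helper. */
	smp_call_function_on(count_remote_call, &hits, 0, 1, 2);
}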
}
/*
- * Check which address space is addressed by the access
- * register in S390_lowcore.exc_access_id.
- * Returns 1 for user space and 0 for kernel space.
+ * Returns the address space associated with the fault.
+ * Returns 0 for kernel space, 1 for user space and
+ * 2 for code execution in user space with noexec=on.
*/
-static int __check_access_register(struct pt_regs *regs, int error_code)
-{
- int areg = S390_lowcore.exc_access_id;
-
- if (areg == 0)
- /* Access via access register 0 -> kernel address */
- return 0;
- save_access_regs(current->thread.acrs);
- if (regs && areg < NUM_ACRS && current->thread.acrs[areg] <= 1)
- /*
- * access register contains 0 -> kernel address,
- * access register contains 1 -> user space address
- */
- return current->thread.acrs[areg];
-
- /* Something unhealthy was done with the access registers... */
- die("page fault via unknown access register", regs, error_code);
- do_exit(SIGKILL);
- return 0;
-}
-
-/*
- * Check which address space the address belongs to.
- * May return 1 or 2 for user space and 0 for kernel space.
- * Returns 2 for user space in primary addressing mode with
- * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
- */
-static inline int check_user_space(struct pt_regs *regs, int error_code)
+static inline int check_space(struct task_struct *tsk)
{
/*
- * The lowest two bits of S390_lowcore.trans_exc_code indicate
- * which paging table was used:
- * 0: Primary Segment Table Descriptor
- * 1: STD determined via access register
- * 2: Secondary Segment Table Descriptor
- * 3: Home Segment Table Descriptor
+ * The lowest two bits of S390_lowcore.trans_exc_code
+ * indicate which paging table was used.
*/
- int descriptor = S390_lowcore.trans_exc_code & 3;
- if (unlikely(descriptor == 1))
- return __check_access_register(regs, error_code);
- if (descriptor == 2)
- return current->thread.mm_segment.ar4;
- return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
+ int desc = S390_lowcore.trans_exc_code & 3;
+
+ if (desc == 3) /* Home Segment Table Descriptor */
+ return switch_amode == 0;
+ if (desc == 2) /* Secondary Segment Table Descriptor */
+ return tsk->thread.mm_segment.ar4;
+#ifdef CONFIG_S390_SWITCH_AMODE
+ if (unlikely(desc == 1)) { /* STD determined via access register */
+ /* %a0 always indicates primary space. */
+ if (S390_lowcore.exc_access_id != 0) {
+ save_access_regs(tsk->thread.acrs);
+ /*
+ * An alet of 0 indicates primary space.
+ * An alet of 1 indicates secondary space.
+ * Any other alet values generate an
+ * alen-translation exception.
+ */
+ if (tsk->thread.acrs[S390_lowcore.exc_access_id])
+ return tsk->thread.mm_segment.ar4;
+ }
+ }
+#endif
+ /* Primary Segment Table Descriptor */
+ return switch_amode << s390_noexec;
}
/*
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
-static inline void __kprobes
+static inline void
do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct * vma;
unsigned long address;
- int user_address;
const struct exception_table_entry *fixup;
- int si_code = SEGV_MAPERR;
+ int si_code;
+ int space;
tsk = current;
mm = tsk->mm;
NULL pointer write access in kernel mode. */
if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
address = 0;
- user_address = 0;
+ space = 0;
goto no_context;
}
* the address
*/
address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
- user_address = check_user_space(regs, error_code);
+ space = check_space(tsk);
/*
* Verify that the fault happened in user space, that
* we are not in an interrupt and that there is a
* user context.
*/
- if (user_address == 0 || in_atomic() || !mm)
- goto no_context;
+ if (unlikely(space == 0 || in_atomic() || !mm))
+ goto no_context;
/*
* When we get here, the fault happened in the current
down_read(&mm->mmap_sem);
- vma = find_vma(mm, address);
- if (!vma)
- goto bad_area;
+ si_code = SEGV_MAPERR;
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
#ifdef CONFIG_S390_EXEC_PROTECT
- if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
+ if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
if (!signal_return(mm, regs, address, error_code))
/*
* signal_return() has done an up_read(&mm->mmap_sem)
* The instruction that caused the program check will
* be repeated. Don't signal single step via SIGTRAP.
*/
- clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+ clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- if (user_address == 0)
+ if (space == 0)
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %p\n", (void *)address);
else
goto no_context;
}
-void do_protection_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs,
+ unsigned long error_code)
{
regs->psw.addr -= (error_code >> 16);
do_exception(regs, 4, 1);
}
-void do_dat_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code)
{
do_exception(regs, error_code & 0xff, 0);
}
# Makefile for the R7780RP-1 specific parts of the kernel
#
-obj-y := setup.o io.o irq.o
+obj-y := setup.o irq.o
obj-$(CONFIG_PUSH_SWITCH) += psw.o
+++ /dev/null
-/*
- * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Renesas Solutions Highlander R7780RP-1
- *
- * Initial version only to support LAN access; some
- * placeholder code from io_r7780rp.c left in with the
- * expectation of later SuperIO and PCMCIA access.
- */
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <asm/r7780rp.h>
-#include <asm/addrspace.h>
-
-static inline unsigned long port88796l(unsigned int port, int flag)
-{
- unsigned long addr;
-
- if (flag)
- addr = PA_AX88796L + ((port - AX88796L_IO_BASE) << 1);
- else
- addr = PA_AX88796L + ((port - AX88796L_IO_BASE) << 1) + 0x1000;
-
- return addr;
-}
-
-#if defined(CONFIG_NE2000) || defined(CONFIG_NE2000_MODULE)
-#define CHECK_AX88796L_PORT(port) \
- ((port >= AX88796L_IO_BASE) && (port < (AX88796L_IO_BASE+0x20)))
-#else
-#define CHECK_AX88796L_PORT(port) (0)
-#endif
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used w/o translation for
- * compatibility.
- */
-u8 r7780rp_inb(unsigned long port)
-{
- if (CHECK_AX88796L_PORT(port))
- return ctrl_inw(port88796l(port, 0)) & 0xff;
- else if (is_pci_ioaddr(port))
- return ctrl_inb(pci_ioaddr(port));
-
- return ctrl_inw(port) & 0xff;
-}
-
-u8 r7780rp_inb_p(unsigned long port)
-{
- u8 v;
-
- if (CHECK_AX88796L_PORT(port))
- v = ctrl_inw(port88796l(port, 0)) & 0xff;
- else if (is_pci_ioaddr(port))
- v = ctrl_inb(pci_ioaddr(port));
- else
- v = ctrl_inw(port) & 0xff;
-
- ctrl_delay();
-
- return v;
-}
-
-u16 r7780rp_inw(unsigned long port)
-{
- if (is_pci_ioaddr(port))
- return ctrl_inw(pci_ioaddr(port));
-
- return ctrl_inw(port);
-}
-
-u32 r7780rp_inl(unsigned long port)
-{
- if (is_pci_ioaddr(port))
- return ctrl_inl(pci_ioaddr(port));
-
- return ctrl_inl(port);
-}
-
-void r7780rp_outb(u8 value, unsigned long port)
-{
- if (CHECK_AX88796L_PORT(port))
- ctrl_outw(value, port88796l(port, 0));
- else if (is_pci_ioaddr(port))
- ctrl_outb(value, pci_ioaddr(port));
- else
- ctrl_outb(value, port);
-}
-
-void r7780rp_outb_p(u8 value, unsigned long port)
-{
- if (CHECK_AX88796L_PORT(port))
- ctrl_outw(value, port88796l(port, 0));
- else if (is_pci_ioaddr(port))
- ctrl_outb(value, pci_ioaddr(port));
- else
- ctrl_outb(value, port);
-
- ctrl_delay();
-}
-
-void r7780rp_outw(u16 value, unsigned long port)
-{
- if (is_pci_ioaddr(port))
- ctrl_outw(value, pci_ioaddr(port));
- else
- ctrl_outw(value, port);
-}
-
-void r7780rp_outl(u32 value, unsigned long port)
-{
- if (is_pci_ioaddr(port))
- ctrl_outl(value, pci_ioaddr(port));
- else
- ctrl_outl(value, port);
-}
-
-void r7780rp_insb(unsigned long port, void *dst, unsigned long count)
-{
- volatile u16 *p;
- u8 *buf = dst;
-
- if (CHECK_AX88796L_PORT(port)) {
- p = (volatile u16 *)port88796l(port, 0);
- while (count--)
- *buf++ = *p & 0xff;
- } else if (is_pci_ioaddr(port)) {
- volatile u8 *bp = (volatile u8 *)pci_ioaddr(port);
-
- while (count--)
- *buf++ = *bp;
- } else {
- p = (volatile u16 *)port;
- while (count--)
- *buf++ = *p & 0xff;
- }
-}
-
-void r7780rp_insw(unsigned long port, void *dst, unsigned long count)
-{
- volatile u16 *p;
- u16 *buf = dst;
-
- if (CHECK_AX88796L_PORT(port))
- p = (volatile u16 *)port88796l(port, 1);
- else if (is_pci_ioaddr(port))
- p = (volatile u16 *)pci_ioaddr(port);
- else
- p = (volatile u16 *)port;
-
- while (count--)
- *buf++ = *p;
-
- flush_dcache_all();
-}
-
-void r7780rp_insl(unsigned long port, void *dst, unsigned long count)
-{
- if (is_pci_ioaddr(port)) {
- volatile u32 *p = (volatile u32 *)pci_ioaddr(port);
- u32 *buf = dst;
-
- while (count--)
- *buf++ = *p;
- }
-}
-
-void r7780rp_outsb(unsigned long port, const void *src, unsigned long count)
-{
- volatile u16 *p;
- const u8 *buf = src;
-
- if (CHECK_AX88796L_PORT(port)) {
- p = (volatile u16 *)port88796l(port, 0);
- while (count--)
- *p = *buf++;
- } else if (is_pci_ioaddr(port)) {
- volatile u8 *bp = (volatile u8 *)pci_ioaddr(port);
-
- while (count--)
- *bp = *buf++;
- } else
- while (count--)
- ctrl_outb(*buf++, port);
-}
-
-void r7780rp_outsw(unsigned long port, const void *src, unsigned long count)
-{
- volatile u16 *p;
- const u16 *buf = src;
-
- if (CHECK_AX88796L_PORT(port))
- p = (volatile u16 *)port88796l(port, 1);
- else if (is_pci_ioaddr(port))
- p = (volatile u16 *)pci_ioaddr(port);
- else
- p = (volatile u16 *)port;
-
- while (count--)
- *p = *buf++;
-
- flush_dcache_all();
-}
-
-void r7780rp_outsl(unsigned long port, const void *src, unsigned long count)
-{
- const u32 *buf = src;
- u32 *p;
-
- if (is_pci_ioaddr(port))
- p = (u32 *)pci_ioaddr(port);
- else
- p = (u32 *)port;
-
- while (count--)
- ctrl_outl(*buf++, (unsigned long)p);
-}
-
-void __iomem *r7780rp_ioport_map(unsigned long port, unsigned int size)
-{
- if (CHECK_AX88796L_PORT(port))
- return (void __iomem *)port88796l(port, size > 1);
- else if (is_pci_ioaddr(port))
- return (void __iomem *)pci_ioaddr(port);
-
- return (void __iomem *)port;
-}
struct sh_machine_vector mv_r7780rp __initmv = {
.mv_name = "Highlander R7780RP-1",
.mv_setup = r7780rp_setup,
-
.mv_nr_irqs = 109,
-
- .mv_inb = r7780rp_inb,
- .mv_inw = r7780rp_inw,
- .mv_inl = r7780rp_inl,
- .mv_outb = r7780rp_outb,
- .mv_outw = r7780rp_outw,
- .mv_outl = r7780rp_outl,
-
- .mv_inb_p = r7780rp_inb_p,
- .mv_inw_p = r7780rp_inw,
- .mv_inl_p = r7780rp_inl,
- .mv_outb_p = r7780rp_outb_p,
- .mv_outw_p = r7780rp_outw,
- .mv_outl_p = r7780rp_outl,
-
- .mv_insb = r7780rp_insb,
- .mv_insw = r7780rp_insw,
- .mv_insl = r7780rp_insl,
- .mv_outsb = r7780rp_outsb,
- .mv_outsw = r7780rp_outsw,
- .mv_outsl = r7780rp_outsl,
-
- .mv_ioport_map = r7780rp_ioport_map,
.mv_init_irq = init_r7780rp_IRQ,
};
ALIAS_MV(r7780rp)
#include <linux/platform_device.h>
#include <linux/pata_platform.h>
#include <linux/serial_8250.h>
+#include <linux/sm501.h>
#include <linux/pm.h>
#include <asm/machvec.h>
#include <asm/rts7751r2d.h>
.resource = heartbeat_resources,
};
+static struct resource sm501_resources[] = {
+ [0] = {
+ .start = 0x10000000,
+ .end = 0x13e00000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 0x13e00000,
+ .end = 0x13ffffff,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = 32,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device sm501_device = {
+ .name = "sm501",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(sm501_resources),
+ .resource = sm501_resources,
+};
+
static struct platform_device *rts7751r2d_devices[] __initdata = {
&uart_device,
&heartbeat_device,
&cf_ide_device,
+ &sm501_device,
};
static int __init rts7751r2d_devices_setup(void)
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.20
-# Thu Feb 15 17:17:29 2007
+# Linux kernel version: 2.6.21-rc1
+# Thu Mar 1 16:42:40 2007
#
CONFIG_SUPERH=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
# CONFIG_GENERIC_TIME is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
# CONFIG_IPC_NS is not set
+CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_SH_SHMIN is not set
# CONFIG_SH_7206_SOLUTION_ENGINE is not set
# CONFIG_SH_7619_SOLUTION_ENGINE is not set
-# CONFIG_SH_ASDAP310 is not set
# CONFIG_SH_UNKNOWN is not set
#
CONFIG_BOOT_LINK_OFFSET=0x00800000
# CONFIG_UBC_WAKEUP is not set
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
+CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1"
#
# Bus options
#
# Plug and Play support
#
+# CONFIG_PNPACPI is not set
#
# Block devices
#
# Input device support
#
-# CONFIG_INPUT is not set
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
#
# Hardware I/O ports
#
# Character devices
#
-# CONFIG_VT is not set
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_SERIAL_NONSTANDARD is not set
#
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_SM501=y
+
#
# Multimedia devices
#
#
# Graphics support
#
-CONFIG_FIRMWARE_EDID=y
-# CONFIG_FB is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Framebuffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_EPSON1355 is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_TRIDENT is not set
+CONFIG_FB_SM501=y
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_LOGO_SUPERH_MONO is not set
+# CONFIG_LOGO_SUPERH_VGA16 is not set
+CONFIG_LOGO_SUPERH_CLUT224=y
#
# Sound
# CONFIG_SOUND_VIA82CXXX is not set
CONFIG_AC97_BUS=m
+#
+# HID Devices
+#
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+
#
# USB support
#
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000
CONFIG_EARLY_PRINTK=y
-# CONFIG_KGDB is not set
+# CONFIG_SH_KGDB is not set
#
# Security options
syscall_exit_work:
! r0: current_thread_info->flags
! r8: current_thread_info
- tst #_TIF_SYSCALL_TRACE, r0
+ tst #_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP, r0
bt/s work_pending
tst #_TIF_NEED_RESCHED, r0
#ifdef CONFIG_TRACE_IRQFLAGS
#include <linux/module.h>
#include <linux/io.h>
#include <asm/machvec.h>
-#include <asm/cacheflush.h>
#ifdef CONFIG_CPU_SH3
/* SH3 has a PCMCIA bug that needs a dummy read from area 6 for a
while (count--)
*buf++ = *port_addr;
- flush_dcache_all();
dummy_read();
}
while (count--)
*port_addr = *buf++;
- flush_dcache_all();
dummy_read();
}
childregs->regs[15] = usp;
ti->addr_limit = USER_DS;
} else {
- childregs->regs[15] = (unsigned long)task_stack_page(p) +
- THREAD_SIZE;
+ childregs->regs[15] = (unsigned long)childregs;
ti->addr_limit = KERNEL_DS;
}
- if (clone_flags & CLONE_SETTLS)
+ if (clone_flags & CLONE_SETTLS)
childregs->gbr = childregs->regs[0];
childregs->regs[0] = 0; /* Set return value for child */
* SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*
*/
-
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/signal.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
return 0;
}
+static void ptrace_disable_singlestep(struct task_struct *child)
+{
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+
+ /*
+ * Ensure the UBC is not programmed at the next context switch.
+ *
+ * Normally this is not needed but there are sequences such as
+ * singlestep, signal delivery, and continue that leave the
+	 * ubc_pc non-zero, leading to spurious SIGTRAPs.
+ */
+ if (child->thread.ubc_pc != 0) {
+ ubc_usercnt -= 1;
+ child->thread.ubc_pc = 0;
+ }
+}
+
/*
* Called by kernel/ptrace.c when detaching..
*
*/
void ptrace_disable(struct task_struct *child)
{
- /* nothing to do.. */
+ ptrace_disable_singlestep(child);
}
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
- case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
unsigned long tmp;
int copied;
unsigned long tmp;
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
+ if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
+ if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
else
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+
+ ptrace_disable_singlestep(child);
+
child->exit_code = data;
wake_up_process(child);
ret = 0;
}
/*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
+ * make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
* exit.
*/
case PTRACE_KILL: {
ret = 0;
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
break;
+ ptrace_disable_singlestep(child);
child->exit_code = SIGKILL;
wake_up_process(child);
break;
ubc_usercnt += 1;
child->thread.ubc_pc = pc;
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
child->exit_code = data;
/* give it a chance to run. */
wake_up_process(child);
{
struct task_struct *tsk = current;
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
+ !test_thread_flag(TIF_SINGLESTEP))
return;
if (!(tsk->ptrace & PT_PTRACED))
return;
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
+ !test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
+
+ return;
}
no_signal:
if (regs->regs[0] == -ERESTARTNOHAND ||
regs->regs[0] == -ERESTARTSYS ||
regs->regs[0] == -ERESTARTNOINTR) {
- regs->regs[0] = save_r0;
+ regs->regs[0] = save_r0;
regs->pc -= 2;
} else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
regs->pc -= 2;
* Written by Niibe Yutaka
*/
#include <asm/thread_info.h>
+#include <asm/cache.h>
#include <asm-generic/vmlinux.lds.h>
#ifdef CONFIG_CPU_LITTLE_ENDIAN
. = ALIGN(PAGE_SIZE);
.data.page_aligned : { *(.data.page_aligned) }
- . = ALIGN(32);
+ . = ALIGN(L1_CACHE_BYTES);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
/*
* Write back & invalidate the D-cache of the page.
* (To avoid "alias" issues)
- *
- * This uses a lazy write-back on UP, which is explicitly
- * disabled on SMP.
*/
void flush_dcache_page(struct page *page)
{
-#ifndef CONFIG_SMP
- struct address_space *mapping = page_mapping(page);
-
- if (mapping && !mapping_mapped(mapping))
- set_bit(PG_dcache_dirty, &page->flags);
- else
-#endif
- {
+ if (test_bit(PG_mapped, &page->flags)) {
unsigned long phys = PHYSADDR(page_address(page));
unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
int i, n;
*
* Copyright (C) 1999, 2000 Niibe Yutaka
* Copyright (C) 2004 Alex Song
- * Copyright (C) 2006 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
+ *
*/
#include <linux/init.h>
#include <linux/mman.h>
if ((data & v) == v)
ctrl_outl(data & ~v, addr);
+
}
addrstart += current_cpu_data.dcache.way_incr;
*/
void flush_dcache_page(struct page *page)
{
- struct address_space *mapping = page_mapping(page);
-
- if (mapping && !mapping_mapped(mapping))
- set_bit(PG_dcache_dirty, &page->flags);
- else
+ if (test_bit(PG_mapped, &page->flags))
__flush_dcache_page(PHYSADDR(page_address(page)));
}
*/
void clear_user_page(void *to, unsigned long address, struct page *page)
{
+ __set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
clear_page(to);
else {
void copy_user_page(void *to, void *from, unsigned long address,
struct page *page)
{
+ __set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
copy_page(to, from);
else {
mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
}
}
+
+/*
+ * For SH-4, we have our own implementation for ptep_get_and_clear
+ */
+inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+
+ pte_clear(mm, addr, ptep);
+ if (!pte_not_present(pte)) {
+ unsigned long pfn = pte_pfn(pte);
+ if (pfn_valid(pfn)) {
+ struct page *page = pfn_to_page(pfn);
+ struct address_space *mapping = page_mapping(page);
+ if (!mapping || !mapping_writably_mapped(mapping))
+ __clear_bit(PG_mapped, &page->flags);
+ }
+ }
+ return pte;
+}
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
+ *
*/
+
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
{
struct page *page = virt_to_page(to);
+ __set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
clear_page(to);
__flush_wback_region(to, PAGE_SIZE);
* @from: P1 address
* @address: U0 address to be mapped
*/
-void copy_user_page(void *to, void *from, unsigned long address,
- struct page *pg)
+void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
{
struct page *page = virt_to_page(to);
+
+ __set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
copy_page(to, from);
__flush_wback_region(to, PAGE_SIZE);
__flush_wback_region(to, PAGE_SIZE);
}
}
+
+/*
+ * For SH7705, we have our own implementation for ptep_get_and_clear
+ * Copied from pg-sh4.c
+ */
+inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+
+ pte_clear(mm, addr, ptep);
+ if (!pte_not_present(pte)) {
+ unsigned long pfn = pte_pfn(pte);
+ if (pfn_valid(pfn)) {
+ struct page *page = pfn_to_page(pfn);
+ struct address_space *mapping = page_mapping(page);
+ if (!mapping || !mapping_writably_mapped(mapping))
+ __clear_bit(PG_mapped, &page->flags);
+ }
+ }
+
+ return pte;
+}
+
* TLB flushing operations for SH with an MMU.
*
* Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 - 2006 Paul Mundt
+ * Copyright (C) 2003 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/mm.h>
-#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
ctrl_barrier();
local_irq_restore(flags);
}
-
-void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte)
-{
- unsigned long flags;
- unsigned long pteval;
- unsigned long vpn;
- struct page *page;
- unsigned long pfn = pte_pfn(pte);
- struct address_space *mapping;
-
- if (!pfn_valid(pfn))
- return;
-
- page = pfn_to_page(pfn);
- mapping = page_mapping(page);
- if (mapping) {
- unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
- int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-
- if (dirty)
- __flush_wback_region((void *)P1SEGADDR(phys),
- PAGE_SIZE);
- }
-
- local_irq_save(flags);
-
- /* Set PTEH register */
- vpn = (address & MMU_VPN_MASK) | get_asid();
- ctrl_outl(vpn, MMU_PTEH);
-
- pteval = pte_val(pte);
-
-#ifdef CONFIG_CPU_HAS_PTEA
- /* Set PTEA register */
- /* TODO: make this look less hacky */
- ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
-#endif
-
- /* Set PTEL register */
- pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-#if defined(CONFIG_SH_WRITETHROUGH) && defined(CONFIG_CPU_SH4)
- pteval |= _PAGE_WT;
-#endif
- /* conveniently, we want all the software flags to be 0 anyway */
- ctrl_outl(pteval, MMU_PTEL);
-
- /* Load the TLB */
- asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
- local_irq_restore(flags);
-}
*
* Released under the terms of the GNU GPL v2.0.
*/
-#include <linux/io.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+
#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ unsigned long pteval;
+ unsigned long vpn;
+
+ /* Ptrace may call this routine. */
+ if (vma && current->active_mm != vma->vm_mm)
+ return;
+
+#if defined(CONFIG_SH7705_CACHE_32KB)
+ {
+ struct page *page = pte_page(pte);
+ unsigned long pfn = pte_pfn(pte);
+
+ if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
+ unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+
+ __flush_wback_region((void *)P1SEGADDR(phys),
+ PAGE_SIZE);
+ __set_bit(PG_mapped, &page->flags);
+ }
+ }
+#endif
+
+ local_irq_save(flags);
+
+ /* Set PTEH register */
+ vpn = (address & MMU_VPN_MASK) | get_asid();
+ ctrl_outl(vpn, MMU_PTEH);
+
+ pteval = pte_val(pte);
+
+ /* Set PTEL register */
+ pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+ /* conveniently, we want all the software flags to be 0 anyway */
+ ctrl_outl(pteval, MMU_PTEL);
+
+ /* Load the TLB */
+ asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+ local_irq_restore(flags);
+}
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
for (i = 0; i < ways; i++)
ctrl_outl(data, addr + (i << 8));
}
+
*
* Released under the terms of the GNU GPL v2.0.
*/
-#include <linux/io.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+
#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ unsigned long pteval;
+ unsigned long vpn;
+ struct page *page;
+ unsigned long pfn;
+
+ /* Ptrace may call this routine. */
+ if (vma && current->active_mm != vma->vm_mm)
+ return;
+
+ pfn = pte_pfn(pte);
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (!test_bit(PG_mapped, &page->flags)) {
+ unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+ __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
+ __set_bit(PG_mapped, &page->flags);
+ }
+ }
+
+ local_irq_save(flags);
+
+ /* Set PTEH register */
+ vpn = (address & MMU_VPN_MASK) | get_asid();
+ ctrl_outl(vpn, MMU_PTEH);
+
+ pteval = pte_val(pte);
+
+ /* Set PTEA register */
+ if (cpu_data->flags & CPU_HAS_PTEA)
+ /* TODO: make this look less hacky */
+ ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+
+ /* Set PTEL register */
+ pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#ifdef CONFIG_SH_WRITETHROUGH
+ pteval |= _PAGE_WT;
+#endif
+ /* conveniently, we want all the software flags to be 0 anyway */
+ ctrl_outl(pteval, MMU_PTEL);
+
+ /* Load the TLB */
+ asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+ local_irq_restore(flags);
+}
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
ctrl_outl(data, addr);
back_to_P1();
}
+
u32 *reg = (preg + (index * ((na + ns) * 4)));
struct device_node *dp = op->node;
struct device_node *pp = p_op->node;
- struct of_bus *pbus;
+ struct of_bus *pbus, *dbus;
u64 size, result = OF_BAD_ADDR;
unsigned long flags;
int dna, dns;
dna = na;
dns = ns;
+ dbus = bus;
while (1) {
dp = pp;
pbus = of_match_bus(pp);
pbus->count_cells(dp, &pna, &pns);
- if (build_one_resource(dp, bus, pbus, addr,
+ if (build_one_resource(dp, dbus, pbus, addr,
dna, dns, pna))
break;
dna = pna;
dns = pns;
- bus = pbus;
+ dbus = pbus;
}
build_res:
r->start = result & 0xffffffff;
r->end = result + size - 1;
r->flags = flags | ((result >> 32ULL) & 0xffUL);
- } else {
- r->start = ~0UL;
- r->end = ~0UL;
}
r->name = op->node->name;
}
return -ENXIO;
}
+struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
+{
+ struct pcidev_cookie *pc = pdev->sysdata;
+
+ return pc->prom_node;
+}
+EXPORT_SYMBOL(pci_device_to_OF_node);
+
/*
* This probably belongs here rather than ioport.c because
* we do not want this crud linked into SBus kernels.
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.21-rc1
-# Mon Feb 26 10:45:21 2007
+# Linux kernel version: 2.6.21-rc2
+# Wed Feb 28 09:50:51 2007
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
# CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
#
u32 *reg = (preg + (index * ((na + ns) * 4)));
struct device_node *dp = op->node;
struct device_node *pp = p_op->node;
- struct of_bus *pbus;
+ struct of_bus *pbus, *dbus;
u64 size, result = OF_BAD_ADDR;
unsigned long flags;
int dna, dns;
dna = na;
dns = ns;
+ dbus = bus;
while (1) {
dp = pp;
pbus = of_match_bus(pp);
pbus->count_cells(dp, &pna, &pns);
- if (build_one_resource(dp, bus, pbus, addr,
+ if (build_one_resource(dp, dbus, pbus, addr,
dna, dns, pna))
break;
dna = pna;
dns = pns;
- bus = pbus;
+ dbus = pbus;
}
build_res:
r->start = result;
r->end = result + size - 1;
r->flags = flags;
- } else {
- r->start = ~0UL;
- r->end = ~0UL;
}
r->name = op->node->name;
}
unsigned int irq)
{
struct linux_prom_pci_registers *regs;
- unsigned int devfn, slot, ret;
+ unsigned int bus, devfn, slot, ret;
if (irq < 1 || irq > 4)
return irq;
if (!regs)
return irq;
+ bus = (regs->phys_hi >> 16) & 0xff;
devfn = (regs->phys_hi >> 8) & 0xff;
slot = (devfn >> 3) & 0x1f;
- ret = ((irq - 1 + (slot & 3)) & 3) + 1;
+ if (pp->irq_trans) {
+ /* Derived from Table 8-3, U2P User's Manual. This branch
+ * is handling a PCI controller that lacks a proper set of
+ * interrupt-map and interrupt-map-mask properties. The
+ * Ultra-E450 is one example.
+ *
+ * The bit layout is BSSLL, where:
+ * B: 0 on bus A, 1 on bus B
+	 * S: 2-bit slot number, derived from PCI device number as
+ * (dev - 1) for bus A, or (dev - 2) for bus B
+ * L: 2-bit line number
+ *
+	 * Actually, a more "portable" way to calculate the funky
+ * slot number is to subtract pbm->pci_first_slot from the
+ * device number, and that's exactly what the pre-OF
+ * sparc64 code did, but we're building this stuff generically
+ * using the OBP tree, not in the PCI controller layer.
+ */
+ if (bus & 0x80) {
+ /* PBM-A */
+ bus = 0x00;
+ slot = (slot - 1) << 2;
+ } else {
+ /* PBM-B */
+ bus = 0x10;
+ slot = (slot - 2) << 2;
+ }
+ irq -= 1;
+
+ ret = (bus | slot | irq);
+ } else {
+ /* Going through a PCI-PCI bridge that lacks a set of
+ * interrupt-map and interrupt-map-mask properties.
+ */
+ ret = ((irq - 1 + (slot & 3)) & 3) + 1;
+ }
return ret;
}
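
As a worked example of the BSSLL encoding described above, here is a
standalone sketch (not the kernel's actual code path; the bus, slot,
and line inputs are assumed to have been extracted from the config
registers as in the hunk above):

/* bus A: slot bits = (dev - 1) << 2; bus B: slot bits = (dev - 2) << 2 */
static unsigned int bssll_encode(int on_pbm_b, unsigned int slot,
				 unsigned int irq /* 1-4 = INTA-INTD */)
{
	unsigned int bus = on_pbm_b ? 0x10 : 0x00;
	unsigned int sbits = (on_pbm_b ? slot - 2 : slot - 1) << 2;

	return bus | sbits | (irq - 1);
}

For instance, slot 3 with INTB on PBM-B yields 0x10 | 0x04 | 0x01 = 0x15.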
}
#endif /* !(CONFIG_PCI_MSI) */
+struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
+{
+ struct pcidev_cookie *pc = pdev->sysdata;
+
+ return pc->op->node;
+}
+EXPORT_SYMBOL(pci_device_to_OF_node);
+
#endif /* !(CONFIG_PCI) */
security. This option enables these legacy devices; on most
systems, it is safe to say N.
+config RAW_DRIVER
+ tristate "RAW driver (/dev/raw/rawN) (OBSOLETE)"
+ help
+ The raw driver permits block devices to be bound to /dev/raw/rawN.
+ Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
+ See the raw(8) manpage for more details.
+
+ The raw driver is deprecated and will be removed soon.
+	  Applications should simply open the device (e.g. /dev/hda1)
+	  with the O_DIRECT flag, as sketched in the example below.
+
+config MAX_RAW_DEVS
+ int "Maximum number of RAW devices to support (1-8192)"
+ depends on RAW_DRIVER
+ default "256"
+ help
+ The maximum number of RAW devices that are supported.
+ Default is 256. Increase this number in case you need lots of
+ raw devices.
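
The following is a minimal userspace sketch of the O_DIRECT alternative
recommended above. Note that O_DIRECT requires an aligned buffer (and,
depending on the device, aligned offsets and sizes); the 512-byte
alignment and 4096-byte read size used here are assumptions, not a
guarantee for every block device:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static int read_direct(const char *dev)		/* e.g. "/dev/hda1" */
{
	void *buf;
	ssize_t n;
	int fd;

	if (posix_memalign(&buf, 512, 4096))	/* aligned buffer */
		return -1;

	fd = open(dev, O_RDONLY | O_DIRECT);
	if (fd < 0) {
		free(buf);
		return -1;
	}

	n = read(fd, buf, 4096);		/* bypasses the page cache */

	close(fd);
	free(buf);
	return n < 0 ? -1 : 0;
}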
config LEGACY_PTY_COUNT
int "Maximum number of legacy PTY in use"
pri->control = socket(AF_UNIX, SOCK_STREAM, 0);
if(pri->control < 0){
+ err = -errno;
printk("daemon_open : control socket failed, errno = %d\n",
- errno);
- return(-errno);
+ -err);
+ return err;
}
if(connect(pri->control, (struct sockaddr *) ctl_addr,
sizeof(*ctl_addr)) < 0){
- printk("daemon_open : control connect failed, errno = %d\n",
- errno);
err = -errno;
+ printk("daemon_open : control connect failed, errno = %d\n",
+ -err);
goto out;
}
fd = socket(AF_UNIX, SOCK_DGRAM, 0);
if(fd < 0){
- printk("daemon_open : data socket failed, errno = %d\n",
- errno);
err = -errno;
+ printk("daemon_open : data socket failed, errno = %d\n",
+ -err);
goto out;
}
if(bind(fd, (struct sockaddr *) local_addr, sizeof(*local_addr)) < 0){
- printk("daemon_open : data bind failed, errno = %d\n",
- errno);
err = -errno;
+ printk("daemon_open : data bind failed, errno = %d\n",
+ -err);
goto out_close;
}
struct tty_struct *tty = line->tty;
int err;
- /* Interrupts are enabled here because we registered the interrupt with
+ /* Interrupts are disabled here because we registered the interrupt with
* IRQF_DISABLED (see line_setup_irq).*/
- spin_lock_irq(&line->lock);
+ spin_lock(&line->lock);
err = flush_buffer(line);
if (err == 0) {
return IRQ_NONE;
line->head = line->buffer;
line->tail = line->buffer;
}
- spin_unlock_irq(&line->lock);
+ spin_unlock(&line->lock);
if(tty == NULL)
return IRQ_NONE;
pri->dev = dev;
}
+static void mcast_remove(void *data)
+{
+ struct mcast_data *pri = data;
+
+ kfree(pri->mcast_addr);
+ pri->mcast_addr = NULL;
+}
+
static int mcast_open(void *data)
{
struct mcast_data *pri = data;
.init = mcast_user_init,
.open = mcast_open,
.close = mcast_close,
- .remove = NULL,
+ .remove = mcast_remove,
.set_mtu = mcast_set_mtu,
.add_address = NULL,
.delete_address = NULL,
.write = ssl_console_write,
.device = ssl_console_device,
.setup = ssl_console_setup,
- .flags = CON_PRINTBUFFER,
+ .flags = CON_PRINTBUFFER|CON_ANYTIME,
.index = -1,
};
.write = uml_console_write,
.device = uml_console_device,
.setup = uml_console_setup,
- .flags = CON_PRINTBUFFER,
+ .flags = CON_PRINTBUFFER|CON_ANYTIME,
.index = -1,
};
extern void os_stop_process(int pid);
extern void os_kill_process(int pid, int reap_child);
extern void os_kill_ptraced_process(int pid, int reap_child);
+#ifdef UML_CONFIG_MODE_TT
extern void os_usr1_process(int pid);
+#endif
extern long os_ptrace_ldt(long pid, long addr, long data);
extern int os_getpid(void);
extern void unblock_signals(void);
extern int get_signals(void);
extern int set_signals(int enable);
-extern void os_usr1_signal(int on);
/* trap.c */
extern void os_fill_handlinfo(struct kern_handlers h);
.events = events,
.current_events = 0 } );
+ err = -EBUSY;
spin_lock_irqsave(&irq_lock, flags);
for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL);
}
- return(handled_sig);
+ return handled_sig;
}
int do_signal(void)
{
- return(kern_do_signal(¤t->thread.regs));
+ return kern_do_signal(¤t->thread.regs);
}
/*
long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
- return(do_sigaltstack(uss, uoss, PT_REGS_SP(¤t->thread.regs)));
+ return do_sigaltstack(uss, uoss, PT_REGS_SP(¤t->thread.regs));
}
switch ( auxv->a_type ) {
case AT_SYSINFO:
__kernel_vsyscall = auxv->a_un.a_val;
+ /* See if the page is under TASK_SIZE */
+ if (__kernel_vsyscall < (unsigned long) envp)
+ __kernel_vsyscall = 0;
break;
case AT_SYSINFO_EHDR:
vsyscall_ehdr = auxv->a_un.a_val;
#include "longjmp.h"
#include "skas_ptrace.h"
#include "kern_constants.h"
+#include "uml-config.h"
#define ARBITRARY_ADDR -1
#define FAILURE_PID -1
CATCH_EINTR(waitpid(pid, NULL, 0));
}
+#ifdef UML_CONFIG_MODE_TT
void os_usr1_process(int pid)
{
kill(pid, SIGUSR1);
}
+#endif
/* Don't use the glibc version, which caches the result in TLS. It misses some
* syscalls, and also breaks with clone(), which does not unshare the TLS.
sigio_lock();
err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
- if(err)
+ if(err){
+ printk("maybe_sigio_broken - failed to add pollfd for "
+ "descriptor %d\n", fd);
goto out;
+ }
all_sigio_fds.poll[all_sigio_fds.used++] =
((struct pollfd) { .fd = fd,
return ret;
}
-
-void os_usr1_signal(int on)
-{
- change_sig(SIGUSR1, on);
-}
.offset = code_offset
} } });
n = os_write_file(fd, &mmop, sizeof(mmop));
- if(n != sizeof(mmop))
+ if(n != sizeof(mmop)){
+ printk("mmap args - addr = 0x%lx, fd = %d, offset = %llx\n",
+ code, code_fd, (unsigned long long) code_offset);
panic("map_stub_pages : /proc/mm map for code failed, "
"err = %d\n", -n);
+ }
if ( stack ) {
__u64 map_offset;
CHOOSE_MODE(syscall_handler_tt(sig, regs), (void) 0);
}
+/* Initialized from linux_main() */
void (*sig_info[NSIG])(int, union uml_pt_regs *);
void os_fill_handlinfo(struct kern_handlers h)
#include "linux/slab.h"
#include "linux/types.h"
#include "linux/errno.h"
+#include "linux/spinlock.h"
#include "asm/uaccess.h"
#include "asm/smp.h"
#include "asm/ldt.h"
return ret;
}
-short dummy_list[9] = {0, -1};
-short * host_ldt_entries = NULL;
+static DEFINE_SPINLOCK(host_ldt_lock);
+static short dummy_list[9] = {0, -1};
+static short * host_ldt_entries = NULL;
-void ldt_get_host_info(void)
+static void ldt_get_host_info(void)
{
long ret;
- struct ldt_entry * ldt;
+ struct ldt_entry * ldt, *tmp;
int i, size, k, order;
+ spin_lock(&host_ldt_lock);
+
+ if(host_ldt_entries != NULL){
+ spin_unlock(&host_ldt_lock);
+ return;
+ }
host_ldt_entries = dummy_list+1;
+ spin_unlock(&host_ldt_lock);
+
for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++);
ldt = (struct ldt_entry *)
__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
if(ldt == NULL) {
- printk("ldt_get_host_info: couldn't allocate buffer for host ldt\n");
+ printk("ldt_get_host_info: couldn't allocate buffer for host "
+ "ldt\n");
return;
}
host_ldt_entries = dummy_list;
else {
size = (size + 1) * sizeof(dummy_list[0]);
- host_ldt_entries = kmalloc(size, GFP_KERNEL);
- if(host_ldt_entries == NULL) {
- printk("ldt_get_host_info: couldn't allocate host ldt list\n");
+ tmp = kmalloc(size, GFP_KERNEL);
+ if(tmp == NULL) {
+ printk("ldt_get_host_info: couldn't allocate host ldt "
+ "list\n");
goto out_free;
}
+ host_ldt_entries = tmp;
}
for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){
* inherited from the host. All ldt-entries found
* will be reset in the following loop
*/
- if(host_ldt_entries == NULL)
- ldt_get_host_info();
+ ldt_get_host_info();
for(num_p=host_ldt_entries; *num_p != -1; num_p++){
desc.entry_number = *num_p;
err = write_ldt_entry(&new_mm->id, 1, &desc,
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
- return(CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
- ptr, bytecount));
+ return CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
+ ptr, bytecount);
}
switch(code){
case ARCH_SET_FS:
+ current->thread.arch.fs = (unsigned long) ptr;
+ save_registers(pid, ¤t->thread.regs.regs);
+ break;
case ARCH_SET_GS:
save_registers(pid, ¤t->thread.regs.regs);
break;
void arch_switch_to_skas(struct task_struct *from, struct task_struct *to)
{
- if(to->thread.arch.fs == 0)
+ if((to->thread.arch.fs == 0) || (to->mm == NULL))
return;
arch_prctl_skas(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs);
}
-
case PTRACE_SINGLESTEP:
case PTRACE_DETACH:
case PTRACE_SYSCALL:
+ case PTRACE_OLDSETOPTIONS:
case PTRACE_SETOPTIONS:
case PTRACE_SET_THREAD_AREA:
case PTRACE_GET_THREAD_AREA:
#include <asm/timex.h>
#include <asm/hpet.h>
+#define HPET_MASK 0xFFFFFFFF
+#define HPET_SHIFT 22
+
+/* FSEC = 10^-15 NSEC = 10^-9 */
+#define FSEC_PER_NSEC 1000000
+
int nohpet __initdata;
unsigned long hpet_address;
return 0;
}
+static cycle_t read_hpet(void)
+{
+ return (cycle_t)hpet_readl(HPET_COUNTER);
+}
+
+static cycle_t __vsyscall_fn vread_hpet(void)
+{
+ return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+}
+
+struct clocksource clocksource_hpet = {
+ .name = "hpet",
+ .rating = 250,
+ .read = read_hpet,
+ .mask = (cycle_t)HPET_MASK,
+ .mult = 0, /* set below */
+ .shift = HPET_SHIFT,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .vread = vread_hpet,
+};
+
int hpet_arch_init(void)
{
unsigned int id;
+ u64 tmp;
if (!hpet_address)
return -1;
hpet_use_timer = (id & HPET_ID_LEGSUP);
+ /*
+	 * The HPET period is in femtoseconds per cycle,
+	 * so we need to convert this to ns/cyc units,
+	 * approximated by mult/2^shift
+ *
+ * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
+ * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
+ * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
+ * (fsec/cyc << shift)/1000000 = mult
+ * (hpet_period << shift)/FSEC_PER_NSEC = mult
+ */
+ tmp = (u64)hpet_period << HPET_SHIFT;
+ do_div(tmp, FSEC_PER_NSEC);
+ clocksource_hpet.mult = (u32)tmp;
+ clocksource_register(&clocksource_hpet);
+
return hpet_timer_stop_set_go(hpet_tick);
}
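
To make the conversion above concrete, here is a standalone check of the
fs/cycle to mult arithmetic, using 69841279 fs (a common 14.31818 MHz
HPET) as an assumed example value; real hardware reports its own period:

#include <stdint.h>

static uint32_t hpet_mult(uint64_t hpet_period_fs)
{
	/* (hpet_period << HPET_SHIFT) / FSEC_PER_NSEC */
	return (uint32_t)((hpet_period_fs << 22) / 1000000);
}

hpet_mult(69841279) returns 292935555, and 292935555 / 2^22 is about
69.84 ns/cycle, matching the 14.31818 MHz period.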
}
__setup("nohpet", nohpet_setup);
-
-#define HPET_MASK 0xFFFFFFFF
-#define HPET_SHIFT 22
-
-/* FSEC = 10^-15 NSEC = 10^-9 */
-#define FSEC_PER_NSEC 1000000
-
-static void *hpet_ptr;
-
-static cycle_t read_hpet(void)
-{
- return (cycle_t)readl(hpet_ptr);
-}
-
-static cycle_t __vsyscall_fn vread_hpet(void)
-{
- return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
-}
-
-struct clocksource clocksource_hpet = {
- .name = "hpet",
- .rating = 250,
- .read = read_hpet,
- .mask = (cycle_t)HPET_MASK,
- .mult = 0, /* set below */
- .shift = HPET_SHIFT,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .vread = vread_hpet,
-};
-
-static int __init init_hpet_clocksource(void)
-{
- unsigned long hpet_period;
- void __iomem *hpet_base;
- u64 tmp;
-
- if (!hpet_address)
- return -ENODEV;
-
- /* calculate the hpet address: */
- hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
- hpet_ptr = hpet_base + HPET_COUNTER;
-
- /* calculate the frequency: */
- hpet_period = readl(hpet_base + HPET_PERIOD);
-
- /*
- * hpet period is in femto seconds per cycle
- * so we need to convert this to ns/cyc units
- * aproximated by mult/2^shift
- *
- * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
- * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
- * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
- * (fsec/cyc << shift)/1000000 = mult
- * (hpet_period << shift)/FSEC_PER_NSEC = mult
- */
- tmp = (u64)hpet_period << HPET_SHIFT;
- do_div(tmp, FSEC_PER_NSEC);
- clocksource_hpet.mult = (u32)tmp;
-
- return clocksource_register(&clocksource_hpet);
-}
-
-module_init(init_hpet_clocksource);
struct irq_cfg *cfg = irq_cfg + irq;
struct IO_APIC_route_entry entry;
cpumask_t mask;
- unsigned long flags;
if (!IO_APIC_IRQ(irq))
return;
disable_8259A_irq(irq);
ioapic_write_entry(apic, pin, entry);
-
- spin_lock_irqsave(&ioapic_lock, flags);
- irq_desc[irq].affinity = TARGET_CPUS;
- spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void __init setup_IO_APIC_irqs(void)
*/
int __cpuinit __cpu_up(unsigned int cpu)
{
- int err;
int apicid = cpu_present_to_apicid(cpu);
+ unsigned long flags;
+ int err;
WARN_ON(irqs_disabled());
/*
* Make sure and check TSC sync:
*/
+ local_irq_save(flags);
check_tsc_sync_source(cpu);
+ local_irq_restore(flags);
while (!cpu_isset(cpu, cpu_online_map))
cpu_relax();
set_cyc2ns_scale(cpu_khz);
printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
+ init_tsc_clocksource();
+
setup_irq(0, &irq0);
}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-static int __init init_tsc_clocksource(void)
+void __init init_tsc_clocksource(void)
{
if (!notsc) {
clocksource_tsc.mult = clocksource_khz2mult(cpu_khz,
if (check_tsc_unstable())
clocksource_tsc.rating = 0;
- return clocksource_register(&clocksource_tsc);
+ clocksource_register(&clocksource_tsc);
}
- return 0;
}
-
-module_init(init_tsc_clocksource);
# Makefile for the acorn character device drivers.
#
-obj-$(CONFIG_ARCH_ACORN) += i2c.o pcf8583.o
obj-$(CONFIG_L7200_KEYB) += defkeymap-l7200.o keyb_l7200.o
+++ /dev/null
-/*
- * linux/drivers/acorn/char/i2c.c
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * ARM IOC/IOMD i2c driver.
- *
- * On Acorn machines, the following i2c devices are on the bus:
- * - PCF8583 real time clock & static RAM
- */
-#include <linux/capability.h>
-#include <linux/init.h>
-#include <linux/time.h>
-#include <linux/miscdevice.h>
-#include <linux/rtc.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/fs.h>
-
-#include <asm/hardware.h>
-#include <asm/io.h>
-#include <asm/hardware/ioc.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#include "pcf8583.h"
-
-extern int (*set_rtc)(void);
-
-static struct i2c_client *rtc_client;
-static const unsigned char days_in_mon[] =
- { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
-
-#define CMOS_CHECKSUM (63)
-
-/*
- * Acorn machines store the year in the static RAM at
- * location 128.
- */
-#define CMOS_YEAR (64 + 128)
-
-static inline int rtc_command(int cmd, void *data)
-{
- int ret = -EIO;
-
- if (rtc_client)
- ret = rtc_client->driver->command(rtc_client, cmd, data);
-
- return ret;
-}
-
-/*
- * Update the century + year bytes in the CMOS RAM, ensuring
- * that the check byte is correctly adjusted for the change.
- */
-static int rtc_update_year(unsigned int new_year)
-{
- unsigned char yr[2], chk;
- struct mem cmos_year = { CMOS_YEAR, sizeof(yr), yr };
- struct mem cmos_check = { CMOS_CHECKSUM, 1, &chk };
- int ret;
-
- ret = rtc_command(MEM_READ, &cmos_check);
- if (ret)
- goto out;
- ret = rtc_command(MEM_READ, &cmos_year);
- if (ret)
- goto out;
-
- chk -= yr[1] + yr[0];
-
- yr[1] = new_year / 100;
- yr[0] = new_year % 100;
-
- chk += yr[1] + yr[0];
-
- ret = rtc_command(MEM_WRITE, &cmos_year);
- if (ret == 0)
- ret = rtc_command(MEM_WRITE, &cmos_check);
- out:
- return ret;
-}
-
-/*
- * Read the current RTC time and date, and update xtime.
- */
-static void get_rtc_time(struct rtc_tm *rtctm, unsigned int *year)
-{
- unsigned char ctrl, yr[2];
- struct mem rtcmem = { CMOS_YEAR, sizeof(yr), yr };
- int real_year, year_offset;
-
- /*
- * Ensure that the RTC is running.
- */
- rtc_command(RTC_GETCTRL, &ctrl);
- if (ctrl & 0xc0) {
- unsigned char new_ctrl = ctrl & ~0xc0;
-
- printk(KERN_WARNING "RTC: resetting control %02x -> %02x\n",
- ctrl, new_ctrl);
-
- rtc_command(RTC_SETCTRL, &new_ctrl);
- }
-
- if (rtc_command(RTC_GETDATETIME, rtctm) ||
- rtc_command(MEM_READ, &rtcmem))
- return;
-
- real_year = yr[0];
-
- /*
- * The RTC year holds the LSB two bits of the current
- * year, which should reflect the LSB two bits of the
- * CMOS copy of the year. Any difference indicates
- * that we have to correct the CMOS version.
- */
- year_offset = rtctm->year_off - (real_year & 3);
- if (year_offset < 0)
- /*
- * RTC year wrapped. Adjust it appropriately.
- */
- year_offset += 4;
-
- *year = real_year + year_offset + yr[1] * 100;
-}
-
-static int set_rtc_time(struct rtc_tm *rtctm, unsigned int year)
-{
- unsigned char leap;
- int ret;
-
- leap = (!(year % 4) && (year % 100)) || !(year % 400);
-
- if (rtctm->mon > 12 || rtctm->mon == 0 || rtctm->mday == 0)
- return -EINVAL;
-
- if (rtctm->mday > (days_in_mon[rtctm->mon] + (rtctm->mon == 2 && leap)))
- return -EINVAL;
-
- if (rtctm->hours >= 24 || rtctm->mins >= 60 || rtctm->secs >= 60)
- return -EINVAL;
-
- /*
- * The RTC's own 2-bit year must reflect the least
- * significant two bits of the CMOS year.
- */
- rtctm->year_off = (year % 100) & 3;
-
- ret = rtc_command(RTC_SETDATETIME, rtctm);
- if (ret == 0)
- ret = rtc_update_year(year);
-
- return ret;
-}
-
-/*
- * Set the RTC time only. Note that
- * we do not touch the date.
- */
-static int k_set_rtc_time(void)
-{
- struct rtc_tm new_rtctm, old_rtctm;
- unsigned long nowtime = xtime.tv_sec;
-
- if (rtc_command(RTC_GETDATETIME, &old_rtctm))
- return 0;
-
- new_rtctm.cs = xtime.tv_nsec / 10000000;
- new_rtctm.secs = nowtime % 60; nowtime /= 60;
- new_rtctm.mins = nowtime % 60; nowtime /= 60;
- new_rtctm.hours = nowtime % 24;
-
- /*
- * avoid writing when we're going to change the day
- * of the month. We will retry in the next minute.
- * This basically means that if the RTC must not drift
- * by more than 1 minute in 11 minutes.
- *
- * [ rtc: 1/1/2000 23:58:00, real 2/1/2000 00:01:00,
- * rtc gets set to 1/1/2000 00:01:00 ]
- */
- if ((old_rtctm.hours == 23 && old_rtctm.mins == 59) ||
- (new_rtctm.hours == 23 && new_rtctm.mins == 59))
- return 1;
-
- return rtc_command(RTC_SETTIME, &new_rtctm);
-}
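
That guard is the entire drift bound quoted in the comment: the kernel
resynchronises roughly every 11 minutes, and a sync is skipped only
when either the RTC's old time or the new time falls in the last minute
of a day, so a time-only write can never land on the wrong date. The
check itself, as a trivial standalone function:

#include <stdio.h>

/* Skip the RTC write if a time sits in the last minute of the day,
 * since updating only the time could then cross a date boundary.
 */
static int near_midnight(unsigned int hours, unsigned int mins)
{
	return hours == 23 && mins == 59;
}

int main(void)
{
	printf("%d %d\n", near_midnight(23, 59), near_midnight(0, 1));
	return 0;
}
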
-
-static int rtc_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- unsigned int year;
- struct rtc_time rtctm;
- struct rtc_tm rtc_raw;
-
- switch (cmd) {
- case RTC_ALM_READ:
- case RTC_ALM_SET:
- break;
-
- case RTC_RD_TIME:
- memset(&rtctm, 0, sizeof(struct rtc_time));
- get_rtc_time(&rtc_raw, &year);
- rtctm.tm_sec = rtc_raw.secs;
- rtctm.tm_min = rtc_raw.mins;
- rtctm.tm_hour = rtc_raw.hours;
- rtctm.tm_mday = rtc_raw.mday;
- rtctm.tm_mon = rtc_raw.mon - 1; /* month starts at 0 */
- rtctm.tm_year = year - 1900; /* starts at 1900 */
- return copy_to_user((void *)arg, &rtctm, sizeof(rtctm))
- ? -EFAULT : 0;
-
- case RTC_SET_TIME:
- if (!capable(CAP_SYS_TIME))
- return -EACCES;
-
- if (copy_from_user(&rtctm, (void *)arg, sizeof(rtctm)))
- return -EFAULT;
- rtc_raw.secs = rtctm.tm_sec;
- rtc_raw.mins = rtctm.tm_min;
- rtc_raw.hours = rtctm.tm_hour;
- rtc_raw.mday = rtctm.tm_mday;
- rtc_raw.mon = rtctm.tm_mon + 1;
- year = rtctm.tm_year + 1900;
- return set_rtc_time(&rtc_raw, year);
- break;
-
- case RTC_EPOCH_READ:
- return put_user(1900, (unsigned long *)arg);
-
- }
- return -EINVAL;
-}
-
-static const struct file_operations rtc_fops = {
- .ioctl = rtc_ioctl,
-};
-
-static struct miscdevice rtc_dev = {
- .minor = RTC_MINOR,
- .name = "rtc",
- .fops = &rtc_fops,
-};
-
-/* IOC / IOMD i2c driver */
-
-#define FORCE_ONES 0xdc
-#define SCL 0x02
-#define SDA 0x01
-
-/*
- * We must preserve all non-i2c output bits in IOC_CONTROL.
- * Note also that we need to preserve the value of SCL and
- * SDA outputs as well (which may be different from the
- * values read back from IOC_CONTROL).
- */
-static u_int force_ones;
-
-static void ioc_setscl(void *data, int state)
-{
- u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA);
- u_int ones = force_ones;
-
- if (state)
- ones |= SCL;
- else
- ones &= ~SCL;
-
- force_ones = ones;
-
- ioc_writeb(ioc_control | ones, IOC_CONTROL);
-}
-
-static void ioc_setsda(void *data, int state)
-{
- u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA);
- u_int ones = force_ones;
-
- if (state)
- ones |= SDA;
- else
- ones &= ~SDA;
-
- force_ones = ones;
-
- ioc_writeb(ioc_control | ones, IOC_CONTROL);
-}
-
-static int ioc_getscl(void *data)
-{
- return (ioc_readb(IOC_CONTROL) & SCL) != 0;
-}
-
-static int ioc_getsda(void *data)
-{
- return (ioc_readb(IOC_CONTROL) & SDA) != 0;
-}
-
-static struct i2c_algo_bit_data ioc_data = {
- .setsda = ioc_setsda,
- .setscl = ioc_setscl,
- .getsda = ioc_getsda,
- .getscl = ioc_getscl,
- .udelay = 80,
- .timeout = 100
-};
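
These four callbacks are all that the i2c-algo-bit layer needs; it
toggles SDA and SCL through them to synthesise bus conditions. A hedged
sketch of how a START condition falls out of such callbacks (stub
implementations, timing elided):

#include <stdio.h>

/* Stubs standing in for ioc_setsda()/ioc_setscl(). */
static void setsda(int state) { printf("SDA=%d\n", state); }
static void setscl(int state) { printf("SCL=%d\n", state); }
static void udelay(int usecs) { (void)usecs; /* real code busy-waits */ }

/* START condition: SDA falls while SCL is held high. */
static void i2c_start(void)
{
	setsda(1);
	setscl(1);
	udelay(80);
	setsda(0);
	udelay(80);
	setscl(0);
}

int main(void)
{
	i2c_start();
	return 0;
}
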
-
-static int ioc_client_reg(struct i2c_client *client)
-{
- if (client->driver->id == I2C_DRIVERID_PCF8583 &&
- client->addr == 0x50) {
- struct rtc_tm rtctm;
- unsigned int year;
- struct timespec tv;
-
- rtc_client = client;
- get_rtc_time(&rtctm, &year);
-
- tv.tv_nsec = rtctm.cs * 10000000;
- tv.tv_sec = mktime(year, rtctm.mon, rtctm.mday,
- rtctm.hours, rtctm.mins, rtctm.secs);
- do_settimeofday(&tv);
- set_rtc = k_set_rtc_time;
- }
-
- return 0;
-}
-
-static int ioc_client_unreg(struct i2c_client *client)
-{
- if (client == rtc_client) {
- set_rtc = NULL;
- rtc_client = NULL;
- }
-
- return 0;
-}
-
-static struct i2c_adapter ioc_ops = {
- .id = I2C_HW_B_IOC,
- .algo_data = &ioc_data,
- .client_register = ioc_client_reg,
- .client_unregister = ioc_client_unreg,
-};
-
-static int __init i2c_ioc_init(void)
-{
- int ret;
-
- force_ones = FORCE_ONES | SCL | SDA;
-
- ret = i2c_bit_add_bus(&ioc_ops);
-
- if (ret >= 0){
- ret = misc_register(&rtc_dev);
- if(ret < 0)
- i2c_del_adapter(&ioc_ops);
- }
-
- return ret;
-}
-
-__initcall(i2c_ioc_init);
+++ /dev/null
-/*
- * linux/drivers/acorn/char/pcf8583.c
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Driver for PCF8583 RTC & RAM chip
- */
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/mc146818rtc.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/bcd.h>
-
-#include "pcf8583.h"
-
-static struct i2c_driver pcf8583_driver;
-
-static unsigned short ignore[] = { I2C_CLIENT_END };
-static unsigned short normal_addr[] = { 0x50, I2C_CLIENT_END };
-static unsigned short *forces[] = { NULL };
-
-static struct i2c_client_address_data addr_data = {
- .normal_i2c = normal_addr,
- .probe = ignore,
- .ignore = ignore,
- .forces = forces,
-};
-
-#define set_ctrl(x, v) i2c_set_clientdata(x, (void *)(unsigned int)(v))
-#define get_ctrl(x) ((unsigned int)i2c_get_clientdata(x))
-
-static int
-pcf8583_attach(struct i2c_adapter *adap, int addr, int kind)
-{
- struct i2c_client *c;
- unsigned char buf[1], ad[1] = { 0 };
- struct i2c_msg msgs[2] = {
- {
- .addr = addr,
- .flags = 0,
- .len = 1,
- .buf = ad,
- }, {
- .addr = addr,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- c = kmalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return -ENOMEM;
-
- memset(c, 0, sizeof(*c));
- c->addr = addr;
- c->adapter = adap;
- c->driver = &pcf8583_driver;
-
- if (i2c_transfer(c->adapter, msgs, 2) == 2)
- set_ctrl(c, buf[0]);
-
- return i2c_attach_client(c);
-}
-
-static int
-pcf8583_probe(struct i2c_adapter *adap)
-{
- return i2c_probe(adap, &addr_data, pcf8583_attach);
-}
-
-static int
-pcf8583_detach(struct i2c_client *client)
-{
- i2c_detach_client(client);
- kfree(client);
- return 0;
-}
-
-static int
-pcf8583_get_datetime(struct i2c_client *client, struct rtc_tm *dt)
-{
- unsigned char buf[8], addr[1] = { 1 };
- struct i2c_msg msgs[2] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = addr,
- }, {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = 6,
- .buf = buf,
- }
- };
- int ret = -EIO;
-
- memset(buf, 0, sizeof(buf));
-
- ret = i2c_transfer(client->adapter, msgs, 2);
- if (ret == 2) {
- dt->year_off = buf[4] >> 6;
- dt->wday = buf[5] >> 5;
-
- buf[4] &= 0x3f;
- buf[5] &= 0x1f;
-
- dt->cs = BCD_TO_BIN(buf[0]);
- dt->secs = BCD_TO_BIN(buf[1]);
- dt->mins = BCD_TO_BIN(buf[2]);
- dt->hours = BCD_TO_BIN(buf[3]);
- dt->mday = BCD_TO_BIN(buf[4]);
- dt->mon = BCD_TO_BIN(buf[5]);
-
- ret = 0;
- }
-
- return ret;
-}
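
Note how the PCF8583 overloads its date registers: the top two bits of
the date byte carry the 2-bit year, the top three bits of the month
byte the weekday, and the remainder of each byte is BCD. A standalone
decode of those two bytes, mirroring the masking above:

#include <stdio.h>

#define BCD_TO_BIN(x)	((((x) >> 4) & 0x0f) * 10 + ((x) & 0x0f))

int main(void)
{
	unsigned char date  = 0x97;	/* year bits 0b10, BCD date 17 */
	unsigned char month = 0x52;	/* weekday 0b010, BCD month 12 */

	printf("year_off %u mday %u\n", date >> 6, BCD_TO_BIN(date & 0x3f));
	printf("wday %u mon %u\n", month >> 5, BCD_TO_BIN(month & 0x1f));
	return 0;
}
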
-
-static int
-pcf8583_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo)
-{
- unsigned char buf[8];
- int ret, len = 6;
-
- buf[0] = 0;
- buf[1] = get_ctrl(client) | 0x80;
- buf[2] = BIN_TO_BCD(dt->cs);
- buf[3] = BIN_TO_BCD(dt->secs);
- buf[4] = BIN_TO_BCD(dt->mins);
- buf[5] = BIN_TO_BCD(dt->hours);
-
- if (datetoo) {
- len = 8;
- buf[6] = BIN_TO_BCD(dt->mday) | (dt->year_off << 6);
- buf[7] = BIN_TO_BCD(dt->mon) | (dt->wday << 5);
- }
-
- ret = i2c_master_send(client, (char *)buf, len);
- if (ret == len)
- ret = 0;
-
- buf[1] = get_ctrl(client);
- i2c_master_send(client, (char *)buf, 2);
-
- return ret;
-}
-
-static int
-pcf8583_get_ctrl(struct i2c_client *client, unsigned char *ctrl)
-{
- *ctrl = get_ctrl(client);
- return 0;
-}
-
-static int
-pcf8583_set_ctrl(struct i2c_client *client, unsigned char *ctrl)
-{
- unsigned char buf[2];
-
- buf[0] = 0;
- buf[1] = *ctrl;
- set_ctrl(client, *ctrl);
-
- return i2c_master_send(client, (char *)buf, 2);
-}
-
-static int
-pcf8583_read_mem(struct i2c_client *client, struct mem *mem)
-{
- unsigned char addr[1];
- struct i2c_msg msgs[2] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = addr,
- }, {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = mem->nr,
- .buf = mem->data,
- }
- };
-
- if (mem->loc < 8)
- return -EINVAL;
-
- addr[0] = mem->loc;
-
- return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
-}
-
-static int
-pcf8583_write_mem(struct i2c_client *client, struct mem *mem)
-{
- unsigned char addr[1];
- struct i2c_msg msgs[2] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = addr,
- }, {
- .addr = client->addr,
- .flags = I2C_M_NOSTART,
- .len = mem->nr,
- .buf = mem->data,
- }
- };
-
- if (mem->loc < 8)
- return -EINVAL;
-
- addr[0] = mem->loc;
-
- return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
-}
-
-static int
-pcf8583_command(struct i2c_client *client, unsigned int cmd, void *arg)
-{
- switch (cmd) {
- case RTC_GETDATETIME:
- return pcf8583_get_datetime(client, arg);
-
- case RTC_SETTIME:
- return pcf8583_set_datetime(client, arg, 0);
-
- case RTC_SETDATETIME:
- return pcf8583_set_datetime(client, arg, 1);
-
- case RTC_GETCTRL:
- return pcf8583_get_ctrl(client, arg);
-
- case RTC_SETCTRL:
- return pcf8583_set_ctrl(client, arg);
-
- case MEM_READ:
- return pcf8583_read_mem(client, arg);
-
- case MEM_WRITE:
- return pcf8583_write_mem(client, arg);
-
- default:
- return -EINVAL;
- }
-}
-
-static struct i2c_driver pcf8583_driver = {
- .driver = {
- .name = "PCF8583",
- },
- .id = I2C_DRIVERID_PCF8583,
- .attach_adapter = pcf8583_probe,
- .detach_client = pcf8583_detach,
- .command = pcf8583_command
-};
-
-static __init int pcf8583_init(void)
-{
- return i2c_add_driver(&pcf8583_driver);
-}
-
-static __exit void pcf8583_exit(void)
-{
- i2c_del_driver(&pcf8583_driver);
-}
-
-module_init(pcf8583_init);
-module_exit(pcf8583_exit);
-
-MODULE_AUTHOR("Russell King");
-MODULE_DESCRIPTION("PCF8583 I2C RTC driver");
-MODULE_LICENSE("GPL");
+++ /dev/null
-/*
- * linux/drivers/acorn/char/pcf8583.h
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-struct rtc_tm {
- unsigned char cs;
- unsigned char secs;
- unsigned char mins;
- unsigned char hours;
- unsigned char mday;
- unsigned char mon;
- unsigned char year_off;
- unsigned char wday;
-};
-
-struct mem {
- unsigned int loc;
- unsigned int nr;
- unsigned char *data;
-};
-
-#define RTC_GETDATETIME 0
-#define RTC_SETTIME 1
-#define RTC_SETDATETIME 2
-#define RTC_GETCTRL 3
-#define RTC_SETCTRL 4
-#define MEM_READ 5
-#define MEM_WRITE 6
-
-#define CTRL_STOP 0x80
-#define CTRL_HOLD 0x40
-#define CTRL_32KHZ 0x00
-#define CTRL_MASK 0x08
-#define CTRL_ALARMEN 0x04
-#define CTRL_ALARM 0x02
-#define CTRL_TIMER 0x01
config ACPI_BAY
tristate "Removable Drive Bay (EXPERIMENTAL)"
depends on EXPERIMENTAL
+ depends on ACPI_DOCK
help
This driver adds support for ACPI controlled removable drive
bays such as the IBM ultrabay or the Dell Module Bay.
If unsure, say N.
+config PATA_SCC
+ tristate "Toshiba's Cell Reference Set IDE support"
+ depends on PCI && PPC_IBM_CELL_BLADE
+ help
+ This option enables support for the built-in IDE controller on
+ the Toshiba Cell Reference Board.
+
+ If unsure, say N.
+
endif
endmenu
obj-$(CONFIG_PATA_SIS) += pata_sis.o
obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
+obj-$(CONFIG_PATA_SCC) += pata_scc.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
# Should be last but one libata driver
obj-$(CONFIG_ATA_GENERIC) += ata_generic.o
#include <linux/libata.h>
#define DRV_NAME "ahci"
-#define DRV_VERSION "2.0"
+#define DRV_VERSION "2.1"
enum {
void *rx_fis;
dma_addr_t rx_fis_dma;
/* for NCQ spurious interrupt analysis */
- int ncq_saw_spurious_sdb_cnt;
unsigned int ncq_saw_d2h:1;
unsigned int ncq_saw_dmas:1;
+ unsigned int ncq_saw_sdb:1;
};
static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_vt8251_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
+#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_port_resume(struct ata_port *ap);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
+#endif
static struct scsi_host_template ahci_sht = {
.module = THIS_MODULE,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.suspend = ata_scsi_device_suspend,
.resume = ata_scsi_device_resume,
+#endif
};
static const struct ata_port_operations ahci_ops = {
.error_handler = ahci_error_handler,
.post_internal_cmd = ahci_post_internal_cmd,
+#ifdef CONFIG_PM
.port_suspend = ahci_port_suspend,
.port_resume = ahci_port_resume,
+#endif
.port_start = ahci_port_start,
.port_stop = ahci_port_stop,
.error_handler = ahci_vt8251_error_handler,
.post_internal_cmd = ahci_post_internal_cmd,
+#ifdef CONFIG_PM
.port_suspend = ahci_port_suspend,
.port_resume = ahci_port_resume,
+#endif
.port_start = ahci_port_start,
.port_stop = ahci_port_stop,
{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci_pi }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */
- /* JMicron */
- { PCI_VDEVICE(JMICRON, 0x2360), board_ahci_ign_iferr }, /* JMB360 */
- { PCI_VDEVICE(JMICRON, 0x2361), board_ahci_ign_iferr }, /* JMB361 */
- { PCI_VDEVICE(JMICRON, 0x2363), board_ahci_ign_iferr }, /* JMB363 */
- { PCI_VDEVICE(JMICRON, 0x2365), board_ahci_ign_iferr }, /* JMB365 */
- { PCI_VDEVICE(JMICRON, 0x2366), board_ahci_ign_iferr }, /* JMB366 */
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
/* ATI */
{ PCI_VDEVICE(ATI, 0x4380), board_ahci }, /* ATI SB600 non-raid */
.id_table = ahci_pci_tbl,
.probe = ahci_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ahci_pci_device_suspend,
.resume = ahci_pci_device_resume,
+#endif
};
writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}
+#ifdef CONFIG_PM
static void ahci_power_down(void __iomem *port_mmio, u32 cap)
{
u32 cmd, scontrol;
cmd &= ~PORT_CMD_SPIN_UP;
writel(cmd, port_mmio + PORT_CMD);
}
+#endif
static void ahci_init_port(void __iomem *port_mmio, u32 cap,
dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
known_irq = 1;
}
- if (status & PORT_IRQ_SDB_FIS &&
- pp->ncq_saw_spurious_sdb_cnt < 10) {
- /* SDB FIS containing spurious completions might be
- * dangerous, we need to know more about them. Print
- * more of it.
- */
+ if (status & PORT_IRQ_SDB_FIS) {
const __le32 *f = pp->rx_fis + RX_FIS_SDB;
- ata_port_printk(ap, KERN_INFO, "Spurious SDB FIS during NCQ "
- "issue=0x%x SAct=0x%x FIS=%08x:%08x%s\n",
+ if (le32_to_cpu(f[1])) {
+ /* SDB FIS containing spurious completions
+ * might be dangerous, whine and fail commands
+ * with HSM violation. EH will turn off NCQ
+ * after several such failures.
+ */
+ ata_ehi_push_desc(ehi,
+ "spurious completions during NCQ "
+ "issue=0x%x SAct=0x%x FIS=%08x:%08x",
readl(port_mmio + PORT_CMD_ISSUE),
readl(port_mmio + PORT_SCR_ACT),
- le32_to_cpu(f[0]), le32_to_cpu(f[1]),
- pp->ncq_saw_spurious_sdb_cnt < 10 ?
- "" : ", shutting up");
-
- pp->ncq_saw_spurious_sdb_cnt++;
+ le32_to_cpu(f[0]), le32_to_cpu(f[1]));
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_SOFTRESET;
+ ata_port_freeze(ap);
+ } else {
+ if (!pp->ncq_saw_sdb)
+ ata_port_printk(ap, KERN_INFO,
+ "spurious SDB FIS %08x:%08x during NCQ, "
+ "this message won't be printed again\n",
+ le32_to_cpu(f[0]), le32_to_cpu(f[1]));
+ pp->ncq_saw_sdb = 1;
+ }
known_irq = 1;
}
}
}
+#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
return 0;
}
+#endif
static int ahci_port_start(struct ata_port *ap)
{
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
- if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
- /* Function 1 is the PATA controller except on the 368, where
- we are not AHCI anyway */
- if (PCI_FUNC(pdev->devfn))
- return -ENODEV;
- }
-
rc = pcim_enable_device(pdev);
if (rc)
return rc;
#include <linux/libata.h>
#define DRV_NAME "ata_generic"
-#define DRV_VERSION "0.2.10"
+#define DRV_VERSION "0.2.11"
/*
* A generic parallel ATA driver using libata
/* We do need the right mode information for DMA or PIO
and this comes from the current configuration flags */
if (dma_enabled & (1 << (5 + i))) {
- dev->xfer_mode = XFER_MW_DMA_0;
- dev->xfer_shift = ATA_SHIFT_MWDMA;
+ ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
dev->flags &= ~ATA_DFLAG_PIO;
} else {
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations generic_port_ops = {
.id_table = ata_generic,
.probe = ata_generic_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init ata_generic_init(void)
#include <linux/libata.h>
#define DRV_NAME "ata_piix"
-#define DRV_VERSION "2.00ac7"
+#define DRV_VERSION "2.10"
enum {
PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
/* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
/* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
- { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
- { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel PIIX4 */
{ 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
/* Intel PIIX4 */
.id_table = piix_pci_tbl,
.probe = piix_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static struct scsi_host_template piix_sht = {
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static const struct ata_port_operations piix_pata_ops = {
u8 tfa[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */
};
+/*
+ * Helper - belongs in the PCI layer somewhere eventually
+ */
+static int is_pci_dev(struct device *dev)
+{
+ return (dev->bus == &pci_bus_type);
+}
/**
* sata_get_dev_handle - finds acpi_handle and PCI device.function
struct pci_dev *pci_dev;
acpi_integer addr;
+ if (!is_pci_dev(dev))
+ return -ENODEV;
+
pci_dev = to_pci_dev(dev); /* NOTE: PCI-specific */
/* Please refer to the ACPI spec for the syntax of _ADR. */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
acpi_status status;
struct acpi_device_info *dinfo = NULL;
int ret = -ENODEV;
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *pdev;
+
+ if (!is_pci_dev(dev))
+ return -ENODEV;
+
+ pdev = to_pci_dev(dev);
bus = pdev->bus->number;
devnum = PCI_SLOT(pdev->devfn);
return 0;
if (ata_msg_probe(ap))
- ata_dev_printk(atadev, KERN_DEBUG,
- "%s: ENTER: ap->id: %d, port#: %d\n",
- __FUNCTION__, ap->id, ap->port_no);
+ ata_dev_printk(atadev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
+ __FUNCTION__, ap->port_no);
if (!ata_dev_enabled(atadev) || (ap->flags & ATA_FLAG_DISABLED)) {
if (ata_msg_probe(ap))
struct ata_device *atadev,
const struct taskfile_array *gtf)
{
+ struct ata_taskfile tf;
+ unsigned int err;
+
if (ata_msg_probe(ap))
ata_dev_printk(atadev, KERN_DEBUG, "%s: (0x1f1-1f7): hex: "
"%02x %02x %02x %02x %02x %02x %02x\n",
&& (gtf->tfa[6] == 0))
return;
- if (ap->ops->qc_issue) {
- struct ata_taskfile tf;
- unsigned int err;
-
- ata_tf_init(atadev, &tf);
-
- /* convert gtf to tf */
- tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; /* TBD */
- tf.protocol = atadev->class == ATA_DEV_ATAPI ?
- ATA_PROT_ATAPI_NODATA : ATA_PROT_NODATA;
- tf.feature = gtf->tfa[0]; /* 0x1f1 */
- tf.nsect = gtf->tfa[1]; /* 0x1f2 */
- tf.lbal = gtf->tfa[2]; /* 0x1f3 */
- tf.lbam = gtf->tfa[3]; /* 0x1f4 */
- tf.lbah = gtf->tfa[4]; /* 0x1f5 */
- tf.device = gtf->tfa[5]; /* 0x1f6 */
- tf.command = gtf->tfa[6]; /* 0x1f7 */
-
- err = ata_exec_internal(atadev, &tf, NULL, DMA_NONE, NULL, 0);
- if (err && ata_msg_probe(ap))
- ata_dev_printk(atadev, KERN_ERR,
- "%s: ata_exec_internal failed: %u\n",
- __FUNCTION__, err);
- } else
- if (ata_msg_warn(ap))
- ata_dev_printk(atadev, KERN_WARNING,
- "%s: SATA driver is missing qc_issue function"
- " entry points\n",
- __FUNCTION__);
+ ata_tf_init(atadev, &tf);
+
+ /* convert gtf to tf */
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; /* TBD */
+ tf.protocol = atadev->class == ATA_DEV_ATAPI ?
+ ATA_PROT_ATAPI_NODATA : ATA_PROT_NODATA;
+ tf.feature = gtf->tfa[0]; /* 0x1f1 */
+ tf.nsect = gtf->tfa[1]; /* 0x1f2 */
+ tf.lbal = gtf->tfa[2]; /* 0x1f3 */
+ tf.lbam = gtf->tfa[3]; /* 0x1f4 */
+ tf.lbah = gtf->tfa[4]; /* 0x1f5 */
+ tf.device = gtf->tfa[5]; /* 0x1f6 */
+ tf.command = gtf->tfa[6]; /* 0x1f7 */
+
+ err = ata_exec_internal(atadev, &tf, NULL, DMA_NONE, NULL, 0);
+ if (err && ata_msg_probe(ap))
+ ata_dev_printk(atadev, KERN_ERR,
+ "%s: ata_exec_internal failed: %u\n",
+ __FUNCTION__, err);
}
/**
struct taskfile_array *gtf;
if (ata_msg_probe(ap))
- ata_dev_printk(atadev, KERN_DEBUG,
- "%s: ENTER: ap->id: %d, port#: %d\n",
- __FUNCTION__, ap->id, ap->port_no);
+ ata_dev_printk(atadev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
+ __FUNCTION__, ap->port_no);
if (noacpi || !(ap->cbl == ATA_CBL_SATA))
return 0;
return 0;
if (ata_msg_probe(ap))
- ata_dev_printk(atadev, KERN_DEBUG,
- "%s: ap->id: %d, ix = %d, port#: %d\n",
- __FUNCTION__, ap->id, ix, ap->port_no);
+ ata_dev_printk(atadev, KERN_DEBUG, "%s: ix = %d, port#: %d\n",
+ __FUNCTION__, ix, ap->port_no);
/* Don't continue if not a SATA device. */
if (!(ap->cbl == ATA_CBL_SATA)) {
if (err < 0) {
if (ata_msg_probe(ap))
ata_dev_printk(atadev, KERN_DEBUG,
- "ata%u(%u): %s _SDD error: status = 0x%x\n",
- ap->id, ap->device->devno,
- __FUNCTION__, status);
+ "%s _SDD error: status = 0x%x\n",
+ __FUNCTION__, status);
}
/* always return success */
#include "libata.h"
-#define DRV_VERSION "2.10" /* must be exactly four chars */
+#define DRV_VERSION "2.20" /* must be exactly four chars */
/* debounce timing parameters in msecs { interval, duration, timeout } */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
-static unsigned int ata_unique_id = 1;
+static unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;
struct workqueue_struct *ata_aux_wq;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->flags |= tf_flags;
- if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
- ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
- likely(tag != ATA_TAG_INTERNAL)) {
+ if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
/* yay, NCQ */
if (!lba_48_ok(block, n_block))
return -ERANGE;
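
The open-coded three-flag test removed here has presumably moved into
an ata_ncq_enabled() helper; a sketch of what such a helper would look
like, using the flag names from the old condition:

/* Sketch: NCQ is usable only if the device advertises it, it has not
 * been switched off, and the device is not restricted to PIO.
 */
static inline int ata_ncq_enabled(struct ata_device *dev)
{
	return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
			      ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
}
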
{
if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
ata_dev_printk(dev, KERN_WARNING, "disabled\n");
+ ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
+ ATA_DNXFER_QUIET);
dev->class++;
}
}
* Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
*/
-static unsigned int
+unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
struct ata_taskfile tf;
}
}
+/**
+ * ata_id_to_dma_mode - Identify DMA mode from id block
+ * @dev: device to identify
+ * @unknown: mode to assume if we cannot tell
+ *
+ * Set up the timing values for the device based upon the identify
+ * reported values for the DMA mode. This function is used by drivers
+ * which rely upon firmware configured modes, but wish to report the
+ * mode correctly when possible.
+ *
+ * In addition we emit messages formatted like those of the default
+ * ata_dev_set_mode handler, in order to provide consistency of
+ * presentation.
+ */
+
+void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
+{
+ unsigned int mask;
+ u8 mode;
+
+ /* Pack the DMA modes */
+ mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
+ if (dev->id[53] & 0x04)
+ mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
+
+ /* Select the mode in use */
+ mode = ata_xfer_mask2mode(mask);
+
+ if (mode != 0) {
+ ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
+ ata_mode_string(mask));
+ } else {
+ /* SWDMA perhaps ? */
+ mode = unknown;
+ ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
+ }
+
+ /* Configure the device reporting */
+ dev->xfer_mode = mode;
+ dev->xfer_shift = ata_xfer_mode2shift(mode);
+}
+
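For reference, the masks packed by ata_id_to_dma_mode() come from
IDENTIFY words 63 (MWDMA) and 88 (UDMA), whose high bytes hold the
currently selected mode, with word 53 bit 2 indicating whether word 88
is valid. A standalone sketch of the packing step; the shift constants
and mask widths here are assumed, not libata's definitions:

#include <stdio.h>

/* Assumed bit layout of the xfer mask; libata's real values may differ. */
#define SHIFT_MWDMA	5
#define SHIFT_UDMA	8
#define MASK_MWDMA	(0x07u << SHIFT_MWDMA)
#define MASK_UDMA	(0xffu << SHIFT_UDMA)

static unsigned int pack_dma_mask(const unsigned short *id)
{
	/* high byte of word 63: the MWDMA mode currently selected */
	unsigned int mask = ((id[63] >> 8) << SHIFT_MWDMA) & MASK_MWDMA;

	if (id[53] & 0x04)		/* word 88 (UDMA) valid? */
		mask |= ((id[88] >> 8) << SHIFT_UDMA) & MASK_UDMA;
	return mask;
}

int main(void)
{
	unsigned short id[256] = { 0 };

	id[53] = 0x04;
	id[63] = 0x0407;	/* MWDMA0-2 supported, mode 2 selected */
	id[88] = 0x203f;	/* UDMA0-5 supported, UDMA5 selected */
	printf("xfer mask %#x\n", pack_dma_mask(id));
	return 0;
}
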
/**
* ata_noop_dev_select - Select device 0/1 on ATA bus
* @ap: ATA channel to manipulate
unsigned int wait, unsigned int can_sleep)
{
if (ata_msg_probe(ap))
- ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
- "device %u, wait %u\n", ap->id, device, wait);
+ ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
+ "device %u, wait %u\n", device, wait);
if (wait)
ata_wait_idle(ap);
int rc;
if (ata_msg_ctl(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
- __FUNCTION__, ap->id, dev->devno);
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
if (err_mask) {
if (err_mask & AC_ERR_NODEV_HINT) {
DPRINTK("ata%u.%d: NODEV after polling detection\n",
- ap->id, dev->devno);
+ ap->print_id, dev->devno);
return -ENOENT;
}
int rc;
if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
- ata_dev_printk(dev, KERN_INFO,
- "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
- __FUNCTION__, ap->id, dev->devno);
+ ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
+ __FUNCTION__);
return 0;
}
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
- __FUNCTION__, ap->id, dev->devno);
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
/* set _SDD */
rc = ata_acpi_push_id(ap, dev->devno);
if (dev->class == ATA_DEV_ATA) {
if (ata_id_is_cfa(id)) {
if (id[162] & 1) /* CPRM may make this media unusable */
- ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessable.\n",
- ap->id, dev->devno);
+ ata_dev_printk(dev, KERN_WARNING,
+ "supports DRM functions and may "
+ "not be fully accessable.\n");
snprintf(revbuf, 7, "CFA");
}
else
"%s: %s, %s, max %s\n",
revbuf, modelbuf, fwrevbuf,
ata_mode_string(xfer_mask));
- ata_dev_printk(dev, KERN_INFO,
+ ata_dev_printk(dev, KERN_INFO,
"%Lu sectors, multi %u, CHS %u/%u/%u\n",
(unsigned long long)dev->n_sectors,
dev->multi_count, dev->cylinders,
{
unsigned int classes[ATA_MAX_DEVICES];
int tries[ATA_MAX_DEVICES];
- int i, rc, down_xfermask;
+ int i, rc;
struct ata_device *dev;
ata_port_probe(ap);
tries[i] = ATA_PROBE_MAX_TRIES;
retry:
- down_xfermask = 0;
-
/* reset and determine device classes */
ap->ops->phy_reset(ap);
for (i = 0; i < ATA_MAX_DEVICES; i++)
ap->device[i].pio_mode = XFER_PIO_0;
- /* read IDENTIFY page and configure devices */
- for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ /* read IDENTIFY page and configure devices. We have to do the
+ identify-specific sequence bass-ackwards so that PDIAG- is
+ released by the slave device */
+
+ for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
dev = &ap->device[i];
if (tries[i])
dev->id);
if (rc)
goto fail;
+ }
+
+ /* After the identify sequence we can now set up the devices. We do
+ this in the normal order so that the user doesn't get confused */
+
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ dev = &ap->device[i];
+ if (!ata_dev_enabled(dev))
+ continue;
ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
rc = ata_dev_configure(dev);
/* configure transfer mode */
rc = ata_set_mode(ap, &dev);
- if (rc) {
- down_xfermask = 1;
+ if (rc)
goto fail;
- }
for (i = 0; i < ATA_MAX_DEVICES; i++)
if (ata_dev_enabled(&ap->device[i]))
return -ENODEV;
fail:
+ tries[dev->devno]--;
+
switch (rc) {
case -EINVAL:
- case -ENODEV:
+ /* eeek, something went very wrong, give up */
tries[dev->devno] = 0;
break;
+
+ case -ENODEV:
+ /* give it just one more chance */
+ tries[dev->devno] = min(tries[dev->devno], 1);
case -EIO:
- sata_down_spd_limit(ap);
- /* fall through */
- default:
- tries[dev->devno]--;
- if (down_xfermask &&
- ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
- tries[dev->devno] = 0;
+ if (tries[dev->devno] == 1) {
+ /* This is the last chance, better to slow
+ * down than lose it.
+ */
+ sata_down_spd_limit(ap);
+ ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+ }
}
- if (!tries[dev->devno]) {
- ata_down_xfermask_limit(dev, 1);
+ if (!tries[dev->devno])
ata_dev_disable(dev);
- }
goto retry;
}
/**
* ata_down_xfermask_limit - adjust dev xfer masks downward
* @dev: Device to adjust xfer masks
- * @force_pio0: Force PIO0
+ * @sel: ATA_DNXFER_* selector
*
* Adjust xfer masks of @dev downward. Note that this function
* does not apply the change. Invoking ata_set_mode() afterwards
* RETURNS:
* 0 on success, negative errno on failure
*/
-int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
+int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
- unsigned long xfer_mask;
- int highbit;
+ char buf[32];
+ unsigned int orig_mask, xfer_mask;
+ unsigned int pio_mask, mwdma_mask, udma_mask;
+ int quiet, highbit;
- xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
- dev->udma_mask);
+ quiet = !!(sel & ATA_DNXFER_QUIET);
+ sel &= ~ATA_DNXFER_QUIET;
- if (!xfer_mask)
- goto fail;
- /* don't gear down to MWDMA from UDMA, go directly to PIO */
- if (xfer_mask & ATA_MASK_UDMA)
- xfer_mask &= ~ATA_MASK_MWDMA;
+ xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
+ dev->mwdma_mask,
+ dev->udma_mask);
+ ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
- highbit = fls(xfer_mask) - 1;
- xfer_mask &= ~(1 << highbit);
- if (force_pio0)
- xfer_mask &= 1 << ATA_SHIFT_PIO;
- if (!xfer_mask)
- goto fail;
+ switch (sel) {
+ case ATA_DNXFER_PIO:
+ highbit = fls(pio_mask) - 1;
+ pio_mask &= ~(1 << highbit);
+ break;
+
+ case ATA_DNXFER_DMA:
+ if (udma_mask) {
+ highbit = fls(udma_mask) - 1;
+ udma_mask &= ~(1 << highbit);
+ if (!udma_mask)
+ return -ENOENT;
+ } else if (mwdma_mask) {
+ highbit = fls(mwdma_mask) - 1;
+ mwdma_mask &= ~(1 << highbit);
+ if (!mwdma_mask)
+ return -ENOENT;
+ }
+ break;
+
+ case ATA_DNXFER_40C:
+ udma_mask &= ATA_UDMA_MASK_40C;
+ break;
+
+ case ATA_DNXFER_FORCE_PIO0:
+ pio_mask &= 1;
+ case ATA_DNXFER_FORCE_PIO:
+ mwdma_mask = 0;
+ udma_mask = 0;
+ break;
+
+ default:
+ BUG();
+ }
+
+ xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
+
+ if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
+ return -ENOENT;
+
+ if (!quiet) {
+ if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
+ snprintf(buf, sizeof(buf), "%s:%s",
+ ata_mode_string(xfer_mask),
+ ata_mode_string(xfer_mask & ATA_MASK_PIO));
+ else
+ snprintf(buf, sizeof(buf), "%s",
+ ata_mode_string(xfer_mask));
+
+ ata_dev_printk(dev, KERN_WARNING,
+ "limiting speed to %s\n", buf);
+ }
ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
&dev->udma_mask);
- ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
- ata_mode_string(xfer_mask));
-
return 0;
-
- fail:
- return -EINVAL;
}
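
Each speed-down step removes exactly the highest-numbered mode from one
of the masks via fls(). A standalone illustration with a portable
stand-in for fls():

#include <stdio.h>

/* Portable stand-in for the kernel's fls(): position of the highest
 * set bit, 1-based; 0 for no bits set.
 */
static int fls_(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int udma_mask = 0x3f;	/* UDMA0-5 allowed */
	int highbit = fls_(udma_mask) - 1;

	udma_mask &= ~(1u << highbit);	/* drop UDMA5, leaving UDMA0-4 */
	printf("%#x\n", udma_mask);	/* prints 0x1f */
	return 0;
}
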
static int ata_dev_set_mode(struct ata_device *dev)
* host channels are not permitted to do so.
*/
if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
- ap->host->simplex_claimed = 1;
+ ap->host->simplex_claimed = ap;
/* step5: chip specific finalisation */
if (ap->ops->post_set_mode)
ap->ops->post_set_mode(ap);
-
out:
if (rc)
*r_failed_dev = dev;
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- DPRINTK("ata%u: bus reset via SRST\n", ap->id);
+ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
/* software reset. causes dev0 to be selected */
iowrite8(ap->ctl, ioaddr->ctl_addr);
u8 err;
unsigned int dev0, dev1 = 0, devmask = 0;
- DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
+ DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
/* determine if device 0/1 are present */
if (ap->flags & ATA_FLAG_SATA_RESET)
{ "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
{ "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
{ "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
- { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
{ "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
{ "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
/* Devices where NCQ should be avoided */
/* NCQ is slow */
{ "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
+ /* http://thread.gmane.org/gmane.linux.ide/14907 */
+ { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
/* Devices with NCQ limits */
"device is on DMA blacklist, disabling DMA\n");
}
- if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
+ if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed != ap) {
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
"other device, disabling DMA\n");
struct scatterlist *lsg = &sg[qc->n_elem - 1];
int n_elem, pre_n_elem, dir, trim_sg = 0;
- VPRINTK("ENTER, ata%u\n", ap->id);
+ VPRINTK("ENTER, ata%u\n", ap->print_id);
WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
/* we must lengthen transfers to end on a 32-bit boundary */
if (do_write != i_write)
goto err_out;
- VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
+ VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
__atapi_pio_bytes(qc, bytes);
fsm_start:
DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
- ap->id, qc->tf.protocol, ap->hsm_task_state, status);
+ ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
switch (ap->hsm_task_state) {
case HSM_ST_FIRST:
* let the EH abort the command or reset the device.
*/
if (unlikely(status & (ATA_ERR | ATA_DF))) {
- printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
- ap->id, status);
+ ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
+ "error, dev_stat 0x%X\n", status);
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
* let the EH abort the command or reset the device.
*/
if (unlikely(status & (ATA_ERR | ATA_DF))) {
- printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
- ap->id, status);
+ ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
+ "device error, dev_stat 0x%X\n",
+ status);
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
/* no more data to transfer */
DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
- ap->id, qc->dev->devno, status);
+ ap->print_id, qc->dev->devno, status);
WARN_ON(qc->err_mask);
u8 status, host_stat = 0;
VPRINTK("ata%u: protocol %d task_state %d\n",
- ap->id, qc->tf.protocol, ap->hsm_task_state);
+ ap->print_id, qc->tf.protocol, ap->hsm_task_state);
/* Check whether we are expecting interrupt in this state */
switch (ap->hsm_task_state) {
qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
/* check status of DMA engine */
host_stat = ap->ops->bmdma_status(ap);
- VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+ VPRINTK("ata%u: host_stat 0x%X\n",
+ ap->print_id, host_stat);
/* if it's not our irq... */
if (!(host_stat & ATA_DMA_INTR))
return 0;
}
+#ifdef CONFIG_PM
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
unsigned int action, unsigned int ehi_flags,
int wait)
ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
host->dev->power.power_state = PMSG_ON;
}
+#endif
/**
* ata_port_start - Set port up for dma.
ap->lock = &host->lock;
ap->flags = ATA_FLAG_DISABLED;
- ap->id = ata_unique_id++;
+ ap->print_id = ata_print_id++;
ap->ctl = ATA_DEVCTL_OBS;
ap->host = host;
ap->dev = ent->dev;
{
ap->scsi_host = shost;
- shost->unique_id = ap->id;
+ shost->unique_id = ap->print_id;
shost->max_id = 16;
shost->max_lun = 1;
shost->max_channel = 1;
if (host->ops->host_stop)
host->ops->host_stop(host);
+
+ dev_set_drvdata(gendev, NULL);
}
/**
/* wait for EH to finish */
ata_port_wait_eh(ap);
} else {
- DPRINTK("ata%u: bus probe begin\n", ap->id);
+ DPRINTK("ata%u: bus probe begin\n", ap->print_id);
rc = ata_bus_probe(ap);
- DPRINTK("ata%u: bus probe end\n", ap->id);
+ DPRINTK("ata%u: bus probe end\n", ap->print_id);
if (rc) {
/* FIXME: do something useful here?
err_out:
devres_release_group(dev, ata_device_add);
- dev_set_drvdata(dev, NULL);
VPRINTK("EXIT, returning %d\n", rc);
return 0;
}
{
struct ata_probe_ent *probe_ent;
- /* XXX - the following if can go away once all LLDs are managed */
- if (!list_empty(&dev->devres_head))
- probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
- else
- probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
+ probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
if (!probe_ent) {
printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
kobject_name(&(dev->kobj)));
return (tmp == bits->val) ? 1 : 0;
}
+#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
pci_save_state(pdev);
+ pci_disable_device(pdev);
- if (mesg.event == PM_EVENT_SUSPEND) {
- pci_disable_device(pdev);
+ if (mesg.event == PM_EVENT_SUSPEND)
pci_set_power_state(pdev, PCI_D3hot);
- }
}
int ata_pci_device_do_resume(struct pci_dev *pdev)
ata_host_resume(host);
return rc;
}
+#endif /* CONFIG_PM */
+
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
+EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
+#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
+#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
+EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
+#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */
+#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
+#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
EXPORT_SYMBOL_GPL(ata_irq_ack);
EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
+EXPORT_SYMBOL_GPL(ata_dev_try_classify);
#include "libata.h"
+enum {
+ ATA_EH_SPDN_NCQ_OFF = (1 << 0),
+ ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
+ ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
+};
+
static void __ata_port_freeze(struct ata_port *ap);
static void ata_eh_finish(struct ata_port *ap);
+#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
+static int ata_eh_suspend(struct ata_port *ap,
+ struct ata_device **r_failed_dev);
+static void ata_eh_prep_resume(struct ata_port *ap);
+static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev);
+#else /* CONFIG_PM */
+static void ata_eh_handle_port_suspend(struct ata_port *ap)
+{ }
+
+static void ata_eh_handle_port_resume(struct ata_port *ap)
+{ }
+
+static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
+{
+ return 0;
+}
+
+static void ata_eh_prep_resume(struct ata_port *ap)
+{ }
+
+static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
+{
+ return 0;
+}
+#endif /* CONFIG_PM */
static void ata_ering_record(struct ata_ering *ering, int is_io,
unsigned int err_mask)
ent->timestamp = get_jiffies_64();
}
-static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
+static void ata_ering_clear(struct ata_ering *ering)
{
- struct ata_ering_entry *ent = &ering->ring[ering->cursor];
- if (!ent->err_mask)
- return NULL;
- return ent;
+ memset(ering, 0, sizeof(*ering));
}
static int ata_ering_map(struct ata_ering *ering,
ap->pflags |= ATA_PFLAG_FROZEN;
- DPRINTK("ata%u port frozen\n", ap->id);
+ DPRINTK("ata%u port frozen\n", ap->print_id);
}
/**
spin_unlock_irqrestore(ap->lock, flags);
- DPRINTK("ata%u port thawed\n", ap->id);
+ DPRINTK("ata%u port thawed\n", ap->print_id);
}
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
return action;
}
-static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
+static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
{
- if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
+ if (err_mask & AC_ERR_ATA_BUS)
return 1;
- if (ent->is_io) {
- if (ent->err_mask & AC_ERR_HSM)
- return 1;
- if ((ent->err_mask &
- (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
+ if (err_mask & AC_ERR_TIMEOUT)
+ return 2;
+
+ if (is_io) {
+ if (err_mask & AC_ERR_HSM)
return 2;
+ if ((err_mask &
+ (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
+ return 3;
}
return 0;
}
-struct speed_down_needed_arg {
+struct speed_down_verdict_arg {
u64 since;
- int nr_errors[3];
+ int nr_errors[4];
};
-static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
+static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
- struct speed_down_needed_arg *arg = void_arg;
+ struct speed_down_verdict_arg *arg = void_arg;
+ int cat = ata_eh_categorize_error(ent->is_io, ent->err_mask);
if (ent->timestamp < arg->since)
return -1;
- arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
+ arg->nr_errors[cat]++;
return 0;
}
/**
- * ata_eh_speed_down_needed - Determine wheter speed down is necessary
+ * ata_eh_speed_down_verdict - Determine speed down verdict
* @dev: Device of interest
*
* This function examines error ring of @dev and determines
- * whether speed down is necessary. Speed down is necessary if
- * there have been more than 3 of Cat-1 errors or 10 of Cat-2
- * errors during last 15 minutes.
+ * whether NCQ needs to be turned off, transfer speed should be
+ * stepped down, or falling back to PIO is necessary.
*
- * Cat-1 errors are ATA_BUS, TIMEOUT for any command and HSM
- * violation for known supported commands.
+ * Cat-1 is ATA_BUS error for any command.
*
- * Cat-2 errors are unclassified DEV error for known supported
+ * Cat-2 is TIMEOUT for any command or HSM violation for known
+ * supported commands.
+ *
+ * Cat-3 is unclassified DEV error for known supported
* command.
*
+ * NCQ needs to be turned off if there have been more than 3
+ * Cat-2 + Cat-3 errors during last 10 minutes.
+ *
+ * Speed down is necessary if there have been more than 3 Cat-1 +
+ * Cat-2 errors or 10 Cat-3 errors during last 10 minutes.
+ *
+ * Falling back to PIO mode is necessary if there have been more
+ * than 10 Cat-1 + Cat-2 + Cat-3 errors during last 5 minutes.
+ *
* LOCKING:
* Inherited from caller.
*
* RETURNS:
- * 1 if speed down is necessary, 0 otherwise
+ * OR of ATA_EH_SPDN_* flags.
*/
-static int ata_eh_speed_down_needed(struct ata_device *dev)
+static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
- const u64 interval = 15LLU * 60 * HZ;
- static const int err_limits[3] = { -1, 3, 10 };
- struct speed_down_needed_arg arg;
- struct ata_ering_entry *ent;
- int err_cat;
- u64 j64;
+ const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
+ u64 j64 = get_jiffies_64();
+ struct speed_down_verdict_arg arg;
+ unsigned int verdict = 0;
- ent = ata_ering_top(&dev->ering);
- if (!ent)
- return 0;
+ /* scan past 10 mins of error history */
+ memset(&arg, 0, sizeof(arg));
+ arg.since = j64 - min(j64, j10mins);
+ ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
- err_cat = ata_eh_categorize_ering_entry(ent);
- if (err_cat == 0)
- return 0;
+ if (arg.nr_errors[2] + arg.nr_errors[3] > 3)
+ verdict |= ATA_EH_SPDN_NCQ_OFF;
+ if (arg.nr_errors[1] + arg.nr_errors[2] > 3 || arg.nr_errors[3] > 10)
+ verdict |= ATA_EH_SPDN_SPEED_DOWN;
+ /* scan past 5 mins of error history */
memset(&arg, 0, sizeof(arg));
+ arg.since = j64 - min(j64, j5mins);
+ ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
- j64 = get_jiffies_64();
- if (j64 >= interval)
- arg.since = j64 - interval;
- else
- arg.since = 0;
-
- ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
+ if (arg.nr_errors[1] + arg.nr_errors[2] + arg.nr_errors[3] > 10)
+ verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
- return arg.nr_errors[err_cat] > err_limits[err_cat];
+ return verdict;
}
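
The verdict is thus just two windowed counts over the categorized error
ring. A standalone sketch replaying a small history against the
10-minute rules (the flag values, timestamps and window here are
illustrative, not libata's):

#include <stdio.h>

#define SPDN_NCQ_OFF	(1 << 0)	/* assumed flag values */
#define SPDN_SPEED_DOWN	(1 << 1)

struct err { unsigned long long when; int cat; };

static unsigned int verdict(const struct err *e, int n,
			    unsigned long long now,
			    unsigned long long window)
{
	int nr[4] = { 0, 0, 0, 0 };
	unsigned int v = 0;
	int i;

	for (i = 0; i < n; i++)
		if (e[i].when + window >= now)
			nr[e[i].cat]++;	/* count errors inside the window */

	if (nr[2] + nr[3] > 3)
		v |= SPDN_NCQ_OFF;
	if (nr[1] + nr[2] > 3 || nr[3] > 10)
		v |= SPDN_SPEED_DOWN;
	return v;
}

int main(void)
{
	/* two timeouts (Cat-2) and two device errors (Cat-3) in-window */
	struct err hist[] = { {100, 2}, {150, 2}, {200, 3}, {250, 3} };

	printf("verdict %#x\n", verdict(hist, 4, 300, 600)); /* NCQ_OFF */
	return 0;
}
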
/**
* Kernel thread context (may sleep).
*
* RETURNS:
- * 0 on success, -errno otherwise
+ * Determined recovery action.
*/
-static int ata_eh_speed_down(struct ata_device *dev, int is_io,
- unsigned int err_mask)
+static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
+ unsigned int err_mask)
{
- if (!err_mask)
+ unsigned int verdict;
+ unsigned int action = 0;
+
+ /* don't bother if Cat-0 error */
+ if (ata_eh_categorize_error(is_io, err_mask) == 0)
return 0;
/* record error and determine whether speed down is necessary */
ata_ering_record(&dev->ering, is_io, err_mask);
+ verdict = ata_eh_speed_down_verdict(dev);
- if (!ata_eh_speed_down_needed(dev))
- return 0;
+ /* turn off NCQ? */
+ if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
+ (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
+ ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
+ dev->flags |= ATA_DFLAG_NCQ_OFF;
+ ata_dev_printk(dev, KERN_WARNING,
+ "NCQ disabled due to excessive errors\n");
+ goto done;
+ }
- /* speed down SATA link speed if possible */
- if (sata_down_spd_limit(dev->ap) == 0)
- return ATA_EH_HARDRESET;
+ /* speed down? */
+ if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
+ /* speed down SATA link speed if possible */
+ if (sata_down_spd_limit(dev->ap) == 0) {
+ action |= ATA_EH_HARDRESET;
+ goto done;
+ }
- /* lower transfer mode */
- if (ata_down_xfermask_limit(dev, 0) == 0)
- return ATA_EH_SOFTRESET;
+ /* lower transfer mode */
+ if (dev->spdn_cnt < 2) {
+ static const int dma_dnxfer_sel[] =
+ { ATA_DNXFER_DMA, ATA_DNXFER_40C };
+ static const int pio_dnxfer_sel[] =
+ { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
+ int sel;
+
+ if (dev->xfer_shift != ATA_SHIFT_PIO)
+ sel = dma_dnxfer_sel[dev->spdn_cnt];
+ else
+ sel = pio_dnxfer_sel[dev->spdn_cnt];
+
+ dev->spdn_cnt++;
+
+ if (ata_down_xfermask_limit(dev, sel) == 0) {
+ action |= ATA_EH_SOFTRESET;
+ goto done;
+ }
+ }
+ }
+
+ /* Fall back to PIO? Slowing down to PIO is meaningless for
+ * SATA. Consider it only for PATA.
+ */
+ if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
+ (dev->ap->cbl != ATA_CBL_SATA) &&
+ (dev->xfer_shift != ATA_SHIFT_PIO)) {
+ if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
+ dev->spdn_cnt = 0;
+ action |= ATA_EH_SOFTRESET;
+ goto done;
+ }
+ }
- ata_dev_printk(dev, KERN_ERR,
- "speed down requested but no transfer mode left\n");
return 0;
+ done:
+ /* device has been slowed down, blow error history */
+ ata_ering_clear(&dev->ering);
+ return action;
}
/**
return rc;
}
+#ifdef CONFIG_PM
/**
* ata_eh_suspend - handle suspend EH action
* @ap: target host port
DPRINTK("EXIT\n");
return 0;
}
+#endif /* CONFIG_PM */
static int ata_port_nr_enabled(struct ata_port *ap)
{
{
struct ata_eh_context *ehc = &ap->eh_context;
struct ata_device *dev;
- int down_xfermask, i, rc;
+ int i, rc;
DPRINTK("ENTER\n");
}
retry:
- down_xfermask = 0;
rc = 0;
/* if UNLOADING, finish immediately */
/* configure transfer mode if necessary */
if (ehc->i.flags & ATA_EHI_SETMODE) {
rc = ata_set_mode(ap, &dev);
- if (rc) {
- down_xfermask = 1;
+ if (rc)
goto dev_fail;
- }
ehc->i.flags &= ~ATA_EHI_SETMODE;
}
goto out;
dev_fail:
+ ehc->tries[dev->devno]--;
+
switch (rc) {
- case -ENODEV:
- /* device missing, schedule probing */
- ehc->i.probe_mask |= (1 << dev->devno);
case -EINVAL:
+ /* eeek, something went very wrong, give up */
ehc->tries[dev->devno] = 0;
break;
+
+ case -ENODEV:
+ /* device missing or wrong IDENTIFY data, schedule probing */
+ ehc->i.probe_mask |= (1 << dev->devno);
+ /* give it just one more chance */
+ ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
case -EIO:
- sata_down_spd_limit(ap);
- default:
- ehc->tries[dev->devno]--;
- if (down_xfermask &&
- ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
- ehc->tries[dev->devno] = 0;
+ if (ehc->tries[dev->devno] == 1) {
+ /* This is the last chance, better to slow
+ * down than lose it.
+ */
+ sata_down_spd_limit(ap);
+ ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+ }
}
if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
ata_eh_finish(ap);
}
+#ifdef CONFIG_PM
/**
* ata_eh_handle_port_suspend - perform port suspend operation
* @ap: port to suspend
}
spin_unlock_irqrestore(ap->lock, flags);
}
+#endif /* CONFIG_PM */
scsi_cmd[8] = args[3];
scsi_cmd[10] = args[4];
scsi_cmd[12] = args[5];
+ scsi_cmd[13] = args[6] & 0x0f;
scsi_cmd[14] = args[0];
/* Good values for timeout and retries? Values below
}
}
+#ifdef CONFIG_PM
/**
* ata_scsi_device_suspend - suspend ATA device associated with sdev
* @sdev: the SCSI device to suspend
sdev->sdev_gendev.power.power_state = PMSG_ON;
return 0;
}
+#endif /* CONFIG_PM */
/**
* ata_to_sense_error - convert ATA error to SCSI error
*/
if (qc->err_mask ||
tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
- ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
+ ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
&sb[1], &sb[2], &sb[3], verbose);
sb[1] &= 0x0f;
}
*/
if (qc->err_mask ||
tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
- ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
+ ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
&sb[1], &sb[2], &sb[3], verbose);
sb[1] &= 0x0f;
}
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
unsigned long flags;
- int max_depth;
- if (queue_depth < 1)
+ if (queue_depth < 1 || queue_depth == sdev->queue_depth)
return sdev->queue_depth;
dev = ata_scsi_find_dev(ap, sdev);
if (!dev || !ata_dev_enabled(dev))
return sdev->queue_depth;
- max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
- max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
- if (queue_depth > max_depth)
- queue_depth = max_depth;
-
- scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
-
+ /* NCQ enabled? */
spin_lock_irqsave(ap->lock, flags);
- if (queue_depth > 1)
- dev->flags &= ~ATA_DFLAG_NCQ_OFF;
- else
+ dev->flags &= ~ATA_DFLAG_NCQ_OFF;
+ if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
dev->flags |= ATA_DFLAG_NCQ_OFF;
+ queue_depth = 1;
+ }
spin_unlock_irqrestore(ap->lock, flags);
+ /* limit and apply queue depth */
+ queue_depth = min(queue_depth, sdev->host->can_queue);
+ queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
+ queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);
+
+ if (sdev->queue_depth == queue_depth)
+ return -EINVAL;
+
+ scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
return queue_depth;
}
}
if (need_sense && !ap->ops->error_handler)
- ata_dump_status(ap->id, &qc->result_tf);
+ ata_dump_status(ap->print_id, &qc->result_tf);
qc->scsidone(cmd);
static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
{
struct ata_port *ap = dev->ap;
+ int is_ncq = is_io && ata_ncq_enabled(dev);
- if (!(dev->flags & ATA_DFLAG_NCQ))
- return 0;
-
- if (is_io) {
+ if (is_ncq) {
if (!ata_tag_valid(ap->active_tag))
return 0;
} else {
u8 *scsicmd = cmd->cmnd;
DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- ap->id,
+ ap->print_id,
scsidev->channel, scsidev->id, scsidev->lun,
scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
ata_port_init(ap, host, ent, 0);
ap->lock = shost->host_lock;
- kfree(ent);
+ devm_kfree(host->dev, ent);
return ap;
}
EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
*/
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
- DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+ DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
iowrite8(tf->command, ap->ioaddr.command_addr);
ata_pause(ap);
static int ata_resources_present(struct pci_dev *pdev, int port)
{
int i;
-
+
/* Check the PCI resources for this channel are enabled */
port = port * 2;
for (i = 0; i < 2; i ++) {
}
return 1;
}
-
+
/**
* ata_pci_init_native_mode - Initialize native-mode driver
* @pdev: pci device to be initialized
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
-
+
/* Discard disabled ports. Some controllers show their
unused channels this way */
if (ata_resources_present(pdev, 0) == 0)
enum {
/* flags for ata_dev_read_id() */
ATA_READID_POSTRESET = (1 << 0), /* reading ID after reset */
+
+ /* selector for ata_down_xfermask_limit() */
+ ATA_DNXFER_PIO = 0, /* speed down PIO */
+ ATA_DNXFER_DMA = 1, /* speed down DMA */
+ ATA_DNXFER_40C = 2, /* apply 40c cable limit */
+ ATA_DNXFER_FORCE_PIO = 3, /* force PIO */
+ ATA_DNXFER_FORCE_PIO0 = 4, /* force PIO0 */
+
+ ATA_DNXFER_QUIET = (1 << 31),
};
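
Callers pass exactly one selector, optionally OR'ed with the quiet
flag; for instance, ata_dev_disable() above forces PIO0 silently, while
the EH speed-down path steps through the DMA selectors loudly. In
sketch form:

/* EH speed-down: knock the top DMA mode off, with the usual warning. */
ata_down_xfermask_limit(dev, ATA_DNXFER_DMA);

/* Disabling a dead device: drop to PIO0 without the "limiting speed" log. */
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
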
extern struct workqueue_struct *ata_aux_wq;
extern int ata_dev_configure(struct ata_device *dev);
extern int sata_down_spd_limit(struct ata_port *ap);
extern int sata_set_spd_needed(struct ata_port *ap);
-extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
+extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
extern void ata_sg_clean(struct ata_queued_cmd *qc);
extern void ata_qc_free(struct ata_queued_cmd *qc);
/* libata-sff.c */
extern u8 ata_irq_on(struct ata_port *ap);
-/* pata_sis.c */
-extern struct ata_port_info sis_info133;
#endif /* __LIBATA_H__ */
#include <linux/dmi.h>
#define DRV_NAME "pata_ali"
-#define DRV_VERSION "0.7.2"
+#define DRV_VERSION "0.7.3"
/*
* Cable special cases
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
/*
return ata_pci_init_one(pdev, port_info, 2);
}
+#ifdef CONFIG_PM
static int ali_reinit_one(struct pci_dev *pdev)
{
ali_init_chipset(pdev);
return ata_pci_device_resume(pdev);
}
+#endif
static const struct pci_device_id ali[] = {
{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), },
.id_table = ali,
.probe = ali_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ali_reinit_one,
+#endif
};
static int __init ali_init(void)
#include <linux/libata.h>
#define DRV_NAME "pata_amd"
-#define DRV_VERSION "0.2.7"
+#define DRV_VERSION "0.2.8"
/**
* timing_setup - shared timing computation and load
static int amd_pre_reset(struct ata_port *ap)
{
- static const u32 bitmask[2] = {0x03, 0xC0};
+ static const u32 bitmask[2] = {0x03, 0x0C};
static const struct pci_bits amd_enable_bits[] = {
{ 0x40, 1, 0x02, 0x02 },
{ 0x40, 1, 0x01, 0x01 }
*/
static int nv_pre_reset(struct ata_port *ap) {
- static const u8 bitmask[2] = {0x03, 0xC0};
+ static const u8 bitmask[2] = {0x03, 0x0C};
static const struct pci_bits nv_enable_bits[] = {
{ 0x50, 1, 0x02, 0x02 },
{ 0x50, 1, 0x01, 0x01 }
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations amd33_port_ops = {
return ata_pci_init_one(pdev, port_info, 2);
}
+#ifdef CONFIG_PM
static int amd_reinit_one(struct pci_dev *pdev)
{
if (pdev->vendor == PCI_VENDOR_ID_AMD) {
}
return ata_pci_device_resume(pdev);
}
+#endif
static const struct pci_device_id amd[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
.id_table = amd,
.probe = amd_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = amd_reinit_one,
+#endif
};
static int __init amd_init(void)
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations atiixp_port_ops = {
.id_table = atiixp,
.probe = atiixp_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.resume = ata_pci_device_resume,
.suspend = ata_pci_device_suspend,
+#endif
};
static int __init atiixp_init(void)
/*
- * pata_cmd64x.c - ATI PATA for new ATA layer
+ * pata_cmd64x.c - CMD64x PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations cmd64x_port_ops = {
return ata_pci_init_one(pdev, port_info, 2);
}
+#ifdef CONFIG_PM
static int cmd64x_reinit_one(struct pci_dev *pdev)
{
u8 mrdmode;
#endif
return ata_pci_device_resume(pdev);
}
+#endif
static const struct pci_device_id cmd64x[] = {
{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
.id_table = cmd64x,
.probe = cmd64x_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = cmd64x_reinit_one,
+#endif
};
static int __init cmd64x_init(void)
#include <linux/libata.h>
#define DRV_NAME "pata_cs5520"
-#define DRV_VERSION "0.6.3"
+#define DRV_VERSION "0.6.4"
struct pio_clocks
{
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations cs5520_port_ops = {
struct ata_host *host = dev_get_drvdata(dev);
ata_host_detach(host);
- dev_set_drvdata(dev, NULL);
}
+#ifdef CONFIG_PM
/**
* cs5520_reinit_one - device resume
* @pdev: PCI device
pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
return ata_pci_device_resume(pdev);
}
+
+/**
+ * cs5520_pci_device_suspend - device suspend
+ * @pdev: PCI device
+ *
+ * We have to cut and waste bits from the standard method because
+ * the 5520 is a bit odd and not just a pure ATA device. As a result
+ * we must not disable it. The needed code is short and this avoids
+ * chip specific mess in the core code.
+ */
+
+static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc = 0;
+
+ rc = ata_host_suspend(host, mesg);
+ if (rc)
+ return rc;
+
+ pci_save_state(pdev);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
/* For now keep DMA off. We can set it for all but A rev CS5510 once the
core ATA code can handle it */
.id_table = pata_cs5520,
.probe = cs5520_init_one,
.remove = cs5520_remove_one,
- .suspend = ata_pci_device_suspend,
+#ifdef CONFIG_PM
+ .suspend = cs5520_pci_device_suspend,
.resume = cs5520_reinit_one,
+#endif
};
static int __init cs5520_init(void)
#include <linux/dmi.h>
#define DRV_NAME "pata_cs5530"
-#define DRV_VERSION "0.7.1"
+#define DRV_VERSION "0.7.2"
static void __iomem *cs5530_port_base(struct ata_port *ap)
{
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations cs5530_port_ops = {
return ata_pci_init_one(pdev, port_info, 2);
}
+#ifdef CONFIG_PM
static int cs5530_reinit_one(struct pci_dev *pdev)
{
/* If we fail on resume we are doomed */
BUG();
return ata_pci_device_resume(pdev);
}
+#endif /* CONFIG_PM */
static const struct pci_device_id cs5530[] = {
{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
.id_table = cs5530,
.probe = cs5530_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = cs5530_reinit_one,
+#endif
};
static int __init cs5530_init(void)
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations cs5535_port_ops = {
.id_table = cs5535,
.probe = cs5535_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init cs5535_init(void)
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations cy82c693_port_ops = {
.id_table = cy82c693,
.probe = cy82c693_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init cy82c693_init(void)
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static const struct ata_port_operations efar_ops = {
.id_table = efar_pci_tbl,
.probe = efar_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init efar_init(void)
#include <linux/libata.h>
#define DRV_NAME "pata_hpt366"
-#define DRV_VERSION "0.5.3"
+#define DRV_VERSION "0.6.0"
struct hpt_clock {
u8 xfer_speed;
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
/*
return ata_pci_init_one(dev, port_info, 2);
}
+#ifdef CONFIG_PM
static int hpt36x_reinit_one(struct pci_dev *dev)
{
hpt36x_init_chipset(dev);
return ata_pci_device_resume(dev);
}
-
+#endif
static const struct pci_device_id hpt36x[] = {
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
.id_table = hpt36x,
.probe = hpt36x_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = hpt36x_reinit_one,
+#endif
};
static int __init hpt36x_init(void)
#include <linux/libata.h>
#define DRV_NAME "pata_hpt37x"
-#define DRV_VERSION "0.5.2"
+#define DRV_VERSION "0.6.0"
struct hpt_clock {
u8 xfer_speed;
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations hpt3x3_port_ops = {
return ata_pci_init_one(dev, port_info, 2);
}
+#ifdef CONFIG_PM
static int hpt3x3_reinit_one(struct pci_dev *dev)
{
hpt3x3_init_chipset(dev);
return ata_pci_device_resume(dev);
}
+#endif
static const struct pci_device_id hpt3x3[] = {
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), },
.id_table = hpt3x3,
.probe = hpt3x3_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = hpt3x3_reinit_one,
+#endif
};
static int __init hpt3x3_init(void)
#include <linux/libata.h>
#define DRV_NAME "pata_isapnp"
-#define DRV_VERSION "0.1.5"
+#define DRV_VERSION "0.2.0"
static struct scsi_host_template isapnp_sht = {
.module = THIS_MODULE,
struct ata_host *host = dev_get_drvdata(dev);
ata_host_detach(host);
- dev_set_drvdata(dev, NULL);
}
static struct pnp_device_id isapnp_devices[] = {
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static const struct ata_port_operations it8213_ops = {
.id_table = it8213_pci_tbl,
.probe = it8213_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init it8213_init(void)
#define DRV_NAME "pata_it821x"
-#define DRV_VERSION "0.3.3"
+#define DRV_VERSION "0.3.4"
struct it821x_dev
{
/* We do need the right mode information for DMA or PIO
and this comes from the current configuration flags */
if (dma_enabled & (1 << (5 + i))) {
+ ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
dev->xfer_mode = XFER_MW_DMA_0;
dev->xfer_shift = ATA_SHIFT_MWDMA;
dev->flags &= ~ATA_DFLAG_PIO;
} else {
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations it821x_smart_port_ops = {
return ata_pci_init_one(pdev, port_info, 2);
}
+#ifdef CONFIG_PM
static int it821x_reinit_one(struct pci_dev *pdev)
{
/* Resume - turn raid back off if need be */
it821x_disable_raid(pdev);
return ata_pci_device_resume(pdev);
}
+#endif
static const struct pci_device_id it821x[] = {
{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
.id_table = it821x,
.probe = it821x_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = it821x_reinit_one,
+#endif
};
static int __init it821x_init(void)
#include <scsi/scsi_host.h>
#define DRV_NAME "pata_ixp4xx_cf"
-#define DRV_VERSION "0.1.1ac1"
+#define DRV_VERSION "0.1.2"
-static int ixp4xx_set_mode(struct ata_port *ap, struct ata_device *adev)
+static int ixp4xx_set_mode(struct ata_port *ap, struct ata_device **error)
{
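+ /* the updated ->set_mode() hook reports a failing device through its pointer argument; this driver always succeeds */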
int i;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct ata_device *dev = &ap->device[i];
- if (ata_dev_enabled(dev)) {
+ if (ata_dev_ready(dev)) {
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
dev->pio_mode = XFER_PIO_0;
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
.slave_destroy = ata_scsi_slave_destroy,
/* Use standard CHS mapping rules */
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
+ .suspend = ata_scsi_device_suspend,
+ .resume = ata_scsi_device_resume,
+#endif
};
static const struct ata_port_operations jmicron_ops = {
};
struct ata_port_info *port_info[2] = { &info, &info };
- u32 reg;
-
- /* PATA controller is fn 1, AHCI is fn 0 */
- if (id->driver_data != 368 && PCI_FUNC(pdev->devfn) != 1)
- return -ENODEV;
-
- /* The 365/66 have two PATA channels, redirect the second */
- if (id->driver_data == 365 || id->driver_data == 366) {
- pci_read_config_dword(pdev, 0x80, &reg);
- reg |= (1 << 24); /* IDE1 to PATA IDE secondary */
- pci_write_config_dword(pdev, 0x80, reg);
- }
-
return ata_pci_init_one(pdev, port_info, 2);
}
-static int jmicron_reinit_one(struct pci_dev *pdev)
-{
- u32 reg;
-
- switch(pdev->device) {
- case PCI_DEVICE_ID_JMICRON_JMB368:
- break;
- case PCI_DEVICE_ID_JMICRON_JMB365:
- case PCI_DEVICE_ID_JMICRON_JMB366:
- /* Restore mapping or disks swap and boy does it get ugly */
- pci_read_config_dword(pdev, 0x80, &reg);
- reg |= (1 << 24); /* IDE1 to PATA IDE secondary */
- pci_write_config_dword(pdev, 0x80, reg);
- /* Fall through */
- default:
- /* Make sure AHCI is turned back on */
- pci_write_config_byte(pdev, 0x41, 0xa1);
- }
- return ata_pci_device_resume(pdev);
-}
-
static const struct pci_device_id jmicron_pci_tbl[] = {
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361},
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363},
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB365), 365},
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB366), 366},
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB368), 368},
+ { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 361 },
+ { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 363 },
+ { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 365 },
+ { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 366 },
+ { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 368 },
{ } /* terminate list */
};
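+/*
+ * Matching on the storage-IDE class code means only the PATA function of
+ * these chips binds here; the AHCI/IDE function split and the IDE1 remap
+ * appear to be handled by the JMicron PCI quirk at boot and on resume,
+ * which is why the driver-local resume hook above could be dropped.
+ */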
.id_table = jmicron_pci_tbl,
.probe = jmicron_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
- .resume = jmicron_reinit_one,
+ .resume = ata_pci_device_resume,
+#endif
};
static int __init jmicron_init(void)
#include <linux/platform_device.h>
#define DRV_NAME "pata_legacy"
-#define DRV_VERSION "0.5.3"
+#define DRV_VERSION "0.5.4"
#define NR_HOST 6
static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
-static int legacy_irq[NR_HOST] = { 15, 14, 11, 10, 8, 12 };
+static int legacy_irq[NR_HOST] = { 14, 15, 11, 10, 8, 12 };
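+/* ISA-legacy defaults: the primary port at 0x1f0 uses IRQ 14 and the secondary at 0x170 uses IRQ 15; the old table had the first two swapped */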
struct legacy_data {
unsigned long timing;
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
+ .freeze = ata_bmdma_freeze,
+ .thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
+ .freeze = ata_bmdma_freeze,
+ .thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
+ .freeze = ata_bmdma_freeze,
+ .thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
+ .freeze = ata_bmdma_freeze,
+ .thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
+ .freeze = ata_bmdma_freeze,
+ .thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
+ .freeze = ata_bmdma_freeze,
+ .thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = opti82c46x_qc_issue_prot,
.slave_destroy = ata_scsi_slave_destroy,
/* Use standard CHS mapping rules */
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static const struct ata_port_operations marvell_ops = {
.id_table = marvell_pci_tbl,
.probe = marvell_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init marvell_init(void)
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
+ .suspend = ata_scsi_device_suspend,
+ .resume = ata_scsi_device_resume,
+#endif
};
static struct ata_port_operations mpc52xx_ata_port_ops = {
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations mpiix_port_ops = {
.id_table = mpiix,
.probe = mpiix_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init mpiix_init(void)
.slave_destroy = ata_scsi_slave_destroy,
/* Use standard CHS mapping rules */
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static const struct ata_port_operations netcell_ops = {
.id_table = netcell_pci_tbl,
.probe = netcell_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init netcell_init(void)
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations ns87410_port_ops = {
.id_table = ns87410,
.probe = ns87410_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init ns87410_init(void)
#include <linux/ata.h>
#define DRV_NAME "pata_oldpiix"
-#define DRV_VERSION "0.5.3"
+#define DRV_VERSION "0.5.4"
/**
* oldpiix_pre_reset - probe begin
struct ata_device *adev = qc->dev;
if (adev != ap->private_data) {
+ oldpiix_set_piomode(ap, adev);
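+ /* PIO timings are always reloaded on a device switch; DMA timings are layered on top when a DMA mode is set */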
if (adev->dma_mode)
oldpiix_set_dmamode(ap, adev);
- else if (adev->pio_mode)
- oldpiix_set_piomode(ap, adev);
}
return ata_qc_issue_prot(qc);
}
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static const struct ata_port_operations oldpiix_pata_ops = {
.id_table = oldpiix_pci_tbl,
.probe = oldpiix_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init oldpiix_init(void)
#include <linux/libata.h>
#define DRV_NAME "pata_opti"
-#define DRV_VERSION "0.2.7"
+#define DRV_VERSION "0.2.8"
enum {
READ_REG = 0, /* index of Read cycle timing register */
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations opti_port_ops = {
.id_table = opti,
.probe = opti_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init opti_init(void)
#include <linux/libata.h>
#define DRV_NAME "pata_optidma"
-#define DRV_VERSION "0.2.3"
+#define DRV_VERSION "0.2.4"
enum {
READ_REG = 0, /* index of Read cycle timing register */
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations optidma_port_ops = {
.id_table = optidma,
.probe = optidma_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init optidma_init(void)
#define DRV_NAME "pata_pcmcia"
-#define DRV_VERSION "0.2.11"
+#define DRV_VERSION "0.3.0"
/*
* Private data structure to glue stuff together
static struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_FUNC_ID(4),
PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
+ PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */
+ PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */
PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
- PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401),
+ PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
- PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar */
+ PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */
+ PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */
PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
#include <linux/libata.h>
#define DRV_NAME "pata_pdc2027x"
-#define DRV_VERSION "0.74-ac5"
+#define DRV_VERSION "0.8"
#undef PDC_DEBUG
#ifdef PDC_DEBUG
* pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
+ * (C) 2007 Bartlomiej Zolnierkiewicz
*
* Based in part on linux/drivers/ide/pci/pdc202xx_old.c
*
* First cut with LBA48/ATAPI
*
* TODO:
- * Channel interlock/reset on both required ?
+ * Channel interlock/reset on both required
*/
#include <linux/kernel.h>
#include <linux/libata.h>
#define DRV_NAME "pata_pdc202xx_old"
-#define DRV_VERSION "0.2.3"
+#define DRV_VERSION "0.4.0"
/**
* pdc2024x_pre_reset - probe begin
static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
+ int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
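+ /* timing registers occupy 8 bytes per channel and 4 per device, starting at 0x60 */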
static u16 pio_timing[5] = {
0x0913, 0x050C, 0x0308, 0x0206, 0x0104
};
pci_read_config_byte(pdev, port, &r_ap);
pci_read_config_byte(pdev, port + 1, &r_bp);
r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */
- r_bp &= ~0x07;
+ r_bp &= ~0x1F;
r_ap |= (pio_timing[pio] >> 8);
r_bp |= (pio_timing[pio] & 0xFF);
static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
+ int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
static u8 udma_timing[6][2] = {
{ 0x60, 0x03 }, /* 33 Mhz Clock */
{ 0x40, 0x02 },
{ 0x20, 0x01 },
{ 0x20, 0x01 }
};
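+ /* MWDMA0-2 timing values: byte 0 lands in register B (bits 5-7), byte 1 in register C (bits 0-3) */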
+ static u8 mdma_timing[3][2] = {
+ { 0x60, 0x03 },
+ { 0x60, 0x04 },
+ { 0xe0, 0x0f },
+ };
u8 r_bp, r_cp;
pci_read_config_byte(pdev, port + 1, &r_bp);
pci_read_config_byte(pdev, port + 2, &r_cp);
- r_bp &= ~0xF0;
+ r_bp &= ~0xE0;
r_cp &= ~0x0F;
if (adev->dma_mode >= XFER_UDMA_0) {
} else {
int speed = adev->dma_mode - XFER_MW_DMA_0;
- r_bp |= 0x60;
- r_cp |= (5 - speed);
+ r_bp |= mdma_timing[speed][0];
+ r_cp |= mdma_timing[speed][1];
}
pci_write_config_byte(pdev, port + 1, r_bp);
pci_write_config_byte(pdev, port + 2, r_cp);
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations pdc2024x_port_ops = {
.id_table = pdc202xx,
.probe = pdc202xx_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init pdc202xx_init(void)
dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
}
}
return 0;
struct ata_host *host = dev_get_drvdata(dev);
ata_host_detach(host);
- dev_set_drvdata(dev, NULL);
return 0;
}
#include <linux/platform_device.h>
#define DRV_NAME "pata_qdi"
-#define DRV_VERSION "0.2.4"
+#define DRV_VERSION "0.3.0"
#define NR_HOST 4 /* Two 6580s */
release_region(port, 2);
continue;
}
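+ /* qdi_init_one() returns 0 on success, so count only boards that actually registered */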
- ct += qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04);
+ if (qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
+ ct++;
}
if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
/* QD6580: dual channel */
res = inb(port + 3);
if (res & 1) {
/* Single channel mode */
- ct += qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04);
+ if (qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
+ ct++;
} else {
/* Dual channel mode */
- ct += qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04);
- ct += qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04);
+ if (qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04) == 0)
+ ct++;
+ if (qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04) == 0)
+ ct++;
}
}
}
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static const struct ata_port_operations radisys_pata_ops = {
.id_table = radisys_pci_tbl,
.probe = radisys_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init radisys_init(void)
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
}
}
return 0;
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations rz1000_port_ops = {
return -ENODEV;
}
+#ifdef CONFIG_PM
static int rz1000_reinit_one(struct pci_dev *pdev)
{
/* If this fails on resume (which is a "can't happen" case), we
panic("rz1000 fifo");
return ata_pci_device_resume(pdev);
}
+#endif
static const struct pci_device_id pata_rz1000[] = {
{ PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
.id_table = pata_rz1000,
.probe = rz1000_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = rz1000_reinit_one,
+#endif
};
static int __init rz1000_init(void)
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations sc1200_port_ops = {
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
+ .freeze = ata_bmdma_freeze,
+ .thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.id_table = sc1200,
.probe = sc1200_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init sc1200_init(void)
--- /dev/null
+/*
+ * Support for IDE interfaces on Celleb platform
+ *
+ * (C) Copyright 2006 TOSHIBA CORPORATION
+ *
+ * This code is based on drivers/ata/ata_piix.c:
+ * Copyright 2003-2005 Red Hat Inc
+ * Copyright 2003-2005 Jeff Garzik
+ * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
+ *
+ * and drivers/ata/ahci.c:
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ * and drivers/ata/libata-core.c:
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_scc"
+#define DRV_VERSION "0.1"
+
+#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
+
+/* PCI BARs */
+#define SCC_CTRL_BAR 0
+#define SCC_BMID_BAR 1
+
+/* offset of CTRL registers */
+#define SCC_CTL_PIOSHT 0x000
+#define SCC_CTL_PIOCT 0x004
+#define SCC_CTL_MDMACT 0x008
+#define SCC_CTL_MCRCST 0x00C
+#define SCC_CTL_SDMACT 0x010
+#define SCC_CTL_SCRCST 0x014
+#define SCC_CTL_UDENVT 0x018
+#define SCC_CTL_TDVHSEL 0x020
+#define SCC_CTL_MODEREG 0x024
+#define SCC_CTL_ECMODE 0xF00
+#define SCC_CTL_MAEA0 0xF50
+#define SCC_CTL_MAEC0 0xF54
+#define SCC_CTL_CCKCTRL 0xFF0
+
+/* offset of BMID registers */
+#define SCC_DMA_CMD 0x000
+#define SCC_DMA_STATUS 0x004
+#define SCC_DMA_TABLE_OFS 0x008
+#define SCC_DMA_INTMASK 0x010
+#define SCC_DMA_INTST 0x014
+#define SCC_DMA_PTERADD 0x018
+#define SCC_REG_CMD_ADDR 0x020
+#define SCC_REG_DATA 0x000
+#define SCC_REG_ERR 0x004
+#define SCC_REG_FEATURE 0x004
+#define SCC_REG_NSECT 0x008
+#define SCC_REG_LBAL 0x00C
+#define SCC_REG_LBAM 0x010
+#define SCC_REG_LBAH 0x014
+#define SCC_REG_DEVICE 0x018
+#define SCC_REG_STATUS 0x01C
+#define SCC_REG_CMD 0x01C
+#define SCC_REG_ALTSTATUS 0x020
+
+/* register value */
+#define TDVHSEL_MASTER 0x00000001
+#define TDVHSEL_SLAVE 0x00000004
+
+#define MODE_JCUSFEN 0x00000080
+
+#define ECMODE_VALUE 0x01
+
+#define CCKCTRL_ATARESET 0x00040000
+#define CCKCTRL_BUFCNT 0x00020000
+#define CCKCTRL_CRST 0x00010000
+#define CCKCTRL_OCLKEN 0x00000100
+#define CCKCTRL_ATACLKOEN 0x00000002
+#define CCKCTRL_LCLKEN 0x00000001
+
+#define QCHCD_IOS_SS 0x00000001
+
+#define QCHSD_STPDIAG 0x00020000
+
+#define INTMASK_MSK 0xD1000012
+#define INTSTS_SERROR 0x80000000
+#define INTSTS_PRERR 0x40000000
+#define INTSTS_RERR 0x10000000
+#define INTSTS_ICERR 0x01000000
+#define INTSTS_BMSINT 0x00000010
+#define INTSTS_BMHE 0x00000008
+#define INTSTS_IOIRQS 0x00000004
+#define INTSTS_INTRQ 0x00000002
+#define INTSTS_ACTEINT 0x00000001
+
+
+/* PIO transfer mode table */
+/* JCHST */
+static const unsigned long JCHSTtbl[2][7] = {
+ {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
+ {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */
+};
+
+/* JCHHT */
+static const unsigned long JCHHTtbl[2][7] = {
+ {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
+ {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */
+};
+
+/* JCHCT */
+static const unsigned long JCHCTtbl[2][7] = {
+ {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
+ {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */
+};
+
+/* DMA transfer mode table */
+/* JCHDCTM/JCHDCTS */
+static const unsigned long JCHDCTxtbl[2][7] = {
+ {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
+ {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */
+};
+
+/* JCSTWTM/JCSTWTS */
+static const unsigned long JCSTWTxtbl[2][7] = {
+ {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
+ {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
+};
+
+/* JCTSS */
+static const unsigned long JCTSStbl[2][7] = {
+ {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
+ {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */
+};
+
+/* JCENVT */
+static const unsigned long JCENVTtbl[2][7] = {
+ {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
+ {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
+};
+
+/* JCACTSELS/JCACTSELM */
+static const unsigned long JCACTSELtbl[2][7] = {
+ {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */
+};
+
+static const struct pci_device_id scc_pci_tbl[] = {
+ {PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { } /* terminate list */
+};
+
+/**
+ * scc_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose timings we are configuring
+ *
+ * Set PIO mode for device.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
+ void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
+ void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT;
+ void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT;
+ unsigned long reg;
+ int offset;
+
+ reg = in_be32(cckctrl_port);
+ if (reg & CCKCTRL_ATACLKOEN)
+ offset = 1; /* 133MHz */
+ else
+ offset = 0; /* 100MHz */
+
+ reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
+ out_be32(piosht_port, reg);
+ reg = JCHCTtbl[offset][pio];
+ out_be32(pioct_port, reg);
+}
+
+/**
+ * scc_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose timings we are configuring
+ *
+ * Set UDMA mode for device.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int udma = adev->dma_mode;
+ unsigned int is_slave = (adev->devno != 0);
+ u8 speed = udma;
+ void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
+ void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
+ void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT;
+ void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST;
+ void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT;
+ void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST;
+ void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT;
+ void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL;
+ int offset, idx;
+
+ if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN)
+ offset = 1; /* 133MHz */
+ else
+ offset = 0; /* 100MHz */
+
+ if (speed >= XFER_UDMA_0)
+ idx = speed - XFER_UDMA_0;
+ else
+ return;
+
+ if (is_slave) {
+ out_be32(sdmact_port, JCHDCTxtbl[offset][idx]);
+ out_be32(scrcst_port, JCSTWTxtbl[offset][idx]);
+ out_be32(tdvhsel_port,
+ (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2));
+ } else {
+ out_be32(mdmact_port, JCHDCTxtbl[offset][idx]);
+ out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]);
+ out_be32(tdvhsel_port,
+ (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]);
+ }
+ out_be32(udenvt_port,
+ JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
+}
+
+/**
+ * scc_tf_load - send taskfile registers to host controller
+ * @ap: Port to which output is sent
+ * @tf: ATA taskfile register set
+ *
+ * Note: Original code is ata_tf_load().
+ */
+
+static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ if (tf->ctl != ap->last_ctl) {
+ out_be32(ioaddr->ctl_addr, tf->ctl);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ out_be32(ioaddr->feature_addr, tf->hob_feature);
+ out_be32(ioaddr->nsect_addr, tf->hob_nsect);
+ out_be32(ioaddr->lbal_addr, tf->hob_lbal);
+ out_be32(ioaddr->lbam_addr, tf->hob_lbam);
+ out_be32(ioaddr->lbah_addr, tf->hob_lbah);
+ VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+ tf->hob_feature,
+ tf->hob_nsect,
+ tf->hob_lbal,
+ tf->hob_lbam,
+ tf->hob_lbah);
+ }
+
+ if (is_addr) {
+ out_be32(ioaddr->feature_addr, tf->feature);
+ out_be32(ioaddr->nsect_addr, tf->nsect);
+ out_be32(ioaddr->lbal_addr, tf->lbal);
+ out_be32(ioaddr->lbam_addr, tf->lbam);
+ out_be32(ioaddr->lbah_addr, tf->lbah);
+ VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+ tf->feature,
+ tf->nsect,
+ tf->lbal,
+ tf->lbam,
+ tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE) {
+ out_be32(ioaddr->device_addr, tf->device);
+ VPRINTK("device 0x%X\n", tf->device);
+ }
+
+ ata_wait_idle(ap);
+}
+
+/**
+ * scc_check_status - Read device status reg & clear interrupt
+ * @ap: port where the device is
+ *
+ * Note: Original code is ata_check_status().
+ */
+
+static u8 scc_check_status (struct ata_port *ap)
+{
+ return in_be32(ap->ioaddr.status_addr);
+}
+
+/**
+ * scc_tf_read - input device's ATA taskfile shadow registers
+ * @ap: Port from which input is read
+ * @tf: ATA taskfile register set for storing input
+ *
+ * Note: Original code is ata_tf_read().
+ */
+
+static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ tf->command = scc_check_status(ap);
+ tf->feature = in_be32(ioaddr->error_addr);
+ tf->nsect = in_be32(ioaddr->nsect_addr);
+ tf->lbal = in_be32(ioaddr->lbal_addr);
+ tf->lbam = in_be32(ioaddr->lbam_addr);
+ tf->lbah = in_be32(ioaddr->lbah_addr);
+ tf->device = in_be32(ioaddr->device_addr);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB);
+ tf->hob_feature = in_be32(ioaddr->error_addr);
+ tf->hob_nsect = in_be32(ioaddr->nsect_addr);
+ tf->hob_lbal = in_be32(ioaddr->lbal_addr);
+ tf->hob_lbam = in_be32(ioaddr->lbam_addr);
+ tf->hob_lbah = in_be32(ioaddr->lbah_addr);
+ }
+}
+
+/**
+ * scc_exec_command - issue ATA command to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Note: Original code is ata_exec_command().
+ */
+
+static void scc_exec_command (struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+ out_be32(ap->ioaddr.command_addr, tf->command);
+ ata_pause(ap);
+}
+
+/**
+ * scc_check_altstatus - Read device alternate status reg
+ * @ap: port where the device is
+ */
+
+static u8 scc_check_altstatus (struct ata_port *ap)
+{
+ return in_be32(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ * scc_std_dev_select - Select device 0/1 on ATA bus
+ * @ap: ATA channel to manipulate
+ * @device: ATA device (numbered from zero) to select
+ *
+ * Note: Original code is ata_std_dev_select().
+ */
+
+static void scc_std_dev_select (struct ata_port *ap, unsigned int device)
+{
+ u8 tmp;
+
+ if (device == 0)
+ tmp = ATA_DEVICE_OBS;
+ else
+ tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+ out_be32(ap->ioaddr.device_addr, tmp);
+ ata_pause(ap);
+}
+
+/**
+ * scc_bmdma_setup - Set up PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_setup().
+ */
+
+static void scc_bmdma_setup (struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+ u8 dmactl;
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ /* load PRD table addr */
+ out_be32(mmio + SCC_DMA_TABLE_OFS, ap->prd_dma);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = in_be32(mmio + SCC_DMA_CMD);
+ dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+ if (!rw)
+ dmactl |= ATA_DMA_WR;
+ out_be32(mmio + SCC_DMA_CMD, dmactl);
+
+ /* issue r/w command */
+ ap->ops->exec_command(ap, &qc->tf);
+}
+
+/**
+ * scc_bmdma_start - Start a PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_start().
+ */
+
+static void scc_bmdma_start (struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ u8 dmactl;
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ /* start host DMA transaction */
+ dmactl = in_be32(mmio + SCC_DMA_CMD);
+ out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START);
+}
+
+/**
+ * scc_devchk - PATA device presence detection
+ * @ap: ATA channel to examine
+ * @device: Device to examine (starting at zero)
+ *
+ * Note: Original code is ata_devchk().
+ */
+
+static unsigned int scc_devchk (struct ata_port *ap,
+ unsigned int device)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u8 nsect, lbal;
+
+ ap->ops->dev_select(ap, device);
+
+ out_be32(ioaddr->nsect_addr, 0x55);
+ out_be32(ioaddr->lbal_addr, 0xaa);
+
+ out_be32(ioaddr->nsect_addr, 0xaa);
+ out_be32(ioaddr->lbal_addr, 0x55);
+
+ out_be32(ioaddr->nsect_addr, 0x55);
+ out_be32(ioaddr->lbal_addr, 0xaa);
+
+ nsect = in_be32(ioaddr->nsect_addr);
+ lbal = in_be32(ioaddr->lbal_addr);
+
+ if ((nsect == 0x55) && (lbal == 0xaa))
+ return 1; /* we found a device */
+
+ return 0; /* nothing found */
+}
+
+/**
+ * scc_bus_post_reset - PATA device post reset
+ * @ap: port whose devices are being brought back after reset
+ * @devmask: mask of devices found during probe
+ *
+ * Note: Original code is ata_bus_post_reset().
+ */
+
+static void scc_bus_post_reset (struct ata_port *ap, unsigned int devmask)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int dev0 = devmask & (1 << 0);
+ unsigned int dev1 = devmask & (1 << 1);
+ unsigned long timeout;
+
+ /* if device 0 was found in ata_devchk, wait for its
+ * BSY bit to clear
+ */
+ if (dev0)
+ ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+ /* if device 1 was found in ata_devchk, wait for
+ * register access, then wait for BSY to clear
+ */
+ timeout = jiffies + ATA_TMOUT_BOOT;
+ while (dev1) {
+ u8 nsect, lbal;
+
+ ap->ops->dev_select(ap, 1);
+ nsect = in_be32(ioaddr->nsect_addr);
+ lbal = in_be32(ioaddr->lbal_addr);
+ if ((nsect == 1) && (lbal == 1))
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev1 = 0;
+ break;
+ }
+ msleep(50); /* give drive a breather */
+ }
+ if (dev1)
+ ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+ /* is all this really necessary? */
+ ap->ops->dev_select(ap, 0);
+ if (dev1)
+ ap->ops->dev_select(ap, 1);
+ if (dev0)
+ ap->ops->dev_select(ap, 0);
+}
+
+/**
+ * scc_bus_softreset - PATA device software reset
+ * @ap: port to reset
+ * @devmask: mask of devices present
+ *
+ * Note: Original code is ata_bus_softreset().
+ */
+
+static unsigned int scc_bus_softreset (struct ata_port *ap,
+ unsigned int devmask)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+ /* software reset. causes dev0 to be selected */
+ out_be32(ioaddr->ctl_addr, ap->ctl);
+ udelay(20);
+ out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST);
+ udelay(20);
+ out_be32(ioaddr->ctl_addr, ap->ctl);
+
+ /* spec mandates ">= 2ms" before checking status.
+ * We wait 150ms, because that was the magic delay used for
+ * ATAPI devices in Hale Landis's ATADRVR, for the period of time
+ * between when the ATA command register is written, and then
+ * status is checked. Because waiting for "a while" before
+ * checking status is fine, post SRST, we perform this magic
+ * delay here as well.
+ *
+ * Old drivers/ide uses the 2mS rule and then waits for ready
+ */
+ msleep(150);
+
+ /* Before we perform post reset processing we want to see if
+ * the bus shows 0xFF because the odd clown forgets the D7
+ * pulldown resistor.
+ */
+ if (scc_check_status(ap) == 0xFF)
+ return 0;
+
+ scc_bus_post_reset(ap, devmask);
+
+ return 0;
+}
+
+/**
+ * scc_std_softreset - reset host port via ATA SRST
+ * @ap: port to reset
+ * @classes: resulting classes of attached devices
+ *
+ * Note: Original code is ata_std_softreset().
+ */
+
+static int scc_std_softreset (struct ata_port *ap, unsigned int *classes)
+{
+ unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+ unsigned int devmask = 0, err_mask;
+ u8 err;
+
+ DPRINTK("ENTER\n");
+
+ if (ata_port_offline(ap)) {
+ classes[0] = ATA_DEV_NONE;
+ goto out;
+ }
+
+ /* determine if device 0/1 are present */
+ if (scc_devchk(ap, 0))
+ devmask |= (1 << 0);
+ if (slave_possible && scc_devchk(ap, 1))
+ devmask |= (1 << 1);
+
+ /* select device 0 again */
+ ap->ops->dev_select(ap, 0);
+
+ /* issue bus reset */
+ DPRINTK("about to softreset, devmask=%x\n", devmask);
+ err_mask = scc_bus_softreset(ap, devmask);
+ if (err_mask) {
+ ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
+ err_mask);
+ return -EIO;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_dev_try_classify(ap, 0, &err);
+ if (slave_possible && err != 0x81)
+ classes[1] = ata_dev_try_classify(ap, 1, &err);
+
+ out:
+ DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
+ return 0;
+}
+
+/**
+ * scc_bmdma_stop - Stop PCI IDE BMDMA transfer
+ * @qc: Command we are ending DMA for
+ */
+
+static void scc_bmdma_stop (struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
+ void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR];
+ u32 reg;
+
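+ /* Acknowledge every pending interrupt cause, stopping the DMA engine on error conditions, until the status register reads clean. */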
+ while (1) {
+ reg = in_be32(bmid_base + SCC_DMA_INTST);
+
+ if (reg & INTSTS_SERROR) {
+ printk(KERN_WARNING "%s: SERROR\n", DRV_NAME);
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT);
+ out_be32(bmid_base + SCC_DMA_CMD,
+ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+ continue;
+ }
+
+ if (reg & INTSTS_PRERR) {
+ u32 maea0, maec0;
+ maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0);
+ maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0);
+ printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0);
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT);
+ out_be32(bmid_base + SCC_DMA_CMD,
+ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+ continue;
+ }
+
+ if (reg & INTSTS_RERR) {
+ printk(KERN_WARNING "%s: Response Error\n", DRV_NAME);
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT);
+ out_be32(bmid_base + SCC_DMA_CMD,
+ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+ continue;
+ }
+
+ if (reg & INTSTS_ICERR) {
+ out_be32(bmid_base + SCC_DMA_CMD,
+ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+ printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME);
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT);
+ continue;
+ }
+
+ if (reg & INTSTS_BMSINT) {
+ unsigned int classes;
+ printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
+ /* TBD: SW reset */
+ scc_std_softreset(ap, &classes);
+ continue;
+ }
+
+ if (reg & INTSTS_BMHE) {
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE);
+ continue;
+ }
+
+ if (reg & INTSTS_ACTEINT) {
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT);
+ continue;
+ }
+
+ if (reg & INTSTS_IOIRQS) {
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS);
+ continue;
+ }
+ break;
+ }
+
+ /* clear start/stop bit */
+ out_be32(bmid_base + SCC_DMA_CMD,
+ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_altstatus(ap); /* dummy read */
+}
+
+/**
+ * scc_bmdma_status - Read PCI IDE BMDMA status
+ * @ap: Port associated with this ATA transaction.
+ */
+
+static u8 scc_bmdma_status (struct ata_port *ap)
+{
+ u8 host_stat;
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ host_stat = in_be32(mmio + SCC_DMA_STATUS);
+
+ /* Workaround for PTERADD: emulate DMA_INTR when
+ * - IDE_STATUS[ERR] = 1
+ * - INT_STATUS[INTRQ] = 1
+ * - DMA_STATUS[IORACTA] = 1
+ */
+ if (!(host_stat & ATA_DMA_INTR)) {
+ u32 int_status = in_be32(mmio + SCC_DMA_INTST);
+ if (ata_altstatus(ap) & ATA_ERR &&
+ int_status & INTSTS_INTRQ &&
+ host_stat & ATA_DMA_ACTIVE)
+ host_stat |= ATA_DMA_INTR;
+ }
+
+ return host_stat;
+}
+
+/**
+ * scc_data_xfer - Transfer data by PIO
+ * @adev: device for this I/O
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @write_data: non-zero to write, zero to read
+ *
+ * Note: Original code is ata_data_xfer().
+ */
+
+static void scc_data_xfer (struct ata_device *adev, unsigned char *buf,
+ unsigned int buflen, int write_data)
+{
+ struct ata_port *ap = adev->ap;
+ unsigned int words = buflen >> 1;
+ unsigned int i;
+ u16 *buf16 = (u16 *) buf;
+ void __iomem *mmio = ap->ioaddr.data_addr;
+
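+ /* The data register is accessed through 32-bit big-endian MMIO but carries 16-bit ATA data, hence the le16 conversions around in_be32/out_be32. */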
+ /* Transfer multiple of 2 bytes */
+ if (write_data) {
+ for (i = 0; i < words; i++)
+ out_be32(mmio, cpu_to_le16(buf16[i]));
+ } else {
+ for (i = 0; i < words; i++)
+ buf16[i] = le16_to_cpu(in_be32(mmio));
+ }
+
+ /* Transfer trailing 1 byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ u16 align_buf[1] = { 0 };
+ unsigned char *trailing_buf = buf + buflen - 1;
+
+ if (write_data) {
+ memcpy(align_buf, trailing_buf, 1);
+ out_be32(mmio, cpu_to_le16(align_buf[0]));
+ } else {
+ align_buf[0] = le16_to_cpu(in_be32(mmio));
+ memcpy(trailing_buf, align_buf, 1);
+ }
+ }
+}
+
+/**
+ * scc_irq_on - Enable interrupts on a port.
+ * @ap: Port on which interrupts are enabled.
+ *
+ * Note: Original code is ata_irq_on().
+ */
+
+static u8 scc_irq_on (struct ata_port *ap)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u8 tmp;
+
+ ap->ctl &= ~ATA_NIEN;
+ ap->last_ctl = ap->ctl;
+
+ out_be32(ioaddr->ctl_addr, ap->ctl);
+ tmp = ata_wait_idle(ap);
+
+ ap->ops->irq_clear(ap);
+
+ return tmp;
+}
+
+/**
+ * scc_irq_ack - Acknowledge a device interrupt.
+ * @ap: Port on which interrupts are enabled.
+ *
+ * Note: Original code is ata_irq_ack().
+ */
+
+static u8 scc_irq_ack (struct ata_port *ap, unsigned int chk_drq)
+{
+ unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
+ u8 host_stat, post_stat, status;
+
+ status = ata_busy_wait(ap, bits, 1000);
+ if (status & bits)
+ if (ata_msg_err(ap))
+ printk(KERN_ERR "abnormal status 0x%X\n", status);
+
+ /* get controller status; clear intr, err bits */
+ host_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS);
+ out_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS,
+ host_stat | ATA_DMA_INTR | ATA_DMA_ERR);
+
+ post_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS);
+
+ if (ata_msg_intr(ap))
+ printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
+ __FUNCTION__,
+ host_stat, post_stat, status);
+
+ return status;
+}
+
+/**
+ * scc_bmdma_freeze - Freeze BMDMA controller port
+ * @ap: port to freeze
+ *
+ * Note: Original code is ata_bmdma_freeze().
+ */
+
+static void scc_bmdma_freeze (struct ata_port *ap)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ ap->ctl |= ATA_NIEN;
+ ap->last_ctl = ap->ctl;
+
+ out_be32(ioaddr->ctl_addr, ap->ctl);
+
+ /* Under certain circumstances, some controllers raise IRQ on
+ * ATA_NIEN manipulation. Also, many controllers fail to mask
+ * previously pending IRQ on ATA_NIEN assertion. Clear it.
+ */
+ ata_chk_status(ap);
+
+ ap->ops->irq_clear(ap);
+}
+
+/**
+ * scc_pata_prereset - prepare for reset
+ * @ap: ATA port to be reset
+ */
+
+static int scc_pata_prereset (struct ata_port *ap)
+{
+ ap->cbl = ATA_CBL_PATA80;
+ return ata_std_prereset(ap);
+}
+
+/**
+ * scc_std_postreset - standard postreset callback
+ * @ap: the target ata_port
+ * @classes: classes of attached devices
+ *
+ * Note: Original code is ata_std_postreset().
+ */
+
+static void scc_std_postreset (struct ata_port *ap, unsigned int *classes)
+{
+ DPRINTK("ENTER\n");
+
+ /* re-enable interrupts */
+ if (!ap->ops->error_handler)
+ ap->ops->irq_on(ap);
+
+ /* is double-select really necessary? */
+ if (classes[0] != ATA_DEV_NONE)
+ ap->ops->dev_select(ap, 1);
+ if (classes[1] != ATA_DEV_NONE)
+ ap->ops->dev_select(ap, 0);
+
+ /* bail out if no device is present */
+ if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+ DPRINTK("EXIT, no device\n");
+ return;
+ }
+
+ /* set up device control */
+ if (ap->ioaddr.ctl_addr)
+ out_be32(ap->ioaddr.ctl_addr, ap->ctl);
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * scc_error_handler - Stock error handler for BMDMA controller
+ * @ap: port to handle error for
+ */
+
+static void scc_error_handler (struct ata_port *ap)
+{
+ ata_bmdma_drive_eh(ap, scc_pata_prereset, scc_std_softreset, NULL,
+ scc_std_postreset);
+}
+
+/**
+ * scc_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_irq_clear().
+ */
+
+static void scc_bmdma_irq_clear (struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ if (!mmio)
+ return;
+
+ out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS));
+}
+
+/**
+ * scc_port_start - Set port up for dma.
+ * @ap: Port to initialize
+ *
+ * Allocate space for PRD table using ata_port_start().
+ * Set PRD table address for PTERADD. (PRD Transfer End Read)
+ */
+
+static int scc_port_start (struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+ int rc;
+
+ rc = ata_port_start(ap);
+ if (rc)
+ return rc;
+
+ out_be32(mmio + SCC_DMA_PTERADD, ap->prd_dma);
+ return 0;
+}
+
+/**
+ * scc_port_stop - Undo scc_port_start()
+ * @ap: Port to shut down
+ *
+ * Reset PTERADD.
+ */
+
+static void scc_port_stop (struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ out_be32(mmio + SCC_DMA_PTERADD, 0);
+}
+
+static struct scsi_host_template scc_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .ioctl = ata_scsi_ioctl,
+ .queuecommand = ata_scsi_queuecmd,
+ .can_queue = ATA_DEF_QUEUE,
+ .this_id = ATA_SHT_THIS_ID,
+ .sg_tablesize = LIBATA_MAX_PRD,
+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
+ .emulated = ATA_SHT_EMULATED,
+ .use_clustering = ATA_SHT_USE_CLUSTERING,
+ .proc_name = DRV_NAME,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ .slave_configure = ata_scsi_slave_config,
+ .slave_destroy = ata_scsi_slave_destroy,
+ .bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
+ .resume = ata_scsi_device_resume,
+ .suspend = ata_scsi_device_suspend,
+#endif
+};
+
+static const struct ata_port_operations scc_pata_ops = {
+ .port_disable = ata_port_disable,
+ .set_piomode = scc_set_piomode,
+ .set_dmamode = scc_set_dmamode,
+ .mode_filter = ata_pci_default_filter,
+
+ .tf_load = scc_tf_load,
+ .tf_read = scc_tf_read,
+ .exec_command = scc_exec_command,
+ .check_status = scc_check_status,
+ .check_altstatus = scc_check_altstatus,
+ .dev_select = scc_std_dev_select,
+
+ .bmdma_setup = scc_bmdma_setup,
+ .bmdma_start = scc_bmdma_start,
+ .bmdma_stop = scc_bmdma_stop,
+ .bmdma_status = scc_bmdma_status,
+ .data_xfer = scc_data_xfer,
+
+ .qc_prep = ata_qc_prep,
+ .qc_issue = ata_qc_issue_prot,
+
+ .freeze = scc_bmdma_freeze,
+ .error_handler = scc_error_handler,
+ .post_internal_cmd = scc_bmdma_stop,
+
+ .irq_handler = ata_interrupt,
+ .irq_clear = scc_bmdma_irq_clear,
+ .irq_on = scc_irq_on,
+ .irq_ack = scc_irq_ack,
+
+ .port_start = scc_port_start,
+ .port_stop = scc_port_stop,
+};
+
+static struct ata_port_info scc_port_info[] = {
+ {
+ .sht = &scc_sht,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x00,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &scc_pata_ops,
+ },
+};
+
+/**
+ * scc_reset_controller - initialize SCC PATA controller.
+ * @probe_ent: probe entry holding the mapped BAR table
+ */
+
+static int scc_reset_controller(struct ata_probe_ent *probe_ent)
+{
+ void __iomem *ctrl_base = probe_ent->iomap[SCC_CTRL_BAR];
+ void __iomem *bmid_base = probe_ent->iomap[SCC_BMID_BAR];
+ void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
+ void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG;
+ void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE;
+ void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK;
+ void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS;
+ u32 reg = 0;
+
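+ /* Sequence the clock controller: clear it, enable the ATA clock output, then the local/output clocks, then set CRST and poll until it reads back. */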
+ out_be32(cckctrl_port, reg);
+ reg |= CCKCTRL_ATACLKOEN;
+ out_be32(cckctrl_port, reg);
+ reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
+ out_be32(cckctrl_port, reg);
+ reg |= CCKCTRL_CRST;
+ out_be32(cckctrl_port, reg);
+
+ for (;;) {
+ reg = in_be32(cckctrl_port);
+ if (reg & CCKCTRL_CRST)
+ break;
+ udelay(5000);
+ }
+
+ reg |= CCKCTRL_ATARESET;
+ out_be32(cckctrl_port, reg);
+ out_be32(ecmode_port, ECMODE_VALUE);
+ out_be32(mode_port, MODE_JCUSFEN);
+ out_be32(intmask_port, INTMASK_MSK);
+
+ if (in_be32(dmastatus_port) & QCHSD_STPDIAG) {
+ printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * scc_setup_ports - initialize ioaddr with SCC PATA port offsets.
+ * @ioaddr: IO address structure to be initialized
+ * @base: base address of BMID region
+ */
+
+static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base)
+{
+ ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR;
+ ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
+ ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
+ ioaddr->bmdma_addr = base;
+ ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA;
+ ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR;
+ ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE;
+ ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT;
+ ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL;
+ ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM;
+ ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH;
+ ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE;
+ ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS;
+ ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD;
+}
+
+static int scc_host_init(struct ata_probe_ent *probe_ent)
+{
+ struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
+ int rc;
+
+ rc = scc_reset_controller(probe_ent);
+ if (rc)
+ return rc;
+
+ probe_ent->n_ports = 1;
+
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ scc_setup_ports(&probe_ent->port[0], probe_ent->iomap[SCC_BMID_BAR]);
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
+/**
+ * scc_init_one - Register SCC PATA device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in scc_pci_tbl matching with @pdev
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ unsigned int board_idx = (unsigned int) ent->driver_data;
+ struct device *dev = &pdev->dev;
+ struct ata_probe_ent *probe_ent;
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+
+ probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
+ if (!probe_ent)
+ return -ENOMEM;
+
+ probe_ent->dev = dev;
+ INIT_LIST_HEAD(&probe_ent->node);
+
+ probe_ent->sht = scc_port_info[board_idx].sht;
+ probe_ent->port_flags = scc_port_info[board_idx].flags;
+ probe_ent->pio_mask = scc_port_info[board_idx].pio_mask;
+ probe_ent->udma_mask = scc_port_info[board_idx].udma_mask;
+ probe_ent->port_ops = scc_port_info[board_idx].port_ops;
+
+ probe_ent->irq = pdev->irq;
+ probe_ent->irq_flags = IRQF_SHARED;
+ probe_ent->iomap = pcim_iomap_table(pdev);
+
+ rc = scc_host_init(probe_ent);
+ if (rc)
+ return rc;
+
+ if (!ata_device_add(probe_ent))
+ return -ENODEV;
+
+ devm_kfree(dev, probe_ent);
+ return 0;
+}
+
+static struct pci_driver scc_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = scc_pci_tbl,
+ .probe = scc_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init scc_init (void)
+{
+ int rc;
+
+ DPRINTK("pci_register_driver\n");
+ rc = pci_register_driver(&scc_pci_driver);
+ if (rc)
+ return rc;
+
+ DPRINTK("done\n");
+ return 0;
+}
+
+static void __exit scc_exit (void)
+{
+ pci_unregister_driver(&scc_pci_driver);
+}
+
+module_init(scc_init);
+module_exit(scc_exit);
+
+MODULE_AUTHOR("Toshiba corp");
+MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
#include <linux/libata.h>
#define DRV_NAME "pata_serverworks"
-#define DRV_VERSION "0.3.9"
+#define DRV_VERSION "0.4.0"
#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations serverworks_osb4_port_ops = {
return ata_pci_init_one(pdev, port_info, ports);
}
+#ifdef CONFIG_PM
static int serverworks_reinit_one(struct pci_dev *pdev)
{
/* Force master latency timer to 64 PCI clocks */
}
return ata_pci_device_resume(pdev);
}
+#endif
static const struct pci_device_id serverworks[] = {
{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
.id_table = serverworks,
.probe = serverworks_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = serverworks_reinit_one,
+#endif
};
static int __init serverworks_init(void)
#include <linux/libata.h>
#define DRV_NAME "pata_sil680"
-#define DRV_VERSION "0.4.1"
+#define DRV_VERSION "0.4.5"
/**
* sil680_selreg - return register base
unsigned long tfaddr = sil680_selreg(ap, 0x02);
unsigned long addr = sil680_seldev(ap, adev, 0x04);
+ unsigned long addr_mask = 0x80 + 4 * ap->port_no;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int pio = adev->pio_mode - XFER_PIO_0;
int lowest_pio = pio;
+ int port_shift = 4 * adev->devno;
u16 reg;
+ u8 mode;
struct ata_device *pair = ata_dev_pair(adev);
pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]);
pci_read_config_word(pdev, tfaddr-2, &reg);
+ pci_read_config_byte(pdev, addr_mask, &mode);
+
reg &= ~0x0200; /* Clear IORDY */
- if (ata_pio_need_iordy(adev))
+ mode &= ~(3 << port_shift); /* Clear IORDY and DMA bits */
+
+ if (ata_pio_need_iordy(adev)) {
reg |= 0x0200; /* Enable IORDY */
+ mode |= 1 << port_shift;
+ }
pci_write_config_word(pdev, tfaddr-2, reg);
+ pci_write_config_byte(pdev, addr_mask, mode);
}
/**
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
+ .suspend = ata_scsi_device_suspend,
+ .resume = ata_scsi_device_resume,
+#endif
};
static struct ata_port_operations sil680_port_ops = {
return ata_pci_init_one(pdev, port_info, 2);
}
+#ifdef CONFIG_PM
static int sil680_reinit_one(struct pci_dev *pdev)
{
sil680_init_chip(pdev);
return ata_pci_device_resume(pdev);
}
+#endif
static const struct pci_device_id sil680[] = {
{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
.id_table = sil680,
.probe = sil680_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = sil680_reinit_one,
+#endif
};
static int __init sil680_init(void)
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
-#include "libata.h"
+#include "sis.h"
-#undef DRV_NAME /* already defined in libata.h, for libata-core */
#define DRV_NAME "pata_sis"
-#define DRV_VERSION "0.4.5"
+#define DRV_VERSION "0.5.0"
struct sis_chipset {
u16 device; /* PCI host ID */
if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) {
ata_port_disable(ap);
- printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
+ ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
return 0;
}
/* Older chips keep cable detect in bits 4/5 of reg 0x48 */
if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) {
ata_port_disable(ap);
- printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
+ ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
return 0;
}
ap->cbl = ATA_CBL_PATA40;
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static const struct ata_port_operations sis_133_ops = {
.id_table = sis_pci_tbl,
.probe = sis_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init sis_init(void)
* SL82C105/Winbond 553 IDE driver
*
* and in part on the documentation and errata sheet
+ *
+ *
+ * Note: The controller, like many others, shares timings for
+ * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
+ * in the dma_stop function. As a result we don't need a set_dmamode
+ * method: the PIO method is always called and sets the right PIO
+ * timing parameters.
*/
#include <linux/kernel.h>
#include <linux/libata.h>
#define DRV_NAME "pata_sl82c105"
-#define DRV_VERSION "0.2.3"
+#define DRV_VERSION "0.3.0"
enum {
/*
pci_read_config_word(pdev, timing, &dummy);
}
-/**
- * sl82c105_set_dmamode - set initial DMA mode data
- * @ap: ATA interface
- * @adev: ATA device
- *
- * Called to do the DMA mode setup. This replaces the PIO timings
- * for the device in question. Set appropriate PIO timings not DMA
- * timings at this point.
- */
-
-static void sl82c105_set_dmamode(struct ata_port *ap, struct ata_device *adev)
-{
- switch(adev->dma_mode) {
- case XFER_MW_DMA_0:
- sl82c105_configure_piomode(ap, adev, 0);
- break;
- case XFER_MW_DMA_1:
- sl82c105_configure_piomode(ap, adev, 3);
- break;
- case XFER_MW_DMA_2:
- sl82c105_configure_piomode(ap, adev, 4);
- break;
- default:
- BUG();
- }
-}
-
/**
* sl82c105_reset_engine - Reset the DMA engine
* @ap: ATA interface
/* This will redo the initial setup of the DMA device to matching
PIO timings */
- sl82c105_set_dmamode(ap, qc->dev);
+ sl82c105_set_piomode(ap, qc->dev);
}
static struct scsi_host_template sl82c105_sht = {
static struct ata_port_operations sl82c105_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = sl82c105_set_piomode,
- .set_dmamode = sl82c105_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
+ .freeze = ata_bmdma_freeze,
+ .thaw = ata_bmdma_thaw,
.error_handler = sl82c105_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = sl82c105_bmdma_start,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations triflex_port_ops = {
.id_table = triflex,
.probe = triflex_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
+#endif
};
static int __init triflex_init(void)
ap->cbl = ATA_CBL_PATA40;
else
ap->cbl = ATA_CBL_PATA_UNK;
-
+
return ata_std_prereset(ap);
}
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
+#endif
};
static struct ata_port_operations via_port_ops = {
return ata_pci_init_one(pdev, port_info, 2);
}
+#ifdef CONFIG_PM
/**
* via_reinit_one - reinit after resume
* @pdev: PCI device
}
return ata_pci_device_resume(pdev);
}
+#endif
static const struct pci_device_id via[] = {
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), },
.id_table = via,
.probe = via_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = via_reinit_one,
+#endif
};
static int __init via_init(void)
#include <linux/platform_device.h>
#define DRV_NAME "pata_winbond"
-#define DRV_VERSION "0.0.1"
+#define DRV_VERSION "0.0.2"
#define NR_HOST 4 /* Two winbond controllers, two channels each */
#include <linux/libata.h>
#define DRV_NAME "pdc_adma"
-#define DRV_VERSION "0.04"
+#define DRV_VERSION "0.05"
/* macro to calculate base address for ATA regs */
#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
if ((status & ATA_BUSY))
continue;
DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
- ap->id, qc->tf.protocol, status);
+ ap->print_id, qc->tf.protocol, status);
/* complete taskfile transaction */
pp->state = adma_state_idle;
.slave_configure = inic_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.suspend = ata_scsi_device_suspend,
.resume = ata_scsi_device_resume,
+#endif
};
static const int scr_map[] = {
return 0;
}
+#ifdef CONFIG_PM
static int inic_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
ata_pci_device_do_resume(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- printk("XXX\n");
rc = init_controller(mmio_base, hpriv->cached_hctl);
if (rc)
return rc;
return 0;
}
+#endif
static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static struct pci_driver inic_pci_driver = {
.name = DRV_NAME,
.id_table = inic_pci_tbl,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = inic_pci_device_resume,
+#endif
.probe = inic_init_one,
.remove = ata_pci_remove_one,
};
#include <linux/libata.h>
#define DRV_NAME "sata_mv"
-#define DRV_VERSION "0.7"
+#define DRV_VERSION "0.8"
enum {
/* BAR's are enumerated in terms of pci_resource_start() terms */
PCI_ERR = (1 << 18),
TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
+ PORTS_0_3_COAL_DONE = (1 << 8),
+ PORTS_4_7_COAL_DONE = (1 << 17),
PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
GPIO_INT = (1 << 22),
SELF_INT = (1 << 23),
TWSI_INT = (1 << 24),
HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
+ HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
HC_MAIN_RSVD),
+ HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
+ HC_MAIN_RSVD_5),
/* SATAHC registers */
HC_CFG_OFS = 0,
u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
/* set up non-NCQ EDMA configuration */
- cfg &= ~0x1f; /* clear queue depth */
- cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
cfg &= ~(1 << 9); /* disable equeue */
- if (IS_GEN_I(hpriv))
+ if (IS_GEN_I(hpriv)) {
+ cfg &= ~0x1f; /* clear queue depth */
cfg |= (1 << 8); /* enab config burst size mask */
+ }
- else if (IS_GEN_II(hpriv))
+ else if (IS_GEN_II(hpriv)) {
+ cfg &= ~0x1f; /* clear queue depth */
cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
+ cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
+ }
else if (IS_GEN_IIE(hpriv)) {
- cfg |= (1 << 23); /* dis RX PM port mask */
- cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
+ cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
+ cfg |= (1 << 22); /* enab 4-entry host queue cache */
cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
cfg |= (1 << 18); /* enab early completion */
- cfg |= (1 << 17); /* enab host q cache */
- cfg |= (1 << 22); /* enab cutthrough */
+ cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
+ cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
+ cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
}
writelfl(cfg, port_mmio + EDMA_CFG_OFS);
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
}
DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
- "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
+ "SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);
/* Clear EDMA now that SERR cleanup done */
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
/* unused: */
- port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
+ port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
/* Clear any currently outstanding port interrupt conditions */
serr_ofs = mv_scr_offset(SCR_ERROR);
/* and unmask interrupt generation for host regs */
writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
- writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
+
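+ /* 50xx parts have a different main IRQ mask layout */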
+ if (IS_50XX(hpriv))
+ writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
+ else
+ writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
"PCI int cause/mask=0x%08x/0x%08x\n",
return rc;
/* Enable interrupts */
- if (msi && !pci_enable_msi(pdev))
+ if (msi && pci_enable_msi(pdev))
pci_intx(pdev, 1);
mv_dump_pci_cfg(pdev, 0x68);
void __iomem * gen_block;
void __iomem * notifier_clear_block;
u8 flags;
+ int last_issue_ncq;
};
struct nv_host_priv {
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
+#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
+#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
+#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
+#endif
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
-static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
-static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
-static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
-static u8 nv_adma_bmdma_status(struct ata_port *ap);
+static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
enum nv_host_type
{
.name = DRV_NAME,
.id_table = nv_pci_tbl,
.probe = nv_init_one,
+#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = nv_pci_device_resume,
+#endif
.remove = nv_remove_one,
};
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.suspend = ata_scsi_device_suspend,
.resume = ata_scsi_device_resume,
+#endif
};
static struct scsi_host_template nv_adma_sht = {
.slave_configure = nv_adma_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.suspend = ata_scsi_device_suspend,
.resume = ata_scsi_device_resume,
+#endif
};
static const struct ata_port_operations nv_generic_ops = {
.exec_command = ata_exec_command,
.check_status = ata_check_status,
.dev_select = ata_std_dev_select,
- .bmdma_setup = nv_adma_bmdma_setup,
- .bmdma_start = nv_adma_bmdma_start,
- .bmdma_stop = nv_adma_bmdma_stop,
- .bmdma_status = nv_adma_bmdma_status,
+ .bmdma_setup = ata_bmdma_setup,
+ .bmdma_start = ata_bmdma_start,
+ .bmdma_stop = ata_bmdma_stop,
+ .bmdma_status = ata_bmdma_status,
.qc_prep = nv_adma_qc_prep,
.qc_issue = nv_adma_qc_issue,
.freeze = nv_ck804_freeze,
.thaw = nv_ck804_thaw,
.error_handler = nv_adma_error_handler,
- .post_internal_cmd = nv_adma_bmdma_stop,
+ .post_internal_cmd = nv_adma_post_internal_cmd,
.data_xfer = ata_data_xfer,
.irq_handler = nv_adma_interrupt,
.irq_clear = nv_adma_irq_clear,
.scr_write = nv_scr_write,
.port_start = nv_adma_port_start,
.port_stop = nv_adma_port_stop,
+#ifdef CONFIG_PM
.port_suspend = nv_adma_port_suspend,
.port_resume = nv_adma_port_resume,
+#endif
.host_stop = nv_adma_host_stop,
};
{
unsigned int idx = 0;
- cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
-
- if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
- cpb[idx++] = cpu_to_le16(IGN);
- cpb[idx++] = cpu_to_le16(IGN);
- cpb[idx++] = cpu_to_le16(IGN);
- cpb[idx++] = cpu_to_le16(IGN);
- cpb[idx++] = cpu_to_le16(IGN);
- }
- else {
- cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
- cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
- cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
- cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
- cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
+ if(tf->flags & ATA_TFLAG_ISADDR) {
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
+ cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
+ cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
+ } else
+ cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
+
+ cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
}
- cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
- cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
- cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
- cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
- cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
+
+ if(tf->flags & ATA_TFLAG_DEVICE)
+ cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
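+ /* pad the rest of the 12-word CPB with "ignore" entries */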
+ while(idx < 12)
+ cpb[idx++] = cpu_to_le16(IGN);
+
return idx;
}
DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
qc->err_mask);
ata_qc_complete(qc);
+ } else {
+ struct ata_eh_info *ehi = &ap->eh_info;
+ /* Notifier bits set without a command may indicate the drive
+ is misbehaving. Raise host state machine violation on this
+ condition. */
+ ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
+ cpb_num);
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_SOFTRESET;
+ ata_port_freeze(ap);
+ return 1;
}
}
return 0;
if (status & (NV_ADMA_STAT_DONE |
NV_ADMA_STAT_CPBERR)) {
+ u32 check_commands = notifier | notifier_error;
+ int pos, error = 0;
/** Check CPBs for completed commands */
-
- if (ata_tag_valid(ap->active_tag)) {
- /* Non-NCQ command */
- nv_adma_check_cpb(ap, ap->active_tag,
- notifier_error & (1 << ap->active_tag));
- } else {
- int pos, error = 0;
- u32 active = ap->sactive;
-
- while ((pos = ffs(active)) && !error) {
- pos--;
- error = nv_adma_check_cpb(ap, pos,
- notifier_error & (1 << pos) );
- active &= ~(1 << pos );
- }
+ while ((pos = ffs(check_commands)) && !error) {
+ pos--;
+ error = nv_adma_check_cpb(ap, pos,
+ notifier_error & (1 << pos) );
+ check_commands &= ~(1 << pos );
}
}
}
iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}
-static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
- struct nv_adma_port_priv *pp = ap->private_data;
- u8 dmactl;
-
- if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
- WARN_ON(1);
- return;
- }
-
- /* load PRD table addr. */
- iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
-
- /* specify data direction, triple-check start bit is clear */
- dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
- dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
- if (!rw)
- dmactl |= ATA_DMA_WR;
-
- iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
- /* issue r/w command */
- ata_exec_command(ap, &qc->tf);
-}
-
-static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct nv_adma_port_priv *pp = ap->private_data;
- u8 dmactl;
-
- if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
- WARN_ON(1);
- return;
- }
-
- /* start host DMA transaction */
- dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
- iowrite8(dmactl | ATA_DMA_START,
- ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-}
-
-static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct nv_adma_port_priv *pp = ap->private_data;
-
- if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
- return;
-
- /* clear start/stop bit */
- iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
- ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
- /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_altstatus(ap); /* dummy read */
-}
-
-static u8 nv_adma_bmdma_status(struct ata_port *ap)
+static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
- struct nv_adma_port_priv *pp = ap->private_data;
-
- WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
+ struct nv_adma_port_priv *pp = qc->ap->private_data;
- return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+ ata_bmdma_post_internal_cmd(qc);
}
static int nv_adma_port_start(struct ata_port *ap)
/* clear GO for register mode, enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
- writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
+ writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+ NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readl( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw( mmio + NV_ADMA_CTL ); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readl( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw( mmio + NV_ADMA_CTL ); /* flush posted write */
return 0;
}
writew(0, mmio + NV_ADMA_CTL);
}
+#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
struct nv_adma_port_priv *pp = ap->private_data;
/* clear GO for register mode, enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
- writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
+ writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+ NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readl( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw( mmio + NV_ADMA_CTL ); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readl( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw( mmio + NV_ADMA_CTL ); /* flush posted write */
return 0;
}
+#endif
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
int idx,
struct nv_adma_prd *aprd)
{
- u8 flags;
-
- memset(aprd, 0, sizeof(struct nv_adma_prd));
-
- flags = 0;
+ u8 flags = 0;
if (qc->tf.flags & ATA_TFLAG_WRITE)
flags |= NV_APRD_WRITE;
if (idx == qc->n_elem - 1)
aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
aprd->flags = flags;
+ aprd->packet_len = 0;
}
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
}
if (idx > 5)
cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
+ else
+ cpb->next_aprd = cpu_to_le64(0);
}
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
return;
}
- memset(cpb, 0, sizeof(struct nv_adma_cpb));
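+ /* Invalidate the old CPB contents in an order the hardware can
+ tolerate: flag it done, barrier, then clear the control flags. */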
+ cpb->resp_flags = NV_CPB_RESP_DONE;
+ wmb();
+ cpb->ctl_flags = 0;
+ wmb();
cpb->len = 3;
cpb->tag = qc->tag;
finished filling in all of the contents */
wmb();
cpb->ctl_flags = ctl_flags;
+ wmb();
+ cpb->resp_flags = 0;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
void __iomem *mmio = pp->ctl_block;
+ int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
VPRINTK("ENTER\n");
/* write append register, command tag in lower 8 bits
and (number of cpbs to append -1) in top 8 bits */
wmb();
+
+ if(curr_ncq != pp->last_issue_ncq) {
+ /* The controller seems to need some delay before switching between
+ NCQ and non-NCQ commands, else we get command timeouts. */
+ udelay(20);
+ pp->last_issue_ncq = curr_ncq;
+ }
+
writew(qc->tag, mmio + NV_ADMA_APPEND);
DPRINTK("Issued tag %u\n",qc->tag);
int i;
u16 tmp;
+ if(ata_tag_valid(ap->active_tag) || ap->sactive) {
+ u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
+ u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+ u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
+ u32 status = readw(mmio + NV_ADMA_STAT);
+ u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
+ u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
+
+ ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
+ "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
+ "next cpb count 0x%X next cpb idx 0x%x\n",
+ notifier, notifier_error, gen_ctl, status,
+ cpb_count, next_cpb_idx);
+
+ for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
+ struct nv_adma_cpb *cpb = &pp->cpb[i];
+ if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
+ ap->sactive & (1 << i) )
+ ata_port_printk(ap, KERN_ERR,
+ "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
+ i, cpb->ctl_flags, cpb->resp_flags);
+ }
+ }
+
/* Push us back into port register mode for error handling. */
nv_adma_register_mode(ap);
/* Reset channel */
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readl( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw( mmio + NV_ADMA_CTL ); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readl( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw( mmio + NV_ADMA_CTL ); /* flush posted write */
}
ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
kfree(hpriv);
}
+#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
return 0;
}
+#endif
static void nv_ck804_host_stop(struct ata_host *host)
{
#include "sata_promise.h"
#define DRV_NAME "sata_promise"
-#define DRV_VERSION "1.05"
+#define DRV_VERSION "2.00"
enum {
.freeze = pdc_freeze,
.thaw = pdc_thaw,
.error_handler = pdc_error_handler,
+ .post_internal_cmd = pdc_post_internal_cmd,
.data_xfer = ata_data_xfer,
.irq_handler = pdc_interrupt,
.irq_clear = pdc_irq_clear,
return pdc_check_atapi_dma(qc);
}
-static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
+static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base,
+ void __iomem *scr_addr)
{
port->cmd_addr = base;
port->data_addr = base;
port->status_addr = base + 0x1c;
port->altstatus_addr =
port->ctl_addr = base + 0x38;
+ port->scr_addr = scr_addr;
}
base = probe_ent->iomap[PDC_MMIO_BAR];
- pdc_ata_setup_port(&probe_ent->port[0], base + 0x200);
- pdc_ata_setup_port(&probe_ent->port[1], base + 0x280);
-
- probe_ent->port[0].scr_addr = base + 0x400;
- probe_ent->port[1].scr_addr = base + 0x500;
+ pdc_ata_setup_port(&probe_ent->port[0], base + 0x200, base + 0x400);
+ pdc_ata_setup_port(&probe_ent->port[1], base + 0x280, base + 0x500);
/* notice 4-port boards */
switch (board_idx) {
/* Fall through */
case board_20319:
probe_ent->n_ports = 4;
-
- pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
- pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
-
- probe_ent->port[2].scr_addr = base + 0x600;
- probe_ent->port[3].scr_addr = base + 0x700;
+ pdc_ata_setup_port(&probe_ent->port[2], base + 0x300, base + 0x600);
+ pdc_ata_setup_port(&probe_ent->port[3], base + 0x380, base + 0x700);
break;
case board_2057x:
hp->flags |= PDC_FLAG_GEN_II;
tmp = readb(base + PDC_FLASH_CTL+1);
if (!(tmp & 0x80)) {
probe_ent->n_ports = 3;
- pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
+ pdc_ata_setup_port(&probe_ent->port[2], base + 0x300, NULL);
hp->port_flags[2] = ATA_FLAG_SLAVE_POSS;
printk(KERN_INFO DRV_NAME " PATA port found\n");
} else
break;
case board_20619:
probe_ent->n_ports = 4;
-
- pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
- pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
-
- probe_ent->port[2].scr_addr = base + 0x600;
- probe_ent->port[3].scr_addr = base + 0x700;
+ pdc_ata_setup_port(&probe_ent->port[2], base + 0x300, NULL);
+ pdc_ata_setup_port(&probe_ent->port[3], base + 0x380, NULL);
break;
default:
BUG();
#include <linux/libata.h>
#define DRV_NAME "sata_qstor"
-#define DRV_VERSION "0.06"
+#define DRV_VERSION "0.07"
enum {
QS_MMIO_BAR = 4,
if ((status & ATA_BUSY))
continue;
DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
- ap->id, qc->tf.protocol, status);
+ ap->print_id, qc->tf.protocol, status);
/* complete taskfile transaction */
pp->state = qs_state_idle;
#include <linux/libata.h>
#define DRV_NAME "sata_sil"
-#define DRV_VERSION "2.0"
+#define DRV_VERSION "2.1"
enum {
SIL_MMIO_BAR = 5,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.suspend = ata_scsi_device_suspend,
.resume = ata_scsi_device_resume,
+#endif
};
static const struct ata_port_operations sil_ops = {
break;
}
- return 0;
+ return NULL;
}
static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
goto freeze;
}
- if (unlikely(!qc || qc->tf.ctl & ATA_NIEN))
+ if (unlikely(!qc))
goto freeze;
+ if (unlikely(qc->tf.flags & ATA_TFLAG_POLLING)) {
+ /* this sometimes happens, just clear IRQ */
+ ata_chk_status(ap);
+ return;
+ }
+
/* Check whether we are expecting interrupt in this state */
switch (ap->hsm_task_state) {
case HSM_ST_FIRST:
#include <linux/libata.h>
#define DRV_NAME "sata_sil24"
-#define DRV_VERSION "0.3"
+#define DRV_VERSION "0.8"
/*
* Port request block (PRB) 32 bytes
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
.suspend = ata_scsi_device_suspend,
.resume = ata_scsi_device_resume,
+#endif
};
static const struct ata_port_operations sil24_ops = {
struct sil24_sge *sge)
{
struct scatterlist *sg;
- unsigned int idx = 0;
ata_for_each_sg(sg, qc) {
sge->addr = cpu_to_le64(sg_dma_address(sg));
sge->flags = cpu_to_le32(SGE_TRM);
else
sge->flags = 0;
-
sge++;
- idx++;
}
}
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
-#include "libata.h"
+#include "sis.h"
-#undef DRV_NAME /* already defined in libata.h, for libata-core */
#define DRV_NAME "sata_sis"
#define DRV_VERSION "0.7"
case 0x10:
ppi[1] = &sis_info133;
break;
-
+
case 0x30:
ppi[0] = &sis_info133;
break;
#endif /* CONFIG_PPC_OF */
#define DRV_NAME "sata_svw"
-#define DRV_VERSION "2.0"
+#define DRV_VERSION "2.1"
enum {
K2_FLAG_NO_ATAPI_DMA = (1 << 29),
#include "sata_promise.h"
#define DRV_NAME "sata_sx4"
-#define DRV_VERSION "0.9"
+#define DRV_VERSION "0.10"
enum {
WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
- VPRINTK("ata%u: ENTER\n", ap->id);
+ VPRINTK("ata%u: ENTER\n", ap->print_id);
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
unsigned int portno = ap->port_no;
unsigned int i;
- VPRINTK("ata%u: ENTER\n", ap->id);
+ VPRINTK("ata%u: ENTER\n", ap->print_id);
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
- VPRINTK("ata%u: ENTER\n", ap->id);
+ VPRINTK("ata%u: ENTER\n", ap->print_id);
wmb(); /* flush PRD, pkt writes */
/* step two - DMA from DIMM to host */
if (doing_hdma) {
- VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id,
+ VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* get drive status; clear intr; complete txn */
qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
/* step one - exec ATA command */
else {
u8 seq = (u8) (port_no + 1 + 4);
- VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->id,
+ VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* submit hdma pkt */
/* step one - DMA from host to DIMM */
if (doing_hdma) {
u8 seq = (u8) (port_no + 1);
- VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->id,
+ VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* submit ata pkt */
/* step two - execute ATA command */
else {
- VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id,
+ VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* get drive status; clear intr; complete txn */
qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
#include <linux/libata.h>
#define DRV_NAME "sata_uli"
-#define DRV_VERSION "1.0"
+#define DRV_VERSION "1.1"
enum {
uli_5289 = 0,
#include <linux/libata.h>
#define DRV_NAME "sata_via"
-#define DRV_VERSION "2.0"
+#define DRV_VERSION "2.1"
enum board_ids_enum {
vt6420,
SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */
PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */
PATA_PIO_TIMING = 0xAB, /* PATA timing register */
-
+
PORT0 = (1 << 1),
PORT1 = (1 << 0),
ALL_PORTS = PORT0 | PORT1,
static const struct ata_port_operations vt6421_pata_ops = {
.port_disable = ata_port_disable,
-
+
.set_piomode = vt6421_set_pio_mode,
.set_dmamode = vt6421_set_dma_mode,
static const struct ata_port_operations vt6421_sata_ops = {
.port_disable = ata_port_disable,
-
+
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
{
struct ata_probe_ent *probe_ent;
struct ata_port_info *ppi[2];
- void __iomem * const *iomap;
+ void __iomem *bar5;
ppi[0] = ppi[1] = &vt6420_port_info;
probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
if (!probe_ent)
return NULL;
- iomap = pcim_iomap_table(pdev);
- probe_ent->port[0].scr_addr = svia_scr_addr(iomap[5], 0);
- probe_ent->port[1].scr_addr = svia_scr_addr(iomap[5], 1);
+ bar5 = pcim_iomap(pdev, 5, 0);
+ if (!bar5) {
+ dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
+ return NULL;
+ }
+
+ probe_ent->port[0].scr_addr = svia_scr_addr(bar5, 0);
+ probe_ent->port[1].scr_addr = svia_scr_addr(bar5, 1);
return probe_ent;
}
probe_ent->mwdma_mask = 0x07;
probe_ent->udma_mask = 0x7f;
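+ /* map all six BARs up front; vt6421_init_addrs() below needs them */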
+ for (i = 0; i < 6; i++)
+ if (!pcim_iomap(pdev, i, 0)) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to iomap PCI BAR %d\n", i);
+ return NULL;
+ }
+
for (i = 0; i < N_PORTS; i++)
vt6421_init_addrs(probe_ent, pcim_iomap_table(pdev), i);
if (rc)
return rc;
- rc = pcim_iomap_regions(pdev, 0x1f, DRV_NAME);
+ rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {
pcim_pin_device(pdev);
return rc;
#include <linux/libata.h>
#define DRV_NAME "sata_vsc"
-#define DRV_VERSION "2.0"
+#define DRV_VERSION "2.1"
enum {
VSC_MMIO_BAR = 0,
VSC_SATA_INT_PHY_CHANGE),
};
-#define is_vsc_sata_int_err(port_idx, int_status) \
- (int_status & (VSC_SATA_INT_ERROR << (8 * port_idx)))
-
-
static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
if (sc_reg > SCR_CONTROL)
}
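+/*
+ * Port freeze/thaw via the per-port interrupt mask byte: writing 0
+ * masks everything (freeze), writing 0xff unmasks everything (thaw).
+ */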
+static void vsc_freeze(struct ata_port *ap)
+{
+ void __iomem *mask_addr;
+
+ mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
+ VSC_SATA_INT_MASK_OFFSET + ap->port_no;
+
+ writeb(0, mask_addr);
+}
+
+
+static void vsc_thaw(struct ata_port *ap)
+{
+ void __iomem *mask_addr;
+
+ mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
+ VSC_SATA_INT_MASK_OFFSET + ap->port_no;
+
+ writeb(0xff, mask_addr);
+}
+
+
static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
{
void __iomem *mask_addr;
}
}
+static inline void vsc_error_intr(u8 port_status, struct ata_port *ap)
+{
+ if (port_status & (VSC_SATA_INT_PHY_CHANGE | VSC_SATA_INT_ERROR_M))
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
+}
+
+static void vsc_port_intr(u8 port_status, struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ int handled = 0;
+
+ if (unlikely(port_status & VSC_SATA_INT_ERROR)) {
+ vsc_error_intr(port_status, ap);
+ return;
+ }
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
+ handled = ata_host_intr(ap, qc);
+
+ /* We received an interrupt during a polled command,
+ * or some other spurious condition. Interrupt reporting
+ * with this hardware is fairly reliable so it is safe to
+ * simply clear the interrupt.
+ */
+ if (unlikely(!handled))
+ ata_chk_status(ap);
+}
/*
* vsc_sata_interrupt
struct ata_host *host = dev_instance;
unsigned int i;
unsigned int handled = 0;
- u32 int_status;
-
- spin_lock(&host->lock);
+ u32 status;
- int_status = readl(host->iomap[VSC_MMIO_BAR] +
- VSC_SATA_INT_STAT_OFFSET);
+ status = readl(host->iomap[VSC_MMIO_BAR] + VSC_SATA_INT_STAT_OFFSET);
- for (i = 0; i < host->n_ports; i++) {
- if (int_status & ((u32) 0xFF << (8 * i))) {
- struct ata_port *ap;
+ if (unlikely(status == 0xffffffff || status == 0)) {
+ if (status)
+ dev_printk(KERN_ERR, host->dev,
+ ": IRQ status == 0xffffffff, "
+ "PCI fault or device removal?\n");
+ goto out;
+ }
- ap = host->ports[i];
+ spin_lock(&host->lock);
- if (is_vsc_sata_int_err(i, int_status)) {
- u32 err_status;
- printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
- err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
- vsc_sata_scr_write(ap, SCR_ERROR, err_status);
- handled++;
- }
+ for (i = 0; i < host->n_ports; i++) {
+ u8 port_status = (status >> (8 * i)) & 0xff;
+ if (port_status) {
+ struct ata_port *ap = host->ports[i];
if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
-
- qc = ata_qc_from_tag(ap, ap->active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
- handled += ata_host_intr(ap, qc);
- else if (is_vsc_sata_int_err(i, int_status)) {
- /*
- * On some chips (i.e. Intel 31244), an error
- * interrupt will sneak in at initialization
- * time (phy state changes). Clearing the SCR
- * error register is not required, but it prevents
- * the phy state change interrupts from recurring
- * later.
- */
- u32 err_status;
- err_status = vsc_sata_scr_read(ap, SCR_ERROR);
- printk(KERN_DEBUG "%s: clearing interrupt, "
- "status %x; sata err status %x\n",
- __FUNCTION__,
- int_status, err_status);
- vsc_sata_scr_write(ap, SCR_ERROR, err_status);
- /* Clear interrupt status */
- ata_chk_status(ap);
- handled++;
- }
- }
+ vsc_port_intr(port_status, ap);
+ handled++;
+ } else
+ dev_printk(KERN_ERR, host->dev,
+ ": interrupt from disabled port %d\n", i);
}
}
spin_unlock(&host->lock);
-
+out:
return IRQ_RETVAL(handled);
}
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_data_xfer,
- .freeze = ata_bmdma_freeze,
- .thaw = ata_bmdma_thaw,
+ .freeze = vsc_freeze,
+ .thaw = vsc_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = vsc_sata_interrupt,
--- /dev/null
+
+struct ata_port_info;
+
+/* pata_sis.c */
+extern struct ata_port_info sis_info133;
setups function - apparently needed by the rd_load_image routine
that supposes the filesystem in the image uses a 1024 blocksize.
-config BLK_DEV_INITRD
- bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
- depends on BROKEN || !FRV
- help
- The initial RAM filesystem is a ramfs which is loaded by the
- boot loader (loadlin or lilo) and that is mounted as root
- before the normal boot procedure. It is typically used to
- load modules needed to mount the "real" root file system,
- etc. See <file:Documentation/initrd.txt> for details.
-
- If RAM disk support (BLK_DEV_RAM) is also included, this
- also enables initial RAM disk (initrd) support and adds
- 15 Kbytes (more on some other architectures) to the kernel size.
-
- If unsure say Y.
-
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media"
depends on !UML
u16 aoemajor;
hin = (struct aoe_hdr *) skb->mac.raw;
- aoemajor = be16_to_cpu(hin->major);
+ aoemajor = be16_to_cpu(get_unaligned(&hin->major));
d = aoedev_by_aoeaddr(aoemajor, hin->minor);
if (d == NULL) {
snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
spin_lock_irqsave(&d->lock, flags);
- n = be32_to_cpu(hin->tag);
+ n = be32_to_cpu(get_unaligned(&hin->tag));
f = getframe(d, n);
if (f == NULL) {
calc_rttavg(d, -tsince(n));
snprintf(ebuf, sizeof ebuf,
"%15s e%d.%d tag=%08x@%08lx\n",
"unexpected rsp",
- be16_to_cpu(hin->major),
+ be16_to_cpu(get_unaligned(&hin->major)),
hin->minor,
- be32_to_cpu(hin->tag),
+ be32_to_cpu(get_unaligned(&hin->tag)),
jiffies);
aoechr_error(ebuf);
return;
printk(KERN_INFO
"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
ahout->cmdstat,
- be16_to_cpu(hin->major),
+ be16_to_cpu(get_unaligned(&hin->major)),
hin->minor);
}
}
* Enough people have their dip switches set backwards to
* warrant a loud message for this special case.
*/
- aoemajor = be16_to_cpu(h->major);
+ aoemajor = be16_to_cpu(get_unaligned(&h->major));
if (aoemajor == 0xfff) {
printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
"Check shelf dip switches.\n");
#include <linux/blkdev.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
+#include <asm/unaligned.h>
#include "aoe.h"
#define NECODES 5
skb_push(skb, ETH_HLEN); /* (1) */
h = (struct aoe_hdr *) skb->mac.raw;
- n = be32_to_cpu(h->tag);
+ n = be32_to_cpu(get_unaligned(&h->tag));
if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
goto exit;
n = 0;
if (net_ratelimit())
printk(KERN_ERR "aoe: error packet from %d.%d; ecode=%d '%s'\n",
- be16_to_cpu(h->major), h->minor,
+ be16_to_cpu(get_unaligned(&h->major)), h->minor,
h->err, aoe_errlist[n]);
goto exit;
}
if (inq_buff == NULL)
goto mem_msg;
+ /* testing to see if 16-byte CDBs are already being used */
+ if (h->cciss_read == CCISS_READ_16) {
+ cciss_read_capacity_16(h->ctlr, drv_index, 1,
+ &total_size, &block_size);
+ goto geo_inq;
+ }
+
cciss_read_capacity(ctlr, drv_index, 1,
&total_size, &block_size);
- /* total size = last LBA + 1 */
- /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */
- /* so we assume this volume this must be >2TB in size */
- if (total_size == (__u32) 0) {
+ /* if read_capacity returns all F's this volume is >2TB in size */
+ /* so we switch to 16-byte CDB's for all read/write ops */
+ if (total_size == 0xFFFFFFFFULL) {
cciss_read_capacity_16(ctlr, drv_index, 1,
&total_size, &block_size);
h->cciss_read = CCISS_READ_16;
h->cciss_read = CCISS_READ_10;
h->cciss_write = CCISS_WRITE_10;
}
+geo_inq:
cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
inq_buff, &h->drv[drv_index]);
drv->raid_level = inq_buff->data_byte[8];
}
drv->block_size = block_size;
- drv->nr_blocks = total_size;
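+ /* read_capacity returns the address of the last block, hence +1 */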
+ drv->nr_blocks = total_size + 1;
t = drv->heads * drv->sectors;
if (t > 1) {
- unsigned rem = sector_div(total_size, t);
+ sector_t real_size = total_size + 1;
+ unsigned long rem = sector_div(real_size, t);
if (rem)
- total_size++;
- drv->cylinders = total_size;
+ real_size++;
+ drv->cylinders = real_size;
}
} else { /* Get geometry failed */
printk(KERN_WARNING "cciss: reading geometry failed\n");
ctlr, buf, sizeof(ReadCapdata_struct),
1, logvol, 0, NULL, TYPE_CMD);
if (return_code == IO_OK) {
- *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1;
+ *total_size = be32_to_cpu(*(__u32 *) buf->total_size);
*block_size = be32_to_cpu(*(__u32 *) buf->block_size);
} else { /* read capacity command failed */
printk(KERN_WARNING "cciss: read capacity failed\n");
*total_size = 0;
*block_size = BLOCK_SIZE;
}
- if (*total_size != (__u32) 0)
+ if (*total_size != 0)
printk(KERN_INFO " blocks= %llu block_size= %d\n",
- (unsigned long long)*total_size, *block_size);
+ (unsigned long long)*total_size+1, *block_size);
kfree(buf);
return;
}
1, logvol, 0, NULL, TYPE_CMD);
}
if (return_code == IO_OK) {
- *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1;
+ *total_size = be64_to_cpu(*(__u64 *) buf->total_size);
*block_size = be32_to_cpu(*(__u32 *) buf->block_size);
} else { /* read capacity command failed */
printk(KERN_WARNING "cciss: read capacity failed\n");
*block_size = BLOCK_SIZE;
}
printk(KERN_INFO " blocks= %llu block_size= %d\n",
- (unsigned long long)*total_size, *block_size);
+ (unsigned long long)*total_size+1, *block_size);
kfree(buf);
return;
}
}
cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
- /* total_size = last LBA + 1 */
- if(total_size == (__u32) 0) {
+ /* If read_capacity returns all F's the logical is >2TB */
+ /* so we switch to 16-byte CDBs for all read/write ops */
+ if(total_size == 0xFFFFFFFFULL) {
cciss_read_capacity_16(cntl_num, i, 0,
&total_size, &block_size);
hba[cntl_num]->cciss_read = CCISS_READ_16;
return -1;
}
-static void __devexit cciss_remove_one(struct pci_dev *pdev)
+static void cciss_remove_one(struct pci_dev *pdev)
{
ctlr_info_t *tmp_ptr;
int i, j;
memset(flush_buf, 0, 4);
return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
TYPE_CMD);
- if (return_code != IO_OK) {
- printk(KERN_WARNING "Error Flushing cache on controller %d\n",
- i);
+ if (return_code == IO_OK) {
+ printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
+ } else {
+ printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
}
free_irq(hba[i]->intr[2], hba[i]);
.probe = cciss_init_one,
.remove = __devexit_p(cciss_remove_one),
.id_table = cciss_pci_device_id, /* id_table */
+ .shutdown = cciss_remove_one,
};
/*
return -ENOMEM;
err = major_nr = register_blkdev(0, "umem");
- if (err < 0)
+ if (err < 0) {
+ pci_unregister_driver(&mm_pci_driver);
return -EIO;
+ }
for (i = 0; i < num_cards; i++) {
mm_gendisk[i] = alloc_disk(1 << MM_SHIFT);
return 0;
out:
+ pci_unregister_driver(&mm_pci_driver);
unregister_blkdev(major_nr, "umem");
while (i--)
put_disk(mm_gendisk[i]);
return 0;
}
+static void viocd_end_request(struct request *req, int uptodate)
+{
+ int nsectors = req->hard_nr_sectors;
+
+ /*
+ * Make sure it's fully ended, and ensure that we process
+ * at least one sector.
+ */
+ if (blk_pc_request(req))
+ nsectors = (req->data_len + 511) >> 9;
+ if (!nsectors)
+ nsectors = 1;
+
+ if (end_that_request_first(req, uptodate, nsectors))
+ BUG();
+ add_disk_randomness(req->rq_disk);
+ blkdev_dequeue_request(req);
+ end_that_request_last(req, uptodate);
+}
static int rwreq;
while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
if (!blk_fs_request(req))
- end_request(req, 0);
+ viocd_end_request(req, 0);
else if (send_request(req) < 0) {
printk(VIOCD_KERN_WARNING
"unable to send message to OS/400!");
- end_request(req, 0);
+ viocd_end_request(req, 0);
} else
rwreq++;
}
"with rc %d:0x%04X: %s\n",
req, event->xRc,
bevent->sub_result, err->msg);
- end_request(req, 0);
+ viocd_end_request(req, 0);
} else
- end_request(req, 1);
+ viocd_end_request(req, 1);
/* restart handling of incoming requests */
spin_unlock_irqrestore(&viocd_reqlock, flags);
source "drivers/char/tpm/Kconfig"
config TELCLOCK
- tristate "Telecom clock driver for MPBL0010 ATCA SBC"
+ tristate "Telecom clock driver for ATCA SBC"
depends on EXPERIMENTAL && X86
default n
help
- The telecom clock device is specific to the MPBL0010 ATCA computer and
- allows direct userspace access to the configuration of the telecom clock
- configuration settings. This device is used for hardware synchronization
- across the ATCA backplane fabric. Upon loading, the driver exports a
- sysfs directory, /sys/devices/platform/telco_clock, with a number of
- files for controlling the behavior of this hardware.
+ The telecom clock device is specific to the MPCBL0010 and MPCBL0050
+ ATCA computers and allows direct userspace access to the telecom
+ clock configuration settings. This
+ device is used for hardware synchronization across the ATCA backplane
+ fabric. Upon loading, the driver exports a sysfs directory,
+ /sys/devices/platform/telco_clock, with a number of files for
+ controlling the behavior of this hardware.
endmenu
agp_device_command(command, (mode & AGP8X_MODE) != 0);
}
-struct const agp_bridge_driver hp_zx1_driver = {
+const struct agp_bridge_driver hp_zx1_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
.configure = hp_zx1_configure,
| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
}
-struct const agp_bridge_driver intel_i460_driver = {
+const struct agp_bridge_driver intel_i460_driver = {
.owner = THIS_MODULE,
.aperture_sizes = i460_sizes,
.size_type = U8_APER_SIZE,
agp_device_command(command, (mode & AGP8X_MODE) != 0);
}
-struct const agp_bridge_driver parisc_agp_driver = {
+static const struct agp_bridge_driver parisc_agp_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
.configure = parisc_agp_configure,
return bridge;
}
-struct const agp_bridge_driver sgi_tioca_driver = {
+const struct agp_bridge_driver sgi_tioca_driver = {
.owner = THIS_MODULE,
.size_type = U16_APER_SIZE,
.configure = sgi_tioca_configure,
{4, 1024, 0, 1}
};
-struct const agp_bridge_driver uninorth_agp_driver = {
+const struct agp_bridge_driver uninorth_agp_driver = {
.owner = THIS_MODULE,
.aperture_sizes = (void *)uninorth_sizes,
.size_type = U32_APER_SIZE,
.cant_use_aperture = 1,
};
-struct const agp_bridge_driver u3_agp_driver = {
+const struct agp_bridge_driver u3_agp_driver = {
.owner = THIS_MODULE,
.aperture_sizes = (void *)u3_sizes,
.size_type = U32_APER_SIZE,
tmp.irq = cinfo->irq;
tmp.flags = info->flags;
tmp.close_delay = info->close_delay;
+ tmp.closing_wait = info->closing_wait;
tmp.baud_base = info->baud;
tmp.custom_divisor = info->custom_divisor;
tmp.hub6 = 0; /*!!! */
hrs = alm_tm.tm_hour;
min = alm_tm.tm_min;
+ sec = alm_tm.tm_sec;
if (hrs >= 24)
hrs = 0xff;
if (min >= 60)
min = 0xff;
- BIN_TO_BCD(sec);
- BIN_TO_BCD(min);
- BIN_TO_BCD(hrs);
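+ /* the alarm has minute resolution; reject non-zero seconds */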
+ if (sec != 0)
+ return -EINVAL;
+
+ min = BIN2BCD(min);
+ hrs = BIN2BCD(hrs);
spin_lock(&ds1286_lock);
rtc_write(hrs, RTC_HOURS_ALARM);
static void setup_empty_event(struct tty_struct *tty, struct channel *ch);
void epca_setup(char *, int *);
-static int get_termio(struct tty_struct *, struct termio __user *);
static int pc_write(struct tty_struct *, const unsigned char *, int);
static int pc_init(void);
static int init_PCI(void);
switch (cmd)
{ /* Begin switch cmd */
-
-#if 0 /* Handled by calling layer properly */
- case TCGETS:
- if (copy_to_user(argp, tty->termios, sizeof(struct ktermios)))
- return -EFAULT;
- return 0;
- case TCGETA:
- return get_termio(tty, argp);
-#endif
case TCSBRK: /* SVID version: non-zero arg --> no break */
retval = tty_check_change(tty);
if (retval)
memoff(ch);
} /* End setup_empty_event */
-/* --------------------- Begin get_termio ----------------------- */
-
-static int get_termio(struct tty_struct * tty, struct termio __user * termio)
-{ /* Begin get_termio */
- return kernel_termios_to_user_termio(termio, tty->termios);
-} /* End get_termio */
-
/* ---------------------- Begin epca_setup -------------------------- */
void epca_setup(char *str, int *ints)
{ /* Begin epca_setup */
if (!info)
return;
+#ifdef CONFIG_PPC_MERGE
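+ /* skip legacy ports the platform reports as unavailable */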
+ if (check_legacy_ioport(ipmi_defaults[i].port))
+ continue;
+#endif
+
info->addr_source = NULL;
info->si_type = ipmi_defaults[i].type;
DEBUGP(6, dev, "BytesToRead=%lu\n", bytes_to_read);
min_bytes_to_read = min(count, bytes_to_read + 5);
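+ /* never read past the end of our transfer buffer */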
+ min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE);
DEBUGP(6, dev, "Min=%lu\n", min_bytes_to_read);
return 0;
}
- if (count < 5) {
+ if ((count < 5) || (count > READ_WRITE_BUFFER_SIZE)) {
DEBUGP(2, dev, "<- cm4040_write buffersize=%Zd < 5\n", count);
return -EIO;
}
/* check whether we're reopening an existing tty */
if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
tty = devpts_get_tty(idx);
+ /*
+ * If we don't have a tty here on a slave open, it's because
+ * the master already started the close process and there's
+ * no relation between devpts file and tty anymore.
+ */
+ if (!tty && driver->subtype == PTY_TYPE_SLAVE) {
+ retval = -EIO;
+ goto end_init;
+ }
+ /*
+ * It's safe from now on because init_dev() is called with
+ * tty_mutex held and release_dev() won't change tty->count
+ * or tty->flags without having to grab tty_mutex
+ */
if (tty && driver->subtype == PTY_TYPE_MASTER)
tty = tty->link;
} else {
return clocksource_register(&clocksource_acpi_pm);
}
-module_init(init_acpi_pm_clocksource);
+/* We use fs_initcall because we want the PCI fixups to have run
+ * but we still need to load before device_initcall
+ */
+fs_initcall(init_acpi_pm_clocksource);
return clocksource_register(&clocksource_cyclone);
}
-module_init(init_cyclone_clocksource);
+arch_initcall(init_cyclone_clocksource);
*/
static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data)
{
- struct cn_callback_entry *__cbq;
+ struct cn_callback_entry *__cbq, *__new_cbq;
struct cn_dev *dev = &cdev;
int err = -ENODEV;
} else {
struct cn_callback_data *d;
- __cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC);
- if (__cbq) {
- d = &__cbq->data;
+ err = -ENOMEM;
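+ /* allocate a fresh entry; __cbq must stay intact since its
+ callback is copied below */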
+ __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
+ if (__new_cbq) {
+ d = &__new_cbq->data;
d->callback_priv = msg;
d->callback = __cbq->data.callback;
d->ddata = data;
d->destruct_data = destruct_data;
- d->free = __cbq;
+ d->free = __new_cbq;
- INIT_WORK(&__cbq->work,
+ INIT_WORK(&__new_cbq->work,
&cn_queue_wrapper);
-
+
if (queue_work(dev->cbdev->cn_queue,
- &__cbq->work))
+ &__new_cbq->work))
err = 0;
else {
- kfree(__cbq);
+ kfree(__new_cbq);
err = -EINVAL;
}
- } else
- err = -ENOMEM;
+ }
}
break;
}
* (and isn't unregistered in the meantime).
*
*/
-int cpufreq_register_driver(const struct cpufreq_driver *driver_data)
+int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
unsigned long flags;
int ret;
* Returns zero if successful, and -EINVAL if the cpufreq_driver is
* currently not initialised.
*/
-int cpufreq_unregister_driver(const struct cpufreq_driver *driver)
+int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
unsigned long flags;
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
-
u32 flags = 0;
- int iflags;
+ unsigned long iflags;
if (op->len == 0 || op->src == op->dst)
return 0;
if (item.format != HID_ITEM_FORMAT_SHORT) {
dbg("unexpected long global item");
- kfree(device->collection);
hid_free_device(device);
kfree(parser);
return NULL;
if (dispatch_type[item.type](parser, &item)) {
dbg("item %u %u %u %u parsing failed\n",
item.format, (unsigned)item.size, (unsigned)item.type, (unsigned)item.tag);
- kfree(device->collection);
hid_free_device(device);
kfree(parser);
return NULL;
if (start == end) {
if (parser->collection_stack_ptr) {
dbg("unbalanced collection at end of report description");
- kfree(device->collection);
hid_free_device(device);
kfree(parser);
return NULL;
}
if (parser->local.delimiter_depth) {
dbg("unbalanced delimiter at end of report description");
- kfree(device->collection);
hid_free_device(device);
kfree(parser);
return NULL;
}
dbg("item fetching failed at offset %d\n", (int)(end - start));
- kfree(device->collection);
hid_free_device(device);
kfree(parser);
return NULL;
/* make sure the unused bits in the last byte are zeros */
if (count > 0 && size > 0)
- data[(count*size-1)/8] = 0;
+ data[(offset+count*size-1)/8] = 0;
for (n = 0; n < count; n++) {
if (field->logical_minimum < 0) /* signed values */
*/
#include <linux/hid.h>
+#include <linux/hid-debug.h>
struct hid_usage_entry {
unsigned page;
* $Id: hid-input.c,v 1.2 2002/04/23 00:59:25 rdamazio Exp $
*
* Copyright (c) 2000-2001 Vojtech Pavlik
- * Copyright (c) 2006 Jiri Kosina
+ * Copyright (c) 2006-2007 Jiri Kosina
*
* HID to Linux Input mapping
*/
#define map_led(c) do { usage->code = c; usage->type = EV_LED; bit = input->ledbit; max = LED_MAX; } while (0)
#define map_abs_clear(c) do { map_abs(c); clear_bit(c, bit); } while (0)
-#define map_rel_clear(c) do { map_rel(c); clear_bit(c, bit); } while (0)
#define map_key_clear(c) do { map_key(c); clear_bit(c, bit); } while (0)
#ifdef CONFIG_USB_HIDINPUT_POWERBOOK
}
}
- map_key_clear(code);
+ map_key(code);
break;
case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
case HID_GD_SLIDER: case HID_GD_DIAL: case HID_GD_WHEEL:
if (field->flags & HID_MAIN_ITEM_RELATIVE)
- map_rel_clear(usage->hid & 0xf);
+ map_rel(usage->hid & 0xf);
else
- map_abs_clear(usage->hid & 0xf);
+ map_abs(usage->hid & 0xf);
break;
case HID_GD_HATSWITCH:
case 0x22f: map_key_clear(KEY_ZOOMRESET); break;
case 0x233: map_key_clear(KEY_SCROLLUP); break;
case 0x234: map_key_clear(KEY_SCROLLDOWN); break;
- case 0x238: map_rel_clear(REL_HWHEEL); break;
+ case 0x238: map_rel(REL_HWHEEL); break;
case 0x25f: map_key_clear(KEY_CANCEL); break;
case 0x279: map_key_clear(KEY_REDO); break;
case 0x302: map_key_clear(KEY_PROG2); break;
case 0x303: map_key_clear(KEY_PROG3); break;
+ /* Reported on Logitech S510 wireless keyboard */
+ case 0x101f: map_key_clear(KEY_ZOOMIN); break;
+ case 0x1020: map_key_clear(KEY_ZOOMOUT); break;
+ case 0x1021: map_key_clear(KEY_ZOOMRESET); break;
+ /* this one is marked as 'Rotate' */
+ case 0x1028: map_key_clear(KEY_ANGLE); break;
+ case 0x1029: map_key_clear(KEY_SHUFFLE); break;
+ case 0x1041: map_key_clear(KEY_BATTERY); break;
+ case 0x1042: map_key_clear(KEY_WORDPROCESSOR); break;
+ case 0x1043: map_key_clear(KEY_SPREADSHEET); break;
+ case 0x1044: map_key_clear(KEY_PRESENTATION); break;
+ case 0x1045: map_key_clear(KEY_UNDO); break;
+ case 0x1046: map_key_clear(KEY_REDO); break;
+ case 0x1047: map_key_clear(KEY_PRINT); break;
+ case 0x1048: map_key_clear(KEY_SAVE); break;
+ case 0x1049: map_key_clear(KEY_PROG1); break;
+ case 0x104a: map_key_clear(KEY_PROG2); break;
+ case 0x104b: map_key_clear(KEY_PROG3); break;
+ case 0x104c: map_key_clear(KEY_PROG4); break;
+
default: goto ignore;
}
break;
set_bit(usage->type, input->evbit);
+ if (device->quirks & HID_QUIRK_DUPLICATE_USAGES &&
+ (usage->type == EV_KEY ||
+ usage->type == EV_REL ||
+ usage->type == EV_ABS))
+ clear_bit(usage->code, bit);
+
while (usage->code <= max && test_and_set_bit(usage->code, bit))
usage->code = find_next_zero_bit(bit, max + 1, usage->code);
This driver can also be built as a module. If so, the module
will be called i2c-versatile.
+config I2C_ACORN
+ bool "Acorn IOC/IOMD I2C bus support"
+ depends on I2C && ARCH_ACORN
+ default y
+ select I2C_ALGOBIT
+ help
+ Say yes if you want to support the I2C bus on Acorn platforms.
+
+ If you don't know, say Y.
+
config I2C_VIA
tristate "VIA 82C586B"
depends on I2C && PCI && EXPERIMENTAL
obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o
obj-$(CONFIG_I2C_STUB) += i2c-stub.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
+obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
obj-$(CONFIG_I2C_VIA) += i2c-via.o
obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o
obj-$(CONFIG_I2C_VOODOO3) += i2c-voodoo3.o
--- /dev/null
+/*
+ * linux/drivers/acorn/char/i2c.c
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * ARM IOC/IOMD i2c driver.
+ *
+ * On Acorn machines, the following i2c devices are on the bus:
+ * - PCF8583 real time clock & static RAM
+ */
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/hardware/ioc.h>
+#include <asm/system.h>
+
+#define FORCE_ONES 0xdc
+#define SCL 0x02
+#define SDA 0x01
+
+/*
+ * We must preserve all non-i2c output bits in IOC_CONTROL.
+ * Note also that we need to preserve the value of SCL and
+ * SDA outputs as well (which may be different from the
+ * values read back from IOC_CONTROL).
+ */
+static u_int force_ones;
+
+static void ioc_setscl(void *data, int state)
+{
+ u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA);
+ u_int ones = force_ones;
+
+ if (state)
+ ones |= SCL;
+ else
+ ones &= ~SCL;
+
+ force_ones = ones;
+
+ ioc_writeb(ioc_control | ones, IOC_CONTROL);
+}
+
+static void ioc_setsda(void *data, int state)
+{
+ u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA);
+ u_int ones = force_ones;
+
+ if (state)
+ ones |= SDA;
+ else
+ ones &= ~SDA;
+
+ force_ones = ones;
+
+ ioc_writeb(ioc_control | ones, IOC_CONTROL);
+}
+
+static int ioc_getscl(void *data)
+{
+ return (ioc_readb(IOC_CONTROL) & SCL) != 0;
+}
+
+static int ioc_getsda(void *data)
+{
+ return (ioc_readb(IOC_CONTROL) & SDA) != 0;
+}
+
+static struct i2c_algo_bit_data ioc_data = {
+ .setsda = ioc_setsda,
+ .setscl = ioc_setscl,
+ .getsda = ioc_getsda,
+ .getscl = ioc_getscl,
+ .udelay = 80,
+ .timeout = 100
+};
+
+static struct i2c_adapter ioc_ops = {
+ .id = I2C_HW_B_IOC,
+ .algo_data = &ioc_data,
+};
+
+static int __init i2c_ioc_init(void)
+{
+ force_ones = FORCE_ONES | SCL | SDA;
+
+ return i2c_bit_add_bus(&ioc_ops);
+}
+
+__initcall(i2c_ioc_init);
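For orientation, the i2c-algo-bit layer drives the bus entirely through the
four callbacks registered above. A START condition, for example, comes out
roughly as in this simplified sketch (illustrative only, not the kernel's
actual algorithm code; assumes <linux/delay.h> for udelay()):

/* Simplified sketch of how i2c-algo-bit would issue a START condition
 * through the callbacks above (illustrative only). */
static void sketch_i2c_start(struct i2c_algo_bit_data *adap)
{
	adap->setsda(adap->data, 1);	/* release SDA */
	adap->setscl(adap->data, 1);	/* release SCL */
	udelay(adap->udelay);
	adap->setsda(adap->data, 0);	/* SDA falls while SCL high: START */
	udelay(adap->udelay);
	adap->setscl(adap->data, 0);	/* claim the bus */
}

The FORCE_ONES handling above is what makes this work on the IOC/IOMD,
where the control register would otherwise read back values that differ
from the driven SCL/SDA outputs.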
config BLK_DEV_ALI14XX
tristate "ALI M14xx support"
help
- This driver is enabled at runtime using the "ide0=ali14xx" kernel
+ This driver is enabled at runtime using the "ali14xx.probe" kernel
boot parameter. It enables support for the secondary IDE interface
of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster
I/O speeds to be set as well. See the files
config BLK_DEV_DTC2278
tristate "DTC-2278 support"
help
- This driver is enabled at runtime using the "ide0=dtc2278" kernel
+ This driver is enabled at runtime using the "dtc2278.probe" kernel
boot parameter. It enables support for the secondary IDE interface
of the DTC-2278 card, and permits faster I/O speeds to be set as
well. See the <file:Documentation/ide.txt> and
config BLK_DEV_HT6560B
tristate "Holtek HT6560B support"
help
- This driver is enabled at runtime using the "ide0=ht6560b" kernel
+ This driver is enabled at runtime using the "ht6560b.probe" kernel
boot parameter. It enables support for the secondary IDE interface
of the Holtek card, and permits faster I/O speeds to be set as well.
See the <file:Documentation/ide.txt> and
config BLK_DEV_QD65XX
tristate "QDI QD65xx support"
help
- This driver is enabled at runtime using the "ide0=qd65xx" kernel
+ This driver is enabled at runtime using the "qd65xx.probe" kernel
boot parameter. It permits faster I/O speeds to be set. See the
<file:Documentation/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> for
more info.
config BLK_DEV_UMC8672
tristate "UMC-8672 support"
help
- This driver is enabled at runtime using the "ide0=umc8672" kernel
+ This driver is enabled at runtime using the "umc8672.probe" kernel
boot parameter. It enables support for the secondary IDE interface
of the UMC-8672, and permits faster I/O speeds to be set as well.
See the files <file:Documentation/ide.txt> and
* device can't do DMA handshaking for some stupid reason. We don't need to do that.
*/
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
-
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#define IDEDISK_VERSION "1.18"
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
-
//#define DEBUG
#include <linux/module.h>
if(!(drive->id->hw_config & 0x4000))
return 0;
#endif /* CONFIG_IDEDMA_IVB */
+ if (!(drive->id->hw_config & 0x2000))
+ return 0;
return 1;
}
/**
* ide_get_best_pio_mode - get PIO mode from drive
- * @driver: drive to consider
+ * @drive: drive to consider
* @mode_wanted: preferred mode
- * @max_mode: highest allowed
- * @d: pio data
+ * @max_mode: highest allowed mode
+ * @d: PIO data
*
* This routine returns the recommended PIO settings for a given drive,
* based on the drive->id information and the ide_pio_blacklist[].
- * This is used by most chipset support modules when "auto-tuning".
*
- * Drive PIO mode auto selection
+ * Drive PIO mode is auto-selected if 255 is passed as mode_wanted.
+ * This is used by most chipset support modules when "auto-tuning".
*/
u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode, ide_pio_data_t *d)
if (mode_wanted != 255) {
pio_mode = mode_wanted;
+ use_iordy = (pio_mode > 2);
} else if (!drive->id) {
pio_mode = 0;
} else if ((pio_mode = ide_scan_pio_blacklist(id->model)) != -1) {
}
}
-#if 0
- if (drive->id->major_rev_num & 0x0004) printk("ATA-2 ");
-#endif
-
/*
* Conservative "downgrade" for all pre-ATA2 drives
*/
if (pio_mode && pio_mode < 4) {
pio_mode--;
overridden = 1;
-#if 0
- use_iordy = (pio_mode > 2);
-#endif
if (cycle_time && cycle_time < ide_pio_timings[pio_mode].cycle_time)
cycle_time = 0; /* use standard timing */
}
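As the updated kerneldoc above states, passing 255 as mode_wanted requests
auto-selection. A typical (hedged) call from a chipset tuneproc would be:

	/* Illustrative call: 255 requests auto-selection, capped at PIO 4 */
	ide_pio_data_t d;
	u8 pio = ide_get_best_pio_mode(drive, 255, 4, &d);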
* valid after probe time even with noprobe
*/
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#define REVISION "Revision: 7.00alpha2"
#define VERSION "Id: ide.c 7.00a2 20020906"
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
-
#define _IDE_C /* Tell ide.h it's really us */
#include <linux/module.h>
}
#ifdef CONFIG_BLK_DEV_ALI14XX
-static int __initdata probe_ali14xx;
+extern int probe_ali14xx;
extern int ali14xx_init(void);
#endif
#ifdef CONFIG_BLK_DEV_UMC8672
-static int __initdata probe_umc8672;
+extern int probe_umc8672;
extern int umc8672_init(void);
#endif
#ifdef CONFIG_BLK_DEV_DTC2278
-static int __initdata probe_dtc2278;
+extern int probe_dtc2278;
extern int dtc2278_init(void);
#endif
#ifdef CONFIG_BLK_DEV_HT6560B
-static int __initdata probe_ht6560b;
+extern int probe_ht6560b;
extern int ht6560b_init(void);
#endif
#ifdef CONFIG_BLK_DEV_QD65XX
-static int __initdata probe_qd65xx;
+extern int probe_qd65xx;
extern int qd65xx_init(void);
#endif
*/
if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
const char *hd_words[] = {
- "none", "noprobe", "nowerr", "cdrom", "serialize",
+ "none", "noprobe", "nowerr", "cdrom", "minus5",
"autotune", "noautotune", "minus8", "swapdata", "bswap",
"noflush", "remap", "remap63", "scsi", NULL };
unit = s[2] - 'a';
drive->ready_stat = 0;
hwif->noprobe = 0;
goto done;
- case -5: /* "serialize" */
- printk(" -- USE \"ide%d=serialize\" INSTEAD", hw);
- goto do_serialize;
case -6: /* "autotune" */
drive->autotune = IDE_TUNE_AUTO;
goto obsolete_option;
* (-8, -9, -10) are reserved to ease the hardcoding.
*/
static const char *ide_words[] = {
- "noprobe", "serialize", "autotune", "noautotune",
+ "noprobe", "serialize", "minus3", "minus4",
"reset", "dma", "ata66", "minus8", "minus9",
"minus10", "four", "qd65xx", "ht6560b", "cmd640_vlb",
"dtc2278", "umc8672", "ali14xx", NULL };
hwif->chipset = mate->chipset = ide_4drives;
mate->irq = hwif->irq;
memcpy(mate->io_ports, hwif->io_ports, sizeof(hwif->io_ports));
- goto do_serialize;
+ hwif->mate = mate;
+ mate->mate = hwif;
+ hwif->serialized = mate->serialized = 1;
+ goto obsolete_option;
}
#endif /* CONFIG_BLK_DEV_4DRIVES */
case -10: /* minus10 */
case -9: /* minus9 */
case -8: /* minus8 */
+ case -4:
+ case -3:
goto bad_option;
case -7: /* ata66 */
#ifdef CONFIG_BLK_DEV_IDEPCI
case -5: /* "reset" */
hwif->reset = 1;
goto obsolete_option;
- case -4: /* "noautotune" */
- hwif->drives[0].autotune = IDE_TUNE_NOAUTO;
- hwif->drives[1].autotune = IDE_TUNE_NOAUTO;
- goto obsolete_option;
- case -3: /* "autotune" */
- hwif->drives[0].autotune = IDE_TUNE_AUTO;
- hwif->drives[1].autotune = IDE_TUNE_AUTO;
- goto obsolete_option;
case -2: /* "serialize" */
- do_serialize:
hwif->mate = &ide_hwifs[hw^1];
hwif->mate->mate = hwif;
hwif->serialized = hwif->mate->serialized = 1;
#endif /* CONFIG_BLK_DEV_CMD640 */
#ifdef CONFIG_BLK_DEV_IDE_PMAC
{
- extern void pmac_ide_probe(void);
- pmac_ide_probe();
+ extern int pmac_ide_probe(void);
+ (void)pmac_ide_probe();
}
#endif /* CONFIG_BLK_DEV_IDE_PMAC */
#ifdef CONFIG_BLK_DEV_GAYLE
* mode 4 for a while now with no trouble.) -Derek
*/
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
return 0;
}
+int probe_ali14xx = 0;
+
+module_param_named(probe, probe_ali14xx, bool, 0);
+MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
+
/* Can be called directly from ide.c. */
int __init ali14xx_init(void)
{
+ if (probe_ali14xx == 0)
+ goto out;
+
/* auto-detect IDE controller port */
if (findPort()) {
if (ali14xx_probe())
return 0;
}
printk(KERN_ERR "ali14xx: not found.\n");
+out:
return -ENODEV;
}
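The probe-gating idiom introduced here recurs in the dtc2278, ht6560b,
qd65xx and umc8672 patches below. Schematically (names hypothetical, a
sketch of the pattern rather than any one driver):

int probe_example = 0;		/* hypothetical driver */

module_param_named(probe, probe_example, bool, 0);
MODULE_PARM_DESC(probe, "probe for the example chipset");

int __init example_init(void)
{
	if (probe_example == 0)		/* stay inert unless asked to probe */
		return -ENODEV;

	/* example_probe() assumed to return nonzero on failure */
	return example_probe() ? -ENODEV : 0;
}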
* Copyright (C) 1996 Linus Torvalds & author (see below)
*/
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
HWIF(drive)->drives[!drive->select.b.unit].io_32bit = 1;
}
-static int __init probe_dtc2278(void)
+static int __init dtc2278_probe(void)
{
unsigned long flags;
ide_hwif_t *hwif, *mate;
return 0;
}
+int probe_dtc2278 = 0;
+
+module_param_named(probe, probe_dtc2278, bool, 0);
+MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
+
/* Can be called directly from ide.c. */
int __init dtc2278_init(void)
{
- if (probe_dtc2278()) {
+ if (probe_dtc2278 == 0)
+ return -ENODEV;
+
+ if (dtc2278_probe()) {
printk(KERN_ERR "dtc2278: ide interfaces already in use!\n");
return -EBUSY;
}
#define HT6560B_VERSION "v0.07"
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#endif
}
+int probe_ht6560b = 0;
+
+module_param_named(probe, probe_ht6560b, bool, 0);
+MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
+
/* Can be called directly from ide.c. */
int __init ht6560b_init(void)
{
ide_hwif_t *hwif, *mate;
int t;
+ if (probe_ht6560b == 0)
+ return -ENODEV;
+
hwif = &ide_hwifs[0];
mate = &ide_hwifs[1];
static struct pcmcia_device_id ide_ids[] = {
PCMCIA_DEVICE_FUNC_ID(4),
PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
+ PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */
+ PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */
PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
- PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401),
+ PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
- PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar */
+ PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */
+ PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */
PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
* Please set local bus speed using kernel parameter idebus
* for example, "idebus=33" stands for 33Mhz VLbus
* To activate controller support, use "ide0=qd65xx"
- * To enable tuning, use "ide0=autotune"
- * To enable second channel tuning (qd6580 only), use "ide1=autotune"
+ * To enable tuning, use "hda=autotune hdb=autotune"
+ * To enable 2nd channel tuning (qd6580 only), use "hdc=autotune hdd=autotune"
*/
/*
* Samuel Thibault <samuel.thibault@fnac.net>
*/
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
return 1;
}
+int probe_qd65xx = 0;
+
+module_param_named(probe, probe_qd65xx, bool, 0);
+MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
+
/* Can be called directly from ide.c. */
int __init qd65xx_init(void)
{
+ if (probe_qd65xx == 0)
+ return -ENODEV;
+
if (qd_probe(0x30))
qd_probe(0xb0);
if (ide_hwifs[0].chipset != ide_qd65xx &&
return 0;
}
+int probe_umc8672 = 0;
+
+module_param_named(probe, probe_umc8672, bool, 0);
+MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
+
/* Can be called directly from ide.c. */
int __init umc8672_init(void)
{
- if (umc8672_probe())
- return -ENODEV;
- return 0;
+ if (probe_umc8672 == 0)
+ goto out;
+
+	if (umc8672_probe() == 0)
+		return 0;
+out:
+	return -ENODEV;
}
#ifdef MODULE
* Note: for more information, please refer "AMD Alchemy Au1200/Au1550 IDE
* Interface and Linux Device Driver" Application Note.
*/
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
-
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
/*
- * linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02
+ * linux/drivers/ide/pci/alim15x3.c Version 0.21 2007/02/03
*
* Copyright (C) 1998-2000 Michel Aubry, Maintainer
* Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
* May be copied or modified under the terms of the GNU General Public License
* Copyright (C) 2002 Alan Cox <alan@redhat.com>
* ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
+ * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
*
* (U)DMA capable version of ali 1533/1543(C), 1535(D)
*
#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) */
/**
- * ali15x3_tune_drive - set up a drive
+ * ali15x3_tune_pio - set up chipset for PIO mode
* @drive: drive to tune
- * @pio: unused
+ * @pio: desired mode
*
- * Select the best PIO timing for the drive in question. Then
- * program the controller for this drive set up
+ * Select the best PIO mode for the drive in question.
+ * Then program the controller for this mode.
+ *
+ * Returns the PIO mode programmed.
*/
-static void ali15x3_tune_drive (ide_drive_t *drive, u8 pio)
+static u8 ali15x3_tune_pio (ide_drive_t *drive, u8 pio)
{
ide_pio_data_t d;
ide_hwif_t *hwif = HWIF(drive);
* { 20, 50, 30 } PIO Mode 5 with IORDY (nonstandard)
*/
+ return pio;
+}
+
+/**
+ * ali15x3_tune_drive - set up drive for PIO mode
+ * @drive: drive to tune
+ * @pio: desired mode
+ *
+ * Program the controller with the best PIO timing for the given drive.
+ * Then set up the drive itself.
+ */
+
+static void ali15x3_tune_drive (ide_drive_t *drive, u8 pio)
+{
+ pio = ali15x3_tune_pio(drive, pio);
+ (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
}
/**
}
/**
- * ali15x3_tune_chipset - set up chiset for new speed
+ * ali15x3_tune_chipset - set up chipset/drive for new speed
* @drive: drive to configure for
* @xferspeed: desired speed
*
pci_write_config_byte(dev, m5229_udma, tmpbyte);
if (speed < XFER_SW_DMA_0)
- ali15x3_tune_drive(drive, speed);
+ (void) ali15x3_tune_pio(drive, speed - XFER_PIO_0);
} else {
pci_read_config_byte(dev, m5229_udma, &tmpbyte);
tmpbyte &= (0x0f << ((1-unit) << 2));
* (patch courtesy of Zoltan Hidvegi)
*/
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
#define CMD640_PREFETCH_MASKS 1
//#define CMD640_DUMP_REGS
/* $Id: cmd64x.c,v 1.21 2000/01/30 23:23:16
*
- * linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002
+ * linux/drivers/ide/pci/cmd64x.c Version 1.41 Feb 3, 2007
*
* cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
* Note, this driver is not used at all on other systems because
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
*
* Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
*/
#include <linux/module.h>
}
/*
- * Attempts to set the interface PIO mode.
- * The preferred method of selecting PIO modes (e.g. mode 4) is
- * "echo 'piomode:4' > /proc/ide/hdx/settings". Special cases are
- * 8: prefetch off, 9: prefetch on, 255: auto-select best mode.
- * Called with 255 at boot time.
+ * This routine selects drive's best PIO mode, calculates setup/active/recovery
+ * counts, and then writes them into the chipset registers.
*/
-
-static void cmd64x_tuneproc (ide_drive_t *drive, u8 mode_wanted)
+static u8 cmd64x_tune_pio (ide_drive_t *drive, u8 mode_wanted)
{
int setup_time, active_time, recovery_time;
int clock_time, pio_mode, cycle_time;
u8 recovery_count2, cycle_count;
int setup_count, active_count, recovery_count;
int bus_speed = system_bus_clock();
- /*byte b;*/
ide_pio_data_t d;
- switch (mode_wanted) {
- case 8: /* set prefetch off */
- case 9: /* set prefetch on */
- mode_wanted &= 1;
- /*set_prefetch_mode(index, mode_wanted);*/
- cmdprintk("%s: %sabled cmd640 prefetch\n",
- drive->name, mode_wanted ? "en" : "dis");
- return;
- }
-
- mode_wanted = ide_get_best_pio_mode (drive, mode_wanted, 5, &d);
- pio_mode = d.pio_mode;
+ pio_mode = ide_get_best_pio_mode(drive, mode_wanted, 5, &d);
cycle_time = d.cycle_time;
/*
* I copied all this complicated stuff from cmd640.c and made a few
* minor changes. For now I am just going to pray that it is correct.
*/
- if (pio_mode > 5)
- pio_mode = 5;
setup_time = ide_pio_timings[pio_mode].setup_time;
active_time = ide_pio_timings[pio_mode].active_time;
recovery_time = cycle_time - (setup_time + active_time);
if (active_count > 16)
active_count = 16; /* maximum allowed by cmd646 */
- /*
- * In a perfect world, we might set the drive pio mode here
- * (using WIN_SETFEATURE) before continuing.
- *
- * But we do not, because:
- * 1) this is the wrong place to do it
- * (proper is do_special() in ide.c)
- * 2) in practice this is rarely, if ever, necessary
- */
program_drive_counts (drive, setup_count, active_count, recovery_count);
- cmdprintk("%s: selected cmd646 PIO mode%d : %d (%dns)%s, "
+ cmdprintk("%s: PIO mode wanted %d, selected %d (%dns)%s, "
"clocks=%d/%d/%d\n",
- drive->name, pio_mode, mode_wanted, cycle_time,
+ drive->name, mode_wanted, pio_mode, cycle_time,
d.overridden ? " (overriding vendor mode)" : "",
setup_count, active_count, recovery_count);
+
+ return pio_mode;
+}
+
+/*
+ * Attempts to set drive's PIO mode.
+ * Special cases are 8: prefetch off, 9: prefetch on (both never worked),
+ * and 255: auto-select best mode (used at boot time).
+ */
+static void cmd64x_tune_drive (ide_drive_t *drive, u8 pio)
+{
+ /*
+ * Filter out the prefetch control values
+ * to prevent PIO5 from being programmed
+ */
+ if (pio == 8 || pio == 9)
+ return;
+
+ pio = cmd64x_tune_pio(drive, pio);
+ (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
}
static u8 cmd64x_ratemask (ide_drive_t *drive)
return mode;
}
-static void config_cmd64x_chipset_for_pio (ide_drive_t *drive, u8 set_speed)
-{
- u8 speed = 0x00;
- u8 set_pio = ide_get_best_pio_mode(drive, 4, 5, NULL);
-
- cmd64x_tuneproc(drive, set_pio);
- speed = XFER_PIO_0 + set_pio;
- if (set_speed)
- (void) ide_config_drive_speed(drive, speed);
-}
-
-static void config_chipset_for_pio (ide_drive_t *drive, u8 set_speed)
-{
- config_cmd64x_chipset_for_pio(drive, set_speed);
-}
-
static int cmd64x_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
ide_hwif_t *hwif = HWIF(drive);
u8 speed = ide_rate_filter(cmd64x_ratemask(drive), xferspeed);
- if (speed > XFER_PIO_4) {
+ if (speed >= XFER_SW_DMA_0) {
(void) pci_read_config_byte(dev, pciD, ®D);
(void) pci_read_config_byte(dev, pciU, ®U);
regD &= ~(unit ? 0x40 : 0x20);
case XFER_SW_DMA_2: regD |= (unit ? 0x40 : 0x10); break;
case XFER_SW_DMA_1: regD |= (unit ? 0x80 : 0x20); break;
case XFER_SW_DMA_0: regD |= (unit ? 0xC0 : 0x30); break;
- case XFER_PIO_4: cmd64x_tuneproc(drive, 4); break;
- case XFER_PIO_3: cmd64x_tuneproc(drive, 3); break;
- case XFER_PIO_2: cmd64x_tuneproc(drive, 2); break;
- case XFER_PIO_1: cmd64x_tuneproc(drive, 1); break;
- case XFER_PIO_0: cmd64x_tuneproc(drive, 0); break;
+ case XFER_PIO_5:
+ case XFER_PIO_4:
+ case XFER_PIO_3:
+ case XFER_PIO_2:
+ case XFER_PIO_1:
+ case XFER_PIO_0:
+ (void) cmd64x_tune_pio(drive, speed - XFER_PIO_0);
+ break;
default:
return 1;
}
- if (speed > XFER_PIO_4) {
+ if (speed >= XFER_SW_DMA_0) {
(void) pci_write_config_byte(dev, pciU, regU);
regD |= (unit ? 0x40 : 0x20);
(void) pci_write_config_byte(dev, pciD, regD);
{
u8 speed = ide_dma_speed(drive, cmd64x_ratemask(drive));
- config_chipset_for_pio(drive, !speed);
-
if (!speed)
return 0;
return 0;
if (ide_use_fast_pio(drive))
- config_chipset_for_pio(drive, 1);
+ cmd64x_tune_drive(drive, 255);
return -1;
}
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
class_rev &= 0xff;
- hwif->tuneproc = &cmd64x_tuneproc;
+ hwif->tuneproc = &cmd64x_tune_drive;
hwif->speedproc = &cmd64x_tune_chipset;
- if (!hwif->dma_base) {
- hwif->drives[0].autotune = 1;
- hwif->drives[1].autotune = 1;
+ hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
+
+ if (!hwif->dma_base)
return;
- }
hwif->atapi_dma = 1;
static int
delkin_cb_init (void)
{
- return pci_module_init(&driver);
+ return pci_register_driver(&driver);
}
static void
* are deemed to be part of the source code.
*/
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
-
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
* There is a 25/33MHz switch in configuration
* register, but driver is written for use at any frequency which get
* (use idebus=xx to select PCI bus speed).
- * Use ide0=autotune for automatical tune of the PIO modes.
+ * Use hda=autotune and hdb=autotune for automatic tuning of the PIO modes.
* If you get strange results, do not use this and set PIO manually
* by hdparm.
*
* 0.5 doesn't work.
*/
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
#define OPTI621_DEBUG /* define for debug messages */
#include <linux/types.h>
/*
- * linux/drivers/ide/pci/piix.c Version 0.46 December 3, 2006
+ * linux/drivers/ide/pci/piix.c Version 0.47 February 8, 2007
*
* Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
- * Copyright (C) 2006 MontaVista Software, Inc. <source@mvista.com>
+ * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
*
* May be copied or modified under the terms of the GNU General Public License
*
}
/**
- * piix_tune_drive - tune a drive attached to a PIIX
+ * piix_tune_pio - tune PIIX for PIO mode
* @drive: drive to tune
* @pio: desired PIO mode
*
- * Set the interface PIO mode based upon the settings done by AMI BIOS
- * (might be useful if drive is not registered in CMOS for any reason).
+ * Set the interface PIO mode based upon the settings done by AMI BIOS.
*/
-static void piix_tune_drive (ide_drive_t *drive, u8 pio)
+static void piix_tune_pio (ide_drive_t *drive, u8 pio)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
{ 2, 1 },
{ 2, 3 }, };
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
-
/*
* Master vs slave is synchronized above us but the slave register is
* shared by the two hwifs so the corner case of two slave timeouts in
master_data |= 0x4000;
master_data &= ~0x0070;
if (pio > 1) {
- /* enable PPE, IE and TIME */
- master_data = master_data | (control << 4);
+ /* Set PPE, IE and TIME */
+ master_data |= control << 4;
}
pci_read_config_byte(dev, slave_port, &slave_data);
- slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0);
- slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0));
+ slave_data &= hwif->channel ? 0x0f : 0xf0;
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) <<
+ (hwif->channel ? 4 : 0);
} else {
master_data &= ~0x3307;
if (pio > 1) {
/* enable PPE, IE and TIME */
- master_data = master_data | control;
+ master_data |= control;
}
- master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8);
+ master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
}
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
spin_unlock_irqrestore(&tune_lock, flags);
}
+/**
+ * piix_tune_drive - tune a drive attached to PIIX
+ * @drive: drive to tune
+ * @pio: desired PIO mode
+ *
+ * Set the drive's PIO mode (might be useful if drive is not registered
+ * in CMOS for any reason).
+ */
+static void piix_tune_drive (ide_drive_t *drive, u8 pio)
+{
+ pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ piix_tune_pio(drive, pio);
+ (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
+}
+
/**
* piix_tune_chipset - tune a PIIX interface
* @drive: IDE drive to tune
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
}
- piix_tune_drive(drive, piix_dma_2_pio(speed));
- return (ide_config_drive_speed(drive, speed));
+ piix_tune_pio(drive, piix_dma_2_pio(speed));
+ return ide_config_drive_speed(drive, speed);
}
/**
return 0;
if (ide_use_fast_pio(drive))
- /* Find best PIO mode. */
- piix_tune_chipset(drive, XFER_PIO_0 +
- ide_get_best_pio_mode(drive, 255, 4, NULL));
+ piix_tune_drive(drive, 255);
return -1;
}
* Dunno if this fixes both ports, or only the primary port (?).
*/
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
-
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
* If you have strange problems with nVidia chipset systems please
* see the SI support documentation and update your system BIOS
* if neccessary
+ *
+ * The Dell DRAC4 has some interesting features including effectively hot
+ * unplugging/replugging the virtual CD interface when the DRAC is reset.
+ * This often causes drivers/ide/siimage to panic but is ok with the rather
+ * smarter code in libata.
*/
#include <linux/types.h>
/*
- * linux/drivers/ide/pci/slc90e66.c Version 0.13 December 30, 2006
+ * linux/drivers/ide/pci/slc90e66.c Version 0.14 February 8, 2007
*
* Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2006 MontaVista Software, Inc. <source@mvista.com>
+ * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
*
* This is a look-alike variation of the ICH0 PIIX4 Ultra-66,
* but this keeps the ISA-Bridge and slots alive.
}
}
-/*
- * Based on settings done by AMI BIOS
- * (might be useful if drive is not registered in CMOS for any reason).
- */
-static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
+static void slc90e66_tune_pio (ide_drive_t *drive, u8 pio)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
{ 2, 1 },
{ 2, 3 }, };
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
spin_lock_irqsave(&ide_lock, flags);
pci_read_config_word(dev, master_port, &master_data);
master_data |= 0x4000;
master_data &= ~0x0070;
if (pio > 1) {
- /* enable PPE, IE and TIME */
- master_data = master_data | (control << 4);
+ /* Set PPE, IE and TIME */
+ master_data |= control << 4;
}
pci_read_config_byte(dev, slave_port, &slave_data);
- slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0);
- slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0));
+ slave_data &= hwif->channel ? 0x0f : 0xf0;
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) <<
+ (hwif->channel ? 4 : 0);
} else {
master_data &= ~0x3307;
if (pio > 1) {
/* enable PPE, IE and TIME */
- master_data = master_data | control;
+ master_data |= control;
}
- master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8);
+ master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
}
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
spin_unlock_irqrestore(&ide_lock, flags);
}
+static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
+{
+ pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ slc90e66_tune_pio(drive, pio);
+ (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
+}
+
static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
ide_hwif_t *hwif = HWIF(drive);
pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
}
- slc90e66_tune_drive(drive, slc90e66_dma_2_pio(speed));
- return (ide_config_drive_speed(drive, speed));
+ slc90e66_tune_pio(drive, slc90e66_dma_2_pio(speed));
+ return ide_config_drive_speed(drive, speed);
}
static int slc90e66_config_drive_for_dma (ide_drive_t *drive)
return 0;
if (ide_use_fast_pio(drive))
- (void)slc90e66_tune_chipset(drive, XFER_PIO_0 +
- ide_get_best_pio_mode(drive, 255, 4, NULL));
+ slc90e66_tune_drive(drive, 255);
return -1;
}
#include <asm/mediabay.h>
#endif
-#include "ide-timing.h"
+#include "../ide-timing.h"
#undef IDE_PMAC_DEBUG
};
MODULE_DEVICE_TABLE(pci, pmac_ide_pci_match);
-void __init
-pmac_ide_probe(void)
+int __init pmac_ide_probe(void)
{
+ int error;
+
if (!machine_is(powermac))
- return;
+ return -ENODEV;
#ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
- pci_register_driver(&pmac_ide_pci_driver);
- macio_register_driver(&pmac_ide_macio_driver);
+ error = pci_register_driver(&pmac_ide_pci_driver);
+ if (error)
+ goto out;
+ error = macio_register_driver(&pmac_ide_macio_driver);
+ if (error) {
+ pci_unregister_driver(&pmac_ide_pci_driver);
+ goto out;
+ }
#else
- macio_register_driver(&pmac_ide_macio_driver);
- pci_register_driver(&pmac_ide_pci_driver);
+ error = macio_register_driver(&pmac_ide_macio_driver);
+ if (error)
+ goto out;
+ error = pci_register_driver(&pmac_ide_pci_driver);
+ if (error) {
+ macio_unregister_driver(&pmac_ide_macio_driver);
+ goto out;
+ }
#endif
+out:
+ return error;
}
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
{
}
-static int pmac_ide_dma_host_on(ide_drive_t *drive)
+static void pmac_ide_dma_host_on(ide_drive_t *drive)
{
}
return __ide_dma_end(drive);
}
+/* returns 1 if dma irq issued, 0 otherwise */
+static int scc_dma_test_irq(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = HWIF(drive);
+ u8 dma_stat = hwif->INB(hwif->dma_status);
+
+ /* return 1 if INTR asserted */
+ if ((dma_stat & 4) == 4)
+ return 1;
+
+ /* Workaround for PTERADD: emulate DMA_INTR when
+ * - IDE_STATUS[ERR] = 1
+ * - INT_STATUS[INTRQ] = 1
+ * - DMA_STATUS[IORACTA] = 1
+ */
+ if (in_be32((void __iomem *)IDE_ALTSTATUS_REG) & ERR_STAT &&
+ in_be32((void __iomem *)(hwif->dma_base + 0x014)) & INTSTS_INTRQ &&
+ dma_stat & 1)
+ return 1;
+
+ if (!drive->waiting_for_dma)
+ printk(KERN_WARNING "%s: (%s) called while not waiting\n",
+ drive->name, __FUNCTION__);
+ return 0;
+}
+
/**
* setup_mmio_scc - map CTRL/BMID region
* @dev: PCI device we are configuring
hwif->speedproc = scc_tune_chipset;
hwif->tuneproc = scc_tuneproc;
hwif->ide_dma_check = scc_config_drive_for_dma;
+ hwif->ide_dma_test_irq = scc_dma_test_irq;
hwif->drives[0].autotune = IDE_TUNE_AUTO;
hwif->drives[1].autotune = IDE_TUNE_AUTO;
module will be called aaed2000_kbd.
config KEYBOARD_GPIO
- tristate "Buttons on CPU GPIOs (PXA)"
- depends on (ARCH_SA1100 || ARCH_PXA || ARCH_S3C2410)
+ tristate "GPIO Buttons"
+ depends on GENERIC_GPIO
help
This driver implements support for buttons connected
- directly to GPIO pins of SA1100, PXA or S3C24xx CPUs.
+ to GPIO pins of various CPUs (and some other chips).
Say Y here if your device has buttons connected
- directly to GPIO pins of the CPU.
+ directly to such GPIO pins. Your board-specific
+ setup logic must also provide a platform device,
+ with configuration data saying which GPIOs are used.
To compile this driver as a module, choose M here: the
module will be called gpio-keys.
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/irq.h>
+#include <linux/gpio_keys.h>
#include <asm/gpio.h>
-#include <asm/arch/hardware.h>
-
-#include <asm/hardware/gpio_keys.h>
static irqreturn_t gpio_keys_isr(int irq, void *dev_id)
{
*/
param = 0x5a;
- if (i8042_command(¶m, I8042_CMD_AUX_LOOP) || param != 0x5a) {
+ retval = i8042_command(¶m, I8042_CMD_AUX_LOOP);
+ if (retval || param != 0x5a) {
/*
* External connection test - filters out AT-soldered PS/2 i8042's
(param && param != 0xfa && param != 0xff))
return -1;
- aux_loop_broken = 1;
+/*
+ * If AUX_LOOP completed without error but returned unexpected data
+ * mark it as broken
+ */
+ if (!retval)
+ aux_loop_broken = 1;
}
/*
# Config.in for the CAPI subsystem
#
config ISDN_DRV_AVMB1_VERBOSE_REASON
- bool "Verbose reason code reporting (kernel size +=7K)"
+ bool "Verbose reason code reporting"
depends on ISDN_CAPI
+ default y
help
- If you say Y here, the AVM B1 driver will give verbose reasons for
+ If you say Y here, the CAPI drivers will give verbose reasons for
disconnecting. This will increase the size of the kernel by 7 KB. If
unsure, say Y.
+config CAPI_TRACE
+ bool "CAPI trace support"
+ depends on ISDN_CAPI
+ default y
+ help
+	  If you say Y here, the kernelcapi driver can produce verbose traces
+	  of CAPI messages. This feature can be enabled/disabled via IOCTL for
+	  every controller (default disabled).
+ This will increase the size of the kernelcapi module by 20 KB.
+ If unsure, say Y.
+
config ISDN_CAPI_MIDDLEWARE
bool "CAPI2.0 Middleware support (EXPERIMENTAL)"
depends on ISDN_CAPI && EXPERIMENTAL
capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
capidrv_plci *plcip;
isdn_ctrl cmd;
+ _cdebbuf *cdb;
if (!card) {
printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
break;
}
}
- printk(KERN_ERR "capidrv-%d: %s\n",
- card->contrnr, capi_cmsg2str(cmsg));
+ cdb = capi_cmsg2str(cmsg);
+ if (cdb) {
+ printk(KERN_WARNING "capidrv-%d: %s\n",
+ card->contrnr, cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_WARNING "capidrv-%d: CAPI_INFO_IND InfoNumber %x not handled\n",
+ card->contrnr, cmsg->InfoNumber);
+
break;
case CAPI_CONNECT_ACTIVE_CONF: /* plci */
static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
{
capi_message2cmsg(&s_cmsg, skb->data);
- if (debugmode > 3)
- printk(KERN_DEBUG "capidrv_signal: applid=%d %s\n",
- ap->applid, capi_cmsg2str(&s_cmsg));
-
+ if (debugmode > 3) {
+ _cdebbuf *cdb = capi_cmsg2str(&s_cmsg);
+
+ if (cdb) {
+ printk(KERN_DEBUG "%s: applid=%d %s\n", __FUNCTION__,
+ ap->applid, cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_DEBUG "%s: applid=%d %s not traced\n",
+ __FUNCTION__, ap->applid,
+ capi_cmd2str(s_cmsg.Command, s_cmsg.Subcommand));
+ }
if (s_cmsg.Command == CAPI_DATA_B3
&& s_cmsg.Subcommand == CAPI_IND) {
handle_data(&s_cmsg, skb);
/*-------------------------------------------------------*/
+
+#ifdef CONFIG_CAPI_TRACE
+
/*-------------------------------------------------------*/
static char *pnames[] =
};
-static char buf[8192];
-static char *p = NULL;
#include <stdarg.h>
/*-------------------------------------------------------*/
-static void bufprint(char *fmt,...)
+static _cdebbuf *bufprint(_cdebbuf *cdb, char *fmt,...)
{
va_list f;
+	size_t n, r;
+
+ if (!cdb)
+ return NULL;
va_start(f, fmt);
- vsprintf(p, fmt, f);
+ r = cdb->size - cdb->pos;
+ n = vsnprintf(cdb->p, r, fmt, f);
va_end(f);
- p += strlen(p);
+ if (n >= r) {
+ /* truncated, need bigger buffer */
+ size_t ns = 2 * cdb->size;
+ u_char *nb;
+
+ while ((ns - cdb->pos) <= n)
+ ns *= 2;
+ nb = kmalloc(ns, GFP_ATOMIC);
+ if (!nb) {
+ cdebbuf_free(cdb);
+ return NULL;
+ }
+ memcpy(nb, cdb->buf, cdb->pos);
+ kfree(cdb->buf);
+ nb[cdb->pos] = 0;
+ cdb->buf = nb;
+ cdb->p = cdb->buf + cdb->pos;
+ cdb->size = ns;
+ va_start(f, fmt);
+ r = cdb->size - cdb->pos;
+ n = vsnprintf(cdb->p, r, fmt, f);
+ va_end(f);
+ }
+ cdb->p += n;
+ cdb->pos += n;
+ return cdb;
}
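To make the reallocation rule concrete, a worked example (numbers
illustrative):

/* Worked example for the growth loop above: with cdb->size = 256,
 * cdb->pos = 200 and vsnprintf() reporting n = 600 bytes needed,
 * ns starts at 2 * 256 = 512; since 512 - 200 <= 600 the output would
 * still truncate, so ns doubles to 1024, where 1024 - 200 > 600 fits,
 * and the buffer is regrown to 1024 bytes before reformatting. */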
-static void printstructlen(u8 * m, unsigned len)
+static _cdebbuf *printstructlen(_cdebbuf *cdb, u8 * m, unsigned len)
{
unsigned hex = 0;
+
+ if (!cdb)
+ return NULL;
for (; len; len--, m++)
if (isalnum(*m) || *m == ' ') {
if (hex)
- bufprint(">");
- bufprint("%c", *m);
+ cdb = bufprint(cdb, ">");
+ cdb = bufprint(cdb, "%c", *m);
hex = 0;
} else {
if (!hex)
- bufprint("<%02x", *m);
+ cdb = bufprint(cdb, "<%02x", *m);
else
- bufprint(" %02x", *m);
+ cdb = bufprint(cdb, " %02x", *m);
hex = 1;
}
if (hex)
- bufprint(">");
+ cdb = bufprint(cdb, ">");
+ return cdb;
}
-static void printstruct(u8 * m)
+static _cdebbuf *printstruct(_cdebbuf *cdb, u8 * m)
{
unsigned len;
+
if (m[0] != 0xff) {
len = m[0];
m += 1;
len = ((u16 *) (m + 1))[0];
m += 3;
}
- printstructlen(m, len);
+ cdb = printstructlen(cdb, m, len);
+ return cdb;
}
/*-------------------------------------------------------*/
#define NAME (pnames[cmsg->par[cmsg->p]])
-static void protocol_message_2_pars(_cmsg * cmsg, int level)
+static _cdebbuf *protocol_message_2_pars(_cdebbuf *cdb, _cmsg *cmsg, int level)
{
for (; TYP != _CEND; cmsg->p++) {
int slen = 29 + 3 - level;
int i;
- bufprint(" ");
+ if (!cdb)
+ return NULL;
+ cdb = bufprint(cdb, " ");
for (i = 0; i < level - 1; i++)
- bufprint(" ");
+ cdb = bufprint(cdb, " ");
switch (TYP) {
case _CBYTE:
- bufprint("%-*s = 0x%x\n", slen, NAME, *(u8 *) (cmsg->m + cmsg->l));
+ cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u8 *) (cmsg->m + cmsg->l));
cmsg->l++;
break;
case _CWORD:
- bufprint("%-*s = 0x%x\n", slen, NAME, *(u16 *) (cmsg->m + cmsg->l));
+ cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u16 *) (cmsg->m + cmsg->l));
cmsg->l += 2;
break;
case _CDWORD:
- bufprint("%-*s = 0x%lx\n", slen, NAME, *(u32 *) (cmsg->m + cmsg->l));
+ cdb = bufprint(cdb, "%-*s = 0x%lx\n", slen, NAME, *(u32 *) (cmsg->m + cmsg->l));
cmsg->l += 4;
break;
case _CSTRUCT:
- bufprint("%-*s = ", slen, NAME);
+ cdb = bufprint(cdb, "%-*s = ", slen, NAME);
if (cmsg->m[cmsg->l] == '\0')
- bufprint("default");
+ cdb = bufprint(cdb, "default");
else
- printstruct(cmsg->m + cmsg->l);
- bufprint("\n");
+ cdb = printstruct(cdb, cmsg->m + cmsg->l);
+ cdb = bufprint(cdb, "\n");
if (cmsg->m[cmsg->l] != 0xff)
cmsg->l += 1 + cmsg->m[cmsg->l];
else
case _CMSTRUCT:
/*----- Metastruktur 0 -----*/
if (cmsg->m[cmsg->l] == '\0') {
- bufprint("%-*s = default\n", slen, NAME);
+ cdb = bufprint(cdb, "%-*s = default\n", slen, NAME);
cmsg->l++;
jumpcstruct(cmsg);
} else {
char *name = NAME;
unsigned _l = cmsg->l;
- bufprint("%-*s\n", slen, name);
+ cdb = bufprint(cdb, "%-*s\n", slen, name);
cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1;
cmsg->p++;
- protocol_message_2_pars(cmsg, level + 1);
+ cdb = protocol_message_2_pars(cdb, cmsg, level + 1);
}
break;
}
}
+ return cdb;
}
/*-------------------------------------------------------*/
-char *capi_message2str(u8 * msg)
+
+static _cdebbuf *g_debbuf;
+static u_long g_debbuf_lock;
+static _cmsg *g_cmsg;
+
+_cdebbuf *cdebbuf_alloc(void)
{
+ _cdebbuf *cdb;
+
+ if (likely(!test_and_set_bit(1, &g_debbuf_lock))) {
+ cdb = g_debbuf;
+ goto init;
+ } else
+ cdb = kmalloc(sizeof(_cdebbuf), GFP_ATOMIC);
+ if (!cdb)
+ return NULL;
+ cdb->buf = kmalloc(CDEBUG_SIZE, GFP_ATOMIC);
+ if (!cdb->buf) {
+ kfree(cdb);
+ return NULL;
+ }
+ cdb->size = CDEBUG_SIZE;
+init:
+ cdb->buf[0] = 0;
+ cdb->p = cdb->buf;
+ cdb->pos = 0;
+ return cdb;
+}
- _cmsg cmsg;
- p = buf;
- p[0] = 0;
+void cdebbuf_free(_cdebbuf *cdb)
+{
+ if (likely(cdb == g_debbuf)) {
+ test_and_clear_bit(1, &g_debbuf_lock);
+ return;
+ }
+ if (likely(cdb))
+ kfree(cdb->buf);
+ kfree(cdb);
+}
- cmsg.m = msg;
- cmsg.l = 8;
- cmsg.p = 0;
- byteTRcpy(cmsg.m + 4, &cmsg.Command);
- byteTRcpy(cmsg.m + 5, &cmsg.Subcommand);
- cmsg.par = cpars[command_2_index(cmsg.Command, cmsg.Subcommand)];
- bufprint("%-26s ID=%03d #0x%04x LEN=%04d\n",
- mnames[command_2_index(cmsg.Command, cmsg.Subcommand)],
+_cdebbuf *capi_message2str(u8 * msg)
+{
+ _cdebbuf *cdb;
+ _cmsg *cmsg;
+
+ cdb = cdebbuf_alloc();
+ if (unlikely(!cdb))
+ return NULL;
+ if (likely(cdb == g_debbuf))
+ cmsg = g_cmsg;
+ else
+ cmsg = kmalloc(sizeof(_cmsg), GFP_ATOMIC);
+ if (unlikely(!cmsg)) {
+ cdebbuf_free(cdb);
+ return NULL;
+ }
+ cmsg->m = msg;
+ cmsg->l = 8;
+ cmsg->p = 0;
+ byteTRcpy(cmsg->m + 4, &cmsg->Command);
+ byteTRcpy(cmsg->m + 5, &cmsg->Subcommand);
+ cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)];
+
+ cdb = bufprint(cdb, "%-26s ID=%03d #0x%04x LEN=%04d\n",
+ mnames[command_2_index(cmsg->Command, cmsg->Subcommand)],
((unsigned short *) msg)[1],
((unsigned short *) msg)[3],
((unsigned short *) msg)[0]);
- protocol_message_2_pars(&cmsg, 1);
- return buf;
+ cdb = protocol_message_2_pars(cdb, cmsg, 1);
+ if (unlikely(cmsg != g_cmsg))
+ kfree(cmsg);
+ return cdb;
}
-char *capi_cmsg2str(_cmsg * cmsg)
+_cdebbuf *capi_cmsg2str(_cmsg * cmsg)
{
- p = buf;
- p[0] = 0;
+ _cdebbuf *cdb;
+
+ cdb = cdebbuf_alloc();
+ if (!cdb)
+ return NULL;
cmsg->l = 8;
cmsg->p = 0;
- bufprint("%s ID=%03d #0x%04x LEN=%04d\n",
+ cdb = bufprint(cdb, "%s ID=%03d #0x%04x LEN=%04d\n",
mnames[command_2_index(cmsg->Command, cmsg->Subcommand)],
((u16 *) cmsg->m)[1],
((u16 *) cmsg->m)[3],
((u16 *) cmsg->m)[0]);
- protocol_message_2_pars(cmsg, 1);
- return buf;
+ cdb = protocol_message_2_pars(cdb, cmsg, 1);
+ return cdb;
}
+int __init cdebug_init(void)
+{
+	g_cmsg = kmalloc(sizeof(_cmsg), GFP_KERNEL);
+	if (!g_cmsg)
+		return -ENOMEM;
+	g_debbuf = kmalloc(sizeof(_cdebbuf), GFP_KERNEL);
+	if (!g_debbuf) {
+		kfree(g_cmsg);
+		return -ENOMEM;
+	}
+	g_debbuf->buf = kmalloc(CDEBUG_GSIZE, GFP_KERNEL);
+	if (!g_debbuf->buf) {
+		kfree(g_cmsg);
+		kfree(g_debbuf);
+		return -ENOMEM;
+ }
+ g_debbuf->size = CDEBUG_GSIZE;
+ g_debbuf->buf[0] = 0;
+ g_debbuf->p = g_debbuf->buf;
+ g_debbuf->pos = 0;
+ return 0;
+}
+
+void __exit cdebug_exit(void)
+{
+ if (g_debbuf)
+ kfree(g_debbuf->buf);
+ kfree(g_debbuf);
+ kfree(g_cmsg);
+}
+
+#else /* !CONFIG_CAPI_TRACE */
+
+static _cdebbuf g_debbuf = {"CONFIG_CAPI_TRACE not enabled", NULL, 0, 0};
+
+_cdebbuf *capi_message2str(u8 * msg)
+{
+ return &g_debbuf;
+}
+
+_cdebbuf *capi_cmsg2str(_cmsg * cmsg)
+{
+ return &g_debbuf;
+}
+
+_cdebbuf *cdebbuf_alloc(void)
+{
+ return &g_debbuf;
+}
+
+void cdebbuf_free(_cdebbuf *cdb)
+{
+}
+
+int __init cdebug_init(void)
+{
+ return 0;
+}
+
+void __exit cdebug_exit(void)
+{
+}
+
+#endif
+
+EXPORT_SYMBOL(cdebbuf_alloc);
+EXPORT_SYMBOL(cdebbuf_free);
EXPORT_SYMBOL(capi_cmsg2message);
EXPORT_SYMBOL(capi_message2cmsg);
EXPORT_SYMBOL(capi_cmsg_header);
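The kcapi.c and capidrv.c changes that follow apply the same allocation
discipline at every trace site; the distilled pattern is roughly (a
sketch, assuming a valid _cmsg *cmsg in scope):

/* Typical call-site pattern (sketch): every non-NULL result from
 * capi_cmsg2str()/capi_message2str() must be released with
 * cdebbuf_free(), since the fast path hands out the single shared
 * g_debbuf guarded by g_debbuf_lock. */
_cdebbuf *cdb = capi_cmsg2str(cmsg);

if (cdb) {
	printk(KERN_DEBUG "capi trace: %s\n", cdb->buf);
	cdebbuf_free(cdb);
}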
int showctl = 0;
u8 cmd, subcmd;
unsigned long flags;
+ _cdebbuf *cdb;
if (card->cardstate != CARD_RUNNING) {
- printk(KERN_INFO "kcapi: controller %d not active, got: %s",
- card->cnr, capi_message2str(skb->data));
+ cdb = capi_message2str(skb->data);
+ if (cdb) {
+ printk(KERN_INFO "kcapi: controller [%03d] not active, got: %s",
+ card->cnr, cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_INFO "kcapi: controller [%03d] not active, cannot trace\n",
+ card->cnr);
goto error;
}
showctl |= (card->traceflag & 1);
if (showctl & 2) {
if (showctl & 1) {
- printk(KERN_DEBUG "kcapi: got [0x%lx] id#%d %s len=%u\n",
- (unsigned long) card->cnr,
- CAPIMSG_APPID(skb->data),
+ printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u\n",
+ card->cnr, CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
} else {
- printk(KERN_DEBUG "kcapi: got [0x%lx] %s\n",
- (unsigned long) card->cnr,
- capi_message2str(skb->data));
+ cdb = capi_message2str(skb->data);
+ if (cdb) {
+ printk(KERN_DEBUG "kcapi: got [%03d] %s\n",
+ card->cnr, cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u, cannot trace\n",
+ card->cnr, CAPIMSG_APPID(skb->data),
+ capi_cmd2str(cmd, subcmd),
+ CAPIMSG_LEN(skb->data));
}
}
ap = get_capi_appl_by_nr(CAPIMSG_APPID(skb->data));
if ((!ap) || (ap->release_in_progress)) {
read_unlock_irqrestore(&application_lock, flags);
- printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s)\n",
- CAPIMSG_APPID(skb->data), capi_message2str(skb->data));
+ cdb = capi_message2str(skb->data);
+ if (cdb) {
+ printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s)\n",
+ CAPIMSG_APPID(skb->data), cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s) cannot trace\n",
+ CAPIMSG_APPID(skb->data),
+ capi_cmd2str(cmd, subcmd));
goto error;
}
skb_queue_tail(&ap->recv_queue, skb);
{
card->cardstate = CARD_RUNNING;
- printk(KERN_NOTICE "kcapi: card %d \"%s\" ready.\n",
+ printk(KERN_NOTICE "kcapi: card [%03d] \"%s\" ready.\n",
card->cnr, card->name);
notify_push(KCI_CONTRUP, card->cnr, 0, 0);
capi_ctr_put(card);
}
- printk(KERN_NOTICE "kcapi: card %d down.\n", card->cnr);
+ printk(KERN_NOTICE "kcapi: card [%03d] down.\n", card->cnr);
notify_push(KCI_CONTRDOWN, card->cnr, 0, 0);
}
void capi_ctr_suspend_output(struct capi_ctr *card)
{
if (!card->blocked) {
- printk(KERN_DEBUG "kcapi: card %d suspend\n", card->cnr);
+ printk(KERN_DEBUG "kcapi: card [%03d] suspend\n", card->cnr);
card->blocked = 1;
}
}
void capi_ctr_resume_output(struct capi_ctr *card)
{
if (card->blocked) {
- printk(KERN_DEBUG "kcapi: card %d resume\n", card->cnr);
+ printk(KERN_DEBUG "kcapi: card [%03d] resume\n", card->cnr);
card->blocked = 0;
}
}
}
ncards++;
- printk(KERN_NOTICE "kcapi: Controller %d: %s attached\n",
+ printk(KERN_NOTICE "kcapi: Controller [%03d]: %s attached\n",
card->cnr, card->name);
return 0;
}
card->procent = NULL;
}
capi_cards[card->cnr - 1] = NULL;
- printk(KERN_NOTICE "kcapi: Controller %d: %s unregistered\n",
+ printk(KERN_NOTICE "kcapi: Controller [%03d]: %s unregistered\n",
card->cnr, card->name);
return 0;
showctl |= (card->traceflag & 1);
if (showctl & 2) {
if (showctl & 1) {
- printk(KERN_DEBUG "kcapi: put [%#x] id#%d %s len=%u\n",
+ printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u\n",
CAPIMSG_CONTROLLER(skb->data),
CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
} else {
- printk(KERN_DEBUG "kcapi: put [%#x] %s\n",
- CAPIMSG_CONTROLLER(skb->data),
- capi_message2str(skb->data));
+ _cdebbuf *cdb = capi_message2str(skb->data);
+ if (cdb) {
+ printk(KERN_DEBUG "kcapi: put [%03d] %s\n",
+ CAPIMSG_CONTROLLER(skb->data),
+ cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u cannot trace\n",
+ CAPIMSG_CONTROLLER(skb->data),
+ CAPIMSG_APPID(skb->data),
+ capi_cmd2str(cmd, subcmd),
+ CAPIMSG_LEN(skb->data));
}
-
}
return card->send_message(card, skb);
}
return -ESRCH;
card->traceflag = fdef.flag;
- printk(KERN_INFO "kcapi: contr %d set trace=%d\n",
+ printk(KERN_INFO "kcapi: contr [%03d] set trace=%d\n",
card->cnr, card->traceflag);
return 0;
}
{
char *p;
char rev[32];
+ int ret;
+ ret = cdebug_init();
+ if (ret)
+ return ret;
kcapi_proc_init();
if ((p = strchr(revision, ':')) != 0 && p[1]) {
/* make sure all notifiers are finished */
flush_scheduled_work();
+ cdebug_exit();
}
module_init(kcapi_init);
gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o asyncdata.o
usb_gigaset-y := usb-gigaset.o
-bas_gigaset-y := bas-gigaset.o isocdata.o
ser_gigaset-y := ser-gigaset.o
+bas_gigaset-y := bas-gigaset.o isocdata.o
-obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o
-obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o
-obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o gigaset.o
+obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset.o
+obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o
+obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o
+obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o
* =====================================================================
*/
-/* not set by Kbuild when building both ser_gigaset and usb_gigaset */
-#ifndef KBUILD_MODNAME
-#define KBUILD_MODNAME "asy_gigaset"
-#endif
-
#include "gigaset.h"
#include <linux/crc-ccitt.h>
#include <linux/bitrev.h>
#include "vmx.h"
#include <linux/kvm.h>
+#include <linux/kvm_para.h>
#define CR0_PE_MASK (1ULL << 0)
#define CR0_TS_MASK (1ULL << 3)
unsigned long cr0;
unsigned long cr2;
unsigned long cr3;
+ gpa_t para_state_gpa;
+ struct page *para_state_page;
+ gpa_t hypercall_gpa;
unsigned long cr4;
unsigned long cr8;
u64 pdptrs[4]; /* pae */
int busy;
unsigned long rmap_overflow;
struct list_head vm_list;
+ struct file *filp;
};
struct kvm_stat {
int (*vcpu_create)(struct kvm_vcpu *vcpu);
void (*vcpu_free)(struct kvm_vcpu *vcpu);
- struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu);
+ void (*vcpu_load)(struct kvm_vcpu *vcpu);
void (*vcpu_put)(struct kvm_vcpu *vcpu);
void (*vcpu_decache)(struct kvm_vcpu *vcpu);
int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
int (*vcpu_setup)(struct kvm_vcpu *vcpu);
void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
+ void (*patch_hypercall)(struct kvm_vcpu *vcpu,
+ unsigned char *hypercall_addr);
};
extern struct kvm_stat kvm_stat;
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
u32 error_code)
{
{
struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
- return (struct kvm_mmu_page *)page->private;
+ return (struct kvm_mmu_page *)page_private(page);
}
static inline u16 read_fs(void)
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/magic.h>
#include <asm/processor.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <asm/desc.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
#include "x86_emulate.h"
#include "segment_descriptor.h"
static struct dentry *debugfs_dir;
+struct vfsmount *kvmfs_mnt;
+
#define MAX_IO_MSRS 256
#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
#endif
+static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
+ unsigned long arg);
+
+static struct inode *kvmfs_inode(struct file_operations *fops)
+{
+ int error = -ENOMEM;
+ struct inode *inode = new_inode(kvmfs_mnt->mnt_sb);
+
+ if (!inode)
+ goto eexit_1;
+
+ inode->i_fop = fops;
+
+ /*
+ * Mark the inode dirty from the very beginning,
+ * that way it will never be moved to the dirty
+ * list because mark_inode_dirty() will think
+ * that it already _is_ on the dirty list.
+ */
+ inode->i_state = I_DIRTY;
+ inode->i_mode = S_IRUSR | S_IWUSR;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ return inode;
+
+eexit_1:
+ return ERR_PTR(error);
+}
+
+static struct file *kvmfs_file(struct inode *inode, void *private_data)
+{
+ struct file *file = get_empty_filp();
+
+ if (!file)
+ return ERR_PTR(-ENFILE);
+
+ file->f_path.mnt = mntget(kvmfs_mnt);
+ file->f_path.dentry = d_alloc_anon(inode);
+ if (!file->f_path.dentry)
+ return ERR_PTR(-ENOMEM);
+ file->f_mapping = inode->i_mapping;
+
+ file->f_pos = 0;
+ file->f_flags = O_RDWR;
+ file->f_op = inode->i_fop;
+ file->f_mode = FMODE_READ | FMODE_WRITE;
+ file->f_version = 0;
+ file->private_data = private_data;
+ return file;
+}
+
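Together these helpers let the ioctl paths hand a VM out as an anonymous
kvmfs file descriptor, roughly as in this sketch (error handling trimmed;
sketch_vm_fd is a hypothetical name, not part of the patch):

/* Sketch: wiring a kvm object up as a file descriptor via the helpers
 * above (error paths omitted for brevity). */
static int sketch_vm_fd(struct kvm *kvm, struct file_operations *fops)
{
	struct inode *inode = kvmfs_inode(fops);
	struct file *file = kvmfs_file(inode, kvm);
	int fd = get_unused_fd();

	fd_install(fd, file);	/* fd now owns the reference */
	return fd;
}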
unsigned long segment_base(u16 selector)
{
struct descriptor_table gdt;
return likely(n >= 0 && n < KVM_MAX_VCPUS);
}
-int kvm_read_guest(struct kvm_vcpu *vcpu,
- gva_t addr,
- unsigned long size,
- void *dest)
+int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
+ void *dest)
{
unsigned char *host_buf = dest;
unsigned long req_size = size;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
-int kvm_write_guest(struct kvm_vcpu *vcpu,
- gva_t addr,
- unsigned long size,
- void *data)
+int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
+ void *data)
{
unsigned char *host_buf = data;
unsigned long req_size = size;
unsigned now;
unsigned offset;
hva_t guest_buf;
+ gfn_t gfn;
paddr = gva_to_hpa(vcpu, addr);
if (is_error_hpa(paddr))
break;
+ gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT;
+ mark_page_dirty(vcpu->kvm, gfn);
guest_buf = (hva_t)kmap_atomic(
pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
offset = addr & ~PAGE_MASK;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);
-static int vcpu_slot(struct kvm_vcpu *vcpu)
+/*
+ * Switches to specified vcpu, until a matching vcpu_put()
+ */
+static void vcpu_load(struct kvm_vcpu *vcpu)
{
- return vcpu - vcpu->kvm->vcpus;
+ mutex_lock(&vcpu->mutex);
+ kvm_arch_ops->vcpu_load(vcpu);
}
/*
- * Switches to specified vcpu, until a matching vcpu_put()
+ * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL
+ * if the slot is not populated.
*/
-static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
+static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot)
{
- struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot];
+ struct kvm_vcpu *vcpu = &kvm->vcpus[slot];
mutex_lock(&vcpu->mutex);
- if (unlikely(!vcpu->vmcs)) {
+ if (!vcpu->vmcs) {
mutex_unlock(&vcpu->mutex);
return NULL;
}
- return kvm_arch_ops->vcpu_load(vcpu);
+ kvm_arch_ops->vcpu_load(vcpu);
+ return vcpu;
}
static void vcpu_put(struct kvm_vcpu *vcpu)
mutex_unlock(&vcpu->mutex);
}
-static int kvm_dev_open(struct inode *inode, struct file *filp)
+static struct kvm *kvm_create_vm(void)
{
struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
int i;
if (!kvm)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
spin_lock_init(&kvm->lock);
INIT_LIST_HEAD(&kvm->active_mmu_pages);
list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock);
}
- filp->private_data = kvm;
+ return kvm;
+}
+
+static int kvm_dev_open(struct inode *inode, struct file *filp)
+{
return 0;
}
static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
- if (!vcpu_load(vcpu->kvm, vcpu_slot(vcpu)))
+ if (!vcpu->vmcs)
return;
+ vcpu_load(vcpu);
kvm_mmu_destroy(vcpu);
vcpu_put(vcpu);
kvm_arch_ops->vcpu_free(vcpu);
static int kvm_dev_release(struct inode *inode, struct file *filp)
{
- struct kvm *kvm = filp->private_data;
+ return 0;
+}
+static void kvm_destroy_vm(struct kvm *kvm)
+{
spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
kvm_free_vcpus(kvm);
kvm_free_physmem(kvm);
kfree(kvm);
+}
+
+static int kvm_vm_release(struct inode *inode, struct file *filp)
+{
+ struct kvm *kvm = filp->private_data;
+
+ kvm_destroy_vm(kvm);
return 0;
}
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
if (is_long_mode(vcpu)) {
- if ( cr3 & CR3_L_MODE_RESEVED_BITS) {
+ if (cr3 & CR3_L_MODE_RESEVED_BITS) {
printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
inject_gp(vcpu);
return;
}
EXPORT_SYMBOL_GPL(fx_init);
-/*
- * Creates some virtual cpus. Good luck creating more than one.
- */
-static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
+static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
{
- int r;
- struct kvm_vcpu *vcpu;
-
- r = -EINVAL;
- if (!valid_vcpu(n))
- goto out;
-
- vcpu = &kvm->vcpus[n];
-
- mutex_lock(&vcpu->mutex);
-
- if (vcpu->vmcs) {
- mutex_unlock(&vcpu->mutex);
- return -EEXIST;
- }
-
- vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
- FX_IMAGE_ALIGN);
- vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
-
- r = kvm_arch_ops->vcpu_create(vcpu);
- if (r < 0)
- goto out_free_vcpus;
-
- r = kvm_mmu_create(vcpu);
- if (r < 0)
- goto out_free_vcpus;
-
- kvm_arch_ops->vcpu_load(vcpu);
- r = kvm_mmu_setup(vcpu);
- if (r >= 0)
- r = kvm_arch_ops->vcpu_setup(vcpu);
- vcpu_put(vcpu);
-
- if (r < 0)
- goto out_free_vcpus;
-
- return 0;
-
-out_free_vcpus:
- kvm_free_vcpu(vcpu);
- mutex_unlock(&vcpu->mutex);
-out:
- return r;
+ spin_lock(&vcpu->kvm->lock);
+ kvm_mmu_slot_remove_write_access(vcpu, slot);
+ spin_unlock(&vcpu->kvm->lock);
}
/*
*
* Discontiguous memory is allowed, mostly for framebuffers.
*/
-static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm,
- struct kvm_memory_region *mem)
+static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
+ struct kvm_memory_region *mem)
{
int r;
gfn_t base_gfn;
| __GFP_ZERO);
if (!new.phys_mem[i])
goto out_free;
- new.phys_mem[i]->private = 0;
+			set_page_private(new.phys_mem[i], 0);
}
}
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
struct kvm_vcpu *vcpu;
- vcpu = vcpu_load(kvm, i);
+ vcpu = vcpu_load_slot(kvm, i);
if (!vcpu)
continue;
+ if (new.flags & KVM_MEM_LOG_DIRTY_PAGES)
+ do_remove_write_access(vcpu, mem->slot);
kvm_mmu_reset_context(vcpu);
vcpu_put(vcpu);
}
return r;
}
-static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
-{
- spin_lock(&vcpu->kvm->lock);
- kvm_mmu_slot_remove_write_access(vcpu, slot);
- spin_unlock(&vcpu->kvm->lock);
-}
-
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
-static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
+static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
{
struct kvm_memory_slot *memslot;
int r, i;
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, 8) / 8;
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
- for (i = 0; !any && i < n; ++i)
+ for (i = 0; !any && i < n/sizeof(long); ++i)
any = memslot->dirty_bitmap[i];
r = -EFAULT;
if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
goto out;
-
if (any) {
cleared = 0;
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
+ struct kvm_vcpu *vcpu;
+ vcpu = vcpu_load_slot(kvm, i);
if (!vcpu)
continue;
if (!cleared) {
return X86EMUL_CONTINUE;
else {
gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+
if (gpa == UNMAPPED_GVA)
- return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT;
+ return X86EMUL_PROPAGATE_FAULT;
vcpu->mmio_needed = 1;
vcpu->mmio_phys_addr = gpa;
vcpu->mmio_size = bytes;
return 0;
page = gfn_to_page(m, gpa >> PAGE_SHIFT);
kvm_mmu_pre_write(vcpu, gpa, bytes);
+ mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
virt = kmap_atomic(page, KM_USER0);
memcpy(virt + offset_in_page(gpa), &val, bytes);
kunmap_atomic(virt, KM_USER0);
}
EXPORT_SYMBOL_GPL(emulate_instruction);
+int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
+
+ kvm_arch_ops->decache_regs(vcpu);
+ ret = -KVM_EINVAL;
+#ifdef CONFIG_X86_64
+ if (is_long_mode(vcpu)) {
+ nr = vcpu->regs[VCPU_REGS_RAX];
+ a0 = vcpu->regs[VCPU_REGS_RDI];
+ a1 = vcpu->regs[VCPU_REGS_RSI];
+ a2 = vcpu->regs[VCPU_REGS_RDX];
+ a3 = vcpu->regs[VCPU_REGS_RCX];
+ a4 = vcpu->regs[VCPU_REGS_R8];
+ a5 = vcpu->regs[VCPU_REGS_R9];
+ } else
+#endif
+ {
+ nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
+ a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
+ a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
+ a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
+ a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
+ a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
+ a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
+ }
+ switch (nr) {
+ default:
+ ;
+ }
+ vcpu->regs[VCPU_REGS_RAX] = ret;
+ kvm_arch_ops->cache_regs(vcpu);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(kvm_hypercall);
+
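For reference, a 64-bit guest would invoke a hypercall by loading the
number and arguments into the registers decoded above and calling into
the patched hypercall page. A minimal sketch, assuming the page has
already been registered and patched; the helper name and two-argument
form are illustrative, not part of this patch:

/*
 * Hypothetical guest-side helper: nr in RAX, a0/a1 in RDI/RSI,
 * matching the 64-bit register decoding in kvm_hypercall() above.
 * The hypercall page (patched with VMCALL/VMMCALL plus RET by the
 * host) is called like an ordinary function; the result comes back
 * in RAX.
 */
static unsigned long example_hypercall2(void *hypercall_page,
					unsigned long nr,
					unsigned long a0, unsigned long a1)
{
	unsigned long ret;

	asm volatile("call *%1"
		     : "=a" (ret)
		     : "r" (hypercall_page), "0" (nr), "D" (a0), "S" (a1)
		     : "memory");
	return ret;
}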
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}
}
+/*
+ * Register the para guest with the host:
+ */
+static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
+{
+ struct kvm_vcpu_para_state *para_state;
+ hpa_t para_state_hpa, hypercall_hpa;
+ struct page *para_state_page;
+ unsigned char *hypercall;
+ gpa_t hypercall_gpa;
+
+ printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
+ printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);
+
+ /*
+ * Needs to be page aligned:
+ */
+ if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
+ goto err_gp;
+
+ para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
+ printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
+ if (is_error_hpa(para_state_hpa))
+ goto err_gp;
+
+ mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
+ para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
+ para_state = kmap_atomic(para_state_page, KM_USER0);
+
+ printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version);
+ printk(KERN_DEBUG ".... size: %d\n", para_state->size);
+
+ para_state->host_version = KVM_PARA_API_VERSION;
+ /*
+ * We cannot support guests that try to register themselves
+ * with a newer API version than the host supports:
+ */
+ if (para_state->guest_version > KVM_PARA_API_VERSION) {
+ para_state->ret = -KVM_EINVAL;
+ goto err_kunmap_skip;
+ }
+
+ hypercall_gpa = para_state->hypercall_gpa;
+ hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
+ printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
+ if (is_error_hpa(hypercall_hpa)) {
+ para_state->ret = -KVM_EINVAL;
+ goto err_kunmap_skip;
+ }
+
+ printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
+ vcpu->para_state_page = para_state_page;
+ vcpu->para_state_gpa = para_state_gpa;
+ vcpu->hypercall_gpa = hypercall_gpa;
+
+ mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
+ hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
+ KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
+ kvm_arch_ops->patch_hypercall(vcpu, hypercall);
+ kunmap_atomic(hypercall, KM_USER1);
+
+ para_state->ret = 0;
+err_kunmap_skip:
+ kunmap_atomic(para_state, KM_USER0);
+ return 0;
+err_gp:
+ return 1;
+}
+
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data;
case MSR_IA32_MISC_ENABLE:
vcpu->ia32_misc_enable_msr = data;
break;
+ /*
+ * This is the 'probe whether the host is KVM' logic:
+ */
+ case MSR_KVM_API_MAGIC:
+ return vcpu_register_para(vcpu, data);
+
default:
printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
return 1;
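From the guest side, the probe above amounts to publishing a
page-aligned parameter block and writing its guest-physical address to
the magic MSR. A hedged sketch, assuming the MSR and structure
definitions from this patch are in scope, and ignoring the #GP that a
non-KVM host would raise on the unknown MSR:

/*
 * Hypothetical guest-side registration: the host fills in ->ret and
 * ->host_version during the wrmsr exit (see vcpu_register_para()).
 */
static int example_register_para(struct kvm_vcpu_para_state *ps,
				 u64 para_state_gpa, u64 hypercall_gpa)
{
	ps->guest_version = KVM_PARA_API_VERSION;
	ps->size = sizeof(*ps);
	ps->hypercall_gpa = hypercall_gpa;

	wrmsrl(MSR_KVM_API_MAGIC, para_state_gpa);

	return ps->ret;
}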
{
vcpu_put(vcpu);
cond_resched();
- /* Cannot fail - no vcpu unplug yet. */
- vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
+ vcpu_load(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_resched);
}
EXPORT_SYMBOL_GPL(save_msrs);
-static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
+static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
- struct kvm_vcpu *vcpu;
int r;
- if (!valid_vcpu(kvm_run->vcpu))
- return -EINVAL;
-
- vcpu = vcpu_load(kvm, kvm_run->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
/* re-sync apic's tpr */
vcpu->cr8 = kvm_run->cr8;
return r;
}
-static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
+static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
+ struct kvm_regs *regs)
{
- struct kvm_vcpu *vcpu;
-
- if (!valid_vcpu(regs->vcpu))
- return -EINVAL;
-
- vcpu = vcpu_load(kvm, regs->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
kvm_arch_ops->cache_regs(vcpu);
return 0;
}
-static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
+static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
+ struct kvm_regs *regs)
{
- struct kvm_vcpu *vcpu;
-
- if (!valid_vcpu(regs->vcpu))
- return -EINVAL;
-
- vcpu = vcpu_load(kvm, regs->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
vcpu->regs[VCPU_REGS_RAX] = regs->rax;
vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
return kvm_arch_ops->get_segment(vcpu, var, seg);
}
-static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
+static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
- struct kvm_vcpu *vcpu;
struct descriptor_table dt;
- if (!valid_vcpu(sregs->vcpu))
- return -EINVAL;
- vcpu = vcpu_load(kvm, sregs->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
return kvm_arch_ops->set_segment(vcpu, var, seg);
}
-static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
+static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
- struct kvm_vcpu *vcpu;
int mmu_reset_needed = 0;
int i;
struct descriptor_table dt;
- if (!valid_vcpu(sregs->vcpu))
- return -EINVAL;
- vcpu = vcpu_load(kvm, sregs->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
*
* @return number of msrs set successfully.
*/
-static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
+static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
struct kvm_msr_entry *entries,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data))
{
- struct kvm_vcpu *vcpu;
int i;
- if (!valid_vcpu(msrs->vcpu))
- return -EINVAL;
-
- vcpu = vcpu_load(kvm, msrs->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
for (i = 0; i < msrs->nmsrs; ++i)
if (do_msr(vcpu, entries[i].index, &entries[i].data))
*
* @return number of msrs set successfully.
*/
-static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
+static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data),
int writeback)
if (copy_from_user(entries, user_msrs->entries, size))
goto out_free;
- r = n = __msr_io(kvm, &msrs, entries, do_msr);
+ r = n = __msr_io(vcpu, &msrs, entries, do_msr);
if (r < 0)
goto out_free;
/*
* Translate a guest virtual address to a guest physical address.
*/
-static int kvm_dev_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr)
+static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
{
unsigned long vaddr = tr->linear_address;
- struct kvm_vcpu *vcpu;
gpa_t gpa;
- vcpu = vcpu_load(kvm, tr->vcpu);
- if (!vcpu)
- return -ENOENT;
- spin_lock(&kvm->lock);
+ vcpu_load(vcpu);
+ spin_lock(&vcpu->kvm->lock);
gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
tr->physical_address = gpa;
tr->valid = gpa != UNMAPPED_GVA;
tr->writeable = 1;
tr->usermode = 0;
- spin_unlock(&kvm->lock);
+ spin_unlock(&vcpu->kvm->lock);
vcpu_put(vcpu);
return 0;
}
-static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
+static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
{
- struct kvm_vcpu *vcpu;
-
- if (!valid_vcpu(irq->vcpu))
- return -EINVAL;
if (irq->irq < 0 || irq->irq >= 256)
return -EINVAL;
- vcpu = vcpu_load(kvm, irq->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
set_bit(irq->irq, vcpu->irq_pending);
set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
return 0;
}
-static int kvm_dev_ioctl_debug_guest(struct kvm *kvm,
- struct kvm_debug_guest *dbg)
+static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg)
{
- struct kvm_vcpu *vcpu;
int r;
- if (!valid_vcpu(dbg->vcpu))
- return -EINVAL;
- vcpu = vcpu_load(kvm, dbg->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
return r;
}
-static long kvm_dev_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
+static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
- struct kvm *kvm = filp->private_data;
+ struct kvm_vcpu *vcpu = filp->private_data;
+
+ fput(vcpu->kvm->filp);
+ return 0;
+}
+
+static struct file_operations kvm_vcpu_fops = {
+ .release = kvm_vcpu_release,
+ .unlocked_ioctl = kvm_vcpu_ioctl,
+ .compat_ioctl = kvm_vcpu_ioctl,
+};
+
+/*
+ * Allocates an inode for the vcpu.
+ */
+static int create_vcpu_fd(struct kvm_vcpu *vcpu)
+{
+ int fd, r;
+ struct inode *inode;
+ struct file *file;
+
+ atomic_inc(&vcpu->kvm->filp->f_count);
+ inode = kvmfs_inode(&kvm_vcpu_fops);
+ if (IS_ERR(inode)) {
+ r = PTR_ERR(inode);
+ goto out1;
+ }
+
+ file = kvmfs_file(inode, vcpu);
+ if (IS_ERR(file)) {
+ r = PTR_ERR(file);
+ goto out2;
+ }
+
+ r = get_unused_fd();
+ if (r < 0)
+ goto out3;
+ fd = r;
+ fd_install(fd, file);
+
+ return fd;
+
+out3:
+ fput(file);
+out2:
+ iput(inode);
+out1:
+ fput(vcpu->kvm->filp);
+ return r;
+}
+
+/*
+ * Creates some virtual cpus. Good luck creating more than one.
+ */
+static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
+{
+ int r;
+ struct kvm_vcpu *vcpu;
+
+ r = -EINVAL;
+ if (!valid_vcpu(n))
+ goto out;
+
+ vcpu = &kvm->vcpus[n];
+
+ mutex_lock(&vcpu->mutex);
+
+ if (vcpu->vmcs) {
+ mutex_unlock(&vcpu->mutex);
+ return -EEXIST;
+ }
+
+ vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
+ FX_IMAGE_ALIGN);
+ vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
+
+ r = kvm_arch_ops->vcpu_create(vcpu);
+ if (r < 0)
+ goto out_free_vcpus;
+
+ r = kvm_mmu_create(vcpu);
+ if (r < 0)
+ goto out_free_vcpus;
+
+ kvm_arch_ops->vcpu_load(vcpu);
+ r = kvm_mmu_setup(vcpu);
+ if (r >= 0)
+ r = kvm_arch_ops->vcpu_setup(vcpu);
+ vcpu_put(vcpu);
+
+ if (r < 0)
+ goto out_free_vcpus;
+
+ r = create_vcpu_fd(vcpu);
+ if (r < 0)
+ goto out_free_vcpus;
+
+ return r;
+
+out_free_vcpus:
+ kvm_free_vcpu(vcpu);
+ mutex_unlock(&vcpu->mutex);
+out:
+ return r;
+}
+
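Taken together, these changes replace the single /dev/kvm ioctl
multiplexer with a hierarchy of file descriptors. A minimal userspace
sketch of the new flow (error handling trimmed; the function name is
illustrative):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * /dev/kvm yields a VM fd via KVM_CREATE_VM, the VM fd yields vcpu
 * fds via KVM_CREATE_VCPU; KVM_RUN and friends are then issued on
 * the vcpu fd.
 */
int example_create_vcpu0(void)
{
	int kvm_fd, vm_fd;

	kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0)
		return -1;
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	if (vm_fd < 0)
		return -1;
	return ioctl(vm_fd, KVM_CREATE_VCPU, 0);	/* vcpu slot 0 */
}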
+static long kvm_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
int r = -EINVAL;
switch (ioctl) {
- case KVM_GET_API_VERSION:
- r = KVM_API_VERSION;
- break;
- case KVM_CREATE_VCPU: {
- r = kvm_dev_ioctl_create_vcpu(kvm, arg);
- if (r)
- goto out;
- break;
- }
case KVM_RUN: {
struct kvm_run kvm_run;
r = -EFAULT;
if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
goto out;
- r = kvm_dev_ioctl_run(kvm, &kvm_run);
+ r = kvm_vcpu_ioctl_run(vcpu, &kvm_run);
if (r < 0 && r != -EINTR)
goto out;
if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
case KVM_GET_REGS: {
struct kvm_regs kvm_regs;
- r = -EFAULT;
- if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
- goto out;
- r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
+ memset(&kvm_regs, 0, sizeof kvm_regs);
+ r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
if (r)
goto out;
r = -EFAULT;
r = -EFAULT;
if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
goto out;
- r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs);
+ r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
if (r)
goto out;
r = 0;
case KVM_GET_SREGS: {
struct kvm_sregs kvm_sregs;
- r = -EFAULT;
- if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
- goto out;
- r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
+ memset(&kvm_sregs, 0, sizeof kvm_sregs);
+ r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
if (r)
goto out;
r = -EFAULT;
r = -EFAULT;
if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
goto out;
- r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs);
+ r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
if (r)
goto out;
r = 0;
r = -EFAULT;
if (copy_from_user(&tr, argp, sizeof tr))
goto out;
- r = kvm_dev_ioctl_translate(kvm, &tr);
+ r = kvm_vcpu_ioctl_translate(vcpu, &tr);
if (r)
goto out;
r = -EFAULT;
r = -EFAULT;
if (copy_from_user(&irq, argp, sizeof irq))
goto out;
- r = kvm_dev_ioctl_interrupt(kvm, &irq);
+ r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
if (r)
goto out;
r = 0;
r = -EFAULT;
if (copy_from_user(&dbg, argp, sizeof dbg))
goto out;
- r = kvm_dev_ioctl_debug_guest(kvm, &dbg);
+ r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
if (r)
goto out;
r = 0;
break;
}
+ case KVM_GET_MSRS:
+ r = msr_io(vcpu, argp, get_msr, 1);
+ break;
+ case KVM_SET_MSRS:
+ r = msr_io(vcpu, argp, do_set_msr, 0);
+ break;
+ default:
+ ;
+ }
+out:
+ return r;
+}
+
+static long kvm_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm *kvm = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ int r = -EINVAL;
+
+ switch (ioctl) {
+ case KVM_CREATE_VCPU:
+ r = kvm_vm_ioctl_create_vcpu(kvm, arg);
+ if (r < 0)
+ goto out;
+ break;
case KVM_SET_MEMORY_REGION: {
struct kvm_memory_region kvm_mem;
r = -EFAULT;
if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
goto out;
- r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem);
+ r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
if (r)
goto out;
break;
r = -EFAULT;
if (copy_from_user(&log, argp, sizeof log))
goto out;
- r = kvm_dev_ioctl_get_dirty_log(kvm, &log);
+ r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
if (r)
goto out;
break;
}
- case KVM_GET_MSRS:
- r = msr_io(kvm, argp, get_msr, 1);
+ default:
+ ;
+ }
+out:
+ return r;
+}
+
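With the dirty-log ioctl now on the VM fd, fetching a slot's bitmap
looks roughly as follows from userspace. A sketch under the assumption
that the caller sized the buffer at one bit per page, rounded up to a
multiple of BITS_PER_LONG as kvm_vm_ioctl_get_dirty_log() expects:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int example_get_dirty_log(int vm_fd, int slot, unsigned long *bitmap)
{
	struct kvm_dirty_log log;

	memset(&log, 0, sizeof(log));
	log.slot = slot;
	log.dirty_bitmap = bitmap;
	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}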
+static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int *type)
+{
+ struct kvm *kvm = vma->vm_file->private_data;
+ unsigned long pgoff;
+ struct kvm_memory_slot *slot;
+ struct page *page;
+
+ *type = VM_FAULT_MINOR;
+ pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ slot = gfn_to_memslot(kvm, pgoff);
+ if (!slot)
+ return NOPAGE_SIGBUS;
+ page = gfn_to_page(slot, pgoff);
+ if (!page)
+ return NOPAGE_SIGBUS;
+ get_page(page);
+ return page;
+}
+
+static struct vm_operations_struct kvm_vm_vm_ops = {
+ .nopage = kvm_vm_nopage,
+};
+
+static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &kvm_vm_vm_ops;
+ return 0;
+}
+
+static struct file_operations kvm_vm_fops = {
+ .release = kvm_vm_release,
+ .unlocked_ioctl = kvm_vm_ioctl,
+ .compat_ioctl = kvm_vm_ioctl,
+ .mmap = kvm_vm_mmap,
+};
+
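The nopage handler above interprets vm_pgoff as a guest frame number,
so userspace can expose a guest-physical range by mmap()ing the VM fd
at the corresponding byte offset. A hedged one-liner (the helper name
is illustrative):

#include <sys/mman.h>

void *example_map_guest_ram(int vm_fd, unsigned long gpa, size_t len)
{
	/* the page-aligned offset selects the starting gfn */
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		    vm_fd, gpa);
}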
+static int kvm_dev_ioctl_create_vm(void)
+{
+ int fd, r;
+ struct inode *inode;
+ struct file *file;
+ struct kvm *kvm;
+
+ inode = kvmfs_inode(&kvm_vm_fops);
+ if (IS_ERR(inode)) {
+ r = PTR_ERR(inode);
+ goto out1;
+ }
+
+ kvm = kvm_create_vm();
+ if (IS_ERR(kvm)) {
+ r = PTR_ERR(kvm);
+ goto out2;
+ }
+
+ file = kvmfs_file(inode, kvm);
+ if (IS_ERR(file)) {
+ r = PTR_ERR(file);
+ goto out3;
+ }
+ kvm->filp = file;
+
+ r = get_unused_fd();
+ if (r < 0)
+ goto out4;
+ fd = r;
+ fd_install(fd, file);
+
+ return fd;
+
+out4:
+ fput(file);
+out3:
+ kvm_destroy_vm(kvm);
+out2:
+ iput(inode);
+out1:
+ return r;
+}
+
+static long kvm_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int r = -EINVAL;
+
+ switch (ioctl) {
+ case KVM_GET_API_VERSION:
+ r = KVM_API_VERSION;
break;
- case KVM_SET_MSRS:
- r = msr_io(kvm, argp, do_set_msr, 0);
+ case KVM_CREATE_VM:
+ r = kvm_dev_ioctl_create_vm();
break;
case KVM_GET_MSR_INDEX_LIST: {
struct kvm_msr_list __user *user_msr_list = argp;
return r;
}
-static struct page *kvm_dev_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int *type)
-{
- struct kvm *kvm = vma->vm_file->private_data;
- unsigned long pgoff;
- struct kvm_memory_slot *slot;
- struct page *page;
-
- *type = VM_FAULT_MINOR;
- pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- slot = gfn_to_memslot(kvm, pgoff);
- if (!slot)
- return NOPAGE_SIGBUS;
- page = gfn_to_page(slot, pgoff);
- if (!page)
- return NOPAGE_SIGBUS;
- get_page(page);
- return page;
-}
-
-static struct vm_operations_struct kvm_dev_vm_ops = {
- .nopage = kvm_dev_nopage,
-};
-
-static int kvm_dev_mmap(struct file *file, struct vm_area_struct *vma)
-{
- vma->vm_ops = &kvm_dev_vm_ops;
- return 0;
-}
-
static struct file_operations kvm_chardev_ops = {
.open = kvm_dev_open,
.release = kvm_dev_release,
.unlocked_ioctl = kvm_dev_ioctl,
.compat_ioctl = kvm_dev_ioctl,
- .mmap = kvm_dev_mmap,
};
static struct miscdevice kvm_dev = {
int cpu = (long)v;
switch (val) {
- case CPU_DEAD:
+ case CPU_DOWN_PREPARE:
case CPU_UP_CANCELED:
+ printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
+ cpu);
decache_vcpus_on_cpu(cpu);
smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
NULL, 0, 1);
break;
- case CPU_UP_PREPARE:
+ case CPU_ONLINE:
+ printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
+ cpu);
smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
NULL, 0, 1);
break;
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
decache_vcpus_on_cpu(raw_smp_processor_id());
- on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
+ on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
return 0;
}
static int kvm_resume(struct sys_device *dev)
{
- on_each_cpu(kvm_arch_ops->hardware_enable, 0, 0, 1);
+ on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
return 0;
}
hpa_t bad_page_address;
+static int kvmfs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data, struct vfsmount *mnt)
+{
+ return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_SUPER_MAGIC, mnt);
+}
+
+static struct file_system_type kvm_fs_type = {
+ .name = "kvmfs",
+ .get_sb = kvmfs_get_sb,
+ .kill_sb = kill_anon_super,
+};
+
int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
int r;
static __init int kvm_init(void)
{
static struct page *bad_page;
- int r = 0;
+ int r;
+
+ r = register_filesystem(&kvm_fs_type);
+ if (r)
+ goto out3;
+ kvmfs_mnt = kern_mount(&kvm_fs_type);
+ r = PTR_ERR(kvmfs_mnt);
+ if (IS_ERR(kvmfs_mnt))
+ goto out2;
kvm_init_debug();
kvm_init_msr_list();
bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
memset(__va(bad_page_address), 0, PAGE_SIZE);
- return r;
+ return 0;
out:
kvm_exit_debug();
+ mntput(kvmfs_mnt);
+out2:
+ unregister_filesystem(&kvm_fs_type);
+out3:
return r;
}
{
kvm_exit_debug();
__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
+ mntput(kvmfs_mnt);
+ unregister_filesystem(&kvm_fs_type);
}
module_init(kvm_init)
#ifndef __KVM_SVM_H
#define __KVM_SVM_H
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <asm/msr.h>
MSR_IA32_LASTBRANCHTOIP, MSR_IA32_LASTINTFROMIP,MSR_IA32_LASTINTTOIP,*/
};
-#define NR_HOST_SAVE_MSRS (sizeof(host_save_msrs) / sizeof(*host_save_msrs))
+#define NR_HOST_SAVE_MSRS ARRAY_SIZE(host_save_msrs)
#define NUM_DB_REGS 4
struct vcpu_svm {
if (!is_rmap_pte(*spte))
return;
page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
- if (!page->private) {
+ if (!page_private(page)) {
rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
- page->private = (unsigned long)spte;
- } else if (!(page->private & 1)) {
+		set_page_private(page, (unsigned long)spte);
+ } else if (!(page_private(page) & 1)) {
rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
desc = mmu_alloc_rmap_desc(vcpu);
- desc->shadow_ptes[0] = (u64 *)page->private;
+ desc->shadow_ptes[0] = (u64 *)page_private(page);
desc->shadow_ptes[1] = spte;
- page->private = (unsigned long)desc | 1;
+		set_page_private(page, (unsigned long)desc | 1);
} else {
rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
- desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+ desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
desc = desc->more;
if (desc->shadow_ptes[RMAP_EXT-1]) {
if (j != 0)
return;
if (!prev_desc && !desc->more)
- page->private = (unsigned long)desc->shadow_ptes[0];
+		set_page_private(page, (unsigned long)desc->shadow_ptes[0]);
else
if (prev_desc)
prev_desc->more = desc->more;
else
- page->private = (unsigned long)desc->more | 1;
+			set_page_private(page, (unsigned long)desc->more | 1);
mmu_free_rmap_desc(vcpu, desc);
}
if (!is_rmap_pte(*spte))
return;
page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
- if (!page->private) {
+ if (!page_private(page)) {
printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
BUG();
- } else if (!(page->private & 1)) {
+ } else if (!(page_private(page) & 1)) {
rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
- if ((u64 *)page->private != spte) {
+ if ((u64 *)page_private(page) != spte) {
printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
spte, *spte);
BUG();
}
- page->private = 0;
+		set_page_private(page, 0);
} else {
rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
- desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+ desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
prev_desc = NULL;
while (desc) {
for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
BUG_ON(!slot);
page = gfn_to_page(slot, gfn);
- while (page->private) {
- if (!(page->private & 1))
- spte = (u64 *)page->private;
+ while (page_private(page)) {
+ if (!(page_private(page) & 1))
+ spte = (u64 *)page_private(page);
else {
- desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+ desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
spte = desc->shadow_ptes[0];
}
BUG_ON(!spte);
INIT_LIST_HEAD(&page_header->link);
if ((page = alloc_page(GFP_KERNEL)) == NULL)
goto error_1;
- page->private = (unsigned long)page_header;
+ set_page_private(page, (unsigned long)page_header);
page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
list_add(&page_header->link, &vcpu->free_pages);
goto access_error;
#endif
- if (!(*ptep & PT_ACCESSED_MASK))
- *ptep |= PT_ACCESSED_MASK; /* avoid rmw */
+ if (!(*ptep & PT_ACCESSED_MASK)) {
+ mark_page_dirty(vcpu->kvm, table_gfn);
+ *ptep |= PT_ACCESSED_MASK;
+ }
if (walker->level == PT_PAGE_TABLE_LEVEL) {
walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
kunmap_atomic(walker->table, KM_USER0);
}
+static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
+ struct guest_walker *walker)
+{
+ mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
+}
+
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
u64 *shadow_pte, u64 access_bits, gfn_t gfn)
{
} else if (kvm_mmu_lookup_page(vcpu, gfn)) {
pgprintk("%s: found shadow page for %lx, marking ro\n",
__FUNCTION__, gfn);
+ mark_page_dirty(vcpu->kvm, gfn);
+ FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
*guest_ent |= PT_DIRTY_MASK;
*write_pt = 1;
return 0;
}
mark_page_dirty(vcpu->kvm, gfn);
*shadow_ent |= PT_WRITABLE_MASK;
+ FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
*guest_ent |= PT_DIRTY_MASK;
rmap_add(vcpu, shadow_ent);
/*
* mmio: emulate if accessible, otherwise its a guest fault.
*/
- if (is_io_pte(*shadow_pte)) {
+ if (is_io_pte(*shadow_pte))
return 1;
- }
++kvm_stat.pf_fixed;
kvm_mmu_audit(vcpu, "post page fault (fixed)");
*/
#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/profile.h>
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
-#define NUM_MSR_MAPS (sizeof(msrpm_ranges) / sizeof(*msrpm_ranges))
+#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
control->intercept = (1ULL << INTERCEPT_INTR) |
(1ULL << INTERCEPT_NMI) |
+ (1ULL << INTERCEPT_SMI) |
/*
* selective cr0 intercept bug?
* 0: 0f 22 d8 mov %eax,%cr3
* cr0 val on cpu init should be 0x60000010, we enable cpu
* cache by default. the orderly way is to enable cache in bios.
*/
- save->cr0 = 0x00000010 | CR0_PG_MASK;
+ save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK;
save->cr4 = CR4_PAE_MASK;
/* rdx = ?? */
}
kfree(vcpu->svm);
}
-static struct kvm_vcpu *svm_vcpu_load(struct kvm_vcpu *vcpu)
+static void svm_vcpu_load(struct kvm_vcpu *vcpu)
{
get_cpu();
- return vcpu;
}
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
addr_mask = io_adress(vcpu, _in, &kvm_run->io.address);
if (!addr_mask) {
- printk(KERN_DEBUG "%s: get io address failed\n", __FUNCTION__);
+ printk(KERN_DEBUG "%s: get io address failed\n",
+ __FUNCTION__);
return 1;
}
if (kvm_run->io.rep) {
- kvm_run->io.count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
+ kvm_run->io.count
+ = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags
& X86_EFLAGS_DF) != 0;
}
- } else {
+ } else
kvm_run->io.value = vcpu->svm->vmcb->save.rax;
- }
return 0;
}
-
static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
return 1;
return 0;
}
+static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ vcpu->svm->vmcb->save.rip += 3;
+ return kvm_hypercall(vcpu, kvm_run);
+}
+
static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
inject_ud(vcpu);
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
[SVM_EXIT_SHUTDOWN] = shutdown_interception,
[SVM_EXIT_VMRUN] = invalid_op_interception,
- [SVM_EXIT_VMMCALL] = invalid_op_interception,
+ [SVM_EXIT_VMMCALL] = vmmcall_interception,
[SVM_EXIT_VMLOAD] = invalid_op_interception,
[SVM_EXIT_VMSAVE] = invalid_op_interception,
[SVM_EXIT_STGI] = invalid_op_interception,
__FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
exit_code);
- if (exit_code >= sizeof(svm_exit_handlers) / sizeof(*svm_exit_handlers)
+ if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
|| svm_exit_handlers[exit_code] == 0) {
kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
printk(KERN_ERR "%s: 0x%x @ 0x%llx cr0 0x%lx rflags 0x%llx\n",
return 0;
}
+static void
+svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
+{
+	/*
+	 * Patch in the VMMCALL instruction (0f 01 d9), followed by a
+	 * RET (c3) so the guest can call the hypercall page directly:
+	 */
+ hypercall[0] = 0x0f;
+ hypercall[1] = 0x01;
+ hypercall[2] = 0xd9;
+ hypercall[3] = 0xc3;
+}
+
static struct kvm_arch_ops svm_arch_ops = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
.run = svm_vcpu_run,
.skip_emulated_instruction = skip_emulated_instruction,
.vcpu_setup = svm_vcpu_setup,
+ .patch_hypercall = svm_patch_hypercall,
};
static int __init svm_init(void)
#include "vmx.h"
#include "kvm_vmx.h"
#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include "segment_descriptor.h"
-
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
#endif
MSR_EFER, MSR_K6_STAR,
};
-#define NR_VMX_MSR (sizeof(vmx_msr_index) / sizeof(*vmx_msr_index))
+#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
static inline int is_page_fault(u32 intr_info)
{
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
*/
-static struct kvm_vcpu *vmx_vcpu_load(struct kvm_vcpu *vcpu)
+static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
{
u64 phys_addr = __pa(vcpu->vmcs);
int cpu;
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
}
- return vcpu;
}
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
case MSR_IA32_SYSENTER_ESP:
vmcs_write32(GUEST_SYSENTER_ESP, data);
break;
- case MSR_IA32_TIME_STAMP_COUNTER: {
+ case MSR_IA32_TIME_STAMP_COUNTER:
guest_write_tsc(data);
break;
- }
default:
msr = find_msr_entry(vcpu, msr_index);
if (msr) {
*/
static void vmx_set_cr0_no_modeswitch(struct kvm_vcpu *vcpu, unsigned long cr0)
{
+ if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
+ enter_rmode(vcpu);
+
vcpu->rmode.active = ((cr0 & CR0_PE_MASK) == 0);
update_exception_bitmap(vcpu);
vmcs_writel(CR0_READ_SHADOW, cr0);
return 0;
}
+static void
+vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
+{
+	/*
+	 * Patch in the VMCALL instruction (0f 01 c1), followed by a
+	 * RET (c3) so the guest can call the hypercall page directly:
+	 */
+ hypercall[0] = 0x0f;
+ hypercall[1] = 0x01;
+ hypercall[2] = 0xc1;
+ hypercall[3] = 0xc3;
+}
+
static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
u64 exit_qualification;
return 0;
}
+static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP)+3);
+ return kvm_hypercall(vcpu, kvm_run);
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
[EXIT_REASON_MSR_WRITE] = handle_wrmsr,
[EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
[EXIT_REASON_HLT] = handle_halt,
+ [EXIT_REASON_VMCALL] = handle_vmcall,
};
static const int kvm_vmx_max_exit_handlers =
.run = vmx_vcpu_run,
.skip_emulated_instruction = skip_emulated_instruction,
.vcpu_setup = vmx_vcpu_setup,
+ .patch_hypercall = vmx_patch_hypercall,
};
static int __init vmx_init(void)
.sync_super = super_1_sync,
},
};
-
-static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
-{
- struct list_head *tmp;
- mdk_rdev_t *rdev;
-
- ITERATE_RDEV(mddev,rdev,tmp)
- if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
- return rdev;
-
- return NULL;
-}
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
- struct list_head *tmp;
- mdk_rdev_t *rdev;
+ struct list_head *tmp, *tmp2;
+ mdk_rdev_t *rdev, *rdev2;
ITERATE_RDEV(mddev1,rdev,tmp)
- if (match_dev_unit(mddev2, rdev))
- return 1;
+ ITERATE_RDEV(mddev2, rdev2, tmp2)
+ if (rdev->bdev->bd_contains ==
+ rdev2->bdev->bd_contains)
+ return 1;
return 0;
}
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
- mdk_rdev_t *same_pdev;
- char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+ char b[BDEVNAME_SIZE];
struct kobject *ko;
char *s;
else
mddev->size = rdev->size;
}
- same_pdev = match_dev_unit(mddev, rdev);
- if (same_pdev)
- printk(KERN_WARNING
- "%s: WARNING: %s appears to be on the same physical"
- " disk as %s. True\n protection against single-disk"
- " failure might be compromised.\n",
- mdname(mddev), bdevname(rdev->bdev,b),
- bdevname(same_pdev->bdev,b2));
/* Verify rdev->desc_nr is unique.
* If it is -1, assign a free number, else
return -EINVAL;
}
+ if (pers->sync_request) {
+ /* Warn if this is a potentially silly
+ * configuration.
+ */
+ char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+ mdk_rdev_t *rdev2;
+ struct list_head *tmp2;
+ int warned = 0;
+ ITERATE_RDEV(mddev, rdev, tmp) {
+ ITERATE_RDEV(mddev, rdev2, tmp2) {
+ if (rdev < rdev2 &&
+ rdev->bdev->bd_contains ==
+ rdev2->bdev->bd_contains) {
+ printk(KERN_WARNING
+ "%s: WARNING: %s appears to be"
+ " on the same physical disk as"
+ " %s.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev,b),
+ bdevname(rdev2->bdev,b2));
+ warned = 1;
+ }
+ }
+ }
+ if (warned)
+ printk(KERN_WARNING
+ "True protection against single-disk"
+ " failure might be compromised.\n");
+ }
+
mddev->recovery = 0;
mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
mddev->barriers_work = 1;
set_disk_ro(disk, 0);
blk_queue_make_request(mddev->queue, md_fail_request);
mddev->pers->stop(mddev);
+ mddev->queue->merge_bvec_fn = NULL;
+ mddev->queue->unplug_fn = NULL;
+ mddev->queue->issue_flush_fn = NULL;
if (mddev->pers->sync_request)
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
EXPORT_SYMBOL_GPL(md_do_sync);
+static int remove_and_add_spares(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+ struct list_head *rtmp;
+ int spares = 0;
+
+ ITERATE_RDEV(mddev,rdev,rtmp)
+ if (rdev->raid_disk >= 0 &&
+ (test_bit(Faulty, &rdev->flags) ||
+ ! test_bit(In_sync, &rdev->flags)) &&
+ atomic_read(&rdev->nr_pending)==0) {
+ if (mddev->pers->hot_remove_disk(
+ mddev, rdev->raid_disk)==0) {
+ char nm[20];
+ sprintf(nm,"rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ rdev->raid_disk = -1;
+ }
+ }
+
+ if (mddev->degraded) {
+ ITERATE_RDEV(mddev,rdev,rtmp)
+ if (rdev->raid_disk < 0
+ && !test_bit(Faulty, &rdev->flags)) {
+ rdev->recovery_offset = 0;
+ if (mddev->pers->hot_add_disk(mddev,rdev)) {
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_create_link(&mddev->kobj,
+ &rdev->kobj, nm);
+ spares++;
+ md_new_event(mddev);
+ } else
+ break;
+ }
+ }
+ return spares;
+}
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
return;
if (mddev_trylock(mddev)) {
- int spares =0;
+ int spares = 0;
spin_lock_irq(&mddev->write_lock);
if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
* Spare are also removed and re-added, to allow
* the personality to fail the re-add.
*/
- ITERATE_RDEV(mddev,rdev,rtmp)
- if (rdev->raid_disk >= 0 &&
- (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
- atomic_read(&rdev->nr_pending)==0) {
- if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
- char nm[20];
- sprintf(nm,"rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
- rdev->raid_disk = -1;
- }
- }
-
- if (mddev->degraded) {
- ITERATE_RDEV(mddev,rdev,rtmp)
- if (rdev->raid_disk < 0
- && !test_bit(Faulty, &rdev->flags)) {
- rdev->recovery_offset = 0;
- if (mddev->pers->hot_add_disk(mddev,rdev)) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
- spares++;
- md_new_event(mddev);
- } else
- break;
- }
- }
- if (spares) {
+ if (mddev->reshape_position != MaxSector) {
+ if (mddev->pers->check_reshape(mddev) != 0)
+ /* Cannot proceed */
+ goto unlock;
+ set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+ } else if ((spares = remove_and_add_spares(mddev))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
if (dev < 0)
dev += conf->raid_disks;
} else {
- while (sector > conf->stride) {
+ while (sector >= conf->stride) {
sector -= conf->stride;
if (dev < conf->near_copies)
dev += conf->raid_disks - conf->near_copies;
for (k=0; k<conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
+ BUG_ON(k == conf->copies);
bio = r10_bio->devs[1].bio;
bio->bi_next = biolist;
biolist = bio;
if (!conf->tmppage)
goto out_free_conf;
+ conf->mddev = mddev;
+ conf->raid_disks = mddev->raid_disks;
conf->near_copies = nc;
conf->far_copies = fc;
conf->copies = nc*fc;
conf->far_offset = fo;
conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
+ size = mddev->size >> (conf->chunk_shift-1);
+ sector_div(size, fc);
+ size = size * conf->raid_disks;
+ sector_div(size, nc);
+ /* 'size' is now the number of chunks in the array */
+ /* calculate "used chunks per device" in 'stride' */
+ stride = size * conf->copies;
+ sector_div(stride, conf->raid_disks);
+ mddev->size = stride << (conf->chunk_shift-1);
+
if (fo)
- conf->stride = 1 << conf->chunk_shift;
- else {
- stride = mddev->size >> (conf->chunk_shift-1);
+ stride = 1;
+ else
sector_div(stride, fc);
- conf->stride = stride << conf->chunk_shift;
- }
+ conf->stride = stride << conf->chunk_shift;
+
conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
r10bio_pool_free, conf);
if (!conf->r10bio_pool) {
disk->head_position = 0;
}
- conf->raid_disks = mddev->raid_disks;
- conf->mddev = mddev;
spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
/*
* Ok, everything is just fine now
*/
- if (conf->far_offset) {
- size = mddev->size >> (conf->chunk_shift-1);
- size *= conf->raid_disks;
- size <<= conf->chunk_shift;
- sector_div(size, conf->far_copies);
- } else
- size = conf->stride * conf->raid_disks;
- sector_div(size, conf->near_copies);
- mddev->array_size = size/2;
- mddev->resync_max_sectors = size;
+ mddev->array_size = size << (conf->chunk_shift-1);
+ mddev->resync_max_sectors = size << conf->chunk_shift;
mddev->queue->unplug_fn = raid10_unplug;
mddev->queue->issue_flush_fn = raid10_issue_flush;
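To make the new geometry calculation above concrete, consider an
illustrative configuration (the numbers are assumptions, not from the
patch): 4 disks of 1 GiB each (mddev->size = 1048576 KB), 64 KiB
chunks (chunk_shift = 7), near_copies = 2, far_copies = 1, so
copies = 2:

	chunks per device:  1048576 >> 6      = 16384
	'size' (array):     16384 / 1 * 4 / 2 = 32768 chunks
	'stride' (used):    32768 * 2 / 4     = 16384 chunks/device
	array_size:         32768 << 6        = 2097152 KB (2 GiB)
	resync_max_sectors: 32768 << 7        = 4194304 sectors

i.e. 4 GiB of raw space holding two copies of 2 GiB of data, with
every chunk on every device in use.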
static void compute_parity6(struct stripe_head *sh, int method)
{
raid6_conf_t *conf = sh->raid_conf;
- int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count;
+ int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
struct bio *chosen;
/**** FIX THIS: This could be very bad if disks is close to 256 ****/
void *ptrs[disks];
/* Compute one missing block */
static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
{
- raid6_conf_t *conf = sh->raid_conf;
- int i, count, disks = conf->raid_disks;
+ int i, count, disks = sh->disks;
void *ptr[MAX_XOR_BLOCKS], *p;
int pd_idx = sh->pd_idx;
int qd_idx = raid6_next_disk(pd_idx, disks);
/* Compute two missing blocks */
static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
{
- raid6_conf_t *conf = sh->raid_conf;
- int i, count, disks = conf->raid_disks;
+ int i, count, disks = sh->disks;
int pd_idx = sh->pd_idx;
int qd_idx = raid6_next_disk(pd_idx, disks);
int d0_idx = raid6_next_disk(qd_idx, disks);
static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
{
raid6_conf_t *conf = sh->raid_conf;
- int disks = conf->raid_disks;
+ int disks = sh->disks;
struct bio *return_bi= NULL;
struct bio *bi;
int i;
- int syncing;
+ int syncing, expanding, expanded;
int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
int non_overwrite = 0;
int failed_num[2] = {0, 0};
clear_bit(STRIPE_DELAYED, &sh->state);
syncing = test_bit(STRIPE_SYNCING, &sh->state);
+ expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+ expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
/* Now to look around and see what can be done */
rcu_read_lock();
* parity, or to satisfy requests
* or to load a block that is being partially written.
*/
- if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) {
+ if (to_read || non_overwrite || (to_write && failed) ||
+ (syncing && (uptodate < disks)) || expanding) {
for (i=disks; i--;) {
dev = &sh->dev[i];
if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
(dev->toread ||
(dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
syncing ||
+ expanding ||
(failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) ||
(failed >= 2 && (sh->dev[failed_num[1]].toread || to_write))
)
}
}
}
+
+ if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
+ /* Need to write out all blocks after computing P&Q */
+ sh->disks = conf->raid_disks;
+ sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
+ conf->raid_disks);
+ compute_parity6(sh, RECONSTRUCT_WRITE);
+ for (i = conf->raid_disks ; i-- ; ) {
+ set_bit(R5_LOCKED, &sh->dev[i].flags);
+ locked++;
+ set_bit(R5_Wantwrite, &sh->dev[i].flags);
+ }
+ clear_bit(STRIPE_EXPANDING, &sh->state);
+ } else if (expanded) {
+ clear_bit(STRIPE_EXPAND_READY, &sh->state);
+ atomic_dec(&conf->reshape_stripes);
+ wake_up(&conf->wait_for_overlap);
+ md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
+ }
+
+ if (expanding && locked == 0) {
+ /* We have read all the blocks in this stripe and now we need to
+ * copy some of them into a target stripe for expand.
+ */
+ clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+ for (i = 0; i < sh->disks ; i++)
+ if (i != pd_idx && i != qd_idx) {
+ int dd_idx2, pd_idx2, j;
+ struct stripe_head *sh2;
+
+ sector_t bn = compute_blocknr(sh, i);
+ sector_t s = raid5_compute_sector(
+ bn, conf->raid_disks,
+ conf->raid_disks - conf->max_degraded,
+ &dd_idx2, &pd_idx2, conf);
+ sh2 = get_active_stripe(conf, s,
+ conf->raid_disks,
+ pd_idx2, 1);
+ if (sh2 == NULL)
+					/* so far, only the early blocks of
+					 * this stripe have been requested.
+					 * When later blocks get requested,
+					 * we will try again
+					 */
+ continue;
+ if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
+ test_bit(R5_Expanded,
+ &sh2->dev[dd_idx2].flags)) {
+ /* must have already done this block */
+ release_stripe(sh2);
+ continue;
+ }
+ memcpy(page_address(sh2->dev[dd_idx2].page),
+ page_address(sh->dev[i].page),
+ STRIPE_SIZE);
+ set_bit(R5_Expanded, &sh2->dev[dd_idx2].flags);
+ set_bit(R5_UPTODATE, &sh2->dev[dd_idx2].flags);
+ for (j = 0 ; j < conf->raid_disks ; j++)
+ if (j != sh2->pd_idx &&
+ j != raid6_next_disk(sh2->pd_idx,
+ sh2->disks) &&
+ !test_bit(R5_Expanded,
+ &sh2->dev[j].flags))
+ break;
+ if (j == conf->raid_disks) {
+ set_bit(STRIPE_EXPAND_READY,
+ &sh2->state);
+ set_bit(STRIPE_HANDLE, &sh2->state);
+ }
+ release_stripe(sh2);
+ }
+ }
+
spin_unlock(&sh->lock);
while ((bi=return_bi)) {
rcu_read_unlock();
if (rdev) {
- if (syncing)
+ if (syncing || expanding || expanded)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
bi->bi_bdev = rdev->bdev;
struct stripe_head *sh;
int pd_idx;
sector_t first_sector, last_sector;
- int raid_disks;
- int data_disks;
+ int raid_disks = conf->previous_raid_disks;
+ int data_disks = raid_disks - conf->max_degraded;
+ int new_data_disks = conf->raid_disks - conf->max_degraded;
int i;
int dd_idx;
sector_t writepos, safepos, gap;
conf->expand_progress != 0) {
/* restarting in the middle, skip the initial sectors */
sector_nr = conf->expand_progress;
- sector_div(sector_nr, conf->raid_disks-1);
+ sector_div(sector_nr, new_data_disks);
*skipped = 1;
return sector_nr;
}
* to after where expand_lo old_maps to
*/
writepos = conf->expand_progress +
- conf->chunk_size/512*(conf->raid_disks-1);
- sector_div(writepos, conf->raid_disks-1);
+ conf->chunk_size/512*(new_data_disks);
+ sector_div(writepos, new_data_disks);
safepos = conf->expand_lo;
- sector_div(safepos, conf->previous_raid_disks-1);
+ sector_div(safepos, data_disks);
gap = conf->expand_progress - conf->expand_lo;
if (writepos >= safepos ||
- gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
+ gap > (new_data_disks)*3000*2 /*3Meg*/) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes)==0);
sector_t s;
if (j == sh->pd_idx)
continue;
+ if (conf->level == 6 &&
+ j == raid6_next_disk(sh->pd_idx, sh->disks))
+ continue;
s = compute_blocknr(sh, j);
if (s < (mddev->array_size<<1)) {
skipped = 1;
release_stripe(sh);
}
spin_lock_irq(&conf->device_lock);
- conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
+ conf->expand_progress = (sector_nr + i) * new_data_disks;
spin_unlock_irq(&conf->device_lock);
/* Ok, those stripe are ready. We can start scheduling
* reads on the source stripes.
* The source stripes are determined by mapping the first and last
* block on the destination stripes.
*/
- raid_disks = conf->previous_raid_disks;
- data_disks = raid_disks - 1;
first_sector =
- raid5_compute_sector(sector_nr*(conf->raid_disks-1),
+ raid5_compute_sector(sector_nr*(new_data_disks),
raid_disks, data_disks,
&dd_idx, &pd_idx, conf);
last_sector =
raid5_compute_sector((sector_nr+conf->chunk_size/512)
- *(conf->raid_disks-1) -1,
+ *(new_data_disks) -1,
raid_disks, data_disks,
&dd_idx, &pd_idx, conf);
if (last_sector >= (mddev->size<<1))
last_sector = (mddev->size<<1)-1;
while (first_sector <= last_sector) {
- pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
+ pd_idx = stripe_to_pdidx(first_sector, conf,
+ conf->previous_raid_disks);
sh = get_active_stripe(conf, first_sector,
conf->previous_raid_disks, pd_idx, 0);
set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
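A worked example of the position bookkeeping above, with assumed
numbers: a raid5 grown from 4 to 5 disks (data_disks = 3,
new_data_disks = 4), 64 KiB chunks (128 sectors), expand_progress =
102400 and expand_lo = 98304, both in array sectors:

	writepos = (102400 + 128*4) / 4 = 25728   (device sectors)
	safepos  = 98304 / 3            = 32768
	gap      = 102400 - 98304       = 4096    (limit: 4*3000*2)

Since writepos < safepos and the gap is within the roughly-3MB-per-disk
window, the reshape can proceed without first waiting for a superblock
update.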
*/
sector_t here_new, here_old;
int old_disks;
+ int max_degraded = (mddev->level == 5 ? 1 : 2);
if (mddev->new_level != mddev->level ||
mddev->new_layout != mddev->layout ||
mddev->new_chunk != mddev->chunk_size) {
- printk(KERN_ERR "raid5: %s: unsupported reshape required - aborting.\n",
+ printk(KERN_ERR "raid5: %s: unsupported reshape "
+ "required - aborting.\n",
mdname(mddev));
return -EINVAL;
}
if (mddev->delta_disks <= 0) {
- printk(KERN_ERR "raid5: %s: unsupported reshape (reduce disks) required - aborting.\n",
+ printk(KERN_ERR "raid5: %s: unsupported reshape "
+ "(reduce disks) required - aborting.\n",
mdname(mddev));
return -EINVAL;
}
old_disks = mddev->raid_disks - mddev->delta_disks;
/* reshape_position must be on a new-stripe boundary, and one
- * further up in new geometry must map after here in old geometry.
+ * further up in new geometry must map after here in old
+ * geometry.
*/
here_new = mddev->reshape_position;
- if (sector_div(here_new, (mddev->chunk_size>>9)*(mddev->raid_disks-1))) {
- printk(KERN_ERR "raid5: reshape_position not on a stripe boundary\n");
+ if (sector_div(here_new, (mddev->chunk_size>>9)*
+ (mddev->raid_disks - max_degraded))) {
+ printk(KERN_ERR "raid5: reshape_position not "
+ "on a stripe boundary\n");
return -EINVAL;
}
/* here_new is the stripe we will write to */
here_old = mddev->reshape_position;
- sector_div(here_old, (mddev->chunk_size>>9)*(old_disks-1));
- /* here_old is the first stripe that we might need to read from */
+ sector_div(here_old, (mddev->chunk_size>>9)*
+ (old_disks-max_degraded));
+ /* here_old is the first stripe that we might need to read
+ * from */
if (here_new >= here_old) {
/* Reading from the same stripe as writing to - bad */
- printk(KERN_ERR "raid5: reshape_position too early for auto-recovery - aborting.\n");
+ printk(KERN_ERR "raid5: reshape_position too early for "
+ "auto-recovery - aborting.\n");
return -EINVAL;
}
printk(KERN_INFO "raid5: reshape will continue\n");
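Numerically, with assumed values: 64 KiB chunks (128 sectors) and a
raid5 growing from 4 to 5 disks (max_degraded = 1), a new-geometry
stripe spans 128*4 = 512 sectors and an old one 128*3 = 384 sectors.
A reshape_position of 6144 passes both tests: it is a multiple of 512,
and here_new = 6144/512 = 12 lies below here_old = 6144/384 = 16, so
resumed writes land safely behind the stripes still to be read.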
if (err)
return err;
+ if (mddev->degraded > conf->max_degraded)
+ return -EINVAL;
/* looks like we might be able to manage this */
return 0;
}
int added_devices = 0;
unsigned long flags;
- if (mddev->degraded ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
ITERATE_RDEV(mddev, rdev, rtmp)
!test_bit(Faulty, &rdev->flags))
spares++;
- if (spares < mddev->delta_disks-1)
+ if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
/* Not enough devices even to make a degraded array
* of that size
*/
struct block_device *bdev;
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
- conf->mddev->array_size = conf->mddev->size * (conf->raid_disks-1);
+ conf->mddev->array_size = conf->mddev->size *
+ (conf->raid_disks - conf->max_degraded);
set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
conf->mddev->changed = 1;
.spare_active = raid5_spare_active,
.sync_request = sync_request,
.resize = raid5_resize,
+#ifdef CONFIG_MD_RAID5_RESHAPE
+ .check_reshape = raid5_check_reshape,
+ .start_reshape = raid5_start_reshape,
+#endif
.quiesce = raid5_quiesce,
};
static struct mdk_personality raid5_personality =
static int raid6_have_mmx(void)
{
-#ifdef __KERNEL__
/* Not really "boot_cpu" but "all_cpus" */
return boot_cpu_has(X86_FEATURE_MMX);
-#else
- /* User space test code */
- u32 features = cpuid_features();
- return ( (features & (1<<23)) == (1<<23) );
-#endif
}
/*
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_mmx_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_mmx(&sa);
+ kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
asm volatile("pxor %mm4,%mm4");
}
- raid6_after_mmx(&sa);
+ kernel_fpu_end();
}
const struct raid6_calls raid6_mmxx1 = {
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_mmx_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_mmx(&sa);
+ kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
asm volatile("movq %%mm6,%0" : "=m" (q[d+8]));
}
- raid6_after_mmx(&sa);
+ kernel_fpu_end();
}
const struct raid6_calls raid6_mmxx2 = {
static int raid6_have_sse1_or_mmxext(void)
{
-#ifdef __KERNEL__
/* Not really boot_cpu but "all_cpus" */
return boot_cpu_has(X86_FEATURE_MMX) &&
(boot_cpu_has(X86_FEATURE_XMM) ||
boot_cpu_has(X86_FEATURE_MMXEXT));
-#else
- /* User space test code - this incorrectly breaks on some Athlons */
- u32 features = cpuid_features();
- return ( (features & (5<<23)) == (5<<23) );
-#endif
}
/*
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_mmx_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- /* This is really MMX code, not SSE */
- raid6_before_mmx(&sa);
+ kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
}
- raid6_after_mmx(&sa);
asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
}
const struct raid6_calls raid6_sse1x1 = {
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_mmx_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_mmx(&sa);
+ kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
asm volatile("movntq %%mm6,%0" : "=m" (q[d+8]));
}
- raid6_after_mmx(&sa);
asm volatile("sfence" : :: "memory");
+ kernel_fpu_end();
}
const struct raid6_calls raid6_sse1x2 = {
static int raid6_have_sse2(void)
{
-#ifdef __KERNEL__
/* Not really boot_cpu but "all_cpus" */
return boot_cpu_has(X86_FEATURE_MMX) &&
boot_cpu_has(X86_FEATURE_FXSR) &&
boot_cpu_has(X86_FEATURE_XMM) &&
boot_cpu_has(X86_FEATURE_XMM2);
-#else
- /* User space test code */
- u32 features = cpuid_features();
- return ( (features & (15<<23)) == (15<<23) );
-#endif
}
/*
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_sse_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_sse2(&sa);
+ kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
asm volatile("pxor %xmm4,%xmm4");
}
- raid6_after_sse2(&sa);
asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
}
const struct raid6_calls raid6_sse2x1 = {
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_sse_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_sse2(&sa);
+ kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
}
- raid6_after_sse2(&sa);
asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
}
const struct raid6_calls raid6_sse2x2 = {
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_sse16_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_sse16(&sa);
+ kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
asm volatile("pxor %xmm2,%xmm2"); /* P[0] */
asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
asm volatile("pxor %xmm14,%xmm14");
}
+
asm volatile("sfence" : : : "memory");
- raid6_after_sse16(&sa);
+ kernel_fpu_end();
}
const struct raid6_calls raid6_sse2x4 = {
#if defined(__i386__) || defined(__x86_64__)
-#ifdef __x86_64__
-
-typedef struct {
- unsigned int fsave[27];
- unsigned long cr0;
-} raid6_mmx_save_t __attribute__((aligned(16)));
-
-/* N.B.: For SSE we only save %xmm0-%xmm7 even for x86-64, since
- the code doesn't know about the additional x86-64 registers */
-typedef struct {
- unsigned int sarea[8*4+2];
- unsigned long cr0;
-} raid6_sse_save_t __attribute__((aligned(16)));
-
-/* This is for x86-64-specific code which uses all 16 XMM registers */
-typedef struct {
- unsigned int sarea[16*4+2];
- unsigned long cr0;
-} raid6_sse16_save_t __attribute__((aligned(16)));
-
-/* On x86-64 the stack *SHOULD* be 16-byte aligned, but currently this
- is buggy in the kernel and it's only 8-byte aligned in places, so
- we need to do this anyway. Sigh. */
-#define SAREA(x) ((unsigned int *)((((unsigned long)&(x)->sarea)+15) & ~15))
-
-#else /* __i386__ */
-
-typedef struct {
- unsigned int fsave[27];
- unsigned long cr0;
-} raid6_mmx_save_t;
-
-/* On i386, the stack is only 8-byte aligned, but SSE requires 16-byte
- alignment. The +3 is so we have the slack space to manually align
- a properly-sized area correctly. */
-typedef struct {
- unsigned int sarea[8*4+3];
- unsigned long cr0;
-} raid6_sse_save_t;
-
-/* Find the 16-byte aligned save area */
-#define SAREA(x) ((unsigned int *)((((unsigned long)&(x)->sarea)+15) & ~15))
-
-#endif
-
#ifdef __KERNEL__ /* Real code */
-/* Note: %cr0 is 32 bits on i386 and 64 bits on x86-64 */
-
-static inline unsigned long raid6_get_fpu(void)
-{
- unsigned long cr0;
-
- preempt_disable();
- asm volatile("mov %%cr0,%0 ; clts" : "=r" (cr0));
- return cr0;
-}
-
-static inline void raid6_put_fpu(unsigned long cr0)
-{
- asm volatile("mov %0,%%cr0" : : "r" (cr0));
- preempt_enable();
-}
+#include <asm/i387.h>
#else /* Dummy code for user space testing */
-static inline unsigned long raid6_get_fpu(void)
-{
- return 0xf00ba6;
-}
-
-static inline void raid6_put_fpu(unsigned long cr0)
-{
- (void)cr0;
-}
-
-#endif
-
-static inline void raid6_before_mmx(raid6_mmx_save_t *s)
-{
- s->cr0 = raid6_get_fpu();
- asm volatile("fsave %0 ; fwait" : "=m" (s->fsave[0]));
-}
-
-static inline void raid6_after_mmx(raid6_mmx_save_t *s)
-{
- asm volatile("frstor %0" : : "m" (s->fsave[0]));
- raid6_put_fpu(s->cr0);
-}
-
-static inline void raid6_before_sse(raid6_sse_save_t *s)
-{
- unsigned int *rsa = SAREA(s);
-
- s->cr0 = raid6_get_fpu();
-
- asm volatile("movaps %%xmm0,%0" : "=m" (rsa[0]));
- asm volatile("movaps %%xmm1,%0" : "=m" (rsa[4]));
- asm volatile("movaps %%xmm2,%0" : "=m" (rsa[8]));
- asm volatile("movaps %%xmm3,%0" : "=m" (rsa[12]));
- asm volatile("movaps %%xmm4,%0" : "=m" (rsa[16]));
- asm volatile("movaps %%xmm5,%0" : "=m" (rsa[20]));
- asm volatile("movaps %%xmm6,%0" : "=m" (rsa[24]));
- asm volatile("movaps %%xmm7,%0" : "=m" (rsa[28]));
-}
-
-static inline void raid6_after_sse(raid6_sse_save_t *s)
-{
- unsigned int *rsa = SAREA(s);
-
- asm volatile("movaps %0,%%xmm0" : : "m" (rsa[0]));
- asm volatile("movaps %0,%%xmm1" : : "m" (rsa[4]));
- asm volatile("movaps %0,%%xmm2" : : "m" (rsa[8]));
- asm volatile("movaps %0,%%xmm3" : : "m" (rsa[12]));
- asm volatile("movaps %0,%%xmm4" : : "m" (rsa[16]));
- asm volatile("movaps %0,%%xmm5" : : "m" (rsa[20]));
- asm volatile("movaps %0,%%xmm6" : : "m" (rsa[24]));
- asm volatile("movaps %0,%%xmm7" : : "m" (rsa[28]));
-
- raid6_put_fpu(s->cr0);
-}
-
-static inline void raid6_before_sse2(raid6_sse_save_t *s)
+static inline void kernel_fpu_begin(void)
{
- unsigned int *rsa = SAREA(s);
-
- s->cr0 = raid6_get_fpu();
-
- asm volatile("movdqa %%xmm0,%0" : "=m" (rsa[0]));
- asm volatile("movdqa %%xmm1,%0" : "=m" (rsa[4]));
- asm volatile("movdqa %%xmm2,%0" : "=m" (rsa[8]));
- asm volatile("movdqa %%xmm3,%0" : "=m" (rsa[12]));
- asm volatile("movdqa %%xmm4,%0" : "=m" (rsa[16]));
- asm volatile("movdqa %%xmm5,%0" : "=m" (rsa[20]));
- asm volatile("movdqa %%xmm6,%0" : "=m" (rsa[24]));
- asm volatile("movdqa %%xmm7,%0" : "=m" (rsa[28]));
}
-static inline void raid6_after_sse2(raid6_sse_save_t *s)
+static inline void kernel_fpu_end(void)
{
- unsigned int *rsa = SAREA(s);
-
- asm volatile("movdqa %0,%%xmm0" : : "m" (rsa[0]));
- asm volatile("movdqa %0,%%xmm1" : : "m" (rsa[4]));
- asm volatile("movdqa %0,%%xmm2" : : "m" (rsa[8]));
- asm volatile("movdqa %0,%%xmm3" : : "m" (rsa[12]));
- asm volatile("movdqa %0,%%xmm4" : : "m" (rsa[16]));
- asm volatile("movdqa %0,%%xmm5" : : "m" (rsa[20]));
- asm volatile("movdqa %0,%%xmm6" : : "m" (rsa[24]));
- asm volatile("movdqa %0,%%xmm7" : : "m" (rsa[28]));
-
- raid6_put_fpu(s->cr0);
}
-#ifdef __x86_64__
-
-static inline void raid6_before_sse16(raid6_sse16_save_t *s)
-{
- unsigned int *rsa = SAREA(s);
-
- s->cr0 = raid6_get_fpu();
+#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions
+ * (fast save and restore) */
+#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
+#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
+#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
- asm volatile("movdqa %%xmm0,%0" : "=m" (rsa[0]));
- asm volatile("movdqa %%xmm1,%0" : "=m" (rsa[4]));
- asm volatile("movdqa %%xmm2,%0" : "=m" (rsa[8]));
- asm volatile("movdqa %%xmm3,%0" : "=m" (rsa[12]));
- asm volatile("movdqa %%xmm4,%0" : "=m" (rsa[16]));
- asm volatile("movdqa %%xmm5,%0" : "=m" (rsa[20]));
- asm volatile("movdqa %%xmm6,%0" : "=m" (rsa[24]));
- asm volatile("movdqa %%xmm7,%0" : "=m" (rsa[28]));
- asm volatile("movdqa %%xmm8,%0" : "=m" (rsa[32]));
- asm volatile("movdqa %%xmm9,%0" : "=m" (rsa[36]));
- asm volatile("movdqa %%xmm10,%0" : "=m" (rsa[40]));
- asm volatile("movdqa %%xmm11,%0" : "=m" (rsa[44]));
- asm volatile("movdqa %%xmm12,%0" : "=m" (rsa[48]));
- asm volatile("movdqa %%xmm13,%0" : "=m" (rsa[52]));
- asm volatile("movdqa %%xmm14,%0" : "=m" (rsa[56]));
- asm volatile("movdqa %%xmm15,%0" : "=m" (rsa[60]));
-}
-
-static inline void raid6_after_sse16(raid6_sse16_save_t *s)
+/* Should work well enough on modern CPUs for testing */
+static inline int boot_cpu_has(int flag)
{
- unsigned int *rsa = SAREA(s);
+ u32 eax = (flag >> 5) ? 0x80000001 : 1;
+ u32 edx;
- asm volatile("movdqa %0,%%xmm0" : : "m" (rsa[0]));
- asm volatile("movdqa %0,%%xmm1" : : "m" (rsa[4]));
- asm volatile("movdqa %0,%%xmm2" : : "m" (rsa[8]));
- asm volatile("movdqa %0,%%xmm3" : : "m" (rsa[12]));
- asm volatile("movdqa %0,%%xmm4" : : "m" (rsa[16]));
- asm volatile("movdqa %0,%%xmm5" : : "m" (rsa[20]));
- asm volatile("movdqa %0,%%xmm6" : : "m" (rsa[24]));
- asm volatile("movdqa %0,%%xmm7" : : "m" (rsa[28]));
- asm volatile("movdqa %0,%%xmm8" : : "m" (rsa[32]));
- asm volatile("movdqa %0,%%xmm9" : : "m" (rsa[36]));
- asm volatile("movdqa %0,%%xmm10" : : "m" (rsa[40]));
- asm volatile("movdqa %0,%%xmm11" : : "m" (rsa[44]));
- asm volatile("movdqa %0,%%xmm12" : : "m" (rsa[48]));
- asm volatile("movdqa %0,%%xmm13" : : "m" (rsa[52]));
- asm volatile("movdqa %0,%%xmm14" : : "m" (rsa[56]));
- asm volatile("movdqa %0,%%xmm15" : : "m" (rsa[60]));
+ asm volatile("cpuid"
+ : "+a" (eax), "=d" (edx)
+ : : "ecx", "ebx");
- raid6_put_fpu(s->cr0);
+ return (edx >> (flag & 31)) & 1;
}
-#endif /* __x86_64__ */
-
-/* User space test hack */
-#ifndef __KERNEL__
-static inline int cpuid_features(void)
-{
- u32 eax = 1;
- u32 ebx, ecx, edx;
-
- asm volatile("cpuid" :
- "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
-
- return edx;
-}
#endif /* ndef __KERNEL__ */
#endif
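
The userspace fallback above only needs enough CPUID support to pick an
algorithm, so the X86_FEATURE_* values reuse the kernel's word*32+bit
encoding: word 0 is tested in CPUID leaf 1's EDX, word 1 in extended leaf
0x80000001's EDX. A minimal standalone sketch of the same decoding (a
hypothetical test program mirroring boot_cpu_has() above, not part of the
patch):

#include <stdio.h>

static int has_feature(int flag)
{
	/* word 1 flags (bit 32 and up) live in the extended leaf */
	unsigned int eax = (flag >> 5) ? 0x80000001 : 1;
	unsigned int edx;

	asm volatile("cpuid"
		     : "+a" (eax), "=d" (edx)
		     : : "ecx", "ebx");

	return (edx >> (flag & 31)) & 1;	/* bit within EDX */
}

int main(void)
{
	printf("SSE2:   %d\n", has_feature(0*32+26));	/* X86_FEATURE_XMM2 */
	printf("MMXEXT: %d\n", has_feature(1*32+22));	/* X86_FEATURE_MMXEXT */
	return 0;
}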
fw->data[BLUEBIRD_01_ID_OFFSET + 1] == USB_VID_DVICO >> 8) {
fw->data[BLUEBIRD_01_ID_OFFSET + 2] =
- udev->descriptor.idProduct + 1;
+ le16_to_cpu(udev->descriptor.idProduct) + 1;
fw->data[BLUEBIRD_01_ID_OFFSET + 3] =
- udev->descriptor.idProduct >> 8;
+ le16_to_cpu(udev->descriptor.idProduct) >> 8;
return usb_cypress_load_firmware(udev, fw, CYPRESS_FX2);
}
struct dvb_usb_adapter *adap = fe->dvb->priv;
u8 b[5];
dvb_usb_tuner_calc_regs(fe,fep,b, 5);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
return digitv_ctrl_msg(adap->dev, USB_WRITE_TUNER, 0, &b[1], 4, NULL, 0);
}
char __user *buffer, size_t len, loff_t *pos)
{
struct cafe_camera *cam = filp->private_data;
- int ret;
+ int ret = 0;
/*
* Perhaps we're in speculative read mode and already
if (cam->n_sbufs == 0) /* no luck at all - ret already set */
kfree(cam->sb_bufs);
- else
- ret = 0;
req->count = cam->n_sbufs; /* In case of partial success */
out:
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_CX25840)
+ if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
state->vbi_line_offset = 8;
state->id = id;
+ i2c_attach_client(client);
+
if (state->is_cx25836)
cx25836_initialize(client);
else
cx25840_initialize(client, 1);
- i2c_attach_client(client);
-
return 0;
}
*/
#define FWSEND 48
-#define FWDEV(x) &((x)->adapter->dev)
+#define FWDEV(x) &((x)->dev)
static char *firmware = FWFILE;
/* ------------------------------------------------------------------ */
-#define BLACKBIRD_FIRM_IMAGE_SIZE 256*1024
+#define OLD_BLACKBIRD_FIRM_IMAGE_SIZE 262144
+#define BLACKBIRD_FIRM_IMAGE_SIZE 376836
/* defines below are from ivtv-driver.h */
u32 value;
int i;
- for (i = 0; i < BLACKBIRD_FIRM_IMAGE_SIZE; i++) {
+ for (i = 0; i < dev->fw_size; i++) {
memory_read(dev->core, i, &value);
if (value == signature[signaturecnt])
signaturecnt++;
return -1;
}
- if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) {
- dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d)\n",
- firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
+ if ((firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) &&
+ (firmware->size != OLD_BLACKBIRD_FIRM_IMAGE_SIZE)) {
+ dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d or %d)\n",
+ firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE,
+ OLD_BLACKBIRD_FIRM_IMAGE_SIZE);
release_firmware(firmware);
return -1;
}
+ dev->fw_size = firmware->size;
if (0 != memcmp(firmware->data, magic, 8)) {
dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n");
{
struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core;
- if (reg->i2c_id != 0)
+ if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
return -EINVAL;
/* cx2388x has a 24-bit register space */
reg->val = cx_read(reg->reg&0xffffff);
{
struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core;
- if (reg->i2c_id != 0)
+ if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
return -EINVAL;
cx_write(reg->reg&0xffffff, reg->val);
return 0;
u32 mailbox;
int width;
int height;
+ int fw_size; /* size in bytes of the loaded firmware */
#if defined(CONFIG_VIDEO_BUF_DVB) || defined(CONFIG_VIDEO_BUF_DVB_MODULE)
/* for dvb only */
if (fw_len % sizeof(u32)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"size of %s firmware"
- " must be a multiple of %u bytes",
+ " must be a multiple of %zu bytes",
fw_files[fwidx],sizeof(u32));
release_firmware(fw_entry);
return -1;
int pvr2_hdw_register_access(struct pvr2_hdw *hdw,
- u32 chip_id, u64 reg_id,
- int setFl,u32 *val_ptr)
+ u32 match_type, u32 match_chip, u64 reg_id,
+ int setFl,u64 *val_ptr)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
struct list_head *item;
if (!capable(CAP_SYS_ADMIN)) return -EPERM;
- req.i2c_id = chip_id;
+ req.match_type = match_type;
+ req.match_chip = match_chip;
req.reg = reg_id;
if (setFl) req.val = *val_ptr;
mutex_lock(&hdw->i2c_list_lock); do {
list_for_each(item,&hdw->i2c_clients) {
cp = list_entry(item,struct pvr2_i2c_client,list);
- if (cp->client->driver->id != chip_id) continue;
+ if (!v4l2_chip_match_i2c_client(cp->client, req.match_type, req.match_chip)) {
+ continue;
+ }
stat = pvr2_i2c_client_cmd(
cp,(setFl ? VIDIOC_DBG_S_REGISTER :
VIDIOC_DBG_G_REGISTER),&req);
enum pvr2_v4l_type index,int);
/* Direct read/write access to chip's registers:
- chip_id - unique id of chip (e.g. I2C_DRIVERID_xxxx)
+ match_type - how to interpret match_chip (e.g. driver ID, i2c address)
+ match_chip - chip match value (e.g. I2C_DRIVERID_xxxx)
reg_id - register number to access
setFl - true to set the register, false to read it
val_ptr - storage location for source / result. */
int pvr2_hdw_register_access(struct pvr2_hdw *,
- u32 chip_id,u64 reg_id,
- int setFl,u32 *val_ptr);
+ u32 match_type, u32 match_chip,u64 reg_id,
+ int setFl,u64 *val_ptr);
/* The following entry points are all lower level things you normally don't
want to worry about. */
case VIDIOC_DBG_S_REGISTER:
case VIDIOC_DBG_G_REGISTER:
{
- u32 val;
+ u64 val;
struct v4l2_register *req = (struct v4l2_register *)arg;
if (cmd == VIDIOC_DBG_S_REGISTER) val = req->val;
ret = pvr2_hdw_register_access(
- hdw,req->i2c_id,req->reg,
+ hdw,req->match_type,req->match_chip,req->reg,
cmd == VIDIOC_DBG_S_REGISTER,&val);
if (cmd == VIDIOC_DBG_G_REGISTER) req->val = val;
break;
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_SAA711X)
+ if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_SAA7127)
+ if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_TVP5150)
+ if (!v4l2_chip_match_i2c_client(c, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_UPD64031A)
+ if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_UPD64083)
+ if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
struct v4l2_register *reg = arg;
int errCode;
- if (reg->i2c_id != 0)
+ if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
PDEBUG(DBG_IOCTL, "VIDIOC_DBG_%c_REGISTER reg=0x%02X, value=0x%02X",
cmd == VIDIOC_DBG_G_REGISTER ? 'G' : 'S',
- (unsigned int)reg->reg, reg->val);
+ (unsigned int)reg->reg, (unsigned int)reg->val);
return 0;
}
#endif
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/i2c.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
[_IOC_NR(VIDIOC_ENUMAUDOUT)] = "VIDIOC_ENUMAUDOUT",
[_IOC_NR(VIDIOC_G_PRIORITY)] = "VIDIOC_G_PRIORITY",
[_IOC_NR(VIDIOC_S_PRIORITY)] = "VIDIOC_S_PRIORITY",
-#if 1
[_IOC_NR(VIDIOC_G_SLICED_VBI_CAP)] = "VIDIOC_G_SLICED_VBI_CAP",
-#endif
[_IOC_NR(VIDIOC_LOG_STATUS)] = "VIDIOC_LOG_STATUS",
[_IOC_NR(VIDIOC_G_EXT_CTRLS)] = "VIDIOC_G_EXT_CTRLS",
[_IOC_NR(VIDIOC_S_EXT_CTRLS)] = "VIDIOC_S_EXT_CTRLS",
- [_IOC_NR(VIDIOC_TRY_EXT_CTRLS)] = "VIDIOC_TRY_EXT_CTRLS"
+ [_IOC_NR(VIDIOC_TRY_EXT_CTRLS)] = "VIDIOC_TRY_EXT_CTRLS",
+#if 1
+ [_IOC_NR(VIDIOC_ENUM_FRAMESIZES)] = "VIDIOC_ENUM_FRAMESIZES",
+ [_IOC_NR(VIDIOC_ENUM_FRAMEINTERVALS)] = "VIDIOC_ENUM_FRAMEINTERVALS",
+ [_IOC_NR(VIDIOC_G_ENC_INDEX)] = "VIDIOC_G_ENC_INDEX",
+ [_IOC_NR(VIDIOC_ENCODER_CMD)] = "VIDIOC_ENCODER_CMD",
+ [_IOC_NR(VIDIOC_TRY_ENCODER_CMD)] = "VIDIOC_TRY_ENCODER_CMD",
+
+ [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
+ [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
+#endif
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
[_IOC_NR(TUNER_SET_STANDBY)] = "TUNER_SET_STANDBY",
[_IOC_NR(TDA9887_SET_CONFIG)] = "TDA9887_SET_CONFIG",
- [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
- [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
-
[_IOC_NR(VIDIOC_INT_S_TUNER_MODE)] = "VIDIOC_INT_S_TUNER_MODE",
[_IOC_NR(VIDIOC_INT_RESET)] = "VIDIOC_INT_RESET",
[_IOC_NR(VIDIOC_INT_AUDIO_CLOCK_FREQ)] = "VIDIOC_INT_AUDIO_CLOCK_FREQ",
return **ctrl_classes;
}
+int v4l2_chip_match_i2c_client(struct i2c_client *c, u32 match_type, u32 match_chip)
+{
+ switch (match_type) {
+ case V4L2_CHIP_MATCH_I2C_DRIVER:
+ return (c != NULL && c->driver != NULL && c->driver->id == match_chip);
+ case V4L2_CHIP_MATCH_I2C_ADDR:
+ return (c != NULL && c->addr == match_chip);
+ default:
+ return 0;
+ }
+}
+
+int v4l2_chip_match_host(u32 match_type, u32 match_chip)
+{
+ switch (match_type) {
+ case V4L2_CHIP_MATCH_HOST:
+ return match_chip == 0;
+ default:
+ return 0;
+ }
+}
+
/* ----------------------------------------------------------------- */
EXPORT_SYMBOL(v4l2_norm_to_name);
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
EXPORT_SYMBOL(v4l2_ctrl_query_fill_std);
+EXPORT_SYMBOL(v4l2_chip_match_i2c_client);
+EXPORT_SYMBOL(v4l2_chip_match_host);
+
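
The two matchers give VIDIOC_DBG_G_REGISTER/VIDIOC_DBG_S_REGISTER a uniform
way to address a chip either by i2c driver ID or by i2c address, replacing
the per-driver i2c_id checks converted throughout this series. A
hypothetical user-space sketch (device path, i2c address, and register
number are examples only; CAP_SYS_ADMIN is required, as the handlers
enforce):

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	/* match by i2c address -- 0x44 is an arbitrary example */
	struct v4l2_register reg = {
		.match_type = V4L2_CHIP_MATCH_I2C_ADDR,
		.match_chip = 0x44,
		.reg = 0x00,
	};
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0 || ioctl(fd, VIDIOC_DBG_G_REGISTER, &reg) < 0) {
		perror("VIDIOC_DBG_G_REGISTER");
		return 1;
	}
	printf("reg 0x00 = 0x%llx\n", (unsigned long long)reg.val);
	return 0;
}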
/*
* Local variables:
* c-basic-offset: 8
ret=vfd->vidioc_s_jpegcomp(file, fh, p);
break;
}
+ case VIDIOC_G_ENC_INDEX:
+ {
+ struct v4l2_enc_idx *p=arg;
+
+ if (!vfd->vidioc_g_enc_index)
+ break;
+ ret=vfd->vidioc_g_enc_index(file, fh, p);
+ if (!ret)
+ dbgarg (cmd, "entries=%d, entries_cap=%d\n",
+ p->entries,p->entries_cap);
+ break;
+ }
+ case VIDIOC_ENCODER_CMD:
+ {
+ struct v4l2_encoder_cmd *p=arg;
+
+ if (!vfd->vidioc_encoder_cmd)
+ break;
+ ret=vfd->vidioc_encoder_cmd(file, fh, p);
+ if (!ret)
+ dbgarg (cmd, "cmd=%d, flags=%d\n",
+ p->cmd,p->flags);
+ break;
+ }
+ case VIDIOC_TRY_ENCODER_CMD:
+ {
+ struct v4l2_encoder_cmd *p=arg;
+
+ if (!vfd->vidioc_try_encoder_cmd)
+ break;
+ ret=vfd->vidioc_try_encoder_cmd(file, fh, p);
+ if (!ret)
+ dbgarg (cmd, "cmd=%d, flags=%d\n",
+ p->cmd,p->flags);
+ break;
+ }
case VIDIOC_G_PARM:
{
struct v4l2_streamparm *p=arg;
{
struct mmc_ios *ios = &host->ios;
- pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
+ pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
+ "width %u timing %u\n",
mmc_hostname(host), ios->clock, ios->bus_mode,
ios->power_mode, ios->chip_select, ios->vdd,
- ios->bus_width);
+ ios->bus_width, ios->timing);
host->ops->set_ios(host, ios);
}
host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.power_mode = MMC_POWER_UP;
host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
mmc_delay(1);
host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.power_mode = MMC_POWER_OFF;
host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
}
continue;
}
- /* Activate highspeed support. */
- cmd.opcode = MMC_SWITCH;
- cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
- (EXT_CSD_HS_TIMING << 16) |
- (1 << 8) |
- EXT_CSD_CMD_SET_NORMAL;
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ if (host->caps & MMC_CAP_MMC_HIGHSPEED) {
+ /* Activate highspeed support. */
+ cmd.opcode = MMC_SWITCH;
+ cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_HS_TIMING << 16) |
+ (1 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
- if (err != MMC_ERR_NONE) {
- printk("%s: failed to switch card to mmc v4 "
- "high-speed mode.\n",
- mmc_hostname(card->host));
- continue;
- }
+ err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+ if (err != MMC_ERR_NONE) {
+ printk("%s: failed to switch card to mmc v4 "
+ "high-speed mode.\n",
+ mmc_hostname(card->host));
+ continue;
+ }
- mmc_card_set_highspeed(card);
+ mmc_card_set_highspeed(card);
- /* Check for host support for wide-bus modes. */
- if (!(host->caps & MMC_CAP_4_BIT_DATA)) {
- continue;
+ host->ios.timing = MMC_TIMING_SD_HS;
+ mmc_set_ios(host);
}
- /* Activate 4-bit support. */
- cmd.opcode = MMC_SWITCH;
- cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
- (EXT_CSD_BUS_WIDTH << 16) |
- (EXT_CSD_BUS_WIDTH_4 << 8) |
- EXT_CSD_CMD_SET_NORMAL;
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ /* Check for host support for wide-bus modes. */
+ if (host->caps & MMC_CAP_4_BIT_DATA) {
+ /* Activate 4-bit support. */
+ cmd.opcode = MMC_SWITCH;
+ cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_BUS_WIDTH << 16) |
+ (EXT_CSD_BUS_WIDTH_4 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
- if (err != MMC_ERR_NONE) {
- printk("%s: failed to switch card to "
- "mmc v4 4-bit bus mode.\n",
- mmc_hostname(card->host));
- continue;
- }
+ err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+ if (err != MMC_ERR_NONE) {
+ printk("%s: failed to switch card to "
+ "mmc v4 4-bit bus mode.\n",
+ mmc_hostname(card->host));
+ continue;
+ }
- host->ios.bus_width = MMC_BUS_WIDTH_4;
+ host->ios.bus_width = MMC_BUS_WIDTH_4;
+ mmc_set_ios(host);
+ }
}
kfree(ext_csd);
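
Both EXT_CSD updates above issue MMC_SWITCH with the same argument layout:
access mode in bits 31:24, the EXT_CSD byte index in 23:16, the value in
15:8, and the command set in 7:0. A helper could make the packing explicit
(an assumed refactoring for illustration, not part of the patch):

/*
 * Pack an MMC_SWITCH argument: byte-write access mode, the EXT_CSD
 * byte index to change, the new value, and the command set.
 */
static u32 mmc_switch_arg(u8 index, u8 value)
{
	return (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	       (index << 16) |
	       (value << 8) |
	       EXT_CSD_CMD_SET_NORMAL;
}

The two open-coded expressions would then become
mmc_switch_arg(EXT_CSD_HS_TIMING, 1) and
mmc_switch_arg(EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4).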
unsigned char *status;
struct scatterlist sg;
+ if (!(host->caps & MMC_CAP_SD_HIGHSPEED))
+ return;
+
status = kmalloc(64, GFP_KERNEL);
if (!status) {
printk(KERN_WARNING "%s: Unable to allocate buffer for "
}
mmc_card_set_highspeed(card);
+
+ host->ios.timing = MMC_TIMING_SD_HS;
+ mmc_set_ios(host);
}
kfree(status);
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
int div;
- u8 ctrl;
u16 clk;
unsigned long timeout;
writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
- ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
- if (clock > 25000000)
- ctrl |= SDHCI_CTRL_HISPD;
- else
- ctrl &= ~SDHCI_CTRL_HISPD;
- writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
-
if (clock == 0)
goto out;
sdhci_set_power(host, ios->vdd);
ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+
if (ios->bus_width == MMC_BUS_WIDTH_4)
ctrl |= SDHCI_CTRL_4BITBUS;
else
ctrl &= ~SDHCI_CTRL_4BITBUS;
+
+ if (ios->timing == MMC_TIMING_SD_HS)
+ ctrl |= SDHCI_CTRL_HISPD;
+ else
+ ctrl &= ~SDHCI_CTRL_HISPD;
+
writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
mmiowb();
intmask = readl(host->ioaddr + SDHCI_INT_STATUS);
- if (!intmask) {
+ if (!intmask || intmask == 0xffffffff) {
result = IRQ_NONE;
goto out;
}
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+
+ for (i = 0;i < chip->num_slots;i++) {
+ if (!chip->hosts[i])
+ continue;
+ free_irq(chip->hosts[i]->irq, chip->hosts[i]);
+ }
+
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
continue;
if (chip->hosts[i]->flags & SDHCI_USE_DMA)
pci_set_master(pdev);
+ ret = request_irq(chip->hosts[i]->irq, sdhci_irq,
+ IRQF_SHARED, chip->hosts[i]->slot_descr,
+ chip->hosts[i]);
+ if (ret)
+ return ret;
sdhci_init(chip->hosts[i]);
mmiowb();
ret = mmc_resume_host(chip->hosts[i]->mmc);
mmc->f_max = host->max_clk;
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;
+ if (caps & SDHCI_CAN_DO_HISPD)
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+
mmc->ocr_avail = 0;
if (caps & SDHCI_CAN_VDD_330)
mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
if (caps & SDHCI_CAN_VDD_180)
mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19;
- if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) {
- printk(KERN_ERR "%s: Controller reports > 25 MHz base clock,"
- " but no high speed support.\n",
- host->slot_descr);
- mmc->f_max = 25000000;
- }
-
if (mmc->ocr_avail == 0) {
printk(KERN_ERR "%s: Hardware doesn't report any "
"support voltages.\n", host->slot_descr);
{
struct net_device *dev = pci_get_drvdata(pdev);
struct vortex_private *vp = netdev_priv(dev);
+ int err;
if (dev && vp) {
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_enable_device(pdev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_WARNING "%s: Could not enable device\n",
+ dev->name);
+ return err;
+ }
pci_set_master(pdev);
if (request_irq(dev->irq, vp->full_bus_master_rx ?
&boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) {
spin_lock_irqsave(&cp->lock, flags);
cp->cpcmd &= ~RxVlanOn;
cpw16(CpCmd, cp->cpcmd);
- if (cp->vlgrp)
- cp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(cp->vlgrp, vid, NULL);
spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */
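
This is the first of many conversions in this series from indexing
grp->vlan_devices[] directly to the vlan_group_set_device() and
vlan_group_get_device() accessors. Conceptually they are NULL-safe wrappers
around the lookup, which is why the old "if (grp)" guards disappear at the
call sites; roughly (the real helpers live in <linux/if_vlan.h> and may
store the table differently):

static inline void vlan_group_set_device(struct vlan_group *vg,
					 int vlan_id, struct net_device *dev)
{
	if (vg)
		vg->vlan_devices[vlan_id] = dev;
}

static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
						       int vlan_id)
{
	return vg ? vg->vlan_devices[vlan_id] : NULL;
}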
config SPIDER_NET
tristate "Spider Gigabit Ethernet driver"
- depends on PCI && PPC_IBM_CELL_BLADE
+ depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
select FW_LOADER
help
This driver supports the Gigabit Ethernet chips present on the
local_irq_save(flags);
ace_mask_irq(dev);
-
- if (ap->vlgrp)
- ap->vlgrp->vlan_devices[vid] = NULL;
-
+ vlan_group_set_device(ap->vlgrp, vid, NULL);
ace_unmask_irq(dev);
local_irq_restore(flags);
}
{
struct amd8111e_priv *lp = netdev_priv(dev);
spin_lock_irq(&lp->lock);
- if (lp->vlgrp)
- lp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(lp->vlgrp, vid, NULL);
spin_unlock_irq(&lp->lock);
}
#endif
spin_lock_irqsave(&adapter->lock, flags);
/* atl1_irq_disable(adapter); */
- if (adapter->vlgrp)
- adapter->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
/* atl1_irq_enable(adapter); */
spin_unlock_irqrestore(&adapter->lock, flags);
/* We don't do Vlan filtering */
if (adapter->vlgrp) {
u16 vid;
for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
- if (!adapter->vlgrp->vlan_devices[vid])
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
continue;
atl1_vlan_rx_add_vid(adapter->netdev, vid);
}
struct bnx2 *bp = netdev_priv(dev);
bnx2_netif_stop(bp);
-
- if (bp->vlgrp)
- bp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(bp->vlgrp, vid, NULL);
bnx2_set_rx_mode(dev);
bnx2_netif_start(bp);
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
+#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
/* Save and then restore vlan_dev in the grp array,
* since the slave's driver might clear it.
*/
- vlan_dev = bond->vlgrp->vlan_devices[vid];
+ vlan_dev = vlan_group_get_device(bond->vlgrp, vid);
slave_dev->vlan_rx_kill_vid(slave_dev, vid);
- bond->vlgrp->vlan_devices[vid] = vlan_dev;
+ vlan_group_set_device(bond->vlgrp, vid, vlan_dev);
}
}
/* Save and then restore vlan_dev in the grp array,
* since the slave's driver might clear it.
*/
- vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+ vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
- bond->vlgrp->vlan_devices[vlan->vlan_id] = vlan_dev;
+ vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev);
}
unreg:
}
}
+
+/*
+ * Retrieve the list of registered multicast addresses for the bonding
+ * device and retransmit an IGMP JOIN request to the current active
+ * slave.
+ */
+static void bond_resend_igmp_join_requests(struct bonding *bond)
+{
+ struct in_device *in_dev;
+ struct ip_mc_list *im;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(bond->dev);
+ if (in_dev) {
+ for (im = in_dev->mc_list; im; im = im->next) {
+ ip_mc_rejoin_group(im);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
/*
* Totally destroys the mc_list in bond
*/
kfree(dmi);
dmi = bond->mc_list;
}
+ bond->mc_list = NULL;
}
/*
for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) {
dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
}
+ bond_resend_igmp_join_requests(bond);
}
}
vlan_id = 0;
list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
vlan_list) {
- vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+ vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
if (vlan_dev == rt->u.dst.dev) {
vlan_id = vlan->vlan_id;
dprintk("basa: vlan match on %s %d\n",
}
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+ vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
if (vlan->vlan_ip) {
bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip,
vlan->vlan_ip, vlan->vlan_id);
list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
vlan_list) {
- vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+ vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
if (vlan_dev == event_dev) {
switch (event) {
case NETDEV_UP:
{
struct packet_type *pt = &bond->arp_mon_pt;
+ if (pt->type)
+ return;
+
pt->type = htons(ETH_P_ARP);
- pt->dev = NULL; /*bond->dev;XXX*/
+ pt->dev = bond->dev;
pt->func = bond_arp_rcv;
dev_add_pack(pt);
}
void bond_unregister_arp(struct bonding *bond)
{
- dev_remove_pack(&bond->arp_mon_pt);
+ struct packet_type *pt = &bond->arp_mon_pt;
+
+ dev_remove_pack(pt);
+ pt->type = 0;
}
/*---------------------------- Hashing Policies -----------------------------*/
return 0;
}
-static void bond_activebackup_xmit_copy(struct sk_buff *skb,
- struct bonding *bond,
- struct slave *slave)
-{
- struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
- struct ethhdr *eth_data;
- u8 *hwaddr;
- int res;
-
- if (!skb2) {
- printk(KERN_ERR DRV_NAME ": Error: "
- "bond_activebackup_xmit_copy(): skb_copy() failed\n");
- return;
- }
-
- skb2->mac.raw = (unsigned char *)skb2->data;
- eth_data = eth_hdr(skb2);
-
- /* Pick an appropriate source MAC address
- * -- use slave's perm MAC addr, unless used by bond
- * -- otherwise, borrow active slave's perm MAC addr
- * since that will not be used
- */
- hwaddr = slave->perm_hwaddr;
- if (!memcmp(eth_data->h_source, hwaddr, ETH_ALEN))
- hwaddr = bond->curr_active_slave->perm_hwaddr;
-
- /* Set source MAC address appropriately */
- memcpy(eth_data->h_source, hwaddr, ETH_ALEN);
-
- res = bond_dev_queue_xmit(bond, skb2, slave->dev);
- if (res)
- dev_kfree_skb(skb2);
-
- return;
-}
/*
* in active-backup mode, we know that bond->curr_active_slave is always valid if
if (!bond->curr_active_slave)
goto out;
- /* Xmit IGMP frames on all slaves to ensure rapid fail-over
- for multicast traffic on snooping switches */
- if (skb->protocol == __constant_htons(ETH_P_IP) &&
- skb->nh.iph->protocol == IPPROTO_IGMP) {
- struct slave *slave, *active_slave;
- int i;
-
- active_slave = bond->curr_active_slave;
- bond_for_each_slave_from_to(bond, slave, i, active_slave->next,
- active_slave->prev)
- if (IS_UP(slave->dev) &&
- (slave->link == BOND_LINK_UP))
- bond_activebackup_xmit_copy(skb, bond, slave);
- }
-
res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
out:
struct adapter *adapter = dev->priv;
spin_lock_irq(&adapter->async_lock);
- if (adapter->vlan_grp)
- adapter->vlan_grp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(adapter->vlan_grp, vid, NULL);
spin_unlock_irq(&adapter->async_lock);
}
#endif
{
int work_done;
struct adapter *adapter = cookie;
+ struct respQ *Q = &adapter->sge->respQ;
spin_lock(&adapter->async_lock);
struct rx_desc;
struct rx_sw_desc;
+struct sge_fl_page {
+ struct skb_frag_struct frag;
+ unsigned char *va;
+};
+
struct sge_fl { /* SGE per free-buffer list state */
unsigned int buf_size; /* size of each Rx buffer */
unsigned int credits; /* # of available Rx buffers */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned int gen; /* free list generation */
+ unsigned int cntxt_id; /* SGE context id for the free list */
+ struct sge_fl_page page;
struct rx_desc *desc; /* address of HW Rx descriptor ring */
struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
dma_addr_t phys_addr; /* physical address of HW ring start */
- unsigned int cntxt_id; /* SGE context id for the free list */
unsigned long empty; /* # of times queue ran out of buffers */
+ unsigned long alloc_failed; /* # of times buffer allocation failed */
};
/*
unsigned long empty; /* # of times queue ran out of credits */
unsigned long nomem; /* # of responses deferred due to no mem */
unsigned long unhandled_irqs; /* # of spurious intrs */
+ unsigned long starved; /* # of times rspq was found starved */
+ unsigned long restarted; /* # of times a starved rspq was restarted */
};
struct tx_desc;
* Ioctl commands specific to this driver.
*/
enum {
- CHELSIO_SETREG = 1024,
- CHELSIO_GETREG,
- CHELSIO_SETTPI,
- CHELSIO_GETTPI,
- CHELSIO_GETMTUTAB,
- CHELSIO_SETMTUTAB,
- CHELSIO_GETMTU,
- CHELSIO_SET_PM,
- CHELSIO_GET_PM,
- CHELSIO_GET_TCAM,
- CHELSIO_SET_TCAM,
- CHELSIO_GET_TCB,
- CHELSIO_GET_MEM,
- CHELSIO_LOAD_FW,
- CHELSIO_GET_PROTO,
- CHELSIO_SET_PROTO,
- CHELSIO_SET_TRACE_FILTER,
- CHELSIO_SET_QSET_PARAMS,
- CHELSIO_GET_QSET_PARAMS,
- CHELSIO_SET_QSET_NUM,
- CHELSIO_GET_QSET_NUM,
- CHELSIO_SET_PKTSCHED,
+ CHELSIO_GETMTUTAB = 1029,
+ CHELSIO_SETMTUTAB = 1030,
+ CHELSIO_SET_PM = 1032,
+ CHELSIO_GET_PM = 1033,
+ CHELSIO_GET_MEM = 1038,
+ CHELSIO_LOAD_FW = 1041,
+ CHELSIO_SET_TRACE_FILTER = 1044,
+ CHELSIO_SET_QSET_PARAMS = 1045,
+ CHELSIO_GET_QSET_PARAMS = 1046,
+ CHELSIO_SET_QSET_NUM = 1047,
+ CHELSIO_GET_QSET_NUM = 1048,
};
struct ch_reg {
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
char *buf,
- ssize_t(*format) (struct adapter *, char *))
+ ssize_t(*format) (struct net_device *, char *))
{
ssize_t len;
- struct adapter *adap = to_net_dev(d)->priv;
/* Synchronize with ioctls that may shut down the device */
rtnl_lock();
- len = (*format) (adap, buf);
+ len = (*format) (to_net_dev(d), buf);
rtnl_unlock();
return len;
}
static ssize_t attr_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len,
- ssize_t(*set) (struct adapter *, unsigned int),
+ ssize_t(*set) (struct net_device *, unsigned int),
unsigned int min_val, unsigned int max_val)
{
char *endp;
ssize_t ret;
unsigned int val;
- struct adapter *adap = to_net_dev(d)->priv;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return -EINVAL;
rtnl_lock();
- ret = (*set) (adap, val);
+ ret = (*set) (to_net_dev(d), val);
if (!ret)
ret = len;
rtnl_unlock();
}
#define CXGB3_SHOW(name, val_expr) \
-static ssize_t format_##name(struct adapter *adap, char *buf) \
+static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
+ struct adapter *adap = dev->priv; \
return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
return attr_show(d, attr, buf, format_##name); \
}
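
With the callbacks now taking a net_device, each attribute derives the
adapter itself. For illustration, an invocation such as
CXGB3_SHOW(cam_size, t3_mc5_size(&adap->mc5)) would expand to roughly the
following (hypothetical attribute name; the expansion follows the macro
above):

static ssize_t format_cam_size(struct net_device *dev, char *buf)
{
	struct adapter *adap = dev->priv;
	return sprintf(buf, "%u\n", t3_mc5_size(&adap->mc5));
}

static ssize_t show_cam_size(struct device *d, struct device_attribute *attr,
			     char *buf)
{
	return attr_show(d, attr, buf, format_cam_size);
}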
-static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
+static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
+ struct adapter *adap = dev->priv;
+
if (adap->flags & FULL_INIT_DONE)
return -EBUSY;
if (val && adap->params.rev == 0)
return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}
-static ssize_t set_nservers(struct adapter *adap, unsigned int val)
+static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
+ struct adapter *adap = dev->priv;
+
if (adap->flags & FULL_INIT_DONE)
return -EBUSY;
if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
return -EFAULT;
switch (cmd) {
- case CHELSIO_SETREG:{
- struct ch_reg edata;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
- if ((edata.addr & 3) != 0
- || edata.addr >= adapter->mmio_len)
- return -EINVAL;
- writel(edata.val, adapter->regs + edata.addr);
- break;
- }
- case CHELSIO_GETREG:{
- struct ch_reg edata;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
- if ((edata.addr & 3) != 0
- || edata.addr >= adapter->mmio_len)
- return -EINVAL;
- edata.val = readl(adapter->regs + edata.addr);
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- break;
- }
case CHELSIO_SET_QSET_PARAMS:{
int i;
struct qset_params *q;
return -EINVAL;
/*
- * Version scheme:
- * bits 0..9: chip version
- * bits 10..15: chip revision
- */
+ * Version scheme:
+ * bits 0..9: chip version
+ * bits 10..15: chip revision
+ */
t.version = 3 | (adapter->params.rev << 10);
if (copy_to_user(useraddr, &t, sizeof(t)))
return -EFAULT;
t.trace_rx);
break;
}
- case CHELSIO_SET_PKTSCHED:{
- struct ch_pktsched_params p;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (!adapter->open_device_map)
- return -EAGAIN; /* uP and SGE must be running */
- if (copy_from_user(&p, useraddr, sizeof(p)))
- return -EFAULT;
- send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
- p.binding);
- break;
-
- }
default:
return -EOPNOTSUPP;
}
int i;
for_each_port(adapter, i) {
- const struct vlan_group *grp;
+ struct vlan_group *grp;
struct net_device *dev = adapter->port[i];
const struct port_info *p = netdev_priv(dev);
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
if (vlan && vlan != VLAN_VID_MASK) {
grp = p->vlan_grp;
- dev = grp ? grp->vlan_devices[vlan] : NULL;
+ dev = NULL;
+ if (grp)
+ dev = vlan_group_get_device(grp, vlan);
} else
while (dev->master)
dev = dev->master;
#define USE_GTS 0
#define SGE_RX_SM_BUF_SIZE 1536
+
+/*
+ * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
+ * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (the
+ * host page size must be a multiple of RX_PAGE_SIZE).
+ */
+#define USE_RX_PAGE
+#define RX_PAGE_SIZE 2048
+
+/*
+ * skb freelist packets are copied into a new skb (and the freelist one is
+ * reused) if their len is <= SGE_RX_COPY_THRES.
+ */
#define SGE_RX_COPY_THRES 256
-# define SGE_RX_DROP_THRES 16
+/*
+ * Minimum number of freelist entries before we start dropping TUNNEL frames.
+ */
+#define SGE_RX_DROP_THRES 16
/*
* Period of the Tx buffer reclaim timer. This timer does not need to run
};
struct rx_sw_desc { /* SW state per Rx descriptor */
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ struct sge_fl_page page;
+ } t;
DECLARE_PCI_UNMAP_ADDR(dma_addr);
};
u32 len; /* mapped length of skb main body */
};
+/*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+ struct pci_dev *pdev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
/*
* Maps a number of flits to the number of Tx descriptors that can hold them.
* The formula is
struct pci_dev *pdev = adapter->pdev;
unsigned int cidx = q->cidx;
+ const int need_unmap = need_skb_unmap() &&
+ q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
d = &q->sdesc[cidx];
while (n--) {
if (d->skb) { /* an SGL is present */
- if (need_skb_unmap())
+ if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
if (d->skb->priority == cidx)
kfree_skb(d->skb);
pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
- kfree_skb(d->skb);
- d->skb = NULL;
+
+ if (q->buf_size != RX_PAGE_SIZE) {
+ kfree_skb(d->t.skb);
+ d->t.skb = NULL;
+ } else {
+ if (d->t.page.frag.page)
+ put_page(d->t.page.frag.page);
+ d->t.page.frag.page = NULL;
+ }
if (++cidx == q->size)
cidx = 0;
}
+
+ if (q->page.frag.page)
+ put_page(q->page.frag.page);
+ q->page.frag.page = NULL;
}
/**
* add_one_rx_buf - add a packet buffer to a free-buffer list
- * @skb: the buffer to add
+ * @va: va of the buffer to add
* @len: the buffer length
* @d: the HW Rx descriptor to write
* @sd: the SW Rx descriptor to write
* Add a buffer of the given length to the supplied HW and SW Rx
* descriptors.
*/
-static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
+static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
struct rx_desc *d, struct rx_sw_desc *sd,
unsigned int gen, struct pci_dev *pdev)
{
dma_addr_t mapping;
- sd->skb = skb;
- mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+ mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
pci_unmap_addr_set(sd, dma_addr, mapping);
d->addr_lo = cpu_to_be32(mapping);
{
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
struct rx_desc *d = &q->desc[q->pidx];
+ struct sge_fl_page *p = &q->page;
while (n--) {
- struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+ unsigned char *va;
- if (!skb)
- break;
+ if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
+ struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+
+ if (!skb) {
+ q->alloc_failed++;
+ break;
+ }
+ va = skb->data;
+ sd->t.skb = skb;
+ } else {
+ if (!p->frag.page) {
+ p->frag.page = alloc_pages(gfp, 0);
+ if (unlikely(!p->frag.page)) {
+ q->alloc_failed++;
+ break;
+ } else {
+ p->frag.size = RX_PAGE_SIZE;
+ p->frag.page_offset = 0;
+ p->va = page_address(p->frag.page);
+ }
+ }
+
+ memcpy(&sd->t, p, sizeof(*p));
+ va = p->va;
+
+ p->frag.page_offset += RX_PAGE_SIZE;
+ BUG_ON(p->frag.page_offset > PAGE_SIZE);
+ p->va += RX_PAGE_SIZE;
+ if (p->frag.page_offset == PAGE_SIZE)
+ p->frag.page = NULL;
+ else
+ get_page(p->frag.page);
+ }
+
+ add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
- add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
d++;
sd++;
if (++q->pidx == q->size) {
struct rx_desc *from = &q->desc[idx];
struct rx_desc *to = &q->desc[q->pidx];
- q->sdesc[q->pidx] = q->sdesc[idx];
+ memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
to->addr_lo = from->addr_lo; /* already big endian */
to->addr_hi = from->addr_hi; /* likewise */
wmb();
* of the SW ring.
*/
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
- size_t sw_size, dma_addr_t *phys, void *metadata)
+ size_t sw_size, dma_addr_t * phys, void *metadata)
{
size_t len = nelem * elem_size;
void *s = NULL;
return flit_desc_map[n];
}
-/**
- * get_packet - return the next ingress packet buffer from a free list
- * @adap: the adapter that received the packet
- * @fl: the SGE free list holding the packet
- * @len: the packet length including any SGE padding
- * @drop_thres: # of remaining buffers before we start dropping packets
- *
- * Get the next packet from a free list and complete setup of the
- * sk_buff. If the packet is small we make a copy and recycle the
- * original buffer, otherwise we use the original buffer itself. If a
- * positive drop threshold is supplied packets are dropped and their
- * buffers recycled if (a) the number of remaining buffers is under the
- * threshold and the packet is too big to copy, or (b) the packet should
- * be copied but there is no memory for the copy.
- */
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
- unsigned int len, unsigned int drop_thres)
-{
- struct sk_buff *skb = NULL;
- struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
- prefetch(sd->skb->data);
-
- if (len <= SGE_RX_COPY_THRES) {
- skb = alloc_skb(len, GFP_ATOMIC);
- if (likely(skb != NULL)) {
- __skb_put(skb, len);
- pci_dma_sync_single_for_cpu(adap->pdev,
- pci_unmap_addr(sd,
- dma_addr),
- len, PCI_DMA_FROMDEVICE);
- memcpy(skb->data, sd->skb->data, len);
- pci_dma_sync_single_for_device(adap->pdev,
- pci_unmap_addr(sd,
- dma_addr),
- len, PCI_DMA_FROMDEVICE);
- } else if (!drop_thres)
- goto use_orig_buf;
- recycle:
- recycle_rx_buf(adap, fl, fl->cidx);
- return skb;
- }
-
- if (unlikely(fl->credits < drop_thres))
- goto recycle;
-
- use_orig_buf:
- pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
- fl->buf_size, PCI_DMA_FROMDEVICE);
- skb = sd->skb;
- skb_put(skb, len);
- __refill_fl(adap, fl);
- return skb;
-}
-
/**
* get_imm_packet - return the next ingress packet buffer from a response
* @resp: the response descriptor containing the packet data
return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
}
+/**
+ * deferred_unmap_destructor - unmap a packet when it is freed
+ * @skb: the packet
+ *
+ * This is the packet destructor used for Tx packets that need to remain
+ * mapped until they are freed rather than until their Tx descriptors are
+ * freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+ int i;
+ const dma_addr_t *p;
+ const struct skb_shared_info *si;
+ const struct deferred_unmap_info *dui;
+ const struct unmap_info *ui = (struct unmap_info *)skb->cb;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ p = dui->addr;
+
+ if (ui->len)
+ pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
+
+ si = skb_shinfo(skb);
+ for (i = 0; i < si->nr_frags; i++)
+ pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
+ PCI_DMA_TODEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+ const struct sg_ent *sgl, int sgl_flits)
+{
+ dma_addr_t *p;
+ struct deferred_unmap_info *dui;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ dui->pdev = pdev;
+ for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+ *p++ = be64_to_cpu(sgl->addr[0]);
+ *p++ = be64_to_cpu(sgl->addr[1]);
+ }
+ if (sgl_flits)
+ *p = be64_to_cpu(sgl->addr[0]);
+}
+
/**
* write_ofld_wr - write an offload work request
* @adap: the adapter
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
adap->pdev);
- if (need_skb_unmap())
+ if (need_skb_unmap()) {
+ setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+ skb->destructor = deferred_unmap_destructor;
((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
+ }
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
gen, from->wr_hi, from->wr_lo);
struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
struct port_info *pi;
- rq->eth_pkts++;
skb_pull(skb, sizeof(*p) + pad);
skb->dev = adap->port[p->iff];
skb->dev->last_rx = jiffies;
netif_rx(skb);
}
+#define SKB_DATA_SIZE 128
+
+static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
+ unsigned int len)
+{
+ skb->len = len;
+ if (len <= SKB_DATA_SIZE) {
+ memcpy(skb->data, p->va, len);
+ skb->tail += len;
+ put_page(p->frag.page);
+ } else {
+ memcpy(skb->data, p->va, SKB_DATA_SIZE);
+ skb_shinfo(skb)->frags[0].page = p->frag.page;
+ skb_shinfo(skb)->frags[0].page_offset =
+ p->frag.page_offset + SKB_DATA_SIZE;
+ skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
+ skb_shinfo(skb)->nr_frags = 1;
+ skb->data_len = len - SKB_DATA_SIZE;
+ skb->tail += SKB_DATA_SIZE;
+ skb->truesize += skb->data_len;
+ }
+}
+
+/**
+ * get_packet - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @fl: the SGE free list holding the packet
+ * @len: the packet length including any SGE padding
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ * Get the next packet from a free list and complete setup of the
+ * sk_buff. If the packet is small we make a copy and recycle the
+ * original buffer, otherwise we use the original buffer itself. If a
+ * positive drop threshold is supplied packets are dropped and their
+ * buffers recycled if (a) the number of remaining buffers is under the
+ * threshold and the packet is too big to copy, or (b) the packet should
+ * be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+ unsigned int len, unsigned int drop_thres)
+{
+ struct sk_buff *skb = NULL;
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+ prefetch(sd->t.skb->data);
+
+ if (len <= SGE_RX_COPY_THRES) {
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (likely(skb != NULL)) {
+ struct rx_desc *d = &fl->desc[fl->cidx];
+ dma_addr_t mapping =
+ (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
+ be32_to_cpu(d->addr_lo));
+
+ __skb_put(skb, len);
+ pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
+ PCI_DMA_FROMDEVICE);
+ memcpy(skb->data, sd->t.skb->data, len);
+ pci_dma_sync_single_for_device(adap->pdev, mapping, len,
+ PCI_DMA_FROMDEVICE);
+ } else if (!drop_thres)
+ goto use_orig_buf;
+recycle:
+ recycle_rx_buf(adap, fl, fl->cidx);
+ return skb;
+ }
+
+ if (unlikely(fl->credits < drop_thres))
+ goto recycle;
+
+use_orig_buf:
+ pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+ fl->buf_size, PCI_DMA_FROMDEVICE);
+ skb = sd->t.skb;
+ skb_put(skb, len);
+ __refill_fl(adap, fl);
+ return skb;
+}
+
/**
* handle_rsp_cntrl_info - handles control information in a response
* @qs: the queue set corresponding to the response
q->next_holdoff = q->holdoff_tmr;
while (likely(budget_left && is_new_response(r, q))) {
- int eth, ethpad = 0;
+ int eth, ethpad = 2;
struct sk_buff *skb = NULL;
u32 len, flags = ntohl(r->flags);
u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
break;
}
q->imm_data++;
+ ethpad = 0;
} else if ((len = ntohl(r->len_cq)) != 0) {
- struct sge_fl *fl;
+ struct sge_fl *fl =
+ (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+
+ if (fl->buf_size == RX_PAGE_SIZE) {
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct sge_fl_page *p = &sd->t.page;
+
+ prefetch(p->va);
+ prefetch(p->va + L1_CACHE_BYTES);
+
+ __refill_fl(adap, fl);
+
+ pci_unmap_single(adap->pdev,
+ pci_unmap_addr(sd, dma_addr),
+ fl->buf_size,
+ PCI_DMA_FROMDEVICE);
+
+ if (eth) {
+ if (unlikely(fl->credits <
+ SGE_RX_DROP_THRES))
+ goto eth_recycle;
+
+ skb = alloc_skb(SKB_DATA_SIZE,
+ GFP_ATOMIC);
+ if (unlikely(!skb)) {
+eth_recycle:
+ q->rx_drops++;
+ recycle_rx_buf(adap, fl,
+ fl->cidx);
+ goto eth_done;
+ }
+ } else {
+ skb = alloc_skb(SKB_DATA_SIZE,
+ GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto no_mem;
+ }
+
+ skb_data_init(skb, p, G_RSPD_LEN(len));
+eth_done:
+ fl->credits--;
+ q->eth_pkts++;
+ } else {
+ fl->credits--;
+ skb = get_packet(adap, fl, G_RSPD_LEN(len),
+ eth ? SGE_RX_DROP_THRES : 0);
+ }
- fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
- fl->credits--;
- skb = get_packet(adap, fl, G_RSPD_LEN(len),
- eth ? SGE_RX_DROP_THRES : 0);
- if (!skb)
- q->rx_drops++;
- else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
- __skb_pull(skb, 2);
- ethpad = 2;
if (++fl->cidx == fl->size)
fl->cidx = 0;
} else
q->credits = 0;
}
- if (likely(skb != NULL)) {
+ if (skb) {
+ /* Preserve the RSS info in csum & priority */
+ skb->csum = rss_hi;
+ skb->priority = rss_lo;
+
if (eth)
rx_eth(adap, q, skb, ethpad);
else {
- /* Preserve the RSS info in csum & priority */
- skb->csum = rss_hi;
- skb->priority = rss_lo;
- ngathered = rx_offload(&adap->tdev, q, skb,
- offload_skbs, ngathered);
+ if (unlikely(r->rss_hdr.opcode ==
+ CPL_TRACE_PKT))
+ __skb_pull(skb, ethpad);
+
+ ngathered = rx_offload(&adap->tdev, q,
+ skb, offload_skbs,
+ ngathered);
}
}
-
--budget_left;
}
&adap->sge.qs[0].rspq.lock;
if (spin_trylock_irq(lock)) {
if (!napi_is_scheduled(qs->netdev)) {
+ u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
if (qs->fl[0].credits < qs->fl[0].size)
__refill_fl(adap, &qs->fl[0]);
if (qs->fl[1].credits < qs->fl[1].size)
__refill_fl(adap, &qs->fl[1]);
+
+ if (status & (1 << qs->rspq.cntxt_id)) {
+ qs->rspq.starved++;
+ if (qs->rspq.credits) {
+ refill_rspq(adap, &qs->rspq, 1);
+ qs->rspq.credits--;
+ qs->rspq.restarted++;
+ t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+ 1 << qs->rspq.cntxt_id);
+ }
+ }
}
spin_unlock_irq(lock);
}
flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
if (ntxq == 1) {
+#ifdef USE_RX_PAGE
+ q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
sizeof(struct cpl_rx_pkt);
+#endif
q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
sizeof(struct cpl_rx_pkt);
} else {
+#ifdef USE_RX_PAGE
+ q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
sizeof(struct cpl_rx_data);
+#endif
q->fl[1].buf_size = (16 * 1024) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
q->polling = adap->params.rev > 0;
q->coalesce_usecs = 5;
q->rspq_size = 1024;
- q->fl_size = 4096;
+ q->fl_size = 1024;
q->jumbo_size = 512;
q->txq_size[TXQ_ETH] = 1024;
q->txq_size[TXQ_OFLD] = 1024;
major = G_FW_VERSION_MAJOR(vers);
minor = G_FW_VERSION_MINOR(vers);
- if (type == FW_VERSION_T3 && major == 3 && minor == 1)
+ if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
+ minor == FW_VERSION_MINOR)
return 0;
CH_ERR(adapter, "found wrong FW version(%u.%u), "
- "driver needs version 3.1\n", major, minor);
+ "driver needs version %u.%u\n", major, minor,
+ FW_VERSION_MAJOR, FW_VERSION_MINOR);
return -EINVAL;
}
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
/* Driver version */
-#define DRV_VERSION "1.0"
+#define DRV_VERSION "1.0-ko"
+#define FW_VERSION_MAJOR 3
+#define FW_VERSION_MINOR 2
#endif /* __CHELSIO_VERSION_H */
/* Add more time here if your adapter won't work OK: */
#define DE600_SLOW_DOWN udelay(delay_time)
- /*
- * If you still have trouble reading/writing to the adapter,
- * modify the following "#define": (see <asm/io.h> for more info)
-#define REALLY_SLOW_IO
- */
-
/* use 0 for production, 1 for verification, >2 for debug */
#ifdef DE600_DEBUG
#define PRINTK(x) if (de600_debug >= 2) printk x
uint16_t vid = adapter->hw.mng_cookie.vlan_id;
uint16_t old_vid = adapter->mng_vlan_id;
if (adapter->vlgrp) {
- if (!adapter->vlgrp->vlan_devices[vid]) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid)) {
if (adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
e1000_vlan_rx_add_vid(netdev, vid);
if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
(vid != old_vid) &&
- !adapter->vlgrp->vlan_devices[old_vid])
+ !vlan_group_get_device(adapter->vlgrp, old_vid))
e1000_vlan_rx_kill_vid(netdev, old_vid);
} else
adapter->mng_vlan_id = vid;
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
!(adapter->vlgrp &&
- adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) {
+ vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
}
uint32_t vfta, index;
e1000_irq_disable(adapter);
-
- if (adapter->vlgrp)
- adapter->vlgrp->vlan_devices[vid] = NULL;
-
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
e1000_irq_enable(adapter);
if ((adapter->hw.mng_cookie.status &
if (adapter->vlgrp) {
uint16_t vid;
for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
- if (!adapter->vlgrp->vlan_devices[vid])
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
continue;
e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
int index;
u64 hret;
- if (port->vgrp)
- port->vgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(port->vgrp, vid, NULL);
cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {
NV_MSIX_INT_DISABLED,
NV_MSIX_INT_ENABLED
};
-static int msix = NV_MSIX_INT_ENABLED;
+static int msix = NV_MSIX_INT_DISABLED;
/*
* DMA 64bit
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
unsigned long flags;
+ int retcode;
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
pkts = nv_rx_process(dev, limit);
- else
+ retcode = nv_alloc_rx(dev);
+ } else {
pkts = nv_rx_process_optimized(dev, limit);
+ retcode = nv_alloc_rx_optimized(dev);
+ }
- if (nv_alloc_rx(dev)) {
+ if (retcode) {
spin_lock_irqsave(&np->lock, flags);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
spin_lock_irqsave(&priv->rxlock, flags);
- if (priv->vlgrp)
- priv->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(priv->vlgrp, vid, NULL);
spin_unlock_irqrestore(&priv->rxlock, flags);
}
ixgb_irq_disable(adapter);
- if(adapter->vlgrp)
- adapter->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
ixgb_irq_enable(adapter);
if(adapter->vlgrp) {
uint16_t vid;
for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
- if(!adapter->vlgrp->vlan_devices[vid])
+ if(!vlan_group_get_device(adapter->vlgrp, vid))
continue;
ixgb_vlan_rx_add_vid(adapter->netdev, vid);
}
int unaligned;
while (mp->rx_desc_count < mp->rx_ring_size) {
- skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
+ skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
if (!skb)
break;
mp->rx_desc_count++;
- unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1);
+ unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
if (unaligned)
- skb_reserve(skb, ETH_DMA_ALIGN - unaligned);
+ skb_reserve(skb, dma_get_cache_alignment() - unaligned);
pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
static int mv643xx_eth_probe(struct platform_device *pdev)
{
struct mv643xx_eth_platform_data *pd;
- int port_num = pdev->id;
+ int port_num;
struct mv643xx_private *mp;
struct net_device *dev;
u8 *p;
int duplex = DUPLEX_HALF;
int speed = 0; /* default to auto-negotiation */
+ pd = pdev->dev.platform_data;
+ if (pd == NULL) {
+ printk(KERN_ERR "No mv643xx_eth_platform_data\n");
+ return -ENODEV;
+ }
+
dev = alloc_etherdev(sizeof(struct mv643xx_private));
if (!dev)
return -ENOMEM;
BUG_ON(!res);
dev->irq = res->start;
- mp->port_num = port_num;
-
dev->open = mv643xx_eth_open;
dev->stop = mv643xx_eth_stop;
dev->hard_start_xmit = mv643xx_eth_start_xmit;
spin_lock_init(&mp->lock);
+ port_num = pd->port_number;
+
/* set default config values */
eth_port_uc_addr_get(dev, dev->dev_addr);
mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
- pd = pdev->dev.platform_data;
- if (pd) {
- if (pd->mac_addr)
- memcpy(dev->dev_addr, pd->mac_addr, 6);
+ if (is_valid_ether_addr(pd->mac_addr))
+ memcpy(dev->dev_addr, pd->mac_addr, 6);
- if (pd->phy_addr || pd->force_phy_addr)
- ethernet_phy_set(port_num, pd->phy_addr);
+ if (pd->phy_addr || pd->force_phy_addr)
+ ethernet_phy_set(port_num, pd->phy_addr);
- if (pd->rx_queue_size)
- mp->rx_ring_size = pd->rx_queue_size;
+ if (pd->rx_queue_size)
+ mp->rx_ring_size = pd->rx_queue_size;
- if (pd->tx_queue_size)
- mp->tx_ring_size = pd->tx_queue_size;
+ if (pd->tx_queue_size)
+ mp->tx_ring_size = pd->tx_queue_size;
- if (pd->tx_sram_size) {
- mp->tx_sram_size = pd->tx_sram_size;
- mp->tx_sram_addr = pd->tx_sram_addr;
- }
-
- if (pd->rx_sram_size) {
- mp->rx_sram_size = pd->rx_sram_size;
- mp->rx_sram_addr = pd->rx_sram_addr;
- }
+ if (pd->tx_sram_size) {
+ mp->tx_sram_size = pd->tx_sram_size;
+ mp->tx_sram_addr = pd->tx_sram_addr;
+ }
- duplex = pd->duplex;
- speed = pd->speed;
+ if (pd->rx_sram_size) {
+ mp->rx_sram_size = pd->rx_sram_size;
+ mp->rx_sram_addr = pd->rx_sram_addr;
}
+ duplex = pd->duplex;
+ speed = pd->speed;
+
+ mp->port_num = port_num;
+
/* Hook up MII support for ethtool */
mp->mii.dev = dev;
mp->mii.mdio_read = mv643xx_mdio_read;
#include <linux/mv643xx.h>
+#include <asm/dma-mapping.h>
+
/* Checksum offload for Tx works for most packets, but
* fails if previous packet sent did not use hw csum
*/
#define MAX_DESCS_PER_SKB 1
#endif
-/*
- * The MV643XX HW requires 8-byte alignment. However, when I/O
- * is non-cache-coherent, we need to ensure that the I/O buffers
- * we use don't share cache lines with other data.
- */
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE)
-#define ETH_DMA_ALIGN L1_CACHE_BYTES
-#else
-#define ETH_DMA_ALIGN 8
-#endif
-
#define ETH_VLAN_HLEN 4
#define ETH_FCS_LEN 4
#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
ETH_VLAN_HLEN + ETH_FCS_LEN)
-#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN)
+#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + dma_get_cache_alignment())
#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
/*************************************************************************
* myri10ge.c: Myricom Myri-10G Ethernet driver.
*
- * Copyright (C) 2005, 2006 Myricom, Inc.
+ * Copyright (C) 2005 - 2007 Myricom, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*
*
* If the eeprom on your board is not recent enough, you will need to get a
char *fw_name;
char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
char fw_version[128];
+ int fw_ver_major;
+ int fw_ver_minor;
+ int fw_ver_tiny;
+ int adopted_rx_filter_bug;
u8 mac_addr[6]; /* eeprom mac address */
unsigned long serial_number;
int vendor_specific_offset;
struct mcp_gen_header *hdr)
{
struct device *dev = &mgp->pdev->dev;
- int major, minor;
/* check firmware type */
if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
/* save firmware version for ethtool */
strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
- sscanf(mgp->fw_version, "%d.%d", &major, &minor);
+ sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
+ &mgp->fw_ver_minor, &mgp->fw_ver_tiny);
- if (!(major == MXGEFW_VERSION_MAJOR && minor == MXGEFW_VERSION_MINOR)) {
+ if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR
+ && mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
MXGEFW_VERSION_MINOR);
memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
status = myri10ge_validate_firmware(mgp, hdr);
kfree(hdr);
+
+	/* check to see if the adopted firmware has a bug where adopting
+	 * it will cause broadcasts to be filtered unless the NIC
+	 * is kept in ALLMULTI mode */

+ if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
+ mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
+ mgp->adopted_rx_filter_bug = 1;
+ dev_warn(dev, "Adopting fw %d.%d.%d: "
+ "working around rx filter bug\n",
+ mgp->fw_ver_major, mgp->fw_ver_minor,
+ mgp->fw_ver_tiny);
+ }
return status;
}
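
The firmware version string is now parsed into major/minor/tiny components so
the driver can recognize the adopted-firmware window that misfilters
broadcasts. The range test above is equivalent to this small predicate (a
restatement for clarity, not driver API):

	/* Adopted firmware 1.4.4 through 1.4.11 needs the ALLMULTI
	 * workaround applied at open and set_multicast time. */
	static int fw_needs_allmulti_workaround(int major, int minor, int tiny)
	{
		return major == 1 && minor == 4 && tiny >= 4 && tiny <= 11;
	}
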
status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
myri10ge_change_promisc(mgp, 0, 0);
myri10ge_change_pause(mgp, mgp->pause);
+ if (mgp->adopted_rx_filter_bug)
+ (void)myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
return status;
}
myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
/* This firmware is known to not support multicast */
- if (!mgp->fw_multicast_support)
+ if (!mgp->fw_multicast_support || mgp->adopted_rx_filter_bug)
return;
/* Disable multicast filtering */
static const struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
- { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
struct netdev_private *np = netdev_priv(dev);
void __iomem * ioaddr = ns_ioaddr(dev);
unsigned entry;
+ unsigned long flags;
/* Note: Ordering is important here, set the field with the
"ownership" bit last, and only then increment cur_tx. */
np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
if (!np->hands_off) {
np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
dev_kfree_skb_irq(skb);
np->stats.tx_dropped++;
}
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
dev->trans_start = jiffies;
pkt_len = (desc_status & DescSizeMask) - 4;
if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
if (desc_status & DescMore) {
+ unsigned long flags;
+
if (netif_msg_rx_err(np))
printk(KERN_WARNING
"%s: Oversized(?) Ethernet "
* reset procedure documented in
* AN-1287. */
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
reset_rx(dev);
reinit_rx(dev);
writel(np->ring_dma, ioaddr + RxRingPtr);
check_link(dev);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
/* We'll enable RX on exit from this
* function. */
#ifdef CONFIG_NET_POLL_CONTROLLER
static void natsemi_poll_controller(struct net_device *dev)
{
+ struct netdev_private *np = netdev_priv(dev);
+
disable_irq(dev->irq);
- intr_handler(dev->irq, dev);
+
+ /*
+ * A real interrupt might have already reached us at this point
+ * but NAPI might not have called us back yet. As the interrupt
+ * status register is cleared by reading, we should prevent an
+ * interrupt loss in this case...
+ */
+ if (!np->intr_status)
+ intr_handler(dev->irq, dev);
+
enable_irq(dev->irq);
}
#endif
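
Together with the poll_controller change above, the natsemi hunks convert
bare spin_lock_irq() calls to the irqsave variants, since these paths can now
be entered with interrupts already disabled (netpoll, rx-error recovery). The
recurring pattern, in sketch form:

	unsigned long flags;

	/* Safe whether or not the caller already disabled interrupts;
	 * the previous spin_lock_irq() would unconditionally re-enable
	 * them on unlock. */
	spin_lock_irqsave(&np->lock, flags);
	/* ... touch shared tx/rx state ... */
	spin_unlock_irqrestore(&np->lock, flags);
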
#define FLASH_SECTOR_SIZE (64 * 1024)
#define FLASH_TOTAL_SIZE (NUM_FLASH_SECTORS * FLASH_SECTOR_SIZE)
+#define PHAN_VENDOR_ID 0x4040
+
#define RCV_DESC_RINGSIZE \
(sizeof(struct rcv_desc) * adapter->max_rx_desc_count)
#define STATUS_DESC_RINGSIZE \
(sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count)
#define RCV_BUFFSIZE \
(sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count)
-#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
+#define find_diff_among(a,b,range) ((a)<=(b)?((b)-(a)):((b)+(range)-(a)))
#define NETXEN_NETDEV_STATUS 0x1
#define NETXEN_RCV_PRODUCER_OFFSET 0
#define NETXEN_NIC_STATS_LEN ARRAY_SIZE(netxen_nic_gstrings_stats)
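
The '<' to '<=' change in find_diff_among() makes the distance between equal
ring indices zero instead of a full wrap. A standalone illustration of the
corrected wrap-around arithmetic (values arbitrary):

	#define find_diff_among(a,b,range) \
		((a)<=(b)?((b)-(a)):((b)+(range)-(a)))

	/* find_diff_among(5, 5, 16)  == 0   (previously 16)     */
	/* find_diff_among(14, 2, 16) == 4   (wrapped distance)  */
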
static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
- "Register_Test_offline", "EEPROM_Test_offline",
- "Interrupt_Test_offline", "Loopback_Test_offline",
+ "Register_Test_on_offline",
"Link_Test_on_offline"
};
}
}
-static void
-netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
- /* options can be added depending upon the mode */
- wol->wolopts = 0;
-}
-
static u32 netxen_nic_test_link(struct net_device *dev)
{
struct netxen_port *port = netdev_priv(dev);
struct netxen_adapter *adapter = port->adapter;
__u32 status;
+ int val;
/* read which mode */
if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
&status) != 0)
return -EIO;
- else
- return (netxen_get_phy_link(status));
+ else {
+ val = netxen_get_phy_link(status);
+ return !val;
+ }
} else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
- int val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
- return val == XG_LINK_UP;
+ val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
+ return (val == XG_LINK_UP) ? 0 : 1;
}
return -EIO;
}
static int netxen_nic_reg_test(struct net_device *dev)
{
- struct netxen_port *port = netdev_priv(dev);
- struct netxen_adapter *adapter = port->adapter;
- u32 data_read, data_written, save;
- __u32 mode;
-
- /*
- * first test the "Read Only" registers by writing which mode
- */
- netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
- if (netxen_get_niu_enable_ge(mode)) { /* GB Mode */
- netxen_nic_read_w0(adapter,
- NETXEN_NIU_GB_MII_MGMT_STATUS(port->portnum),
- &data_read);
-
- save = data_read;
- if (data_read)
- data_written = data_read & NETXEN_NIC_INVALID_DATA;
- else
- data_written = NETXEN_NIC_INVALID_DATA;
- netxen_nic_write_w0(adapter,
- NETXEN_NIU_GB_MII_MGMT_STATUS(port->
- portnum),
- data_written);
- netxen_nic_read_w0(adapter,
- NETXEN_NIU_GB_MII_MGMT_STATUS(port->portnum),
- &data_read);
-
- if (data_written == data_read) {
- netxen_nic_write_w0(adapter,
- NETXEN_NIU_GB_MII_MGMT_STATUS(port->
- portnum),
- save);
-
- return 0;
- }
-
- /* netxen_niu_gb_mii_mgmt_indicators is read only */
- netxen_nic_read_w0(adapter,
- NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
- portnum),
- &data_read);
-
- save = data_read;
- if (data_read)
- data_written = data_read & NETXEN_NIC_INVALID_DATA;
- else
- data_written = NETXEN_NIC_INVALID_DATA;
- netxen_nic_write_w0(adapter,
- NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
- portnum),
- data_written);
-
- netxen_nic_read_w0(adapter,
- NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
- portnum),
- &data_read);
-
- if (data_written == data_read) {
- netxen_nic_write_w0(adapter,
- NETXEN_NIU_GB_MII_MGMT_INDICATE
- (port->portnum), save);
- return 0;
- }
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 data_read, data_written;
- /* netxen_niu_gb_interface_status is read only */
- netxen_nic_read_w0(adapter,
- NETXEN_NIU_GB_INTERFACE_STATUS(port->
- portnum),
- &data_read);
+ netxen_nic_read_w0(adapter, NETXEN_PCIX_PH_REG(0), &data_read);
+ if ((data_read & 0xffff) != PHAN_VENDOR_ID)
+ return 1;
- save = data_read;
- if (data_read)
- data_written = data_read & NETXEN_NIC_INVALID_DATA;
- else
- data_written = NETXEN_NIC_INVALID_DATA;
- netxen_nic_write_w0(adapter,
- NETXEN_NIU_GB_INTERFACE_STATUS(port->
- portnum),
- data_written);
+ data_written = (u32)0xa5a5a5a5;
- netxen_nic_read_w0(adapter,
- NETXEN_NIU_GB_INTERFACE_STATUS(port->
- portnum),
- &data_read);
+ netxen_nic_reg_write(adapter, CRB_SCRATCHPAD_TEST, data_written);
+ data_read = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_SCRATCHPAD_TEST));
+ if (data_written != data_read)
+ return 1;
- if (data_written == data_read) {
- netxen_nic_write_w0(adapter,
- NETXEN_NIU_GB_INTERFACE_STATUS
- (port->portnum), save);
-
- return 0;
- }
- } /* GB Mode */
- return 1;
+ return 0;
}
static int netxen_nic_diag_test_count(struct net_device *dev)
{
if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* offline tests */
/* link test */
- if (!(data[4] = (u64) netxen_nic_test_link(dev)))
+ if ((data[1] = (u64) netxen_nic_test_link(dev)))
eth_test->flags |= ETH_TEST_FL_FAILED;
- if (netif_running(dev))
- dev->stop(dev);
-
/* register tests */
- if (!(data[0] = netxen_nic_reg_test(dev)))
+ if ((data[0] = netxen_nic_reg_test(dev)))
eth_test->flags |= ETH_TEST_FL_FAILED;
- /* other tests pass as of now */
- data[1] = data[2] = data[3] = 1;
- if (netif_running(dev))
- dev->open(dev);
} else { /* online tests */
- /* link test */
- if (!(data[4] = (u64) netxen_nic_test_link(dev)))
+ /* register tests */
+ if ((data[0] = netxen_nic_reg_test(dev)))
eth_test->flags |= ETH_TEST_FL_FAILED;
- /* other tests pass by default */
- data[0] = data[1] = data[2] = data[3] = 1;
+ /* link test */
+ if ((data[1] = (u64) netxen_nic_test_link(dev)))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
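
Note the inverted sense: the reworked tests return 0 on pass and nonzero on
failure, so the result can be stored straight into data[] and any nonzero
value marks the run failed. A hedged helper expressing the convention the two
branches above now share (a restatement, not driver API):

	/* Run a test that returns 0 on pass, record its result in the
	 * per-test data[] slot, and flag the whole run on failure. */
	static u64 record_test(struct ethtool_test *et, int result)
	{
		if (result)
			et->flags |= ETH_TEST_FL_FAILED;
		return (u64)result;
	}
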
.get_drvinfo = netxen_nic_get_drvinfo,
.get_regs_len = netxen_nic_get_regs_len,
.get_regs = netxen_nic_get_regs,
- .get_wol = netxen_nic_get_wol,
.get_link = ethtool_op_get_link,
.get_eeprom_len = netxen_nic_get_eeprom_len,
.get_eeprom = netxen_nic_get_eeprom,
adapter->cmd_consumer = (uint32_t *) (((char *)addr) +
sizeof(struct netxen_ring_ctx));
- addr = pci_alloc_consistent(adapter->ahw.pdev,
- sizeof(struct cmd_desc_type0) *
- adapter->max_tx_desc_count,
- (dma_addr_t *) & hw->cmd_desc_phys_addr);
+ addr = netxen_alloc(adapter->ahw.pdev,
+ sizeof(struct cmd_desc_type0) *
+ adapter->max_tx_desc_count,
+ (dma_addr_t *) & hw->cmd_desc_phys_addr,
+ &adapter->ahw.cmd_desc_pdev);
printk("cmd_desc_phys_addr: 0x%llx\n", (u64) hw->cmd_desc_phys_addr);
if (addr == NULL) {
void netxen_load_firmware(struct netxen_adapter *adapter)
{
int i;
- long data, size = 0;
- long flashaddr = NETXEN_FLASH_BASE, memaddr = NETXEN_PHANTOM_MEM_BASE;
+ u32 data, size = 0;
+ u32 flashaddr = NETXEN_FLASH_BASE, memaddr = NETXEN_PHANTOM_MEM_BASE;
u64 off;
void __iomem *addr;
netxen_nic_driver_name);
return;
}
+ *ptr32 = le32_to_cpu(*ptr32);
ptr32++;
addr += sizeof(u32);
}
#include "netxen_nic_phan_reg.h"
struct crb_addr_pair {
- long addr;
- long data;
+ u32 addr;
+ u32 data;
};
#define NETXEN_MAX_CRB_XFORM 60
static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
-#define NETXEN_ADDR_ERROR ((unsigned long ) 0xffffffff )
+#define NETXEN_ADDR_ERROR (0xffffffff)
#define crb_addr_transform(name) \
crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
* netxen_decode_crb_addr(0 - utility to translate from internal Phantom CRB
* address to external PCI CRB address.
*/
-unsigned long netxen_decode_crb_addr(unsigned long addr)
+u32 netxen_decode_crb_addr(u32 addr)
{
int i;
- unsigned long base_addr, offset, pci_base;
+ u32 base_addr, offset, pci_base;
crb_addr_transform_setup();
while(1) {
int data1;
- do_rom_fast_read(adapter, addridx, &data1);
+ ret = do_rom_fast_read(adapter, addridx, &data1);
+ if (ret < 0)
+ return ret;
+
if (data1 == data)
break;
int n, i;
int init_delay = 0;
struct crb_addr_pair *buf;
- unsigned long off;
+ u32 off;
/* resetall */
status = netxen_nic_get_board_info(adapter);
if (verbose)
printk("%s: PCI: 0x%08x == 0x%08x\n",
netxen_nic_driver_name, (unsigned int)
- netxen_decode_crb_addr((unsigned long)
- addr), val);
+ netxen_decode_crb_addr(addr), val);
}
for (i = 0; i < n; i++) {
- off = netxen_decode_crb_addr((unsigned long)buf[i].addr);
+ off = netxen_decode_crb_addr(buf[i].addr);
if (off == NETXEN_ADDR_ERROR) {
- printk(KERN_ERR"CRB init value out of range %lx\n",
+ printk(KERN_ERR"CRB init value out of range %x\n",
buf[i].addr);
continue;
}
void netxen_free_adapter_offload(struct netxen_adapter *adapter)
{
if (adapter->dummy_dma.addr) {
+ writel(0, NETXEN_CRB_NORMALIZE(adapter,
+ CRB_HOST_DUMMY_BUF_ADDR_HI));
+ writel(0, NETXEN_CRB_NORMALIZE(adapter,
+ CRB_HOST_DUMMY_BUF_ADDR_LO));
pci_free_consistent(adapter->ahw.pdev,
NETXEN_HOST_DUMMY_DMA_SIZE,
adapter->dummy_dma.addr,
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
-#define PHAN_VENDOR_ID 0x4040
-
MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
netdev->tx_timeout = netxen_tx_timeout;
netdev->watchdog_timeo = HZ;
+ netxen_nic_change_mtu(netdev, netdev->mtu);
+
SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
netdev->poll = netxen_nic_poll;
netdev->weight = NETXEN_NETDEV_WEIGHT;
adapter->port_count++;
adapter->port[i] = port;
}
-#ifndef CONFIG_PPC64
writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
netxen_pinit_from_rom(adapter, 0);
udelay(500);
netxen_load_firmware(adapter);
netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
-#endif
/*
* delay a while to ensure that the Pegs are up & running.
* Otherwise, we might see some flaky behaviour.
if (adapter == NULL)
return;
+ if (adapter->irq)
+ free_irq(adapter->irq, adapter);
netxen_nic_stop_all_ports(adapter);
/* leave the hw in the same state as reboot */
- netxen_pinit_from_rom(adapter, 0);
writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+ netxen_pinit_from_rom(adapter, 0);
+ udelay(500);
netxen_load_firmware(adapter);
netxen_free_adapter_offload(adapter);
- udelay(500); /* Delay for a while to drain the DMA engines */
+ mdelay(1000); /* Delay for a while to drain the DMA engines */
for (i = 0; i < adapter->port_count; i++) {
port = adapter->port[i];
if ((port) && (port->netdev)) {
if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
pci_disable_msi(pdev);
- pci_set_drvdata(pdev, NULL);
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
netxen_free_hw_resources(adapter);
pci_release_regions(pdev);
pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
recv_ctx = &adapter->recv_ctx[ctxid];
if (!adapter->active_ports) {
netxen_nic_disable_int(adapter);
- if (adapter->irq)
- free_irq(adapter->irq, adapter);
cmd_buff = adapter->cmd_buf_arr;
for (i = 0; i < adapter->max_tx_desc_count; i++) {
buffrag = cmd_buff->frag_array;
/*
* Wait for some time to allow the dma to drain, if any.
*/
- destroy_workqueue(netxen_workq);
pci_unregister_driver(&netxen_driver);
+ destroy_workqueue(netxen_workq);
}
module_exit(netxen_exit_module);
#define CRB_CMD_CONSUMER_OFFSET_1 NETXEN_NIC_REG(0x1b0)
#define CRB_TEMP_STATE NETXEN_NIC_REG(0x1b4)
+/* used for ethtool tests */
+#define CRB_SCRATCHPAD_TEST NETXEN_NIC_REG(0x280)
+
/*
* CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
* which can be read by the Phantom host to get producer/consumer indexes from
static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */
static int fifo=0x8; /* don't change */
-/* #define REALLY_SLOW_IO */
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
spin_lock_irq(&dev->misc_lock);
spin_lock(&dev->tx_lock);
- if (dev->vlgrp)
- dev->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(dev->vlgrp, vid, NULL);
spin_unlock(&dev->tx_lock);
spin_unlock_irq(&dev->misc_lock);
}
skb_put(skb, pkt_len); /* Make room */
pci_dma_sync_single_for_cpu(lp->pci_dev,
lp->rx_dma_addr[entry],
- PKT_BUF_SZ - 2,
+ pkt_len,
PCI_DMA_FROMDEVICE);
eth_copy_and_sum(skb,
(unsigned char *)(lp->rx_skbuff[entry]->data),
pkt_len, 0);
pci_dma_sync_single_for_device(lp->pci_dev,
lp->rx_dma_addr[entry],
- PKT_BUF_SZ - 2,
+ pkt_len,
PCI_DMA_FROMDEVICE);
}
lp->stats.rx_bytes += skb->len;
*
* Version: 0.7.0
*
+ * 070228 : Fix to allow multiple sessions with same remote MAC and same
+ * session id by including the local device ifindex in the
+ * tuple identifying a session. This also ensures packets can't
+ * be injected into a session from interfaces other than the one
+ * specified by userspace. Florian Zumbiehl <florz@florz.de>
+ * (Oh, BTW, this one is YYMMDD, in case you were wondering ...)
* 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme
* 030700 : Fixed connect logic to allow for disconnect.
* 270700 : Fixed potential SMP problems; we must protect against
* Set/get/delete/rehash items (internal versions)
*
**********************************************************************/
-static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr)
+static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr, int ifindex)
{
int hash = hash_item(sid, addr);
struct pppox_sock *ret;
ret = item_hash_table[hash];
- while (ret && !cmp_addr(&ret->pppoe_pa, sid, addr))
+ while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex))
ret = ret->next;
return ret;
ret = item_hash_table[hash];
while (ret) {
- if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa))
+ if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex)
return -EALREADY;
ret = ret->next;
}
- if (!ret) {
- po->next = item_hash_table[hash];
- item_hash_table[hash] = po;
- }
+ po->next = item_hash_table[hash];
+ item_hash_table[hash] = po;
return 0;
}
-static struct pppox_sock *__delete_item(unsigned long sid, char *addr)
+static struct pppox_sock *__delete_item(unsigned long sid, char *addr, int ifindex)
{
int hash = hash_item(sid, addr);
struct pppox_sock *ret, **src;
src = &item_hash_table[hash];
while (ret) {
- if (cmp_addr(&ret->pppoe_pa, sid, addr)) {
+ if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) {
*src = ret->next;
break;
}
*
**********************************************************************/
static inline struct pppox_sock *get_item(unsigned long sid,
- unsigned char *addr)
+ unsigned char *addr, int ifindex)
{
struct pppox_sock *po;
read_lock_bh(&pppoe_hash_lock);
- po = __get_item(sid, addr);
+ po = __get_item(sid, addr, ifindex);
if (po)
sock_hold(sk_pppox(po));
read_unlock_bh(&pppoe_hash_lock);
static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp)
{
- return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote);
+ struct net_device *dev = NULL;
+ int ifindex;
+
+ dev = dev_get_by_name(sp->sa_addr.pppoe.dev);
+ if(!dev)
+ return NULL;
+ ifindex = dev->ifindex;
+ dev_put(dev);
+ return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex);
}
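
get_item_by_addr() resolves the device name in the sockaddr to an ifindex
before the lookup, so every path into the hash now carries the full
three-part key. A sketch of the match predicate implied by the
__get_item()/__delete_item() changes (cmp_addr() is the driver's existing
helper):

	/* A session matches only if sid, remote MAC and the local
	 * interface index all agree; this is what stops packets being
	 * injected into a session from the wrong interface. */
	static int pppoe_session_matches(struct pppox_sock *po,
					 unsigned long sid,
					 unsigned char *remote, int ifindex)
	{
		return cmp_addr(&po->pppoe_pa, sid, remote) &&
		       po->pppoe_ifindex == ifindex;
	}
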
static inline int set_item(struct pppox_sock *po)
return i;
}
-static inline struct pppox_sock *delete_item(unsigned long sid, char *addr)
+static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex)
{
struct pppox_sock *ret;
write_lock_bh(&pppoe_hash_lock);
- ret = __delete_item(sid, addr);
+ ret = __delete_item(sid, addr, ifindex);
write_unlock_bh(&pppoe_hash_lock);
return ret;
ph = (struct pppoe_hdr *) skb->nh.raw;
- po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
+ po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (po != NULL)
return sk_receive_skb(sk_pppox(po), skb, 0);
drop:
if (ph->code != PADT_CODE)
goto abort;
- po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
+ po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (po) {
struct sock *sk = sk_pppox(po);
po = pppox_sk(sk);
if (po->pppoe_pa.sid) {
- delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
+ delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex);
}
if (po->pppoe_dev)
int sockaddr_len, int flags)
{
struct sock *sk = sock->sk;
- struct net_device *dev = NULL;
+ struct net_device *dev;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
struct pppox_sock *po = pppox_sk(sk);
int error;
pppox_unbind_sock(sk);
/* Delete the old binding */
- delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote);
+ delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_ifindex);
if(po->pppoe_dev)
dev_put(po->pppoe_dev);
goto end;
po->pppoe_dev = dev;
+ po->pppoe_ifindex = dev->ifindex;
if (!(dev->flags & IFF_UP))
goto err_put;
break;
/* PPPoE address from the user specifies an outbound
- PPPoE address to which frames are forwarded to */
+ PPPoE address which frames are forwarded to */
err = -EFAULT;
if (copy_from_user(&po->pppoe_relay,
(void __user *)arg,
#define DRV_NAME "qla3xxx"
#define DRV_STRING "QLogic ISP3XXX Network Driver"
-#define DRV_VERSION "v2.02.00-k36"
+#define DRV_VERSION "v2.03.00-k3"
#define PFX DRV_NAME " "
static const char ql3xxx_driver_name[] = DRV_NAME;
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
struct ql_rcv_buf_cb *lrg_buf_cb)
{
- u64 map;
+ dma_addr_t map;
+ int err;
lrg_buf_cb->next = NULL;
if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
}
if (!lrg_buf_cb->skb) {
- lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+ lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
if (unlikely(!lrg_buf_cb->skb)) {
- printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
+ printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
qdev->ndev->name);
qdev->lrg_buf_skb_check++;
} else {
qdev->lrg_buffer_len -
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
+ err = pci_dma_mapping_error(map);
+ if(err) {
+ printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+ qdev->ndev->name, err);
+ dev_kfree_skb(lrg_buf_cb->skb);
+ lrg_buf_cb->skb = NULL;
+
+ qdev->lrg_buf_skb_check++;
+ return;
+ }
+
lrg_buf_cb->buf_phy_addr_low =
cpu_to_le32(LS_64BITS(map));
lrg_buf_cb->buf_phy_addr_high =
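
This is the first of several qla3xxx hunks that vet every DMA mapping with
pci_dma_mapping_error() before the address reaches hardware. The recurring
shape, reduced to a sketch using the names from the hunk above:

	map = pci_map_single(qdev->pdev, skb->data, len,
			     PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(map)) {
		/* back out and let a later refill retry the buffer */
		dev_kfree_skb(lrg_buf_cb->skb);
		lrg_buf_cb->skb = NULL;
		return;
	}
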
printk(KERN_INFO PFX
"%s: Reset in progress, skip processing link "
"state.\n", qdev->ndev->name);
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
return;
}
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
- 2) << 7))
+ 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
return 0;
+ }
status = ql_is_auto_cfg(qdev);
ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
- 2) << 7))
+ 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
return 0;
+ }
status = ql_get_link_speed(qdev);
ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
- 2) << 7))
+ 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
return 0;
+ }
status = ql_is_link_full_dup(qdev);
ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
- u64 map;
+ dma_addr_t map;
+ int err;
while (lrg_buf_cb) {
if (!lrg_buf_cb->skb) {
- lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+ lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
if (unlikely(!lrg_buf_cb->skb)) {
printk(KERN_DEBUG PFX
- "%s: Failed dev_alloc_skb().\n",
+ "%s: Failed netdev_alloc_skb().\n",
qdev->ndev->name);
break;
} else {
qdev->lrg_buffer_len -
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
+
+ err = pci_dma_mapping_error(map);
+ if(err) {
+ printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+ qdev->ndev->name, err);
+ dev_kfree_skb(lrg_buf_cb->skb);
+ lrg_buf_cb->skb = NULL;
+ break;
+ }
+
lrg_buf_cb->buf_phy_addr_low =
cpu_to_le32(LS_64BITS(map));
lrg_buf_cb->buf_phy_addr_high =
qdev->lrg_buf_q_producer_index++;
- if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
+ if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
qdev->lrg_buf_q_producer_index = 0;
if (qdev->lrg_buf_q_producer_index ==
- (NUM_LBUFQ_ENTRIES - 1)) {
+ (qdev->num_lbufq_entries - 1)) {
lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
}
}
{
struct ql_tx_buf_cb *tx_cb;
int i;
+ int retval = 0;
+	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+		printk(KERN_WARNING "Frame was short, but it was padded and sent.\n");
+	}
+
tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+
+ /* Check the transmit response flags for any errors */
+ if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+ printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
+
+ qdev->stats.tx_errors++;
+ retval = -EIO;
+ goto frame_not_sent;
+ }
+
+ if(tx_cb->seg_count == 0) {
+ printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
+
+ qdev->stats.tx_errors++;
+ retval = -EIO;
+ goto invalid_seg_count;
+ }
+
pci_unmap_single(qdev->pdev,
pci_unmap_addr(&tx_cb->map[0], mapaddr),
pci_unmap_len(&tx_cb->map[0], maplen),
}
qdev->stats.tx_packets++;
qdev->stats.tx_bytes += tx_cb->skb->len;
+
+frame_not_sent:
dev_kfree_skb_irq(tx_cb->skb);
tx_cb->skb = NULL;
+
+invalid_seg_count:
atomic_inc(&qdev->tx_count);
}
+void ql_get_sbuf(struct ql3_adapter *qdev)
+{
+ if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+ qdev->small_buf_index = 0;
+ qdev->small_buf_release_cnt++;
+}
+
+struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
+ lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
+ qdev->lrg_buf_release_cnt++;
+ if (++qdev->lrg_buf_index == qdev->num_large_buffers)
+ qdev->lrg_buf_index = 0;
+	return lrg_buf_cb;
+}
+
/*
* The difference between 3022 and 3032 for inbound completions:
* 3022 uses two buffers per completion. The first buffer contains
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
- long int offset;
- u32 lrg_buf_phy_addr_low = 0;
struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
- u32 *curr_ial_ptr;
struct sk_buff *skb;
u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
/*
* Get the inbound address list (small buffer).
*/
- offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
- if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
- qdev->small_buf_index = 0;
+ ql_get_sbuf(qdev);
- curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
- qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
- qdev->small_buf_release_cnt++;
-
- if (qdev->device_id == QL3022_DEVICE_ID) {
- /* start of first buffer (3022 only) */
- lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
- lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
- qdev->lrg_buf_release_cnt++;
- if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
- qdev->lrg_buf_index = 0;
- }
- curr_ial_ptr++; /* 64-bit pointers require two incs. */
- curr_ial_ptr++;
- }
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ lrg_buf_cb1 = ql_get_lbuf(qdev);
/* start of second buffer */
- lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
- lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
-
- /*
- * Second buffer gets sent up the stack.
- */
- qdev->lrg_buf_release_cnt++;
- if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
- qdev->lrg_buf_index = 0;
+ lrg_buf_cb2 = ql_get_lbuf(qdev);
skb = lrg_buf_cb2->skb;
qdev->stats.rx_packets++;
static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
- long int offset;
- u32 lrg_buf_phy_addr_low = 0;
struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
- u32 *curr_ial_ptr;
struct sk_buff *skb1 = NULL, *skb2;
struct net_device *ndev = qdev->ndev;
u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
* Get the inbound address list (small buffer).
*/
- offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
- if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
- qdev->small_buf_index = 0;
- curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
- qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
- qdev->small_buf_release_cnt++;
+ ql_get_sbuf(qdev);
if (qdev->device_id == QL3022_DEVICE_ID) {
/* start of first buffer on 3022 */
- lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
- lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
- qdev->lrg_buf_release_cnt++;
- if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
- qdev->lrg_buf_index = 0;
+ lrg_buf_cb1 = ql_get_lbuf(qdev);
skb1 = lrg_buf_cb1->skb;
- curr_ial_ptr++; /* 64-bit pointers require two incs. */
- curr_ial_ptr++;
size = ETH_HLEN;
if (*((u16 *) skb1->data) != 0xFFFF)
size += VLAN_ETH_HLEN - ETH_HLEN;
}
/* start of second buffer */
- lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
- lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
+ lrg_buf_cb2 = ql_get_lbuf(qdev);
skb2 = lrg_buf_cb2->skb;
- qdev->lrg_buf_release_cnt++;
- if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
- qdev->lrg_buf_index = 0;
skb_put(skb2, length); /* Just the second buffer length here. */
pci_unmap_single(qdev->pdev,
struct net_rsp_iocb *net_rsp;
struct net_device *ndev = qdev->ndev;
unsigned long hw_flags;
+ int work_done = 0;
+
+ u32 rsp_producer_index = le32_to_cpu(*(qdev->prsp_producer_index));
/* While there are entries in the completion queue. */
- while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
- qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
+ while ((rsp_producer_index !=
+ qdev->rsp_consumer_index) && (work_done < work_to_do)) {
net_rsp = qdev->rsp_current;
switch (net_rsp->opcode) {
} else {
qdev->rsp_current++;
}
+
+ work_done = *tx_cleaned + *rx_cleaned;
}
- spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if(work_done) {
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- ql_update_lrg_bufq_prod_index(qdev);
+ ql_update_lrg_bufq_prod_index(qdev);
- if (qdev->small_buf_release_cnt >= 16) {
- while (qdev->small_buf_release_cnt >= 16) {
- qdev->small_buf_q_producer_index++;
+ if (qdev->small_buf_release_cnt >= 16) {
+ while (qdev->small_buf_release_cnt >= 16) {
+ qdev->small_buf_q_producer_index++;
- if (qdev->small_buf_q_producer_index ==
- NUM_SBUFQ_ENTRIES)
- qdev->small_buf_q_producer_index = 0;
- qdev->small_buf_release_cnt -= 8;
- }
+ if (qdev->small_buf_q_producer_index ==
+ NUM_SBUFQ_ENTRIES)
+ qdev->small_buf_q_producer_index = 0;
+ qdev->small_buf_release_cnt -= 8;
+ }
- ql_write_common_reg(qdev,
- &port_regs->CommonRegs.
- rxSmallQProducerIndex,
- qdev->small_buf_q_producer_index);
- }
+ wmb();
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ rxSmallQProducerIndex,
+ qdev->small_buf_q_producer_index);
- ql_write_common_reg(qdev,
- &port_regs->CommonRegs.rspQConsumerIndex,
- qdev->rsp_consumer_index);
- spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ }
- if (unlikely(netif_queue_stopped(qdev->ndev))) {
- if (netif_queue_stopped(qdev->ndev) &&
- (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
- netif_wake_queue(qdev->ndev);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}
return *tx_cleaned + *rx_cleaned;
struct ql3_adapter *qdev = netdev_priv(ndev);
int work_to_do = min(*budget, ndev->quota);
int rx_cleaned = 0, tx_cleaned = 0;
+ unsigned long hw_flags;
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
if (!netif_carrier_ok(ndev))
goto quit_polling;
*budget -= rx_cleaned;
ndev->quota -= rx_cleaned;
- if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
+	if (tx_cleaned + rx_cleaned != work_to_do ||
+	    !netif_running(ndev)) {
quit_polling:
netif_rx_complete(ndev);
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.rspQConsumerIndex,
+ qdev->rsp_consumer_index);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
ql_enable_interrupts(qdev);
return 0;
}
spin_unlock(&qdev->adapter_lock);
} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
ql_disable_interrupts(qdev);
- if (likely(netif_rx_schedule_prep(ndev)))
+ if (likely(netif_rx_schedule_prep(ndev))) {
__netif_rx_schedule(ndev);
- else
- ql_enable_interrupts(qdev);
+ }
} else {
return IRQ_NONE;
}
* the next AOL if more frags are coming.
* That is why the frags:segment count ratio is not linear.
*/
-static int ql_get_seg_count(unsigned short frags)
+static int ql_get_seg_count(struct ql3_adapter *qdev,
+ unsigned short frags)
{
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ return 1;
+
switch(frags) {
case 0: return 1; /* just the skb->data seg */
case 1: return 2; /* skb->data + 1 frag */
if (ip) {
if (ip->protocol == IPPROTO_TCP) {
- mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
+ mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
+ OB_3032MAC_IOCB_REQ_IC;
mac_iocb_ptr->ip_hdr_off = offset;
mac_iocb_ptr->ip_hdr_len = ip->ihl;
} else if (ip->protocol == IPPROTO_UDP) {
- mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
+ mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
+ OB_3032MAC_IOCB_REQ_IC;
mac_iocb_ptr->ip_hdr_off = offset;
mac_iocb_ptr->ip_hdr_len = ip->ihl;
}
}
/*
- * The difference between 3022 and 3032 sends:
- * 3022 only supports a simple single segment transmission.
- * 3032 supports checksumming and scatter/gather lists (fragments).
- * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
- * in the IOCB plus a chain of outbound address lists (OAL) that
- * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
- * will used to point to an OAL when more ALP entries are required.
- * The IOCB is always the top of the chain followed by one or more
- * OALs (when necessary).
+ * Map the buffers for this transmit. This will return
+ * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
*/
-static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
+static int ql_send_map(struct ql3_adapter *qdev,
+ struct ob_mac_iocb_req *mac_iocb_ptr,
+ struct ql_tx_buf_cb *tx_cb,
+ struct sk_buff *skb)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
- struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
- struct ql_tx_buf_cb *tx_cb;
- u32 tot_len = skb->len;
struct oal *oal;
struct oal_entry *oal_entry;
- int len;
- struct ob_mac_iocb_req *mac_iocb_ptr;
- u64 map;
+ int len = skb_headlen(skb);
+ dma_addr_t map;
+ int err;
+ int completed_segs, i;
int seg_cnt, seg = 0;
int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
- if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
- if (!netif_queue_stopped(ndev))
- netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
- }
- tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
- seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
+ seg_cnt = tx_cb->seg_count = ql_get_seg_count(qdev,
+ (skb_shinfo(skb)->nr_frags));
if(seg_cnt == -1) {
printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
- return NETDEV_TX_OK;
-
+ return NETDEV_TX_BUSY;
}
- mac_iocb_ptr = tx_cb->queue_entry;
- memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
- mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
- mac_iocb_ptr->flags |= qdev->mb_bit_mask;
- mac_iocb_ptr->transaction_id = qdev->req_producer_index;
- mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
- tx_cb->skb = skb;
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- ql_hw_csum_setup(skb, mac_iocb_ptr);
- len = skb_headlen(skb);
+ /*
+ * Map the skb buffer first.
+ */
map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+ err = pci_dma_mapping_error(map);
+ if(err) {
+ printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+ qdev->ndev->name, err);
+
+ return NETDEV_TX_BUSY;
+ }
+
oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
seg++;
- if (!skb_shinfo(skb)->nr_frags) {
+ if (seg_cnt == 1) {
/* Terminate the last segment. */
oal_entry->len =
cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
} else {
- int i;
oal = tx_cb->oal;
- for (i=0; i<frag_cnt; i++,seg++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
oal_entry++;
if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */
(seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
map = pci_map_single(qdev->pdev, oal,
sizeof(struct oal),
PCI_DMA_TODEVICE);
+
+ err = pci_dma_mapping_error(map);
+ if(err) {
+
+				printk(KERN_ERR "%s: PCI mapping of outbound address list failed with error: %d\n",
+ qdev->ndev->name, err);
+ goto map_error;
+ }
+
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
oal_entry->len =
pci_map_page(qdev->pdev, frag->page,
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
+
+ err = pci_dma_mapping_error(map);
+ if(err) {
+ printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
+ qdev->ndev->name, err);
+ goto map_error;
+ }
+
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
oal_entry->len = cpu_to_le32(frag->size);
oal_entry->len =
cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
}
+
+ return NETDEV_TX_OK;
+
+map_error:
+	/* A PCI mapping failed, so back out: walk the OALs and pages
+	 * that have already been mapped and unmap them to clean up
+	 * properly.
+	 */
+
+ seg = 1;
+ oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+ oal = tx_cb->oal;
+ for (i=0; i<completed_segs; i++,seg++) {
+ oal_entry++;
+
+ if((seg == 2 && seg_cnt > 3) || /* Check for continuation */
+ (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
+ (seg == 12 && seg_cnt > 13) || /* but necessary. */
+ (seg == 17 && seg_cnt > 18)) {
+ pci_unmap_single(qdev->pdev,
+ pci_unmap_addr(&tx_cb->map[seg], mapaddr),
+ pci_unmap_len(&tx_cb->map[seg], maplen),
+ PCI_DMA_TODEVICE);
+ oal++;
+ seg++;
+ }
+
+ pci_unmap_page(qdev->pdev,
+ pci_unmap_addr(&tx_cb->map[seg], mapaddr),
+ pci_unmap_len(&tx_cb->map[seg], maplen),
+ PCI_DMA_TODEVICE);
+ }
+
+ pci_unmap_single(qdev->pdev,
+ pci_unmap_addr(&tx_cb->map[0], mapaddr),
+ pci_unmap_addr(&tx_cb->map[0], maplen),
+ PCI_DMA_TODEVICE);
+
+ return NETDEV_TX_BUSY;
+
+}
+
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
+ * will be used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
+ * OALs (when necessary).
+ */
+static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ struct ql_tx_buf_cb *tx_cb;
+ u32 tot_len = skb->len;
+ struct ob_mac_iocb_req *mac_iocb_ptr;
+
+ if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
+ return NETDEV_TX_BUSY;
+ }
+
+ tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
+ if((tx_cb->seg_count = ql_get_seg_count(qdev,
+ (skb_shinfo(skb)->nr_frags))) == -1) {
+ printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
+ return NETDEV_TX_OK;
+ }
+
+ mac_iocb_ptr = tx_cb->queue_entry;
+ mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
+ mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
+ mac_iocb_ptr->flags |= qdev->mb_bit_mask;
+ mac_iocb_ptr->transaction_id = qdev->req_producer_index;
+ mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
+ tx_cb->skb = skb;
+ if (qdev->device_id == QL3032_DEVICE_ID &&
+ skb->ip_summed == CHECKSUM_PARTIAL)
+ ql_hw_csum_setup(skb, mac_iocb_ptr);
+
+ if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
+ printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
+ return NETDEV_TX_BUSY;
+ }
+
wmb();
qdev->req_producer_index++;
if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
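
The continuation checks scattered through ql_send_map() and its unwind path
all encode the geometry described in the comment above ql3xxx_send(): three
address/length pairs in the IOCB, five per OAL, with the last slot of each
consumed by a pointer to the next OAL. Factored out as a hedged restatement,
the rule is:

	/* Segment indices 2, 7, 12 and 17 are the last usable ALP of
	 * the IOCB or of an OAL, so they become continuation pointers
	 * whenever more segments follow. */
	static int alp_is_continuation(int seg, int seg_cnt)
	{
		return (seg == 2 && seg_cnt > 3) ||
		       (seg == 7 && seg_cnt > 8) ||
		       (seg == 12 && seg_cnt > 13) ||
		       (seg == 17 && seg_cnt > 18);
	}
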
{
/* Create Large Buffer Queue */
qdev->lrg_buf_q_size =
- NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+ qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
if (qdev->lrg_buf_q_size < PAGE_SIZE)
qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
else
qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
+ qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL);
+ if (qdev->lrg_buf == NULL) {
+ printk(KERN_ERR PFX
+ "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
+ return -ENOMEM;
+ }
+
qdev->lrg_buf_q_alloc_virt_addr =
pci_alloc_consistent(qdev->pdev,
qdev->lrg_buf_q_alloc_size,
"%s: Already done.\n", qdev->ndev->name);
return;
}
-
+	kfree(qdev->lrg_buf);
pci_free_consistent(qdev->pdev,
qdev->lrg_buf_q_alloc_size,
qdev->lrg_buf_q_alloc_virt_addr,
small_buf_q_entry = qdev->small_buf_q_virt_addr;
- qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;
-
/* Initialize the small buffer queue. */
for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
small_buf_q_entry->addr_high =
int i = 0;
struct ql_rcv_buf_cb *lrg_buf_cb;
- for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
+ for (i = 0; i < qdev->num_large_buffers; i++) {
lrg_buf_cb = &qdev->lrg_buf[i];
if (lrg_buf_cb->skb) {
dev_kfree_skb(lrg_buf_cb->skb);
struct ql_rcv_buf_cb *lrg_buf_cb;
struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
- for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
+ for (i = 0; i < qdev->num_large_buffers; i++) {
lrg_buf_cb = &qdev->lrg_buf[i];
buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
int i;
struct ql_rcv_buf_cb *lrg_buf_cb;
struct sk_buff *skb;
- u64 map;
+ dma_addr_t map;
+ int err;
- for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
- skb = dev_alloc_skb(qdev->lrg_buffer_len);
+ for (i = 0; i < qdev->num_large_buffers; i++) {
+ skb = netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
if (unlikely(!skb)) {
/* Better luck next round */
printk(KERN_ERR PFX
qdev->lrg_buffer_len -
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
+
+ err = pci_dma_mapping_error(map);
+ if(err) {
+ printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+ qdev->ndev->name, err);
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ }
+
pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
pci_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
- if (qdev->ndev->mtu == NORMAL_MTU_SIZE)
+ if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
+ qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
+ }
else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
+ /*
+	 * Bigger buffers, so fewer of them.
+ */
+ qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
} else {
printk(KERN_ERR PFX
qdev->ndev->name);
return -ENOMEM;
}
+ qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
qdev->max_frame_size =
(qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
&hmem_regs->rxLargeQBaseAddrLow,
LS_64BITS(qdev->lrg_buf_q_phy_addr));
- ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);
+ ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
ql_write_page1_reg(qdev,
&hmem_regs->rxLargeBufferLength,
qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
qdev->small_buf_release_cnt = 8;
- qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
+ qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
qdev->lrg_buf_release_cnt = 8;
qdev->lrg_buf_next_free =
(struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
err_init:
ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
free_irq(qdev->pdev->irq, ndev);
err_irq:
if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
return &qdev->stats;
}
-static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
-{
- struct ql3_adapter *qdev = netdev_priv(ndev);
- printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
- if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
- printk(KERN_ERR PFX
- "%s: mtu size of %d is not valid. Use exactly %d or "
- "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
- JUMBO_MTU_SIZE);
- return -EINVAL;
- }
-
- if (!netif_running(ndev)) {
- ndev->mtu = new_mtu;
- return 0;
- }
-
- ndev->mtu = new_mtu;
- return ql_cycle_adapter(qdev,QL_DO_RESET);
-}
-
static void ql3xxx_set_multicast_list(struct net_device *ndev)
{
/*
}
ndev = alloc_etherdev(sizeof(struct ql3_adapter));
- if (!ndev)
+ if (!ndev) {
+ printk(KERN_ERR PFX "%s could not alloc etherdev\n",
+ pci_name(pdev));
+ err = -ENOMEM;
goto err_out_free_regions;
+ }
SET_MODULE_OWNER(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
if (!qdev->mem_map_registers) {
printk(KERN_ERR PFX "%s: cannot map device registers\n",
pci_name(pdev));
+ err = -EIO;
goto err_out_free_ndev;
}
ndev->hard_start_xmit = ql3xxx_send;
ndev->stop = ql3xxx_close;
ndev->get_stats = ql3xxx_get_stats;
- ndev->change_mtu = ql3xxx_change_mtu;
ndev->set_multicast_list = ql3xxx_set_multicast_list;
SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
ndev->set_mac_address = ql3xxx_set_mac_address;
printk(KERN_ALERT PFX
"ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
qdev->index);
+ err = -EIO;
goto err_out_iounmap;
}
/* Validate and set parameters */
if (qdev->mac_index) {
+ ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ;
memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
ETH_ALEN);
} else {
+ ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
ETH_ALEN);
}
/* Transmit and Receive Buffers */
#define NUM_LBUFQ_ENTRIES 128
+#define JUMBO_NUM_LBUFQ_ENTRIES \
+(NUM_LBUFQ_ENTRIES/(JUMBO_MTU_SIZE/NORMAL_MTU_SIZE))
#define NUM_SBUFQ_ENTRIES 64
#define QL_SMALL_BUFFER_SIZE 32
#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
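
The jumbo entry count scales the normal count down by the MTU ratio. A worked
example, assuming the usual NORMAL_MTU_SIZE of 1500 and JUMBO_MTU_SIZE of
9000 (values hedged; the defines live elsewhere in this driver):

	/* 128 / (9000 / 1500) = 128 / 6 = 21 queue entries in jumbo
	 * mode, and with QL_ADDR_ELE_PER_BUFQ_ENTRY addresses per
	 * entry the large-buffer pool shrinks by the same factor. */
	num_lbufq_entries = NUM_LBUFQ_ENTRIES /
			    (JUMBO_MTU_SIZE / NORMAL_MTU_SIZE);
	num_large_buffers = num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
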
/* Each send has at least control block. This is how many we keep. */
#define NUM_SMALL_BUFFERS NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY
-#define NUM_LARGE_BUFFERS NUM_LBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY
+
#define QL_HEADER_SPACE 32 /* make header space at top of skb. */
/*
* Large & Small Buffers for Receives
u32 len;
#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */
#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */
- u32 reserved;
};
struct oal {
struct net_rsp_iocb *rsp_current;
u16 rsp_consumer_index;
u16 reserved_06;
- u32 *prsp_producer_index;
+ volatile u32 *prsp_producer_index;
u32 rsp_producer_index_phy_addr_high;
u32 rsp_producer_index_phy_addr_low;
u32 lrg_buf_q_producer_index;
u32 lrg_buf_release_cnt;
struct bufq_addr_element *lrg_buf_next_free;
+ u32 num_large_buffers;
+ u32 num_lbufq_entries;
/* Large (Receive) Buffers */
- struct ql_rcv_buf_cb lrg_buf[NUM_LARGE_BUFFERS];
+ struct ql_rcv_buf_cb *lrg_buf;
struct ql_rcv_buf_cb *lrg_buf_free_head;
struct ql_rcv_buf_cb *lrg_buf_free_tail;
u32 lrg_buf_free_count;
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
- if (tp->vlgrp)
- tp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(tp->vlgrp, vid, NULL);
spin_unlock_irqrestore(&tp->lock, flags);
}
#define TX_PA_CFG_IGNORE_SNAP_OUI BIT(2)
#define TX_PA_CFG_IGNORE_LLC_CTRL BIT(3)
#define TX_PA_CFG_IGNORE_L2_ERR BIT(6)
+#define RX_PA_CFG_STRIP_VLAN_TAG BIT(15)
 /* Recent add, used only for debug purposes. */
u64 pcc_enable;
* Possible values '1' for enable '0' for disable. Default is '0'
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated as a single large packet
+ * napi: This parameter is used to enable/disable NAPI (polling Rx)
+ *     Possible values '1' for enable and '0' for disable. Default is '1'
+ * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO)
+ *     Possible values '1' for enable and '0' for disable. Default is '0'
+ * vlan_tag_strip: This can be used to enable or disable VLAN tag stripping.
+ *     Possible values '1' for enable, '0' for disable.
+ *     Default is '2', which means disable in promiscuous mode
+ *     and enable in non-promiscuous mode.
************************************************************************/
#include <linux/module.h>
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.16.1"
+#define DRV_VERSION "2.0.17.1"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
"BIST Test\t(offline)"
};
-static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
+static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
{"tmac_frms"},
{"tmac_data_octets"},
{"tmac_drop_frms"},
{"rxd_rd_cnt"},
{"rxd_wr_cnt"},
{"txf_rd_cnt"},
- {"rxf_wr_cnt"},
+ {"rxf_wr_cnt"}
+};
+
+static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
{"rmac_ttl_1519_4095_frms"},
{"rmac_ttl_4096_8191_frms"},
{"rmac_ttl_8192_max_frms"},
{"rmac_red_discard"},
{"rmac_rts_discard"},
{"rmac_ingm_full_discard"},
- {"link_fault_cnt"},
+ {"link_fault_cnt"}
+};
+
+static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
{"\n DRIVER STATISTICS"},
{"single_bit_ecc_errs"},
{"double_bit_ecc_errs"},
("lro_avg_aggr_pkts"),
};
-#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
-#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
+#define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
+#define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
+ ETH_GSTRING_LEN
+#define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
+
+#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
+#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
+
+#define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
+#define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
spin_unlock_irqrestore(&nic->tx_lock, flags);
}
+/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
+int vlan_strip_flag;
+
/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
unsigned long flags;
spin_lock_irqsave(&nic->tx_lock, flags);
- if (nic->vlgrp)
- nic->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(nic->vlgrp, vid, NULL);
spin_unlock_irqrestore(&nic->tx_lock, flags);
}
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
+S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
&bar0->rts_frm_len_n[i]);
}
}
+
+ /* Disable differentiated services steering logic */
+ for (i = 0; i < 64; i++) {
+ if (rts_ds_steer(nic, i, 0) == FAILURE) {
+ DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
+ return FAILURE;
+ }
+ }
/* Program statistics memory */
writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
writeq(val64, &bar0->rx_pa_cfg);
}
+ if (vlan_tag_strip == 0) {
+ val64 = readq(&bar0->rx_pa_cfg);
+ val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
+ writeq(val64, &bar0->rx_pa_cfg);
+ vlan_strip_flag = 0;
+ }
+
/*
* Enabling MC-RLDRAM. After enabling the device, we timeout
* for around 100ms, which is approximately the time required
* SUCCESS on success and FAILURE on failure.
*/
-static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
+static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
+ int bit_state)
{
- int ret = FAILURE, cnt = 0;
+ int ret = FAILURE, cnt = 0, delay = 1;
u64 val64;
- while (TRUE) {
+ if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
+ return FAILURE;
+
+ do {
val64 = readq(addr);
- if (!(val64 & busy_bit)) {
- ret = SUCCESS;
- break;
+ if (bit_state == S2IO_BIT_RESET) {
+ if (!(val64 & busy_bit)) {
+ ret = SUCCESS;
+ break;
+ }
+ } else {
+			if (val64 & busy_bit) {
+ ret = SUCCESS;
+ break;
+ }
}
if(in_interrupt())
- mdelay(50);
+ mdelay(delay);
else
- msleep(50);
+ msleep(delay);
- if (cnt++ > 10)
- break;
- }
+ if (++cnt >= 10)
+ delay = 50;
+ } while (cnt < 20);
return ret;
}
/*
writeq(val64, &bar0->pcc_err_reg);
}
+ /* restore the previously assigned mac address */
+ s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
+
sp->device_enabled_once = FALSE;
}
val64 &= ~GPIO_INT_MASK_LINK_UP;
val64 |= GPIO_INT_MASK_LINK_DOWN;
writeq(val64, &bar0->gpio_int_mask);
+
+ /* turn off LED */
+ val64 = readq(&bar0->adapter_control);
+ val64 = val64 &(~ADAPTER_LED_ON);
+ writeq(val64, &bar0->adapter_control);
}
}
val64 = readq(&bar0->gpio_int_mask);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ S2IO_BIT_RESET);
sp->m_cast_flg = 1;
sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ S2IO_BIT_RESET);
sp->m_cast_flg = 0;
sp->all_multi_pos = 0;
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32) (val64 >> 32), (add + 4));
+ if (vlan_tag_strip != 1) {
+ val64 = readq(&bar0->rx_pa_cfg);
+ val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
+ writeq(val64, &bar0->rx_pa_cfg);
+ vlan_strip_flag = 0;
+ }
+
val64 = readq(&bar0->mac_cfg);
sp->promisc_flg = 1;
DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32) (val64 >> 32), (add + 4));
+ if (vlan_tag_strip != 0) {
+ val64 = readq(&bar0->rx_pa_cfg);
+ val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
+ writeq(val64, &bar0->rx_pa_cfg);
+ vlan_strip_flag = 1;
+ }
+
val64 = readq(&bar0->mac_cfg);
sp->promisc_flg = 0;
DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
/* Wait for command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ S2IO_BIT_RESET)) {
DBG_PRINT(ERR_DBG, "%s: Adding ",
dev->name);
DBG_PRINT(ERR_DBG, "Multicasts failed\n");
/* Wait for command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ S2IO_BIT_RESET)) {
DBG_PRINT(ERR_DBG, "%s: Adding ",
dev->name);
DBG_PRINT(ERR_DBG, "Multicasts failed\n");
struct XENA_dev_config __iomem *bar0 = sp->bar0;
register u64 val64, mac_addr = 0;
int i;
+ u64 old_mac_addr = 0;
/*
* Set the new MAC address as the new unicast filter and reflect this
for (i = 0; i < ETH_ALEN; i++) {
mac_addr <<= 8;
mac_addr |= addr[i];
+ old_mac_addr <<= 8;
+ old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
+ }
+
+ if (mac_addr == 0)
+ return SUCCESS;
+
+ /* Update the internal structure with this new mac address */
+ if (mac_addr != old_mac_addr) {
+ memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
+ sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
+ sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
+ sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
+ sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
+ sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
+ sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
}
writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
return FAILURE;
}
info->regdump_len = XENA_REG_SPACE;
info->eedump_len = XENA_EEPROM_SPACE;
info->testinfo_len = S2IO_TEST_LEN;
- info->n_stats = S2IO_STAT_LEN;
+
+ if (sp->device_type == XFRAME_I_DEVICE)
+ info->n_stats = XFRAME_I_STAT_LEN;
+ else
+ info->n_stats = XFRAME_II_STAT_LEN;
}
/**
tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
- tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
- tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
- tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
- tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
- tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
- tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
- tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
- tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
- tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
+
+ /* Enhanced statistics exist only for Hercules */
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ tmp_stats[i++] =
+ le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
+ tmp_stats[i++] =
+ le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
+ tmp_stats[i++] =
+ le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
+ }
+
tmp_stats[i++] = 0;
tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
static void s2io_ethtool_get_strings(struct net_device *dev,
u32 stringset, u8 * data)
{
+ int stat_size = 0;
+ struct s2io_nic *sp = dev->priv;
+
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
break;
case ETH_SS_STATS:
- memcpy(data, &ethtool_stats_keys,
- sizeof(ethtool_stats_keys));
+ stat_size = sizeof(ethtool_xena_stats_keys);
+ memcpy(data, &ethtool_xena_stats_keys, stat_size);
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ memcpy(data + stat_size,
+ &ethtool_enhanced_stats_keys,
+ sizeof(ethtool_enhanced_stats_keys));
+ stat_size += sizeof(ethtool_enhanced_stats_keys);
+ }
+
+ memcpy(data + stat_size, &ethtool_driver_stats_keys,
+ sizeof(ethtool_driver_stats_keys));
}
}
static int s2io_ethtool_get_stats_count(struct net_device *dev)
{
- return (S2IO_STAT_LEN);
+ struct s2io_nic *sp = dev->priv;
+ int stat_count = 0;
+ switch (sp->device_type) {
+ case XFRAME_I_DEVICE:
+ stat_count = XFRAME_I_STAT_LEN;
+ break;
+
+ case XFRAME_II_DEVICE:
+ stat_count = XFRAME_II_STAT_LEN;
+ break;
+ }
+
+ return stat_count;
}
static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
clear_bit(0, &(nic->link_state));
out_unlock:
- rtnl_lock();
+ rtnl_unlock();
}
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
rx_blocks[j].rxds[k].virt_addr;
if(sp->rxd_mode >= RXD_MODE_3A)
ba = &mac_control->rings[i].ba[j][k];
- set_rxd_buffer_pointer(sp, rxdp, ba,
+ if (set_rxd_buffer_pointer(sp, rxdp, ba,
&skb,(u64 *)&temp0_64,
(u64 *)&temp1_64,
- (u64 *)&temp2_64, size);
+ (u64 *)&temp2_64,
+ size) == -ENOMEM) {
+ return 0;
+ }
set_rxd_buffer_size(sp, rxdp, size);
wmb();
}
}
if (sp->intr_type == MSI_X) {
- int i;
+ int i, msix_tx_cnt = 0, msix_rx_cnt = 0;
for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
err = request_irq(sp->entries[i].vector,
s2io_msix_fifo_handle, 0, sp->desc[i],
sp->s2io_entries[i].arg);
- DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
- (unsigned long long)sp->msix_info[i].addr);
+ /* If either data or addr is zero, print it */
+ if (!(sp->msix_info[i].addr &&
+ sp->msix_info[i].data)) {
+ DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
+ "Data:0x%lx\n", sp->desc[i],
+ (unsigned long long)
+ sp->msix_info[i].addr,
+ (unsigned long)
+ ntohl(sp->msix_info[i].data));
+ } else {
+ msix_tx_cnt++;
+ }
} else {
sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
dev->name, i);
err = request_irq(sp->entries[i].vector,
s2io_msix_ring_handle, 0, sp->desc[i],
sp->s2io_entries[i].arg);
- DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
- (unsigned long long)sp->msix_info[i].addr);
+ /* If either data or addr is zero, print it */
+ if (!(sp->msix_info[i].addr &&
+ sp->msix_info[i].data)) {
+ DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
+ "Data:0x%lx\n", sp->desc[i],
+ (unsigned long long)
+ sp->msix_info[i].addr,
+ (unsigned long)
+ ntohl(sp->msix_info[i].data));
+ } else {
+ msix_rx_cnt++;
+ }
}
if (err) {
DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
}
sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
}
+ printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
+ printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
}
if (sp->intr_type == INTA) {
err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
if (!sp->lro) {
skb->protocol = eth_type_trans(skb, dev);
- if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
+ if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
+ vlan_strip_flag) {
/* Queueing the vlan frame to the upper layer */
if (napi)
vlan_hwaccel_receive_skb(skb, sp->vlgrp,
"Defaulting to INTA\n");
*dev_intr_type = INTA;
}
- if ( (rx_ring_num > 1) && (*dev_intr_type != INTA) )
- napi = 0;
+
if (rx_ring_mode > 3) {
DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
return SUCCESS;
}
+/**
+ * rts_ds_steer - Receive traffic steering based on the IPv4 TOS
+ * or IPv6 Traffic Class field.
+ * @nic: device private structure
+ * @ds_codepoint: DS codepoint to configure (0-63)
+ * @ring: receive ring the codepoint is steered to
+ * Description: The function configures receive steering of the
+ * given DS codepoint to the desired receive ring.
+ * Return Value: SUCCESS on success and
+ * FAILURE on an invalid codepoint or command timeout.
+ */
+static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ register u64 val64 = 0;
+
+ if (ds_codepoint > 63)
+ return FAILURE;
+
+ val64 = RTS_DS_MEM_DATA(ring);
+ writeq(val64, &bar0->rts_ds_mem_data);
+
+ val64 = RTS_DS_MEM_CTRL_WE |
+ RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
+ RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
+
+ writeq(val64, &bar0->rts_ds_mem_ctrl);
+
+ return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
+ RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
+ S2IO_BIT_RESET);
+}
+
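rts_ds_steer() fills one of the 64 entries in the DS steering table, indexed by the 6-bit DS codepoint taken from the IPv4 TOS byte (or the IPv6 Traffic Class). A small host-side sketch of that mapping, assuming the standard header layout; the helper name is made up:

#include <stdint.h>

/* The DSCP is the top six bits of the TOS byte; the low two bits
 * are ECN and take no part in steering. */
static inline uint8_t tos_to_ds_codepoint(uint8_t tos)
{
	return tos >> 2;	/* 0..63, the table index programmed above */
}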
/**
* s2io_init_nic - Initialization of the adapter .
* @pdev : structure containing the PCI related information of the device.
RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
writeq(val64, &bar0->rmac_addr_cmd_mem);
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
- RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
tmp64 = readq(&bar0->rmac_addr_data0_mem);
mac_down = (u32) tmp64;
mac_up = (u32) (tmp64 >> 32);
- memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
-
sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
#define FAILURE -1
#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100
-
+#define S2IO_BIT_RESET 1
+#define S2IO_BIT_SET 2
#define CHECKBIT(value, nbit) (value & (1 << nbit))
/* Maximum time to flicker LED when asked to identify NIC using ethtool */
struct xpakStat xpak_stat;
};
+/* Default value for 'vlan_tag_strip' configuration parameter */
+#define NO_STRIP_IN_PROMISC 2
+
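Read together with the rx_pa_cfg hunks earlier in this patch, the three values of the 'vlan_tag_strip' module parameter appear to behave as follows: 0 never strips, 1 always strips, and the default NO_STRIP_IN_PROMISC (2) strips only while the interface is not promiscuous. That policy restated as a standalone helper with an illustrative name, not driver code:

/* Sketch of the strip policy implied by the hunks above. */
static int vlan_should_strip(int vlan_tag_strip, int promisc)
{
	if (vlan_tag_strip == 0)
		return 0;	/* never strip */
	if (vlan_tag_strip == 1)
		return 1;	/* always strip */
	return !promisc;	/* NO_STRIP_IN_PROMISC (default) */
}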
/*
* Structures representing different init time configuration
* parameters of the NIC.
static void s2io_card_down(struct s2io_nic *nic);
static int s2io_card_up(struct s2io_nic *nic);
static int get_xena_rev_id(struct pci_dev *pdev);
-static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit);
+static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
+ int bit_state);
static int s2io_add_isr(struct s2io_nic * sp);
static void s2io_rem_isr(struct s2io_nic * sp);
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
struct sk_buff *skb, u32 tcp_len);
+static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring);
#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/route.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <asm/byteorder.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
-#include <asm/sgialib.h>
#include "sgiseeq.h"
static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
{
- int i = 0;
+ int i;
u16 status;
- while (i++ < 2)
+ for (i = 0; i < 2; i++)
status = mdio_read(net_dev, phy_addr, MII_STATUS);
mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET );
int i = 0;
u32 status;
- while (i++ < 2)
+ for (i = 0; i < 2; i++)
status = mdio_read(net_dev, phy_addr, MII_STATUS);
if (!(status & MII_STAT_LINK)){
int phy_addr = sis_priv->cur_phy;
u32 status;
u16 autoadv, autorec;
- int i = 0;
+ int i;
- while (i++ < 2)
+ for (i = 0; i < 2; i++)
status = mdio_read(net_dev, phy_addr, MII_STATUS);
if (!(status & MII_STAT_LINK))
/*
* map from state to downstream port type
*/
-static const u_char cf_to_ptype[] = {
+static const unsigned char cf_to_ptype[] = {
TNONE,TNONE,TNONE,TNONE,TNONE,
TNONE,TB,TB,TS,
TA,TB,TS,TB
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T) },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
{ PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
- { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
+ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, skge_id_table);
return err;
}
+static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
+
+static void genesis_add_filter(u8 filter[8], const u8 *addr)
+{
+ u32 crc, bit;
+
+ crc = ether_crc_le(ETH_ALEN, addr);
+ bit = ~crc & 0x3f;
+ filter[bit/8] |= 1 << (bit%8);
+}
+
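genesis_add_filter() is the usual imperfect multicast hash: the complemented low six bits of the little-endian CRC of the address select one of 64 filter bits. The sketch below reproduces the selection with an open-coded CRC-32 so it can be tried outside the kernel; ether_crc_le() is the real helper, everything else is illustrative. yukon_add_filter() further down does the same with the non-complemented big-endian CRC.

#include <stdint.h>
#include <stddef.h>

#define ETH_ALEN 6

/* Bit-at-a-time little-endian CRC-32, equivalent to ether_crc_le(). */
static uint32_t crc32_le(const uint8_t *p, size_t len)
{
	uint32_t crc = ~0u;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

static void add_filter(uint8_t filter[8], const uint8_t addr[ETH_ALEN])
{
	uint32_t bit = ~crc32_le(addr, ETH_ALEN) & 0x3f;	/* 0..63 */

	filter[bit / 8] |= 1 << (bit % 8);
}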
static void genesis_set_multicast(struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
memset(filter, 0xff, sizeof(filter));
else {
memset(filter, 0, sizeof(filter));
- for (i = 0; list && i < count; i++, list = list->next) {
- u32 crc, bit;
- crc = ether_crc_le(ETH_ALEN, list->dmi_addr);
- bit = ~crc & 0x3f;
- filter[bit/8] |= 1 << (bit%8);
- }
+
+ if (skge->flow_status == FLOW_STAT_REM_SEND
+ || skge->flow_status == FLOW_STAT_SYMMETRIC)
+ genesis_add_filter(filter, pause_mc_addr);
+
+ for (i = 0; list && i < count; i++, list = list->next)
+ genesis_add_filter(filter, list->dmi_addr);
}
xm_write32(hw, port, XM_MODE, mode);
xm_outhash(hw, port, XM_HSM, filter);
}
+static void yukon_add_filter(u8 filter[8], const u8 *addr)
+{
+ u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
+ filter[bit/8] |= 1 << (bit%8);
+}
+
static void yukon_set_multicast(struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
struct dev_mc_list *list = dev->mc_list;
+ int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND
+ || skge->flow_status == FLOW_STAT_SYMMETRIC);
u16 reg;
u8 filter[8];
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if (dev->flags & IFF_ALLMULTI) /* all multicast */
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0) /* no multicast */
+ else if (dev->mc_count == 0 && !rx_pause)/* no multicast */
reg &= ~GM_RXCR_MCF_ENA;
else {
int i;
reg |= GM_RXCR_MCF_ENA;
- for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
- u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
- filter[bit/8] |= 1 << (bit%8);
- }
+ if (rx_pause)
+ yukon_add_filter(filter, pause_mc_addr);
+
+ for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ yukon_add_filter(filter, list->dmi_addr);
}
GMR_FS_JABBER,
/* Rx GMAC FIFO Flush Mask (default) */
RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
- GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE |
- GMR_FS_JABBER,
+ GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER,
};
/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
- if (sky2->vlgrp)
- sky2->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(sky2->vlgrp, vid, NULL);
netif_tx_unlock_bh(dev);
}
/*
- * Network device driver for Cell Processor-Based Blade
+ * Network device driver for Cell Processor-Based Blade and Celleb platform
*
* (C) Copyright IBM Corp. 2005
+ * (C) Copyright 2006 TOSHIBA CORPORATION
*
* Authors : Utz Bacher <utz.bacher@de.ibm.com>
* Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
return readvalue;
}
+/**
+ * spider_net_setup_aneg - initial auto-negotiation setup
+ * @card: device structure
+ **/
+static void
+spider_net_setup_aneg(struct spider_net_card *card)
+{
+ struct mii_phy *phy = &card->phy;
+ u32 advertise = 0;
+ u16 bmcr, bmsr, stat1000, estat;
+
+ bmcr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMCR);
+ bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
+ stat1000 = spider_net_read_phy(card->netdev, phy->mii_id, MII_STAT1000);
+ estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
+
+ if (bmsr & BMSR_10HALF)
+ advertise |= ADVERTISED_10baseT_Half;
+ if (bmsr & BMSR_10FULL)
+ advertise |= ADVERTISED_10baseT_Full;
+ if (bmsr & BMSR_100HALF)
+ advertise |= ADVERTISED_100baseT_Half;
+ if (bmsr & BMSR_100FULL)
+ advertise |= ADVERTISED_100baseT_Full;
+
+ if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
+ advertise |= ADVERTISED_1000baseT_Full;
+ if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
+ advertise |= ADVERTISED_1000baseT_Half;
+
+ mii_phy_probe(phy, phy->mii_id);
+ phy->def->ops->setup_aneg(phy, advertise);
+}
+
/**
* spider_net_rx_irq_off - switch off rx irq on this spider card
* @card: device structure
* returns the status as in the dmac_cmd_status field of the descriptor
*/
static inline int
-spider_net_get_descr_status(struct spider_net_descr *descr)
+spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
{
- return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
+ return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
}
/**
descr = chain->ring;
do {
descr->bus_addr = 0;
- descr->next_descr_addr = 0;
+ descr->hwdescr->next_descr_addr = 0;
descr = descr->next;
} while (descr != chain->ring);
dma_free_coherent(&card->pdev->dev, chain->num_desc,
- chain->ring, chain->dma_addr);
+ chain->hwring, chain->dma_addr);
}
/**
{
int i;
struct spider_net_descr *descr;
+ struct spider_net_hw_descr *hwdescr;
dma_addr_t buf;
size_t alloc_size;
- alloc_size = chain->num_desc * sizeof (struct spider_net_descr);
+ alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
- chain->ring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
+ chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
&chain->dma_addr, GFP_KERNEL);
- if (!chain->ring)
+ if (!chain->hwring)
return -ENOMEM;
- descr = chain->ring;
- memset(descr, 0, alloc_size);
+ memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));
/* Set up the hardware pointers in each descriptor */
+ descr = chain->ring;
+ hwdescr = chain->hwring;
buf = chain->dma_addr;
- for (i=0; i < chain->num_desc; i++, descr++) {
- descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
+ for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
+ hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
+ hwdescr->next_descr_addr = 0;
+ descr->hwdescr = hwdescr;
descr->bus_addr = buf;
- descr->next_descr_addr = 0;
descr->next = descr + 1;
descr->prev = descr - 1;
- buf += sizeof(struct spider_net_descr);
+ buf += sizeof(struct spider_net_hw_descr);
}
/* do actual circular list */
(descr-1)->next = chain->ring;
descr = card->rx_chain.head;
do {
if (descr->skb) {
- dev_kfree_skb(descr->skb);
- pci_unmap_single(card->pdev, descr->buf_addr,
+ pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
SPIDER_NET_MAX_FRAME,
PCI_DMA_BIDIRECTIONAL);
+ dev_kfree_skb(descr->skb);
+ descr->skb = NULL;
}
descr = descr->next;
} while (descr != card->rx_chain.head);
spider_net_prepare_rx_descr(struct spider_net_card *card,
struct spider_net_descr *descr)
{
+ struct spider_net_hw_descr *hwdescr = descr->hwdescr;
dma_addr_t buf;
int offset;
int bufsize;
card->spider_stats.alloc_rx_skb_error++;
return -ENOMEM;
}
- descr->buf_size = bufsize;
- descr->result_size = 0;
- descr->valid_size = 0;
- descr->data_status = 0;
- descr->data_error = 0;
+ hwdescr->buf_size = bufsize;
+ hwdescr->result_size = 0;
+ hwdescr->valid_size = 0;
+ hwdescr->data_status = 0;
+ hwdescr->data_error = 0;
offset = ((unsigned long)descr->skb->data) &
(SPIDER_NET_RXBUF_ALIGN - 1);
/* iommu-map the skb */
buf = pci_map_single(card->pdev, descr->skb->data,
SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
- descr->buf_addr = buf;
if (pci_dma_mapping_error(buf)) {
dev_kfree_skb_any(descr->skb);
+ descr->skb = NULL;
if (netif_msg_rx_err(card) && net_ratelimit())
pr_err("Could not iommu-map rx buffer\n");
card->spider_stats.rx_iommu_map_error++;
- descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
+ hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
} else {
- descr->next_descr_addr = 0;
+ hwdescr->buf_addr = buf;
+ hwdescr->next_descr_addr = 0;
wmb();
- descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
+ hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
SPIDER_NET_DMAC_NOINTR_COMPLETE;
wmb();
- descr->prev->next_descr_addr = descr->bus_addr;
+ descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
}
return 0;
if (!spin_trylock_irqsave(&chain->lock, flags))
return;
- while (spider_net_get_descr_status(chain->head) ==
+ while (spider_net_get_descr_status(chain->head->hwdescr) ==
SPIDER_NET_DESCR_NOT_IN_USE) {
if (spider_net_prepare_rx_descr(card, chain->head))
break;
spider_net_prepare_tx_descr(struct spider_net_card *card,
struct sk_buff *skb)
{
+ struct spider_net_descr_chain *chain = &card->tx_chain;
struct spider_net_descr *descr;
+ struct spider_net_hw_descr *hwdescr;
dma_addr_t buf;
unsigned long flags;
return -ENOMEM;
}
- spin_lock_irqsave(&card->tx_chain.lock, flags);
+ spin_lock_irqsave(&chain->lock, flags);
descr = card->tx_chain.head;
- card->tx_chain.head = descr->next;
+ if (descr->next == chain->tail->prev) {
+ spin_unlock_irqrestore(&chain->lock, flags);
+ pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
+ return -ENOMEM;
+ }
+ hwdescr = descr->hwdescr;
+ chain->head = descr->next;
- descr->buf_addr = buf;
- descr->buf_size = skb->len;
- descr->next_descr_addr = 0;
descr->skb = skb;
- descr->data_status = 0;
+ hwdescr->buf_addr = buf;
+ hwdescr->buf_size = skb->len;
+ hwdescr->next_descr_addr = 0;
+ hwdescr->data_status = 0;
- descr->dmac_cmd_status =
+ hwdescr->dmac_cmd_status =
SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
- spin_unlock_irqrestore(&card->tx_chain.lock, flags);
+ spin_unlock_irqrestore(&chain->lock, flags);
if (skb->protocol == htons(ETH_P_IP))
switch (skb->nh.iph->protocol) {
case IPPROTO_TCP:
- descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
+ hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
break;
case IPPROTO_UDP:
- descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
+ hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
break;
}
/* Chain the bus address, so that the DMA engine finds this descr. */
- descr->prev->next_descr_addr = descr->bus_addr;
+ wmb();
+ descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
return 0;
static int
spider_net_set_low_watermark(struct spider_net_card *card)
{
+ struct spider_net_descr *descr = card->tx_chain.tail;
+ struct spider_net_hw_descr *hwdescr;
unsigned long flags;
int status;
int cnt=0;
int i;
- struct spider_net_descr *descr = card->tx_chain.tail;
/* Measure the length of the queue. Measurement does not
* need to be precise -- does not need a lock. */
while (descr != card->tx_chain.head) {
- status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
+ status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
if (status == SPIDER_NET_DESCR_NOT_IN_USE)
break;
descr = descr->next;
/* Set the new watermark, clear the old watermark */
spin_lock_irqsave(&card->tx_chain.lock, flags);
- descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
- if (card->low_watermark && card->low_watermark != descr)
- card->low_watermark->dmac_cmd_status =
- card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
+ descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
+ if (card->low_watermark && card->low_watermark != descr) {
+ hwdescr = card->low_watermark->hwdescr;
+ hwdescr->dmac_cmd_status =
+ hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
+ }
card->low_watermark = descr;
spin_unlock_irqrestore(&card->tx_chain.lock, flags);
return cnt;
{
struct spider_net_descr_chain *chain = &card->tx_chain;
struct spider_net_descr *descr;
+ struct spider_net_hw_descr *hwdescr;
struct sk_buff *skb;
u32 buf_addr;
unsigned long flags;
int status;
- while (chain->tail != chain->head) {
+ while (1) {
spin_lock_irqsave(&chain->lock, flags);
+ if (chain->tail == chain->head) {
+ spin_unlock_irqrestore(&chain->lock, flags);
+ return 0;
+ }
descr = chain->tail;
+ hwdescr = descr->hwdescr;
- status = spider_net_get_descr_status(descr);
+ status = spider_net_get_descr_status(hwdescr);
switch (status) {
case SPIDER_NET_DESCR_COMPLETE:
card->netdev_stats.tx_packets++;
}
chain->tail = descr->next;
- descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
+ hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
skb = descr->skb;
- buf_addr = descr->buf_addr;
+ descr->skb = NULL;
+ buf_addr = hwdescr->buf_addr;
spin_unlock_irqrestore(&chain->lock, flags);
/* unmap the skb */
descr = card->tx_chain.tail;
for (;;) {
- if (spider_net_get_descr_status(descr) ==
+ if (spider_net_get_descr_status(descr->hwdescr) ==
SPIDER_NET_DESCR_CARDOWNED) {
spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
descr->bus_addr);
{
int cnt;
struct spider_net_card *card = netdev_priv(netdev);
- struct spider_net_descr_chain *chain = &card->tx_chain;
spider_net_release_tx_chain(card, 0);
- if ((chain->head->next == chain->tail->prev) ||
- (spider_net_prepare_tx_descr(card, skb) != 0)) {
-
+ if (spider_net_prepare_tx_descr(card, skb) != 0) {
card->netdev_stats.tx_dropped++;
netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
spider_net_pass_skb_up(struct spider_net_descr *descr,
struct spider_net_card *card)
{
+ struct spider_net_hw_descr *hwdescr = descr->hwdescr;
struct sk_buff *skb;
struct net_device *netdev;
u32 data_status, data_error;
- data_status = descr->data_status;
- data_error = descr->data_error;
+ data_status = hwdescr->data_status;
+ data_error = hwdescr->data_error;
netdev = card->netdev;
skb = descr->skb;
skb->dev = netdev;
- skb_put(skb, descr->valid_size);
+ skb_put(skb, hwdescr->valid_size);
/* the card seems to add 2 bytes of junk in front
* of the ethernet frame */
#endif
/**
- * spider_net_decode_one_descr - processes an rx descriptor
+ * spider_net_decode_one_descr - processes an RX descriptor
* @card: card structure
*
- * Returns 1 if a packet has been sent to the stack, otherwise 0
+ * Returns 1 if a packet has been sent to the stack, otherwise 0.
*
- * Processes an rx descriptor by iommu-unmapping the data buffer and passing
- * the packet up to the stack. This function is called in softirq
- * context, e.g. either bottom half from interrupt or NAPI polling context
+ * Processes an RX descriptor by iommu-unmapping the data buffer
+ * and passing the packet up to the stack. This function is called
+ * in softirq context, e.g. either bottom half from interrupt or
+ * NAPI polling context.
*/
static int
spider_net_decode_one_descr(struct spider_net_card *card)
{
struct spider_net_descr_chain *chain = &card->rx_chain;
struct spider_net_descr *descr = chain->tail;
+ struct spider_net_hw_descr *hwdescr = descr->hwdescr;
int status;
- status = spider_net_get_descr_status(descr);
+ status = spider_net_get_descr_status(hwdescr);
/* Nothing in the descriptor, or ring must be empty */
if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
chain->tail = descr->next;
/* unmap descriptor */
- pci_unmap_single(card->pdev, descr->buf_addr,
+ pci_unmap_single(card->pdev, hwdescr->buf_addr,
SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
(status != SPIDER_NET_DESCR_FRAME_END) ) {
if (netif_msg_rx_err(card))
- pr_err("%s: RX descriptor with unkown state %d\n",
+ pr_err("%s: RX descriptor with unknown state %d\n",
card->netdev->name, status);
card->spider_stats.rx_desc_unk_state++;
goto bad_desc;
}
/* The cases we'll throw away the packet immediately */
- if (descr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
+ if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
if (netif_msg_rx_err(card))
pr_err("%s: error in received descriptor found, "
"data_status=x%08x, data_error=x%08x\n",
card->netdev->name,
- descr->data_status, descr->data_error);
+ hwdescr->data_status, hwdescr->data_error);
goto bad_desc;
}
- if (descr->dmac_cmd_status & 0xfefe) {
+ if (hwdescr->dmac_cmd_status & 0xfefe) {
pr_err("%s: bad status, cmd_status=x%08x\n",
card->netdev->name,
- descr->dmac_cmd_status);
- pr_err("buf_addr=x%08x\n", descr->buf_addr);
- pr_err("buf_size=x%08x\n", descr->buf_size);
- pr_err("next_descr_addr=x%08x\n", descr->next_descr_addr);
- pr_err("result_size=x%08x\n", descr->result_size);
- pr_err("valid_size=x%08x\n", descr->valid_size);
- pr_err("data_status=x%08x\n", descr->data_status);
- pr_err("data_error=x%08x\n", descr->data_error);
- pr_err("bus_addr=x%08x\n", descr->bus_addr);
+ hwdescr->dmac_cmd_status);
+ pr_err("buf_addr=x%08x\n", hwdescr->buf_addr);
+ pr_err("buf_size=x%08x\n", hwdescr->buf_size);
+ pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
+ pr_err("result_size=x%08x\n", hwdescr->result_size);
+ pr_err("valid_size=x%08x\n", hwdescr->valid_size);
+ pr_err("data_status=x%08x\n", hwdescr->data_status);
+ pr_err("data_error=x%08x\n", hwdescr->data_error);
pr_err("which=%ld\n", descr - card->rx_chain.ring);
card->spider_stats.rx_desc_error++;
/* Ok, we've got a packet in descr */
spider_net_pass_skb_up(descr, card);
- descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
+ hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
return 1;
bad_desc:
dev_kfree_skb_irq(descr->skb);
- descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
+ descr->skb = NULL;
+ hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
return 0;
}
return 0;
}
+/**
+ * spider_net_link_reset
+ * @netdev: net device structure
+ *
+ * This is called when the PHY_LINK signal is asserted. For the blade this is
+ * not connected, so we should never get here.
+ *
+ */
+static void
+spider_net_link_reset(struct net_device *netdev)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+
+ del_timer_sync(&card->aneg_timer);
+
+ /* clear interrupt, block further interrupts */
+ spider_net_write_reg(card, SPIDER_NET_GMACST,
+ spider_net_read_reg(card, SPIDER_NET_GMACST));
+ spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
+
+ /* reset phy and setup aneg */
+ spider_net_setup_aneg(card);
+ mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
+}
+
/**
* spider_net_handle_error_irq - handles errors raised by an interrupt
* @card: card structure
switch (i)
{
case SPIDER_NET_GTMFLLINT:
- if (netif_msg_intr(card) && net_ratelimit())
- pr_err("Spider TX RAM full\n");
+ /* TX RAM full may happen during normal operation,
+ * so logging is not needed. */
show_error = 0;
break;
case SPIDER_NET_GRFDFLLINT: /* fallthrough */
if (status_reg & SPIDER_NET_TXINT)
netif_rx_schedule(netdev);
+ if (status_reg & SPIDER_NET_LINKINT)
+ spider_net_link_reset(netdev);
+
if (status_reg & SPIDER_NET_ERRINT )
spider_net_handle_error_irq(card, status_reg);
spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
SPIDER_NET_CKRCTRL_RUN_VALUE);
+
+ /* trigger ETOMOD signal */
+ spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
+ spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
+
}
/**
spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
SPIDER_NET_LENLMT_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GMACMODE,
- SPIDER_NET_MACMODE_VALUE);
spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
SPIDER_NET_OPMODE_VALUE);
SPIDER_NET_GDTBSTA);
}
-/**
- * spider_net_open - called upon ifonfig up
- * @netdev: interface device structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_open allocates all the descriptors and memory needed for
- * operation, sets up multicast list and enables interrupts
- */
-int
-spider_net_open(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- int result;
-
- result = spider_net_init_chain(card, &card->tx_chain);
- if (result)
- goto alloc_tx_failed;
- card->low_watermark = NULL;
-
- result = spider_net_init_chain(card, &card->rx_chain);
- if (result)
- goto alloc_rx_failed;
-
- /* Allocate rx skbs */
- if (spider_net_alloc_rx_skbs(card))
- goto alloc_skbs_failed;
-
- spider_net_set_multi(netdev);
-
- /* further enhancement: setup hw vlan, if needed */
-
- result = -EBUSY;
- if (request_irq(netdev->irq, spider_net_interrupt,
- IRQF_SHARED, netdev->name, netdev))
- goto register_int_failed;
-
- spider_net_enable_card(card);
-
- netif_start_queue(netdev);
- netif_carrier_on(netdev);
- netif_poll_enable(netdev);
-
- return 0;
-
-register_int_failed:
- spider_net_free_rx_chain_contents(card);
-alloc_skbs_failed:
- spider_net_free_chain(card, &card->rx_chain);
-alloc_rx_failed:
- spider_net_free_chain(card, &card->tx_chain);
-alloc_tx_failed:
- return result;
-}
-
-/**
- * spider_net_setup_phy - setup PHY
- * @card: card structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_setup_phy is used as part of spider_net_probe. Sets
- * the PHY to 1000 Mbps
- **/
-static int
-spider_net_setup_phy(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
- SPIDER_NET_DMASEL_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
- SPIDER_NET_PHY_CTRL_VALUE);
- phy->mii_id = 1;
- phy->dev = card->netdev;
- phy->mdio_read = spider_net_read_phy;
- phy->mdio_write = spider_net_write_phy;
-
- mii_phy_probe(phy, phy->mii_id);
-
- if (phy->def->ops->setup_forced)
- phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);
-
- phy->def->ops->enable_fiber(phy);
-
- phy->def->ops->read_link(phy);
- pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
- phy->speed, phy->duplex==1 ? "Full" : "Half");
-
- return 0;
-}
-
/**
* spider_net_download_firmware - loads firmware into the adapter
* @card: card structure
return err;
}
+/**
+ * spider_net_open - called upon ifconfig up
+ * @netdev: interface device structure
+ *
+ * returns 0 on success, <0 on failure
+ *
+ * spider_net_open allocates all the descriptors and memory needed for
+ * operation, sets up multicast list and enables interrupts
+ */
+int
+spider_net_open(struct net_device *netdev)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+ int result;
+
+ result = spider_net_init_firmware(card);
+ if (result)
+ goto init_firmware_failed;
+
+ /* start probing with copper */
+ spider_net_setup_aneg(card);
+ if (card->phy.def->phy_id)
+ mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
+
+ result = spider_net_init_chain(card, &card->tx_chain);
+ if (result)
+ goto alloc_tx_failed;
+ card->low_watermark = NULL;
+
+ result = spider_net_init_chain(card, &card->rx_chain);
+ if (result)
+ goto alloc_rx_failed;
+
+ /* Allocate rx skbs */
+ if (spider_net_alloc_rx_skbs(card))
+ goto alloc_skbs_failed;
+
+ spider_net_set_multi(netdev);
+
+ /* further enhancement: setup hw vlan, if needed */
+
+ result = -EBUSY;
+ if (request_irq(netdev->irq, spider_net_interrupt,
+ IRQF_SHARED, netdev->name, netdev))
+ goto register_int_failed;
+
+ spider_net_enable_card(card);
+
+ netif_start_queue(netdev);
+ netif_carrier_on(netdev);
+ netif_poll_enable(netdev);
+
+ return 0;
+
+register_int_failed:
+ spider_net_free_rx_chain_contents(card);
+alloc_skbs_failed:
+ spider_net_free_chain(card, &card->rx_chain);
+alloc_rx_failed:
+ spider_net_free_chain(card, &card->tx_chain);
+alloc_tx_failed:
+ del_timer_sync(&card->aneg_timer);
+init_firmware_failed:
+ return result;
+}
+
+/**
+ * spider_net_link_phy
+ * @data: used for pointer to card structure
+ *
+ */
+static void spider_net_link_phy(unsigned long data)
+{
+ struct spider_net_card *card = (struct spider_net_card *)data;
+ struct mii_phy *phy = &card->phy;
+
+ /* if the link didn't come up after SPIDER_NET_ANEG_TIMEOUT
+ * tries, set up the PHY again */
+ if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
+
+ pr_info("%s: link is down trying to bring it up\n", card->netdev->name);
+
+ switch (card->medium) {
+ case BCM54XX_COPPER:
+ /* enable fiber with autonegotiation first */
+ if (phy->def->ops->enable_fiber)
+ phy->def->ops->enable_fiber(phy, 1);
+ card->medium = BCM54XX_FIBER;
+ break;
+
+ case BCM54XX_FIBER:
+ /* fiber didn't come up, try to disable fiber autoneg */
+ if (phy->def->ops->enable_fiber)
+ phy->def->ops->enable_fiber(phy, 0);
+ card->medium = BCM54XX_UNKNOWN;
+ break;
+
+ case BCM54XX_UNKNOWN:
+ /* copper, fiber with and without failed,
+ * retry from beginning */
+ spider_net_setup_aneg(card);
+ card->medium = BCM54XX_COPPER;
+ break;
+ }
+
+ card->aneg_count = 0;
+ mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
+ return;
+ }
+
+ /* link still not up, try again later */
+ if (!(phy->def->ops->poll_link(phy))) {
+ card->aneg_count++;
+ mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
+ return;
+ }
+
+ /* link came up, get abilities */
+ phy->def->ops->read_link(phy);
+
+ spider_net_write_reg(card, SPIDER_NET_GMACST,
+ spider_net_read_reg(card, SPIDER_NET_GMACST));
+ spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
+
+ if (phy->speed == 1000)
+ spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
+ else
+ spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
+
+ card->aneg_count = 0;
+
+ pr_debug("Found %s with %i Mbps, %s-duplex %sautoneg.\n",
+ phy->def->name, phy->speed, phy->duplex == 1 ? "Full" : "Half",
+ phy->autoneg == 1 ? "" : "no ");
+
+ return;
+}
+
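spider_net_link_phy() above is a small media-probe state machine: copper autonegotiation first, then fiber with autonegotiation, then fiber with autonegotiation forced off, and back to copper. Its skeleton with the PHY accesses stripped out (illustrative names only):

enum probe_medium { PROBE_COPPER, PROBE_FIBER_ANEG, PROBE_FIBER_FORCED };

/* Next medium to try when the link has not come up in time. */
static enum probe_medium next_medium(enum probe_medium m)
{
	switch (m) {
	case PROBE_COPPER:
		return PROBE_FIBER_ANEG;	/* enable_fiber(phy, 1) */
	case PROBE_FIBER_ANEG:
		return PROBE_FIBER_FORCED;	/* enable_fiber(phy, 0) */
	default:
		return PROBE_COPPER;		/* spider_net_setup_aneg() */
	}
}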
+/**
+ * spider_net_setup_phy - setup PHY
+ * @card: card structure
+ *
+ * returns 0 on success, <0 on failure
+ *
+ * spider_net_setup_phy is used as part of spider_net_probe.
+ **/
+static int
+spider_net_setup_phy(struct spider_net_card *card)
+{
+ struct mii_phy *phy = &card->phy;
+
+ spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
+ SPIDER_NET_DMASEL_VALUE);
+ spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
+ SPIDER_NET_PHY_CTRL_VALUE);
+
+ phy->dev = card->netdev;
+ phy->mdio_read = spider_net_read_phy;
+ phy->mdio_write = spider_net_write_phy;
+
+ for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
+ unsigned short id;
+ id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
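+ /* an absent PHY usually reads back as 0x0000 or 0xffff */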
+ if (id != 0x0000 && id != 0xffff) {
+ if (!mii_phy_probe(phy, phy->mii_id)) {
+ pr_info("Found %s.\n", phy->def->name);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
/**
* spider_net_workaround_rxramfull - work around firmware bug
* @card: card structure
netif_carrier_off(netdev);
netif_stop_queue(netdev);
del_timer_sync(&card->tx_timer);
+ del_timer_sync(&card->aneg_timer);
/* disable/mask all interrupts */
spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
+ spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
- /* free_irq(netdev->irq, netdev);*/
- free_irq(to_pci_dev(netdev->dev.parent)->irq, netdev);
+ free_irq(netdev->irq, netdev);
spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
SPIDER_NET_DMA_TX_FEND_VALUE);
spider_net_release_tx_chain(card, 1);
spider_net_free_rx_chain_contents(card);
- spider_net_free_rx_chain_contents(card);
-
spider_net_free_chain(card, &card->tx_chain);
spider_net_free_chain(card, &card->rx_chain);
if (spider_net_setup_phy(card))
goto out;
- if (spider_net_init_firmware(card))
- goto out;
spider_net_open(netdev);
spider_net_kick_tx_dma(card);
card->tx_timer.data = (unsigned long) card;
netdev->irq = card->pdev->irq;
- card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
+ card->aneg_count = 0;
+ init_timer(&card->aneg_timer);
+ card->aneg_timer.function = spider_net_link_phy;
+ card->aneg_timer.data = (unsigned long) card;
- card->tx_chain.num_desc = tx_descriptors;
- card->rx_chain.num_desc = rx_descriptors;
+ card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
spider_net_setup_netdev_ops(netdev);
{
struct net_device *netdev;
struct spider_net_card *card;
+ size_t alloc_size;
- netdev = alloc_etherdev(sizeof(struct spider_net_card));
+ alloc_size = sizeof(struct spider_net_card) +
+ (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
+ netdev = alloc_etherdev(alloc_size);
if (!netdev)
return NULL;
init_waitqueue_head(&card->waitq);
atomic_set(&card->tx_timeout_task_counter, 0);
+ card->rx_chain.num_desc = rx_descriptors;
+ card->rx_chain.ring = card->darray;
+ card->tx_chain.num_desc = tx_descriptors;
+ card->tx_chain.ring = card->darray + rx_descriptors;
+
return card;
}
if (err)
goto out_undo_pci;
- err = spider_net_init_firmware(card);
- if (err)
- goto out_undo_pci;
-
err = spider_net_setup_netdev(card);
if (err)
goto out_undo_pci;
/*
- * Network device driver for Cell Processor-Based Blade
+ * Network device driver for Cell Processor-Based Blade and Celleb platform
*
* (C) Copyright IBM Corp. 2005
+ * (C) Copyright 2006 TOSHIBA CORPORATION
*
* Authors : Utz Bacher <utz.bacher@de.ibm.com>
* Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
#ifndef _SPIDER_NET_H
#define _SPIDER_NET_H
-#define VERSION "1.6 B"
+#define VERSION "2.0 A"
#include "sungem_phy.h"
#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
#define SPIDER_NET_TX_TIMER (HZ/5)
+#define SPIDER_NET_ANEG_TIMER (HZ)
+#define SPIDER_NET_ANEG_TIMEOUT 2
#define SPIDER_NET_RX_CSUM_DEFAULT 1
#define SPIDER_NET_GMACOPEMD 0x00000100
#define SPIDER_NET_GMACLENLMT 0x00000108
+#define SPIDER_NET_GMACST 0x00000110
#define SPIDER_NET_GMACINTEN 0x00000118
#define SPIDER_NET_GMACPHYCTRL 0x00000120
/* pause frames: automatic, no upper retransmission count */
/* outside loopback mode: ETOMOD signal doesn't matter, not connected */
-#define SPIDER_NET_OPMODE_VALUE 0x00000063
+/* ETOMOD signal is brought to PHY reset. Bit 2 must be 1 on Celleb */
+#define SPIDER_NET_OPMODE_VALUE 0x00000067
/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
#define SPIDER_NET_LENLMT_VALUE 0x00000908
/* We rely on flagged descriptor interrupts */
#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
+#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT )
+
#define SPIDER_NET_ERRINT ( 0xffffffff & \
(~SPIDER_NET_TXINT) & \
- (~SPIDER_NET_RXINT) )
+ (~SPIDER_NET_RXINT) & \
+ (~SPIDER_NET_LINKINT) )
#define SPIDER_NET_GPREXEC 0x80000000
#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
-struct spider_net_descr {
- /* as defined by the hardware */
+/* Descriptor, as defined by the hardware */
+struct spider_net_hw_descr {
u32 buf_addr;
u32 buf_size;
u32 next_descr_addr;
u32 valid_size; /* all zeroes for tx */
u32 data_status;
u32 data_error; /* all zeroes for tx */
+} __attribute__((aligned(32)));
- /* used in the driver */
+struct spider_net_descr {
+ struct spider_net_hw_descr *hwdescr;
struct sk_buff *skb;
u32 bus_addr;
struct spider_net_descr *next;
struct spider_net_descr *prev;
-} __attribute__((aligned(32)));
+};
struct spider_net_descr_chain {
spinlock_t lock;
struct spider_net_descr *tail;
struct spider_net_descr *ring;
int num_desc;
+ struct spider_net_hw_descr *hwring;
dma_addr_t dma_addr;
};
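Only spider_net_hw_descr now lives in the DMA-coherent hwring that the card walks; the spider_net_descr array keeps the skb pointer and the prev/next links purely on the driver side. Since the hwring is one contiguous allocation, the bus address of any hardware descriptor is simple pointer arithmetic, which is what the init loop earlier relies on. A sketch, assuming the fields above:

/* Bus address of the i-th hardware descriptor in the coherent ring. */
static dma_addr_t hw_descr_bus_addr(struct spider_net_descr_chain *chain,
				    int i)
{
	return chain->dma_addr + i * sizeof(struct spider_net_hw_descr);
}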
struct pci_dev *pdev;
struct mii_phy phy;
+ int medium;
+
void __iomem *regs;
struct spider_net_descr_chain tx_chain;
struct spider_net_descr_chain rx_chain;
struct spider_net_descr *low_watermark;
+ int aneg_count;
+ struct timer_list aneg_timer;
struct timer_list tx_timer;
struct work_struct tx_timeout_task;
atomic_t tx_timeout_task_counter;
struct net_device_stats netdev_stats;
struct spider_net_extra_stats spider_stats;
struct spider_net_options options;
+
+ /* Must be last item in struct */
+ struct spider_net_descr darray[0];
};
#define pr_err(fmt,arg...) \
spin_lock(&np->lock);
if (debug > 1)
printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
- if (np->vlgrp)
- np->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(np->vlgrp, vid, NULL);
set_rx_mode(dev);
spin_unlock(&np->lock);
}
int vlan_count = 0;
void __iomem *filter_addr = ioaddr + HashTable + 8;
for (i = 0; i < VLAN_VID_MASK; i++) {
- if (np->vlgrp->vlan_devices[i]) {
+ if (vlan_group_get_device(np->vlgrp, i)) {
if (vlan_count >= 32)
break;
writew(cpu_to_be16(i), filter_addr);
static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
static int fifo=0x8; /* don't change */
-/* #define REALLY_SLOW_IO */
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
return 0;
}
+static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
+{
+ u16 ctl, adv;
+
+ phy->autoneg = 1;
+ phy->speed = SPEED_10;
+ phy->duplex = DUPLEX_HALF;
+ phy->pause = 0;
+ phy->advertising = advertise;
+
+ /* Setup standard advertise */
+ adv = phy_read(phy, MII_ADVERTISE);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ phy_write(phy, MII_ADVERTISE, adv);
+
+ /* Start/Restart aneg */
+ ctl = phy_read(phy, MII_BMCR);
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
+{
+ u16 ctl;
+
+ phy->autoneg = 0;
+ phy->speed = speed;
+ phy->duplex = fd;
+ phy->pause = 0;
+
+ ctl = phy_read(phy, MII_BMCR);
+ ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
+
+ /* First reset the PHY */
+ phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
+
+ /* Select speed & duplex */
+ switch (speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ ctl |= BMCR_SPEED100;
+ break;
+ case SPEED_1000:
+ default:
+ return -EINVAL;
+ }
+ if (fd == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int genmii_poll_link(struct mii_phy *phy)
+{
+ u16 status;
+
+ (void)phy_read(phy, MII_BMSR);
+ status = phy_read(phy, MII_BMSR);
+ if ((status & BMSR_LSTATUS) == 0)
+ return 0;
+ if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
+ return 0;
+ return 1;
+}
+
+static int genmii_read_link(struct mii_phy *phy)
+{
+ u16 lpa;
+
+ if (phy->autoneg) {
+ lpa = phy_read(phy, MII_LPA);
+
+ if (lpa & (LPA_10FULL | LPA_100FULL))
+ phy->duplex = DUPLEX_FULL;
+ else
+ phy->duplex = DUPLEX_HALF;
+ if (lpa & (LPA_100FULL | LPA_100HALF))
+ phy->speed = SPEED_100;
+ else
+ phy->speed = SPEED_10;
+ phy->pause = 0;
+ }
+ /* On non-aneg, we assume what we put in BMCR is the speed,
+ * though magic-aneg shouldn't prevent this case from occurring
+ */
+
+ return 0;
+}
+
static int generic_suspend(struct mii_phy* phy)
{
phy_write(phy, MII_BMCR, BMCR_PDOWN);
return 0;
}
-static int bcm5421_enable_fiber(struct mii_phy* phy)
-{
- /* enable fiber mode */
- phy_write(phy, MII_NCONFIG, 0x9020);
- /* LEDs active in both modes, autosense prio = fiber */
- phy_write(phy, MII_NCONFIG, 0x945f);
-
- /* switch off fibre autoneg */
- phy_write(phy, MII_NCONFIG, 0xfc01);
- phy_write(phy, 0x0b, 0x0004);
-
- return 0;
-}
-
-static int bcm5461_enable_fiber(struct mii_phy* phy)
-{
- phy_write(phy, MII_NCONFIG, 0xfc0c);
- phy_write(phy, MII_BMCR, 0x4140);
- phy_write(phy, MII_NCONFIG, 0xfc0b);
- phy_write(phy, MII_BMCR, 0x0140);
-
- return 0;
-}
-
static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
{
u16 ctl, adv;
return 0;
}
+#define BCM5421_MODE_MASK (1 << 5)
+
+static int bcm5421_poll_link(struct mii_phy* phy)
+{
+ u32 phy_reg;
+ int mode;
+
+ /* find out in what mode we are */
+ phy_write(phy, MII_NCONFIG, 0x1000);
+ phy_reg = phy_read(phy, MII_NCONFIG);
+
+ mode = (phy_reg & BCM5421_MODE_MASK) >> 5;
+
+ if (mode == BCM54XX_COPPER)
+ return genmii_poll_link(phy);
+
+ /* try to find out whether we have a link */
+ phy_write(phy, MII_NCONFIG, 0x2000);
+ phy_reg = phy_read(phy, MII_NCONFIG);
+
+ if (phy_reg & 0x0020)
+ return 0;
+ else
+ return 1;
+}
+
+static int bcm5421_read_link(struct mii_phy* phy)
+{
+ u32 phy_reg;
+ int mode;
+
+ /* find out in what mode we are */
+ phy_write(phy, MII_NCONFIG, 0x1000);
+ phy_reg = phy_read(phy, MII_NCONFIG);
+
+ mode = (phy_reg & BCM5421_MODE_MASK ) >> 5;
+
+ if ( mode == BCM54XX_COPPER)
+ return bcm54xx_read_link(phy);
+
+ phy->speed = SPEED_1000;
+
+ /* find out whether we are running half- or full duplex */
+ phy_write(phy, MII_NCONFIG, 0x2000);
+ phy_reg = phy_read(phy, MII_NCONFIG);
+
+ if ((phy_reg & 0x0080) >> 7)
+ phy->duplex = DUPLEX_HALF;
+ else
+ phy->duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg)
+{
+ /* enable fiber mode */
+ phy_write(phy, MII_NCONFIG, 0x9020);
+ /* LEDs active in both modes, autosense prio = fiber */
+ phy_write(phy, MII_NCONFIG, 0x945f);
+
+ if (!autoneg) {
+ /* switch off fibre autoneg */
+ phy_write(phy, MII_NCONFIG, 0xfc01);
+ phy_write(phy, 0x0b, 0x0004);
+ }
+
+ phy->autoneg = autoneg;
+
+ return 0;
+}
+
+#define BCM5461_FIBER_LINK (1 << 2)
+#define BCM5461_MODE_MASK (3 << 1)
+
+static int bcm5461_poll_link(struct mii_phy* phy)
+{
+ u32 phy_reg;
+ int mode;
+
+ /* find out in what mode we are */
+ phy_write(phy, MII_NCONFIG, 0x7c00);
+ phy_reg = phy_read(phy, MII_NCONFIG);
+
+ mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
+
+ if ( mode == BCM54XX_COPPER)
+ return genmii_poll_link(phy);
+
+ /* find out whether we have a link */
+ phy_write(phy, MII_NCONFIG, 0x7000);
+ phy_reg = phy_read(phy, MII_NCONFIG);
+
+ if (phy_reg & BCM5461_FIBER_LINK)
+ return 1;
+ else
+ return 0;
+}
+
+#define BCM5461_FIBER_DUPLEX (1 << 3)
+
+static int bcm5461_read_link(struct mii_phy* phy)
+{
+ u32 phy_reg;
+ int mode;
+
+ /* find out in what mode we are */
+ phy_write(phy, MII_NCONFIG, 0x7c00);
+ phy_reg = phy_read(phy, MII_NCONFIG);
+
+ mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
+
+ if ( mode == BCM54XX_COPPER) {
+ return bcm54xx_read_link(phy);
+ }
+
+ phy->speed = SPEED_1000;
+
+ /* find out whether we are running half- or full duplex */
+ phy_write(phy, MII_NCONFIG, 0x7000);
+ phy_reg = phy_read(phy, MII_NCONFIG);
+
+ if (phy_reg & BCM5461_FIBER_DUPLEX)
+ phy->duplex |= DUPLEX_FULL;
+ else
+ phy->duplex |= DUPLEX_HALF;
+
+ return 0;
+}
+
+static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg)
+{
+ /* select fiber mode, enable 1000 base-X registers */
+ phy_write(phy, MII_NCONFIG, 0xfc0b);
+
+ if (autoneg) {
+ /* enable fiber with autonegotiation */
+ phy_write(phy, MII_ADVERTISE, 0x01e0);
+ phy_write(phy, MII_BMCR, 0x1140);
+ } else {
+ /* enable fiber without autonegotiation */
+ phy_write(phy, MII_BMCR, 0x0140);
+ }
+
+ phy->autoneg = autoneg;
+
+ return 0;
+}
+
static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
{
u16 ctl, adv;
return 0;
}
-static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
-{
- u16 ctl, adv;
-
- phy->autoneg = 1;
- phy->speed = SPEED_10;
- phy->duplex = DUPLEX_HALF;
- phy->pause = 0;
- phy->advertising = advertise;
-
- /* Setup standard advertise */
- adv = phy_read(phy, MII_ADVERTISE);
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
- if (advertise & ADVERTISED_10baseT_Half)
- adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- adv |= ADVERTISE_100FULL;
- if (advertise & ADVERTISED_Pause)
- adv |= ADVERTISE_PAUSE_CAP;
- if (advertise & ADVERTISED_Asym_Pause)
- adv |= ADVERTISE_PAUSE_ASYM;
- phy_write(phy, MII_ADVERTISE, adv);
-
- /* Start/Restart aneg */
- ctl = phy_read(phy, MII_BMCR);
- ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
- phy_write(phy, MII_BMCR, ctl);
-
- return 0;
-}
-
-static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
-{
- u16 ctl;
-
- phy->autoneg = 0;
- phy->speed = speed;
- phy->duplex = fd;
- phy->pause = 0;
-
- ctl = phy_read(phy, MII_BMCR);
- ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
-
- /* First reset the PHY */
- phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
-
- /* Select speed & duplex */
- switch(speed) {
- case SPEED_10:
- break;
- case SPEED_100:
- ctl |= BMCR_SPEED100;
- break;
- case SPEED_1000:
- default:
- return -EINVAL;
- }
- if (fd == DUPLEX_FULL)
- ctl |= BMCR_FULLDPLX;
- phy_write(phy, MII_BMCR, ctl);
-
- return 0;
-}
-
-static int genmii_poll_link(struct mii_phy *phy)
-{
- u16 status;
-
- (void)phy_read(phy, MII_BMSR);
- status = phy_read(phy, MII_BMSR);
- if ((status & BMSR_LSTATUS) == 0)
- return 0;
- if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
- return 0;
- return 1;
-}
-
-static int genmii_read_link(struct mii_phy *phy)
-{
- u16 lpa;
-
- if (phy->autoneg) {
- lpa = phy_read(phy, MII_LPA);
-
- if (lpa & (LPA_10FULL | LPA_100FULL))
- phy->duplex = DUPLEX_FULL;
- else
- phy->duplex = DUPLEX_HALF;
- if (lpa & (LPA_100FULL | LPA_100HALF))
- phy->speed = SPEED_100;
- else
- phy->speed = SPEED_10;
- phy->pause = (phy->duplex == DUPLEX_FULL) &&
- ((lpa & LPA_PAUSE) != 0);
- }
- /* On non-aneg, we assume what we put in BMCR is the speed,
- * though magic-aneg shouldn't prevent this case from occurring
- */
-
- return 0;
-}
-
-
#define MII_BASIC_FEATURES \
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
- .poll_link = genmii_poll_link,
- .read_link = bcm54xx_read_link,
+ .poll_link = bcm5421_poll_link,
+ .read_link = bcm5421_read_link,
.enable_fiber = bcm5421_enable_fiber,
};
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
- .poll_link = genmii_poll_link,
- .read_link = bcm54xx_read_link,
+ .poll_link = bcm5461_poll_link,
+ .read_link = bcm5461_read_link,
.enable_fiber = bcm5461_enable_fiber,
};
int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
int (*poll_link)(struct mii_phy *phy);
int (*read_link)(struct mii_phy *phy);
- int (*enable_fiber)(struct mii_phy *phy);
+ int (*enable_fiber)(struct mii_phy *phy, int autoneg);
};
/* Structure used to statically define an mii/gii based PHY */
const struct mii_phy_ops* ops;
};
+enum {
+ BCM54XX_COPPER,
+ BCM54XX_FIBER,
+ BCM54XX_GBIC,
+ BCM54XX_SGMII,
+ BCM54XX_UNKNOWN,
+};
+
/* An instance of a PHY, partially borrowed from mii_if_info */
struct mii_phy
{
dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM);
#endif
} else {
- clear_page(lp->fd_buf);
+ memset(lp->fd_buf, 0, PAGE_SIZE * FD_PAGE_NUM);
#ifdef __mips__
dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM);
#endif
spin_unlock_irqrestore(&lp->lock, flags);
}
-/* XXX */
-void
-tc35815_killall(void)
-{
- struct net_device *dev;
-
- for (dev = root_tc35815_dev; dev; dev = ((struct tc35815_local *)dev->priv)->next_module) {
- if (dev->flags&IFF_UP){
- dev->stop(dev);
- }
- }
-}
-
static struct pci_driver tc35815_driver = {
.name = TC35815_MODULE_NAME,
.probe = tc35815_probe,
{
struct net_device *next_dev;
+ /*
+ * TODO: implement a tc35815_driver.remove hook, and
+ * move this code into that function. Then, delete
+ * all root_tc35815_dev list handling code.
+ */
while (root_tc35815_dev) {
struct net_device *dev = root_tc35815_dev;
next_dev = ((struct tc35815_local *)dev->priv)->next_module;
free_netdev(dev);
root_tc35815_dev = next_dev;
}
+
+ pci_unregister_driver(&tc35815_driver);
}
+
module_init(tc35815_init_module);
module_exit(tc35815_cleanup_module);
tg3_netif_stop(tp);
tg3_full_lock(tp, 0);
- if (tp->vlgrp)
- tp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(tp->vlgrp, vid, NULL);
tg3_full_unlock(tp);
if (netif_running(dev))
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
- || defined(__sparc_) || defined(__ia64__) \
+ || defined(__sparc__) || defined(__ia64__) \
|| defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
.get_regs = de_get_regs,
};
-static void __init de21040_get_mac_address (struct de_private *de)
+static void __devinit de21040_get_mac_address (struct de_private *de)
{
unsigned i;
}
}
-static void __init de21040_get_media_info(struct de_private *de)
+static void __devinit de21040_get_media_info(struct de_private *de)
{
unsigned int i;
return retval;
}
-static void __init de21041_get_srom_info (struct de_private *de)
+static void __devinit de21041_get_srom_info (struct de_private *de)
{
unsigned i, sa_offset = 0, ofs;
u8 ee_data[DE_EEPROM_SIZE + 6] = {};
#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s */
#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s */
-#define DMFE_DBUG(dbug_now, msg, value) if (dmfe_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
+#define DMFE_DBUG(dbug_now, msg, value) \
+ do { \
+ if (dmfe_debug || (dbug_now)) \
+ printk(KERN_ERR DRV_NAME ": %s %lx\n",\
+ (msg), (long) (value)); \
+ } while (0)
-#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+ printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
+ (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
/* CR9 definition: SROM/MII */
#define SROM_V41_CODE 0x14
-#define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);
+#define SROM_CLK_WRITE(data, ioaddr) \
+ outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
+ udelay(5); \
+ outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
+ udelay(5); \
+ outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
+ udelay(5);
+
+#define __CHK_IO_SIZE(pci_id, dev_rev) \
+ (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
+ DM9102A_IO_SIZE: DM9102_IO_SIZE)
-#define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE
-#define CHK_IO_SIZE(pci_dev, dev_rev) __CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)
+#define CHK_IO_SIZE(pci_dev, dev_rev) \
+ (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))
/* Sten Check */
#define DEVICE net_device
struct dmfe_board_info {
u32 chip_id; /* Chip vendor/Device ID */
u32 chip_revision; /* Chip revision */
- struct DEVICE *dev; /* net device */
+ struct DEVICE *next_dev; /* next device */
struct pci_dev *pdev; /* PCI device */
spinlock_t lock;
u8 media_mode; /* user specify media mode */
u8 op_mode; /* real work media mode */
u8 phy_addr;
- u8 link_failed; /* Ever link failed */
u8 wait_reset; /* Hardware failed, need to reset */
u8 dm910x_chk_mode; /* Operating mode check */
u8 first_in_callback; /* Flag to record state */
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
static void dmfe_set_phyxcer(struct dmfe_board_info *);
-/* DM910X network baord routine ---------------------------- */
+/* DM910X network board routine ---------------------------- */
/*
 * Search DM910X board, allocate space and register it
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
- printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
+ printk(KERN_WARNING DRV_NAME
+ ": 32-bit PCI DMA not available.\n");
err = -ENODEV;
goto err_out_free;
}
/* Init system & device */
db = netdev_priv(dev);
- db->dev = dev;
-
/* Allocate Tx/Rx descriptor memory */
- db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
- db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+ db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
+ DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+
+ db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
+ TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
db->first_tx_desc_dma = db->desc_pool_dma_ptr;
dev->poll_controller = &poll_dmfe;
#endif
dev->ethtool_ops = &netdev_ethtool_ops;
- netif_carrier_off(db->dev);
+ netif_carrier_off(dev);
spin_lock_init(&db->lock);
pci_read_config_dword(pdev, 0x50, &pci_pmr);
/* read 64 word srom data */
for (i = 0; i < 64; i++)
- ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
+ ((u16 *) db->srom)[i] =
+ cpu_to_le16(read_srom_word(db->ioaddr, i));
/* Set Node address */
for (i = 0; i < 6; i++)
DMFE_DBUG(0, "dmfe_remove_one()", 0);
if (dev) {
+
+ unregister_netdev(dev);
+
pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
db->desc_pool_dma_ptr);
pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
db->buf_pool_ptr, db->buf_pool_dma_ptr);
- unregister_netdev(dev);
pci_release_regions(pdev);
free_netdev(dev); /* free board information */
+
pci_set_drvdata(pdev, NULL);
}
DMFE_DBUG(0, "dmfe_open", 0);
- ret = request_irq(dev->irq, &dmfe_interrupt, IRQF_SHARED, dev->name, dev);
+ ret = request_irq(dev->irq, &dmfe_interrupt,
+ IRQF_SHARED, dev->name, dev);
if (ret)
return ret;
db->tx_packet_cnt = 0;
db->tx_queue_cnt = 0;
db->rx_avail_cnt = 0;
- db->link_failed = 1;
db->wait_reset = 0;
db->first_in_callback = 0;
/* No Tx resource check, it never happens normally */
if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
- printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_queue_cnt);
+ printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
+ db->tx_queue_cnt);
return 1;
}
#if 0
/* show statistic counter */
- printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+ printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
+ " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
db->tx_fifo_underrun, db->tx_excessive_collision,
db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
struct rx_desc *rxptr;
- struct sk_buff *skb;
+ struct sk_buff *skb, *newskb;
int rxlen;
u32 rdes0;
db->rx_avail_cnt--;
db->interval_rx_cnt++;
- pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
+ RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+
if ( (rdes0 & 0x300) != 0x300) {
/* A packet without First/Last flag */
/* reuse this SKB */
} else {
/* Good packet, send to upper layer */
/* Short packet uses new SKB */
- if ( (rxlen < RX_COPY_SIZE) &&
- ( (skb = dev_alloc_skb(rxlen + 2) )
- != NULL) ) {
+ if ((rxlen < RX_COPY_SIZE) &&
+ ((newskb = dev_alloc_skb(rxlen + 2))
+ != NULL)) {
+
+ skb = newskb;
/* size less than COPY_SIZE, allocate a rxlen SKB */
skb->dev = dev;
skb_reserve(skb, 2); /* 16byte align */
struct dmfe_board_info *db = netdev_priv(dev);
unsigned long flags;
+ int link_ok, link_ok_phy;
+
DMFE_DBUG(0, "dmfe_timer()", 0);
spin_lock_irqsave(&db->lock, flags);
if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
db->cr6_data &= ~0x40000;
update_cr6(db->cr6_data, db->ioaddr);
- phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+ phy_write(db->ioaddr,
+ db->phy_addr, 0, 0x1000, db->chip_id);
db->cr6_data |= 0x40000;
update_cr6(db->cr6_data, db->ioaddr);
db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
(db->chip_revision == 0x02000010)) ) {
/* DM9102A Chip */
if (tmp_cr12 & 2)
- tmp_cr12 = 0x0; /* Link failed */
+ link_ok = 0;
else
- tmp_cr12 = 0x3; /* Link OK */
+ link_ok = 1;
}
+ else
+ /* 0x43 is used instead of 0x3 because bit 6 should
+    represent the link status of the external PHY */
+ link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
+
+
+ /* If the chip reports that the link has failed, it could be because
+    the external PHY's link status pin is not connected correctly to
+    the chip. To be sure, ask the PHY too.
+ */
+
+ /* need a dummy read because of the PHY's register latch */
+ phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
+ link_ok_phy = (phy_read (db->ioaddr,
+ db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
- if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
+ if (link_ok_phy != link_ok) {
+ DMFE_DBUG (0, "PHY and chip report different link status", 0);
+ link_ok = link_ok | link_ok_phy;
+ }
+
+ if ( !link_ok && netif_carrier_ok(dev)) {
/* Link Failed */
DMFE_DBUG(0, "Link Failed", tmp_cr12);
- db->link_failed = 1;
- netif_carrier_off(db->dev);
+ netif_carrier_off(dev);
/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
/* AUTO or force 1M Homerun/Longrun don't need */
if ( !(db->media_mode & 0x38) )
- phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+ phy_write(db->ioaddr, db->phy_addr,
+ 0, 0x1000, db->chip_id);
/* AUTO mode, if INT phyxcer link failed, select EXT device */
if (db->media_mode & DMFE_AUTO) {
db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
update_cr6(db->cr6_data, db->ioaddr);
}
- } else
- if ((tmp_cr12 & 0x3) && db->link_failed) {
- DMFE_DBUG(0, "Link link OK", tmp_cr12);
- db->link_failed = 0;
-
- /* Auto Sense Speed */
- if ( (db->media_mode & DMFE_AUTO) &&
- dmfe_sense_speed(db) )
- db->link_failed = 1;
- else
- netif_carrier_on(db->dev);
- dmfe_process_mode(db);
- /* SHOW_MEDIA_TYPE(db->op_mode); */
+ } else if (!netif_carrier_ok(dev)) {
+
+ DMFE_DBUG(0, "Link link OK", tmp_cr12);
+
+ /* Auto Sense Speed */
+ if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
+ netif_carrier_on(dev);
+ SHOW_MEDIA_TYPE(db->op_mode);
}
+ dmfe_process_mode(db);
+ }
+
/* HPNA remote command check */
if (db->HPNA_command & 0xf00) {
db->HPNA_timer--;
db->tx_packet_cnt = 0;
db->tx_queue_cnt = 0;
db->rx_avail_cnt = 0;
- db->link_failed = 1;
+ netif_carrier_off(dev);
db->wait_reset = 0;
/* Re-initialize DM910X board */
if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
rxptr->rx_skb_ptr = skb;
- rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
+ skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
db->rx_avail_cnt++;
outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
/* rx descriptor start pointer */
- db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
- db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
+ db->first_rx_desc = (void *)db->first_tx_desc +
+ sizeof(struct tx_desc) * TX_DESC_CNT;
+
+ db->first_rx_desc_dma = db->first_tx_desc_dma +
+ sizeof(struct tx_desc) * TX_DESC_CNT;
db->rx_insert_ptr = db->first_rx_desc;
db->rx_ready_ptr = db->first_rx_desc;
outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
- rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
+ RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
rxptr = rxptr->next_rx_desc;
for (i = 16; i > 0; i--) {
outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
udelay(5);
- srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
+ srom_data = (srom_data << 1) |
+ ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
udelay(5);
}
if ( (phy_mode & 0x24) == 0x24 ) {
if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
- phy_mode = phy_read(db->ioaddr, db->phy_addr, 7, db->chip_id) & 0xf000;
+ phy_mode = phy_read(db->ioaddr,
+ db->phy_addr, 7, db->chip_id) & 0xf000;
else /* DM9102/DM9102A */
- phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000;
+ phy_mode = phy_read(db->ioaddr,
+ db->phy_addr, 17, db->chip_id) & 0xf000;
/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
switch (phy_mode) {
case 0x1000: db->op_mode = DMFE_10MHF; break;
/* DM9009 Chip: Phyxcer reg18 bit12=0 */
if (db->chip_id == PCI_DM9009_ID) {
- phy_reg = phy_read(db->ioaddr, db->phy_addr, 18, db->chip_id) & ~0x1000;
- phy_write(db->ioaddr, db->phy_addr, 18, phy_reg, db->chip_id);
+ phy_reg = phy_read(db->ioaddr,
+ db->phy_addr, 18, db->chip_id) & ~0x1000;
+
+ phy_write(db->ioaddr,
+ db->phy_addr, 18, phy_reg, db->chip_id);
}
/* Phyxcer capability setting */
case DMFE_100MHF: phy_reg = 0x2000; break;
case DMFE_100MFD: phy_reg = 0x2100; break;
}
- phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+ phy_write(db->ioaddr,
+ db->phy_addr, 0, phy_reg, db->chip_id);
if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
mdelay(20);
- phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+ phy_write(db->ioaddr,
+ db->phy_addr, 0, phy_reg, db->chip_id);
}
}
}
* Write a word to Phy register
*/
-static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
+static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
+ u16 phy_data, u32 chip_id)
{
u16 i;
unsigned long ioaddr;
/* Send Phy address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+ phy_write_1bit(ioaddr,
+ phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
/* Send register address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
+ phy_write_1bit(ioaddr,
+ offset & i ? PHY_DATA_1 : PHY_DATA_0);
/* written transition */
phy_write_1bit(ioaddr, PHY_DATA_1);
/* Write a word data to PHY controller */
for ( i = 0x8000; i > 0; i >>= 1)
- phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
+ phy_write_1bit(ioaddr,
+ phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
}
}
/* Send Phy address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+ phy_write_1bit(ioaddr,
+ phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
/* Send register address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
+ phy_write_1bit(ioaddr,
+ offset & i ? PHY_DATA_1 : PHY_DATA_0);
/* Skip transition state */
phy_read_1bit(ioaddr);
/* Check whether the remote device status matches our setting or not */
if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
- phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
+ phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
+ db->chip_id);
db->HPNA_timer=8;
} else
db->HPNA_timer=600; /* Match, every 10 minutes, check */
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
-MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
-MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
+MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
+ "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
+
+MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
+ "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
/* Description:
* when user used insmod to add module, system invoked init_module()
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
- || defined(__sparc_) || defined(__ia64__) \
+ || defined(__sparc__) || defined(__ia64__) \
|| defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
{
struct typhoon *tp = netdev_priv(dev);
spin_lock_bh(&tp->state_lock);
- if(tp->vlgrp)
- tp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(tp->vlgrp, vid, NULL);
spin_unlock_bh(&tp->state_lock);
}
/* Move to next BD in the ring */
if (!(bd_status & T_W))
- ugeth->txBd[txQ] = bd + sizeof(struct qe_bd);
+ bd += sizeof(struct qe_bd);
else
- ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+ bd = ugeth->p_tx_bd_ring[txQ];
/* If the next BD still needs to be cleaned up, then the bds
are full. We need to tell the kernel to stop sending us stuff. */
if (bd == ugeth->confBd[txQ]) {
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
}
+ ugeth->txBd[txQ] = bd;
+
if (ugeth->p_scheduler) {
ugeth->cpucount[txQ]++;
/* Indicate to QE that there are more Tx bds ready for
spin_unlock_irq(&ugeth->lock);
- return 0;
+ return NETDEV_TX_OK;
}
static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
- if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+ if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
break;
ugeth->stats.tx_packets++;
/* Advance the confirmation BD pointer */
if (!(bd_status & T_W))
- ugeth->confBd[txQ] += sizeof(struct qe_bd);
+ bd += sizeof(struct qe_bd);
else
- ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+ bd = ugeth->p_tx_bd_ring[txQ];
+ bd_status = in_be32((u32 *)bd);
}
+ ugeth->confBd[txQ] = bd;
return 0;
}
ugeth->ug_info = ug_info;
ugeth->dev = dev;
- mac_addr = get_property(np, "mac-address", NULL);
- if (mac_addr == NULL)
- mac_addr = get_property(np, "local-mac-address", NULL);
+ mac_addr = of_get_mac_address(np);
if (mac_addr)
memcpy(dev->dev_addr, mac_addr, 6);
#include <linux/device.h>
#undef COSA_SLOW_IO /* for testing purposes only */
-#undef REALLY_SLOW_IO
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/hdlc.h>
-static const char* version = "HDLC support module revision 1.20";
+static const char* version = "HDLC support module revision 1.21";
#undef DEBUG_LINK
return -EINVAL;
}
+static void hdlc_setup_dev(struct net_device *dev)
+{
+ /* Re-init all variables changed by HDLC protocol drivers,
+ * including ether_setup() called from hdlc_raw_eth.c.
+ */
+ dev->get_stats = hdlc_get_stats;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->mtu = HDLC_MAX_MTU;
+ dev->type = ARPHRD_RAWHDLC;
+ dev->hard_header_len = 16;
+ dev->addr_len = 0;
+ dev->hard_header = NULL;
+ dev->rebuild_header = NULL;
+ dev->set_mac_address = NULL;
+ dev->hard_header_cache = NULL;
+ dev->header_cache_update = NULL;
+ dev->change_mtu = hdlc_change_mtu;
+ dev->hard_header_parse = NULL;
+}
+
static void hdlc_setup(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
- dev->get_stats = hdlc_get_stats;
- dev->change_mtu = hdlc_change_mtu;
- dev->mtu = HDLC_MAX_MTU;
-
- dev->type = ARPHRD_RAWHDLC;
- dev->hard_header_len = 16;
-
- dev->flags = IFF_POINTOPOINT | IFF_NOARP;
-
+ hdlc_setup_dev(dev);
hdlc->carrier = 1;
hdlc->open = 0;
spin_lock_init(&hdlc->state_lock);
}
kfree(hdlc->state);
hdlc->state = NULL;
+ hdlc_setup_dev(dev);
}
memcpy(&state(hdlc)->settings, &new_settings, size);
dev->hard_start_xmit = hdlc->xmit;
dev->hard_header = cisco_hard_header;
- dev->hard_header_cache = NULL;
dev->type = ARPHRD_CISCO;
- dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- dev->addr_len = 0;
netif_dormant_on(dev);
return 0;
}
memcpy(&state(hdlc)->settings, &new_settings, size);
dev->hard_start_xmit = hdlc->xmit;
- dev->hard_header = NULL;
dev->type = ARPHRD_FRAD;
- dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- dev->addr_len = 0;
return 0;
case IF_PROTO_FR_ADD_PVC:
if (result)
return result;
dev->hard_start_xmit = hdlc->xmit;
- dev->hard_header = NULL;
dev->type = ARPHRD_PPP;
- dev->addr_len = 0;
netif_dormant_off(dev);
return 0;
}
return result;
memcpy(hdlc->state, &new_settings, size);
dev->hard_start_xmit = hdlc->xmit;
- dev->hard_header = NULL;
dev->type = ARPHRD_RAWHDLC;
- dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- dev->addr_len = 0;
netif_dormant_off(dev);
return 0;
}
x25_rx, 0)) != 0)
return result;
dev->hard_start_xmit = x25_xmit;
- dev->hard_header = NULL;
dev->type = ARPHRD_X25;
- dev->addr_len = 0;
netif_dormant_off(dev);
return 0;
}
* dangling pins on the second core. Be careful
* and ignore these cores here.
*/
- if (bcm->pci_dev->device != 0x4324) {
- dprintk(KERN_INFO PFX "Ignoring additional 802.11 core.\n");
+ if (1 /*bcm->pci_dev->device != 0x4324*/ ) {
+ /* TODO: handle the 802.11a PHY */
+ dprintk(KERN_INFO PFX "Ignoring additional 802.11a core.\n");
continue;
}
}
* Tested with Planet AP in 2.5.73-bk, 216 Kbytes/s in Infrastructure mode
* with a SMP machine (dual pentium 100), using pktgen, 432 pps (pkt_size = 60)
*/
-#undef REALLY_SLOW_IO /* most systems can safely undef this */
#include <linux/delay.h>
#include <linux/types.h>
return 0;
}
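+/*
+ * Helpers that set or clear the enable bit in the MSI (and, below,
+ * MSI-X) capability's control register. They are a no-op when the
+ * device lacks the corresponding capability.
+ */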
+static void msi_set_enable(struct pci_dev *dev, int enable)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ }
+}
+
+static void msix_set_enable(struct pci_dev *dev, int enable)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ }
+}
+
static void msi_set_mask_bit(unsigned int irq, int flag)
{
struct msi_desc *entry;
mask_bits &= ~(1);
mask_bits |= flag;
pci_write_config_dword(entry->dev, pos, mask_bits);
+ } else {
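+		/* No per-vector mask bits: mask by toggling the MSI enable bit. */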
+ msi_set_enable(entry->dev, !flag);
}
break;
case PCI_CAP_ID_MSIX:
return entry;
}
-static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
-{
- u16 control;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (type == PCI_CAP_ID_MSI) {
- /* Set enabled bits to single MSI & enable MSI_enable bit */
- msi_enable(control, 1);
- pci_write_config_word(dev, msi_control_reg(pos), control);
- dev->msi_enabled = 1;
- } else {
- msix_enable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
- dev->msix_enabled = 1;
- }
-
- pci_intx(dev, 0); /* disable intx */
-}
-
-void disable_msi_mode(struct pci_dev *dev, int pos, int type)
-{
- u16 control;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (type == PCI_CAP_ID_MSI) {
- /* Set enabled bits to single MSI & enable MSI_enable bit */
- msi_disable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
- dev->msi_enabled = 0;
- } else {
- msix_disable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
- dev->msix_enabled = 0;
- }
-
- pci_intx(dev, 1); /* enable intx */
-}
-
#ifdef CONFIG_PM
static int __pci_save_msi_state(struct pci_dev *dev)
{
struct pci_cap_saved_state *save_state;
u32 *cap;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (pos <= 0 || dev->no_msi)
+ if (!dev->msi_enabled)
return 0;
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE))
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos <= 0)
return 0;
save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
struct pci_cap_saved_state *save_state;
u32 *cap;
+ if (!dev->msi_enabled)
+ return;
+
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
if (!save_state || pos <= 0)
return;
cap = &save_state->data[0];
+ pci_intx(dev, 0); /* disable intx */
control = cap[i++] >> 16;
+ msi_set_enable(dev, 0);
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
if (control & PCI_MSI_FLAGS_64BIT) {
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
if (control & PCI_MSI_FLAGS_MASKBIT)
pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
pci_remove_saved_cap(save_state);
kfree(save_state);
}
return 0;
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (pos <= 0 || dev->no_msi)
+ if (pos <= 0)
return 0;
/* save the capability */
pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSIX_FLAGS_ENABLE))
- return 0;
save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
GFP_KERNEL);
if (!save_state) {
return;
/* route the table */
+ pci_intx(dev, 0); /* disable intx */
+ msix_set_enable(dev, 0);
irq = head = dev->first_msi_irq;
while (head != tail) {
entry = get_irq_msi(irq);
}
pci_write_config_word(dev, msi_control_reg(pos), save);
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
void pci_restore_msi_state(struct pci_dev *dev)
int pos, irq;
u16 control;
+	msi_set_enable(dev, 0);	/* Ensure MSI is disabled while we set it up */
+
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
pci_read_config_word(dev, msi_control_reg(pos), &control);
/* MSI Entry Initialization */
set_irq_msi(irq, entry);
/* Set MSI enabled bits */
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+ pci_intx(dev, 0); /* disable intx */
+ msi_set_enable(dev, 1);
+ dev->msi_enabled = 1;
dev->irq = irq;
return 0;
u8 bir;
void __iomem *base;
+	msix_set_enable(dev, 0);	/* Ensure MSI-X is disabled while we set it up */
+
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
/* Request & Map MSI-X table region */
pci_read_config_word(dev, msi_control_reg(pos), &control);
}
dev->first_msi_irq = entries[0].vector;
/* Set MSI-X enabled bits */
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+ pci_intx(dev, 0); /* disable intx */
+ msix_set_enable(dev, 1);
+ dev->msix_enabled = 1;
return 0;
}
WARN_ON(!!dev->msi_enabled);
/* Check whether driver already requested for MSI-X irqs */
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (pos > 0 && dev->msix_enabled) {
- printk(KERN_INFO "PCI: %s: Can't enable MSI. "
- "Device already has MSI-X enabled\n",
- pci_name(dev));
- return -EINVAL;
+ if (dev->msix_enabled) {
+ printk(KERN_INFO "PCI: %s: Can't enable MSI. "
+ "Device already has MSI-X enabled\n",
+ pci_name(dev));
+ return -EINVAL;
}
status = msi_capability_init(dev);
return status;
void pci_disable_msi(struct pci_dev* dev)
{
struct msi_desc *entry;
- int pos, default_irq;
- u16 control;
+ int default_irq;
if (!pci_msi_enable)
return;
if (!dev->msi_enabled)
return;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (!pos)
- return;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE))
- return;
-
-
- disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+ msi_set_enable(dev, 0);
+ pci_intx(dev, 1); /* enable intx */
+ dev->msi_enabled = 0;
entry = get_irq_msi(dev->first_msi_irq);
if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
WARN_ON(!!dev->msix_enabled);
/* Check whether driver already requested for MSI irq */
- if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
- dev->msi_enabled) {
+ if (dev->msi_enabled) {
printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
"Device already has an MSI irq assigned\n",
pci_name(dev));
void pci_disable_msix(struct pci_dev* dev)
{
int irq, head, tail = 0, warning = 0;
- int pos;
- u16 control;
if (!pci_msi_enable)
return;
if (!dev->msix_enabled)
return;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (!pos)
- return;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSIX_FLAGS_ENABLE))
- return;
-
- disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+ msix_set_enable(dev, 0);
+ pci_intx(dev, 1); /* enable intx */
+ dev->msix_enabled = 0;
irq = head = dev->first_msi_irq;
while (head != tail) {
if (atomic_sub_return(1, &dev->enable_cnt) != 0)
return;
- if (dev->msi_enabled)
- disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
- PCI_CAP_ID_MSI);
- if (dev->msix_enabled)
- disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
- PCI_CAP_ID_MSIX);
-
pci_read_config_word(dev, PCI_COMMAND, &pci_command);
if (pci_command & PCI_COMMAND_MASTER) {
pci_command &= ~PCI_COMMAND_MASTER;
}
}
+/**
+ * pci_msi_off - disables any MSI or MSI-X capabilities
+ * @dev: the PCI device to operate on
+ *
+ * If you want to use MSI, see pci_enable_msi() and friends.
+ * This is a lower-level primitive that allows us to disable
+ * MSI operation at the device level.
+ */
+void pci_msi_off(struct pci_dev *dev)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ }
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ }
+}
+
#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
* These can be overridden by arch-specific implementations
extern unsigned int pci_pm_d3_delay;
#ifdef CONFIG_PCI_MSI
-void disable_msi_mode(struct pci_dev *dev, int pos, int type);
void pci_no_msi(void);
#else
-static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { }
static inline void pci_no_msi(void) { }
#endif
dev->irq = irq;
}
-#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
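+/*
+ * Update a legacy IDE BAR: record the fixed legacy range in the
+ * device's resource table and, when the current BAR looks sane,
+ * write the new base back to config space as well.
+ */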
+static void change_legacy_io_resource(struct pci_dev * dev, unsigned index,
+ unsigned start, unsigned end)
+{
+ unsigned base = start & PCI_BASE_ADDRESS_IO_MASK;
+ unsigned len = (end | ~PCI_BASE_ADDRESS_IO_MASK) - base + 1;
+
+ /*
+ * Some X versions get confused when the BARs reported through
+ * /sys or /proc differ from those seen in config space, thus
+ * try to update the config space values, too.
+ */
+ if (!(pci_resource_flags(dev, index) & IORESOURCE_IO))
+ printk(KERN_WARNING "%s: cannot adjust BAR%u (not I/O)\n",
+ pci_name(dev), index);
+ else if (pci_resource_len(dev, index) != len)
+ printk(KERN_WARNING "%s: cannot adjust BAR%u (size %04X)\n",
+ pci_name(dev), index, (unsigned)pci_resource_len(dev, index));
+ else {
+ printk(KERN_INFO "%s: trying to change BAR%u from %04X to %04X\n",
+ pci_name(dev), index,
+ (unsigned)pci_resource_start(dev, index), base);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + index * 4, base);
+ }
+ pci_resource_start(dev, index) = start;
+ pci_resource_end(dev, index) = end;
+ pci_resource_flags(dev, index) =
+ IORESOURCE_IO | IORESOURCE_PCI_FIXED | PCI_BASE_ADDRESS_SPACE_IO;
+}
/**
* pci_setup_device - fill in class and map information of a device
u8 progif;
pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
if ((progif & 1) == 0) {
- dev->resource[0].start = 0x1F0;
- dev->resource[0].end = 0x1F7;
- dev->resource[0].flags = LEGACY_IO_RESOURCE;
- dev->resource[1].start = 0x3F6;
- dev->resource[1].end = 0x3F6;
- dev->resource[1].flags = LEGACY_IO_RESOURCE;
+ change_legacy_io_resource(dev, 0, 0x1F0, 0x1F7);
+ change_legacy_io_resource(dev, 1, 0x3F6, 0x3F6);
}
if ((progif & 4) == 0) {
- dev->resource[2].start = 0x170;
- dev->resource[2].end = 0x177;
- dev->resource[2].flags = LEGACY_IO_RESOURCE;
- dev->resource[3].start = 0x376;
- dev->resource[3].end = 0x376;
- dev->resource[3].flags = LEGACY_IO_RESOURCE;
+ change_legacy_io_resource(dev, 2, 0x170, 0x177);
+ change_legacy_io_resource(dev, 3, 0x376, 0x376);
}
}
break;
* do this early on to make the additional device appear during
* the PCI scanning.
*/
-
-static void quirk_jmicron_dualfn(struct pci_dev *pdev)
+static void quirk_jmicron_ata(struct pci_dev *pdev)
{
- u32 conf;
+ u32 conf1, conf5, class;
u8 hdr;
/* Only poke fn 0 */
if (PCI_FUNC(pdev->devfn))
return;
- switch(pdev->device) {
- case PCI_DEVICE_ID_JMICRON_JMB365:
- case PCI_DEVICE_ID_JMICRON_JMB366:
- /* Redirect IDE second PATA port to the right spot */
- pci_read_config_dword(pdev, 0x80, &conf);
- conf |= (1 << 24);
- /* Fall through */
- pci_write_config_dword(pdev, 0x80, conf);
- case PCI_DEVICE_ID_JMICRON_JMB361:
- case PCI_DEVICE_ID_JMICRON_JMB363:
- pci_read_config_dword(pdev, 0x40, &conf);
- /* Enable dual function mode, AHCI on fn 0, IDE fn1 */
- /* Set the class codes correctly and then direct IDE 0 */
- conf &= ~0x000FF200; /* Clear bit 9 and 12-19 */
- conf |= 0x00C2A102; /* Set 1, 8, 13, 15, 17, 22, 23 */
- pci_write_config_dword(pdev, 0x40, conf);
-
- /* Reconfigure so that the PCI scanner discovers the
- device is now multifunction */
-
- pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
- pdev->hdr_type = hdr & 0x7f;
- pdev->multifunction = !!(hdr & 0x80);
+ pci_read_config_dword(pdev, 0x40, &conf1);
+ pci_read_config_dword(pdev, 0x80, &conf5);
- break;
+ conf1 &= ~0x00CFF302; /* Clear bit 1, 8, 9, 12-19, 22, 23 */
+ conf5 &= ~(1 << 24); /* Clear bit 24 */
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_JMICRON_JMB360:
+ /* The controller should be in single function ahci mode */
+ conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
+ break;
+
+ case PCI_DEVICE_ID_JMICRON_JMB365:
+ case PCI_DEVICE_ID_JMICRON_JMB366:
+ /* Redirect IDE second PATA port to the right spot */
+ conf5 |= (1 << 24);
+ /* Fall through */
+ case PCI_DEVICE_ID_JMICRON_JMB361:
+ case PCI_DEVICE_ID_JMICRON_JMB363:
+ /* Enable dual function mode, AHCI on fn 0, IDE fn1 */
+ /* Set the class codes correctly and then direct IDE 0 */
+ conf1 |= 0x00C2A102; /* Set 1, 8, 13, 15, 17, 22, 23 */
+ break;
+
+ case PCI_DEVICE_ID_JMICRON_JMB368:
+ /* The controller should be in single function IDE mode */
+ conf1 |= 0x00C00000; /* Set 22, 23 */
+ break;
}
-}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn);
+
+ pci_write_config_dword(pdev, 0x40, conf1);
+ pci_write_config_dword(pdev, 0x80, conf5);
+
+ /* Update pdev accordingly */
+ pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
+ pdev->hdr_type = hdr & 0x7f;
+ pdev->multifunction = !!(hdr & 0x80);
+
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
+ pdev->class = class >> 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
#endif
*/
static void __devinit quirk_pcie_pxh(struct pci_dev *dev)
{
- disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
- PCI_CAP_ID_MSI);
+ pci_msi_off(dev);
+
dev->no_msi = 1;
printk(KERN_WARNING "PCI: PXH quirk detected, "
config RTC_DRV_PCF8583
tristate "Philips PCF8583"
- depends on RTC_CLASS && I2C
+ depends on RTC_CLASS && I2C && ARCH_RPC
help
- If you say yes here you get support for the
- Philips PCF8583 RTC chip.
+ If you say yes here you get support for the Philips PCF8583
+ RTC chip found on Acorn RiscPCs. This driver supports the
+ platform specific method of retrieving the current year from
+ the RTC's SRAM.
This driver can also be built as a module. If so, the module
will be called rtc-pcf8583.
*/
void rtc_device_unregister(struct rtc_device *rtc)
{
- mutex_lock(&rtc->ops_lock);
- rtc->ops = NULL;
- mutex_unlock(&rtc->ops_lock);
- class_device_unregister(&rtc->class_dev);
+ if (class_device_get(&rtc->class_dev) != NULL) {
+ mutex_lock(&rtc->ops_lock);
+ /* remove innards of this RTC, then disable it, before
+ * letting any rtc_class_open() users access it again
+ */
+ class_device_unregister(&rtc->class_dev);
+ rtc->ops = NULL;
+ mutex_unlock(&rtc->ops_lock);
+ class_device_put(&rtc->class_dev);
+ }
}
EXPORT_SYMBOL_GPL(rtc_device_unregister);
down(&rtc_class->sem);
list_for_each_entry(class_dev_tmp, &rtc_class->children, node) {
if (strncmp(class_dev_tmp->class_id, name, BUS_ID_SIZE) == 0) {
- class_dev = class_dev_tmp;
+ class_dev = class_device_get(class_dev_tmp);
break;
}
}
void rtc_class_close(struct class_device *class_dev)
{
module_put(to_rtc_device(class_dev)->owner);
+ class_device_put(class_dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);
#define CTRL_ALARM 0x02
#define CTRL_TIMER 0x01
-static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+static unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END };
/* Module parameters */
I2C_CLIENT_INSMOD;
buf[4] &= 0x3f;
buf[5] &= 0x1f;
- dt->tm_sec = BCD_TO_BIN(buf[1]);
- dt->tm_min = BCD_TO_BIN(buf[2]);
- dt->tm_hour = BCD_TO_BIN(buf[3]);
- dt->tm_mday = BCD_TO_BIN(buf[4]);
- dt->tm_mon = BCD_TO_BIN(buf[5]);
+ dt->tm_sec = BCD2BIN(buf[1]);
+ dt->tm_min = BCD2BIN(buf[2]);
+ dt->tm_hour = BCD2BIN(buf[3]);
+ dt->tm_mday = BCD2BIN(buf[4]);
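+ /* struct rtc_time months run 0-11; the chip stores 1-12. */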
+ dt->tm_mon = BCD2BIN(buf[5]) - 1;
}
return ret == 2 ? 0 : -EIO;
buf[0] = 0;
buf[1] = get_ctrl(client) | 0x80;
buf[2] = 0;
- buf[3] = BIN_TO_BCD(dt->tm_sec);
- buf[4] = BIN_TO_BCD(dt->tm_min);
- buf[5] = BIN_TO_BCD(dt->tm_hour);
+ buf[3] = BIN2BCD(dt->tm_sec);
+ buf[4] = BIN2BCD(dt->tm_min);
+ buf[5] = BIN2BCD(dt->tm_hour);
if (datetoo) {
len = 8;
- buf[6] = BIN_TO_BCD(dt->tm_mday) | (dt->tm_year << 6);
- buf[7] = BIN_TO_BCD(dt->tm_mon) | (dt->tm_wday << 5);
+ buf[6] = BIN2BCD(dt->tm_mday) | (dt->tm_year << 6);
+ buf[7] = BIN2BCD(dt->tm_mon + 1) | (dt->tm_wday << 5);
}
ret = i2c_master_send(client, (char *)buf, len);
*/
year_offset += 4;
- tm->tm_year = real_year + year_offset + year[1] * 100;
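+ /* struct rtc_time counts years from 1900. */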
+ tm->tm_year = (real_year + year_offset + year[1] * 100) - 1900;
return 0;
}
unsigned char year[2], chk;
struct rtc_mem cmos_year = { CMOS_YEAR, sizeof(year), year };
struct rtc_mem cmos_check = { CMOS_CHECKSUM, 1, &chk };
+ unsigned int proper_year = tm->tm_year + 1900;
int ret;
/*
chk -= year[1] + year[0];
- year[1] = tm->tm_year / 100;
- year[0] = tm->tm_year % 100;
+ year[1] = proper_year / 100;
+ year[0] = proper_year % 100;
chk += year[1] + year[0];
cqr->device = device;
cqr->retries = 255;
cqr->expires = 10 * HZ;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS;
cqr->cpaddr->count = SNSS_DATA_SIZE;
return PTR_ERR(request);
request->op = TO_NOP;
/* setup ccws */
- *device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08;
+ if (mt_count == 0)
+ *device->modeset_byte &= ~0x08;
+ else
+ *device->modeset_byte |= 0x08;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/* execute it */
ret = stsch(sch->schid, &sch->schib);
if (ret || !sch->schib.pmcw.dnv)
return -ENODEV;
- if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
- /* Not operational or no activity -> done. */
+ if (!sch->schib.pmcw.ena)
+ /* Not operational -> done. */
return 0;
/* Stage 1: cancel io. */
if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
+ unsigned long flags;
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
+ spin_lock_irqsave(cdev->ccwlock, flags);
sch = to_subchannel(cdev->dev.parent);
- ret = (sch->driver && sch->driver->notify) ?
- sch->driver->notify(&sch->dev, CIO_OPER) : 0;
- if (!ret)
- /* Driver doesn't want device back. */
- ccw_device_do_unreg_rereg(work);
- else {
+ if (sch->driver && sch->driver->notify) {
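+		/* Drop the ccw device lock while calling out to the driver. */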
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ ret = sch->driver->notify(&sch->dev, CIO_OPER);
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ } else
+ ret = 0;
+ if (ret) {
/* Reenable channel measurements, if needed. */
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
cmf_reenable(cdev);
+ spin_lock_irqsave(cdev->ccwlock, flags);
wake_up(&cdev->private->wait_q);
}
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ if (!ret)
+ /* Driver doesn't want device back. */
+ ccw_device_do_unreg_rereg(work);
}
/*
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
+ unsigned long flags;
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
+ spin_lock_irqsave(cdev->ccwlock, flags);
sch = to_subchannel(cdev->dev.parent);
/* Extra sanity. */
if (sch->lpm)
- return;
- ret = (sch->driver && sch->driver->notify) ?
- sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
+ goto out_unlock;
+ if (sch->driver && sch->driver->notify) {
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ ret = sch->driver->notify(&sch->dev, CIO_NO_PATH);
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ } else
+ ret = 0;
if (!ret) {
if (get_device(&sch->dev)) {
/* Driver doesn't want to keep device. */
cdev->private->state = DEV_STATE_DISCONNECTED;
wake_up(&cdev->private->wait_q);
}
+out_unlock:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
}
void
default:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work, &cdev->private->kick_work);
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ if (cdev->online) {
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_nopath_notify);
+ queue_work(ccw_device_notify_work,
+ &cdev->private->kick_work);
+ } else
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
}
ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
+ int ret;
sch = to_subchannel(cdev->dev.parent);
- if (sch->driver->notify &&
- sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
- ccw_device_set_timeout(cdev, 0);
- cdev->private->flags.fake_irb = 0;
- cdev->private->state = DEV_STATE_DISCONNECTED;
- wake_up(&cdev->private->wait_q);
- return;
+ if (sch->driver->notify) {
+ spin_unlock_irq(cdev->ccwlock);
+ ret = sch->driver->notify(&sch->dev,
+ sch->lpm ? CIO_GONE : CIO_NO_PATH);
+ spin_lock_irq(cdev->ccwlock);
+ } else
+ ret = 0;
+ if (ret) {
+ ccw_device_set_timeout(cdev, 0);
+ cdev->private->flags.fake_irb = 0;
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ wake_up(&cdev->private->wait_q);
+ return;
}
cdev->private->state = DEV_STATE_NOT_OPER;
cio_disable_subchannel(sch);
sch = to_subchannel(cdev->dev.parent);
ccw_device_set_timeout(cdev, 0);
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
/* OK, i/o is dead now. Call interrupt handler. */
- cdev->private->state = DEV_STATE_ONLINE;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
- if (!sch->lpm) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work, &cdev->private->kick_work);
- } else if (cdev->private->flags.doverify)
- /* Start delayed path verification. */
- ccw_device_online_verify(cdev, 0);
}
static void
ccw_device_set_timeout(cdev, 3*HZ);
return;
}
- if (ret == -ENODEV) {
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- if (!sch->lpm) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work,
- &cdev->private->kick_work);
- } else
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- return;
- }
- //FIXME: Can we get here?
- cdev->private->state = DEV_STATE_ONLINE;
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
- if (ret == -ENODEV) {
- if (!sch->lpm) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work,
- &cdev->private->kick_work);
- } else
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- return;
- }
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
- if (!sch->lpm) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work, &cdev->private->kick_work);
- } else
- /* Start delayed path verification. */
- ccw_device_online_verify(cdev, 0);
}
static void
return rc;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
- if (vg->vlan_devices[i] == dev){
+ if (vlan_group_get_device(vg, i) == dev){
rc = QETH_VLAN_CARD;
break;
}
QETH_DBF_TEXT(trace, 4, "frvaddr4");
rcu_read_lock();
- in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
+ in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid));
if (!in_dev)
goto out;
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
QETH_DBF_TEXT(trace, 4, "frvaddr6");
- in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
+ in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in6_dev)
return;
for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
if (!card->vlangrp)
return;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- if (card->vlangrp->vlan_devices[i] == NULL)
+ if (vlan_group_get_device(card->vlangrp, i) == NULL)
continue;
if (clear)
qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
spin_lock_irqsave(&card->vlanlock, flags);
/* unregister IP addresses of vlan device */
qeth_free_vlan_addresses(card, vid);
- if (card->vlangrp)
- card->vlangrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(card->vlangrp, vid, NULL);
spin_unlock_irqrestore(&card->vlanlock, flags);
if (card->options.layer2)
qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
vg = card->vlangrp;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- if (vg->vlan_devices[i] == NULL ||
- !(vg->vlan_devices[i]->flags & IFF_UP))
+ struct net_device *netdev = vlan_group_get_device(vg, i);
+ if (netdev == NULL ||
+ !(netdev->flags & IFF_UP))
continue;
- in_dev = in_dev_get(vg->vlan_devices[i]);
+ in_dev = in_dev_get(netdev);
if (!in_dev)
continue;
read_lock(&in_dev->mc_list_lock);
vg = card->vlangrp;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- if (vg->vlan_devices[i] == NULL ||
- !(vg->vlan_devices[i]->flags & IFF_UP))
+ struct net_device *netdev = vlan_group_get_device(vg, i);
+ if (netdev == NULL ||
+ !(netdev->flags & IFF_UP))
continue;
- in_dev = in6_dev_get(vg->vlan_devices[i]);
+ in_dev = in6_dev_get(netdev);
if (!in_dev)
continue;
read_lock(&in_dev->lock);
dma_dir = DMA_MODE_READ,
alatch_dir = ALATCH_DMA_IN;
- dma_map_sg(dev, info->sg, bufs + 1, map_dir);
+ dma_map_sg(dev, info->sg, bufs, map_dir);
disable_dma(dmach);
- set_dma_sg(dmach, info->sg, bufs + 1);
+ set_dma_sg(dmach, info->sg, bufs);
writeb(alatch_dir, info->base + CUMANASCSI2_ALATCH);
set_dma_mode(dmach, dma_dir);
enable_dma(dmach);
map_dir = DMA_FROM_DEVICE,
dma_dir = DMA_MODE_READ;
- dma_map_sg(dev, info->sg, bufs + 1, map_dir);
+ dma_map_sg(dev, info->sg, bufs, map_dir);
disable_dma(dmach);
- set_dma_sg(dmach, info->sg, bufs + 1);
+ set_dma_sg(dmach, info->sg, bufs);
set_dma_mode(dmach, dma_dir);
enable_dma(dmach);
return fasdma_real_all;
BUG_ON(bytes_transferred < 0);
- info->SCpnt->request_bufflen -= bytes_transferred;
+ SCp->phase -= bytes_transferred;
while (bytes_transferred != 0) {
if (SCp->this_residual > bytes_transferred)
return;
if (dmatype == fasdma_real_all)
- total = info->SCpnt->request_bufflen;
+ total = info->scsi.SCp.phase;
else
total = info->scsi.SCp.this_residual;
fas216_log(info, LOG_BUFFER,
"starttransfer: buffer %p length 0x%06x reqlen 0x%06x",
info->scsi.SCp.ptr, info->scsi.SCp.this_residual,
- info->SCpnt->request_bufflen);
+ info->scsi.SCp.phase);
if (!info->scsi.SCp.ptr) {
fas216_log(info, LOG_ERROR, "null buffer passed to "
info->dma.transfer_type = dmatype;
if (dmatype == fasdma_real_all)
- fas216_set_stc(info, info->SCpnt->request_bufflen);
+ fas216_set_stc(info, info->scsi.SCp.phase);
else
fas216_set_stc(info, info->scsi.SCp.this_residual);
SCpnt->SCp.buffers_residual = 0;
SCpnt->SCp.ptr = (char *)SCpnt->sense_buffer;
SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer);
+ SCpnt->SCp.phase = sizeof(SCpnt->sense_buffer);
SCpnt->SCp.Message = 0;
SCpnt->SCp.Status = 0;
SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer);
map_dir = DMA_FROM_DEVICE,
dma_dir = DMA_MODE_READ;
- dma_map_sg(dev, info->sg, bufs + 1, map_dir);
+ dma_map_sg(dev, info->sg, bufs, map_dir);
disable_dma(dmach);
- set_dma_sg(dmach, info->sg, bufs + 1);
+ set_dma_sg(dmach, info->sg, bufs);
set_dma_mode(dmach, dma_dir);
enable_dma(dmach);
return fasdma_real_all;
info->base = base;
powertecscsi_terminator_ctl(host, term[ec->slot_no]);
+ info->ec = ec;
info->info.scsi.io_base = base + POWERTEC_FAS216_OFFSET;
info->info.scsi.io_shift = POWERTEC_FAS216_SHIFT;
info->info.scsi.irq = ec->irq;
(page_address(SCpnt->SCp.buffer->page) +
SCpnt->SCp.buffer->offset);
SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ SCpnt->SCp.phase = SCpnt->request_bufflen;
#ifdef BELT_AND_BRACES
/*
} else {
SCpnt->SCp.ptr = (unsigned char *)SCpnt->request_buffer;
SCpnt->SCp.this_residual = SCpnt->request_bufflen;
+ SCpnt->SCp.phase = SCpnt->request_bufflen;
}
/*
* This routine deals with inputs from any lines.
* ------------------------------------------------------------
*/
-static inline void dz_receive_chars(struct dz_port *dport_in,
- struct pt_regs *regs)
+static inline void dz_receive_chars(struct dz_port *dport_in)
{
struct dz_port *dport;
struct tty_struct *tty = NULL;
break;
}
- if (uart_handle_sysrq_char(&dport->port, ch, regs))
+ if (uart_handle_sysrq_char(&dport->port, ch))
continue;
if ((status & dport->port.ignore_status_mask) == 0) {
status = dz_in(dport, DZ_CSR);
if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE))
- dz_receive_chars(dport, regs);
+ dz_receive_chars(dport);
if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE))
dz_transmit_chars(dport);
* -------------------------------------------------------------------
*/
-static void mcfrs_offintr(void *private)
+static void mcfrs_offintr(struct work_struct *work)
{
- struct mcf_serial *info = (struct mcf_serial *) private;
- struct tty_struct *tty;
+ struct mcf_serial *info = container_of(work, struct mcf_serial, tqueue);
+ struct tty_struct *tty = info->tty;
- tty = info->tty;
- if (!tty)
- return;
- tty_wakeup(tty);
+ if (tty)
+ tty_wakeup(tty);
}
* do_serial_hangup() -> tty->hangup() -> mcfrs_hangup()
*
*/
-static void do_serial_hangup(void *private)
+static void do_serial_hangup(struct work_struct *work)
{
- struct mcf_serial *info = (struct mcf_serial *) private;
- struct tty_struct *tty;
+ struct mcf_serial *info = container_of(work, struct mcf_serial, tqueue_hangup);
+ struct tty_struct *tty = info->tty;
- tty = info->tty;
- if (!tty)
- return;
-
- tty_hangup(tty);
+ if (tty)
+ tty_hangup(tty);
}
static int startup(struct mcf_serial * info)
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
- printk("throttle %s: %d....\n", _tty_name(tty, buf),
+ printk("throttle %s: %d....\n", tty_name(tty, buf),
tty->ldisc.chars_in_buffer(tty));
#endif
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
- printk("unthrottle %s: %d....\n", _tty_name(tty, buf),
+ printk("unthrottle %s: %d....\n", tty_name(tty, buf),
tty->ldisc.chars_in_buffer(tty));
#endif
* External Pin Mask Setting & Enable External Pin for Interface
* mrcbis@aliceposta.it
*/
- unsigned short *serpin_enable_mask;
- serpin_enable_mask = (MCF_IPSBAR + MCF_GPIO_PAR_UART);
+ u16 *serpin_enable_mask;
+ serpin_enable_mask = (u16 *) (MCF_IPSBAR + MCF_GPIO_PAR_UART);
if (info->line == 0)
*serpin_enable_mask |= UART0_ENABLE_MASK;
else if (info->line == 1)
*serpin_enable_mask |= UART2_ENABLE_MASK;
}
#endif
+#if defined(CONFIG_M528x)
+ /* make sure PUAPAR is set for UART0 and UART1 */
+ if (info->line < 2) {
+ volatile unsigned char *portp = (volatile unsigned char *) (MCF_MBAR + MCF5282_GPIO_PUAPAR);
+ *portp |= (0x03 << (info->line * 2));
+ }
+#endif
#elif defined(CONFIG_M520x)
volatile unsigned char *icrp, *uartp;
volatile unsigned long *imrp;
info->event = 0;
info->count = 0;
info->blocked_open = 0;
- INIT_WORK(&info->tqueue, mcfrs_offintr, info);
- INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+ INIT_WORK(&info->tqueue, mcfrs_offintr);
+ INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
return IRQ_HANDLED;
}
-/**
- * sn_sal_connect_interrupt - Request interrupt, handled by sn_sal_interrupt
- * @port: Our sn_cons_port (which contains the uart port)
- *
- * returns the console irq if interrupt is successfully registered, else 0
- *
- */
-static int sn_sal_connect_interrupt(struct sn_cons_port *port)
-{
- if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
- IRQF_DISABLED | IRQF_SHARED,
- "SAL console driver", port) >= 0) {
- return SGI_UART_VECTOR;
- }
-
- printk(KERN_INFO "sn_console: console proceeding in polled mode\n");
- return 0;
-}
-
/**
* sn_sal_timer_poll - this function handles polled console mode
* @data: A pointer to our sn_cons_port (which contains the uart port)
* mode. We were previously in asynch/polling mode (using init_timer).
*
* We attempt to switch to interrupt mode here by calling
- * sn_sal_connect_interrupt. If that works out, we enable receive interrupts.
+ * request_irq. If that works out, we enable receive interrupts.
*/
static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port)
{
- int irq;
unsigned long flags;
- if (!port)
- return;
-
- DPRINTF("sn_console: switching to interrupt driven console\n");
-
- spin_lock_irqsave(&port->sc_port.lock, flags);
+ if (port) {
+ DPRINTF("sn_console: switching to interrupt driven console\n");
- irq = sn_sal_connect_interrupt(port);
+ if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
+ IRQF_DISABLED | IRQF_SHARED,
+ "SAL console driver", port) >= 0) {
+ spin_lock_irqsave(&port->sc_port.lock, flags);
+ port->sc_port.irq = SGI_UART_VECTOR;
+ port->sc_ops = &intr_ops;
- if (irq) {
- port->sc_port.irq = irq;
- port->sc_ops = &intr_ops;
-
- /* turn on receive interrupts */
- ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
+ /* turn on receive interrupts */
+ ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
+ spin_unlock_irqrestore(&port->sc_port.lock, flags);
+ }
+ else {
+ printk(KERN_INFO
+ "sn_console: console proceeding in polled mode\n");
+ }
}
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
}
/*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
- * Copyright (c) 2006 Jiri Kosina
+ * Copyright (c) 2006-2007 Jiri Kosina
*/
/*
#include <linux/input.h>
#include <linux/wait.h>
-#undef DEBUG
-#undef DEBUG_DATA
-
#include <linux/usb.h>
#include <linux/hid.h>
#define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802
#define USB_VENDOR_ID_CODEMERCS 0x07c0
-#define USB_DEVICE_ID_CODEMERCS_IOW40 0x1500
-#define USB_DEVICE_ID_CODEMERCS_IOW24 0x1501
-#define USB_DEVICE_ID_CODEMERCS_IOW48 0x1502
-#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1503
+#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500
+#define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff
#define USB_VENDOR_ID_DELORME 0x1163
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_USB_RECEIVER 0xc101
+#define USB_DEVICE_ID_LOGITECH_USB_RECEIVER_2 0xc517
+#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
unsigned quirks;
} hid_blacklist[] = {
+ { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES },
+
{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW48, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS },
+ { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER_2, HID_QUIRK_LOGITECH_S510_DESCRIPTOR },
{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
kfree(buf);
}
+/*
+ * The Logitech S510 keyboard sends, in report #3, keys which are far
+ * above the logical maximum described in the descriptor. This extends
+ * the logical maximum from its original value of 0x28c to 0x104d.
+ */
+static void hid_fixup_s510_descriptor(unsigned char *rdesc, int rsize)
+{
+ if (rsize >= 90 && rdesc[83] == 0x26
+ && rdesc[84] == 0x8c
+ && rdesc[85] == 0x02) {
+ info("Fixing up Logitech S510 report descriptor");
+ rdesc[84] = rdesc[89] = 0x4d;
+ rdesc[85] = rdesc[90] = 0x10;
+ }
+}
+
static struct hid_device *usb_hid_configure(struct usb_interface *intf)
{
struct usb_host_interface *interface = intf->cur_altsetting;
int n, len, insize = 0;
struct usbhid_device *usbhid;
- /* Ignore all Wacom devices */
- if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_WACOM)
- return NULL;
+ /* Ignore all Wacom devices */
+ if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_WACOM)
+ return NULL;
+ /* ignore all Code Mercenaries IOWarrior devices */
+ if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_CODEMERCS)
+ if (le16_to_cpu(dev->descriptor.idProduct) >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
+ le16_to_cpu(dev->descriptor.idProduct) <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
+ return NULL;
for (n = 0; hid_blacklist[n].idVendor; n++)
if ((hid_blacklist[n].idVendor == le16_to_cpu(dev->descriptor.idVendor)) &&
if ((quirks & HID_QUIRK_CYMOTION))
hid_fixup_cymotion_descriptor(rdesc, rsize);
-#ifdef DEBUG_DATA
+ if (quirks & HID_QUIRK_LOGITECH_S510_DESCRIPTOR)
+ hid_fixup_s510_descriptor(rdesc, rsize);
+
+#ifdef CONFIG_HID_DEBUG
printk(KERN_DEBUG __FILE__ ": report descriptor (size %u, read %d) = ", rsize, n);
for (n = 0; n < rsize; n++)
printk(" %02x", (unsigned char) rdesc[n]);
le16_to_cpu(dev->descriptor.idProduct));
hid->bus = BUS_USB;
- hid->vendor = dev->descriptor.idVendor;
- hid->product = dev->descriptor.idProduct;
+ hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
+ hid->product = le16_to_cpu(dev->descriptor.idProduct);
usb_make_path(dev, hid->phys, sizeof(hid->phys));
strlcat(hid->phys, "/input", sizeof(hid->phys));
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
-#include <linux/utsrelease.h>
+#include <linux/utsname.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
idesc->bInterfaceSubClass,
idesc->bInterfaceProtocol,
msgs[msg],
- UTS_RELEASE);
+ utsname()->release);
}
return 0;
config FB_NVIDIA
tristate "nVidia Framebuffer Support"
depends on FB && PCI
- select I2C_ALGOBIT if FB_NVIDIA_I2C
- select I2C if FB_NVIDIA_I2C
select FB_BACKLIGHT if FB_NVIDIA_BACKLIGHT
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
config FB_NVIDIA_I2C
bool "Enable DDC Support"
depends on FB_NVIDIA
+ select FB_DDC
help
This enables I2C support for nVidia Chipsets. This is used
only for getting EDID information from the attached display
config FB_RIVA
tristate "nVidia Riva support"
depends on FB && PCI
- select FB_DDC if FB_RIVA_I2C
select FB_BACKLIGHT if FB_RIVA_BACKLIGHT
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
config FB_RIVA_I2C
bool "Enable DDC Support"
depends on FB_RIVA
+ select FB_DDC
help
This enables I2C support for nVidia Chipsets. This is used
only for getting EDID information from the attached display
depends on FB && EXPERIMENTAL && PCI && X86
select AGP
select AGP_INTEL
- select I2C_ALGOBIT if FB_INTEL_I2C
- select I2C if FB_INTEL_I2C
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
config FB_INTEL_I2C
bool "DDC/I2C for Intel framebuffer support"
depends on FB_INTEL
+ select FB_DDC
default y
help
Say Y here if you want DDC/I2C support for your on-board Intel graphics.
config FB_MATROX_I2C
tristate "Matrox I2C support"
- depends on FB_MATROX && I2C
- select I2C_ALGOBIT
+ depends on FB_MATROX
+ select FB_DDC
---help---
This drivers creates I2C buses which are needed for accessing the
DDC (I2C) bus present on all Matroxes, an I2C bus which
config FB_RADEON
tristate "ATI Radeon display support"
depends on FB && PCI
- select FB_DDC if FB_RADEON_I2C
select FB_BACKLIGHT if FB_RADEON_BACKLIGHT
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
config FB_RADEON_I2C
bool "DDC/I2C for ATI Radeon support"
depends on FB_RADEON
+ select FB_DDC
default y
help
Say Y here if you want DDC/I2C support for your Radeon board.
config FB_SAVAGE
tristate "S3 Savage support"
depends on FB && PCI && EXPERIMENTAL
- select FB_DDC if FB_SAVAGE_I2C
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
config FB_SAVAGE_I2C
bool "Enable DDC2 Support"
depends on FB_SAVAGE
+ select FB_DDC
help
This enables I2C support for S3 Savage Chipsets. This is used
only for getting EDID information from the attached display
config FB_PS3
bool "PS3 GPU framebuffer driver"
- depends on FB && PPC_PS3
- select PS3_PS3AV
+ depends on FB && PS3_PS3AV
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
static int mtrr = 1;
#endif
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
+
/* PLL constants */
struct aty128_constants {
u32 ref_clk;
} else if (!strncmp(this_opt, "crt:", 4)) {
default_crt_on = simple_strtoul(this_opt+4, NULL, 0);
continue;
+ } else if (!strncmp(this_opt, "backlight:", 10)) {
+ backlight = simple_strtoul(this_opt+10, NULL, 0);
+ continue;
}
#ifdef CONFIG_MTRR
if(!strncmp(this_opt, "nomtrr", 6)) {
par->lock_blank = 0;
#ifdef CONFIG_FB_ATY128_BACKLIGHT
- aty128_bl_init(par);
+ if (backlight)
+ aty128_bl_init(par);
#endif
if (register_framebuffer(info) < 0)
#endif
}
-#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD)
+#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
+defined (CONFIG_FB_ATY_GENERIC_LCD) || defined (CONFIG_FB_ATY_BACKLIGHT)
extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par);
extern u32 aty_ld_lcd(int index, const struct atyfb_par *par);
#endif
#define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args)
#define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args)
-#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD)
+#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
+defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT)
static const u32 lt_lcd_regs[] = {
CONFIG_PANEL_LG,
LCD_GEN_CNTL_LG,
static int comp_sync __devinitdata = -1;
static char *mode;
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
+
#ifdef CONFIG_PPC
static int default_vmode __devinitdata = VMODE_CHOOSE;
static int default_cmode __devinitdata = CMODE_CHOOSE;
| (USE_F32KHZ | TRISTATE_MEM_EN), par);
} else
#endif
- if (M64_HAS(MOBIL_BUS)) {
+ if (M64_HAS(MOBIL_BUS) && backlight) {
#ifdef CONFIG_FB_ATY_BACKLIGHT
aty_bl_init (par);
#endif
xclk = simple_strtoul(this_opt+5, NULL, 0);
else if (!strncmp(this_opt, "comp_sync:", 10))
comp_sync = simple_strtoul(this_opt+10, NULL, 0);
+ else if (!strncmp(this_opt, "backlight:", 10))
+ backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_PPC
else if (!strncmp(this_opt, "vmode:", 6)) {
unsigned int vmode =
struct atyfb_par *par = info->par;
if (par->mclk_per != par->xclk_per) {
- int i;
/*
* This disables the sclk, crashes the computer as reported:
* aty_st_pll_ct(SPLL_CNTL2, 3, info);
* helps for Rage Mobilities that sometimes crash when
* we switch to sclk. (Daniel Mantione, 13-05-2003)
*/
- for (i=0;i<=0x1ffff;i++);
+ udelay(500);
}
aty_st_pll_ct(PLL_REF_DIV, pll->ct.pll_ref_div, par);
#endif
static int force_sleep;
static int ignore_devlist;
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight = 1;
+#else
+static int backlight = 0;
+#endif
/*
* prototypes
break;
}
- /* let fbcon do a soft blank for us */
- return (blank == FB_BLANK_NORMAL) ? -EINVAL : 0;
+ return 0;
}
static int radeonfb_blank (int blank, struct fb_info *info)
MTRR_TYPE_WRCOMB, 1);
#endif
- radeonfb_bl_init(rinfo);
+ if (backlight)
+ radeonfb_bl_init(rinfo);
printk ("radeonfb (%s): %s\n", pci_name(rinfo->pdev), rinfo->name);
force_dfp = 1;
} else if (!strncmp(this_opt, "panel_yres:", 11)) {
panel_yres = simple_strtoul((this_opt+11), NULL, 0);
+ } else if (!strncmp(this_opt, "backlight:", 10)) {
+ backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = 1;
#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/pci.h>
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
+
#include "nv_local.h"
#include "nv_type.h"
#include "nv_proto.h"
#define MAX_LEVEL 0x534
#define LEVEL_STEP ((MAX_LEVEL - MIN_LEVEL) / FB_BACKLIGHT_MAX)
-static struct backlight_properties nvidia_bl_data;
-
static int nvidia_bl_get_level_brightness(struct nvidia_par *par,
int level)
{
0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL);
bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
- bd->props.brightness = nvidia_bl_data.max_brightness;
+ bd->props.brightness = bd->props.max_brightness;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
#ifdef CONFIG_MTRR
static int nomtrr __devinitdata = 0;
#endif
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
static char *mode_option __devinitdata = NULL;
nvidia_save_vga(par, &par->SavedReg);
pci_set_drvdata(pd, info);
- nvidia_bl_init(par);
+
+ if (backlight)
+ nvidia_bl_init(par);
+
if (register_framebuffer(info) < 0) {
printk(KERN_ERR PFX "error registering nVidia framebuffer\n");
goto err_out_iounmap_fb;
paneltweak = simple_strtoul(this_opt+11, NULL, 0);
} else if (!strncmp(this_opt, "vram:", 5)) {
vram = simple_strtoul(this_opt+5, NULL, 0);
+ } else if (!strncmp(this_opt, "backlight:", 10)) {
+ backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = 1;
#ifdef CONFIG_MTRR
static int nomtrr __devinitdata = 0;
#endif
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
static char *mode_option __devinitdata = NULL;
static int strictmode = 0;
info->monspecs.modedb = NULL;
pci_set_drvdata(pd, info);
- riva_bl_init(info->par);
+
+ if (backlight)
+ riva_bl_init(info->par);
+
ret = register_framebuffer(info);
if (ret < 0) {
printk(KERN_ERR PFX
forceCRTC = -1;
} else if (!strncmp(this_opt, "flatpanel", 9)) {
flatpanel = 1;
+ } else if (!strncmp(this_opt, "backlight:", 10)) {
+ backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = 1;
#define SM501_MEMF_CRT (4)
#define SM501_MEMF_ACCEL (8)
-int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
- unsigned int why, size_t size)
+static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
+ unsigned int why, size_t size)
{
unsigned int ptr = 0;
* set or change the hardware cursor parameters
*/
-int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
+static int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
if (len < 1)
return -EINVAL;
- if (strnicmp(buf, "crt", sizeof("crt")) == 0)
+ if (strnicmp(buf, "crt", 3) == 0)
head = HEAD_CRT;
- else if (strnicmp(buf, "panel", sizeof("panel")) == 0)
+ else if (strnicmp(buf, "panel", 5) == 0)
head = HEAD_PANEL;
else
return -EINVAL;
writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
sm501fb_sync_regs(info);
- return (head == HEAD_CRT) ? 3 : 5;
+ return len;
}
/* Prepare the device_attr for registration with sysfs later */
* initialise hw cursor parameters
*/
-int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
+static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
{
struct sm501fb_par *par = fbi->par;
struct sm501fb_info *info = par->info;
},
};
-int __devinit sm501fb_init(void)
+static int __devinit sm501fb_init(void)
{
return platform_driver_register(&sm501fb_driver);
}
}
EXPORT_SYMBOL(nobh_prepare_write);
+/*
+ * Make sure any changes to nobh_commit_write() are reflected in
+ * nobh_truncate_page(), since it doesn't call commit_write().
+ */
int nobh_commit_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
+ /*
+ * It would be more correct to call aops->commit_write()
+ * here, but this is more efficient.
+ */
+ SetPageUptodate(page);
set_page_dirty(page);
}
unlock_page(page);
+Version 1.48
+------------
+Fix mtime bouncing between the local idea of the last write time and the
+remote time. Fix hang (in i_size_read) when a simultaneous size update of
+the same remote file on an SMP system corrupts the sequence number. Do not
+unnecessarily reread a partial page (which we are about to overwrite anyway)
+when writing out a file opened read-write.
+
Version 1.47
------------
Fix oops in list_del during mount caused by unaligned string.
#
obj-$(CONFIG_CIFS) += cifs.o
-cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o
+cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o export.o
d) Kerberos/SPNEGO session setup support - (started)
-e) NTLMv2 authentication (mostly implemented)
+e) NTLMv2 authentication (mostly implemented - double-check
+that NTLMv2 signing works; also need to clean up the now-unneeded SessSetup
+code in fs/cifs/connect.c)
f) MD5-HMAC signing SMB PDUs when SPNEGO style SessionSetup
used (Kerberos or NTLMSSP). Signing already implemented for NTLM
time to the client (default time, of now or time 0 is used now for these
very old servers)
-x) Add support for OS/2 (LANMAN 1.2 and LANMAN2.1 based SMB servers)
+x) In the support for OS/2 (LANMAN 1.2 and LANMAN 2.1 based SMB servers),
+need to add the ability to set the time on the server (utimes command)
y) Finish testing of Windows 9x/Windows ME server support (started).
-KNOWN BUGS (updated April 29, 2005)
+KNOWN BUGS (updated February 26, 2007)
====================================
See http://bugzilla.samba.org - search on product "CifsVFS" for
current bug list.
succeed but still return access denied (appears to be Windows
server not cifs client problem) and has not been reproduced recently.
NTFS partitions do not have this problem.
-4) debug connectathon lock test case 10 which fails against
-Samba (may be unmappable due to POSIX to Windows lock model
-differences but worth investigating). Also debug Samba to
-see why lock test case 7 takes longer to complete to Samba
-than to Windows.
Misc testing to do
==================
types. Try nested symlinks (8 deep). Return max path name in stat -f information
2) Modify file portion of ltp so it can run against a mounted network
-share and run it against cifs vfs.
+share and run it against cifs vfs in an automated fashion.
3) Additional performance testing and optimization using iozone and similar -
there are some easy changes that can be done to parallelize sequential writes,
/*
* fs/cifs/cifsfs.c
*
- * Copyright (C) International Business Machines Corp., 2002,2004
+ * Copyright (C) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Common Internet FileSystem (CIFS) client
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
-#endif
+#endif /* QUOTA */
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+extern struct export_operations cifs_export_ops;
+#endif /* EXPERIMENTAL */
int cifsFYI = 0;
int cifsERROR = 1;
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct * oplockThread; /* remove sparse warning */
struct task_struct * oplockThread = NULL;
-extern struct task_struct * dnotifyThread; /* remove sparse warning */
-struct task_struct * dnotifyThread = NULL;
+/* extern struct task_struct * dnotifyThread; remove sparse warning */
+static struct task_struct * dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
sb->s_magic = CIFS_MAGIC_NUMBER;
sb->s_op = &cifs_super_ops;
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+ if(experimEnabled != 0)
+ sb->s_export_op = &cifs_export_ops;
+#endif /* EXPERIMENTAL */
/* if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
/* Functions related to super block operations */
/* extern const struct super_operations cifs_super_ops;*/
extern void cifs_read_inode(struct inode *);
-extern void cifs_delete_inode(struct inode *);
-/* extern void cifs_write_inode(struct inode *); *//* BB not needed yet */
+/*extern void cifs_delete_inode(struct inode *);*/ /* BB not needed yet */
+/* extern void cifs_write_inode(struct inode *); */ /* BB not needed yet */
/* Functions related to inodes */
extern const struct inode_operations cifs_dir_inode_ops;
*/
GLOBAL_EXTERN struct smbUidInfo *GlobalUidList[UID_HASH];
-GLOBAL_EXTERN struct list_head GlobalServerList; /* BB not implemented yet */
+/* GLOBAL_EXTERN struct list_head GlobalServerList; BB not implemented yet */
GLOBAL_EXTERN struct list_head GlobalSMBSessionList;
GLOBAL_EXTERN struct list_head GlobalTreeConnectionList;
GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */
GLOBAL_EXTERN struct list_head GlobalOplock_Q;
-GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; /* Outstanding dir notify requests */
-GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q;/* DirNotify response queue */
+/* Outstanding dir notify requests */
+GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
+/* DirNotify response queue */
+GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q;
/*
* Global transaction id (XID) information
*/
#define CIFS_NO_HANDLE 0xFFFF
+#define NO_CHANGE_64 0xFFFFFFFFFFFFFFFFULL
+#define NO_CHANGE_32 0xFFFFFFFFUL
+
/* IPC$ in ASCII */
#define CIFS_IPC_RESOURCE "\x49\x50\x43\x24"
#define FreeXid(curr_xid) {_FreeXid(curr_xid); cFYI(1,("CIFS VFS: leaving %s (xid = %d) rc = %d",__FUNCTION__,curr_xid,(int)rc));}
extern char *build_path_from_dentry(struct dentry *);
extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
-extern void renew_parental_timestamps(struct dentry *direntry);
+/* extern void renew_parental_timestamps(struct dentry *direntry);*/
extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
pSMB->Reserved4 = 0;
pSMB->hdr.smb_buf_length += byte_count;
+ /* The Samba server ignores a set of file size to zero due to bugs in
+ some older clients, but we should be precise - we use SetFileSize to
+ set the file size and do not want to truncate it to zero accidentally,
+ as happened on one Samba server beta when zero was put here instead
+ of -1 */
+ data_offset->EndOfFile = NO_CHANGE_64;
+ data_offset->NumOfBytes = NO_CHANGE_64;
+ data_offset->LastStatusChange = NO_CHANGE_64;
+ data_offset->LastAccessTime = NO_CHANGE_64;
+ data_offset->LastModificationTime = NO_CHANGE_64;
data_offset->Uid = cpu_to_le64(uid);
data_offset->Gid = cpu_to_le64(gid);
/* better to leave device as zero when it is */
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
-void
+static void
renew_parental_timestamps(struct dentry *direntry)
{
/* BB check if there is a way to get the kernel to do this or if we really need this */
--- /dev/null
+/*
+ * fs/cifs/export.c
+ *
+ * Copyright (C) International Business Machines Corp., 2007
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ * Common Internet FileSystem (CIFS) client
+ *
+ * Operations related to support for exporting files via NFSD
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+ /*
+ * See Documentation/filesystems/Exporting
+ * and examples in fs/exportfs
+ */
+
+#include <linux/fs.h>
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+
+static struct dentry *cifs_get_parent(struct dentry *dentry)
+{
+ /* BB need to add code here eventually to enable export via NFSD */
+ return ERR_PTR(-EACCES);
+}
+
+struct export_operations cifs_export_ops = {
+ .get_parent = cifs_get_parent,
+/* The following five export operations are unneeded so far and can default */
+/* .get_dentry =
+ .get_name =
+ .find_exported_dentry =
+ .decode_fh =
+ .encode_fh = */
+ };
+
+#endif /* EXPERIMENTAL */
+
cifs_stats_bytes_written(pTcon, total_written);
/* since the write may have blocked check these pointers again */
- if (file->f_path.dentry) {
- if (file->f_path.dentry->d_inode) {
- struct inode *inode = file->f_path.dentry->d_inode;
- inode->i_ctime = inode->i_mtime =
- current_fs_time(inode->i_sb);
- if (total_written > 0) {
- if (*poffset > file->f_path.dentry->d_inode->i_size)
- i_size_write(file->f_path.dentry->d_inode,
+ if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
+ struct inode *inode = file->f_path.dentry->d_inode;
+/* Do not update local mtime - server will set its actual value on write
+ * inode->i_ctime = inode->i_mtime =
+ * current_fs_time(inode->i_sb);*/
+ if (total_written > 0) {
+ spin_lock(&inode->i_lock);
+ if (*poffset > file->f_path.dentry->d_inode->i_size)
+ i_size_write(file->f_path.dentry->d_inode,
*poffset);
- }
- mark_inode_dirty_sync(file->f_path.dentry->d_inode);
+ spin_unlock(&inode->i_lock);
}
+ mark_inode_dirty_sync(file->f_path.dentry->d_inode);
}
FreeXid(xid);
return total_written;
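The i_lock taken in this and the following hunks is what cures the
i_size_read() hang noted in the CHANGES entry: on 32-bit SMP kernels
i_size_write() bumps a seqcount, and two unserialized writers can leave that
count odd, making readers spin forever. A minimal sketch of the rule, with a
hypothetical helper name:

/* Sketch: all i_size_write() calls must be serialized (here by i_lock);
 * i_size_read() callers need no lock. example_extend_isize() is a
 * hypothetical helper, not part of this patch. */
static void example_extend_isize(struct inode *inode, loff_t new_size)
{
	spin_lock(&inode->i_lock);
	if (new_size > i_size_read(inode))
		i_size_write(inode, new_size);	/* updates the seqcount */
	spin_unlock(&inode->i_lock);
}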
cifs_stats_bytes_written(pTcon, total_written);
/* since the write may have blocked check these pointers again */
- if (file->f_path.dentry) {
- if (file->f_path.dentry->d_inode) {
+ if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
/*BB We could make this contingent on superblock ATIME flag too */
-/* file->f_path.dentry->d_inode->i_ctime =
- file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
- if (total_written > 0) {
- if (*poffset > file->f_path.dentry->d_inode->i_size)
- i_size_write(file->f_path.dentry->d_inode,
- *poffset);
- }
- mark_inode_dirty_sync(file->f_path.dentry->d_inode);
+/* file->f_path.dentry->d_inode->i_ctime =
+ file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
+ if (total_written > 0) {
+ spin_lock(&file->f_path.dentry->d_inode->i_lock);
+ if (*poffset > file->f_path.dentry->d_inode->i_size)
+ i_size_write(file->f_path.dentry->d_inode,
+ *poffset);
+ spin_unlock(&file->f_path.dentry->d_inode->i_lock);
}
+ mark_inode_dirty_sync(file->f_path.dentry->d_inode);
}
FreeXid(xid);
return total_written;
xid = GetXid();
cFYI(1, ("commit write for page %p up to position %lld for %d",
page, position, to));
+ spin_lock(&inode->i_lock);
if (position > inode->i_size) {
i_size_write(inode, position);
/* if (file->private_data == NULL) {
cFYI(1, (" SetEOF (commit write) rc = %d", rc));
} */
}
+ spin_unlock(&inode->i_lock);
if (!PageUptodate(page)) {
position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
/* can not rely on (or let) writepage write this data */
unsigned from, unsigned to)
{
int rc = 0;
- loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+ loff_t i_size;
+ loff_t offset;
+
cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
- if (!PageUptodate(page)) {
- /* if (to - from != PAGE_CACHE_SIZE) {
- void *kaddr = kmap_atomic(page, KM_USER0);
+ if (PageUptodate(page))
+ return 0;
+
+ /* If we are writing a full page it will be up to date,
+ no need to read from the server */
+ if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
+ SetPageUptodate(page);
+ return 0;
+ }
+
+ offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+ i_size = i_size_read(page->mapping->host);
+
+ if ((offset >= i_size) ||
+ ((from == 0) && (offset + to) >= i_size)) {
+ /*
+ * We don't need to read data beyond the end of the file.
+ * Zero it, and set the page uptodate.
+ */
+ void *kaddr = kmap_atomic(page, KM_USER0);
+
+ if (from)
memset(kaddr, 0, from);
+ if (to < PAGE_CACHE_SIZE)
memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- } */
- /* If we are writing a full page it will be up to date,
- no need to read from the server */
- if ((to == PAGE_CACHE_SIZE) && (from == 0))
- SetPageUptodate(page);
-
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr, KM_USER0);
+ SetPageUptodate(page);
+ } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
/* might as well read a page, it is fast enough */
- if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
- rc = cifs_readpage_worker(file, page, &offset);
- } else {
- /* should we try using another file handle if there is one -
- how would we lock it to prevent close of that handle
- racing with this read?
- In any case this will be written out by commit_write */
- }
+ rc = cifs_readpage_worker(file, page, &offset);
+ } else {
+ /* we could try using another file handle if there is one -
+ but how would we lock it to prevent close of that handle
+ racing with this read? In any case
+ this will be written out by commit_write so is fine */
}
- /* BB should we pass any errors back?
- e.g. if we do not have read access to the file */
+ /* we do not need to pass errors back
+ e.g. if we do not have read access to the file
+ because cifs_commit_write will do the right thing. -- shaggy */
+
return 0;
}
inode->i_gid = le64_to_cpu(findData.Gid);
inode->i_nlink = le64_to_cpu(findData.Nlinks);
+ spin_lock(&inode->i_lock);
if (is_size_safe_to_change(cifsInfo, end_of_file)) {
/* can not safely change the file size here if the
client is writing to it due to potential races */
-
i_size_write(inode, end_of_file);
/* blksize needs to be multiple of two. So safer to default to
/* for this calculation */
inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
}
+ spin_unlock(&inode->i_lock);
if (num_of_bytes < end_of_file)
cFYI(1, ("allocation size less than end of file"));
/* BB add code here -
validate if device or weird share or device type? */
}
+
+ spin_lock(&inode->i_lock);
if (is_size_safe_to_change(cifsInfo, le64_to_cpu(pfindData->EndOfFile))) {
/* can not safely shrink the file size here if the
client is writing to it due to potential races */
inode->i_blocks = (512 - 1 + le64_to_cpu(
pfindData->AllocationSize)) >> 9;
}
+ spin_unlock(&inode->i_lock);
inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);
if (!rc) {
drop_nlink(inode);
+ spin_lock(&direntry->d_inode->i_lock);
i_size_write(direntry->d_inode,0);
clear_nlink(direntry->d_inode);
+ spin_unlock(&direntry->d_inode->i_lock);
}
cifsInode = CIFS_I(direntry->d_inode);
return rc;
}
+static int cifs_vmtruncate(struct inode * inode, loff_t offset)
+{
+ struct address_space *mapping = inode->i_mapping;
+ unsigned long limit;
+
+ spin_lock(&inode->i_lock);
+ if (inode->i_size < offset)
+ goto do_expand;
+ /*
+ * truncation of in-use swapfiles is disallowed - it would cause
+ * subsequent swapout to scribble on the now-freed blocks.
+ */
+ if (IS_SWAPFILE(inode)) {
+ spin_unlock(&inode->i_lock);
+ goto out_busy;
+ }
+ i_size_write(inode, offset);
+ spin_unlock(&inode->i_lock);
+ unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+ truncate_inode_pages(mapping, offset);
+ goto out_truncate;
+
+do_expand:
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && offset > limit) {
+ spin_unlock(&inode->i_lock);
+ goto out_sig;
+ }
+ if (offset > inode->i_sb->s_maxbytes) {
+ spin_unlock(&inode->i_lock);
+ goto out_big;
+ }
+ i_size_write(inode, offset);
+ spin_unlock(&inode->i_lock);
+out_truncate:
+ if (inode->i_op && inode->i_op->truncate)
+ inode->i_op->truncate(inode);
+ return 0;
+out_sig:
+ send_sig(SIGXFSZ, current, 0);
+out_big:
+ return -EFBIG;
+out_busy:
+ return -ETXTBSY;
+}
+
int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
{
int xid;
*/
if (rc == 0) {
- rc = vmtruncate(direntry->d_inode, attrs->ia_size);
+ rc = cifs_vmtruncate(direntry->d_inode, attrs->ia_size);
cifs_truncate_page(direntry->d_inode->i_mapping,
direntry->d_inode->i_size);
} else
return rc;
}
+#if 0
void cifs_delete_inode(struct inode *inode)
{
cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode));
/* may have to add back in if and when safe distributed caching of
directories added e.g. via FindNotify */
}
+#endif
*
* Directory search handling
*
- * Copyright (C) International Business Machines Corp., 2004, 2005
+ * Copyright (C) International Business Machines Corp., 2004, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
atomic_set(&cifsInfo->inUse, 1);
}
+ spin_lock(&tmp_inode->i_lock);
if (is_size_safe_to_change(cifsInfo, end_of_file)) {
/* can not safely change the file size here if the
client is writing to it due to potential races */
/* for this calculation, even though the reported blocksize is larger */
tmp_inode->i_blocks = (512 - 1 + allocation_size) >> 9;
}
+ spin_unlock(&tmp_inode->i_lock);
if (allocation_size < end_of_file)
cFYI(1, ("May be sparse file, allocation less than file size"));
tmp_inode->i_gid = le64_to_cpu(pfindData->Gid);
tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks);
+ spin_lock(&tmp_inode->i_lock);
if (is_size_safe_to_change(cifsInfo, end_of_file)) {
/* can not safely change the file size here if the
client is writing to it due to potential races */
/* for this calculation, not the real blocksize */
tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
}
+ spin_unlock(&tmp_inode->i_lock);
if (S_ISREG(tmp_inode->i_mode)) {
cFYI(1, ("File inode"));
due to last connection to this server being unmounted */
if (signal_pending(current)) {
/* if signal pending do not hold up user for full smb timeout
- but we still give response a change to complete */
+ but we still give response a chance to complete */
timeout = 2 * HZ;
}
}
out:
-
DeleteMidQEntry(midQ);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
due to last connection to this server being unmounted */
if (signal_pending(current)) {
/* if signal pending do not hold up user for full smb timeout
- but we still give response a change to complete */
+ but we still give response a chance to complete */
timeout = 2 * HZ;
}
}
out:
-
DeleteMidQEntry(midQ);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
#include <linux/highmem.h>
#include <linux/poll.h>
#include <linux/mm.h>
+#include <linux/eventpoll.h>
#include <net/sock.h> /* siocdevprivate_ioctl */
return sys_ni_syscall();
}
#endif
+
+#ifdef CONFIG_EPOLL
+
+#ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT
+asmlinkage long compat_sys_epoll_ctl(int epfd, int op, int fd,
+ struct compat_epoll_event __user *event)
+{
+ long err = 0;
+ struct compat_epoll_event user;
+ struct epoll_event __user *kernel = NULL;
+
+ if (event) {
+ if (copy_from_user(&user, event, sizeof(user)))
+ return -EFAULT;
+ kernel = compat_alloc_user_space(sizeof(struct epoll_event));
+ err |= __put_user(user.events, &kernel->events);
+ err |= __put_user(user.data, &kernel->data);
+ }
+
+ return err ? err : sys_epoll_ctl(epfd, op, fd, kernel);
+}
+
+
+asmlinkage long compat_sys_epoll_wait(int epfd,
+ struct compat_epoll_event __user *events,
+ int maxevents, int timeout)
+{
+ long i, ret, err = 0;
+ struct epoll_event __user *kbuf;
+ struct epoll_event ev;
+
+ if ((maxevents <= 0) ||
+ (maxevents > (INT_MAX / sizeof(struct epoll_event))))
+ return -EINVAL;
+ kbuf = compat_alloc_user_space(sizeof(struct epoll_event) * maxevents);
+ ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout);
+ for (i = 0; i < ret; i++) {
+ err |= __get_user(ev.events, &kbuf[i].events);
+ err |= __get_user(ev.data, &kbuf[i].data);
+ err |= __put_user(ev.events, &events->events);
+ err |= __put_user_unaligned(ev.data, &events->data);
+ events++;
+ }
+
+ return err ? -EFAULT: ret;
+}
+#endif /* CONFIG_HAS_COMPAT_EPOLL_EVENT */
+
+#ifdef TIF_RESTORE_SIGMASK
+asmlinkage long compat_sys_epoll_pwait(int epfd,
+ struct compat_epoll_event __user *events,
+ int maxevents, int timeout,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize)
+{
+ long err;
+ compat_sigset_t csigmask;
+ sigset_t ksigmask, sigsaved;
+
+ /*
+ * If the caller wants a certain signal mask to be set during the wait,
+ * we apply it here.
+ */
+ if (sigmask) {
+ if (sigsetsize != sizeof(compat_sigset_t))
+ return -EINVAL;
+ if (copy_from_user(&csigmask, sigmask, sizeof(csigmask)))
+ return -EFAULT;
+ sigset_from_compat(&ksigmask, &csigmask);
+ sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
+ sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
+ }
+
+#ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT
+ err = compat_sys_epoll_wait(epfd, events, maxevents, timeout);
+#else
+ err = sys_epoll_wait(epfd, events, maxevents, timeout);
+#endif
+
+ /*
+ * If we changed the signal mask, we need to restore the original one.
+ * In case we've got a signal while waiting, we do not restore the
+ * signal mask yet, and we allow do_signal() to deliver the signal on
+ * the way back to userspace, before the signal mask is restored.
+ */
+ if (sigmask) {
+ if (err == -EINTR) {
+ memcpy(&current->saved_sigmask, &sigsaved,
+ sizeof(sigsaved));
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ } else
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ }
+
+ return err;
+}
+#endif /* TIF_RESTORE_SIGMASK */
+
+#endif /* CONFIG_EPOLL */
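From a 32-bit process none of this conversion is visible; the compat entry
points service the ordinary epoll API. A minimal userspace sketch of the call
they handle (illustrative standard epoll usage, not code from this patch):

#include <sys/epoll.h>
#include <signal.h>

/* Wait for events while atomically allowing only SIGINT delivery. */
static int wait_for_events(int epfd, struct epoll_event *evs, int nevs)
{
	sigset_t mask;

	sigfillset(&mask);		/* block everything ... */
	sigdelset(&mask, SIGINT);	/* ... except SIGINT    */
	return epoll_pwait(epfd, evs, nevs, -1, &mask);
}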
#include "lockspace.h"
#include "lock.h"
#include "lvb_table.h"
+#include "user.h"
static const char *name_prefix="dlm";
static struct miscdevice ctl_device;
{
int rc = 0;
+ flags |= O_LARGEFILE;
dget(lower_dentry);
mntget(lower_mnt);
*lower_file = dentry_open(lower_dentry, lower_mnt, flags);
struct dentry *dir;
dir = dget(dentry->d_parent);
- mutex_lock(&(dir->d_inode->i_mutex));
+ mutex_lock_nested(&(dir->d_inode->i_mutex), I_MUTEX_PARENT);
return dir;
}
goto out;
}
i_size_write(inode, 0);
- ecryptfs_write_inode_size_to_metadata(lower_file, lower_inode, inode,
- ecryptfs_dentry,
- ECRYPTFS_LOWER_I_MUTEX_NOT_HELD);
+ rc = ecryptfs_write_inode_size_to_metadata(lower_file, lower_inode,
+ inode, ecryptfs_dentry,
+ ECRYPTFS_LOWER_I_MUTEX_NOT_HELD);
ecryptfs_inode_to_private(inode)->crypt_stat.flags |= ECRYPTFS_NEW_FILE;
out:
return rc;
inode = ecryptfs_dentry->d_inode;
crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
lower_flags = ((O_CREAT | O_TRUNC) & O_ACCMODE) | O_RDWR;
-#if BITS_PER_LONG != 32
- lower_flags |= O_LARGEFILE;
-#endif
lower_mnt = ecryptfs_dentry_to_lower_mnt(ecryptfs_dentry);
/* Corresponding fput() at end of this function */
if ((rc = ecryptfs_open_lower_file(&lower_file, lower_dentry, lower_mnt,
struct vfsmount *lower_mnt;
memset(&nd, 0, sizeof(struct nameidata));
- rc = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
+ rc = path_lookup(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &nd);
if (rc) {
ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n");
- goto out_free;
+ goto out;
}
lower_root = nd.dentry;
- if (!lower_root->d_inode) {
- ecryptfs_printk(KERN_WARNING,
- "No directory to interpose on\n");
- rc = -ENOENT;
- goto out_free;
- }
lower_mnt = nd.mnt;
ecryptfs_set_superblock_lower(sb, lower_root->d_sb);
sb->s_maxbytes = lower_root->d_sb->s_maxbytes;
lower_page_data = kmap_atomic(lower_page, KM_USER1);
memcpy(page_data, lower_page_data, PAGE_CACHE_SIZE);
kunmap_atomic(lower_page_data, KM_USER1);
- flush_dcache_page(lower_page);
kunmap_atomic(page_data, KM_USER0);
flush_dcache_page(page);
rc = 0;
return rc;
}
-static void ecryptfs_release_lower_page(struct page *lower_page)
+static
+void ecryptfs_release_lower_page(struct page *lower_page, int page_locked)
{
- unlock_page(lower_page);
+ if (page_locked)
+ unlock_page(lower_page);
page_cache_release(lower_page);
}
const struct address_space_operations *lower_a_ops;
u64 file_size;
+retry:
header_page = grab_cache_page(lower_inode->i_mapping, 0);
if (!header_page) {
ecryptfs_printk(KERN_ERR, "grab_cache_page for "
}
lower_a_ops = lower_inode->i_mapping->a_ops;
rc = lower_a_ops->prepare_write(lower_file, header_page, 0, 8);
+ if (rc) {
+ if (rc == AOP_TRUNCATED_PAGE) {
+ ecryptfs_release_lower_page(header_page, 0);
+ goto retry;
+ } else
+ ecryptfs_release_lower_page(header_page, 1);
+ goto out;
+ }
file_size = (u64)i_size_read(inode);
ecryptfs_printk(KERN_DEBUG, "Writing size: [0x%.16x]\n", file_size);
file_size = cpu_to_be64(file_size);
if (rc < 0)
ecryptfs_printk(KERN_ERR, "Error commiting header page "
"write\n");
- ecryptfs_release_lower_page(header_page);
+ if (rc == AOP_TRUNCATED_PAGE) {
+ ecryptfs_release_lower_page(header_page, 0);
+ goto retry;
+ } else
+ ecryptfs_release_lower_page(header_page, 1);
lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME;
mark_inode_dirty_sync(inode);
out:
goto out;
}
lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
- if (!lower_dentry->d_inode->i_op->getxattr) {
+ if (!lower_dentry->d_inode->i_op->getxattr ||
+ !lower_dentry->d_inode->i_op->setxattr) {
printk(KERN_WARNING
"No support for setting xattr in lower filesystem\n");
rc = -ENOSYS;
{
int rc = 0;
+retry:
*lower_page = grab_cache_page(lower_inode->i_mapping, lower_page_index);
if (!(*lower_page)) {
rc = -EINVAL;
byte_offset,
region_bytes);
if (rc) {
- ecryptfs_printk(KERN_ERR, "prepare_write for "
+ if (rc == AOP_TRUNCATED_PAGE) {
+ ecryptfs_release_lower_page(*lower_page, 0);
+ goto retry;
+ } else {
+ ecryptfs_printk(KERN_ERR, "prepare_write for "
"lower_page_index = [0x%.16x] failed; rc = "
"[%d]\n", lower_page_index, rc);
+ ecryptfs_release_lower_page(*lower_page, 1);
+ (*lower_page) = NULL;
+ }
}
out:
- if (rc && (*lower_page)) {
- ecryptfs_release_lower_page(*lower_page);
- (*lower_page) = NULL;
- }
return rc;
}
struct file *lower_file, int byte_offset,
int region_size)
{
+ int page_locked = 1;
int rc = 0;
rc = lower_inode->i_mapping->a_ops->commit_write(
lower_file, lower_page, byte_offset, region_size);
+ if (rc == AOP_TRUNCATED_PAGE)
+ page_locked = 0;
if (rc < 0) {
ecryptfs_printk(KERN_ERR,
"Error committing write; rc = [%d]\n", rc);
} else
rc = 0;
- ecryptfs_release_lower_page(lower_page);
+ ecryptfs_release_lower_page(lower_page, page_locked);
return rc;
}
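The page_locked bookkeeping above encodes the AOP_TRUNCATED_PAGE contract:
when prepare_write() or commit_write() returns it, the callee has already
unlocked the page, so the caller must only drop its reference and retry from
the lookup. A sketch of a caller honoring that contract (hypothetical
function, not from the patch):

static int example_prepare(struct file *file, struct address_space *mapping,
			   pgoff_t index, unsigned from, unsigned to)
{
	struct page *page;
	int rc;

retry:
	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	rc = mapping->a_ops->prepare_write(file, page, from, to);
	if (rc == AOP_TRUNCATED_PAGE) {
		/* callee unlocked the page: drop our reference, retry */
		page_cache_release(page);
		goto retry;
	}
	if (rc) {
		unlock_page(page);
		page_cache_release(page);
	}
	/* rc == 0: page is still locked; the real caller now copies the
	 * data in and calls commit_write() */
	return rc;
}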
struct buffer_head *bh)
{
struct mb_cache_entry *ce = NULL;
+ int error = 0;
ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_bdev, bh->b_blocknr);
+ error = ext3_journal_get_write_access(handle, bh);
+ if (error)
+ goto out;
+
+ lock_buffer(bh);
+
if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
ea_bdebug(bh, "refcount now=0; freeing");
if (ce)
get_bh(bh);
ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
} else {
- if (ext3_journal_get_write_access(handle, bh) == 0) {
- lock_buffer(bh);
- BHDR(bh)->h_refcount = cpu_to_le32(
+ BHDR(bh)->h_refcount = cpu_to_le32(
le32_to_cpu(BHDR(bh)->h_refcount) - 1);
- ext3_journal_dirty_metadata(handle, bh);
- if (IS_SYNC(inode))
- handle->h_sync = 1;
- DQUOT_FREE_BLOCK(inode, 1);
- unlock_buffer(bh);
- ea_bdebug(bh, "refcount now=%d; releasing",
- le32_to_cpu(BHDR(bh)->h_refcount));
- }
+ error = ext3_journal_dirty_metadata(handle, bh);
+ if (IS_SYNC(inode))
+ handle->h_sync = 1;
+ DQUOT_FREE_BLOCK(inode, 1);
+ ea_bdebug(bh, "refcount now=%d; releasing",
+ le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
mb_cache_entry_release(ce);
}
+ unlock_buffer(bh);
+out:
+ ext3_std_error(inode->i_sb, error);
+ return;
}
struct ext3_xattr_info {
struct buffer_head *new_bh = NULL;
struct ext3_xattr_search *s = &bs->s;
struct mb_cache_entry *ce = NULL;
- int error;
+ int error = 0;
#define header(x) ((struct ext3_xattr_header *)(x))
if (s->base) {
ce = mb_cache_entry_get(ext3_xattr_cache, bs->bh->b_bdev,
bs->bh->b_blocknr);
+ error = ext3_journal_get_write_access(handle, bs->bh);
+ if (error)
+ goto cleanup;
+ lock_buffer(bs->bh);
+
if (header(s->base)->h_refcount == cpu_to_le32(1)) {
if (ce) {
mb_cache_entry_free(ce);
ce = NULL;
}
ea_bdebug(bs->bh, "modifying in-place");
- error = ext3_journal_get_write_access(handle, bs->bh);
- if (error)
- goto cleanup;
- lock_buffer(bs->bh);
error = ext3_xattr_set_entry(i, s);
if (!error) {
if (!IS_LAST_ENTRY(s->first))
} else {
int offset = (char *)s->here - bs->bh->b_data;
+ unlock_buffer(bs->bh);
+ journal_release_buffer(handle, bs->bh);
+
if (ce) {
mb_cache_entry_release(ce);
ce = NULL;
struct buffer_head *bh)
{
struct mb_cache_entry *ce = NULL;
+ int error = 0;
ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
+ error = ext4_journal_get_write_access(handle, bh);
+ if (error)
+ goto out;
+
+ lock_buffer(bh);
if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
ea_bdebug(bh, "refcount now=0; freeing");
if (ce)
get_bh(bh);
ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
} else {
- if (ext4_journal_get_write_access(handle, bh) == 0) {
- lock_buffer(bh);
- BHDR(bh)->h_refcount = cpu_to_le32(
+ BHDR(bh)->h_refcount = cpu_to_le32(
le32_to_cpu(BHDR(bh)->h_refcount) - 1);
- ext4_journal_dirty_metadata(handle, bh);
- if (IS_SYNC(inode))
- handle->h_sync = 1;
- DQUOT_FREE_BLOCK(inode, 1);
- unlock_buffer(bh);
- ea_bdebug(bh, "refcount now=%d; releasing",
- le32_to_cpu(BHDR(bh)->h_refcount));
- }
+ error = ext4_journal_dirty_metadata(handle, bh);
+ if (IS_SYNC(inode))
+ handle->h_sync = 1;
+ DQUOT_FREE_BLOCK(inode, 1);
+ ea_bdebug(bh, "refcount now=%d; releasing",
+ le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
mb_cache_entry_release(ce);
}
+ unlock_buffer(bh);
+out:
+ ext4_std_error(inode->i_sb, error);
+ return;
}
struct ext4_xattr_info {
struct buffer_head *new_bh = NULL;
struct ext4_xattr_search *s = &bs->s;
struct mb_cache_entry *ce = NULL;
- int error;
+ int error = 0;
#define header(x) ((struct ext4_xattr_header *)(x))
if (s->base) {
ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
bs->bh->b_blocknr);
+ error = ext4_journal_get_write_access(handle, bs->bh);
+ if (error)
+ goto cleanup;
+ lock_buffer(bs->bh);
+
if (header(s->base)->h_refcount == cpu_to_le32(1)) {
if (ce) {
mb_cache_entry_free(ce);
ce = NULL;
}
ea_bdebug(bs->bh, "modifying in-place");
- error = ext4_journal_get_write_access(handle, bs->bh);
- if (error)
- goto cleanup;
- lock_buffer(bs->bh);
error = ext4_xattr_set_entry(i, s);
if (!error) {
if (!IS_LAST_ENTRY(s->first))
} else {
int offset = (char *)s->here - bs->bh->b_data;
+ unlock_buffer(bs->bh);
+ jbd2_journal_release_buffer(handle, bs->bh);
if (ce) {
mb_cache_entry_release(ce);
ce = NULL;
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
+#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
spin_unlock(&gl->gl_spin);
}
- if (glops->go_drop_bh)
- glops->go_drop_bh(gl);
-
spin_lock(&gl->gl_spin);
gl->gl_req_gh = NULL;
gl->gl_req_bh = NULL;
if (ip && S_ISREG(ip->i_inode.i_mode)) {
truncate_inode_pages(ip->i_inode.i_mapping, 0);
- gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !ip->i_inode.i_mapping->nrpages);
clear_bit(GIF_PAGED, &ip->i_flags);
}
}
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
+ .go_xmote_th = meta_go_sync,
+ .go_drop_th = meta_go_sync,
.go_inval = meta_go_inval,
.go_demote_ok = rgrp_go_demote_ok,
.go_lock = rgrp_go_lock,
void (*go_xmote_th) (struct gfs2_glock *gl);
void (*go_xmote_bh) (struct gfs2_glock *gl);
void (*go_drop_th) (struct gfs2_glock *gl);
- void (*go_drop_bh) (struct gfs2_glock *gl);
void (*go_inval) (struct gfs2_glock *gl, int flags);
int (*go_demote_ok) (struct gfs2_glock *gl);
int (*go_lock) (struct gfs2_holder *gh);
unsigned int gt_stall_secs; /* Detects trouble! */
unsigned int gt_complain_secs;
unsigned int gt_reclaim_limit; /* Max num of glocks in reclaim list */
- unsigned int gt_entries_per_readdir;
unsigned int gt_statfs_quantum;
unsigned int gt_statfs_slow;
};
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_inum_host *inum = opaque;
- if (ip->i_num.no_addr == inum->no_addr)
+ if (ip->i_num.no_addr == inum->no_addr &&
+ inode->i_private != NULL)
return 1;
return 0;
struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum)
{
- return ilookup5(sb, (unsigned long)inum->no_formal_ino,
+ return ilookup5(sb, (unsigned long)inum->no_addr,
iget_test, inum);
}
static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum_host *inum)
{
- return iget5_locked(sb, (unsigned long)inum->no_formal_ino,
+ return iget5_locked(sb, (unsigned long)inum->no_addr,
iget_test, iget_set, inum);
}
out:
return error;
out_unlock:
- if (error == GLR_TRYFAILED)
- error = AOP_TRUNCATED_PAGE;
unlock_page(page);
+ if (error == GLR_TRYFAILED) {
+ error = AOP_TRUNCATED_PAGE;
+ yield();
+ }
if (do_unlock)
gfs2_holder_uninit(&gh);
goto out;
if (error == GLR_TRYFAILED) {
unlock_page(page);
error = AOP_TRUNCATED_PAGE;
+ yield();
}
goto out_uninit;
}
struct gfs2_fh_obj fh_obj;
struct gfs2_inum_host *this, parent;
- if (fh_type != fh_len)
- return NULL;
-
this = &fh_obj.this;
fh_obj.imode = DT_UNKNOWN;
memset(&parent, 0, sizeof(struct gfs2_inum));
- switch (fh_type) {
+ switch (fh_len) {
case GFS2_LARGE_FH_SIZE:
parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
parent.no_formal_ino |= be32_to_cpu(fh[5]);
}
printk(KERN_WARNING "GFS2: Unrecognized block device or "
- "mount point %s", dev_name);
+ "mount point %s\n", dev_name);
free_nd:
path_release(&nd);
(bh->b_data + sizeof(struct gfs2_meta_header) +
offset * sizeof(struct gfs2_quota_change));
- mutex_lock(&sdp->sd_quota_mutex);
+ mutex_unlock(&sdp->sd_quota_mutex);
return 0;
gt->gt_stall_secs = 600;
gt->gt_complain_secs = 10;
gt->gt_reclaim_limit = 5000;
- gt->gt_entries_per_readdir = 32;
gt->gt_statfs_quantum = 30;
gt->gt_statfs_slow = 0;
}
};
/* Changed in hostfs_args before the kernel starts running */
-static char *root_ino = "/";
+static char *root_ino = "";
static int append = 0;
#define HOSTFS_SUPER_MAGIC 0x00c0ffee
sb->s_magic = HOSTFS_SUPER_MAGIC;
sb->s_op = &hostfs_sbops;
- if((data == NULL) || (*data == '\0'))
- data = root_ino;
+ /* NULL is printed as <NULL> by sprintf: avoid that. */
+ if (data == NULL)
+ data = "";
err = -ENOMEM;
- name = kmalloc(strlen(data) + 1, GFP_KERNEL);
+ name = kmalloc(strlen(root_ino) + 1
+ + strlen(data) + 1, GFP_KERNEL);
if(name == NULL)
goto out;
- strcpy(name, data);
+ sprintf(name, "%s/%s", root_ino, data);
root_inode = iget(sb, 0);
if(root_inode == NULL)
goto out_put;
HOSTFS_I(root_inode)->host_filename = name;
+ /* Avoid name being freed a second time in the error path, where
+ * iput(root_inode) frees it through hostfs_destroy_inode! */
+ name = NULL;
err = -ENOMEM;
sb->s_root = d_alloc_root(root_inode);
/* No iput in this case because the dput does that for us */
dput(sb->s_root);
sb->s_root = NULL;
- goto out_free;
+ goto out;
}
return(0);
.lookup = simple_lookup,
};
+static const struct super_operations simple_super_operations = {
+ .statfs = simple_statfs,
+};
+
/*
* Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
* will never be mountable)
struct vfsmount *mnt)
{
struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
- static const struct super_operations default_ops = {.statfs = simple_statfs};
struct dentry *dentry;
struct inode *root;
struct qstr d_name = {.name = name, .len = strlen(name)};
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = magic;
- s->s_op = ops ? ops : &default_ops;
+ s->s_op = ops ? ops : &simple_super_operations;
s->s_time_gran = 1;
root = new_inode(s);
if (!root)
int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
{
- static struct super_operations s_ops = {.statfs = simple_statfs};
struct inode *inode;
struct dentry *root;
struct dentry *dentry;
s->s_blocksize = PAGE_CACHE_SIZE;
s->s_blocksize_bits = PAGE_CACHE_SHIFT;
s->s_magic = magic;
- s->s_op = &s_ops;
+ s->s_op = &simple_super_operations;
s->s_time_gran = 1;
inode = new_inode(s);
server->packet = vmalloc(NCP_PACKET_SIZE);
if (server->packet == NULL)
goto out_nls;
+ server->txbuf = vmalloc(NCP_PACKET_SIZE);
+ if (server->txbuf == NULL)
+ goto out_packet;
+ server->rxbuf = vmalloc(NCP_PACKET_SIZE);
+ if (server->rxbuf == NULL)
+ goto out_txbuf;
sock->sk->sk_data_ready = ncp_tcp_data_ready;
sock->sk->sk_error_report = ncp_tcp_error_report;
error = ncp_connect(server);
ncp_unlock_server(server);
if (error < 0)
- goto out_packet;
+ goto out_rxbuf;
DPRINTK("ncp_fill_super: NCP_SBP(sb) = %x\n", (int) NCP_SBP(sb));
error = -EMSGSIZE; /* -EREMOTESIDEINCOMPATIBLE */
ncp_lock_server(server);
ncp_disconnect(server);
ncp_unlock_server(server);
-out_packet:
+out_rxbuf:
ncp_stop_tasks(server);
+ vfree(server->rxbuf);
+out_txbuf:
+ vfree(server->txbuf);
+out_packet:
vfree(server->packet);
out_nls:
#ifdef CONFIG_NCPFS_NLS
kfree(server->priv.data);
kfree(server->auth.object_name);
+ vfree(server->rxbuf);
+ vfree(server->txbuf);
vfree(server->packet);
sb->s_fs_info = NULL;
kfree(server);
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
+#include <linux/string.h>
#include <asm/uaccess.h>
#include <linux/in.h>
#include <linux/net.h>
struct ncp_request_reply {
struct list_head req;
wait_queue_head_t wq;
- struct ncp_reply_header* reply_buf;
+ atomic_t refs;
+ unsigned char* reply_buf;
size_t datalen;
int result;
- enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE } status;
+ enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
struct kvec* tx_ciov;
size_t tx_totallen;
size_t tx_iovlen;
u_int32_t sign[6];
};
+static inline struct ncp_request_reply* ncp_alloc_req(void)
+{
+ struct ncp_request_reply *req;
+
+ req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ init_waitqueue_head(&req->wq);
+ atomic_set(&req->refs, 1);
+ req->status = RQ_IDLE;
+
+ return req;
+}
+
+static void ncp_req_get(struct ncp_request_reply *req)
+{
+ atomic_inc(&req->refs);
+}
+
+static void ncp_req_put(struct ncp_request_reply *req)
+{
+ if (atomic_dec_and_test(&req->refs))
+ kfree(req);
+}
+
void ncp_tcp_data_ready(struct sock *sk, int len)
{
struct ncp_server *server = sk->sk_user_data;
schedule_work(&server->timeout_tq);
}
-static inline void ncp_finish_request(struct ncp_request_reply *req, int result)
+static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
req->result = result;
+ if (req->status != RQ_ABANDONED)
+ memcpy(req->reply_buf, server->rxbuf, req->datalen);
req->status = RQ_DONE;
wake_up_all(&req->wq);
+ ncp_req_put(req);
}
-static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request_reply *aborted, int err)
+static void __abort_ncp_connection(struct ncp_server *server)
{
struct ncp_request_reply *req;
req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
list_del_init(&req->req);
- if (req == aborted) {
- ncp_finish_request(req, err);
- } else {
- ncp_finish_request(req, -EIO);
- }
+ ncp_finish_request(server, req, -EIO);
}
req = server->rcv.creq;
if (req) {
server->rcv.creq = NULL;
- if (req == aborted) {
- ncp_finish_request(req, err);
- } else {
- ncp_finish_request(req, -EIO);
- }
+ ncp_finish_request(server, req, -EIO);
server->rcv.ptr = NULL;
server->rcv.state = 0;
}
req = server->tx.creq;
if (req) {
server->tx.creq = NULL;
- if (req == aborted) {
- ncp_finish_request(req, err);
- } else {
- ncp_finish_request(req, -EIO);
- }
+ ncp_finish_request(server, req, -EIO);
}
}
break;
case RQ_QUEUED:
list_del_init(&req->req);
- ncp_finish_request(req, err);
+ ncp_finish_request(server, req, err);
break;
case RQ_INPROGRESS:
- __abort_ncp_connection(server, req, err);
+ req->status = RQ_ABANDONED;
+ break;
+ case RQ_ABANDONED:
break;
}
}
static inline void __ncptcp_abort(struct ncp_server *server)
{
- __abort_ncp_connection(server, NULL, 0);
+ __abort_ncp_connection(server);
}
static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
+ /* we copy the data so that we do not depend on the caller
+ staying alive */
+ memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
+ req->tx_iov[1].iov_base = server->txbuf;
+
if (server->ncp_sock->type == SOCK_STREAM)
ncptcp_start_request(server, req);
else
printk(KERN_ERR "ncpfs: tcp: Server died\n");
return -EIO;
}
+ ncp_req_get(req);
if (server->tx.creq || server->rcv.creq) {
req->status = RQ_QUEUED;
list_add_tail(&req->req, &server->tx.requests);
server->timeout_last = NCP_MAX_RPC_TIMEOUT;
mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
} else if (reply.type == NCP_REPLY) {
- result = _recv(sock, (void*)req->reply_buf, req->datalen, MSG_DONTWAIT);
+ result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
if (result < 8 + 8) {
result -= 8;
hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
- if (sign_verify_reply(server, ((char*)req->reply_buf) + hdrl, result - hdrl, cpu_to_le32(result), ((char*)req->reply_buf) + result)) {
+ if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
printk(KERN_INFO "ncpfs: Signature violation\n");
result = -EIO;
}
#endif
del_timer(&server->timeout_tm);
server->rcv.creq = NULL;
- ncp_finish_request(req, result);
+ ncp_finish_request(server, req, result);
__ncp_next_request(server);
mutex_unlock(&server->rcv.creq_mutex);
continue;
mutex_unlock(&server->rcv.creq_mutex);
}
-static inline void ncp_init_req(struct ncp_request_reply* req)
-{
- init_waitqueue_head(&req->wq);
- req->status = RQ_IDLE;
-}
-
static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
{
int result;
goto skipdata;
}
req->datalen = datalen - 8;
- req->reply_buf->type = NCP_REPLY;
- server->rcv.ptr = (unsigned char*)(req->reply_buf) + 2;
+ ((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
+ server->rcv.ptr = server->rxbuf + 2;
server->rcv.len = datalen - 10;
server->rcv.state = 1;
break;
case 1:
req = server->rcv.creq;
if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
- if (req->reply_buf->sequence != server->sequence) {
+ if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
printk(KERN_ERR "ncpfs: tcp: Bad sequence number\n");
__ncp_abort_request(server, req, -EIO);
return -EIO;
}
- if ((req->reply_buf->conn_low | (req->reply_buf->conn_high << 8)) != server->connection) {
+ if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
printk(KERN_ERR "ncpfs: tcp: Connection number mismatch\n");
__ncp_abort_request(server, req, -EIO);
return -EIO;
}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
- if (sign_verify_reply(server, (unsigned char*)(req->reply_buf) + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
+ if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
printk(KERN_ERR "ncpfs: tcp: Signature violation\n");
__ncp_abort_request(server, req, -EIO);
return -EIO;
}
}
#endif
- ncp_finish_request(req, req->datalen);
+ ncp_finish_request(server, req, req->datalen);
nextreq:;
__ncp_next_request(server);
case 2:
server->rcv.state = 0;
break;
case 3:
- ncp_finish_request(server->rcv.creq, -EIO);
+ ncp_finish_request(server, server->rcv.creq, -EIO);
goto nextreq;
case 5:
info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
}
static int do_ncp_rpc_call(struct ncp_server *server, int size,
- struct ncp_reply_header* reply_buf, int max_reply_size)
+ unsigned char* reply_buf, int max_reply_size)
{
int result;
- struct ncp_request_reply req;
-
- ncp_init_req(&req);
- req.reply_buf = reply_buf;
- req.datalen = max_reply_size;
- req.tx_iov[1].iov_base = server->packet;
- req.tx_iov[1].iov_len = size;
- req.tx_iovlen = 1;
- req.tx_totallen = size;
- req.tx_type = *(u_int16_t*)server->packet;
-
- result = ncp_add_request(server, &req);
- if (result < 0) {
- return result;
- }
- if (wait_event_interruptible(req.wq, req.status == RQ_DONE)) {
- ncp_abort_request(server, &req, -EIO);
+ struct ncp_request_reply *req;
+
+ req = ncp_alloc_req();
+ if (!req)
+ return -ENOMEM;
+
+ req->reply_buf = reply_buf;
+ req->datalen = max_reply_size;
+ req->tx_iov[1].iov_base = server->packet;
+ req->tx_iov[1].iov_len = size;
+ req->tx_iovlen = 1;
+ req->tx_totallen = size;
+ req->tx_type = *(u_int16_t*)server->packet;
+
+ result = ncp_add_request(server, req);
+ if (result < 0)
+ goto out;
+
+ if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
+ ncp_abort_request(server, req, -EINTR);
+ result = -EINTR;
+ goto out;
}
- return req.result;
+
+ result = req->result;
+
+out:
+ ncp_req_put(req);
+
+ return result;
}
/*
DDPRINTK("do_ncp_rpc_call returned %d\n", result);
- if (result < 0) {
- /* There was a problem with I/O, so the connections is
- * no longer usable. */
- ncp_invalidate_conn(server);
- }
return result;
}
}
if (res > 0)
return state;
- if (!err)
+ if (err)
/* The partition is unrecognized. So report I/O errors if there were any */
res = err;
if (!res)
new_parent_dentry = new_parent ?
new_parent->dentry : sysfs_mount->mnt_sb->s_root;
+ if (old_parent_dentry->d_inode == new_parent_dentry->d_inode)
+ return 0; /* nothing to move */
again:
mutex_lock(&old_parent_dentry->d_inode->i_mutex);
if (!mutex_trylock(&new_parent_dentry->d_inode->i_mutex)) {
mutex_lock_nested(&node->i_mutex, I_MUTEX_CHILD);
if (node->i_private) {
- list_for_each_entry(buf, &set->associates, associates) {
- down(&buf->sem);
+ list_for_each_entry(buf, &set->associates, associates)
buf->orphaned = 1;
- up(&buf->sem);
- }
}
mutex_unlock(&node->i_mutex);
}
.macro disable_fiq
.endm
+ .macro get_irqnr_preamble, base, tmp
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldr \base, =VA_IC_BASE
ldr \irqnr, [\base, #0x98] /* IRQ pending reg 1 */
#define virt_to_lbus(x) ((x) - PAGE_OFFSET + OMAP1510_LB_OFFSET)
#define lbus_to_virt(x) ((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET)
-#define is_lbus_device(dev) (cpu_is_omap1510() && dev && (strncmp(dev->bus_id, "ohci", 4) == 0))
+#define is_lbus_device(dev) (cpu_is_omap15xx() && dev && (strncmp(dev->bus_id, "ohci", 4) == 0))
#define __arch_page_to_dma(dev, page) ({is_lbus_device(dev) ? \
(dma_addr_t)virt_to_lbus(page_address(page)) : \
int period; /* current transfer period */
	int periods;	/* current count of periods registered in the DMA engine */
spinlock_t dma_lock; /* for locking in DMA operations */
- snd_pcm_substream_t *stream; /* the pcm stream */
+ struct snd_pcm_substream *stream; /* the pcm stream */
unsigned linked:1; /* dma channels linked */
int offset; /* store start position of the last period in the alsa buffer */
int (*hw_start)(void); /* interface to start HW interface, e.g. McBSP */
* Alsa card structure for aic23
*/
struct snd_card_omap_codec {
- snd_card_t *card;
- snd_pcm_t *pcm;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
long samplerate;
struct audio_stream s[2]; /* playback & capture */
};
struct omap_alsa_codec_config {
char *name;
struct omap_mcbsp_reg_cfg *mcbsp_regs_alsa;
- snd_pcm_hw_constraint_list_t *hw_constraints_rates;
- snd_pcm_hardware_t *snd_omap_alsa_playback;
- snd_pcm_hardware_t *snd_omap_alsa_capture;
+ struct snd_pcm_hw_constraint_list *hw_constraints_rates;
+ struct snd_pcm_hardware *snd_omap_alsa_playback;
+ struct snd_pcm_hardware *snd_omap_alsa_capture;
void (*codec_configure_dev)(void);
void (*codec_set_samplerate)(long);
void (*codec_clock_setup)(void);
+++ /dev/null
-#ifndef _GPIO_KEYS_H
-#define _GPIO_KEYS_H
-
-struct gpio_keys_button {
- /* Configuration parameters */
- int keycode;
- int gpio;
- int active_low;
- char *desc;
-};
-
-struct gpio_keys_platform_data {
- struct gpio_keys_button *buttons;
- int nbuttons;
-};
-
-#endif
: : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
-#define mb() dmb()
-#define rmb() mb()
-#define wmb() mb()
-#define read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define read_barrier_depends() do { } while(0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() dmb()
+#define smp_rmb() dmb()
+#define smp_wmb() dmb()
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() read_barrier_depends()
+#endif /* CONFIG_SMP */
+
+#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
{
}
-#ifdef CONFIG_SMP
-
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-
-#else
-
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-
-#endif /* CONFIG_SMP */
-
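
The net effect of the two barrier hunks above: on UP builds mb()/rmb()/wmb()
degrade to plain compiler barriers, the dmb instruction is reserved for the
smp_* variants on SMP kernels, and set_mb() now pairs with smp_mb(). A hedged
sketch of the producer/consumer pairing these macros exist for (data and flag
are illustrative variables, not kernel symbols):

/* Hedged sketch of the classic pairing served by smp_wmb()/smp_rmb(). */
static int data;
static int flag;

static void producer(void)
{
	data = 42;
	smp_wmb();		/* publish data before the flag */
	flag = 1;
}

static int consumer(void)
{
	while (!flag)
		cpu_relax();
	smp_rmb();		/* observe the flag before reading data */
	return data;
}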
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
* On the StrongARM, "swp" is terminally broken since it bypasses the
include include/asm-generic/Kbuild.asm
-headers-y += cachectl.h
+header-y += cachectl.h
dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything, that's all the pci API can do */
+ dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything, that's all the pci API can do */
+ dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
+}
+
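
As the comments note, the new range variants can only sync the whole span up
to offset+size, since the underlying API has no finer granularity. A hedged
usage sketch for a driver that wants the CPU to inspect the head of a
streaming RX mapping (dev, handle, cpu_addr and HDR_LEN are illustrative):

/* Hedged sketch: peek at a header inside a streaming DMA mapping. */
#define HDR_LEN 16

static void inspect_rx_header(struct device *dev, dma_addr_t handle,
			      unsigned char *cpu_addr)
{
	/* hand the first HDR_LEN bytes back to the CPU */
	dma_sync_single_range_for_cpu(dev, handle, 0, HDR_LEN,
				      DMA_FROM_DEVICE);

	/* ... examine cpu_addr[0..HDR_LEN-1] here ... */

	/* then return ownership to the device */
	dma_sync_single_range_for_device(dev, handle, 0, HDR_LEN,
					 DMA_FROM_DEVICE);
}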
/**
* dma_sync_sg_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
#define swapper_pg_dir ((pgd_t *) NULL)
-#define pgtable_cache_init() do {} while(0)
+#define pgtable_cache_init() do {} while (0)
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_enter_lazy_cpu_mode() do {} while (0)
+#define arch_leave_lazy_cpu_mode() do {} while (0)
#else /* !CONFIG_MMU */
/*****************************************************************************/
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
-#include <linux/log2.h>
+#include <linux/compiler.h>
-/*
- * non-const pure 2^n version of get_order
- * - the arch may override these in asm/bitops.h if they can be implemented
- * more efficiently than using the arch log2 routines
- * - we use the non-const log2() instead if the arch has defined one suitable
- */
-#ifndef ARCH_HAS_GET_ORDER
-static inline __attribute__((const))
-int __get_order(unsigned long size, int page_shift)
+/* Pure 2^n version of get_order */
+static __inline__ __attribute_const__ int get_order(unsigned long size)
{
-#if BITS_PER_LONG == 32 && defined(ARCH_HAS_ILOG2_U32)
- int order = __ilog2_u32(size) - page_shift;
- return order >= 0 ? order : 0;
-#elif BITS_PER_LONG == 64 && defined(ARCH_HAS_ILOG2_U64)
- int order = __ilog2_u64(size) - page_shift;
- return order >= 0 ? order : 0;
-#else
int order;
- size = (size - 1) >> (page_shift - 1);
+ size = (size - 1) >> (PAGE_SHIFT - 1);
order = -1;
do {
size >>= 1;
order++;
} while (size);
return order;
-#endif
}
-#endif
-
-/**
- * get_order - calculate log2(pages) to hold a block of the specified size
- * @n - size
- *
- * calculate allocation order based on the current page size
- * - this can be used to initialise global variables from constant data
- */
-#define get_order(n) \
-( \
- __builtin_constant_p(n) ? \
- ((n < (1UL << PAGE_SHIFT)) ? 0 : ilog2(n) - PAGE_SHIFT) : \
- __get_order(n, PAGE_SHIFT) \
- )
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
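
The restored get_order() computes the smallest order such that size fits in
2^order pages. The same loop as a standalone user-space check, with PAGE_SHIFT
pinned to 12 (4 KiB pages) for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12	/* illustrative 4 KiB pages */

static int get_order(unsigned long size)
{
	int order = -1;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	/* prints "0 1 1 2": 4096 -> 0, 4097 -> 1, 8192 -> 1, 16384 -> 2 */
	printf("%d %d %d %d\n", get_order(4096), get_order(4097),
	       get_order(8192), get_order(16384));
	return 0;
}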
extern void __const_udelay(unsigned long usecs);
extern void __delay(unsigned long loops);
-#if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY)
-#define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul)
-
-#define ndelay(n) paravirt_ops.const_udelay((n) * 5ul)
-
-#else /* !PARAVIRT || USE_REAL_TIME_DELAY */
-
/* 0x10c7 is 2**32 / 1000000 (rounded up) */
#define udelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
#define ndelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
__ndelay(n))
-#endif
void use_tsc_delay(void);
#include <asm/types.h>
#include <asm/mpspec.h>
+#include <asm/apicdef.h>
/*
* Intel IO-APIC support for SMP and UP systems.
extern atomic_t nmi_active;
extern unsigned int nmi_watchdog;
-#define NMI_DEFAULT -1
+#define NMI_DEFAULT 0
#define NMI_NONE 0
#define NMI_IO_APIC 1
#define NMI_LOCAL_APIC 2
u64 (*read_tsc)(void);
u64 (*read_pmc)(void);
+ u64 (*get_scheduled_cycles)(void);
+ unsigned long (*get_cpu_khz)(void);
void (*load_tr_desc)(void);
void (*load_gdt)(const struct Xgt_desc_struct *);
void (*set_iopl_mask)(unsigned mask);
void (*io_delay)(void);
- void (*const_udelay)(unsigned long loops);
#ifdef CONFIG_X86_LOCAL_APIC
void (*apic_write)(unsigned long reg, unsigned long v);
void (*flush_tlb_kernel)(void);
void (*flush_tlb_single)(u32 addr);
+ void (fastcall *map_pt_hook)(int type, pte_t *va, u32 pfn);
+
void (*alloc_pt)(u32 pfn);
void (*alloc_pd)(u32 pfn);
void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
return paravirt_ops.set_wallclock(nowtime);
}
-static inline void do_time_init(void)
+static inline void (*choose_time_init(void))(void)
{
- return paravirt_ops.time_init();
+ return paravirt_ops.time_init;
}
/* The paravirtualized CPUID instruction. */
#define rdtscll(val) (val = paravirt_ops.read_tsc())
+#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
+#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
+
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
#define rdpmc(counter,low,high) do { \
#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
+#define paravirt_map_pt_hook(type, va, pfn) paravirt_ops.map_pt_hook(type, va, pfn)
+
#define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn)
#define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn)
*/
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
+#define paravirt_map_pt_hook(slot, va, pfn) do { } while (0)
#endif
/*
#endif
#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+#define pte_offset_map(dir, address) \
+({ \
+ pte_t *__ptep; \
+ unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
+ __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0);\
+ paravirt_map_pt_hook(KM_PTE0,__ptep, pfn); \
+ __ptep = __ptep + pte_index(address); \
+ __ptep; \
+})
+#define pte_offset_map_nested(dir, address) \
+({ \
+ pte_t *__ptep; \
+ unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
+ __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\
+ paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \
+ __ptep = __ptep + pte_index(address); \
+ __ptep; \
+})
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
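
The HIGHPTE variants above now call the paravirt hook right after
kmap_atomic(), so a hypervisor can shadow the mapping before the pte pointer
is dereferenced. The map/unmap pairing callers must follow is unchanged; a
hedged sketch:

/* Hedged sketch: pte_offset_map() takes a KM_PTE0 kmap_atomic slot,
 * so the access must stay atomic and end with pte_unmap(). */
static pte_t read_one_pte(pmd_t *pmd, unsigned long addr)
{
	pte_t *ptep, entry;

	ptep = pte_offset_map(pmd, addr);
	entry = *ptep;
	pte_unmap(ptep);
	return entry;
}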
return retval;
}
+extern void (*late_time_init)(void);
+extern void hpet_time_init(void);
+
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
-extern unsigned long long native_sched_clock(void);
#else /* !CONFIG_PARAVIRT */
#define get_wallclock() native_get_wallclock()
#define set_wallclock(x) native_set_wallclock(x)
-#define do_time_init() time_init_hook()
+#define choose_time_init() hpet_time_init
#endif /* CONFIG_PARAVIRT */
#include <linux/pm.h>
#define TICK_SIZE (tick_nsec / 1000)
+
void setup_pit_timer(void);
+unsigned long long native_sched_clock(void);
+unsigned long native_calculate_cpu_khz(void);
+
/* Modifiers for buggy PIT handling */
extern int pit_latch_buggy;
extern int timer_ack;
extern int no_timer_check;
-extern unsigned long long (*custom_sched_clock)(void);
extern int no_sync_cmos_clock;
extern int recalibrate_cpu_khz(void);
+#ifndef CONFIG_PARAVIRT
+#define get_scheduled_cycles(val) rdtscll(val)
+#define calculate_cpu_khz() native_calculate_cpu_khz()
+#endif
+
#endif
.idle_idx = 1, \
.newidle_idx = 2, \
.wake_idx = 1, \
- .per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_EXEC \
| SD_BALANCE_FORK \
-#include <asm-x86_64/tsc.h>
+/*
+ * linux/include/asm-i386/tsc.h
+ *
+ * i386 TSC related functions
+ */
+#ifndef _ASM_i386_TSC_H
+#define _ASM_i386_TSC_H
+
+#include <asm/processor.h>
+
+/*
+ * Standard way to access the cycle counter.
+ */
+typedef unsigned long long cycles_t;
+
+extern unsigned int cpu_khz;
+extern unsigned int tsc_khz;
+
+static inline cycles_t get_cycles(void)
+{
+ unsigned long long ret = 0;
+
+#ifndef CONFIG_X86_TSC
+ if (!cpu_has_tsc)
+ return 0;
+#endif
+
+#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
+ rdtscll(ret);
+#endif
+ return ret;
+}
+
+/* Like get_cycles, but make sure the CPU is synchronized. */
+static __always_inline cycles_t get_cycles_sync(void)
+{
+ unsigned long long ret;
+#ifdef X86_FEATURE_SYNC_RDTSC
+ unsigned eax;
+
+ /*
+ * Don't do an additional sync on CPUs where we know
+ * RDTSC is already synchronous:
+ */
+ alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
+ "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
+#else
+ sync_core();
+#endif
+ rdtscll(ret);
+
+ return ret;
+}
+
+extern void tsc_init(void);
+extern void mark_tsc_unstable(void);
+extern int unsynchronized_tsc(void);
+extern void init_tsc_clocksource(void);
+
+/*
+ * Boot-time check whether the TSCs are synchronized across
+ * all CPUs/cores:
+ */
+extern void check_tsc_sync_source(int cpu);
+extern void check_tsc_sync_target(void);
+
+#endif
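
get_cycles_sync() serializes with CPUID first (skipped via alternative_io() on
CPUs that advertise synchronous RDTSC), so the read cannot be hoisted by
out-of-order execution. A hedged sketch of timing a section with it,
converting cycles to nanoseconds through tsc_khz as declared above:

/* Hedged sketch: time a short section with the TSC.
 * cycles -> ns is cycles * 10^6 / tsc_khz. */
static unsigned long long time_section_ns(void)
{
	cycles_t t0, t1;

	t0 = get_cycles_sync();
	/* ... section under test ... */
	t1 = get_cycles_sync();

	return (t1 - t0) * 1000000ULL / tsc_khz;
}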
#define VMI_CALL_SetInitialAPState 62
#define VMI_CALL_APICWrite 63
#define VMI_CALL_APICRead 64
+#define VMI_CALL_IODelay 65
#define VMI_CALL_SetLazyMode 73
/*
extern void __init vmi_time_init(void);
extern unsigned long vmi_get_wallclock(void);
extern int vmi_set_wallclock(unsigned long now);
-extern unsigned long long vmi_sched_clock(void);
+extern unsigned long long vmi_get_sched_cycles(void);
+extern unsigned long vmi_cpu_khz(void);
#ifdef CONFIG_X86_LOCAL_APIC
extern void __init vmi_timer_setup_boot_alarm(void);
#ifdef CONFIG_NO_IDLE_HZ
extern int vmi_stop_hz_timer(void);
extern void vmi_account_time_restart_hz_timer(void);
+#else
+static inline int vmi_stop_hz_timer(void)
+{
+ return 0;
+}
+static inline void vmi_account_time_restart_hz_timer(void)
+{
+}
#endif
/*
* - kernel code & data
* - crash dumping code reserved region
* - Kernel memory map built from EFI memory map
+ * - ELF core header
*
* More could be added if necessary
*/
-#define IA64_MAX_RSVD_REGIONS 7
+#define IA64_MAX_RSVD_REGIONS 8
struct rsvd_region {
unsigned long start; /* virtual address of beginning of element */
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
extern void efi_memmap_init(unsigned long *, unsigned long *);
+extern unsigned long vmcore_find_descriptor_size(unsigned long address);
+extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
+
/*
* For rounding an address to the next IA64_GRANULE_SIZE or order
*/
#define _ASM_IA64_RESOURCE_H
#include <asm/ustack.h>
-#define _STK_LIM_MAX DEFAULT_USER_STACK_SIZE
#include <asm-generic/resource.h>
#endif /* _ASM_IA64_RESOURCE_H */
+++ /dev/null
-#ifndef _ASM_SWIOTLB_H
-#define _ASM_SWIOTLB_H 1
-
-#include <asm/machvec.h>
-
-#define SWIOTLB_ARCH_NEED_LATE_INIT
-#define SWIOTLB_ARCH_NEED_ALLOC
-
-#endif /* _ASM_SWIOTLB_H */
.max_interval = 4, \
.busy_factor = 64, \
.imbalance_pct = 125, \
- .per_cpu_gain = 100, \
.cache_nice_tries = 2, \
.busy_idx = 2, \
.idle_idx = 1, \
.newidle_idx = 0, /* unused */ \
.wake_idx = 1, \
.forkexec_idx = 1, \
- .per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_EXEC \
| SD_BALANCE_FORK \
/* set Port AS pin for I2C or UART */
#define MCF5282_GPIO_PASPAR (volatile u16 *) (MCF_IPSBAR + 0x00100056)
+/* Port UA Pin Assignment Register (8 Bit) */
+#define MCF5282_GPIO_PUAPAR 0x10005C
+
/* Interrupt Mask Register Register Low */
#define MCF5282_INTC0_IMRL (volatile u32 *) (MCF_IPSBAR + 0x0C0C)
/* Interrupt Control Register 7 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned short bit = nr & SZLONG_MASK;
unsigned long temp;
if (cpu_has_llsc && R10000_LLSC_WAR) {
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ : "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
- } else if (__builtin_constant_p(nr)) {
+ } else if (__builtin_constant_p(bit)) {
__asm__ __volatile__(
"1: " __LL "%0, %1 # set_bit \n"
" " __INS "%0, %4, %2, 1 \n"
"2: b 1b \n"
" .previous \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (nr & SZLONG_MASK), "m" (*m), "r" (~0));
+ : "ir" (bit), "m" (*m), "r" (~0));
#endif /* CONFIG_CPU_MIPSR2 */
} else if (cpu_has_llsc) {
__asm__ __volatile__(
" .previous \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ : "ir" (1UL << bit), "m" (*m));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
*a |= mask;
local_irq_restore(flags);
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned short bit = nr & SZLONG_MASK;
unsigned long temp;
if (cpu_has_llsc && R10000_LLSC_WAR) {
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
+ : "ir" (~(1UL << bit)), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
- } else if (__builtin_constant_p(nr)) {
+ } else if (__builtin_constant_p(bit)) {
__asm__ __volatile__(
"1: " __LL "%0, %1 # clear_bit \n"
" " __INS "%0, $0, %2, 1 \n"
"2: b 1b \n"
" .previous \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (nr & SZLONG_MASK), "m" (*m));
+ : "ir" (bit), "m" (*m));
#endif /* CONFIG_CPU_MIPSR2 */
} else if (cpu_has_llsc) {
__asm__ __volatile__(
" .previous \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
+ : "ir" (~(1UL << bit)), "m" (*m));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
*a &= ~mask;
local_irq_restore(flags);
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
+ unsigned short bit = nr & SZLONG_MASK;
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ : "ir" (1UL << bit), "m" (*m));
} else if (cpu_has_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
" .previous \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ : "ir" (1UL << bit), "m" (*m));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
*a ^= mask;
local_irq_restore(flags);
static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
+ unsigned short bit = nr & SZLONG_MASK;
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp, res;
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
" .previous \n"
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
retval = (mask & *a) != 0;
*a |= mask;
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
+ unsigned short bit = nr & SZLONG_MASK;
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp, res;
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
"2: b 1b \n"
" .previous \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "ri" (nr & SZLONG_MASK), "m" (*m)
+ : "ri" (bit), "m" (*m)
: "memory");
return res;
" .previous \n"
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
retval = (mask & *a) != 0;
*a &= ~mask;
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
+ unsigned short bit = nr & SZLONG_MASK;
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp, res;
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
" .previous \n"
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
retval = (mask & *a) != 0;
*a ^= mask;
*
*/
-#ifndef GENERIC_ISA_DMA_SUPPORT_BROKEN
+#ifndef CONFIG_GENERIC_ISA_DMA_SUPPORT_BROKEN
#define MAX_DMA_CHANNELS 8
#endif
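
The bitops hunks above hoist nr & SZLONG_MASK into a local bit so the word
index and the bit-within-word are derived once per call. The decomposition
itself, as a standalone check with the 64-bit values (SZLONG_LOG = 6,
SZLONG_MASK = 63) assumed for illustration:

#include <stdio.h>

/* Illustrative 64-bit values: a long holds 2^6 = 64 bits */
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL

int main(void)
{
	unsigned long nr = 70;

	/* prints "word 1, bit 6": bit 70 is 6 bits into the second long */
	printf("word %lu, bit %lu\n", nr >> SZLONG_LOG, nr & SZLONG_MASK);
	return 0;
}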
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
- */
-#ifndef _ASM_DS1742_H
-#define _ASM_DS1742_H
-
-#include <ds1742.h>
-
-#endif /* _ASM_DS1742_H */
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-/*
- * We to additionally limit parameters to a maximum 255 bytes.
- */
-#define _IOC_SLMASK 0xff
-
/*
* Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit.
* And this turns out useful to catch old ioctl numbers in header
#define jmr3927_have_nvram() \
((jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR) & JMR3927_IDT_MASK) == JMR3927_IOC_IDT)
-/* NVRAM macro */
-#define jmr3927_nvram_in(ofs) \
- jmr3927_ioc_reg_in(JMR3927_IOC_NVRAMB_ADDR + ((ofs) << 1))
-#define jmr3927_nvram_out(d, ofs) \
- jmr3927_ioc_reg_out(d, JMR3927_IOC_NVRAMB_ADDR + ((ofs) << 1))
-
/* LED macro */
#define jmr3927_led_set(n/*0-16*/) jmr3927_ioc_reg_out(~(n), JMR3927_IOC_LED_ADDR)
#define jmr3927_io_led_set(n/*0-3*/) jmr3927_isac_reg_out((n), JMR3927_ISAC_LED_ADDR)
__delay(ns / lasat_ndelay_divider);
}
-extern void (* prom_printf)(const char *fmt, ...);
-
#endif /* !defined (_LANGUAGE_ASSEMBLY) */
#define LASAT_SERVICEMODE_MAGIC_1 0xdeadbeef
#define RTC_ALWAYS_BCD 0
-#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1970)
+#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
#endif /* __ASM_MACH_ATLAS_MC146818RTC_H */
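
The epoch change is the whole fix here: with the old "+ 1970", two-digit years
70..99 decoded to 2040..2069 instead of 1970..1999. A standalone check of the
corrected macro:

#include <stdio.h>

#define mc146818_decode_year(year) \
	((year) < 70 ? (year) + 2000 : (year) + 1900)

int main(void)
{
	/* prints "1999 2007"; the old "+ 1970" turned 99 into 2069 */
	printf("%d %d\n", mc146818_decode_year(99),
	       mc146818_decode_year(7));
	return 0;
}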
struct device;
-static dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
+static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
+ size_t size)
{
return virt_to_phys(addr);
}
-static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+ struct page *page)
{
return page_to_phys(page);
}
-static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
+static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
{
return dma_addr;
}
-static void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
{
}
#define RTC_ALWAYS_BCD 1
#ifndef mc146818_decode_year
-#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1970)
+#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
#endif
#endif /* __ASM_MACH_GENERIC_MC146818RTC_H */
.busy_factor = 32, \
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
- .per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_EXEC \
| SD_WAKE_BALANCE, \
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003, 06 by Ralf Baechle
- */
-#ifndef __ASM_MACH_JMR3927_DS1742_H
-#define __ASM_MACH_JMR3927_DS1742_H
-
-#include <asm/jmr3927/jmr3927.h>
-
-#define rtc_read(reg) (jmr3927_nvram_in(reg))
-#define rtc_write(data, reg) (jmr3927_nvram_out((data),(reg)))
-
-#endif /* __ASM_MACH_JMR3927_DS1742_H */
--- /dev/null
+#ifndef __ASM_MACH_JMR3927_MANGLE_PORT_H
+#define __ASM_MACH_JMR3927_MANGLE_PORT_H
+
+extern unsigned long __swizzle_addr_b(unsigned long port);
+#define __swizzle_addr_w(port) (port)
+#define __swizzle_addr_l(port) (port)
+#define __swizzle_addr_q(port) (port)
+
+#define ioswabb(a,x) (x)
+#define __mem_ioswabb(a,x) (x)
+#define ioswabw(a,x) le16_to_cpu(x)
+#define __mem_ioswabw(a,x) (x)
+#define ioswabl(a,x) le32_to_cpu(x)
+#define __mem_ioswabl(a,x) (x)
+#define ioswabq(a,x) le64_to_cpu(x)
+#define __mem_ioswabq(a,x) (x)
+
+#endif /* __ASM_MACH_JMR3927_MANGLE_PORT_H */
#define RTC_ALWAYS_BCD 0
-#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1970)
+#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
#endif /* __ASM_MACH_MALTA_MC146818RTC_H */
*
* RTC routines for PC style attached Dallas chip with ARC epoch.
*/
-#ifndef __ASM_MACH_RM200_MC146818RTC_H
-#define __ASM_MACH_RM200_MC146818RTC_H
+#ifndef __ASM_MACH_RM_MC146818RTC_H
+#define __ASM_MACH_RM_MC146818RTC_H
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
+#else
#define mc146818_decode_year(year) ((year) + 1980)
+#endif
#include_next <mc146818rtc.h>
-#endif /* __ASM_MACH_RM200_MC146818RTC_H */
+#endif /* __ASM_MACH_RM_MC146818RTC_H */
extern char *prom_getcmdline(void);
extern char *prom_getenv(char *name);
-extern void setup_prom_printf(int tty_no);
-extern void prom_printf(char *fmt, ...);
extern void prom_init_cmdline(void);
extern void prom_meminit(void);
extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
#ifndef __ASM_MIPS_MT_H
#define __ASM_MIPS_MT_H
+#include <linux/cpumask.h>
+
extern cpumask_t mt_fpu_cpumask;
extern unsigned long mt_fpemul_threshold;
extern void prom_putchar(char c);
extern char prom_getchar(void);
-/* Generic printf() using ARCS console I/O. */
-extern void prom_printf(char *fmt, ...);
-
/* Memory descriptor management. */
#define PROM_MAX_PMEMBLOCKS 32
struct prom_pmemblock {
extern void bcm1480_unmask_irq(int cpu, int irq);
extern void bcm1480_smp_finish(void);
-extern void prom_printf(char *fmt, ...);
-
#define AT_spin \
__asm__ __volatile__ ( \
".set noat\n" \
extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
+struct mm_struct;
+struct task_struct;
+
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
void smtc_flush_tlb_asid(unsigned long asid);
#ifndef __ASM_SMTC_IPI_H
#define __ASM_SMTC_IPI_H
+#include <linux/spinlock.h>
+
//#define SMTC_IPI_DEBUG
#ifdef SMTC_IPI_DEBUG
#define A20R_PT_TIM0_ACK 0xbc050000
#define A20R_PT_TIM1_ACK 0xbc060000
-#define SNI_MIPS_IRQ_CPU_BASE 16
-#define SNI_MIPS_IRQ_CPU_TIMER (SNI_MIPS_IRQ_CPU_BASE+7)
+#define SNI_MIPS_IRQ_CPU_TIMER (MIPS_CPU_IRQ_BASE+7)
-#define SNI_A20R_IRQ_BASE SNI_MIPS_IRQ_CPU_BASE
+#define SNI_A20R_IRQ_BASE MIPS_CPU_IRQ_BASE
#define SNI_A20R_IRQ_TIMER (SNI_A20R_IRQ_BASE+5)
#define SNI_DS1216_A20R_BASE 0xbc081ffc
#define SNI_PCIT_INT_START 24
#define SNI_PCIT_INT_END 30
-#define PCIT_IRQ_ETHERNET (SNI_MIPS_IRQ_CPU_BASE + 5)
+#define PCIT_IRQ_ETHERNET (MIPS_CPU_IRQ_BASE + 5)
#define PCIT_IRQ_INTA (SNI_PCIT_INT_START + 0)
#define PCIT_IRQ_INTB (SNI_PCIT_INT_START + 1)
#define PCIT_IRQ_INTC (SNI_PCIT_INT_START + 2)
#define PCIMT_IRQ_EISA 29
#define PCIMT_IRQ_SCSI 30
-#define PCIMT_IRQ_ETHERNET (SNI_MIPS_IRQ_CPU_BASE+6)
+#define PCIMT_IRQ_ETHERNET (MIPS_CPU_IRQ_BASE+6)
#if 0
#define PCIMT_IRQ_TEMPERATURE 24
" .set noreorder # __raw_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
- " bnez %1, 2f \n"
+ " bltz %1, 2f \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
" .set reorder \n"
" .set noreorder # __raw_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
- " bnez %1, 2f \n"
+ " bltz %1, 2f \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
" beqz %1, 1b \n"
__cu_len; \
})
+extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
+
#define __copy_to_user_inatomic(to,from,n) \
({ \
void __user *__cu_to; \
#define __NR_kexec_load (__NR_Linux + 311)
#define __NR_getcpu (__NR_Linux + 312)
#define __NR_epoll_pwait (__NR_Linux + 313)
+#define __NR_ioprio_set (__NR_Linux + 314)
+#define __NR_ioprio_get (__NR_Linux + 315)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 313
+#define __NR_Linux_syscalls 315
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 313
+#define __NR_O32_Linux_syscalls 315
#if _MIPS_SIM == _MIPS_SIM_ABI64
#define __NR_kexec_load (__NR_Linux + 270)
#define __NR_getcpu (__NR_Linux + 271)
#define __NR_epoll_pwait (__NR_Linux + 272)
+#define __NR_ioprio_set (__NR_Linux + 273)
+#define __NR_ioprio_get (__NR_Linux + 274)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 272
+#define __NR_Linux_syscalls 274
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 272
+#define __NR_64_Linux_syscalls 274
#if _MIPS_SIM == _MIPS_SIM_NABI32
#define __NR_kexec_load (__NR_Linux + 274)
#define __NR_getcpu (__NR_Linux + 275)
#define __NR_epoll_pwait (__NR_Linux + 276)
+#define __NR_ioprio_set (__NR_Linux + 277)
+#define __NR_ioprio_get (__NR_Linux + 278)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 276
+#define __NR_Linux_syscalls 278
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 276
+#define __NR_N32_Linux_syscalls 278
#ifdef __KERNEL__
.busy_factor = 32, \
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
- .per_cpu_gain = 100, \
.busy_idx = 3, \
.idle_idx = 1, \
.newidle_idx = 2, \
* void check_bugs(void);
*/
-static void __init check_bugs(void)
+static inline void check_bugs(void)
{
/* s390 has no bugs ... */
}
extern u32 ipl_flags;
extern u16 ipl_devno;
+extern u32 dump_prefix_page;
extern void do_reipl(void);
extern void ipl_save_parameters(void);
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+#ifndef __ASSEMBLY__
struct cache_info {
unsigned int ways; /* Number of cache ways */
unsigned int sets; /* Number of cache sets */
unsigned long flags;
};
-
+#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */
#define HAVE_ARCH_UNMAPPED_AREA
-/* Page flag for lazy dcache write-back for the aliasing UP caches */
-#define PG_dcache_dirty PG_arch_1
-
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */
/* 32KB cache, 4kb PAGE sizes need to check bit 12 */
#define CACHE_ALIAS 0x00001000
+#define PG_mapped PG_arch_1
+
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/* Initialization of P3 area for copy_user_page */
void p3_cache_init(void);
+#define PG_mapped PG_arch_1
+
#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
extern unsigned int kobjsize(const void *objp);
#endif /* !CONFIG_MMU */
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+#endif
+
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
+#define TIF_SINGLESTEP 5 /* singlestepping active */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_FREEZE (1<<TIF_FREEZE)
return (dma_addr == PCI_DMA_ERROR_CODE);
}
+struct device_node;
+extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
+
#endif /* __KERNEL__ */
/* generic pci stuff */
extern int of_device_is_compatible(struct device_node *device, const char *);
extern void *of_get_property(struct device_node *node, const char *name,
int *lenp);
+#define get_property(node,name,lenp) of_get_property(node,name,lenp)
extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
extern int of_getintprop_default(struct device_node *np,
const char *name,
#include <asm/delay.h>
#include <asm/oplib.h>
-extern spinlock_t dma_spin_lock;
-
-#define claim_dma_lock() \
-({ unsigned long flags; \
- spin_lock_irqsave(&dma_spin_lock, flags); \
- flags; \
-})
-
-#define release_dma_lock(__flags) \
- spin_unlock_irqrestore(&dma_spin_lock, __flags);
-
/* These are irrelevant for Sparc DMA, but we leave it in so that
* things can compile.
*/
#define for_each_dvma(dma) \
for((dma) = dma_chain; (dma); (dma) = (dma)->next)
-extern int get_dma_list(char *);
-extern int request_dma(unsigned int, __const__ char *);
-extern void free_dma(unsigned int);
-
/* From PCI */
#ifdef CONFIG_PCI
#define EXTRA_FLOPPY_PARAMS
+static DEFINE_SPINLOCK(dma_spin_lock);
+
+#define claim_dma_lock() \
+({ unsigned long flags; \
+ spin_lock_irqsave(&dma_spin_lock, flags); \
+ flags; \
+})
+
+#define release_dma_lock(__flags) \
+ spin_unlock_irqrestore(&dma_spin_lock, __flags);
+
#endif /* !(__ASM_SPARC64_FLOPPY_H) */
*/
#define HAS_DMA
+static DEFINE_SPINLOCK(dma_spin_lock);
+
+#define claim_dma_lock() \
+({ unsigned long flags; \
+ spin_lock_irqsave(&dma_spin_lock, flags); \
+ flags; \
+})
+
+#define release_dma_lock(__flags) \
+ spin_unlock_irqrestore(&dma_spin_lock, __flags);
+
static struct sparc_ebus_info {
struct ebus_dma_info info;
unsigned int addr;
unsigned int count;
+ int lock;
} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];
+static __inline__ int request_dma(unsigned int dmanr, const char *device_id)
+{
+ if (dmanr >= PARPORT_PC_MAX_PORTS)
+ return -EINVAL;
+ if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
+ return -EBUSY;
+ return 0;
+}
+
+static __inline__ void free_dma(unsigned int dmanr)
+{
+ if (dmanr >= PARPORT_PC_MAX_PORTS) {
+ printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
+ return;
+ }
+ if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
+ printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
+ return;
+ }
+}
+
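
Since sparc64 has no ISA DMA controller, request_dma() and free_dma() are
emulated above with an atomic xchg() on a per-port lock flag, giving
parport_pc the interface it expects. A hedged sketch of the claim/use/release
pattern (my_port is illustrative):

/* Hedged sketch of the claim/release pattern built on the xchg() lock. */
static int claim_parport_dma(unsigned int my_port)
{
	if (request_dma(my_port, "parport") < 0)
		return -EBUSY;	/* another user owns this channel */

	/* ... program and use the EBUS DMA engine here ... */

	free_dma(my_port);
	return 0;
}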
static __inline__ void enable_dma(unsigned int dmanr)
{
ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
return PCI_IRQ_NONE;
}
+struct device_node;
+extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
+
#endif /* __KERNEL__ */
#endif /* __SPARC64_PCI_H */
extern int of_device_is_compatible(struct device_node *device, const char *);
extern void *of_get_property(struct device_node *node, const char *name,
int *lenp);
+#define get_property(node,name,lenp) of_get_property(node,name,lenp)
extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
extern int of_getintprop_default(struct device_node *np,
const char *name,
static inline pte_t pte_mkread(pte_t pte)
{
- pte_set_bits(pte, _PAGE_RW);
+ pte_set_bits(pte, _PAGE_USER);
return(pte_mknewprot(pte));
}
#include <asm/types.h>
#include <asm/mpspec.h>
+#include <asm/apicdef.h>
/*
* Intel IO-APIC support for SMP and UP systems.
extern atomic_t nmi_active;
extern unsigned int nmi_watchdog;
-#define NMI_DEFAULT -1
+#define NMI_DEFAULT 0
#define NMI_NONE 0
#define NMI_IO_APIC 1
#define NMI_LOCAL_APIC 2
extern int swiotlb_force;
#ifdef CONFIG_SWIOTLB
-#define SWIOTLB_ARCH_NEED_ALLOC
extern int swiotlb;
#else
#define swiotlb 0
.newidle_idx = 0, \
.wake_idx = 1, \
.forkexec_idx = 1, \
- .per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_FORK \
| SD_BALANCE_EXEC \
-/*
- * linux/include/asm-x86_64/tsc.h
- *
- * x86_64 TSC related functions
- */
-#ifndef _ASM_x86_64_TSC_H
-#define _ASM_x86_64_TSC_H
-
-#include <asm/processor.h>
-
-/*
- * Standard way to access the cycle counter.
- */
-typedef unsigned long long cycles_t;
-
-extern unsigned int cpu_khz;
-extern unsigned int tsc_khz;
-
-static inline cycles_t get_cycles(void)
-{
- unsigned long long ret = 0;
-
-#ifndef CONFIG_X86_TSC
- if (!cpu_has_tsc)
- return 0;
-#endif
-
-#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
- rdtscll(ret);
-#endif
- return ret;
-}
-
-/* Like get_cycles, but make sure the CPU is synchronized. */
-static __always_inline cycles_t get_cycles_sync(void)
-{
- unsigned long long ret;
-#ifdef X86_FEATURE_SYNC_RDTSC
- unsigned eax;
-
- /*
- * Don't do an additional sync on CPUs where we know
- * RDTSC is already synchronous:
- */
- alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
- "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
-#else
- sync_core();
-#endif
- rdtscll(ret);
-
- return ret;
-}
-
-extern void tsc_init(void);
-extern void mark_tsc_unstable(void);
-extern int unsynchronized_tsc(void);
-
-/*
- * Boot-time check whether the TSCs are synchronized across
- * all CPUs/cores:
- */
-extern void check_tsc_sync_source(int cpu);
-extern void check_tsc_sync_target(void);
-
-#endif
+#include <asm-i386/tsc.h>
};
#define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0)
-#define ata_id_is_sata(id) ((id)[93] == 0)
#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
#define ata_id_hpa_enabled(id) ((id)[85] & (1 << 10))
return mver;
}
+static inline int ata_id_is_sata(const u16 *id)
+{
+ return ata_id_major_version(id) >= 5 && id[93] == 0;
+}
+
static inline int ata_id_current_chs_valid(const u16 *id)
{
/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
static inline int ata_drive_40wire(const u16 *dev_id)
{
- if (ata_id_major_version(dev_id) >= 5 && ata_id_is_sata(dev_id))
+ if (ata_id_is_sata(dev_id))
return 0; /* SATA */
if ((dev_id[93] & 0xE000) == 0x6000)
return 0; /* 80 wire */
#ifndef _LINUX_AUDIT_H_
#define _LINUX_AUDIT_H_
+#include <linux/types.h>
#include <linux/elf-em.h>
/* The netlink messages for the audit system is divided into blocks:
compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
const compat_ulong_t __user *new_nodes);
+/*
+ * epoll (fs/eventpoll.c) compat bits follow ...
+ */
+#ifndef CONFIG_HAS_COMPAT_EPOLL_EVENT
+struct epoll_event;
+#define compat_epoll_event epoll_event
+#else
+asmlinkage long compat_sys_epoll_ctl(int epfd, int op, int fd,
+ struct compat_epoll_event __user *event);
+asmlinkage long compat_sys_epoll_wait(int epfd,
+ struct compat_epoll_event __user *events,
+ int maxevents, int timeout);
+#endif
+asmlinkage long compat_sys_epoll_pwait(int epfd,
+ struct compat_epoll_event __user *events,
+ int maxevents, int timeout,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
* mismatches */
-int cpufreq_register_driver(const struct cpufreq_driver *driver_data);
-int cpufreq_unregister_driver(const struct cpufreq_driver *driver_data);
+int cpufreq_register_driver(struct cpufreq_driver *driver_data);
+int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state);
+++ /dev/null
-/*
- * ds1742rtc.h - register definitions for the Real-Time-Clock / CMOS RAM
- *
- * Copyright (C) 1999-2001 Toshiba Corporation
- * Copyright (C) 2003 Ralf Baechle (ralf@linux-mips.org)
- *
- * Permission is hereby granted to copy, modify and redistribute this code
- * in terms of the GNU Library General Public License, Version 2 or later,
- * at your option.
- */
-#ifndef __LINUX_DS1742RTC_H
-#define __LINUX_DS1742RTC_H
-
-#include <asm/ds1742.h>
-
-#define RTC_BRAM_SIZE 0x800
-#define RTC_OFFSET 0x7f8
-
-/*
- * Register summary
- */
-#define RTC_CONTROL (RTC_OFFSET + 0)
-#define RTC_CENTURY (RTC_OFFSET + 0)
-#define RTC_SECONDS (RTC_OFFSET + 1)
-#define RTC_MINUTES (RTC_OFFSET + 2)
-#define RTC_HOURS (RTC_OFFSET + 3)
-#define RTC_DAY (RTC_OFFSET + 4)
-#define RTC_DATE (RTC_OFFSET + 5)
-#define RTC_MONTH (RTC_OFFSET + 6)
-#define RTC_YEAR (RTC_OFFSET + 7)
-
-#define RTC_CENTURY_MASK 0x3f
-#define RTC_SECONDS_MASK 0x7f
-#define RTC_DAY_MASK 0x07
-
-/*
- * Bits in the Control/Century register
- */
-#define RTC_WRITE 0x80
-#define RTC_READ 0x40
-
-/*
- * Bits in the Seconds register
- */
-#define RTC_STOP 0x80
-
-/*
- * Bits in the Day register
- */
-#define RTC_BATT_FLAG 0x80
-#define RTC_FREQ_TEST 0x40
-
-#endif /* __LINUX_DS1742RTC_H */
--- /dev/null
+#ifndef _GPIO_KEYS_H
+#define _GPIO_KEYS_H
+
+struct gpio_keys_button {
+ /* Configuration parameters */
+ int keycode;
+ int gpio;
+ int active_low;
+ char *desc;
+};
+
+struct gpio_keys_platform_data {
+ struct gpio_keys_button *buttons;
+ int nbuttons;
+};
+
+#endif
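
With gpio_keys.h promoted from a machine-specific header to include/linux, any
board can describe its buttons for the generic gpio-keys driver. A hedged
board-file sketch; the GPIO numbers and keycodes are made up:

/* Hedged sketch: board code describing two buttons.
 * GPIO numbers and keycodes are illustrative only. */
static struct gpio_keys_button my_buttons[] = {
	{ .keycode = KEY_POWER, .gpio = 0, .active_low = 1, .desc = "power" },
	{ .keycode = KEY_SLEEP, .gpio = 1, .active_low = 1, .desc = "sleep" },
};

static struct gpio_keys_platform_data my_button_data = {
	.buttons	= my_buttons,
	.nbuttons	= ARRAY_SIZE(my_buttons),
};

static struct platform_device my_button_device = {
	.name	= "gpio-keys",
	.id	= -1,
	.dev	= { .platform_data = &my_button_data },
};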
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2001 Vojtech Pavlik
- * Copyright (c) 2006 Jiri Kosina
+ * Copyright (c) 2006-2007 Jiri Kosina
*/
/*
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00020000
#define HID_QUIRK_IGNORE_MOUSE 0x00040000
#define HID_QUIRK_SONY_PS3_CONTROLLER 0x00080000
+#define HID_QUIRK_LOGITECH_S510_DESCRIPTOR 0x00100000
+#define HID_QUIRK_DUPLICATE_USAGES 0x00200000
/*
* This is the global environment of the parser. This information is
*/
#define HID_MAX_DESCRIPTOR_SIZE 4096
-#define HID_MAX_USAGES 1024
+#define HID_MAX_USAGES 8192
#define HID_DEFAULT_NUM_COLLECTIONS 16
struct hid_local {
* HRTIMER_CB_IRQSAFE: Callback may run in hardirq context
* HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and
* does not restart the timer
- * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in softirq context
+ * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context
 * Special mode for tick emulation
*/
enum hrtimer_cb_mode {
};
/**
- * struct hrtimer_base - the timer base for a specific clock
+ * struct hrtimer_clock_base - the timer base for a specific clock
+ * @cpu_base: per cpu clock base
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
* @active: red black tree root node for the active timers
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
+#include <linux/shm.h>
#include <asm/tlbflush.h>
struct ctl_table;
static inline int is_file_hugepages(struct file *file)
{
- return file->f_op == &hugetlbfs_file_operations;
+ if (file->f_op == &hugetlbfs_file_operations)
+ return 1;
+ if (is_file_shm_hugepages(file))
+ return 1;
+
+ return 0;
}
static inline void set_file_hugepages(struct file *file)
typedef struct ide_pio_timings_s {
int setup_time; /* Address setup (ns) minimum */
int active_time; /* Active pulse (ns) minimum */
- int cycle_time; /* Cycle time (ns) minimum = (setup + active + recovery) */
+ int cycle_time; /* Cycle time (ns) minimum = */
+ /* active + recovery (+ setup for some chips) */
} ide_pio_timings_t;
typedef struct ide_pio_data_s {
#ifdef __KERNEL__
struct pppoe_opt {
struct net_device *dev; /* device associated with socket*/
+ int ifindex; /* ifindex of device associated with socket */
struct pppoe_addr pa; /* what this socket is bound to*/
struct sockaddr_pppox relay; /* what socket data will be
relayed to (PPPoE relaying) */
unsigned short num;
};
#define pppoe_dev proto.pppoe.dev
+#define pppoe_ifindex proto.pppoe.ifindex
#define pppoe_pa proto.pppoe.pa
#define pppoe_relay proto.pppoe.relay
* depends on completely exhausting the VLAN identifier space. Thus
* it gives constant time look-up, but in many cases it wastes memory.
*/
-#define VLAN_GROUP_ARRAY_LEN 4096
+#define VLAN_GROUP_ARRAY_LEN 4096
+#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
+#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS)
struct vlan_group {
int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */
struct hlist_node hlist; /* linked list */
- struct net_device *vlan_devices[VLAN_GROUP_ARRAY_LEN];
+ struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
struct rcu_head rcu;
};
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, int vlan_id)
+{
+ struct net_device **array;
+ array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+ return array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN];
+}
+
+static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
+ struct net_device *dev)
+{
+ struct net_device **array;
+ if (!vg)
+ return;
+ array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+ array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
+}
+
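
Splitting the 4096-entry table into 8 parts of 512 lets a group carry pointers
to only the sub-arrays it actually needs (presumably allocated on demand), and
the accessors above hide the two-level indexing. The arithmetic as a
standalone check:

#include <stdio.h>

#define VLAN_GROUP_ARRAY_LEN 4096
#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
#define VLAN_GROUP_ARRAY_PART_LEN \
	(VLAN_GROUP_ARRAY_LEN / VLAN_GROUP_ARRAY_SPLIT_PARTS)

int main(void)
{
	int vlan_id = 1000;

	/* prints "part 1, slot 488": 1000 = 1 * 512 + 488 */
	printf("part %d, slot %d\n",
	       vlan_id / VLAN_GROUP_ARRAY_PART_LEN,
	       vlan_id % VLAN_GROUP_ARRAY_PART_LEN);
	return 0;
}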
struct vlan_priority_tci_mapping {
unsigned long priority;
unsigned short vlan_qos; /* This should be shifted when first set, so we only do it
return NET_RX_DROP;
}
- skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
+ skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK);
if (skb->dev == NULL) {
dev_kfree_skb_any(skb);
extern void ip_mc_down(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
+extern void ip_mc_rejoin_group(struct ip_mc_list *im);
+
#endif
#endif
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
+#include <linux/errno.h>
#include <asm/irq.h>
#include <asm/ptrace.h>
/*
* Debugging / Tracing functions
*/
+
char *capi_cmd2str(__u8 cmd, __u8 subcmd);
-char *capi_cmsg2str(_cmsg * cmsg);
-char *capi_message2str(__u8 * msg);
+
+typedef struct {
+ u_char *buf;
+ u_char *p;
+ size_t size;
+ size_t pos;
+} _cdebbuf;
+
+#define CDEBUG_SIZE 1024
+#define CDEBUG_GSIZE 4096
+
+_cdebbuf *cdebbuf_alloc(void);
+void cdebbuf_free(_cdebbuf *cdb);
+int cdebug_init(void);
+void cdebug_exit(void);
+
+_cdebbuf *capi_cmsg2str(_cmsg *cmsg);
+_cdebbuf *capi_message2str(__u8 *msg);
/*-----------------------------------------------------------------------*/
#include <asm/types.h>
#include <linux/ioctl.h>
-#define KVM_API_VERSION 3
+#define KVM_API_VERSION 4
/*
* Architectural interrupt line count, and the size of the bitmap needed
/* for KVM_RUN */
struct kvm_run {
/* in */
- __u32 vcpu;
__u32 emulated; /* skip current instruction */
__u32 mmio_completed; /* mmio request completed */
__u8 request_interrupt_window;
- __u8 padding1[3];
+ __u8 padding1[7];
/* out */
__u32 exit_type;
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
- /* in */
- __u32 vcpu;
- __u32 padding;
-
/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
__u64 rax, rbx, rcx, rdx;
__u64 rsi, rdi, rsp, rbp;
/* for KVM_GET_SREGS and KVM_SET_SREGS */
struct kvm_sregs {
- /* in */
- __u32 vcpu;
- __u32 padding;
-
/* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
struct kvm_segment cs, ds, es, fs, gs, ss;
struct kvm_segment tr, ldt;
/* for KVM_GET_MSRS and KVM_SET_MSRS */
struct kvm_msrs {
- __u32 vcpu;
__u32 nmsrs; /* number of msrs in entries */
+ __u32 pad;
struct kvm_msr_entry entries[0];
};
struct kvm_translation {
/* in */
__u64 linear_address;
- __u32 vcpu;
- __u32 padding;
/* out */
__u64 physical_address;
/* for KVM_INTERRUPT */
struct kvm_interrupt {
/* in */
- __u32 vcpu;
__u32 irq;
};
/* for KVM_DEBUG_GUEST */
struct kvm_debug_guest {
/* int */
- __u32 vcpu;
__u32 enabled;
+ __u32 pad;
struct kvm_breakpoint breakpoints[4];
__u32 singlestep;
};
#define KVMIO 0xAE
+/*
+ * ioctls for /dev/kvm fds:
+ */
#define KVM_GET_API_VERSION _IO(KVMIO, 1)
+#define KVM_CREATE_VM _IO(KVMIO, 2) /* returns a VM fd */
+#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 15, struct kvm_msr_list)
+
+/*
+ * ioctls for VM fds
+ */
+#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 10, struct kvm_memory_region)
+/*
+ * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
+ * a vcpu fd.
+ */
+#define KVM_CREATE_VCPU _IOW(KVMIO, 11, int)
+#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 12, struct kvm_dirty_log)
+
+/*
+ * ioctls for vcpu fds
+ */
#define KVM_RUN _IOWR(KVMIO, 2, struct kvm_run)
-#define KVM_GET_REGS _IOWR(KVMIO, 3, struct kvm_regs)
+#define KVM_GET_REGS _IOR(KVMIO, 3, struct kvm_regs)
#define KVM_SET_REGS _IOW(KVMIO, 4, struct kvm_regs)
-#define KVM_GET_SREGS _IOWR(KVMIO, 5, struct kvm_sregs)
+#define KVM_GET_SREGS _IOR(KVMIO, 5, struct kvm_sregs)
#define KVM_SET_SREGS _IOW(KVMIO, 6, struct kvm_sregs)
#define KVM_TRANSLATE _IOWR(KVMIO, 7, struct kvm_translation)
#define KVM_INTERRUPT _IOW(KVMIO, 8, struct kvm_interrupt)
#define KVM_DEBUG_GUEST _IOW(KVMIO, 9, struct kvm_debug_guest)
-#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 10, struct kvm_memory_region)
-#define KVM_CREATE_VCPU _IOW(KVMIO, 11, int /* vcpu_slot */)
-#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 12, struct kvm_dirty_log)
#define KVM_GET_MSRS _IOWR(KVMIO, 13, struct kvm_msrs)
-#define KVM_SET_MSRS _IOWR(KVMIO, 14, struct kvm_msrs)
-#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 15, struct kvm_msr_list)
+#define KVM_SET_MSRS _IOW(KVMIO, 14, struct kvm_msrs)
#endif
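
The regrouped ioctl list reflects the API version 4 split into three
file-descriptor levels, which is also why the vcpu fields disappear from the
argument structs above. A hedged user-space sketch of the resulting call
sequence (error handling and memory-slot setup omitted):

/* Hedged sketch of the three-level fd usage implied above. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm, vm, vcpu;
	struct kvm_regs regs;

	kvm = open("/dev/kvm", O_RDWR);
	ioctl(kvm, KVM_GET_API_VERSION, 0);	/* expect 4 */

	vm = ioctl(kvm, KVM_CREATE_VM, 0);	/* returns a VM fd */
	vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);	/* slot 0, returns a vcpu fd */

	/* per-vcpu ioctls no longer carry a vcpu field */
	ioctl(vcpu, KVM_GET_REGS, &regs);
	return 0;
}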
--- /dev/null
+#ifndef __LINUX_KVM_PARA_H
+#define __LINUX_KVM_PARA_H
+
+/*
+ * Guest OS interface for KVM paravirtualization
+ *
+ * Note: this interface is totally experimental, and is certain to change
+ * as we make progress.
+ */
+
+/*
+ * Per-VCPU descriptor area shared between guest and host. Writable by
+ * both guest and host. Registered with the host by the guest when
+ * a guest acknowledges paravirtual mode.
+ *
+ * NOTE: all addresses are guest-physical addresses (gpa), to make it
+ * easier for the hypervisor to map between the various addresses.
+ */
+struct kvm_vcpu_para_state {
+ /*
+ * API version information for compatibility. If there's any support
+ * mismatch (too old host trying to execute too new guest) then
+ * the host will deny entry into paravirtual mode. Any other
+ * combination (new host + old guest and new host + new guest)
+ * is supposed to work - new host versions will support all old
+ * guest API versions.
+ */
+ u32 guest_version;
+ u32 host_version;
+ u32 size;
+ u32 ret;
+
+ /*
+ * The address of the vm exit instruction (VMCALL or VMMCALL),
+ * which the host will patch according to the CPU model the
+ * VM runs on:
+ */
+ u64 hypercall_gpa;
+
+} __attribute__ ((aligned(PAGE_SIZE)));
+
+#define KVM_PARA_API_VERSION 1
+
+/*
+ * This is used for an RDMSR's ECX parameter to probe for a KVM host.
+ * Hopefully no CPU vendor will use up this number. This is placed well
+ * out of the way of the typical space occupied by CPU vendors' MSR indices,
+ * and we think (or at least hope) it won't be occupied in the future
+ * either.
+ */
+#define MSR_KVM_API_MAGIC 0x87655678
+
+#define KVM_EINVAL 1
+
+/*
+ * Hypercall calling convention:
+ *
+ * Each hypercall may have 0-6 parameters.
+ *
+ * 64-bit hypercall index is in RAX, goes from 0 to __NR_hypercalls-1
+ *
+ * 64-bit parameters 1-6 are in the standard gcc x86_64 calling convention
+ * order: RDI, RSI, RDX, RCX, R8, R9.
+ *
+ * 32-bit index is EBX, parameters are: EAX, ECX, EDX, ESI, EDI, EBP.
+ * (the first 3 are according to the gcc regparm calling convention)
+ *
+ * No registers are clobbered by the hypercall, except that the
+ * return value is in RAX.
+ */
+#define __NR_hypercalls 0
+
+#endif
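
The calling convention documented above maps directly onto inline assembly. A
hedged sketch of a two-argument 64-bit hypercall stub; the literal vmcall is
illustrative only, since the text says the host patches the instruction at
hypercall_gpa to match the CPU model:

/* Hedged sketch of a 64-bit hypercall per the convention above:
 * index in RAX, arguments in RDI/RSI, return value in RAX. */
static inline long kvm_hypercall2(unsigned long nr,
				  unsigned long a0, unsigned long a1)
{
	long ret;

	asm volatile("vmcall"		/* host-patched in reality */
		     : "=a" (ret)
		     : "a" (nr), "D" (a0), "S" (a1)
		     : "memory");
	return ret;
}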
void *private_data;
const struct ata_port_operations *ops;
unsigned long flags;
- int simplex_claimed; /* Keep seperate in case we
- ever need to do this locked */
+ struct ata_port *simplex_claimed; /* channel owning the DMA */
struct ata_port *ports[0];
};
/* error history */
struct ata_ering ering;
+ int spdn_cnt;
unsigned int horkage; /* List of broken features */
#ifdef CONFIG_SATA_ACPI
/* ACPI objects info */
spinlock_t *lock;
unsigned long flags; /* ATA_FLAG_xxx */
unsigned int pflags; /* ATA_PFLAG_xxx */
- unsigned int id; /* unique id req'd by scsi midlyr */
- unsigned int port_no; /* unique port #; from zero */
+ unsigned int print_id; /* user visible unique port ID */
+ unsigned int port_no; /* 0 based port no. inside the host */
struct ata_prd *prd; /* our SG list */
dma_addr_t prd_dma; /* and its DMA mapping */
extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
unsigned int n_ports);
extern void ata_pci_remove_one (struct pci_dev *pdev);
+#ifdef CONFIG_PM
extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern int ata_pci_device_resume(struct pci_dev *pdev);
+#endif
extern int ata_pci_clear_simplex(struct pci_dev *pdev);
#endif /* CONFIG_PCI */
extern int ata_device_add(const struct ata_probe_ent *ent);
extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
extern int ata_port_online(struct ata_port *ap);
extern int ata_port_offline(struct ata_port *ap);
+#ifdef CONFIG_PM
extern int ata_scsi_device_resume(struct scsi_device *);
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t mesg);
extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
+#endif
extern int ata_ratelimit(void);
extern int ata_busy_sleep(struct ata_port *ap,
unsigned long timeout_pat, unsigned long timeout);
extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
unsigned long interval_msec,
unsigned long timeout_msec);
+extern unsigned int ata_dev_try_classify(struct ata_port *, unsigned int, u8 *);
/*
* Default driver ops implementations
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+extern void ata_dev_disable(struct ata_device *adev);
extern void ata_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
+extern void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown);
extern unsigned long ata_device_blacklisted(const struct ata_device *dev);
extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
extern void ata_bmdma_start (struct ata_queued_cmd *qc);
* printk helpers
*/
#define ata_port_printk(ap, lv, fmt, args...) \
- printk(lv"ata%u: "fmt, (ap)->id , ##args)
+ printk(lv"ata%u: "fmt, (ap)->print_id , ##args)
#define ata_dev_printk(dev, lv, fmt, args...) \
- printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
+ printk(lv"ata%u.%02u: "fmt, (dev)->ap->print_id, (dev)->devno , ##args)
/*
* ata_eh_info helpers
return ap->ops->check_status(ap);
}
+/**
+ * ata_ncq_enabled - Test whether NCQ is enabled
+ * @dev: ATA device to test for
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * 1 if NCQ is enabled for @dev, 0 otherwise.
+ */
+static inline int ata_ncq_enabled(struct ata_device *dev)
+{
+ return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
+ ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+}
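
A short usage sketch of the combined flag test (setup_queued_io() is
illustrative):

/* Hedged sketch: gate NCQ-only configuration on the helper above. */
static void setup_queued_io(struct ata_device *dev)
{
	if (!ata_ncq_enabled(dev))
		return;		/* PIO-only, NCQ off, or no NCQ support */

	/* ... NCQ-specific setup would go here ... */
}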
/**
* ata_pause - Flush writes and pause 400 nanoseconds.
#define HPFS_SUPER_MAGIC 0xf995e849
#define ISOFS_SUPER_MAGIC 0x9660
#define JFFS2_SUPER_MAGIC 0x72b6
+#define KVMFS_SUPER_MAGIC 0x19700426
#define MINIX_SUPER_MAGIC 0x137F /* original minix fs */
#define MINIX_SUPER_MAGIC2 0x138F /* minix fs, 30 char names */
typedef struct page *new_page_t(struct page *, unsigned long private, int **);
+/* Check if a vma is migratable */
+static inline int vma_migratable(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
+ return 0;
+ return 1;
+}
+
#ifdef CONFIG_MIGRATION
extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
extern int putback_lru_pages(struct list_head *l);
extern void show_free_areas(void);
#ifdef CONFIG_SHMEM
-struct page *shmem_nopage(struct vm_area_struct *vma,
- unsigned long address, int *type);
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
unsigned long addr);
int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
-#define shmem_nopage filemap_nopage
-
static inline int shmem_lock(struct file *file, int lock,
struct user_struct *user)
{
}
#endif
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
-extern int shmem_mmap(struct file *file, struct vm_area_struct *vma);
int shmem_zero_setup(struct vm_area_struct *);
#define MMC_BUS_WIDTH_1 0
#define MMC_BUS_WIDTH_4 2
+
+ unsigned char timing; /* timing specification used */
+
+#define MMC_TIMING_LEGACY 0
+#define MMC_TIMING_MMC_HS 1
+#define MMC_TIMING_SD_HS 2
};
struct mmc_host_ops {
#define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */
#define MMC_CAP_MULTIWRITE (1 << 1) /* Can accurately report bytes sent to card on error */
#define MMC_CAP_BYTEBLOCK (1 << 2) /* Can do non-log2 block sizes */
+#define MMC_CAP_MMC_HIGHSPEED (1 << 3) /* Can do MMC high-speed timing */
+#define MMC_CAP_SD_HIGHSPEED (1 << 4) /* Can do SD high-speed timing */
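
To see how the new timing field and capability bits fit together: a host
driver advertises the caps at probe time and reacts to ios->timing in its
set_ios handler. The sketch below is illustrative only; struct example_host
and EXAMPLE_HS_ENABLE are made-up names, not from this patch.

#include <linux/mmc/host.h>

#define EXAMPLE_HS_ENABLE	(1 << 0)	/* made-up controller bit */

struct example_host {
	u32 ctrl;		/* hypothetical register shadow */
};

static void example_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct example_host *host = mmc_priv(mmc);

	/* Switch the controller into its fast mode when the core
	 * requests high-speed timing, back to legacy otherwise. */
	if (ios->timing == MMC_TIMING_MMC_HS ||
	    ios->timing == MMC_TIMING_SD_HS)
		host->ctrl |= EXAMPLE_HS_ENABLE;
	else
		host->ctrl &= ~EXAMPLE_HS_ENABLE;
}

static void example_advertise(struct mmc_host *mmc)
{
	/* tell the core this host can do high-speed transfers */
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
}
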
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
#define MV643XX_ETH_NAME "mv643xx_eth"
struct mv643xx_eth_platform_data {
- char *mac_addr; /* pointer to mac address */
+ int port_number;
u16 force_phy_addr; /* force override if phy_addr == 0 */
u16 phy_addr;
u32 tx_sram_size;
u32 rx_sram_addr;
u32 rx_sram_size;
+ u8 mac_addr[6]; /* mac address if non-zero */
};
#endif /* __ASM_MV643XX_H */
int packet_size;
unsigned char *packet; /* Here we prepare requests and
receive replies */
+ unsigned char *txbuf; /* Storage for current request */
+ unsigned char *rxbuf; /* Storage for reply to current request */
int lock; /* To prevent mismatch in protocols. */
struct mutex mutex;
int ret = NF_ACCEPT;
if (ct) {
- if (!is_confirmed(ct))
+ if (!is_confirmed(ct) && !is_dying(ct))
ret = __ip_conntrack_confirm(pskb);
ip_ct_deliver_cached_events(ct);
}
#define PG_active 6
#define PG_slab 7 /* slab debug (Suparna wants this) */
-#define PG_checked 8 /* kill me in 2.5.<early>. */
+#define PG_owner_priv_1 8 /* Owner use. If pagecache, fs may use*/
#define PG_arch_1 9
#define PG_reserved 10
#define PG_private 11 /* If pagecache, has fs-private data */
#define PG_nosave_free 18 /* Used for system suspend/resume */
#define PG_buddy 19 /* Page is free, on buddy lists */
+/* PG_owner_priv_1 users should have descriptive aliases */
+#define PG_checked PG_owner_priv_1 /* Used by some filesystems */
#if (BITS_PER_LONG > 32)
/*
int __must_check pci_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
+void pci_msi_off(struct pci_dev *dev);
int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask);
void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
#define PCI_MSI_MASK_BIT 16 /* Mask bits register */
-/* MSI-X registers (these are at offset PCI_MSI_FLAGS) */
-#define PCI_MSIX_FLAGS_QSIZE 0x7FF
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
+/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
+#define PCI_MSIX_FLAGS 2
+#define PCI_MSIX_FLAGS_QSIZE 0x7FF
+#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
#define PCI_MSIX_FLAGS_BITMASK (1 << 0)
unsigned int imbalance_pct; /* No balance until over watermark */
unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
- unsigned int per_cpu_gain; /* CPU % gained by adding domain cpus */
unsigned int busy_idx;
unsigned int idle_idx;
unsigned int newidle_idx;
#ifdef CONFIG_SYSVIPC
long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr);
+extern int is_file_shm_hugepages(struct file *file);
#else
static inline long do_shmat(int shmid, char __user *shmaddr,
int shmflg, unsigned long *addr)
{
return -ENOSYS;
}
+static inline int is_file_shm_hugepages(struct file *file)
+{
+ return 0;
+}
#endif
#endif /* __KERNEL__ */
/* _SS_MAXSIZE value minus size of ss_family */
} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */
-#ifdef __KERNEL__
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
#include <asm/socket.h> /* arch-dependent defines */
#include <linux/sockios.h> /* the SIOCxxx I/O controls */
1 : ({ local_irq_restore(flags); 0; }); \
})
+/*
+ * Locks two spinlocks l1 and l2.
+ * l1_first indicates if spinlock l1 should be taken first.
+ */
+static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2,
+ bool l1_first)
+ __acquires(l1)
+ __acquires(l2)
+{
+ if (l1_first) {
+ spin_lock(l1);
+ spin_lock(l2);
+ } else {
+ spin_lock(l2);
+ spin_lock(l1);
+ }
+}
+
+/*
+ * Unlocks two spinlocks l1 and l2.
+ * l1_taken_first indicates if spinlock l1 was taken first and therefore
+ * should be released after spinlock l2.
+ */
+static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2,
+ bool l1_taken_first)
+ __releases(l1)
+ __releases(l2)
+{
+ if (l1_taken_first) {
+ spin_unlock(l2);
+ spin_unlock(l1);
+ } else {
+ spin_unlock(l1);
+ spin_unlock(l2);
+ }
+}
+
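
These helpers encode the usual defence against AB-BA deadlock: both
contending CPUs compute the same global order (here, by CPU number), so they
always acquire the pair in the same sequence. The hrtimer and timer migration
hunks later in this patch use exactly this pattern; a condensed sketch, with
example_base standing in for the per-CPU bases:

struct example_base {
	spinlock_t lock;
	/* ... per-CPU entries ... */
};

static void example_migrate(struct example_base *old_base,
			    struct example_base *new_base, int cpu)
{
	/* Ordering by CPU number gives both racing CPUs the same
	 * acquisition order, so they cannot deadlock on each other. */
	double_spin_lock(&new_base->lock, &old_base->lock,
			 smp_processor_id() < cpu);
	/* ... move entries from old_base to new_base ... */
	double_spin_unlock(&new_base->lock, &old_base->lock,
			   smp_processor_id() < cpu);
}
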
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
#endif
-#ifdef __KERNEL__
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
#define S_IFMT 00170000
#define S_IFSOCK 0140000
union svc_addr_u {
struct in_addr addr;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr addr6;
-#endif
};
/*
* Function prototypes.
*/
int svc_makesock(struct svc_serv *, int, unsigned short, int flags);
-void svc_close_socket(struct svc_sock *);
+void svc_force_close_socket(struct svc_sock *);
int svc_recv(struct svc_rqst *, long);
int svc_send(struct svc_rqst *);
void svc_drop(struct svc_rqst *);
}
static inline void sysfs_remove_file_from_group(struct kobject *kobj,
- const struct attribute *attr, const char *group);
+ const struct attribute *attr, const char *group)
{
- ;
}
static inline void sysfs_notify(struct kobject * k, char *dir, char *attr)
.busy_factor = 64, \
.imbalance_pct = 110, \
.cache_nice_tries = 0, \
- .per_cpu_gain = 25, \
.busy_idx = 0, \
.idle_idx = 0, \
.newidle_idx = 1, \
.busy_factor = 64, \
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
- .per_cpu_gain = 100, \
.busy_idx = 2, \
.idle_idx = 1, \
.newidle_idx = 2, \
.busy_factor = 64, \
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
- .per_cpu_gain = 100, \
.busy_idx = 2, \
.idle_idx = 1, \
.newidle_idx = 2, \
.newidle_idx = 0, /* unused */ \
.wake_idx = 0, /* unused */ \
.forkexec_idx = 0, /* unused */ \
- .per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
| SD_SERIALIZE, \
.last_balance = jiffies, \
__u32 reserved[2];
};
+/*
+ * M P E G S E R V I C E S
+ *
+ * NOTE: EXPERIMENTAL API
+ */
+#if 1
+#define V4L2_ENC_IDX_FRAME_I (0)
+#define V4L2_ENC_IDX_FRAME_P (1)
+#define V4L2_ENC_IDX_FRAME_B (2)
+#define V4L2_ENC_IDX_FRAME_MASK (0xf)
+
+struct v4l2_enc_idx_entry {
+ __u64 offset;
+ __u64 pts;
+ __u32 length;
+ __u32 flags;
+ __u32 reserved[2];
+};
+
+#define V4L2_ENC_IDX_ENTRIES (64)
+struct v4l2_enc_idx {
+ __u32 entries;
+ __u32 entries_cap;
+ __u32 reserved[4];
+ struct v4l2_enc_idx_entry entry[V4L2_ENC_IDX_ENTRIES];
+};
+
+
+#define V4L2_ENC_CMD_START (0)
+#define V4L2_ENC_CMD_STOP (1)
+#define V4L2_ENC_CMD_PAUSE (2)
+#define V4L2_ENC_CMD_RESUME (3)
+
+/* Flags for V4L2_ENC_CMD_STOP */
+#define V4L2_ENC_CMD_STOP_AT_GOP_END (1 << 0)
+
+struct v4l2_encoder_cmd {
+ __u32 cmd;
+ __u32 flags;
+ union {
+ struct {
+ __u32 data[8];
+ } raw;
+ };
+};
+
+#endif
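
From userspace the block above is driven through the VIDIOC_ENCODER_CMD
ioctls added near the end of the header; a minimal sketch, assuming fd is an
open device node whose driver implements this experimental API:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Ask the encoder to stop cleanly at the next GOP boundary. */
static int example_stop_at_gop(int fd)
{
	struct v4l2_encoder_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = V4L2_ENC_CMD_STOP;
	cmd.flags = V4L2_ENC_CMD_STOP_AT_GOP_END;

	/* TRY first: validates the command without side effects. */
	if (ioctl(fd, VIDIOC_TRY_ENCODER_CMD, &cmd) < 0)
		return -1;
	return ioctl(fd, VIDIOC_ENCODER_CMD, &cmd);
}
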
+
+
/*
* D A T A S E R V I C E S ( V B I )
*
/*
* A D V A N C E D D E B U G G I N G
+ *
+ * NOTE: EXPERIMENTAL API
*/
/* VIDIOC_DBG_G_REGISTER and VIDIOC_DBG_S_REGISTER */
+
+#define V4L2_CHIP_MATCH_HOST 0 /* Match against chip ID on host (0 for the host) */
+#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver ID */
+#define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */
+
struct v4l2_register {
+ __u32 match_type; /* Match type */
+ __u32 match_chip; /* Match this chip, meaning determined by match_type */
__u64 reg;
- __u32 i2c_id; /* I2C driver ID of the I2C chip, or 0 for the host */
- __u32 val;
+ __u64 val;
};
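
With the widened structure a register access names the chip explicitly
instead of overloading an I2C driver ID. A hedged userspace sketch (the
register offset 0 is arbitrary, and the driver must be built with
CONFIG_VIDEO_ADV_DEBUG for the ioctl to be implemented):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int example_read_host_reg(int fd, __u64 *val)
{
	struct v4l2_register reg;

	memset(&reg, 0, sizeof(reg));
	reg.match_type = V4L2_CHIP_MATCH_HOST;
	reg.match_chip = 0;	/* 0 selects the bridge/host chip itself */
	reg.reg = 0;		/* arbitrary offset, for illustration */

	if (ioctl(fd, VIDIOC_DBG_G_REGISTER, &reg) < 0)
		return -1;
	*val = reg.val;
	return 0;
}
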
/*
#if 1
#define VIDIOC_ENUM_FRAMESIZES _IOWR ('V', 74, struct v4l2_frmsizeenum)
#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR ('V', 75, struct v4l2_frmivalenum)
+#define VIDIOC_G_ENC_INDEX _IOR ('V', 76, struct v4l2_enc_idx)
+#define VIDIOC_ENCODER_CMD _IOWR ('V', 77, struct v4l2_encoder_cmd)
+#define VIDIOC_TRY_ENCODER_CMD _IOWR ('V', 78, struct v4l2_encoder_cmd)
+
+/* Experimental, only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */
+#define VIDIOC_DBG_S_REGISTER _IOW ('V', 79, struct v4l2_register)
+#define VIDIOC_DBG_G_REGISTER _IOWR ('V', 80, struct v4l2_register)
#endif
-/* only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */
-#define VIDIOC_DBG_S_REGISTER _IOW ('d', 100, struct v4l2_register)
-#define VIDIOC_DBG_G_REGISTER _IOWR('d', 101, struct v4l2_register)
#ifdef __OLD_VIDIOC_
/* for compatibility, will go away some day */
int wakeup_pdflush(long nr_pages);
void laptop_io_completion(void);
void laptop_sync_completion(void);
-void throttle_vm_writeout(void);
+void throttle_vm_writeout(gfp_t gfp_mask);
/* These are exported to sysctl. */
extern int dirty_background_ratio;
/* ------------------------------------------------------------------------- */
+/* Register/chip ident helper function */
+
+struct i2c_client; /* forward reference */
+int v4l2_chip_match_i2c_client(struct i2c_client *c, u32 id_type, u32 chip_id);
+int v4l2_chip_match_host(u32 id_type, u32 chip_id);
+
+/* ------------------------------------------------------------------------- */
+
/* Internal ioctls */
/* VIDIOC_INT_DECODE_VBI_LINE */
struct v4l2_jpegcompression *a);
int (*vidioc_s_jpegcomp) (struct file *file, void *fh,
struct v4l2_jpegcompression *a);
+ int (*vidioc_g_enc_index) (struct file *file, void *fh,
+ struct v4l2_enc_idx *a);
+ int (*vidioc_encoder_cmd) (struct file *file, void *fh,
+ struct v4l2_encoder_cmd *a);
+ int (*vidioc_try_encoder_cmd) (struct file *file, void *fh,
+ struct v4l2_encoder_cmd *a);
/* Stream type-dependent parameter ioctls */
int (*vidioc_g_parm) (struct file *file, void *fh,
struct inet_timewait_death_row {
/* Short-time timewait calendar */
int twcal_hand;
- int twcal_jiffie;
+ unsigned long twcal_jiffie;
struct timer_list twcal_timer;
struct hlist_head twcal_row[INET_TWDR_RECYCLE_SLOTS];
int ret = NF_ACCEPT;
if (ct) {
- if (!nf_ct_is_confirmed(ct))
+ if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
ret = __nf_conntrack_confirm(pskb);
nf_ct_deliver_cached_events(ct);
}
static inline gfp_t gfp_any(void)
{
- return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
+ return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
}
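
The point of testing in_atomic() rather than in_softirq() is that these paths
can be entered from any context, not just softirq. A one-line sketch of the
typical use (the helper name is hypothetical):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Allocate from a path that may run in process or atomic context;
 * gfp_any() degrades to GFP_ATOMIC whenever sleeping is forbidden. */
static struct sk_buff *example_alloc(unsigned int len)
{
	return alloc_skb(len, gfp_any());
}
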
static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
struct xfrm_selector *sel,
- struct xfrm_sec_ctx *ctx, int delete);
-struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete);
+ struct xfrm_sec_ctx *ctx, int delete,
+ int *err);
+struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete, int *err);
void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
u32 xfrm_get_acqseq(void);
void xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi);
/* include/version.h. Generated by alsa/ksync script. */
-#define CONFIG_SND_VERSION "1.0.14rc2"
-#define CONFIG_SND_DATE " (Wed Feb 14 07:42:13 2007 UTC)"
+#define CONFIG_SND_VERSION "1.0.14rc3"
+#define CONFIG_SND_DATE " (Tue Mar 06 13:10:00 2007 UTC)"
If unsure, say N.
+config BLK_DEV_INITRD
+ bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
+ depends on BROKEN || !FRV
+ help
+ The initial RAM filesystem is a ramfs which is loaded by the
+ boot loader (loadlin or lilo) and is mounted as root
+ before the normal boot procedure. It is typically used to
+ load modules needed to mount the "real" root file system,
+ etc. See <file:Documentation/initrd.txt> for details.
+
+ If RAM disk support (BLK_DEV_RAM) is also included, this
+ also enables initial RAM disk (initrd) support and adds
+ 15 Kbytes (more on some other architectures) to the kernel size.
+
+ If unsure say Y.
+
if BLK_DEV_INITRD
source "usr/Kconfig"
if (IS_ERR(name))
return PTR_ERR(name);
- mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
+ mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex,
+ I_MUTEX_PARENT);
dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
mutex_unlock(&shm_ids(ns).mutex);
}
-struct page *shm_nopage(struct vm_area_struct *vma, unsigned long address,
- int *type)
+static struct page *shm_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
return 0;
}
-#ifndef CONFIG_MMU
+static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+ int (*fsync) (struct file *, struct dentry *, int datasync);
+ struct shm_file_data *sfd = shm_file_data(file);
+ int ret = -EINVAL;
+
+ fsync = sfd->file->f_op->fsync;
+ if (fsync)
+ ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
+ return ret;
+}
+
static unsigned long shm_get_unmapped_area(struct file *file,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct shm_file_data *sfd = shm_file_data(file);
- return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, pgoff,
- flags);
+ return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
+}
+
+int is_file_shm_hugepages(struct file *file)
+{
+ int ret = 0;
+
+ if (file->f_op == &shm_file_operations) {
+ struct shm_file_data *sfd;
+ sfd = shm_file_data(file);
+ ret = is_file_hugepages(sfd->file);
+ }
+ return ret;
}
-#else
-#define shm_get_unmapped_area NULL
-#endif
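
The helper unwraps the SysV shm layering: the outer file carries
shm_file_operations while the backing file underneath may live on hugetlbfs.
Code that must special-case huge pages can now test either layer; a minimal
sketch (the function name is hypothetical):

#include <linux/hugetlb.h>
#include <linux/shm.h>

static int example_uses_hugepages(struct file *file)
{
	/* true for a direct hugetlbfs file, or for a SysV shm
	 * segment whose backing file lives on hugetlbfs */
	return is_file_hugepages(file) || is_file_shm_hugepages(file);
}
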
static const struct file_operations shm_file_operations = {
.mmap = shm_mmap,
+ .fsync = shm_fsync,
.release = shm_release,
.get_unmapped_area = shm_get_unmapped_area,
};
/*
* Switch to high resolution mode
*/
-static void hrtimer_switch_to_hres(void)
+static int hrtimer_switch_to_hres(void)
{
struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
unsigned long flags;
if (base->hres_active)
- return;
+ return 1;
local_irq_save(flags);
if (tick_init_highres()) {
local_irq_restore(flags);
- return;
+ return 0;
}
base->hres_active = 1;
base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
local_irq_restore(flags);
printk(KERN_INFO "Switched to high resolution mode on CPU %d\n",
smp_processor_id());
+ return 1;
}
#else
static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
-static inline void hrtimer_switch_to_hres(void) { }
+static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
if (base->softirq_time.tv64 <= timer->expires.tv64)
break;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ);
+#endif
timer_stats_account_hrtimer(timer);
fn = timer->function;
* deadlock vs. xtime_lock.
*/
if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
- hrtimer_switch_to_hres();
+ if (hrtimer_switch_to_hres())
+ return;
hrtimer_get_softirq_time(cpu_base);
tick_cancel_sched_timer(cpu);
local_irq_disable();
-
- spin_lock(&new_base->lock);
- spin_lock(&old_base->lock);
+ double_spin_lock(&new_base->lock, &old_base->lock,
+ smp_processor_id() < cpu);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
migrate_hrtimer_list(&old_base->clock_base[i],
&new_base->clock_base[i]);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
+ double_spin_unlock(&new_base->lock, &old_base->lock,
+ smp_processor_id() < cpu);
local_irq_enable();
put_cpu_var(hrtimer_bases);
}
raw_local_irq_restore(flags);
}
-void __init lockdep_init(void)
+void lockdep_init(void)
{
int i;
bool "Software Suspend"
depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP))
---help---
- Enable the possibility of suspending the machine.
- It doesn't need ACPI or APM.
- You may suspend your machine by 'swsusp' or 'shutdown -z <time>'
- (patch for sysvinit needed).
+ Enable the suspend to disk (STD) functionality.
- It creates an image which is saved in your active swap. Upon next
+ You can suspend your machine with 'echo disk > /sys/power/state'.
+ Alternatively, you can use the additional userland tools available
+ from <http://suspend.sf.net>.
+
+ In principle it does not require ACPI or APM, although for example
+ ACPI will be used if available.
+
+ It creates an image which is saved in your active swap. Upon the next
boot, pass the 'resume=/dev/swappartition' argument to the kernel to
have it detect the saved image, restore memory state from it, and
continue to run as before. If you do not want the previous state to
- be reloaded, then use the 'noresume' kernel argument. However, note
- that your partitions will be fsck'd and you must re-mkswap your swap
- partitions. It does not work with swap files.
+ be reloaded, then use the 'noresume' kernel command line argument.
+ Note, however, that fsck will be run on your filesystems and you will
+ need to run mkswap against the swap partition used for the suspend.
- Right now you may boot without resuming and then later resume but
- in meantime you cannot use those swap partitions/files which were
- involved in suspending. Also in this case there is a risk that buffers
- on disk won't match with saved ones.
+ It also works with swap files to a limited extent (for details see
+ <file:Documentation/power/swsusp-and-swap-files.txt>).
- For more information take a look at <file:Documentation/power/swsusp.txt>.
+ Right now you may boot without resuming and resume later but in the
+ meantime you cannot use the swap partition(s)/file(s) involved in
+ suspending. Also in this case you must not use the filesystems
+ that were mounted before the suspend. In particular, you MUST NOT
+ MOUNT any journaled filesystems mounted before the suspend or they
+ will get corrupted in a nasty way.
- (For now, swsusp is incompatible with PAE aka HIGHMEM_64G on i386.
- we need identity mapping for resume to work, and that is trivial
- to get with 4MB pages, but less than trivial on PAE).
+ For more information take a look at <file:Documentation/power/swsusp.txt>.
config PM_STD_PARTITION
string "Default resume partition"
static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
static char *torture_type = "rcu"; /* What RCU implementation to torture. */
-module_param(nreaders, int, 0);
+module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
-module_param(nfakewriters, int, 0);
+module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
-module_param(stat_interval, int, 0);
+module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
-module_param(verbose, bool, 0);
+module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
-module_param(test_no_idle_hz, bool, 0);
+module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
-module_param(shuffle_interval, int, 0);
+module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
-module_param(torture_type, charp, 0);
+module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
#define TORTURE_FLAG "-torture:"
}
/**
- *
* relay_hotcpu_callback - CPU hotplug callback
* @nb: notifier block
* @action: hotplug action to take
* @hcpu: CPU number
*
- * Returns the success/failure of the operation. (NOTIFY_OK, NOTIFY_BAD)
+ * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
*/
static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
unsigned long action,
}
#endif
-static inline void wake_priority_sleeper(struct rq *rq)
-{
-#ifdef CONFIG_SCHED_SMT
- if (!rq->nr_running)
- return;
-
- spin_lock(&rq->lock);
- /*
- * If an SMT sibling task has been put to sleep for priority
- * reasons reschedule the idle task to see if it can now run.
- */
- if (rq->nr_running)
- resched_task(rq->idle);
- spin_unlock(&rq->lock);
-#endif
-}
-
DEFINE_PER_CPU(struct kernel_stat, kstat);
EXPORT_PER_CPU_SYMBOL(kstat);
update_cpu_clock(p, rq, now);
- if (p == rq->idle)
- /* Task on the idle queue */
- wake_priority_sleeper(rq);
- else
+ if (p != rq->idle)
task_running_tick(rq, p);
#ifdef CONFIG_SMP
update_load(rq);
#endif
}
-#ifdef CONFIG_SCHED_SMT
-static inline void wakeup_busy_runqueue(struct rq *rq)
-{
- /* If an SMT runqueue is sleeping due to priority reasons wake it up */
- if (rq->curr == rq->idle && rq->nr_running)
- resched_task(rq->idle);
-}
-
-/*
- * Called with interrupt disabled and this_rq's runqueue locked.
- */
-static void wake_sleeping_dependent(int this_cpu)
-{
- struct sched_domain *tmp, *sd = NULL;
- int i;
-
- for_each_domain(this_cpu, tmp) {
- if (tmp->flags & SD_SHARE_CPUPOWER) {
- sd = tmp;
- break;
- }
- }
-
- if (!sd)
- return;
-
- for_each_cpu_mask(i, sd->span) {
- struct rq *smt_rq = cpu_rq(i);
-
- if (i == this_cpu)
- continue;
- if (unlikely(!spin_trylock(&smt_rq->lock)))
- continue;
-
- wakeup_busy_runqueue(smt_rq);
- spin_unlock(&smt_rq->lock);
- }
-}
-
-/*
- * number of 'lost' timeslices this task wont be able to fully
- * utilize, if another task runs on a sibling. This models the
- * slowdown effect of other tasks running on siblings:
- */
-static inline unsigned long
-smt_slice(struct task_struct *p, struct sched_domain *sd)
-{
- return p->time_slice * (100 - sd->per_cpu_gain) / 100;
-}
-
-/*
- * To minimise lock contention and not have to drop this_rq's runlock we only
- * trylock the sibling runqueues and bypass those runqueues if we fail to
- * acquire their lock. As we only trylock the normal locking order does not
- * need to be obeyed.
- */
-static int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
- struct sched_domain *tmp, *sd = NULL;
- int ret = 0, i;
-
- /* kernel/rt threads do not participate in dependent sleeping */
- if (!p->mm || rt_task(p))
- return 0;
-
- for_each_domain(this_cpu, tmp) {
- if (tmp->flags & SD_SHARE_CPUPOWER) {
- sd = tmp;
- break;
- }
- }
-
- if (!sd)
- return 0;
-
- for_each_cpu_mask(i, sd->span) {
- struct task_struct *smt_curr;
- struct rq *smt_rq;
-
- if (i == this_cpu)
- continue;
-
- smt_rq = cpu_rq(i);
- if (unlikely(!spin_trylock(&smt_rq->lock)))
- continue;
-
- smt_curr = smt_rq->curr;
-
- if (!smt_curr->mm)
- goto unlock;
-
- /*
- * If a user task with lower static priority than the
- * running task on the SMT sibling is trying to schedule,
- * delay it till there is proportionately less timeslice
- * left of the sibling task to prevent a lower priority
- * task from using an unfair proportion of the
- * physical cpu's resources. -ck
- */
- if (rt_task(smt_curr)) {
- /*
- * With real time tasks we run non-rt tasks only
- * per_cpu_gain% of the time.
- */
- if ((jiffies % DEF_TIMESLICE) >
- (sd->per_cpu_gain * DEF_TIMESLICE / 100))
- ret = 1;
- } else {
- if (smt_curr->static_prio < p->static_prio &&
- !TASK_PREEMPTS_CURR(p, smt_rq) &&
- smt_slice(smt_curr, sd) > task_timeslice(p))
- ret = 1;
- }
-unlock:
- spin_unlock(&smt_rq->lock);
- }
- return ret;
-}
-#else
-static inline void wake_sleeping_dependent(int this_cpu)
-{
-}
-static inline int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
- return 0;
-}
-#endif
-
#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
void fastcall add_preempt_count(int val)
if (!rq->nr_running) {
next = rq->idle;
rq->expired_timestamp = 0;
- wake_sleeping_dependent(cpu);
goto switch_tasks;
}
}
}
}
next->sleep_type = SLEEP_NORMAL;
- if (dependent_sleeper(cpu, rq, next))
- next = rq->idle;
switch_tasks:
if (next == rq->idle)
schedstat_inc(rq, sched_goidle);
sched_info_switch(prev, next);
if (likely(prev != next)) {
- next->timestamp = now;
+ next->timestamp = next->last_ran = now;
rq->nr_switches++;
rq->curr = next;
++*switch_count;
.extra2 = &one_hundred,
},
#endif
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) || \
+ (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL))
{
.ctl_name = VM_VDSO_ENABLED,
.procname = "vdso_enabled",
}
#else /* !CONFIG_SYSCTL */
-struct ctl_table_header * register_sysctl_table(ctl_table * table,
- int insert_at_head)
+struct ctl_table_header *register_sysctl_table(ctl_table * table)
{
return NULL;
}
static char override_name[32];
static int finished_booting;
-/* clocksource_done_booting - Called near the end of bootup
+/* clocksource_done_booting - Called near the end of core bootup
*
- * Hack to avoid lots of clocksource churn at boot time
+ * Hack to avoid lots of clocksource churn at boot time.
+ * We use fs_initcall because we want this to start before
+ * device_initcall but after subsys_initcall.
*/
static int __init clocksource_done_booting(void)
{
finished_booting = 1;
return 0;
}
-late_initcall(clocksource_done_booting);
+fs_initcall(clocksource_done_booting);
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static LIST_HEAD(watchdog_list);
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
+void tick_suspend_broadcast(void)
+{
+ struct clock_event_device *bc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+ bc = tick_broadcast_device.evtdev;
+ if (bc && tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+ clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+
+ spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+}
+
+int tick_resume_broadcast(void)
+{
+ struct clock_event_device *bc;
+ unsigned long flags;
+ int broadcast = 0;
+
+ spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+ bc = tick_broadcast_device.evtdev;
+ if (bc) {
+ if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC &&
+ !cpus_empty(tick_broadcast_mask))
+ tick_broadcast_start_periodic(bc);
+
+ broadcast = cpu_isset(smp_processor_id(), tick_broadcast_mask);
+ }
+ spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+
+ return broadcast;
+}
+
+
#ifdef CONFIG_TICK_ONESHOT
static cpumask_t tick_broadcast_oneshot_mask;
spin_unlock_irqrestore(&tick_device_lock, flags);
}
+static void tick_suspend_periodic(void)
+{
+ struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tick_device_lock, flags);
+ if (td->mode == TICKDEV_MODE_PERIODIC)
+ clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+ spin_unlock_irqrestore(&tick_device_lock, flags);
+}
+
+static void tick_resume_periodic(void)
+{
+ struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tick_device_lock, flags);
+ if (td->mode == TICKDEV_MODE_PERIODIC)
+ tick_setup_periodic(td->evtdev, 0);
+ spin_unlock_irqrestore(&tick_device_lock, flags);
+}
+
/*
* Notification about clock event devices
*/
tick_shutdown(dev);
break;
+ case CLOCK_EVT_NOTIFY_SUSPEND:
+ tick_suspend_periodic();
+ tick_suspend_broadcast();
+ break;
+
+ case CLOCK_EVT_NOTIFY_RESUME:
+ if (!tick_resume_broadcast())
+ tick_resume_periodic();
+ break;
+
default:
break;
}
extern int tick_is_broadcast_device(struct clock_event_device *dev);
extern void tick_broadcast_on_off(unsigned long reason, int *oncpu);
extern void tick_shutdown_broadcast(unsigned int *cpup);
+extern void tick_suspend_broadcast(void);
+extern int tick_resume_broadcast(void);
extern void
tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { }
static inline void tick_shutdown_broadcast(unsigned int *cpup) { }
+static inline void tick_suspend_broadcast(void) { }
+static inline int tick_resume_broadcast(void) { return 0; }
/*
* Set the periodic handler in non broadcast mode
/**
* next_timer_interrupt - return the jiffy of the next pending timer
+ * @now: current time (in jiffies)
*/
unsigned long get_next_timer_interrupt(unsigned long now)
{
clock->error = 0;
ntp_clear();
+ update_vsyscall(&xtime, clock);
+
write_sequnlock_irqrestore(&xtime_lock, flags);
/* signal hrtimers about time change */
#endif
/**
- * timeofday_is_continuous - check to see if timekeeping is free running
+ * timekeeping_is_continuous - check to see if timekeeping is free running
*/
int timekeeping_is_continuous(void)
{
write_sequnlock_irqrestore(&xtime_lock, flags);
touch_softlockup_watchdog();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
+
/* Resume hrtimers */
clock_was_set();
timekeeping_suspended = 1;
timekeeping_suspend_time = read_persistent_clock();
write_sequnlock_irqrestore(&xtime_lock, flags);
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
+
return 0;
}
new_base = get_cpu_var(tvec_bases);
local_irq_disable();
- spin_lock(&new_base->lock);
- spin_lock(&old_base->lock);
+ double_spin_lock(&new_base->lock, &old_base->lock,
+ smp_processor_id() < cpu);
BUG_ON(old_base->running_timer);
migrate_timer_list(new_base, old_base->tv5.vec + i);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
+ double_spin_unlock(&new_base->lock, &old_base->lock,
+ smp_processor_id() < cpu);
local_irq_enable();
put_cpu_var(tvec_bases);
}
/**
* __bitmap_shift_right - logical right shift of the bits in a bitmap
- * @dst - destination bitmap
- * @src - source bitmap
- * @nbits - shift by this many bits
- * @bits - bitmap size, in bits
+ * @dst : destination bitmap
+ * @src : source bitmap
+ * @shift : shift by this many bits
+ * @bits : bitmap size, in bits
*
* Shifting right (dividing) means moving bits in the MS -> LS bit
* direction. Zeros are fed into the vacated MS positions and the
/**
* __bitmap_shift_left - logical left shift of the bits in a bitmap
- * @dst - destination bitmap
- * @src - source bitmap
- * @nbits - shift by this many bits
- * @bits - bitmap size, in bits
+ * @dst : destination bitmap
+ * @src : source bitmap
+ * @shift : shift by this many bits
+ * @bits : bitmap size, in bits
*
* Shifting left (multiplying) means moving bits in the LS -> MS
* direction. Zeros are fed into the vacated LS bit positions
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>
-#include <asm/swiotlb.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
-#ifndef SG_ENT_VIRT_ADDRESS
#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
-#endif
/*
* Maximum allowable number of contiguous slabs to map,
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
-#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
-typedef char *io_tlb_addr_t;
-#define swiotlb_orig_addr_null(buffer) (!(buffer))
-#define ptr_to_io_tlb_addr(ptr) (ptr)
-#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
-#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
-#endif
-static io_tlb_addr_t *io_tlb_orig_addr;
+static unsigned char **io_tlb_orig_addr;
/*
* Protect the above data structures in the map and unmap calls
*/
static DEFINE_SPINLOCK(io_tlb_lock);
-#ifdef SWIOTLB_EXTRA_VARIABLES
-SWIOTLB_EXTRA_VARIABLES;
-#endif
-
-#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
static int __init
setup_io_tlb_npages(char *str)
{
swiotlb_force = 1;
return 1;
}
-#endif
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
-#ifndef swiotlb_adjust_size
-#define swiotlb_adjust_size(size) ((void)0)
-#endif
-
-#ifndef swiotlb_adjust_seg
-#define swiotlb_adjust_seg(start, size) ((void)0)
-#endif
-
-#ifndef swiotlb_print_info
-#define swiotlb_print_info(bytes) \
- printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
- "0x%lx\n", bytes >> 20, \
- virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
-#endif
-
/*
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
- swiotlb_adjust_size(io_tlb_nslabs);
- swiotlb_adjust_size(io_tlb_overflow);
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
- for (i = 0; i < io_tlb_nslabs; i++) {
- if ( !(i % IO_TLB_SEGSIZE) )
- swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
- IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+ for (i = 0; i < io_tlb_nslabs; i++)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- }
io_tlb_index = 0;
- io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
+ io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
/*
* Get the overflow emergency buffer
io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
if (!io_tlb_overflow_buffer)
panic("Cannot allocate SWIOTLB overflow buffer!\n");
- swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
- swiotlb_print_info(bytes);
+ printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
+ virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
}
-#ifndef __swiotlb_init_with_default_size
-#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
-#endif
void __init
swiotlb_init(void)
{
- __swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
+ swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
}
-#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
/*
* Systems with larger DMA zones (those that don't support ISA) can
* initialize the swiotlb later using the slab allocator if needed.
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
- io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
- get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
+ io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+ get_order(io_tlb_nslabs * sizeof(char *)));
if (!io_tlb_orig_addr)
goto cleanup3;
- memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
+ memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
/*
* Get the overflow emergency buffer
if (!io_tlb_overflow_buffer)
goto cleanup4;
- swiotlb_print_info(bytes);
+ printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
+ "0x%lx\n", bytes >> 20,
+ virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
return 0;
cleanup4:
- free_pages((unsigned long)io_tlb_orig_addr,
- get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
+ free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+ sizeof(char *)));
io_tlb_orig_addr = NULL;
cleanup3:
- free_pages((unsigned long)io_tlb_list,
- get_order(io_tlb_nslabs * sizeof(int)));
+ free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+ sizeof(int)));
io_tlb_list = NULL;
cleanup2:
io_tlb_end = NULL;
io_tlb_nslabs = req_nslabs;
return -ENOMEM;
}
-#endif
-#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
static int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
return (addr & ~mask) != 0;
}
-static inline int range_needs_mapping(const void *ptr, size_t size)
-{
- return swiotlb_force;
-}
-
-static inline int order_needs_mapping(unsigned int order)
-{
- return 0;
-}
-#endif
-
-static void
-__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
-{
-#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
- if (dir == DMA_TO_DEVICE)
- memcpy(dma_addr, buffer, size);
- else
- memcpy(buffer, dma_addr, size);
-#else
- __swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
-#endif
-}
-
/*
* Allocates bounce buffer and returns its kernel virtual address.
*/
static void *
-map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
+map_single(struct device *hwdev, char *buffer, size_t size, int dir)
{
unsigned long flags;
char *dma_addr;
*/
io_tlb_orig_addr[index] = buffer;
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
- __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+ memcpy(dma_addr, buffer, size);
return dma_addr;
}
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- io_tlb_addr_t buffer = io_tlb_orig_addr[index];
+ char *buffer = io_tlb_orig_addr[index];
/*
* First, sync the memory before unmapping the entry
*/
- if (!swiotlb_orig_addr_null(buffer)
- && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
/*
* bounce... copy the data back into the original buffer and
* delete the bounce buffer.
*/
- __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+ memcpy(buffer, dma_addr, size);
/*
* Return the buffer to the free list by setting the corresponding
int dir, int target)
{
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- io_tlb_addr_t buffer = io_tlb_orig_addr[index];
+ char *buffer = io_tlb_orig_addr[index];
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
- __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+ memcpy(buffer, dma_addr, size);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+ memcpy(dma_addr, buffer, size);
else
BUG_ON(dir != DMA_FROM_DEVICE);
break;
}
}
-#ifdef SWIOTLB_ARCH_NEED_ALLOC
-
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
*/
flags |= GFP_DMA;
- if (!order_needs_mapping(order))
- ret = (void *)__get_free_pages(flags, order);
- else
- ret = NULL;
+ ret = (void *)__get_free_pages(flags, order);
if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
/*
* The allocated memory isn't reachable by the device.
*dma_handle = dev_addr;
return ret;
}
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
}
-EXPORT_SYMBOL(swiotlb_free_coherent);
-
-#endif
static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!range_needs_mapping(ptr, size)
- && !address_needs_mapping(hwdev, dev_addr))
+ if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
return dev_addr;
/*
* Oh well, have to allocate and map a bounce buffer.
*/
- map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir);
+ map = map_single(hwdev, ptr, size, dir);
if (!map) {
swiotlb_full(hwdev, size, dir, 1);
map = io_tlb_overflow_buffer;
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
int dir)
{
+ void *addr;
dma_addr_t dev_addr;
int i;
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nelems; i++, sg++) {
- dev_addr = SG_ENT_PHYS_ADDRESS(sg);
- if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
- || address_needs_mapping(hwdev, dev_addr)) {
- void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
+ addr = SG_ENT_VIRT_ADDRESS(sg);
+ dev_addr = virt_to_bus(addr);
+ if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+ void *map = map_single(hwdev, addr, sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
-#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
-
-dma_addr_t
-swiotlb_map_page(struct device *hwdev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- dma_addr_t dev_addr;
- char *map;
-
- dev_addr = page_to_bus(page) + offset;
- if (address_needs_mapping(hwdev, dev_addr)) {
- map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
- if (!map) {
- swiotlb_full(hwdev, size, direction, 1);
- map = io_tlb_overflow_buffer;
- }
- dev_addr = virt_to_bus(map);
- }
-
- return dev_addr;
-}
-
-void
-swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction direction)
-{
- char *dma_addr = bus_to_virt(dev_addr);
-
- BUG_ON(direction == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
- unmap_single(hwdev, dma_addr, size, direction);
- else if (direction == DMA_FROM_DEVICE)
- dma_mark_clean(dma_addr, size);
-}
-
-#endif
-
int
swiotlb_dma_mapping_error(dma_addr_t dma_addr)
{
* during bus mastering, then you would pass 0x00ffffff as the mask to
* this function.
*/
-#ifndef __swiotlb_dma_supported
-#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
-#endif
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return __swiotlb_dma_supported(hwdev, mask);
+ return virt_to_bus(io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_init);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
+EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);
return 0;
}
-/* Check if a vma is migratable */
-static inline int vma_migratable(struct vm_area_struct *vma)
-{
- if (vma->vm_flags & (
- VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
- return 0;
- return 1;
-}
-
/*
* Check if all pages in a range are on a set of nodes.
* If pagelist != NULL then isolate pages from the LRU and
err = -EFAULT;
vma = find_vma(mm, pp->addr);
- if (!vma)
+ if (!vma || !vma_migratable(vma))
goto set_status;
page = follow_page(vma, pp->addr, FOLL_GET);
printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
i++;
pn = nd;
+ prev = vma->vm_start;
+ pend = vma->vm_end;
}
j = 0;
for (nd = pn; nd; nd = rb_prev(nd)) {
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
-void throttle_vm_writeout(void)
+void throttle_vm_writeout(gfp_t gfp_mask)
{
long background_thresh;
long dirty_thresh;
+ if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) {
+ /*
+ * The caller might hold locks which can prevent IO completion
+ * or progress in the filesystem. So we cannot just sit here
+ * waiting for IO to complete.
+ */
+ congestion_wait(WRITE, HZ/10);
+ return;
+ }
+
for ( ; ; ) {
get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
}
}
-
/*
* writeback at least _min_pages, and keep writing until the amount of dirty
* memory is less than the background threshold, or until we're all clean.
page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
1 << PG_referenced | 1 << PG_arch_1 |
- 1 << PG_checked | 1 << PG_mappedtodisk);
+ 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
set_page_private(page, 0);
set_page_refcounted(page);
*/
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
- struct anon_vma *anon_vma = NULL;
+ struct anon_vma *anon_vma;
unsigned long anon_mapping;
rcu_read_lock();
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
spin_lock(&anon_vma->lock);
+ return anon_vma;
out:
rcu_read_unlock();
- return anon_vma;
+ return NULL;
+}
+
+static void page_unlock_anon_vma(struct anon_vma *anon_vma)
+{
+ spin_unlock(&anon_vma->lock);
+ rcu_read_unlock();
}
/*
if (!mapcount)
break;
}
- spin_unlock(&anon_vma->lock);
+
+ page_unlock_anon_vma(anon_vma);
return referenced;
}
if (ret == SWAP_FAIL || !page_mapped(page))
break;
}
- spin_unlock(&anon_vma->lock);
+
+ page_unlock_anon_vma(anon_vma);
return ret;
}
vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
-static struct super_operations shmem_ops;
+static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
return error;
}
-struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
+static struct page *shmem_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
struct page *page = NULL;
return retval;
}
-int shmem_mmap(struct file *file, struct vm_area_struct *vma)
+static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
vma->vm_ops = &shmem_vm_ops;
#endif
};
-static struct super_operations shmem_ops = {
+static const struct super_operations shmem_ops = {
.alloc_inode = shmem_alloc_inode,
.destroy_inode = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
/**
* cache_reap - Reclaim memory from caches.
- * @unused: unused parameter
+ * @w: work descriptor
*
* Called from workqueue/eventd every few seconds.
* Purpose:
return 0;
}
+#if 0
int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
return 0;
#endif
}
+#endif /* 0 */
#ifndef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
pagevec_init(&pvec, 0);
next = start;
- while (next <= end && !ret && !wrapped &&
+ while (next <= end && !wrapped &&
pagevec_lookup(&pvec, mapping, next,
min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
- for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
+ for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t page_index;
}
}
- throttle_vm_writeout();
+ throttle_vm_writeout(sc->gfp_mask);
atomic_dec(&zone->reclaim_in_progress);
return nr_reclaimed;
struct vlan_group *grp = __vlan_find_group(real_dev->ifindex);
if (grp)
- return grp->vlan_devices[VID];
+ return vlan_group_get_device(grp, VID);
return NULL;
}
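
The accessor pair used throughout these hunks hides a two-level table:
vlan_devices_arrays is split into VLAN_GROUP_ARRAY_SPLIT_PARTS chunks of
VLAN_GROUP_ARRAY_PART_LEN pointers, allocated below at registration time.
Conceptually the getter indexes as in this sketch (the real inline
definitions belong in the corresponding header; the _sketch suffix marks this
as illustrative):

#include <linux/if_vlan.h>

static inline struct net_device *vlan_group_get_device_sketch(
		struct vlan_group *vg, int vlan_id)
{
	struct net_device **array;

	/* pick the chunk, then the slot within it */
	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
	return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
}
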
+static void vlan_group_free(struct vlan_group *grp)
+{
+ int i;
+
+ for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+ kfree(grp->vlan_devices_arrays[i]);
+ kfree(grp);
+}
+
static void vlan_rcu_free(struct rcu_head *rcu)
{
- kfree(container_of(rcu, struct vlan_group, rcu));
+ vlan_group_free(container_of(rcu, struct vlan_group, rcu));
}
ret = 0;
if (grp) {
- dev = grp->vlan_devices[vlan_id];
+ dev = vlan_group_get_device(grp, vlan_id);
if (dev) {
/* Remove proc entry */
vlan_proc_rem_dev(dev);
real_dev->vlan_rx_kill_vid(real_dev, vlan_id);
}
- grp->vlan_devices[vlan_id] = NULL;
+ vlan_group_set_device(grp, vlan_id, NULL);
synchronize_net();
* group.
*/
for (i = 0; i < VLAN_VID_MASK; i++)
- if (grp->vlan_devices[i])
+ if (vlan_group_get_device(grp, i))
break;
if (i == VLAN_VID_MASK) {
struct net_device *new_dev;
struct net_device *real_dev; /* the ethernet device */
char name[IFNAMSIZ];
+ int i;
#ifdef VLAN_DEBUG
printk(VLAN_DBG "%s: if_name -:%s:- vid: %i\n",
if (!grp)
goto out_free_unregister;
+ for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) {
+ grp->vlan_devices_arrays[i] = kzalloc(
+ sizeof(struct net_device *)*VLAN_GROUP_ARRAY_PART_LEN,
+ GFP_KERNEL);
+
+ if (!grp->vlan_devices_arrays[i])
+ goto out_free_arrays;
+ }
+
/* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */
grp->real_dev_ifindex = real_dev->ifindex;
real_dev->vlan_rx_register(real_dev, grp);
}
- grp->vlan_devices[VLAN_ID] = new_dev;
+ vlan_group_set_device(grp, VLAN_ID, new_dev);
if (vlan_proc_add_dev(new_dev)<0)/* create it's proc entry */
printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n",
#endif
return new_dev;
+out_free_arrays:
+ vlan_group_free(grp);
+
out_free_unregister:
unregister_netdev(new_dev);
goto out_unlock;
case NETDEV_CHANGE:
/* Propagate real device state to vlan devices */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
+ vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
case NETDEV_DOWN:
/* Put all VLANs for this dev in the down state too. */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
+ vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
case NETDEV_UP:
/* Put all VLANs for this dev in the up state too. */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
+ vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
int ret;
- vlandev = grp->vlan_devices[i];
+ vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
/* Detach sockets from device */
read_lock(&hci_sk_list.lock);
sk_for_each(sk, node, &hci_sk_list.head) {
- bh_lock_sock(sk);
+ lock_sock(sk);
if (hci_pi(sk)->hdev == hdev) {
hci_pi(sk)->hdev = NULL;
sk->sk_err = EPIPE;
hci_dev_put(hdev);
}
- bh_unlock_sock(sk);
+ release_sock(sk);
}
read_unlock(&hci_sk_list.lock);
}
spin_lock_bh(&br->lock);
br_stp_recalculate_bridge_id(br);
br_features_recompute(br);
+
+ if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
+ (br->dev->flags & IFF_UP))
+ br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
dev_set_mtu(br->dev, br_min_mtu(br));
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- spin_lock_bh(&br->lock);
if ((p = br_get_port(br, args[1])) == NULL)
ret = -EINVAL;
else
br_stp_set_path_cost(p, args[2]);
- spin_unlock_bh(&br->lock);
+
return ret;
}
err:
while ((skb = segs)) {
segs = skb->next;
- kfree(skb);
+ kfree_skb(skb);
}
return ERR_PTR(err);
}
{
struct sock *sk = sock->sk;
- if (sk->sk_prot->compat_setsockopt != NULL)
+ if (sk->sk_prot->compat_getsockopt != NULL)
return sk->sk_prot->compat_getsockopt(sk, level, optname,
optval, optlen);
return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
/* set idle flag */
hctx->ccid3hctx_idle = 1;
break;
- case TFRC_SSTATE_NO_SENT:
- /*
- * XXX when implementing bidirectional rx/tx check this again
- */
- DCCP_WARN("Illegal ACK received - no packet sent\n");
- /* fall through */
+ case TFRC_SSTATE_NO_SENT: /* fall through */
case TFRC_SSTATE_TERM: /* ignore feedback when closing */
break;
}
DCCP_ACKVEC_STATE_RECEIVED))
goto discard;
- /*
- * Deliver to the CCID module in charge.
- * FIXME: Currently DCCP operates one-directional only, i.e. a listening
- * server is not at the same time a connecting client. There is
- * not much sense in delivering to both rx/tx sides at the moment
- * (only one is active at a time); when moving to bidirectional
- * service, this needs to be revised.
- */
- if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
- ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
- else
- ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
+ ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
+ ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
return __dccp_rcv_established(sk, skb, dh, len);
discard:
DCCP_ACKVEC_STATE_RECEIVED))
goto discard;
- /* XXX see the comments in dccp_rcv_established about this */
- if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
- ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
- else
- ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
+ ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
+ ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
}
/*
if (newsk != NULL) {
const struct dccp_request_sock *dreq = dccp_rsk(req);
- struct inet_connection_sock *newicsk = inet_csk(sk);
+ struct inet_connection_sock *newicsk = inet_csk(newsk);
struct dccp_sock *newdp = dccp_sk(newsk);
struct dccp_minisock *newdmsk = dccp_msk(newsk);
err);
} else {
dccp_pr_debug("packet discarded\n");
- kfree(skb);
+ kfree_skb(skb);
}
}
}
*net_lvl = host_lvl;
return 0;
case CIPSO_V4_MAP_STD:
- if (host_lvl < doi_def->map.std->lvl.local_size) {
+ if (host_lvl < doi_def->map.std->lvl.local_size &&
+ doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) {
*net_lvl = doi_def->map.std->lvl.local[host_lvl];
return 0;
}
- break;
+ return -EPERM;
}
return -EINVAL;
*host_lvl = doi_def->map.std->lvl.cipso[net_lvl];
return 0;
}
- break;
+ return -EPERM;
}
return -EINVAL;
return;
}
+/*
+ * Resend IGMP JOIN report; used for bonding.
+ */
+void ip_mc_rejoin_group(struct ip_mc_list *im)
+{
+ struct in_device *in_dev = im->interface;
+
+#ifdef CONFIG_IP_MULTICAST
+ if (im->multiaddr == IGMP_ALL_HOSTS)
+ return;
+
+ if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
+ igmp_mod_timer(im, IGMP_Initial_Report_Delay);
+ return;
+ }
+ /* else, v3 */
+ im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
+ IGMP_Unsolicited_Report_Count;
+ igmp_ifc_event(in_dev);
+#endif
+}
+
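
The intended caller is the bonding driver: after a failover the switch must
relearn group membership, so the driver walks the device's multicast list and
rejoins every group. A condensed sketch of that pattern (locking of
in_dev->mc_list, normally via its mc_list_lock, is the caller's job and
omitted here for brevity):

#include <linux/igmp.h>
#include <linux/inetdevice.h>

static void example_rejoin_all(struct in_device *in_dev)
{
	struct ip_mc_list *im;

	/* re-announce each membership so the new path learns it */
	for (im = in_dev->mc_list; im; im = im->next)
		ip_mc_rejoin_group(im);
}
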
/*
* A socket has left a multicast group on device dev
*/
EXPORT_SYMBOL(ip_mc_dec_group);
EXPORT_SYMBOL(ip_mc_inc_group);
EXPORT_SYMBOL(ip_mc_join_group);
+EXPORT_SYMBOL(ip_mc_rejoin_group);
list_for_each_entry(h, &unconfirmed, list) {
ct = tuplehash_to_ctrack(h);
if (iter(ct, data))
- goto found;
+ set_bit(IPS_DYING_BIT, &ct->status);
}
write_unlock_bh(&ip_conntrack_lock);
return NULL;
static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] =
{
[TH_SYN] = 1,
- [TH_SYN|TH_ACK] = 1,
[TH_SYN|TH_PUSH] = 1,
+ [TH_SYN|TH_URG] = 1,
+ [TH_SYN|TH_PUSH|TH_URG] = 1,
+ [TH_SYN|TH_ACK] = 1,
[TH_SYN|TH_ACK|TH_PUSH] = 1,
[TH_RST] = 1,
[TH_RST|TH_ACK] = 1,
return -ENOENT;
}
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
.print_conntrack = ipv4_print_conntrack,
.prepare = ipv4_prepare,
.get_features = ipv4_get_features,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = ipv4_tuple_to_nfattr,
.nfattr_to_tuple = ipv4_nfattr_to_tuple,
#endif
return icmp_error_message(skb, ctinfo, hooknum);
}
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
.error = icmp_error,
.destroy = NULL,
.me = NULL,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = icmp_tuple_to_nfattr,
.nfattr_to_tuple = icmp_nfattr_to_tuple,
#endif
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
int
nf_nat_port_range_to_nfattr(struct sk_buff *skb,
const struct nf_nat_range *range)
.manip_pkt = gre_manip_pkt,
.in_range = gre_in_range,
.unique_tuple = gre_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nfattr = nf_nat_port_range_to_nfattr,
.nfattr_to_range = nf_nat_port_nfattr_to_range,
#endif
.manip_pkt = icmp_manip_pkt,
.in_range = icmp_in_range,
.unique_tuple = icmp_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nfattr = nf_nat_port_range_to_nfattr,
.nfattr_to_range = nf_nat_port_nfattr_to_range,
#endif
.manip_pkt = tcp_manip_pkt,
.in_range = tcp_in_range,
.unique_tuple = tcp_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nfattr = nf_nat_port_range_to_nfattr,
.nfattr_to_range = nf_nat_port_nfattr_to_range,
#endif
.manip_pkt = udp_manip_pkt,
.in_range = udp_in_range,
.unique_tuple = udp_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nfattr = nf_nat_port_range_to_nfattr,
.nfattr_to_range = nf_nat_port_nfattr_to_range,
#endif
if (newsk != NULL) {
const struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_request_sock *treq = tcp_rsk(req);
- struct inet_connection_sock *newicsk = inet_csk(sk);
+ struct inet_connection_sock *newicsk = inet_csk(newsk);
struct tcp_sock *newtp;
/* Now setup tcp_sock */
if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
goto short_packet;
+ uh = skb->h.uh;
udp4_csum_init(skb, uh);
}
#endif
- if (netif_carrier_ok(dev))
- ndev->if_flags |= IF_READY;
-
-
ipv6_mc_init_dev(ndev);
ndev->tstamp = jiffies;
#ifdef CONFIG_SYSCTL
ipv6_dev_mc_dec(dev, &addr);
}
for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) {
+ if (ifa->flags&IFA_F_TENTATIVE)
+ continue;
if (idev->cnf.forwarding)
addrconf_join_anycast(ifa);
else
break;
}
read_unlock_bh(&idev->lock);
+ in6_dev_put(idev);
}
return im;
}
EXPORT_SYMBOL(compat_ipv6_setsockopt);
#endif
-static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_opt_hdr *hdr,
+static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
char __user *optval, int len)
{
- if (!hdr)
+ struct ipv6_opt_hdr *hdr;
+
+ if (!opt || !opt->hopopt)
return 0;
+ hdr = opt->hopopt;
+
len = min_t(int, len, ipv6_optlen(hdr));
if (copy_to_user(optval, hdr, ipv6_optlen(hdr)))
return -EFAULT;
{
lock_sock(sk);
- len = ipv6_getsockopt_sticky(sk, np->opt->hopopt,
+ len = ipv6_getsockopt_sticky(sk, np->opt,
optval, len);
release_sock(sk);
return put_user(len, optlen);
struct dst_entry *dst;
struct flowi fl = {
.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
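+ /* carry the packet mark into the lookup so fwmark routing rules apply */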
+ .mark = skb->mark,
.nl_u =
{ .ip6_u =
{ .daddr = iph->daddr,
}
nf_conntrack_get(reasm->nfct);
(*pskb)->nfct = reasm->nfct;
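+ /* the reassembled skb must carry the conntrack state, not just the reference */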
+ (*pskb)->nfctinfo = reasm->nfctinfo;
return NF_ACCEPT;
}
};
#endif
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
.print_tuple = ipv6_print_tuple,
.print_conntrack = ipv6_print_conntrack,
.prepare = ipv6_prepare,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = ipv6_tuple_to_nfattr,
.nfattr_to_tuple = ipv6_nfattr_to_tuple,
#endif
return icmpv6_error_message(skb, dataoff, ctinfo, hooknum);
}
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
.packet = icmpv6_packet,
.new = icmpv6_new,
.error = icmpv6_error,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = icmpv6_tuple_to_nfattr,
.nfattr_to_tuple = icmpv6_nfattr_to_tuple,
#endif
+++ /dev/null
- Revision 0.21: Uses the new generic socket option code.
-
- Revision 0.22: Gcc clean ups and drop out device registration. Use the
- new multi-protocol edition of hard_header
-
- Revision 0.23: IPX /proc by Mark Evans. Adding a route will
- will overwrite any existing route to the same network.
-
- Revision 0.24: Supports new /proc with no 4K limit
-
- Revision 0.25: Add ephemeral sockets, passive local network
- identification, support for local net 0 and
- multiple datalinks <Greg Page>
-
- Revision 0.26: Device drop kills IPX routes via it. (needed for module)
-
- Revision 0.27: Autobind <Mark Evans>
-
- Revision 0.28: Small fix for multiple local networks <Thomas Winder>
-
- Revision 0.29: Assorted major errors removed <Mark Evans>
- Small correction to promisc mode error fix <Alan Cox>
- Asynchronous I/O support. Changed to use notifiers
- and the newer packet_type stuff. Assorted major
- fixes <Alejandro Liu>
-
- Revision 0.30: Moved to net/ipx/... <Alan Cox>
- Don't set address length on recvfrom that errors.
- Incorrect verify_area.
-
- Revision 0.31: New sk_buffs. This still needs a lot of
- testing. <Alan Cox>
-
- Revision 0.32: Using sock_alloc_send_skb, firewall hooks. <Alan Cox>
- Supports sendmsg/recvmsg
-
- Revision 0.33: Internal network support, routing changes, uses a
- protocol private area for ipx data.
-
- Revision 0.34: Module support. <Jim Freeman>
-
- Revision 0.35: Checksum support. <Neil Turton>, hooked in by <Alan Cox>
- Handles WIN95 discovery packets <Volker Lendecke>
-
- Revision 0.36: Internal bump up for 2.1
-
- Revision 0.37: Began adding POSIXisms.
-
- Revision 0.38: Asynchronous socket stuff made current.
-
- Revision 0.39: SPX interfaces
-
- Revision 0.40: Tiny SIOCGSTAMP fix (chris@cybernet.co.nz)
-
- Revision 0.41: 802.2TR removed (p.norton@computer.org)
- Fixed connecting to primary net,
- Automatic binding on send & receive,
- Martijn van Oosterhout <kleptogimp@geocities.com>
-
- Revision 042: Multithreading - use spinlocks and refcounting to
- protect some structures: ipx_interface sock list, list
- of ipx interfaces, etc.
- Bugfixes - do refcounting on net_devices, check function
- results, etc. Thanks to davem and freitag for
- suggestions and guidance.
- Arnaldo Carvalho de Melo <acme@conectiva.com.br>,
- November, 2000
-
- Revision 043: Shared SKBs, don't mangle packets, some cleanups
- Arnaldo Carvalho de Melo <acme@conectiva.com.br>,
- December, 2000
-
- Revision 044: Call ipxitf_hold on NETDEV_UP - acme
-
- Revision 045: fix PPROP routing bug - acme
-
- Revision 046: Further fixes to PPROP, ipxitf_create_internal was
- doing an unneeded MOD_INC_USE_COUNT, implement
- sysctl for ipx_pprop_broacasting, fix the ipx sysctl
- handling, making it dynamic, some cleanups, thanks to
- Petr Vandrovec for review and good suggestions. (acme)
-
- Revision 047: Cleanups, CodingStyle changes, move the ncp connection
- hack out of line - acme
-
- Revision 048: Use sk->protinfo to store the pointer to IPX private
- area, remove af_ipx from sk->protinfo and move ipx_opt
- to include/net/ipx.h, use IPX_SK like DecNET, etc - acme
-
- Revision 049: SPX support dropped, see comment in ipx_create - acme
-
- Revision 050: Use seq_file for proc stuff, moving it to ipx_proc.c - acme
-
-Other fixes:
-
- Protect the module by a MOD_INC_USE_COUNT/MOD_DEC_USE_COUNT pair. Also, now
- usage count is managed this way:
- -Count one if the auto_interface mode is on
- -Count one per configured interface
-
- Jacques Gelinas (jacques@solucorp.qc.ca)
support", below.
IPX is similar in scope to IP, while SPX, which runs on top of IPX,
- is similar to TCP. There is also experimental support for SPX in
- Linux (see "SPX networking", below).
+ is similar to TCP.
To turn your Linux box into a fully featured NetWare file server and
IPX router, say Y here and fetch either lwared from
information, read the IPX-HOWTO available from
<http://www.tldp.org/docs.html#howto>.
- General information about how to connect Linux, Windows machines and
- Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>.
-
The IPX driver would enlarge your kernel by about 16 KB. To compile
this driver as a module, choose M here: the module will be called ipx.
Unless you want to integrate your Linux box with a local Novell
err = xfrm_state_delete(x);
- xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
- AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
-
if (err < 0)
goto out;
c.event = XFRM_MSG_DELSA;
km_state_notify(x, &c);
out:
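+ /* audit the delete attempt whether it succeeded or failed */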
+ xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+ AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
xfrm_state_put(x);
return err;
}
xp = xfrm_policy_bysel_ctx(XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir-1,
- &sel, tmp.security, 1);
+ &sel, tmp.security, 1, &err);
security_xfrm_policy_free(&tmp);
if (xp == NULL)
return -ENOENT;
- err = security_xfrm_policy_delete(xp);
-
xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);
static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
{
unsigned int dir;
- int err;
+ int err = 0, delete;
struct sadb_x_policy *pol;
struct xfrm_policy *xp;
struct km_event c;
if (dir >= XFRM_POLICY_MAX)
return -EINVAL;
+ delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
xp = xfrm_policy_byid(XFRM_POLICY_TYPE_MAIN, dir, pol->sadb_x_policy_id,
- hdr->sadb_msg_type == SADB_X_SPDDELETE2);
+ delete, &err);
if (xp == NULL)
return -ENOENT;
- err = 0;
+ if (delete) {
+ xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+ AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);
- c.seq = hdr->sadb_msg_seq;
- c.pid = hdr->sadb_msg_pid;
- if (hdr->sadb_msg_type == SADB_X_SPDDELETE2) {
+ if (err)
+ goto out;
+ c.seq = hdr->sadb_msg_seq;
+ c.pid = hdr->sadb_msg_pid;
c.data.byid = 1;
c.event = XFRM_MSG_DELPOLICY;
km_policy_notify(xp, dir, &c);
err = key_pol_get_resp(sk, xp, hdr, dir);
}
+out:
xfrm_pol_put(xp);
return err;
}
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
list_for_each_entry(h, &unconfirmed, list) {
ct = nf_ct_tuplehash_to_ctrack(h);
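+ /* unconfirmed entries cannot be unlinked here; flag them as
+ * dying so they are dropped when confirmation is attempted */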
if (iter(ct, data))
- goto found;
+ set_bit(IPS_DYING_BIT, &ct->status);
}
write_unlock_bh(&nf_conntrack_lock);
return NULL;
.new = gre_new,
.destroy = gre_destroy,
.me = THIS_MODULE,
-#if defined(CONFIG_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
.nfattr_to_tuple = nf_ct_port_nfattr_to_tuple,
#endif
static u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] =
{
[TH_SYN] = 1,
- [TH_SYN|TH_ACK] = 1,
[TH_SYN|TH_PUSH] = 1,
+ [TH_SYN|TH_URG] = 1,
+ [TH_SYN|TH_PUSH|TH_URG] = 1,
+ [TH_SYN|TH_ACK] = 1,
[TH_SYN|TH_ACK|TH_PUSH] = 1,
[TH_RST] = 1,
[TH_RST|TH_ACK] = 1,
return 1;
}
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
.packet = tcp_packet,
.new = tcp_new,
.error = tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.to_nfattr = tcp_to_nfattr,
.from_nfattr = nfattr_to_tcp,
.tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
.packet = tcp_packet,
.new = tcp_new,
.error = tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.to_nfattr = tcp_to_nfattr,
.from_nfattr = nfattr_to_tcp,
.tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
.packet = udp_packet,
.new = udp_new,
.error = udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
.nfattr_to_tuple = nf_ct_port_nfattr_to_tuple,
#endif
.packet = udp_packet,
.new = udp_new,
.error = udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
.nfattr_to_tuple = nf_ct_port_nfattr_to_tuple,
#endif
int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
{
- gfp_t allocation = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
int err = 0;
NETLINK_CB(skb).dst_group = group;
if (echo)
atomic_inc(&skb->users);
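+ /* let gfp_any() choose GFP_ATOMIC or GFP_KERNEL based on the calling context */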
- netlink_broadcast(nfnl, skb, pid, group, allocation);
+ netlink_broadcast(nfnl, skb, pid, group, gfp_any());
if (echo)
err = netlink_unicast(nfnl, skb, pid, MSG_DONTWAIT);
if (inst && atomic_dec_and_test(&inst->use)) {
UDEBUG("kfree(inst=%p)\n", inst);
kfree(inst);
+ module_put(THIS_MODULE);
}
}
spin_lock_bh(&inst->lock);
if (inst->skb) {
+ /* timer "holds" one reference (we have one more) */
+ if (del_timer(&inst->timer))
+ instance_put(inst);
if (inst->qlen)
__nfulnl_send(inst);
if (inst->skb) {
/* and finally put the refcount */
instance_put(inst);
-
- module_put(THIS_MODULE);
}
static inline void
{
int status;
- if (timer_pending(&inst->timer))
- del_timer(&inst->timer);
-
if (!inst->skb)
return 0;
spin_lock_bh(&inst->lock);
__nfulnl_send(inst);
- instance_put(inst);
spin_unlock_bh(&inst->lock);
+ instance_put(inst);
}
/* This is an inline function, we don't really care about a long
* for physical device (when called from ipv4) */
NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV,
sizeof(tmp_uint), &tmp_uint);
- if (skb->nf_bridge) {
+ if (skb->nf_bridge && skb->nf_bridge->physoutdev) {
tmp_uint =
htonl(skb->nf_bridge->physoutdev->ifindex);
NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
}
nlh->nlmsg_len = inst->skb->tail - old_tail;
+ inst->lastnlh = nlh;
return 0;
nlmsg_failure:
plen = 0;
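+ /* count the terminating NUL so the prefix reaches userspace as a full string */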
if (prefix)
- plen = strlen(prefix);
+ plen = strlen(prefix) + 1;
/* all macros expand to constant values at compile time */
/* FIXME: do we want to make the size calculation conditional based on
* enough room in the skb left. flush to userspace. */
UDEBUG("flushing old skb\n");
+ /* timer "holds" one reference (we have another one) */
+ if (del_timer(&inst->timer))
+ instance_put(inst);
__nfulnl_send(inst);
if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
add_timer(&inst->timer);
}
- spin_unlock_bh(&inst->lock);
+unlock_and_release:
+ spin_unlock_bh(&inst->lock);
+ instance_put(inst);
return;
alloc_failure:
- spin_unlock_bh(&inst->lock);
- instance_put(inst);
UDEBUG("error allocating skb\n");
/* FIXME: statistics */
+ goto unlock_and_release;
}
static int
ret = -EINVAL;
break;
}
+
+ if (!inst)
+ goto out;
} else {
if (!inst) {
UDEBUG("no config command, and no instance for "
out_put:
instance_put(inst);
+out:
return ret;
}
return 0;
list_failure:
- kfree(ans_skb);
+ kfree_skb(ans_skb);
return ret_val;
}
return;
}
- read_lock(&in6_dev->lock);
+ read_lock_bh(&in6_dev->lock);
for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) {
/* Add the address to the local list. */
addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
}
}
- read_unlock(&in6_dev->lock);
+ read_unlock_bh(&in6_dev->lock);
rcu_read_unlock();
}
#define RPCDBG_FACILITY RPCDBG_SVCDSP
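+/* only svc_create_pooled() sets sv_function, so its presence marks a pooled service */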
+#define svc_serv_is_pooled(serv) ((serv)->sv_function)
+
/*
* Mode for mapping cpus to pools.
*/
enum {
- SVC_POOL_NONE = -1, /* uninitialised, choose one of the others */
+ SVC_POOL_AUTO = -1, /* choose one of the others */
SVC_POOL_GLOBAL, /* no mapping, just a single global pool
* (legacy & UP mode) */
SVC_POOL_PERCPU, /* one pool per cpu */
SVC_POOL_PERNODE /* one pool per numa node */
};
+#define SVC_POOL_DEFAULT SVC_POOL_GLOBAL
/*
* Structure for mapping cpus to pools and vice versa.
* Setup once during sunrpc initialisation.
*/
static struct svc_pool_map {
+ int count; /* How many svc_servs use us */
int mode; /* Note: int not enum to avoid
* warnings about "enumeration value
* not handled in switch" */
unsigned int *pool_to; /* maps pool id to cpu or node */
unsigned int *to_pool; /* maps cpu or node to pool id */
} svc_pool_map = {
- .mode = SVC_POOL_NONE
+ .count = 0,
+ .mode = SVC_POOL_DEFAULT
};
+static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
+
+static int
+param_set_pool_mode(const char *val, struct kernel_param *kp)
+{
+ int *ip = (int *)kp->arg;
+ struct svc_pool_map *m = &svc_pool_map;
+ int err;
+
+ mutex_lock(&svc_pool_map_mutex);
+
+ err = -EBUSY;
+ if (m->count)
+ goto out;
+
+ err = 0;
+ if (!strncmp(val, "auto", 4))
+ *ip = SVC_POOL_AUTO;
+ else if (!strncmp(val, "global", 6))
+ *ip = SVC_POOL_GLOBAL;
+ else if (!strncmp(val, "percpu", 6))
+ *ip = SVC_POOL_PERCPU;
+ else if (!strncmp(val, "pernode", 7))
+ *ip = SVC_POOL_PERNODE;
+ else
+ err = -EINVAL;
+
+out:
+ mutex_unlock(&svc_pool_map_mutex);
+ return err;
+}
+static int
+param_get_pool_mode(char *buf, struct kernel_param *kp)
+{
+ int *ip = (int *)kp->arg;
+
+ switch (*ip) {
+ case SVC_POOL_AUTO:
+ return strlcpy(buf, "auto", 20);
+ case SVC_POOL_GLOBAL:
+ return strlcpy(buf, "global", 20);
+ case SVC_POOL_PERCPU:
+ return strlcpy(buf, "percpu", 20);
+ case SVC_POOL_PERNODE:
+ return strlcpy(buf, "pernode", 20);
+ default:
+ return sprintf(buf, "%d", *ip);
+ }
+}
+
+module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
+ &svc_pool_map.mode, 0644);
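+/* example: "modprobe sunrpc pool_mode=percpu".  The 0644 permission also
+ * exposes /sys/module/sunrpc/parameters/pool_mode; writes succeed only
+ * while no pooled service holds the map (-EBUSY otherwise). */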
/*
* Detect best pool mapping mode heuristically,
/*
- * Build the global map of cpus to pools and vice versa.
+ * Add a reference to the global map of cpus to pools (and
+ * vice versa). Initialise the map if we're the first user.
+ * Returns the number of pools.
*/
static unsigned int
-svc_pool_map_init(void)
+svc_pool_map_get(void)
{
struct svc_pool_map *m = &svc_pool_map;
int npools = -1;
- if (m->mode != SVC_POOL_NONE)
+ mutex_lock(&svc_pool_map_mutex);
+
+ if (m->count++) {
+ mutex_unlock(&svc_pool_map_mutex);
return m->npools;
+ }
- m->mode = svc_pool_map_choose_mode();
+ if (m->mode == SVC_POOL_AUTO)
+ m->mode = svc_pool_map_choose_mode();
switch (m->mode) {
case SVC_POOL_PERCPU:
}
m->npools = npools;
+ mutex_unlock(&svc_pool_map_mutex);
return m->npools;
}
+
+/*
+ * Drop a reference to the global map of cpus to pools.
+ * When the last reference is dropped, the map data is
+ * freed; this allows the sysadmin to change the pool
+ * mode using the pool_mode module option without
+ * rebooting or re-loading sunrpc.ko.
+ */
+static void
+svc_pool_map_put(void)
+{
+ struct svc_pool_map *m = &svc_pool_map;
+
+ mutex_lock(&svc_pool_map_mutex);
+
+ if (!--m->count) {
+ m->mode = SVC_POOL_DEFAULT;
+ kfree(m->to_pool);
+ kfree(m->pool_to);
+ m->npools = 0;
+ }
+
+ mutex_unlock(&svc_pool_map_mutex);
+}
+
+
/*
* Set the current thread's cpus_allowed mask so that it
* will only run on cpus in the given pool.
/*
* The caller checks for sv_nrpools > 1, which
- * implies that we've been initialized and the
- * map mode is not NONE.
+ * implies that we've been initialized.
*/
- BUG_ON(m->mode == SVC_POOL_NONE);
+ BUG_ON(m->count == 0);
switch (m->mode)
{
unsigned int pidx = 0;
/*
- * SVC_POOL_NONE happens in a pure client when
+ * An uninitialised map happens in a pure client when
* lockd is brought up, so silently treat it the
* same as SVC_POOL_GLOBAL.
*/
-
- switch (m->mode) {
- case SVC_POOL_PERCPU:
- pidx = m->to_pool[cpu];
- break;
- case SVC_POOL_PERNODE:
- pidx = m->to_pool[cpu_to_node(cpu)];
- break;
+ if (svc_serv_is_pooled(serv)) {
+ switch (m->mode) {
+ case SVC_POOL_PERCPU:
+ pidx = m->to_pool[cpu];
+ break;
+ case SVC_POOL_PERNODE:
+ pidx = m->to_pool[cpu_to_node(cpu)];
+ break;
+ }
}
return &serv->sv_pools[pidx % serv->sv_nrpools];
}
svc_thread_fn func, int sig, struct module *mod)
{
struct svc_serv *serv;
- unsigned int npools = svc_pool_map_init();
+ unsigned int npools = svc_pool_map_get();
serv = __svc_create(prog, bufsize, npools, shutdown);
svc_destroy(struct svc_serv *serv)
{
struct svc_sock *svsk;
+ struct svc_sock *tmp;
dprintk("svc: svc_destroy(%s, %d)\n",
serv->sv_program->pg_name,
del_timer_sync(&serv->sv_temptimer);
- while (!list_empty(&serv->sv_tempsocks)) {
- svsk = list_entry(serv->sv_tempsocks.next,
- struct svc_sock,
- sk_list);
- svc_close_socket(svsk);
- }
+ list_for_each_entry_safe(svsk, tmp, &serv->sv_tempsocks, sk_list)
+ svc_force_close_socket(svsk);
+
if (serv->sv_shutdown)
serv->sv_shutdown(serv);
- while (!list_empty(&serv->sv_permsocks)) {
- svsk = list_entry(serv->sv_permsocks.next,
- struct svc_sock,
- sk_list);
- svc_close_socket(svsk);
- }
+ list_for_each_entry_safe(svsk, tmp, &serv->sv_permsocks, sk_list)
+ svc_force_close_socket(svsk);
+
+ BUG_ON(!list_empty(&serv->sv_permsocks));
+ BUG_ON(!list_empty(&serv->sv_tempsocks));
cache_clean_deferred(serv);
+ if (svc_serv_is_pooled(serv))
+ svc_pool_map_put();
+
/* Unregister service with the portmapper */
svc_register(serv, 0, 0);
kfree(serv->sv_pools);
static void svc_udp_data_ready(struct sock *, int);
static int svc_udp_recvfrom(struct svc_rqst *);
static int svc_udp_sendto(struct svc_rqst *);
+static void svc_close_socket(struct svc_sock *svsk);
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
htons(((struct sockaddr_in *) addr)->sin_port));
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
case AF_INET6:
snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
htons(((struct sockaddr_in6 *) addr)->sin6_port));
break;
-#endif
+
default:
snprintf(buf, len, "unknown address type: %d", addr->sa_family);
break;
union svc_pktinfo_u {
struct in_pktinfo pkti;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_pktinfo pkti6;
-#endif
};
static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
case AF_INET6: {
struct in6_pktinfo *pki = CMSG_DATA(cmh);
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
-#endif
}
return;
}
}
}
-static void svc_udp_get_sender_address(struct svc_rqst *rqstp,
- struct sk_buff *skb)
+static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
+ struct cmsghdr *cmh)
{
switch (rqstp->rq_sock->sk_sk->sk_family) {
case AF_INET: {
- /* this seems to come from net/ipv4/udp.c:udp_recvmsg */
- struct sockaddr_in *sin = svc_addr_in(rqstp);
-
- sin->sin_family = AF_INET;
- sin->sin_port = skb->h.uh->source;
- sin->sin_addr.s_addr = skb->nh.iph->saddr;
- rqstp->rq_addrlen = sizeof(struct sockaddr_in);
- /* Remember which interface received this request */
- rqstp->rq_daddr.addr.s_addr = skb->nh.iph->daddr;
- }
+ struct in_pktinfo *pki = CMSG_DATA(cmh);
+ rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- case AF_INET6: {
- /* this is derived from net/ipv6/udp.c:udpv6_recvmesg */
- struct sockaddr_in6 *sin6 = svc_addr_in6(rqstp);
-
- sin6->sin6_family = AF_INET6;
- sin6->sin6_port = skb->h.uh->source;
- sin6->sin6_flowinfo = 0;
- sin6->sin6_scope_id = 0;
- if (ipv6_addr_type(&sin6->sin6_addr) &
- IPV6_ADDR_LINKLOCAL)
- sin6->sin6_scope_id = IP6CB(skb)->iif;
- ipv6_addr_copy(&sin6->sin6_addr,
- &skb->nh.ipv6h->saddr);
- rqstp->rq_addrlen = sizeof(struct sockaddr_in);
- /* Remember which interface received this request */
- ipv6_addr_copy(&rqstp->rq_daddr.addr6,
- &skb->nh.ipv6h->saddr);
}
+ case AF_INET6: {
+ struct in6_pktinfo *pki = CMSG_DATA(cmh);
+ ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
break;
-#endif
+ }
}
- return;
}
/*
struct svc_sock *svsk = rqstp->rq_sock;
struct svc_serv *serv = svsk->sk_server;
struct sk_buff *skb;
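+ /* control-message space for either an IPv4 or an IPv6 pktinfo */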
+ char buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))];
+ struct cmsghdr *cmh = (struct cmsghdr *)buffer;
int err, len;
+ struct msghdr msg = {
+ .msg_name = svc_addr(rqstp),
+ .msg_control = cmh,
+ .msg_controllen = sizeof(buffer),
+ .msg_flags = MSG_DONTWAIT,
+ };
if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
/* udp sockets need large rcvbuf as all pending
}
clear_bit(SK_DATA, &svsk->sk_flags);
- while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
+ while ((err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
+ 0, 0, MSG_PEEK | MSG_DONTWAIT)) < 0 ||
+ (skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
if (err == -EAGAIN) {
svc_sock_received(svsk);
return err;
/* possibly an icmp error */
dprintk("svc: recvfrom returned error %d\n", -err);
}
+ rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
if (skb->tstamp.off_sec == 0) {
struct timeval tv;
rqstp->rq_prot = IPPROTO_UDP;
- svc_udp_get_sender_address(rqstp, skb);
+ if (cmh->cmsg_level != IPPROTO_IP ||
+ cmh->cmsg_type != IP_PKTINFO) {
+ if (net_ratelimit())
+ printk("rpcsvc: received unknown control message:"
+ "%d/%d\n",
+ cmh->cmsg_level, cmh->cmsg_type);
+ skb_free_datagram(svsk->sk_sk, skb);
+ return 0;
+ }
+ svc_udp_get_dest_address(rqstp, cmh);
if (skb_is_nonlinear(skb)) {
/* we have to copy */
static void
svc_udp_init(struct svc_sock *svsk)
{
+ int one = 1;
+ mm_segment_t oldfs;
+
svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
svsk->sk_sk->sk_write_space = svc_write_space;
svsk->sk_recvfrom = svc_udp_recvfrom;
set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
set_bit(SK_CHNGBUF, &svsk->sk_flags);
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
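+ /* ops->setsockopt() checks optval against the user address space,
+ * so KERNEL_DS is needed for it to accept the kernel pointer below */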
+ /* make sure we get destination address info */
+ svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
+ (char __user *)&one, sizeof(one));
+ set_fs(oldfs);
}
/*
case AF_INET:
return ntohs(((struct sockaddr_in *)sin)->sin_port)
< PROT_SOCK;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
< PROT_SOCK;
-#endif
default:
return 0;
}
spin_unlock_bh(&serv->sv_lock);
}
-void svc_close_socket(struct svc_sock *svsk)
+static void svc_close_socket(struct svc_sock *svsk)
{
set_bit(SK_CLOSE, &svsk->sk_flags);
if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
svc_sock_put(svsk);
}
+void svc_force_close_socket(struct svc_sock *svsk)
+{
+ set_bit(SK_CLOSE, &svsk->sk_flags);
+ if (test_bit(SK_BUSY, &svsk->sk_flags)) {
+ /* Waiting to be processed, but no threads left,
+ * So just remove it from the waiting list
+ */
+ list_del_init(&svsk->sk_ready);
+ clear_bit(SK_BUSY, &svsk->sk_flags);
+ }
+ svc_close_socket(svsk);
+}
+
/**
* svc_makesock - Make a socket for nfsd and lockd
* @serv: RPC server structure
struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
struct xfrm_selector *sel,
- struct xfrm_sec_ctx *ctx, int delete)
+ struct xfrm_sec_ctx *ctx, int delete,
+ int *err)
{
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
struct hlist_node *entry;
+ *err = 0;
write_lock_bh(&xfrm_policy_lock);
chain = policy_hash_bysel(sel, sel->family, dir);
ret = NULL;
xfrm_sec_ctx_match(ctx, pol->security)) {
xfrm_pol_hold(pol);
if (delete) {
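+ /* ask the LSM first; if it refuses, leave the policy linked and return the error */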
+ *err = security_xfrm_policy_delete(pol);
+ if (*err) {
+ write_unlock_bh(&xfrm_policy_lock);
+ return pol;
+ }
hlist_del(&pol->bydst);
hlist_del(&pol->byidx);
xfrm_policy_count[dir]--;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
-struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete)
+struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
+ int *err)
{
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
struct hlist_node *entry;
+ *err = 0;
write_lock_bh(&xfrm_policy_lock);
chain = xfrm_policy_byidx + idx_hash(id);
ret = NULL;
if (pol->type == type && pol->index == id) {
xfrm_pol_hold(pol);
if (delete) {
+ *err = security_xfrm_policy_delete(pol);
+ if (*err) {
+ write_unlock_bh(&xfrm_policy_lock);
+ return pol;
+ }
hlist_del(&pol->bydst);
hlist_del(&pol->byidx);
xfrm_policy_count[dir]--;
err = xfrm_state_delete(x);
- xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
- AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
-
if (err < 0)
goto out;
km_state_notify(x, &c);
out:
+ xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+ AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
xfrm_state_put(x);
return err;
}
return err;
if (p->index)
- xp = xfrm_policy_byid(type, p->dir, p->index, delete);
+ xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err);
else {
struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
struct xfrm_policy tmp;
if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
return err;
}
- xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, delete);
+ xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
+ delete, &err);
security_xfrm_policy_free(&tmp);
}
if (xp == NULL)
MSG_DONTWAIT);
}
} else {
- err = security_xfrm_policy_delete(xp);
-
xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);
km_policy_notify(xp, p->dir, &c);
}
- xfrm_pol_put(xp);
-
out:
+ xfrm_pol_put(xp);
return err;
}
x = xfrm_state_lookup(&id->daddr, id->spi, id->proto, id->family);
if (x == NULL) {
- kfree(r_skb);
+ kfree_skb(r_skb);
return -ESRCH;
}
return err;
if (p->index)
- xp = xfrm_policy_byid(type, p->dir, p->index, 0);
+ xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err);
else {
struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
struct xfrm_policy tmp;
if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
return err;
}
- xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, 0);
+ xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
+ 0, &err);
security_xfrm_policy_free(&tmp);
}
if (xp == NULL)
- return err;
- read_lock(&xp->lock);
+ return -ENOENT;
+ read_lock(&xp->lock);
if (xp->dead) {
read_unlock(&xp->lock);
goto out;
struct xfrm_usersa_info *p = &ue->state;
x = xfrm_state_lookup(&p->id.daddr, p->id.spi, p->id.proto, p->family);
- err = -ENOENT;
+ err = -ENOENT;
if (x == NULL)
return err;
- err = -EINVAL;
-
spin_lock_bh(&x->lock);
+ err = -EINVAL;
if (x->km.state != XFRM_STATE_VALID)
goto out;
km_state_expired(x, ue->hard, current->pid);
xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
AUDIT_MAC_IPSEC_DELSA, 1, NULL, x);
}
+ err = 0;
out:
spin_unlock_bh(&x->lock);
xfrm_state_put(x);
$prototype =~ s/^noinline +//;
$prototype =~ s/__devinit +//;
$prototype =~ s/^#define\s+//; #ak added
- $prototype =~ s/__attribute__ \(\([a-z,]*\)\)//;
+ $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
# Yes, this truly is vile. We are looking for:
# 1. Return type (may be nothing if we're looking at a macro)
* (SS vendor << 16 | device)
*/
static unsigned int ad1981_jacks_blacklist[] = {
+ 0x10140523, /* Thinkpad R40 */
+ 0x10140534, /* Thinkpad X31 */
0x10140537, /* Thinkpad T41p */
0x10140554, /* Thinkpad T42p/R50p */
0 /* end */
return err;
}
+ snd_card_set_dev(card, &pci->dev);
+
/* initialise synth voices*/
for (i = 0; i < ALI_CHANNELS; i++ ) {
codec->synth.voices[i].number = i;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable switches */
static long mpu_port[SNDRV_CARDS];
-static long fm_port[SNDRV_CARDS];
+static long fm_port[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)]=1};
static int soft_ac3[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)]=1};
#ifdef SUPPORT_JOYSTICK
static int joystick_port[SNDRV_CARDS];
struct snd_opl3 *opl3;
int err;
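+ /* fm_port=0 from the module parameter disables the FM synth entirely */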
+ if (!fm_port)
+ goto disable_fm;
+
/* first try FM regs in PCI port range */
iosynth = cm->iobase + CM_REG_FM_PCI;
err = snd_opl3_create(cm->card, iosynth, iosynth + 2,
case 0x3C8: val |= CM_FMSEL_3C8; break;
case 0x388: val |= CM_FMSEL_388; break;
default:
- return 0;
+ goto disable_fm;
}
snd_cmipci_write(cm, CM_REG_LEGACY_CTRL, val);
/* enable FM */
OPL3_HW_OPL3, 0, &opl3) < 0) {
printk(KERN_ERR "cmipci: no OPL device at %#lx, "
"skipping...\n", iosynth);
- /* disable FM */
- snd_cmipci_write(cm, CM_REG_LEGACY_CTRL,
- val & ~CM_FMSEL_MASK);
- snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN);
- return 0;
+ goto disable_fm;
}
}
if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
return err;
}
return 0;
+
+ disable_fm:
+ snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_FMSEL_MASK);
+ snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN);
+ return 0;
}
static int __devinit snd_cmipci_create(struct snd_card *card, struct pci_dev *pci,
if (card == NULL)
return -ENOMEM;
+ snd_card_set_dev(card, &pci->dev);
+
if ((err = snd_echo_create(card, pci, &chip)) < 0) {
snd_card_free(card);
return err;
HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("PC Speaker Playback Volume", 0x18, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("PC Speaker Playback Switch", 0x18, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
/* HDA_CODEC_VOLUME("PC Speaker Playback Volume", 0x18, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("PC Speaker Playback Switch", 0x18, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
{
static void cxt5045_hp_automute(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- unsigned int bits = (spec->hp_present || !spec->cur_eapd) ? 0x80 : 0;
+ unsigned int bits;
spec->hp_present = snd_hda_codec_read(codec, 0x11, 0,
AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
+
+ bits = (spec->hp_present || !spec->cur_eapd) ? 0x80 : 0;
snd_hda_codec_amp_update(codec, 0x10, 0, HDA_OUTPUT, 0, 0x80, bits);
snd_hda_codec_amp_update(codec, 0x10, 1, HDA_OUTPUT, 0, 0x80, bits);
}
static void cxt5047_hp_automute(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- unsigned int bits = spec->hp_present || !spec->cur_eapd ? 0x80 : 0;
+ unsigned int bits;
spec->hp_present = snd_hda_codec_read(codec, 0x13, 0,
AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
+
+ bits = (spec->hp_present || !spec->cur_eapd) ? 0x80 : 0;
snd_hda_codec_amp_update(codec, 0x1d, 0, HDA_OUTPUT, 0, 0x80, bits);
snd_hda_codec_amp_update(codec, 0x1d, 1, HDA_OUTPUT, 0, 0x80, bits);
/* Mute/Unmute PCM 2 for good measure - some systems need this */
alc882_cfg_tbl);
if (board_config < 0 || board_config >= ALC882_MODEL_LAST) {
- printk(KERN_INFO "hda_codec: Unknown model for ALC882, "
- "trying auto-probe from BIOS...\n");
- board_config = ALC882_AUTO;
+ /* Pick up systems that don't supply PCI SSID */
+ switch (codec->subsystem_id) {
+ case 0x106b0c00: /* Mac Pro */
+ board_config = ALC885_MACPRO;
+ break;
+ default:
+ printk(KERN_INFO "hda_codec: Unknown model for ALC882, "
+ "trying auto-probe from BIOS...\n");
+ board_config = ALC882_AUTO;
+ }
}
if (board_config == ALC882_AUTO) {
HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
+ HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
/* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT),
HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */
HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
+ HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
/* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT),
HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */
/*HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),*/
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
+ HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x16, 2, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_VOLUME("Front Mic Boost", 0x1a, 0, HDA_INPUT),
HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
static struct snd_kcontrol_new alc262_HP_BPC_WildWest_option_mixer[] = {
HDA_CODEC_VOLUME("Rear Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
HDA_CODEC_MUTE("Rear Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Rear Mic Boost", 0x18, 0, HDA_INPUT),
{ } /* end */
};
STAC_D945GTP3,
STAC_D945GTP5,
STAC_MACMINI,
+ STAC_MACBOOK,
+ STAC_MACBOOK_PRO,
STAC_922X_MODELS
};
"Dell Inspiron E1705/9400", STAC_REF),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ce,
"Dell XPS M1710", STAC_REF),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01cf,
+ "Dell Precision M90", STAC_REF),
{} /* terminator */
};
0x02a19320, 0x40000100,
};
+static unsigned int macbook_pin_configs[10] = {
+ 0x0321e230, 0x03a1e020, 0x400000fd, 0x9017e110,
+ 0x400000fe, 0x0381e021, 0x1345e240, 0x13c5e22e,
+ 0x400000fc, 0x400000fb,
+};
+
+static unsigned int macbook_pro_pin_configs[10] = {
+ 0x0221401f, 0x90a70120, 0x01813024, 0x01014010,
+ 0x400000fd, 0x01016011, 0x1345e240, 0x13c5e22e,
+ 0x400000fc, 0x400000fb,
+};
+
static unsigned int *stac922x_brd_tbl[STAC_922X_MODELS] = {
[STAC_D945_REF] = ref922x_pin_configs,
[STAC_D945GTP3] = d945gtp3_pin_configs,
[STAC_D945GTP5] = d945gtp5_pin_configs,
[STAC_MACMINI] = d945gtp5_pin_configs,
+ [STAC_MACBOOK] = macbook_pin_configs,
+ [STAC_MACBOOK_PRO] = macbook_pro_pin_configs,
};
static const char *stac922x_models[STAC_922X_MODELS] = {
[STAC_D945GTP5] = "5stack",
[STAC_D945GTP3] = "3stack",
[STAC_MACMINI] = "macmini",
+ [STAC_MACBOOK] = "macbook",
+ [STAC_MACBOOK_PRO] = "macbook-pro",
};
static struct snd_pci_quirk stac922x_cfg_tbl[] = {
spec->board_config = snd_hda_check_board_config(codec, STAC_922X_MODELS,
stac922x_models,
stac922x_cfg_tbl);
+ if (spec->board_config == STAC_MACMINI) {
+ spec->gpio_mute = 1;
+ /* Intel Macs have all same PCI SSID, so we need to check
+ * codec SSID to distinguish the exact models
+ */
+ switch (codec->subsystem_id) {
+ case 0x106b1e00:
+ spec->board_config = STAC_MACBOOK_PRO;
+ break;
+ }
+ }
+
again:
if (spec->board_config < 0) {
snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC922x, "
return err;
}
- if (spec->board_config == STAC_MACMINI)
- spec->gpio_mute = 1;
-
codec->patch_ops = stac92xx_patch_ops;
return 0;
return err;
}
+ snd_card_set_dev(card, &pci->dev);
+
*rchip = chip;
return 0;
}
hdspm->dev = dev;
hdspm->pci = pci;
+ snd_card_set_dev(card, &pci->dev);
+
if ((err =
snd_hdspm_create(card, hdspm, precise_ptr[dev],
enable_monitor[dev])) < 0) {
*/
static const u16 wm9712_reg[] = {
0x6174, 0x8000, 0x8000, 0x8000, // 6
- 0xf0f0, 0xaaa0, 0xc008, 0x6808, // e
+ 0x0f0f, 0xaaa0, 0xc008, 0x6808, // e
0xe808, 0xaaa0, 0xad00, 0x8000, // 16
0xe808, 0x3000, 0x8000, 0x0000, // 1e
0x0000, 0x0000, 0x0000, 0x000f, // 26
SOC_SINGLE("Speaker Playback Switch", AC97_MASTER, 15, 1, 1),
SOC_DOUBLE("Headphone Playback Volume", AC97_HEADPHONE, 8, 0, 31, 1),
SOC_SINGLE("Headphone Playback Switch", AC97_HEADPHONE,15, 1, 1),
+SOC_DOUBLE("PCM Playback Volume", AC97_PCM, 8, 0, 31, 1),
SOC_SINGLE("Speaker Playback ZC Switch", AC97_MASTER, 7, 1, 0),
SOC_SINGLE("Speaker Playback Invert Switch", AC97_MASTER, 6, 1, 0),