22: Newly-added code has been compiled with `gcc -W'. This will generate
lots of noise, but is good for finding bugs like "warning: comparison
between signed and unsigned" (see the sketch after this checklist).
+
+23: Tested after it has been merged into the -mm patchset to make sure
+ that it still works with all of the other queued patches and various
+ changes in the VM, VFS, and other subsystems.
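
(A minimal illustration of checklist item 22; the file name and code below
are hypothetical examples, not part of the checklist itself.)

/* signed-unsigned.c -- gcc -W flags the loop condition below with
 * "warning: comparison between signed and unsigned". */
#include <stdio.h>
#include <string.h>

int main(void)
{
	int i;                  /* signed index */
	const char *s = "abc";

	/* strlen() returns size_t (unsigned), so i is converted to
	 * unsigned for the comparison: a negative i would compare as a
	 * huge positive value and the condition would be false, which
	 * is rarely what was meant.  gcc -W warns here. */
	for (i = 0; i < strlen(s); i++)
		printf("%c\n", s[i]);
	return 0;
}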
--- /dev/null
+CPU load
+--------
+
+Linux exports various bits of information via `/proc/stat' and
+`/proc/uptime' that userland tools, such as top(1), use to calculate
+the average time the system spent in a particular state, for example:
+
+ $ iostat
+ Linux 2.6.18.3-exp (linmac) 02/20/2007
+
+ avg-cpu: %user %nice %system %iowait %steal %idle
+ 10.01 0.00 2.92 5.44 0.00 81.63
+
+ ...
+
+Here the system thinks that over the default sampling period it spent
+10.01% of the time doing work in user space, 2.92% in the kernel, and
+was idle 81.63% of the time.
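+
+As a minimal sketch (not taken from any real tool; the first four
+fields of the "cpu" line are assumed to be user, nice, system, and
+idle), a top(1)-like percentage can be derived by sampling
+`/proc/stat' twice and working on the deltas:
+
+/* gcc -o cpupct cpupct.c */
+#include <stdio.h>
+#include <unistd.h>
+
+/* read the first four fields of the aggregate "cpu" line */
+static int sample (unsigned long long v[4])
+{
+	FILE *f = fopen ("/proc/stat", "r");
+	if (!f)
+		return -1;
+	if (fscanf (f, "cpu %llu %llu %llu %llu",
+		    &v[0], &v[1], &v[2], &v[3]) != 4) {
+		fclose (f);
+		return -1;
+	}
+	fclose (f);
+	return 0;
+}
+
+int main (void)
+{
+	unsigned long long a[4], b[4], total = 0;
+	int i;
+
+	if (sample (a))
+		return 1;
+	sleep (1);
+	if (sample (b))
+		return 1;
+	for (i = 0; i < 4; ++i)
+		total += b[i] - a[i];
+	if (total == 0)
+		return 1;
+	/* idle share of all jiffies counted during the interval */
+	printf ("%%idle = %.2f\n",
+		100.0 * (double) (b[3] - a[3]) / (double) total);
+	return 0;
+}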
+
+In most cases the `/proc/stat' information reflects reality quite
+closely; however, due to the nature of how and when the kernel
+collects this data, it sometimes cannot be trusted at all.
+
+So how is this information collected? Whenever a timer interrupt is
+signalled, the kernel looks at what kind of task was running at that
+moment and increments the counter that corresponds to that task's
+kind/state. The problem with this is that the system could have
+switched between various states multiple times between two timer
+interrupts, yet the counter is incremented only for the last state.
+
+
+Example
+-------
+
+Imagine a system with one task that periodically burns cycles in the
+following manner:
+
+     time line between two timer interrupts
+|--------------------------------------|
+ ^                                    ^
+ |_ something begins working          |
+                                      |_ something goes to sleep
+                                         (only to be awakened quite soon)
+
+In the above situation the system will be 0% loaded according to
+`/proc/stat' (since the timer interrupt will always happen when the
+system is executing the idle handler), but in reality the load is
+closer to 99%.
+
+One can imagine many more situations where this behavior of the kernel
+will lead to quite erratic information inside `/proc/stat'.
+
+
+/* gcc -o hog smallhog.c */
+#include <time.h>
+#include <limits.h>
+#include <signal.h>
+#include <sys/time.h>
+#define HIST 10
+
+static volatile sig_atomic_t stop;
+
+static void sighandler (int signr)
+{
+ (void) signr;
+ stop = 1;
+}
+/* burn cycles until the SIGALRM handler sets `stop' or niters runs out */
+static unsigned long hog (unsigned long niters)
+{
+ stop = 0;
+ while (!stop && --niters);
+ return niters;
+}
+int main (void)
+{
+ int i;
+ struct itimerval it = { .it_interval = { .tv_sec = 0, .tv_usec = 1 },
+ .it_value = { .tv_sec = 0, .tv_usec = 1 } };
+ sigset_t set;
+ unsigned long v[HIST];
+ double tmp = 0.0;
+ unsigned long n;
+ signal (SIGALRM, &sighandler);
+ setitimer (ITIMER_REAL, &it, NULL);
+
+ /* calibrate: measure how many hog() iterations fit between two
+    SIGALRMs, then aim for roughly two thirds of that per period */
+ hog (ULONG_MAX);
+ for (i = 0; i < HIST; ++i) v[i] = ULONG_MAX - hog (ULONG_MAX);
+ for (i = 0; i < HIST; ++i) tmp += v[i];
+ tmp /= HIST;
+ n = tmp - (tmp / 3.0);
+
+ sigemptyset (&set);
+ sigaddset (&set, SIGALRM);
+
+ for (;;) {
+ hog (n); /* burn the calibrated share of the period */
+ sigwait (&set, &i); /* then wait for the next SIGALRM */
+ }
+ return 0;
+}
+
+
+References
+----------
+
+http://lkml.org/lkml/2007/2/12/6
+Documentation/filesystems/proc.txt (1.8)
+
+
+Thanks
+------
+
+Con Kolivas, Pavel Machek
FRAMEBUFFER LAYER
P: Antonino Daplas
-M: adaplas@pol.net
+M: adaplas@gmail.com
L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
W: http://linux-fbdev.sourceforge.net/
S: Maintained
INTEL 810/815 FRAMEBUFFER DRIVER
P: Antonino Daplas
-M: adaplas@pol.net
+M: adaplas@gmail.com
L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
S: Maintained
NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER
P: Antonino Daplas
-M: adaplas@pol.net
+M: adaplas@gmail.com
L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
S: Maintained
S3 SAVAGE FRAMEBUFFER DRIVER
P: Antonino Daplas
-M: adaplas@pol.net
+M: adaplas@gmail.com
L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
S: Maintained
zero_fp
get_scno
-#ifdef CONFIG_ALIGNMENT_TRAP
- ldr ip, __cr_alignment
- ldr ip, [ip]
- mcr p15, 0, ip, c1, c0 @ update control register
-#endif
enable_irqs ip
str r4, [sp, #-S_OFF]! @ push fifth arg
b ret_slow_syscall
.align 5
-#ifdef CONFIG_ALIGNMENT_TRAP
- .type __cr_alignment, #object
-__cr_alignment:
- .word cr_alignment
-#endif
.type sys_call_table, #object
ENTRY(sys_call_table)
return DMA_MEMORY_IO;
free1_out:
- kfree(dev->dma_mem->bitmap);
+ kfree(dev->dma_mem);
out:
return 0;
}
/* distribute the allocatable pages across the various zones and pass them to the allocator
*/
- zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
- zones_size[ZONE_NORMAL] = 0;
+ zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
#ifdef CONFIG_HIGHMEM
zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
#endif
return DMA_MEMORY_IO;
free1_out:
- kfree(dev->dma_mem->bitmap);
+ kfree(dev->dma_mem);
out:
if (mem_base)
iounmap(mem_base);
menu "PS3 Platform Options"
depends on PPC_PS3
+config PS3_ADVANCED
+ depends on PPC_PS3
+ bool "PS3 Advanced configuration options"
+ help
+ This gives you access to some advanced options for the PS3. The
+ defaults should be fine for most users, but these options may make
+ it possible to better control the kernel configuration if you know
+ what you are doing.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about these options.
+
+ Most users should say N to this question.
+
config PS3_HTAB_SIZE
depends on PPC_PS3
- int "PS3 Platform pagetable size"
+ int "PS3 Platform pagetable size" if PS3_ADVANCED
range 18 20
default 20
help
config PS3_VUART
depends on PPC_PS3
- bool "PS3 Virtual UART support"
+ bool "PS3 Virtual UART support" if PS3_ADVANCED
default y
help
Include support for the PS3 Virtual UART.
general, all users will say Y.
config PS3_PS3AV
- tristate "PS3 AV settings driver"
- depends on PPC_PS3
- select PS3_VUART
+ tristate "PS3 AV settings driver" if PS3_ADVANCED
+ depends on PS3_VUART
default y
help
Include support for the PS3 AV Settings driver.
general, all users will say Y or M.
config PS3_SYS_MANAGER
- bool "PS3 System Manager driver"
- select PS3_VUART
+ bool "PS3 System Manager driver" if PS3_ADVANCED
+ depends on PS3_VUART
default y
help
Include support for the PS3 System Manager.
security. This option enables these legacy devices; on most
systems, it is safe to say N.
+config RAW_DRIVER
+ tristate "RAW driver (/dev/raw/rawN) (OBSOLETE)"
+ help
+ The raw driver permits block devices to be bound to /dev/raw/rawN.
+ Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
+ See the raw(8) manpage for more details.
+
+ The raw driver is deprecated and will be removed soon.
+ Applications should simply open the device (eg /dev/hda1)
+ with the O_DIRECT flag; a minimal sketch follows this section.
+
+config MAX_RAW_DEVS
+ int "Maximum number of RAW devices to support (1-8192)"
+ depends on RAW_DRIVER
+ default "256"
+ help
+ The maximum number of RAW devices that are supported.
+ Default is 256. Increase this number in case you need lots of
+ raw devices.
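
(An editorial aside to the RAW_DRIVER help text above: a minimal
user-space sketch of the recommended O_DIRECT alternative. The device
path, transfer size, and 512-byte alignment are illustrative
assumptions, not requirements imposed by this Kconfig entry.)

/* gcc -D_GNU_SOURCE -o odirect odirect.c */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main (void)
{
	void *buf;
	int fd = open ("/dev/hda1", O_RDONLY | O_DIRECT);

	if (fd < 0)
		return 1;
	/* direct I/O wants sector-aligned buffers, offsets, and sizes;
	 * posix_memalign() provides the buffer alignment */
	if (posix_memalign (&buf, 512, 4096)) {
		close (fd);
		return 1;
	}
	if (read (fd, buf, 4096) < 0) {
		/* EINVAL here usually means a misaligned buffer or size */
		free (buf);
		close (fd);
		return 1;
	}
	free (buf);
	close (fd);
	return 0;
}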
config LEGACY_PTY_COUNT
int "Maximum number of legacy PTY in use"
switch ( auxv->a_type ) {
case AT_SYSINFO:
__kernel_vsyscall = auxv->a_un.a_val;
+ /* See if the page is under TASK_SIZE */
+ if (__kernel_vsyscall < (unsigned long) envp)
+ __kernel_vsyscall = 0;
break;
case AT_SYSINFO_EHDR:
vsyscall_ehdr = auxv->a_un.a_val;
sigio_lock();
err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
- if(err)
+ if(err){
+ printk("maybe_sigio_broken - failed to add pollfd for "
+ "descriptor %d\n", fd);
goto out;
+ }
all_sigio_fds.poll[all_sigio_fds.used++] =
((struct pollfd) { .fd = fd,
#include "linux/slab.h"
#include "linux/types.h"
#include "linux/errno.h"
+#include "linux/spinlock.h"
#include "asm/uaccess.h"
#include "asm/smp.h"
#include "asm/ldt.h"
return ret;
}
-short dummy_list[9] = {0, -1};
-short * host_ldt_entries = NULL;
+static DEFINE_SPINLOCK(host_ldt_lock);
+static short dummy_list[9] = {0, -1};
+static short * host_ldt_entries = NULL;
-void ldt_get_host_info(void)
+static void ldt_get_host_info(void)
{
long ret;
- struct ldt_entry * ldt;
+ struct ldt_entry * ldt, *tmp;
int i, size, k, order;
+ spin_lock(&host_ldt_lock);
+
+ if(host_ldt_entries != NULL){
+ spin_unlock(&host_ldt_lock);
+ return;
+ }
host_ldt_entries = dummy_list+1;
+ spin_unlock(&host_ldt_lock);
+
for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++);
ldt = (struct ldt_entry *)
__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
if(ldt == NULL) {
- printk("ldt_get_host_info: couldn't allocate buffer for host ldt\n");
+ printk("ldt_get_host_info: couldn't allocate buffer for host "
+ "ldt\n");
return;
}
host_ldt_entries = dummy_list;
else {
size = (size + 1) * sizeof(dummy_list[0]);
- host_ldt_entries = kmalloc(size, GFP_KERNEL);
- if(host_ldt_entries == NULL) {
- printk("ldt_get_host_info: couldn't allocate host ldt list\n");
+ tmp = kmalloc(size, GFP_KERNEL);
+ if(tmp == NULL) {
+ printk("ldt_get_host_info: couldn't allocate host ldt "
+ "list\n");
goto out_free;
}
+ host_ldt_entries = tmp;
}
for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){
* inherited from the host. All ldt-entries found
* will be reset in the following loop
*/
- if(host_ldt_entries == NULL)
- ldt_get_host_info();
+ ldt_get_host_info();
for(num_p=host_ldt_entries; *num_p != -1; num_p++){
desc.entry_number = *num_p;
err = write_ldt_entry(&new_mm->id, 1, &desc,
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
- return(CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
- ptr, bytecount));
+ return CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
+ ptr, bytecount);
}
config ACPI_BAY
tristate "Removable Drive Bay (EXPERIMENTAL)"
depends on EXPERIMENTAL
+ depends on ACPI_DOCK
help
This driver adds support for ACPI controlled removable drive
bays such as the IBM ultrabay or the Dell Module Bay.
return -ENOMEM;
err = major_nr = register_blkdev(0, "umem");
- if (err < 0)
+ if (err < 0) {
+ pci_unregister_driver(&mm_pci_driver);
return -EIO;
+ }
for (i = 0; i < num_cards; i++) {
mm_gendisk[i] = alloc_disk(1 << MM_SHIFT);
return 0;
out:
+ pci_unregister_driver(&mm_pci_driver);
unregister_blkdev(major_nr, "umem");
while (i--)
put_disk(mm_gendisk[i]);
/* check whether we're reopening an existing tty */
if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
tty = devpts_get_tty(idx);
+ /*
+ * If we don't have a tty here on a slave open, it's because
+ * the master already started the close process and there's
+ * no relation between devpts file and tty anymore.
+ */
+ if (!tty && driver->subtype == PTY_TYPE_SLAVE) {
+ retval = -EIO;
+ goto end_init;
+ }
+ /*
+ * It's safe from now on because init_dev() is called with
+ * tty_mutex held and release_dev() won't change tty->count
+ * or tty->flags without having to grab tty_mutex
+ */
if (tty && driver->subtype == PTY_TYPE_MASTER)
tty = tty->link;
} else {
# Config.in for the CAPI subsystem
#
config ISDN_DRV_AVMB1_VERBOSE_REASON
- bool "Verbose reason code reporting (kernel size +=7K)"
+ bool "Verbose reason code reporting"
depends on ISDN_CAPI
+ default y
help
- If you say Y here, the AVM B1 driver will give verbose reasons for
+ If you say Y here, the CAPI drivers will give verbose reasons for
disconnecting. This will increase the size of the kernel by 7 KB. If
unsure, say Y.
+config CAPI_TRACE
+ bool "CAPI trace support"
+ depends on ISDN_CAPI
+ default y
+ help
+ If you say Y here, the kernelcapi driver can make verbose traces
+ of CAPI messages. This feature can be enabled/disabled via IOCTL for
+ every controller (default disabled).
+ This will increase the size of the kernelcapi module by 20 KB.
+ If unsure, say Y.
+
config ISDN_CAPI_MIDDLEWARE
bool "CAPI2.0 Middleware support (EXPERIMENTAL)"
depends on ISDN_CAPI && EXPERIMENTAL
capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
capidrv_plci *plcip;
isdn_ctrl cmd;
+ _cdebbuf *cdb;
if (!card) {
printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
break;
}
}
- printk(KERN_ERR "capidrv-%d: %s\n",
- card->contrnr, capi_cmsg2str(cmsg));
+ cdb = capi_cmsg2str(cmsg);
+ if (cdb) {
+ printk(KERN_WARNING "capidrv-%d: %s\n",
+ card->contrnr, cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_WARNING "capidrv-%d: CAPI_INFO_IND InfoNumber %x not handled\n",
+ card->contrnr, cmsg->InfoNumber);
+
break;
case CAPI_CONNECT_ACTIVE_CONF: /* plci */
static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
{
capi_message2cmsg(&s_cmsg, skb->data);
- if (debugmode > 3)
- printk(KERN_DEBUG "capidrv_signal: applid=%d %s\n",
- ap->applid, capi_cmsg2str(&s_cmsg));
-
+ if (debugmode > 3) {
+ _cdebbuf *cdb = capi_cmsg2str(&s_cmsg);
+
+ if (cdb) {
+ printk(KERN_DEBUG "%s: applid=%d %s\n", __FUNCTION__,
+ ap->applid, cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_DEBUG "%s: applid=%d %s not traced\n",
+ __FUNCTION__, ap->applid,
+ capi_cmd2str(s_cmsg.Command, s_cmsg.Subcommand));
+ }
if (s_cmsg.Command == CAPI_DATA_B3
&& s_cmsg.Subcommand == CAPI_IND) {
handle_data(&s_cmsg, skb);
/*-------------------------------------------------------*/
+
+#ifdef CONFIG_CAPI_TRACE
+
/*-------------------------------------------------------*/
static char *pnames[] =
};
-static char buf[8192];
-static char *p = NULL;
#include <stdarg.h>
/*-------------------------------------------------------*/
-static void bufprint(char *fmt,...)
+static _cdebbuf *bufprint(_cdebbuf *cdb, char *fmt,...)
{
va_list f;
+ size_t n,r;
+
+ if (!cdb)
+ return NULL;
va_start(f, fmt);
- vsprintf(p, fmt, f);
+ r = cdb->size - cdb->pos;
+ n = vsnprintf(cdb->p, r, fmt, f);
va_end(f);
- p += strlen(p);
+ if (n >= r) {
+ /* truncated, need bigger buffer */
+ size_t ns = 2 * cdb->size;
+ u_char *nb;
+
+ while ((ns - cdb->pos) <= n)
+ ns *= 2;
+ nb = kmalloc(ns, GFP_ATOMIC);
+ if (!nb) {
+ cdebbuf_free(cdb);
+ return NULL;
+ }
+ memcpy(nb, cdb->buf, cdb->pos);
+ kfree(cdb->buf);
+ nb[cdb->pos] = 0;
+ cdb->buf = nb;
+ cdb->p = cdb->buf + cdb->pos;
+ cdb->size = ns;
+ va_start(f, fmt);
+ r = cdb->size - cdb->pos;
+ n = vsnprintf(cdb->p, r, fmt, f);
+ va_end(f);
+ }
+ cdb->p += n;
+ cdb->pos += n;
+ return cdb;
}
-static void printstructlen(u8 * m, unsigned len)
+static _cdebbuf *printstructlen(_cdebbuf *cdb, u8 * m, unsigned len)
{
unsigned hex = 0;
+
+ if (!cdb)
+ return NULL;
for (; len; len--, m++)
if (isalnum(*m) || *m == ' ') {
if (hex)
- bufprint(">");
- bufprint("%c", *m);
+ cdb = bufprint(cdb, ">");
+ cdb = bufprint(cdb, "%c", *m);
hex = 0;
} else {
if (!hex)
- bufprint("<%02x", *m);
+ cdb = bufprint(cdb, "<%02x", *m);
else
- bufprint(" %02x", *m);
+ cdb = bufprint(cdb, " %02x", *m);
hex = 1;
}
if (hex)
- bufprint(">");
+ cdb = bufprint(cdb, ">");
+ return cdb;
}
-static void printstruct(u8 * m)
+static _cdebbuf *printstruct(_cdebbuf *cdb, u8 * m)
{
unsigned len;
+
if (m[0] != 0xff) {
len = m[0];
m += 1;
len = ((u16 *) (m + 1))[0];
m += 3;
}
- printstructlen(m, len);
+ cdb = printstructlen(cdb, m, len);
+ return cdb;
}
/*-------------------------------------------------------*/
#define NAME (pnames[cmsg->par[cmsg->p]])
-static void protocol_message_2_pars(_cmsg * cmsg, int level)
+static _cdebbuf *protocol_message_2_pars(_cdebbuf *cdb, _cmsg *cmsg, int level)
{
for (; TYP != _CEND; cmsg->p++) {
int slen = 29 + 3 - level;
int i;
- bufprint(" ");
+ if (!cdb)
+ return NULL;
+ cdb = bufprint(cdb, " ");
for (i = 0; i < level - 1; i++)
- bufprint(" ");
+ cdb = bufprint(cdb, " ");
switch (TYP) {
case _CBYTE:
- bufprint("%-*s = 0x%x\n", slen, NAME, *(u8 *) (cmsg->m + cmsg->l));
+ cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u8 *) (cmsg->m + cmsg->l));
cmsg->l++;
break;
case _CWORD:
- bufprint("%-*s = 0x%x\n", slen, NAME, *(u16 *) (cmsg->m + cmsg->l));
+ cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u16 *) (cmsg->m + cmsg->l));
cmsg->l += 2;
break;
case _CDWORD:
- bufprint("%-*s = 0x%lx\n", slen, NAME, *(u32 *) (cmsg->m + cmsg->l));
+ cdb = bufprint(cdb, "%-*s = 0x%lx\n", slen, NAME, *(u32 *) (cmsg->m + cmsg->l));
cmsg->l += 4;
break;
case _CSTRUCT:
- bufprint("%-*s = ", slen, NAME);
+ cdb = bufprint(cdb, "%-*s = ", slen, NAME);
if (cmsg->m[cmsg->l] == '\0')
- bufprint("default");
+ cdb = bufprint(cdb, "default");
else
- printstruct(cmsg->m + cmsg->l);
- bufprint("\n");
+ cdb = printstruct(cdb, cmsg->m + cmsg->l);
+ cdb = bufprint(cdb, "\n");
if (cmsg->m[cmsg->l] != 0xff)
cmsg->l += 1 + cmsg->m[cmsg->l];
else
case _CMSTRUCT:
/*----- Metastruktur 0 -----*/
if (cmsg->m[cmsg->l] == '\0') {
- bufprint("%-*s = default\n", slen, NAME);
+ cdb = bufprint(cdb, "%-*s = default\n", slen, NAME);
cmsg->l++;
jumpcstruct(cmsg);
} else {
char *name = NAME;
unsigned _l = cmsg->l;
- bufprint("%-*s\n", slen, name);
+ cdb = bufprint(cdb, "%-*s\n", slen, name);
cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1;
cmsg->p++;
- protocol_message_2_pars(cmsg, level + 1);
+ cdb = protocol_message_2_pars(cdb, cmsg, level + 1);
}
break;
}
}
+ return cdb;
}
/*-------------------------------------------------------*/
-char *capi_message2str(u8 * msg)
+
+static _cdebbuf *g_debbuf;
+static u_long g_debbuf_lock;
+static _cmsg *g_cmsg;
+
+_cdebbuf *cdebbuf_alloc(void)
{
+ _cdebbuf *cdb;
+
+ if (likely(!test_and_set_bit(1, &g_debbuf_lock))) {
+ cdb = g_debbuf;
+ goto init;
+ } else
+ cdb = kmalloc(sizeof(_cdebbuf), GFP_ATOMIC);
+ if (!cdb)
+ return NULL;
+ cdb->buf = kmalloc(CDEBUG_SIZE, GFP_ATOMIC);
+ if (!cdb->buf) {
+ kfree(cdb);
+ return NULL;
+ }
+ cdb->size = CDEBUG_SIZE;
+init:
+ cdb->buf[0] = 0;
+ cdb->p = cdb->buf;
+ cdb->pos = 0;
+ return cdb;
+}
- _cmsg cmsg;
- p = buf;
- p[0] = 0;
+void cdebbuf_free(_cdebbuf *cdb)
+{
+ if (likely(cdb == g_debbuf)) {
+ test_and_clear_bit(1, &g_debbuf_lock);
+ return;
+ }
+ if (likely(cdb))
+ kfree(cdb->buf);
+ kfree(cdb);
+}
- cmsg.m = msg;
- cmsg.l = 8;
- cmsg.p = 0;
- byteTRcpy(cmsg.m + 4, &cmsg.Command);
- byteTRcpy(cmsg.m + 5, &cmsg.Subcommand);
- cmsg.par = cpars[command_2_index(cmsg.Command, cmsg.Subcommand)];
- bufprint("%-26s ID=%03d #0x%04x LEN=%04d\n",
- mnames[command_2_index(cmsg.Command, cmsg.Subcommand)],
+_cdebbuf *capi_message2str(u8 * msg)
+{
+ _cdebbuf *cdb;
+ _cmsg *cmsg;
+
+ cdb = cdebbuf_alloc();
+ if (unlikely(!cdb))
+ return NULL;
+ if (likely(cdb == g_debbuf))
+ cmsg = g_cmsg;
+ else
+ cmsg = kmalloc(sizeof(_cmsg), GFP_ATOMIC);
+ if (unlikely(!cmsg)) {
+ cdebbuf_free(cdb);
+ return NULL;
+ }
+ cmsg->m = msg;
+ cmsg->l = 8;
+ cmsg->p = 0;
+ byteTRcpy(cmsg->m + 4, &cmsg->Command);
+ byteTRcpy(cmsg->m + 5, &cmsg->Subcommand);
+ cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)];
+
+ cdb = bufprint(cdb, "%-26s ID=%03d #0x%04x LEN=%04d\n",
+ mnames[command_2_index(cmsg->Command, cmsg->Subcommand)],
((unsigned short *) msg)[1],
((unsigned short *) msg)[3],
((unsigned short *) msg)[0]);
- protocol_message_2_pars(&cmsg, 1);
- return buf;
+ cdb = protocol_message_2_pars(cdb, cmsg, 1);
+ if (unlikely(cmsg != g_cmsg))
+ kfree(cmsg);
+ return cdb;
}
-char *capi_cmsg2str(_cmsg * cmsg)
+_cdebbuf *capi_cmsg2str(_cmsg * cmsg)
{
- p = buf;
- p[0] = 0;
+ _cdebbuf *cdb;
+
+ cdb = cdebbuf_alloc();
+ if (!cdb)
+ return NULL;
cmsg->l = 8;
cmsg->p = 0;
- bufprint("%s ID=%03d #0x%04x LEN=%04d\n",
+ cdb = bufprint(cdb, "%s ID=%03d #0x%04x LEN=%04d\n",
mnames[command_2_index(cmsg->Command, cmsg->Subcommand)],
((u16 *) cmsg->m)[1],
((u16 *) cmsg->m)[3],
((u16 *) cmsg->m)[0]);
- protocol_message_2_pars(cmsg, 1);
- return buf;
+ cdb = protocol_message_2_pars(cdb, cmsg, 1);
+ return cdb;
}
+int __init cdebug_init(void)
+{
+ g_cmsg = kmalloc(sizeof(_cmsg), GFP_KERNEL);
+ if (!g_cmsg)
+ return -ENOMEM;
+ g_debbuf = kmalloc(sizeof(_cdebbuf), GFP_KERNEL);
+ if (!g_debbuf) {
+ kfree(g_cmsg);
+ return -ENOMEM;
+ }
+ g_debbuf->buf = kmalloc(CDEBUG_GSIZE, GFP_KERNEL);
+ if (!g_debbuf->buf) {
+ kfree(g_cmsg);
+ kfree(g_debbuf);
+ return -ENOMEM;
+ }
+ g_debbuf->size = CDEBUG_GSIZE;
+ g_debbuf->buf[0] = 0;
+ g_debbuf->p = g_debbuf->buf;
+ g_debbuf->pos = 0;
+ return 0;
+}
+
+void __exit cdebug_exit(void)
+{
+ if (g_debbuf)
+ kfree(g_debbuf->buf);
+ kfree(g_debbuf);
+ kfree(g_cmsg);
+}
+
+#else /* !CONFIG_CAPI_TRACE */
+
+static _cdebbuf g_debbuf = {"CONFIG_CAPI_TRACE not enabled", NULL, 0, 0};
+
+_cdebbuf *capi_message2str(u8 * msg)
+{
+ return &g_debbuf;
+}
+
+_cdebbuf *capi_cmsg2str(_cmsg * cmsg)
+{
+ return &g_debbuf;
+}
+
+_cdebbuf *cdebbuf_alloc(void)
+{
+ return &g_debbuf;
+}
+
+void cdebbuf_free(_cdebbuf *cdb)
+{
+}
+
+int __init cdebug_init(void)
+{
+ return 0;
+}
+
+void __exit cdebug_exit(void)
+{
+}
+
+#endif
+
+EXPORT_SYMBOL(cdebbuf_alloc);
+EXPORT_SYMBOL(cdebbuf_free);
EXPORT_SYMBOL(capi_cmsg2message);
EXPORT_SYMBOL(capi_message2cmsg);
EXPORT_SYMBOL(capi_cmsg_header);
int showctl = 0;
u8 cmd, subcmd;
unsigned long flags;
+ _cdebbuf *cdb;
if (card->cardstate != CARD_RUNNING) {
- printk(KERN_INFO "kcapi: controller %d not active, got: %s",
- card->cnr, capi_message2str(skb->data));
+ cdb = capi_message2str(skb->data);
+ if (cdb) {
+ printk(KERN_INFO "kcapi: controller [%03d] not active, got: %s",
+ card->cnr, cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_INFO "kcapi: controller [%03d] not active, cannot trace\n",
+ card->cnr);
goto error;
}
showctl |= (card->traceflag & 1);
if (showctl & 2) {
if (showctl & 1) {
- printk(KERN_DEBUG "kcapi: got [0x%lx] id#%d %s len=%u\n",
- (unsigned long) card->cnr,
- CAPIMSG_APPID(skb->data),
+ printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u\n",
+ card->cnr, CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
} else {
- printk(KERN_DEBUG "kcapi: got [0x%lx] %s\n",
- (unsigned long) card->cnr,
- capi_message2str(skb->data));
+ cdb = capi_message2str(skb->data);
+ if (cdb) {
+ printk(KERN_DEBUG "kcapi: got [%03d] %s\n",
+ card->cnr, cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u, cannot trace\n",
+ card->cnr, CAPIMSG_APPID(skb->data),
+ capi_cmd2str(cmd, subcmd),
+ CAPIMSG_LEN(skb->data));
}
}
ap = get_capi_appl_by_nr(CAPIMSG_APPID(skb->data));
if ((!ap) || (ap->release_in_progress)) {
read_unlock_irqrestore(&application_lock, flags);
- printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s)\n",
- CAPIMSG_APPID(skb->data), capi_message2str(skb->data));
+ cdb = capi_message2str(skb->data);
+ if (cdb) {
+ printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s)\n",
+ CAPIMSG_APPID(skb->data), cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s) cannot trace\n",
+ CAPIMSG_APPID(skb->data),
+ capi_cmd2str(cmd, subcmd));
goto error;
}
skb_queue_tail(&ap->recv_queue, skb);
{
card->cardstate = CARD_RUNNING;
- printk(KERN_NOTICE "kcapi: card %d \"%s\" ready.\n",
+ printk(KERN_NOTICE "kcapi: card [%03d] \"%s\" ready.\n",
card->cnr, card->name);
notify_push(KCI_CONTRUP, card->cnr, 0, 0);
capi_ctr_put(card);
}
- printk(KERN_NOTICE "kcapi: card %d down.\n", card->cnr);
+ printk(KERN_NOTICE "kcapi: card [%03d] down.\n", card->cnr);
notify_push(KCI_CONTRDOWN, card->cnr, 0, 0);
}
void capi_ctr_suspend_output(struct capi_ctr *card)
{
if (!card->blocked) {
- printk(KERN_DEBUG "kcapi: card %d suspend\n", card->cnr);
+ printk(KERN_DEBUG "kcapi: card [%03d] suspend\n", card->cnr);
card->blocked = 1;
}
}
void capi_ctr_resume_output(struct capi_ctr *card)
{
if (card->blocked) {
- printk(KERN_DEBUG "kcapi: card %d resume\n", card->cnr);
+ printk(KERN_DEBUG "kcapi: card [%03d] resume\n", card->cnr);
card->blocked = 0;
}
}
}
ncards++;
- printk(KERN_NOTICE "kcapi: Controller %d: %s attached\n",
+ printk(KERN_NOTICE "kcapi: Controller [%03d]: %s attached\n",
card->cnr, card->name);
return 0;
}
card->procent = NULL;
}
capi_cards[card->cnr - 1] = NULL;
- printk(KERN_NOTICE "kcapi: Controller %d: %s unregistered\n",
+ printk(KERN_NOTICE "kcapi: Controller [%03d]: %s unregistered\n",
card->cnr, card->name);
return 0;
showctl |= (card->traceflag & 1);
if (showctl & 2) {
if (showctl & 1) {
- printk(KERN_DEBUG "kcapi: put [%#x] id#%d %s len=%u\n",
+ printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u\n",
CAPIMSG_CONTROLLER(skb->data),
CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
} else {
- printk(KERN_DEBUG "kcapi: put [%#x] %s\n",
- CAPIMSG_CONTROLLER(skb->data),
- capi_message2str(skb->data));
+ _cdebbuf *cdb = capi_message2str(skb->data);
+ if (cdb) {
+ printk(KERN_DEBUG "kcapi: put [%03d] %s\n",
+ CAPIMSG_CONTROLLER(skb->data),
+ cdb->buf);
+ cdebbuf_free(cdb);
+ } else
+ printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u cannot trace\n",
+ CAPIMSG_CONTROLLER(skb->data),
+ CAPIMSG_APPID(skb->data),
+ capi_cmd2str(cmd, subcmd),
+ CAPIMSG_LEN(skb->data));
}
-
}
return card->send_message(card, skb);
}
return -ESRCH;
card->traceflag = fdef.flag;
- printk(KERN_INFO "kcapi: contr %d set trace=%d\n",
+ printk(KERN_INFO "kcapi: contr [%03d] set trace=%d\n",
card->cnr, card->traceflag);
return 0;
}
{
char *p;
char rev[32];
+ int ret;
+ ret = cdebug_init();
+ if (ret)
+ return ret;
kcapi_proc_init();
if ((p = strchr(revision, ':')) != 0 && p[1]) {
/* make sure all notifiers are finished */
flush_scheduled_work();
+ cdebug_exit();
}
module_init(kcapi_init);
gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o asyncdata.o
usb_gigaset-y := usb-gigaset.o
-bas_gigaset-y := bas-gigaset.o isocdata.o
ser_gigaset-y := ser-gigaset.o
+bas_gigaset-y := bas-gigaset.o isocdata.o
-obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o
-obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o
-obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o gigaset.o
+obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset.o
+obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o
+obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o
+obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o
* =====================================================================
*/
-/* not set by Kbuild when building both ser_gigaset and usb_gigaset */
-#ifndef KBUILD_MODNAME
-#define KBUILD_MODNAME "asy_gigaset"
-#endif
-
#include "gigaset.h"
#include <linux/crc-ccitt.h>
#include <linux/bitrev.h>
.sync_super = super_1_sync,
},
};
-
-static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
-{
- struct list_head *tmp;
- mdk_rdev_t *rdev;
-
- ITERATE_RDEV(mddev,rdev,tmp)
- if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
- return rdev;
-
- return NULL;
-}
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
- struct list_head *tmp;
- mdk_rdev_t *rdev;
+ struct list_head *tmp, *tmp2;
+ mdk_rdev_t *rdev, *rdev2;
ITERATE_RDEV(mddev1,rdev,tmp)
- if (match_dev_unit(mddev2, rdev))
- return 1;
+ ITERATE_RDEV(mddev2, rdev2, tmp2)
+ if (rdev->bdev->bd_contains ==
+ rdev2->bdev->bd_contains)
+ return 1;
return 0;
}
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
- mdk_rdev_t *same_pdev;
- char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+ char b[BDEVNAME_SIZE];
struct kobject *ko;
char *s;
else
mddev->size = rdev->size;
}
- same_pdev = match_dev_unit(mddev, rdev);
- if (same_pdev)
- printk(KERN_WARNING
- "%s: WARNING: %s appears to be on the same physical"
- " disk as %s. True\n protection against single-disk"
- " failure might be compromised.\n",
- mdname(mddev), bdevname(rdev->bdev,b),
- bdevname(same_pdev->bdev,b2));
/* Verify rdev->desc_nr is unique.
* If it is -1, assign a free number, else
return -EINVAL;
}
+ if (pers->sync_request) {
+ /* Warn if this is a potentially silly
+ * configuration.
+ */
+ char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+ mdk_rdev_t *rdev2;
+ struct list_head *tmp2;
+ int warned = 0;
+ ITERATE_RDEV(mddev, rdev, tmp) {
+ ITERATE_RDEV(mddev, rdev2, tmp2) {
+ if (rdev < rdev2 &&
+ rdev->bdev->bd_contains ==
+ rdev2->bdev->bd_contains) {
+ printk(KERN_WARNING
+ "%s: WARNING: %s appears to be"
+ " on the same physical disk as"
+ " %s.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev,b),
+ bdevname(rdev2->bdev,b2));
+ warned = 1;
+ }
+ }
+ }
+ if (warned)
+ printk(KERN_WARNING
+ "True protection against single-disk"
+ " failure might be compromised.\n");
+ }
+
mddev->recovery = 0;
mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
mddev->barriers_work = 1;
set_disk_ro(disk, 0);
blk_queue_make_request(mddev->queue, md_fail_request);
mddev->pers->stop(mddev);
+ mddev->queue->merge_bvec_fn = NULL;
+ mddev->queue->unplug_fn = NULL;
+ mddev->queue->issue_flush_fn = NULL;
if (mddev->pers->sync_request)
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
EXPORT_SYMBOL_GPL(md_do_sync);
+static int remove_and_add_spares(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+ struct list_head *rtmp;
+ int spares = 0;
+
+ ITERATE_RDEV(mddev,rdev,rtmp)
+ if (rdev->raid_disk >= 0 &&
+ (test_bit(Faulty, &rdev->flags) ||
+ ! test_bit(In_sync, &rdev->flags)) &&
+ atomic_read(&rdev->nr_pending)==0) {
+ if (mddev->pers->hot_remove_disk(
+ mddev, rdev->raid_disk)==0) {
+ char nm[20];
+ sprintf(nm,"rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ rdev->raid_disk = -1;
+ }
+ }
+
+ if (mddev->degraded) {
+ ITERATE_RDEV(mddev,rdev,rtmp)
+ if (rdev->raid_disk < 0
+ && !test_bit(Faulty, &rdev->flags)) {
+ rdev->recovery_offset = 0;
+ if (mddev->pers->hot_add_disk(mddev,rdev)) {
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_create_link(&mddev->kobj,
+ &rdev->kobj, nm);
+ spares++;
+ md_new_event(mddev);
+ } else
+ break;
+ }
+ }
+ return spares;
+}
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
return;
if (mddev_trylock(mddev)) {
- int spares =0;
+ int spares = 0;
spin_lock_irq(&mddev->write_lock);
if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
* Spare are also removed and re-added, to allow
* the personality to fail the re-add.
*/
- ITERATE_RDEV(mddev,rdev,rtmp)
- if (rdev->raid_disk >= 0 &&
- (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
- atomic_read(&rdev->nr_pending)==0) {
- if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
- char nm[20];
- sprintf(nm,"rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
- rdev->raid_disk = -1;
- }
- }
-
- if (mddev->degraded) {
- ITERATE_RDEV(mddev,rdev,rtmp)
- if (rdev->raid_disk < 0
- && !test_bit(Faulty, &rdev->flags)) {
- rdev->recovery_offset = 0;
- if (mddev->pers->hot_add_disk(mddev,rdev)) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
- spares++;
- md_new_event(mddev);
- } else
- break;
- }
- }
- if (spares) {
+ if (mddev->reshape_position != MaxSector) {
+ if (mddev->pers->check_reshape(mddev) != 0)
+ /* Cannot proceed */
+ goto unlock;
+ set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+ } else if ((spares = remove_and_add_spares(mddev))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
if (dev < 0)
dev += conf->raid_disks;
} else {
- while (sector > conf->stride) {
+ while (sector >= conf->stride) {
sector -= conf->stride;
if (dev < conf->near_copies)
dev += conf->raid_disks - conf->near_copies;
for (k=0; k<conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
+ BUG_ON(k == conf->copies);
bio = r10_bio->devs[1].bio;
bio->bi_next = biolist;
biolist = bio;
if (!conf->tmppage)
goto out_free_conf;
+ conf->mddev = mddev;
+ conf->raid_disks = mddev->raid_disks;
conf->near_copies = nc;
conf->far_copies = fc;
conf->copies = nc*fc;
conf->far_offset = fo;
conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
+ size = mddev->size >> (conf->chunk_shift-1);
+ sector_div(size, fc);
+ size = size * conf->raid_disks;
+ sector_div(size, nc);
+ /* 'size' is now the number of chunks in the array */
+ /* calculate "used chunks per device" in 'stride' */
+ stride = size * conf->copies;
+ sector_div(stride, conf->raid_disks);
+ mddev->size = stride << (conf->chunk_shift-1);
+
if (fo)
- conf->stride = 1 << conf->chunk_shift;
- else {
- stride = mddev->size >> (conf->chunk_shift-1);
+ stride = 1;
+ else
sector_div(stride, fc);
- conf->stride = stride << conf->chunk_shift;
- }
+ conf->stride = stride << conf->chunk_shift;
+
conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
r10bio_pool_free, conf);
if (!conf->r10bio_pool) {
disk->head_position = 0;
}
- conf->raid_disks = mddev->raid_disks;
- conf->mddev = mddev;
spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
/*
* Ok, everything is just fine now
*/
- if (conf->far_offset) {
- size = mddev->size >> (conf->chunk_shift-1);
- size *= conf->raid_disks;
- size <<= conf->chunk_shift;
- sector_div(size, conf->far_copies);
- } else
- size = conf->stride * conf->raid_disks;
- sector_div(size, conf->near_copies);
- mddev->array_size = size/2;
- mddev->resync_max_sectors = size;
+ mddev->array_size = size << (conf->chunk_shift-1);
+ mddev->resync_max_sectors = size << conf->chunk_shift;
mddev->queue->unplug_fn = raid10_unplug;
mddev->queue->issue_flush_fn = raid10_issue_flush;
static void compute_parity6(struct stripe_head *sh, int method)
{
raid6_conf_t *conf = sh->raid_conf;
- int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count;
+ int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
struct bio *chosen;
/**** FIX THIS: This could be very bad if disks is close to 256 ****/
void *ptrs[disks];
/* Compute one missing block */
static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
{
- raid6_conf_t *conf = sh->raid_conf;
- int i, count, disks = conf->raid_disks;
+ int i, count, disks = sh->disks;
void *ptr[MAX_XOR_BLOCKS], *p;
int pd_idx = sh->pd_idx;
int qd_idx = raid6_next_disk(pd_idx, disks);
/* Compute two missing blocks */
static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
{
- raid6_conf_t *conf = sh->raid_conf;
- int i, count, disks = conf->raid_disks;
+ int i, count, disks = sh->disks;
int pd_idx = sh->pd_idx;
int qd_idx = raid6_next_disk(pd_idx, disks);
int d0_idx = raid6_next_disk(qd_idx, disks);
static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
{
raid6_conf_t *conf = sh->raid_conf;
- int disks = conf->raid_disks;
+ int disks = sh->disks;
struct bio *return_bi= NULL;
struct bio *bi;
int i;
- int syncing;
+ int syncing, expanding, expanded;
int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
int non_overwrite = 0;
int failed_num[2] = {0, 0};
clear_bit(STRIPE_DELAYED, &sh->state);
syncing = test_bit(STRIPE_SYNCING, &sh->state);
+ expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+ expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
/* Now to look around and see what can be done */
rcu_read_lock();
* parity, or to satisfy requests
* or to load a block that is being partially written.
*/
- if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) {
+ if (to_read || non_overwrite || (to_write && failed) ||
+ (syncing && (uptodate < disks)) || expanding) {
for (i=disks; i--;) {
dev = &sh->dev[i];
if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
(dev->toread ||
(dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
syncing ||
+ expanding ||
(failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) ||
(failed >= 2 && (sh->dev[failed_num[1]].toread || to_write))
)
}
}
}
+
+ if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
+ /* Need to write out all blocks after computing P&Q */
+ sh->disks = conf->raid_disks;
+ sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
+ conf->raid_disks);
+ compute_parity6(sh, RECONSTRUCT_WRITE);
+ for (i = conf->raid_disks ; i-- ; ) {
+ set_bit(R5_LOCKED, &sh->dev[i].flags);
+ locked++;
+ set_bit(R5_Wantwrite, &sh->dev[i].flags);
+ }
+ clear_bit(STRIPE_EXPANDING, &sh->state);
+ } else if (expanded) {
+ clear_bit(STRIPE_EXPAND_READY, &sh->state);
+ atomic_dec(&conf->reshape_stripes);
+ wake_up(&conf->wait_for_overlap);
+ md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
+ }
+
+ if (expanding && locked == 0) {
+ /* We have read all the blocks in this stripe and now we need to
+ * copy some of them into a target stripe for expand.
+ */
+ clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+ for (i = 0; i < sh->disks ; i++)
+ if (i != pd_idx && i != qd_idx) {
+ int dd_idx2, pd_idx2, j;
+ struct stripe_head *sh2;
+
+ sector_t bn = compute_blocknr(sh, i);
+ sector_t s = raid5_compute_sector(
+ bn, conf->raid_disks,
+ conf->raid_disks - conf->max_degraded,
+ &dd_idx2, &pd_idx2, conf);
+ sh2 = get_active_stripe(conf, s,
+ conf->raid_disks,
+ pd_idx2, 1);
+ if (sh2 == NULL)
+ /* so far only the early blocks of
+ * this stripe have been requested.
+ * When later blocks get requested, we
+ * will try again
+ */
+ continue;
+ if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
+ test_bit(R5_Expanded,
+ &sh2->dev[dd_idx2].flags)) {
+ /* must have already done this block */
+ release_stripe(sh2);
+ continue;
+ }
+ memcpy(page_address(sh2->dev[dd_idx2].page),
+ page_address(sh->dev[i].page),
+ STRIPE_SIZE);
+ set_bit(R5_Expanded, &sh2->dev[dd_idx2].flags);
+ set_bit(R5_UPTODATE, &sh2->dev[dd_idx2].flags);
+ for (j = 0 ; j < conf->raid_disks ; j++)
+ if (j != sh2->pd_idx &&
+ j != raid6_next_disk(sh2->pd_idx,
+ sh2->disks) &&
+ !test_bit(R5_Expanded,
+ &sh2->dev[j].flags))
+ break;
+ if (j == conf->raid_disks) {
+ set_bit(STRIPE_EXPAND_READY,
+ &sh2->state);
+ set_bit(STRIPE_HANDLE, &sh2->state);
+ }
+ release_stripe(sh2);
+ }
+ }
+
spin_unlock(&sh->lock);
while ((bi=return_bi)) {
rcu_read_unlock();
if (rdev) {
- if (syncing)
+ if (syncing || expanding || expanded)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
bi->bi_bdev = rdev->bdev;
struct stripe_head *sh;
int pd_idx;
sector_t first_sector, last_sector;
- int raid_disks;
- int data_disks;
+ int raid_disks = conf->previous_raid_disks;
+ int data_disks = raid_disks - conf->max_degraded;
+ int new_data_disks = conf->raid_disks - conf->max_degraded;
int i;
int dd_idx;
sector_t writepos, safepos, gap;
conf->expand_progress != 0) {
/* restarting in the middle, skip the initial sectors */
sector_nr = conf->expand_progress;
- sector_div(sector_nr, conf->raid_disks-1);
+ sector_div(sector_nr, new_data_disks);
*skipped = 1;
return sector_nr;
}
* to after where expand_lo old_maps to
*/
writepos = conf->expand_progress +
- conf->chunk_size/512*(conf->raid_disks-1);
- sector_div(writepos, conf->raid_disks-1);
+ conf->chunk_size/512*(new_data_disks);
+ sector_div(writepos, new_data_disks);
safepos = conf->expand_lo;
- sector_div(safepos, conf->previous_raid_disks-1);
+ sector_div(safepos, data_disks);
gap = conf->expand_progress - conf->expand_lo;
if (writepos >= safepos ||
- gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
+ gap > (new_data_disks)*3000*2 /*3Meg*/) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes)==0);
sector_t s;
if (j == sh->pd_idx)
continue;
+ if (conf->level == 6 &&
+ j == raid6_next_disk(sh->pd_idx, sh->disks))
+ continue;
s = compute_blocknr(sh, j);
if (s < (mddev->array_size<<1)) {
skipped = 1;
* The source stripes are determined by mapping the first and last
* block on the destination stripes.
*/
- raid_disks = conf->previous_raid_disks;
- data_disks = raid_disks - 1;
first_sector =
- raid5_compute_sector(sector_nr*(conf->raid_disks-1),
+ raid5_compute_sector(sector_nr*(new_data_disks),
raid_disks, data_disks,
&dd_idx, &pd_idx, conf);
last_sector =
raid5_compute_sector((sector_nr+conf->chunk_size/512)
- *(conf->raid_disks-1) -1,
+ *(new_data_disks) -1,
raid_disks, data_disks,
&dd_idx, &pd_idx, conf);
if (last_sector >= (mddev->size<<1))
last_sector = (mddev->size<<1)-1;
while (first_sector <= last_sector) {
- pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
+ pd_idx = stripe_to_pdidx(first_sector, conf,
+ conf->previous_raid_disks);
sh = get_active_stripe(conf, first_sector,
conf->previous_raid_disks, pd_idx, 0);
set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
*/
sector_t here_new, here_old;
int old_disks;
+ int max_degraded = (mddev->level == 5 ? 1 : 2);
if (mddev->new_level != mddev->level ||
mddev->new_layout != mddev->layout ||
mddev->new_chunk != mddev->chunk_size) {
- printk(KERN_ERR "raid5: %s: unsupported reshape required - aborting.\n",
+ printk(KERN_ERR "raid5: %s: unsupported reshape "
+ "required - aborting.\n",
mdname(mddev));
return -EINVAL;
}
if (mddev->delta_disks <= 0) {
- printk(KERN_ERR "raid5: %s: unsupported reshape (reduce disks) required - aborting.\n",
+ printk(KERN_ERR "raid5: %s: unsupported reshape "
+ "(reduce disks) required - aborting.\n",
mdname(mddev));
return -EINVAL;
}
old_disks = mddev->raid_disks - mddev->delta_disks;
/* reshape_position must be on a new-stripe boundary, and one
- * further up in new geometry must map after here in old geometry.
+ * further up in new geometry must map after here in old
+ * geometry.
*/
here_new = mddev->reshape_position;
- if (sector_div(here_new, (mddev->chunk_size>>9)*(mddev->raid_disks-1))) {
- printk(KERN_ERR "raid5: reshape_position not on a stripe boundary\n");
+ if (sector_div(here_new, (mddev->chunk_size>>9)*
+ (mddev->raid_disks - max_degraded))) {
+ printk(KERN_ERR "raid5: reshape_position not "
+ "on a stripe boundary\n");
return -EINVAL;
}
/* here_new is the stripe we will write to */
here_old = mddev->reshape_position;
- sector_div(here_old, (mddev->chunk_size>>9)*(old_disks-1));
- /* here_old is the first stripe that we might need to read from */
+ sector_div(here_old, (mddev->chunk_size>>9)*
+ (old_disks-max_degraded));
+ /* here_old is the first stripe that we might need to read
+ * from */
if (here_new >= here_old) {
/* Reading from the same stripe as writing to - bad */
- printk(KERN_ERR "raid5: reshape_position too early for auto-recovery - aborting.\n");
+ printk(KERN_ERR "raid5: reshape_position too early for "
+ "auto-recovery - aborting.\n");
return -EINVAL;
}
printk(KERN_INFO "raid5: reshape will continue\n");
if (err)
return err;
+ if (mddev->degraded > conf->max_degraded)
+ return -EINVAL;
/* looks like we might be able to manage this */
return 0;
}
int added_devices = 0;
unsigned long flags;
- if (mddev->degraded ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
ITERATE_RDEV(mddev, rdev, rtmp)
!test_bit(Faulty, &rdev->flags))
spares++;
- if (spares < mddev->delta_disks-1)
+ if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
/* Not enough devices even to make a degraded array
* of that size
*/
struct block_device *bdev;
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
- conf->mddev->array_size = conf->mddev->size * (conf->raid_disks-1);
+ conf->mddev->array_size = conf->mddev->size *
+ (conf->raid_disks - conf->max_degraded);
set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
conf->mddev->changed = 1;
.spare_active = raid5_spare_active,
.sync_request = sync_request,
.resize = raid5_resize,
+#ifdef CONFIG_MD_RAID5_RESHAPE
+ .check_reshape = raid5_check_reshape,
+ .start_reshape = raid5_start_reshape,
+#endif
.quiesce = raid5_quiesce,
};
static struct mdk_personality raid5_personality =
static int raid6_have_mmx(void)
{
-#ifdef __KERNEL__
/* Not really "boot_cpu" but "all_cpus" */
return boot_cpu_has(X86_FEATURE_MMX);
-#else
- /* User space test code */
- u32 features = cpuid_features();
- return ( (features & (1<<23)) == (1<<23) );
-#endif
}
/*
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_mmx_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_mmx(&sa);
+ kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
asm volatile("pxor %mm4,%mm4");
}
- raid6_after_mmx(&sa);
+ kernel_fpu_end();
}
const struct raid6_calls raid6_mmxx1 = {
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_mmx_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_mmx(&sa);
+ kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
asm volatile("movq %%mm6,%0" : "=m" (q[d+8]));
}
- raid6_after_mmx(&sa);
+ kernel_fpu_end();
}
const struct raid6_calls raid6_mmxx2 = {
static int raid6_have_sse1_or_mmxext(void)
{
-#ifdef __KERNEL__
/* Not really boot_cpu but "all_cpus" */
return boot_cpu_has(X86_FEATURE_MMX) &&
(boot_cpu_has(X86_FEATURE_XMM) ||
boot_cpu_has(X86_FEATURE_MMXEXT));
-#else
- /* User space test code - this incorrectly breaks on some Athlons */
- u32 features = cpuid_features();
- return ( (features & (5<<23)) == (5<<23) );
-#endif
}
/*
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_mmx_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- /* This is really MMX code, not SSE */
- raid6_before_mmx(&sa);
+ kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
}
- raid6_after_mmx(&sa);
asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
}
const struct raid6_calls raid6_sse1x1 = {
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_mmx_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_mmx(&sa);
+ kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
asm volatile("movntq %%mm6,%0" : "=m" (q[d+8]));
}
- raid6_after_mmx(&sa);
asm volatile("sfence" : :: "memory");
+ kernel_fpu_end();
}
const struct raid6_calls raid6_sse1x2 = {
static int raid6_have_sse2(void)
{
-#ifdef __KERNEL__
/* Not really boot_cpu but "all_cpus" */
return boot_cpu_has(X86_FEATURE_MMX) &&
boot_cpu_has(X86_FEATURE_FXSR) &&
boot_cpu_has(X86_FEATURE_XMM) &&
boot_cpu_has(X86_FEATURE_XMM2);
-#else
- /* User space test code */
- u32 features = cpuid_features();
- return ( (features & (15<<23)) == (15<<23) );
-#endif
}
/*
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_sse_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_sse2(&sa);
+ kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
asm volatile("pxor %xmm4,%xmm4");
}
- raid6_after_sse2(&sa);
asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
}
const struct raid6_calls raid6_sse2x1 = {
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_sse_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_sse2(&sa);
+ kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
}
- raid6_after_sse2(&sa);
asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
}
const struct raid6_calls raid6_sse2x2 = {
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
- raid6_sse16_save_t sa;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
- raid6_before_sse16(&sa);
+ kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
asm volatile("pxor %xmm2,%xmm2"); /* P[0] */
asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
asm volatile("pxor %xmm14,%xmm14");
}
+
asm volatile("sfence" : : : "memory");
- raid6_after_sse16(&sa);
+ kernel_fpu_end();
}
const struct raid6_calls raid6_sse2x4 = {
#if defined(__i386__) || defined(__x86_64__)
-#ifdef __x86_64__
-
-typedef struct {
- unsigned int fsave[27];
- unsigned long cr0;
-} raid6_mmx_save_t __attribute__((aligned(16)));
-
-/* N.B.: For SSE we only save %xmm0-%xmm7 even for x86-64, since
- the code doesn't know about the additional x86-64 registers */
-typedef struct {
- unsigned int sarea[8*4+2];
- unsigned long cr0;
-} raid6_sse_save_t __attribute__((aligned(16)));
-
-/* This is for x86-64-specific code which uses all 16 XMM registers */
-typedef struct {
- unsigned int sarea[16*4+2];
- unsigned long cr0;
-} raid6_sse16_save_t __attribute__((aligned(16)));
-
-/* On x86-64 the stack *SHOULD* be 16-byte aligned, but currently this
- is buggy in the kernel and it's only 8-byte aligned in places, so
- we need to do this anyway. Sigh. */
-#define SAREA(x) ((unsigned int *)((((unsigned long)&(x)->sarea)+15) & ~15))
-
-#else /* __i386__ */
-
-typedef struct {
- unsigned int fsave[27];
- unsigned long cr0;
-} raid6_mmx_save_t;
-
-/* On i386, the stack is only 8-byte aligned, but SSE requires 16-byte
- alignment. The +3 is so we have the slack space to manually align
- a properly-sized area correctly. */
-typedef struct {
- unsigned int sarea[8*4+3];
- unsigned long cr0;
-} raid6_sse_save_t;
-
-/* Find the 16-byte aligned save area */
-#define SAREA(x) ((unsigned int *)((((unsigned long)&(x)->sarea)+15) & ~15))
-
-#endif
-
#ifdef __KERNEL__ /* Real code */
-/* Note: %cr0 is 32 bits on i386 and 64 bits on x86-64 */
-
-static inline unsigned long raid6_get_fpu(void)
-{
- unsigned long cr0;
-
- preempt_disable();
- asm volatile("mov %%cr0,%0 ; clts" : "=r" (cr0));
- return cr0;
-}
-
-static inline void raid6_put_fpu(unsigned long cr0)
-{
- asm volatile("mov %0,%%cr0" : : "r" (cr0));
- preempt_enable();
-}
+#include <asm/i387.h>
#else /* Dummy code for user space testing */
-static inline unsigned long raid6_get_fpu(void)
-{
- return 0xf00ba6;
-}
-
-static inline void raid6_put_fpu(unsigned long cr0)
-{
- (void)cr0;
-}
-
-#endif
-
-static inline void raid6_before_mmx(raid6_mmx_save_t *s)
-{
- s->cr0 = raid6_get_fpu();
- asm volatile("fsave %0 ; fwait" : "=m" (s->fsave[0]));
-}
-
-static inline void raid6_after_mmx(raid6_mmx_save_t *s)
-{
- asm volatile("frstor %0" : : "m" (s->fsave[0]));
- raid6_put_fpu(s->cr0);
-}
-
-static inline void raid6_before_sse(raid6_sse_save_t *s)
-{
- unsigned int *rsa = SAREA(s);
-
- s->cr0 = raid6_get_fpu();
-
- asm volatile("movaps %%xmm0,%0" : "=m" (rsa[0]));
- asm volatile("movaps %%xmm1,%0" : "=m" (rsa[4]));
- asm volatile("movaps %%xmm2,%0" : "=m" (rsa[8]));
- asm volatile("movaps %%xmm3,%0" : "=m" (rsa[12]));
- asm volatile("movaps %%xmm4,%0" : "=m" (rsa[16]));
- asm volatile("movaps %%xmm5,%0" : "=m" (rsa[20]));
- asm volatile("movaps %%xmm6,%0" : "=m" (rsa[24]));
- asm volatile("movaps %%xmm7,%0" : "=m" (rsa[28]));
-}
-
-static inline void raid6_after_sse(raid6_sse_save_t *s)
-{
- unsigned int *rsa = SAREA(s);
-
- asm volatile("movaps %0,%%xmm0" : : "m" (rsa[0]));
- asm volatile("movaps %0,%%xmm1" : : "m" (rsa[4]));
- asm volatile("movaps %0,%%xmm2" : : "m" (rsa[8]));
- asm volatile("movaps %0,%%xmm3" : : "m" (rsa[12]));
- asm volatile("movaps %0,%%xmm4" : : "m" (rsa[16]));
- asm volatile("movaps %0,%%xmm5" : : "m" (rsa[20]));
- asm volatile("movaps %0,%%xmm6" : : "m" (rsa[24]));
- asm volatile("movaps %0,%%xmm7" : : "m" (rsa[28]));
-
- raid6_put_fpu(s->cr0);
-}
-
-static inline void raid6_before_sse2(raid6_sse_save_t *s)
+static inline void kernel_fpu_begin(void)
{
- unsigned int *rsa = SAREA(s);
-
- s->cr0 = raid6_get_fpu();
-
- asm volatile("movdqa %%xmm0,%0" : "=m" (rsa[0]));
- asm volatile("movdqa %%xmm1,%0" : "=m" (rsa[4]));
- asm volatile("movdqa %%xmm2,%0" : "=m" (rsa[8]));
- asm volatile("movdqa %%xmm3,%0" : "=m" (rsa[12]));
- asm volatile("movdqa %%xmm4,%0" : "=m" (rsa[16]));
- asm volatile("movdqa %%xmm5,%0" : "=m" (rsa[20]));
- asm volatile("movdqa %%xmm6,%0" : "=m" (rsa[24]));
- asm volatile("movdqa %%xmm7,%0" : "=m" (rsa[28]));
}
-static inline void raid6_after_sse2(raid6_sse_save_t *s)
+static inline void kernel_fpu_end(void)
{
- unsigned int *rsa = SAREA(s);
-
- asm volatile("movdqa %0,%%xmm0" : : "m" (rsa[0]));
- asm volatile("movdqa %0,%%xmm1" : : "m" (rsa[4]));
- asm volatile("movdqa %0,%%xmm2" : : "m" (rsa[8]));
- asm volatile("movdqa %0,%%xmm3" : : "m" (rsa[12]));
- asm volatile("movdqa %0,%%xmm4" : : "m" (rsa[16]));
- asm volatile("movdqa %0,%%xmm5" : : "m" (rsa[20]));
- asm volatile("movdqa %0,%%xmm6" : : "m" (rsa[24]));
- asm volatile("movdqa %0,%%xmm7" : : "m" (rsa[28]));
-
- raid6_put_fpu(s->cr0);
}
-#ifdef __x86_64__
-
-static inline void raid6_before_sse16(raid6_sse16_save_t *s)
-{
- unsigned int *rsa = SAREA(s);
-
- s->cr0 = raid6_get_fpu();
+#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions
+ * (fast save and restore) */
+#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
+#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
+#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
- asm volatile("movdqa %%xmm0,%0" : "=m" (rsa[0]));
- asm volatile("movdqa %%xmm1,%0" : "=m" (rsa[4]));
- asm volatile("movdqa %%xmm2,%0" : "=m" (rsa[8]));
- asm volatile("movdqa %%xmm3,%0" : "=m" (rsa[12]));
- asm volatile("movdqa %%xmm4,%0" : "=m" (rsa[16]));
- asm volatile("movdqa %%xmm5,%0" : "=m" (rsa[20]));
- asm volatile("movdqa %%xmm6,%0" : "=m" (rsa[24]));
- asm volatile("movdqa %%xmm7,%0" : "=m" (rsa[28]));
- asm volatile("movdqa %%xmm8,%0" : "=m" (rsa[32]));
- asm volatile("movdqa %%xmm9,%0" : "=m" (rsa[36]));
- asm volatile("movdqa %%xmm10,%0" : "=m" (rsa[40]));
- asm volatile("movdqa %%xmm11,%0" : "=m" (rsa[44]));
- asm volatile("movdqa %%xmm12,%0" : "=m" (rsa[48]));
- asm volatile("movdqa %%xmm13,%0" : "=m" (rsa[52]));
- asm volatile("movdqa %%xmm14,%0" : "=m" (rsa[56]));
- asm volatile("movdqa %%xmm15,%0" : "=m" (rsa[60]));
-}
-
-static inline void raid6_after_sse16(raid6_sse16_save_t *s)
+/* Should work well enough on modern CPUs for testing */
+static inline int boot_cpu_has(int flag)
{
- unsigned int *rsa = SAREA(s);
+ u32 eax = (flag >> 5) ? 0x80000001 : 1;
+ u32 edx;
- asm volatile("movdqa %0,%%xmm0" : : "m" (rsa[0]));
- asm volatile("movdqa %0,%%xmm1" : : "m" (rsa[4]));
- asm volatile("movdqa %0,%%xmm2" : : "m" (rsa[8]));
- asm volatile("movdqa %0,%%xmm3" : : "m" (rsa[12]));
- asm volatile("movdqa %0,%%xmm4" : : "m" (rsa[16]));
- asm volatile("movdqa %0,%%xmm5" : : "m" (rsa[20]));
- asm volatile("movdqa %0,%%xmm6" : : "m" (rsa[24]));
- asm volatile("movdqa %0,%%xmm7" : : "m" (rsa[28]));
- asm volatile("movdqa %0,%%xmm8" : : "m" (rsa[32]));
- asm volatile("movdqa %0,%%xmm9" : : "m" (rsa[36]));
- asm volatile("movdqa %0,%%xmm10" : : "m" (rsa[40]));
- asm volatile("movdqa %0,%%xmm11" : : "m" (rsa[44]));
- asm volatile("movdqa %0,%%xmm12" : : "m" (rsa[48]));
- asm volatile("movdqa %0,%%xmm13" : : "m" (rsa[52]));
- asm volatile("movdqa %0,%%xmm14" : : "m" (rsa[56]));
- asm volatile("movdqa %0,%%xmm15" : : "m" (rsa[60]));
+ asm volatile("cpuid"
+ : "+a" (eax), "=d" (edx)
+ : : "ecx", "ebx");
- raid6_put_fpu(s->cr0);
+ return (edx >> (flag & 31)) & 1;
}
-#endif /* __x86_64__ */
-
-/* User space test hack */
-#ifndef __KERNEL__
-static inline int cpuid_features(void)
-{
- u32 eax = 1;
- u32 ebx, ecx, edx;
-
- asm volatile("cpuid" :
- "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
-
- return edx;
-}
#endif /* ndef __KERNEL__ */
#endif
fw->data[BLUEBIRD_01_ID_OFFSET + 1] == USB_VID_DVICO >> 8) {
fw->data[BLUEBIRD_01_ID_OFFSET + 2] =
- udev->descriptor.idProduct + 1;
+ le16_to_cpu(udev->descriptor.idProduct) + 1;
fw->data[BLUEBIRD_01_ID_OFFSET + 3] =
- udev->descriptor.idProduct >> 8;
+ le16_to_cpu(udev->descriptor.idProduct) >> 8;
return usb_cypress_load_firmware(udev, fw, CYPRESS_FX2);
}
struct dvb_usb_adapter *adap = fe->dvb->priv;
u8 b[5];
dvb_usb_tuner_calc_regs(fe,fep,b, 5);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
return digitv_ctrl_msg(adap->dev, USB_WRITE_TUNER, 0, &b[1], 4, NULL, 0);
}
char __user *buffer, size_t len, loff_t *pos)
{
struct cafe_camera *cam = filp->private_data;
- int ret;
+ int ret = 0;
/*
* Perhaps we're in speculative read mode and already
if (cam->n_sbufs == 0) /* no luck at all - ret already set */
kfree(cam->sb_bufs);
- else
- ret = 0;
req->count = cam->n_sbufs; /* In case of partial success */
out:
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_CX25840)
+ if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
state->vbi_line_offset = 8;
state->id = id;
+ i2c_attach_client(client);
+
if (state->is_cx25836)
cx25836_initialize(client);
else
cx25840_initialize(client, 1);
- i2c_attach_client(client);
-
return 0;
}
*/
#define FWSEND 48
-#define FWDEV(x) &((x)->adapter->dev)
+#define FWDEV(x) &((x)->dev)
static char *firmware = FWFILE;
{
struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core;
- if (reg->i2c_id != 0)
+ if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
return -EINVAL;
/* cx2388x has a 24-bit register space */
reg->val = cx_read(reg->reg&0xffffff);
{
struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core;
- if (reg->i2c_id != 0)
+ if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
return -EINVAL;
cx_write(reg->reg&0xffffff, reg->val);
return 0;
int pvr2_hdw_register_access(struct pvr2_hdw *hdw,
- u32 chip_id, u64 reg_id,
- int setFl,u32 *val_ptr)
+ u32 match_type, u32 match_chip, u64 reg_id,
+ int setFl,u64 *val_ptr)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
struct list_head *item;
if (!capable(CAP_SYS_ADMIN)) return -EPERM;
- req.i2c_id = chip_id;
+ req.match_type = match_type;
+ req.match_chip = match_chip;
req.reg = reg_id;
if (setFl) req.val = *val_ptr;
mutex_lock(&hdw->i2c_list_lock); do {
list_for_each(item,&hdw->i2c_clients) {
cp = list_entry(item,struct pvr2_i2c_client,list);
- if (cp->client->driver->id != chip_id) continue;
+ if (!v4l2_chip_match_i2c_client(cp->client, req.match_type, req.match_chip)) {
+ continue;
+ }
stat = pvr2_i2c_client_cmd(
cp,(setFl ? VIDIOC_DBG_S_REGISTER :
VIDIOC_DBG_G_REGISTER),&req);
enum pvr2_v4l_type index,int);
/* Direct read/write access to chip's registers:
- chip_id - unique id of chip (e.g. I2C_DRIVERD_xxxx)
+ match_type - how to interpret match_chip (e.g. driver ID, i2c address)
+ match_chip - chip match value (e.g. I2C_DRIVERID_xxxx)
reg_id - register number to access
setFl - true to set the register, false to read it
val_ptr - storage location for source / result. */
int pvr2_hdw_register_access(struct pvr2_hdw *,
- u32 chip_id,u64 reg_id,
- int setFl,u32 *val_ptr);
+ u32 match_type, u32 match_chip,u64 reg_id,
+ int setFl,u64 *val_ptr);
/* The following entry points are all lower level things you normally don't
want to worry about. */
case VIDIOC_DBG_S_REGISTER:
case VIDIOC_DBG_G_REGISTER:
{
- u32 val;
+ u64 val;
struct v4l2_register *req = (struct v4l2_register *)arg;
if (cmd == VIDIOC_DBG_S_REGISTER) val = req->val;
ret = pvr2_hdw_register_access(
- hdw,req->i2c_id,req->reg,
+ hdw,req->match_type,req->match_chip,req->reg,
cmd == VIDIOC_DBG_S_REGISTER,&val);
if (cmd == VIDIOC_DBG_G_REGISTER) req->val = val;
break;
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_SAA711X)
+ if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_SAA7127)
+ if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_TVP5150)
+ if (!v4l2_chip_match_i2c_client(c, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_UPD64031A)
+ if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
{
struct v4l2_register *reg = arg;
- if (reg->i2c_id != I2C_DRIVERID_UPD64083)
+ if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
struct v4l2_register *reg = arg;
int errCode;
- if (reg->i2c_id != 0)
+ if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
PDEBUG(DBG_IOCTL, "VIDIOC_DBG_%c_REGISTER reg=0x%02X, value=0x%02X",
cmd == VIDIOC_DBG_G_REGISTER ? 'G' : 'S',
- (unsigned int)reg->reg, reg->val);
+ (unsigned int)reg->reg, (unsigned int)reg->val);
return 0;
}
#endif
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/i2c.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
[_IOC_NR(VIDIOC_ENUMAUDOUT)] = "VIDIOC_ENUMAUDOUT",
[_IOC_NR(VIDIOC_G_PRIORITY)] = "VIDIOC_G_PRIORITY",
[_IOC_NR(VIDIOC_S_PRIORITY)] = "VIDIOC_S_PRIORITY",
-#if 1
[_IOC_NR(VIDIOC_G_SLICED_VBI_CAP)] = "VIDIOC_G_SLICED_VBI_CAP",
-#endif
[_IOC_NR(VIDIOC_LOG_STATUS)] = "VIDIOC_LOG_STATUS",
[_IOC_NR(VIDIOC_G_EXT_CTRLS)] = "VIDIOC_G_EXT_CTRLS",
[_IOC_NR(VIDIOC_S_EXT_CTRLS)] = "VIDIOC_S_EXT_CTRLS",
- [_IOC_NR(VIDIOC_TRY_EXT_CTRLS)] = "VIDIOC_TRY_EXT_CTRLS"
+ [_IOC_NR(VIDIOC_TRY_EXT_CTRLS)] = "VIDIOC_TRY_EXT_CTRLS",
+#if 1
+ [_IOC_NR(VIDIOC_ENUM_FRAMESIZES)] = "VIDIOC_ENUM_FRAMESIZES",
+ [_IOC_NR(VIDIOC_ENUM_FRAMEINTERVALS)] = "VIDIOC_ENUM_FRAMEINTERVALS",
+ [_IOC_NR(VIDIOC_G_ENC_INDEX)] = "VIDIOC_G_ENC_INDEX",
+ [_IOC_NR(VIDIOC_ENCODER_CMD)] = "VIDIOC_ENCODER_CMD",
+ [_IOC_NR(VIDIOC_TRY_ENCODER_CMD)] = "VIDIOC_TRY_ENCODER_CMD",
+
+ [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
+ [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
+#endif
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
[_IOC_NR(TUNER_SET_STANDBY)] = "TUNER_SET_STANDBY",
[_IOC_NR(TDA9887_SET_CONFIG)] = "TDA9887_SET_CONFIG",
- [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
- [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
-
[_IOC_NR(VIDIOC_INT_S_TUNER_MODE)] = "VIDIOC_INT_S_TUNER_MODE",
[_IOC_NR(VIDIOC_INT_RESET)] = "VIDIOC_INT_RESET",
[_IOC_NR(VIDIOC_INT_AUDIO_CLOCK_FREQ)] = "VIDIOC_INT_AUDIO_CLOCK_FREQ",
return **ctrl_classes;
}
+int v4l2_chip_match_i2c_client(struct i2c_client *c, u32 match_type, u32 match_chip)
+{
+ switch (match_type) {
+ case V4L2_CHIP_MATCH_I2C_DRIVER:
+ return (c != NULL && c->driver != NULL && c->driver->id == match_chip);
+ case V4L2_CHIP_MATCH_I2C_ADDR:
+ return (c != NULL && c->addr == match_chip);
+ default:
+ return 0;
+ }
+}
+
+int v4l2_chip_match_host(u32 match_type, u32 match_chip)
+{
+ switch (match_type) {
+ case V4L2_CHIP_MATCH_HOST:
+ return match_chip == 0;
+ default:
+ return 0;
+ }
+}
+
/* ----------------------------------------------------------------- */
EXPORT_SYMBOL(v4l2_norm_to_name);
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
EXPORT_SYMBOL(v4l2_ctrl_query_fill_std);
+EXPORT_SYMBOL(v4l2_chip_match_i2c_client);
+EXPORT_SYMBOL(v4l2_chip_match_host);
+
/*
* Local variables:
* c-basic-offset: 8
ret=vfd->vidioc_s_jpegcomp(file, fh, p);
break;
}
+ case VIDIOC_G_ENC_INDEX:
+ {
+ struct v4l2_enc_idx *p=arg;
+
+ if (!vfd->vidioc_g_enc_index)
+ break;
+ ret=vfd->vidioc_g_enc_index(file, fh, p);
+ if (!ret)
+ dbgarg (cmd, "entries=%d, entries_cap=%d\n",
+ p->entries,p->entries_cap);
+ break;
+ }
+ case VIDIOC_ENCODER_CMD:
+ {
+ struct v4l2_encoder_cmd *p=arg;
+
+ if (!vfd->vidioc_encoder_cmd)
+ break;
+ ret=vfd->vidioc_encoder_cmd(file, fh, p);
+ if (!ret)
+ dbgarg (cmd, "cmd=%d, flags=%d\n",
+ p->cmd,p->flags);
+ break;
+ }
+ case VIDIOC_TRY_ENCODER_CMD:
+ {
+ struct v4l2_encoder_cmd *p=arg;
+
+ if (!vfd->vidioc_try_encoder_cmd)
+ break;
+ ret=vfd->vidioc_try_encoder_cmd(file, fh, p);
+ if (!ret)
+ dbgarg (cmd, "cmd=%d, flags=%d\n",
+ p->cmd,p->flags);
+ break;
+ }
case VIDIOC_G_PARM:
{
struct v4l2_streamparm *p=arg;
*/
void rtc_device_unregister(struct rtc_device *rtc)
{
- mutex_lock(&rtc->ops_lock);
- rtc->ops = NULL;
- mutex_unlock(&rtc->ops_lock);
- class_device_unregister(&rtc->class_dev);
+ if (class_device_get(&rtc->class_dev) != NULL) {
+ mutex_lock(&rtc->ops_lock);
+ /* remove innards of this RTC, then disable it, before
+ * letting any rtc_class_open() users access it again
+ */
+ class_device_unregister(&rtc->class_dev);
+ rtc->ops = NULL;
+ mutex_unlock(&rtc->ops_lock);
+ class_device_put(&rtc->class_dev);
+ }
}
EXPORT_SYMBOL_GPL(rtc_device_unregister);
down(&rtc_class->sem);
list_for_each_entry(class_dev_tmp, &rtc_class->children, node) {
if (strncmp(class_dev_tmp->class_id, name, BUS_ID_SIZE) == 0) {
- class_dev = class_dev_tmp;
+ class_dev = class_device_get(class_dev_tmp);
break;
}
}
void rtc_class_close(struct class_device *class_dev)
{
module_put(to_rtc_device(class_dev)->owner);
+ class_device_put(class_dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);
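
The point of taking a class_device reference in rtc_device_unregister() is
that rtc_class_open() users may still hold the device: ops is cleared under
ops_lock, so their next call fails cleanly instead of dereferencing a stale
driver. A sketch of the open/use/close pairing this protects, assuming the
2.6-era class_device flavour of the RTC API (the caller is hypothetical):

/* hypothetical caller; rtc_read_time() took a class_device then */
struct class_device *cdev = rtc_class_open("rtc0");

if (cdev) {
	struct rtc_time tm;

	/* may now race harmlessly with rtc_device_unregister(): the
	 * reference keeps the structure alive, and a cleared ->ops
	 * turns further operations into plain errors */
	rtc_read_time(cdev, &tm);
	rtc_class_close(cdev);	/* drops the reference taken in open */
}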
config FB_PS3
bool "PS3 GPU framebuffer driver"
- depends on FB && PPC_PS3
- select PS3_PS3AV
+ depends on FB && PS3_PS3AV
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
#define SM501_MEMF_CRT (4)
#define SM501_MEMF_ACCEL (8)
-int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
- unsigned int why, size_t size)
+static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
+ unsigned int why, size_t size)
{
unsigned int ptr = 0;
* set or change the hardware cursor parameters
*/
-int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
+static int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct sm501fb_par *par = info->par;
struct sm501fb_info *fbi = par->info;
* initialise hw cursor parameters
*/
-int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
+static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
{
struct sm501fb_par *par = fbi->par;
struct sm501fb_info *info = par->info;
},
};
-int __devinit sm501fb_init(void)
+static int __devinit sm501fb_init(void)
{
return platform_driver_register(&sm501fb_driver);
}
{
int rc = 0;
+ flags |= O_LARGEFILE;
dget(lower_dentry);
mntget(lower_mnt);
*lower_file = dentry_open(lower_dentry, lower_mnt, flags);
inode = ecryptfs_dentry->d_inode;
crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
lower_flags = ((O_CREAT | O_TRUNC) & O_ACCMODE) | O_RDWR;
-#if BITS_PER_LONG != 32
- lower_flags |= O_LARGEFILE;
-#endif
lower_mnt = ecryptfs_dentry_to_lower_mnt(ecryptfs_dentry);
/* Corresponding fput() at end of this function */
if ((rc = ecryptfs_open_lower_file(&lower_file, lower_dentry, lower_mnt,
rc = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
if (rc) {
ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n");
- goto out_free;
+ goto out;
}
lower_root = nd.dentry;
if (!lower_root->d_inode) {
lower_page_data = kmap_atomic(lower_page, KM_USER1);
memcpy(page_data, lower_page_data, PAGE_CACHE_SIZE);
kunmap_atomic(lower_page_data, KM_USER1);
- flush_dcache_page(lower_page);
kunmap_atomic(page_data, KM_USER0);
flush_dcache_page(page);
rc = 0;
return rc;
}
-static void ecryptfs_release_lower_page(struct page *lower_page)
+static
+void ecryptfs_release_lower_page(struct page *lower_page, int page_locked)
{
- unlock_page(lower_page);
+ if (page_locked)
+ unlock_page(lower_page);
page_cache_release(lower_page);
}
}
lower_a_ops = lower_inode->i_mapping->a_ops;
rc = lower_a_ops->prepare_write(lower_file, header_page, 0, 8);
+ if (rc) {
+ if (rc == AOP_TRUNCATED_PAGE)
+ ecryptfs_release_lower_page(header_page, 0);
+ else
+ ecryptfs_release_lower_page(header_page, 1);
+ goto out;
+ }
file_size = (u64)i_size_read(inode);
 ecryptfs_printk(KERN_DEBUG, "Writing size: [0x%.16llx]\n",
 (unsigned long long)file_size);
file_size = cpu_to_be64(file_size);
if (rc < 0)
 ecryptfs_printk(KERN_ERR, "Error committing header page "
"write\n");
- ecryptfs_release_lower_page(header_page);
+ if (rc == AOP_TRUNCATED_PAGE)
+ ecryptfs_release_lower_page(header_page, 0);
+ else
+ ecryptfs_release_lower_page(header_page, 1);
lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME;
mark_inode_dirty_sync(inode);
out:
}
out:
if (rc && (*lower_page)) {
- ecryptfs_release_lower_page(*lower_page);
+ if (rc == AOP_TRUNCATED_PAGE)
+ ecryptfs_release_lower_page(*lower_page, 0);
+ else
+ ecryptfs_release_lower_page(*lower_page, 1);
(*lower_page) = NULL;
}
return rc;
struct file *lower_file, int byte_offset,
int region_size)
{
+ int page_locked = 1;
int rc = 0;
rc = lower_inode->i_mapping->a_ops->commit_write(
lower_file, lower_page, byte_offset, region_size);
+ if (rc == AOP_TRUNCATED_PAGE)
+ page_locked = 0;
if (rc < 0) {
ecryptfs_printk(KERN_ERR,
"Error committing write; rc = [%d]\n", rc);
} else
rc = 0;
- ecryptfs_release_lower_page(lower_page);
+ ecryptfs_release_lower_page(lower_page, page_locked);
return rc;
}
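
All three hunks above follow the same convention: when prepare_write() or
commit_write() return AOP_TRUNCATED_PAGE, the address_space op has already
unlocked the page, so the caller must only drop its reference and retry.
Condensed into one sketch (a hypothetical helper, not eCryptfs code):

/* condensed form of the convention the new page_locked flag implements */
static int commit_and_release(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	const struct address_space_operations *a_ops =
		page->mapping->a_ops;
	int rc = a_ops->commit_write(file, page, from, to);

	if (rc == AOP_TRUNCATED_PAGE) {
		/* the aop already unlocked the page; just drop the ref
		 * and let the caller look the page up again */
		page_cache_release(page);
		return rc;
	}
	unlock_page(page);
	page_cache_release(page);
	return rc < 0 ? rc : 0;
}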
struct buffer_head *bh)
{
struct mb_cache_entry *ce = NULL;
+ int error = 0;
ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_bdev, bh->b_blocknr);
+ error = ext3_journal_get_write_access(handle, bh);
+ if (error)
+ goto out;
+
+ lock_buffer(bh);
+
if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
ea_bdebug(bh, "refcount now=0; freeing");
if (ce)
get_bh(bh);
ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
} else {
- if (ext3_journal_get_write_access(handle, bh) == 0) {
- lock_buffer(bh);
- BHDR(bh)->h_refcount = cpu_to_le32(
+ BHDR(bh)->h_refcount = cpu_to_le32(
le32_to_cpu(BHDR(bh)->h_refcount) - 1);
- ext3_journal_dirty_metadata(handle, bh);
- if (IS_SYNC(inode))
- handle->h_sync = 1;
- DQUOT_FREE_BLOCK(inode, 1);
- unlock_buffer(bh);
- ea_bdebug(bh, "refcount now=%d; releasing",
- le32_to_cpu(BHDR(bh)->h_refcount));
- }
+ error = ext3_journal_dirty_metadata(handle, bh);
+ if (IS_SYNC(inode))
+ handle->h_sync = 1;
+ DQUOT_FREE_BLOCK(inode, 1);
+ ea_bdebug(bh, "refcount now=%d; releasing",
+ le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
mb_cache_entry_release(ce);
}
+ unlock_buffer(bh);
+out:
+ ext3_std_error(inode->i_sb, error);
+ return;
}
struct ext3_xattr_info {
struct buffer_head *new_bh = NULL;
struct ext3_xattr_search *s = &bs->s;
struct mb_cache_entry *ce = NULL;
- int error;
+ int error = 0;
#define header(x) ((struct ext3_xattr_header *)(x))
if (s->base) {
ce = mb_cache_entry_get(ext3_xattr_cache, bs->bh->b_bdev,
bs->bh->b_blocknr);
+ error = ext3_journal_get_write_access(handle, bs->bh);
+ if (error)
+ goto cleanup;
+ lock_buffer(bs->bh);
+
if (header(s->base)->h_refcount == cpu_to_le32(1)) {
if (ce) {
mb_cache_entry_free(ce);
ce = NULL;
}
ea_bdebug(bs->bh, "modifying in-place");
- error = ext3_journal_get_write_access(handle, bs->bh);
- if (error)
- goto cleanup;
- lock_buffer(bs->bh);
error = ext3_xattr_set_entry(i, s);
if (!error) {
if (!IS_LAST_ENTRY(s->first))
} else {
int offset = (char *)s->here - bs->bh->b_data;
+ unlock_buffer(bs->bh);
+ journal_release_buffer(handle, bs->bh);
+
if (ce) {
mb_cache_entry_release(ce);
ce = NULL;
struct buffer_head *bh)
{
struct mb_cache_entry *ce = NULL;
+ int error = 0;
ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
+ error = ext4_journal_get_write_access(handle, bh);
+ if (error)
+ goto out;
+
+ lock_buffer(bh);
if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
ea_bdebug(bh, "refcount now=0; freeing");
if (ce)
get_bh(bh);
ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
} else {
- if (ext4_journal_get_write_access(handle, bh) == 0) {
- lock_buffer(bh);
- BHDR(bh)->h_refcount = cpu_to_le32(
+ BHDR(bh)->h_refcount = cpu_to_le32(
le32_to_cpu(BHDR(bh)->h_refcount) - 1);
- ext4_journal_dirty_metadata(handle, bh);
- if (IS_SYNC(inode))
- handle->h_sync = 1;
- DQUOT_FREE_BLOCK(inode, 1);
- unlock_buffer(bh);
- ea_bdebug(bh, "refcount now=%d; releasing",
- le32_to_cpu(BHDR(bh)->h_refcount));
- }
+ error = ext4_journal_dirty_metadata(handle, bh);
+ if (IS_SYNC(inode))
+ handle->h_sync = 1;
+ DQUOT_FREE_BLOCK(inode, 1);
+ ea_bdebug(bh, "refcount now=%d; releasing",
+ le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
mb_cache_entry_release(ce);
}
+ unlock_buffer(bh);
+out:
+ ext4_std_error(inode->i_sb, error);
+ return;
}
struct ext4_xattr_info {
struct buffer_head *new_bh = NULL;
struct ext4_xattr_search *s = &bs->s;
struct mb_cache_entry *ce = NULL;
- int error;
+ int error = 0;
#define header(x) ((struct ext4_xattr_header *)(x))
if (s->base) {
ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
bs->bh->b_blocknr);
+ error = ext4_journal_get_write_access(handle, bs->bh);
+ if (error)
+ goto cleanup;
+ lock_buffer(bs->bh);
+
if (header(s->base)->h_refcount == cpu_to_le32(1)) {
if (ce) {
mb_cache_entry_free(ce);
ce = NULL;
}
ea_bdebug(bs->bh, "modifying in-place");
- error = ext4_journal_get_write_access(handle, bs->bh);
- if (error)
- goto cleanup;
- lock_buffer(bs->bh);
error = ext4_xattr_set_entry(i, s);
if (!error) {
if (!IS_LAST_ENTRY(s->first))
} else {
int offset = (char *)s->here - bs->bh->b_data;
+ unlock_buffer(bs->bh);
+ jbd2_journal_release_buffer(handle, bs->bh);
if (ce) {
mb_cache_entry_release(ce);
ce = NULL;
#define swapper_pg_dir ((pgd_t *) NULL)
-#define pgtable_cache_init() do {} while(0)
+#define pgtable_cache_init() do {} while (0)
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_enter_lazy_cpu_mode() do {} while (0)
+#define arch_leave_lazy_cpu_mode() do {} while (0)
#else /* !CONFIG_MMU */
/*****************************************************************************/
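
The four new hooks are no-ops in the generic header; paravirtualized
architectures override them to queue page-table or CPU-state updates and
flush the whole batch on leave. Generic code is expected to bracket a run
of updates roughly like this (a sketch of the calling convention, not a
specific call site):

/* sketch: how a batch of PTE updates is meant to be bracketed */
static void wrprotect_range(struct mm_struct *mm, unsigned long start,
			    unsigned long end, pte_t *pte)
{
	unsigned long addr;

	arch_enter_lazy_mmu_mode();
	for (addr = start; addr < end; addr += PAGE_SIZE, pte++)
		set_pte_at(mm, addr, pte, pte_wrprotect(*pte));
	arch_leave_lazy_mmu_mode();	/* queued updates are flushed here */
}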
static inline pte_t pte_mkread(pte_t pte)
{
- pte_set_bits(pte, _PAGE_RW);
+ pte_set_bits(pte, _PAGE_USER);
return(pte_mknewprot(pte));
}
/**
* struct hrtimer_base - the timer base for a specific clock
+ * @cpu_base: per cpu clock base
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
* @active: red black tree root node for the active timers
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
+#include <linux/shm.h>
#include <asm/tlbflush.h>
struct ctl_table;
static inline int is_file_hugepages(struct file *file)
{
- return file->f_op == &hugetlbfs_file_operations;
+ if (file->f_op == &hugetlbfs_file_operations)
+ return 1;
+ if (is_file_shm_hugepages(file))
+ return 1;
+
+ return 0;
}
static inline void set_file_hugepages(struct file *file)
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
+#include <linux/errno.h>
#include <asm/irq.h>
#include <asm/ptrace.h>
/*
* Debugging / Tracing functions
*/
+
char *capi_cmd2str(__u8 cmd, __u8 subcmd);
-char *capi_cmsg2str(_cmsg * cmsg);
-char *capi_message2str(__u8 * msg);
+
+typedef struct {
+ u_char *buf;
+ u_char *p;
+ size_t size;
+ size_t pos;
+} _cdebbuf;
+
+#define CDEBUG_SIZE 1024
+#define CDEBUG_GSIZE 4096
+
+_cdebbuf *cdebbuf_alloc(void);
+void cdebbuf_free(_cdebbuf *cdb);
+int cdebug_init(void);
+void cdebug_exit(void);
+
+_cdebbuf *capi_cmsg2str(_cmsg *cmsg);
+_cdebbuf *capi_message2str(__u8 *msg);
/*-----------------------------------------------------------------------*/
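
With this change capi_cmsg2str() and capi_message2str() hand back a
heap-allocated _cdebbuf instead of a static buffer, so the caller must
release it. A sketch of the new convention (assuming cdebug_init() ran at
module load):

/* sketch of the new calling convention for the CAPI debug formatters */
static void dump_capi_message(__u8 *msg)
{
	_cdebbuf *cdb = capi_message2str(msg);

	if (cdb) {
		printk(KERN_DEBUG "capi: %s\n", cdb->buf);
		cdebbuf_free(cdb);	/* the buffer is caller-owned now */
	}
}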
extern void show_free_areas(void);
#ifdef CONFIG_SHMEM
-struct page *shmem_nopage(struct vm_area_struct *vma,
- unsigned long address, int *type);
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
unsigned long addr);
int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
-#define shmem_nopage filemap_nopage
-
static inline int shmem_lock(struct file *file, int lock,
struct user_struct *user)
{
}
#endif
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
-extern int shmem_mmap(struct file *file, struct vm_area_struct *vma);
int shmem_zero_setup(struct vm_area_struct *);
#define PG_active 6
#define PG_slab 7 /* slab debug (Suparna wants this) */
-#define PG_checked 8 /* kill me in 2.5.<early>. */
+#define PG_owner_priv_1 8 /* Owner use. If pagecache, fs may use*/
#define PG_arch_1 9
#define PG_reserved 10
#define PG_private 11 /* If pagecache, has fs-private data */
#define PG_nosave_free 18 /* Used for system suspend/resume */
#define PG_buddy 19 /* Page is free, on buddy lists */
+/* PG_owner_priv_1 users should have descriptive aliases */
+#define PG_checked PG_owner_priv_1 /* Used by some filesystems */
#if (BITS_PER_LONG > 32)
/*
#ifdef CONFIG_SYSVIPC
long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr);
+extern int is_file_shm_hugepages(struct file *file);
#else
static inline long do_shmat(int shmid, char __user *shmaddr,
int shmflg, unsigned long *addr)
{
return -ENOSYS;
}
+static inline int is_file_shm_hugepages(struct file *file)
+{
+ return 0;
+}
#endif
#endif /* __KERNEL__ */
}
static inline void sysfs_remove_file_from_group(struct kobject *kobj,
- const struct attribute *attr, const char *group);
+ const struct attribute *attr, const char *group)
{
- ;
}
static inline void sysfs_notify(struct kobject * k, char *dir, char *attr)
__u32 reserved[2];
};
+/*
+ * M P E G S E R V I C E S
+ *
+ * NOTE: EXPERIMENTAL API
+ */
+#if 1
+#define V4L2_ENC_IDX_FRAME_I (0)
+#define V4L2_ENC_IDX_FRAME_P (1)
+#define V4L2_ENC_IDX_FRAME_B (2)
+#define V4L2_ENC_IDX_FRAME_MASK (0xf)
+
+struct v4l2_enc_idx_entry {
+ __u64 offset;
+ __u64 pts;
+ __u32 length;
+ __u32 flags;
+ __u32 reserved[2];
+};
+
+#define V4L2_ENC_IDX_ENTRIES (64)
+struct v4l2_enc_idx {
+ __u32 entries;
+ __u32 entries_cap;
+ __u32 reserved[4];
+ struct v4l2_enc_idx_entry entry[V4L2_ENC_IDX_ENTRIES];
+};
+
+
+#define V4L2_ENC_CMD_START (0)
+#define V4L2_ENC_CMD_STOP (1)
+#define V4L2_ENC_CMD_PAUSE (2)
+#define V4L2_ENC_CMD_RESUME (3)
+
+/* Flags for V4L2_ENC_CMD_STOP */
+#define V4L2_ENC_CMD_STOP_AT_GOP_END (1 << 0)
+
+struct v4l2_encoder_cmd {
+ __u32 cmd;
+ __u32 flags;
+ union {
+ struct {
+ __u32 data[8];
+ } raw;
+ };
+};
+
+#endif
+
+
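From userspace the new encoder ioctls are driven in the usual V4L2 way; a
sketch of stopping an encoder at a GOP boundary (the TRY variant validates
the command without executing it; error handling is trimmed):

/* userspace sketch; fd is an open capture device */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void stop_at_gop_end(int fd)
{
	struct v4l2_encoder_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = V4L2_ENC_CMD_STOP;
	cmd.flags = V4L2_ENC_CMD_STOP_AT_GOP_END;
	if (ioctl(fd, VIDIOC_TRY_ENCODER_CMD, &cmd) == 0)	/* supported? */
		ioctl(fd, VIDIOC_ENCODER_CMD, &cmd);
}
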
/*
* D A T A S E R V I C E S ( V B I )
*
/*
* A D V A N C E D D E B U G G I N G
+ *
+ * NOTE: EXPERIMENTAL API
*/
/* VIDIOC_DBG_G_REGISTER and VIDIOC_DBG_S_REGISTER */
+
+#define V4L2_CHIP_MATCH_HOST 0 /* Match against chip ID on host (0 for the host) */
+#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver ID */
+#define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */
+
struct v4l2_register {
+ __u32 match_type; /* Match type */
+ __u32 match_chip; /* Match this chip, meaning determined by match_type */
__u64 reg;
- __u32 i2c_id; /* I2C driver ID of the I2C chip, or 0 for the host */
- __u32 val;
+ __u64 val;
};
/*
#if 1
#define VIDIOC_ENUM_FRAMESIZES _IOWR ('V', 74, struct v4l2_frmsizeenum)
#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR ('V', 75, struct v4l2_frmivalenum)
+#define VIDIOC_G_ENC_INDEX _IOR ('V', 76, struct v4l2_enc_idx)
+#define VIDIOC_ENCODER_CMD _IOWR ('V', 77, struct v4l2_encoder_cmd)
+#define VIDIOC_TRY_ENCODER_CMD _IOWR ('V', 78, struct v4l2_encoder_cmd)
+
+/* Experimental, only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */
+#define VIDIOC_DBG_S_REGISTER _IOW ('V', 79, struct v4l2_register)
+#define VIDIOC_DBG_G_REGISTER _IOWR ('V', 80, struct v4l2_register)
#endif
-/* only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */
-#define VIDIOC_DBG_S_REGISTER _IOW ('d', 100, struct v4l2_register)
-#define VIDIOC_DBG_G_REGISTER _IOWR('d', 101, struct v4l2_register)
#ifdef __OLD_VIDIOC_
/* for compatibility, will go away some day */
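
Reading a register through the reworked debug interface then looks like
this from userspace (the register offset is illustrative, and both ioctls
require CAP_SYS_ADMIN):

/* userspace sketch: read a register on the bridge (host) chip */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static __u64 dbg_read_host_reg(int fd, __u64 offset)
{
	struct v4l2_register reg;

	memset(&reg, 0, sizeof(reg));
	reg.match_type = V4L2_CHIP_MATCH_HOST;
	reg.match_chip = 0;	/* 0 means the bridge chip itself */
	reg.reg = offset;	/* offset is illustrative */
	if (ioctl(fd, VIDIOC_DBG_G_REGISTER, &reg) < 0)
		return 0;
	return reg.val;
}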
int wakeup_pdflush(long nr_pages);
void laptop_io_completion(void);
void laptop_sync_completion(void);
-void throttle_vm_writeout(void);
+void throttle_vm_writeout(gfp_t gfp_mask);
/* These are exported to sysctl. */
extern int dirty_background_ratio;
/* ------------------------------------------------------------------------- */
+/* Register/chip ident helper functions */
+
+struct i2c_client; /* forward reference */
+int v4l2_chip_match_i2c_client(struct i2c_client *c, u32 id_type, u32 chip_id);
+int v4l2_chip_match_host(u32 id_type, u32 chip_id);
+
+/* ------------------------------------------------------------------------- */
+
/* Internal ioctls */
/* VIDIOC_INT_DECODE_VBI_LINE */
struct v4l2_jpegcompression *a);
int (*vidioc_s_jpegcomp) (struct file *file, void *fh,
struct v4l2_jpegcompression *a);
+ int (*vidioc_g_enc_index) (struct file *file, void *fh,
+ struct v4l2_enc_idx *a);
+ int (*vidioc_encoder_cmd) (struct file *file, void *fh,
+ struct v4l2_encoder_cmd *a);
+ int (*vidioc_try_encoder_cmd) (struct file *file, void *fh,
+ struct v4l2_encoder_cmd *a);
/* Stream type-dependent parameter ioctls */
int (*vidioc_g_parm) (struct file *file, void *fh,
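
On the driver side the new hooks are plain members of struct video_device
in this era; a hypothetical driver wires them up like so (all mydrv_*
names are illustrative):

/* driver-side sketch: validate in TRY, validate then act in the real cmd */
static int mydrv_try_encoder_cmd(struct file *file, void *fh,
				 struct v4l2_encoder_cmd *cmd)
{
	if (cmd->cmd != V4L2_ENC_CMD_STOP)
		return -EINVAL;
	cmd->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END;	/* only flag honored */
	return 0;
}

static int mydrv_encoder_cmd(struct file *file, void *fh,
			     struct v4l2_encoder_cmd *cmd)
{
	int ret = mydrv_try_encoder_cmd(file, fh, cmd);

	if (ret == 0)
		/* ... tell the hardware encoder to stop ... */ ;
	return ret;
}

static struct video_device mydrv_vdev = {
	/* ... */
	.vidioc_encoder_cmd	= mydrv_encoder_cmd,
	.vidioc_try_encoder_cmd	= mydrv_try_encoder_cmd,
};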
mutex_unlock(&shm_ids(ns).mutex);
}
-struct page *shm_nopage(struct vm_area_struct *vma, unsigned long address,
- int *type)
+static struct page *shm_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
return 0;
}
-#ifndef CONFIG_MMU
+static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+ int (*fsync) (struct file *, struct dentry *, int datasync);
+ struct shm_file_data *sfd = shm_file_data(file);
+ int ret = -EINVAL;
+
+ fsync = sfd->file->f_op->fsync;
+ if (fsync)
+ ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
+ return ret;
+}
+
static unsigned long shm_get_unmapped_area(struct file *file,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct shm_file_data *sfd = shm_file_data(file);
- return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, pgoff,
- flags);
+ return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
+}
+
+int is_file_shm_hugepages(struct file *file)
+{
+ int ret = 0;
+
+ if (file->f_op == &shm_file_operations) {
+ struct shm_file_data *sfd;
+ sfd = shm_file_data(file);
+ ret = is_file_hugepages(sfd->file);
+ }
+ return ret;
}
-#else
-#define shm_get_unmapped_area NULL
-#endif
static const struct file_operations shm_file_operations = {
.mmap = shm_mmap,
+ .fsync = shm_fsync,
.release = shm_release,
.get_unmapped_area = shm_get_unmapped_area,
};
raw_local_irq_restore(flags);
}
-void __init lockdep_init(void)
+void lockdep_init(void)
{
int i;
}
/**
- *
* relay_hotcpu_callback - CPU hotplug callback
* @nb: notifier block
* @action: hotplug action to take
* @hcpu: CPU number
*
- * Returns the success/failure of the operation. (NOTIFY_OK, NOTIFY_BAD)
+ * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
*/
static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
unsigned long action,
}
}
next->sleep_type = SLEEP_NORMAL;
- if (dependent_sleeper(cpu, rq, next))
+ if (rq->nr_running == 1 && dependent_sleeper(cpu, rq, next))
next = rq->idle;
switch_tasks:
if (next == rq->idle)
sched_info_switch(prev, next);
if (likely(prev != next)) {
- next->timestamp = now;
+ next->timestamp = next->last_ran = now;
rq->nr_switches++;
rq->curr = next;
++*switch_count;
}
#else /* !CONFIG_SYSCTL */
-struct ctl_table_header * register_sysctl_table(ctl_table * table,
- int insert_at_head)
+struct ctl_table_header *register_sysctl_table(ctl_table * table)
{
return NULL;
}
/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
+ * @now: current time (in jiffies)
*/
unsigned long get_next_timer_interrupt(unsigned long now)
{
#endif
/**
- * timeofday_is_continuous - check to see if timekeeping is free running
+ * timekeeping_is_continuous - check to see if timekeeping is free running
*/
int timekeeping_is_continuous(void)
{
/**
* __bitmap_shift_right - logical right shift of the bits in a bitmap
- * @dst - destination bitmap
- * @src - source bitmap
- * @nbits - shift by this many bits
- * @bits - bitmap size, in bits
+ * @dst: destination bitmap
+ * @src: source bitmap
+ * @shift: shift by this many bits
+ * @bits: bitmap size, in bits
*
* Shifting right (dividing) means moving bits in the MS -> LS bit
* direction. Zeros are fed into the vacated MS positions and the
/**
* __bitmap_shift_left - logical left shift of the bits in a bitmap
- * @dst - destination bitmap
- * @src - source bitmap
- * @nbits - shift by this many bits
- * @bits - bitmap size, in bits
+ * @dst: destination bitmap
+ * @src: source bitmap
+ * @shift: shift by this many bits
+ * @bits: bitmap size, in bits
*
* Shifting left (multiplying) means moving bits in the LS -> MS
* direction. Zeros are fed into the vacated LS bit positions
printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
i++;
pn = nd;
+ prev = vma->vm_start;
+ pend = vma->vm_end;
}
j = 0;
for (nd = pn; nd; nd = rb_prev(nd)) {
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
-void throttle_vm_writeout(void)
+void throttle_vm_writeout(gfp_t gfp_mask)
{
long background_thresh;
long dirty_thresh;
+ if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) {
+ /*
+ * The caller might hold locks which can prevent IO completion
+ * or progress in the filesystem. So we cannot just sit here
+ * waiting for IO to complete.
+ */
+ congestion_wait(WRITE, HZ/10);
+ return;
+ }
+
for ( ; ; ) {
get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
}
}
-
/*
* writeback at least _min_pages, and keep writing until the amount of dirty
* memory is less than the background threshold, or until we're all clean.
page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
1 << PG_referenced | 1 << PG_arch_1 |
- 1 << PG_checked | 1 << PG_mappedtodisk);
+ 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
set_page_private(page, 0);
set_page_refcounted(page);
*/
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
- struct anon_vma *anon_vma = NULL;
+ struct anon_vma *anon_vma;
unsigned long anon_mapping;
rcu_read_lock();
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
spin_lock(&anon_vma->lock);
+ return anon_vma;
out:
rcu_read_unlock();
- return anon_vma;
+ return NULL;
+}
+
+static void page_unlock_anon_vma(struct anon_vma *anon_vma)
+{
+ spin_unlock(&anon_vma->lock);
+ rcu_read_unlock();
}
/*
if (!mapcount)
break;
}
- spin_unlock(&anon_vma->lock);
+
+ page_unlock_anon_vma(anon_vma);
return referenced;
}
if (ret == SWAP_FAIL || !page_mapped(page))
break;
}
- spin_unlock(&anon_vma->lock);
+
+ page_unlock_anon_vma(anon_vma);
return ret;
}
return error;
}
-struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
+static struct page *shmem_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
struct page *page = NULL;
return retval;
}
-int shmem_mmap(struct file *file, struct vm_area_struct *vma)
+static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
vma->vm_ops = &shmem_vm_ops;
/**
* cache_reap - Reclaim memory from caches.
- * @unused: unused parameter
+ * @w: work descriptor
*
* Called from workqueue/eventd every few seconds.
* Purpose:
return 0;
}
+#if 0
int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
return 0;
#endif
}
+#endif /* 0 */
#ifndef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
pagevec_init(&pvec, 0);
next = start;
- while (next <= end && !ret && !wrapped &&
+ while (next <= end && !wrapped &&
pagevec_lookup(&pvec, mapping, next,
min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
- for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
+ for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t page_index;
}
}
- throttle_vm_writeout();
+ throttle_vm_writeout(sc->gfp_mask);
atomic_dec(&zone->reclaim_in_progress);
return nr_reclaimed;
$prototype =~ s/^noinline +//;
$prototype =~ s/__devinit +//;
$prototype =~ s/^#define\s+//; #ak added
- $prototype =~ s/__attribute__ \(\([a-z,]*\)\)//;
+ $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
# Yes, this truly is vile. We are looking for:
# 1. Return type (may be nothing if we're looking at a macro)
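
The added \s* matters because attribute lists are written both with and
without a space; previously only the one-space form was stripped before
prototype parsing. For example:

/* both spellings are now stripped before kernel-doc parses the prototype */
void foo(void) __attribute__ ((weak));		/* one space: handled before */
struct bar { int x; } __attribute__((packed));	/* no space: newly handled */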