Merge branch 'agp-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/agp-2.6
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 12 Aug 2008 15:28:32 +0000 (08:28 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 12 Aug 2008 15:28:32 +0000 (08:28 -0700)
* 'agp-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/agp-2.6:
  agp: fix SIS 5591/5592 wrong PCI id
  intel/agp: rewrite GTT on resume
  agp: use dev_printk when possible
  amd64-agp: run fallback when no bridges found, not when driver registration fails
  intel_agp: official name for GM45 chipset

41 files changed:
arch/x86/kernel/apic_32.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/io_apic_32.c
arch/x86/kernel/io_apic_64.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/pci-calgary_64.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/vmi_32.c
arch/x86/mm/pgtable.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/device_handler/scsi_dh_emc.c
drivers/scsi/device_handler/scsi_dh_hp_sw.c
drivers/scsi/device_handler/scsi_dh_rdac.c
fs/jbd/transaction.c
fs/jbd2/transaction.c
include/asm-x86/efi.h
include/asm-x86/hw_irq.h
include/asm-x86/irq_vectors.h
include/linux/lockdep.h
include/linux/rcuclassic.h
include/linux/sched.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h
kernel/Kconfig.hz
kernel/cpu.c
kernel/lockdep.c
kernel/lockdep_internals.h
kernel/lockdep_proc.c
kernel/posix-timers.c
kernel/sched.c
kernel/sched_clock.c
kernel/sched_fair.c
kernel/sched_rt.c
kernel/signal.c
kernel/smp.c
kernel/spinlock.c
kernel/time/tick-sched.c
kernel/workqueue.c
lib/debug_locks.c
mm/mmap.c

diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index d6c8983583713d747790587861318a5fb58eb342..039a8d4aaf62db88eeb36040ad15ff64be9ed852 100644
@@ -1720,15 +1720,19 @@ static int __init parse_lapic_timer_c2_ok(char *arg)
 }
 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
 
-static int __init apic_set_verbosity(char *str)
+static int __init apic_set_verbosity(char *arg)
 {
-       if (strcmp("debug", str) == 0)
+       if (!arg)
+               return -EINVAL;
+
+       if (strcmp(arg, "debug") == 0)
                apic_verbosity = APIC_DEBUG;
-       else if (strcmp("verbose", str) == 0)
+       else if (strcmp(arg, "verbose") == 0)
                apic_verbosity = APIC_VERBOSE;
-       return 1;
+
+       return 0;
 }
-__setup("apic=", apic_set_verbosity);
+early_param("apic", apic_set_verbosity);
 
 static int __init lapic_insert_resource(void)
 {
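
The switch from __setup() to early_param() changes both when the handler runs and what it receives: early parameters are parsed much earlier in boot, and the argument pointer may be NULL when "apic" is given with no "=value", which is why the handler now validates it. A minimal userspace sketch of the new contract (the stand-in globals are illustrative, not kernel code):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define APIC_VERBOSE 1                  /* stand-in values */
#define APIC_DEBUG   2

static int apic_verbosity;

static int apic_set_verbosity(const char *arg)
{
        if (!arg)
                return -EINVAL;         /* "apic" with no "=value" */
        if (strcmp(arg, "debug") == 0)
                apic_verbosity = APIC_DEBUG;
        else if (strcmp(arg, "verbose") == 0)
                apic_verbosity = APIC_VERBOSE;
        return 0;
}

int main(void)
{
        printf("%d\n", apic_set_verbosity(NULL));       /* -22 (-EINVAL) */
        printf("%d\n", apic_set_verbosity("debug"));    /* 0 */
        printf("verbosity=%d\n", apic_verbosity);       /* 2 */
        return 0;
}
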
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c9b58a806e852d3d4a2ff96e0f48c0e737cff80f..c8e315f1aa837d95bc20f4e48655b8b90844fe9f 100644
@@ -50,6 +50,8 @@ static double __initdata y = 3145727.0;
  */
 static void __init check_fpu(void)
 {
+       s32 fdiv_bug;
+
        if (!boot_cpu_data.hard_math) {
 #ifndef CONFIG_MATH_EMULATION
                printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
@@ -74,8 +76,10 @@ static void __init check_fpu(void)
                "fistpl %0\n\t"
                "fwait\n\t"
                "fninit"
-               : "=m" (*&boot_cpu_data.fdiv_bug)
+               : "=m" (*&fdiv_bug)
                : "m" (*&x), "m" (*&y));
+
+       boot_cpu_data.fdiv_bug = fdiv_bug;
        if (boot_cpu_data.fdiv_bug)
                printk("Hmm, FPU with FDIV bug.\n");
 }
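
The new local exists because boot_cpu_data.fdiv_bug is a byte-sized field, while "fistpl %0" stores 32 bits through the "=m" constraint; routing the store through an s32 temporary keeps it from clobbering the neighbouring bug flags. A hedged userspace illustration of that failure mode (field names are illustrative):

#include <stdio.h>
#include <string.h>

struct bugs { char fdiv_bug, f00f_bug, coma_bug, pad; };

int main(void)
{
        struct bugs b;
        int fdiv_bug = 0;                       /* the patch's local s32 */

        memset(&b, 1, sizeof(b));
        /* a 32-bit store aimed at a 1-byte field wipes its neighbours */
        memcpy(&b.fdiv_bug, &fdiv_bug, sizeof(fdiv_bug));
        printf("%d %d %d\n", b.fdiv_bug, b.f00f_bug, b.coma_bug); /* 0 0 0 */

        b.fdiv_bug = (char)fdiv_bug;            /* byte-sized copy: safe */
        return 0;
}
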
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index de9aa0e3a9c51e10df0e85403529e3aa285ad395..09cddb57bec45e9ddd60ba376b3a70aca647ad8d 100644
@@ -57,7 +57,7 @@ atomic_t irq_mis_count;
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
 
 int timer_through_8259 __initdata;
 
@@ -1209,10 +1209,6 @@ static int assign_irq_vector(int irq)
        return vector;
 }
 
-void setup_vector_irq(int cpu)
-{
-}
-
 static struct irq_chip ioapic_chip;
 
 #define IOAPIC_AUTO    -1
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 8269434d170765a6466eb7039e2b849d8b0b5b18..61a83b70c18fcc65ce60b965ee3a6e0456622dc4 100644
@@ -101,7 +101,7 @@ int timer_through_8259 __initdata;
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
-DEFINE_SPINLOCK(vector_lock);
+static DEFINE_SPINLOCK(vector_lock);
 
 /*
  * # of IRQ routing registers
@@ -697,6 +697,19 @@ static int pin_2_irq(int idx, int apic, int pin)
        return irq;
 }
 
+void lock_vector_lock(void)
+{
+       /* Used so that the online set of cpus does not change
+        * during assign_irq_vector.
+        */
+       spin_lock(&vector_lock);
+}
+
+void unlock_vector_lock(void)
+{
+       spin_unlock(&vector_lock);
+}
+
 static int __assign_irq_vector(int irq, cpumask_t mask)
 {
        /*
@@ -802,7 +815,7 @@ static void __clear_irq_vector(int irq)
        cpus_clear(cfg->domain);
 }
 
-static void __setup_vector_irq(int cpu)
+void __setup_vector_irq(int cpu)
 {
        /* Initialize vector_irq on a new cpu */
        /* This function must be called with vector_lock held */
@@ -825,14 +838,6 @@ static void __setup_vector_irq(int cpu)
        }
 }
 
-void setup_vector_irq(int cpu)
-{
-       spin_lock(&vector_lock);
-       __setup_vector_irq(smp_processor_id());
-       spin_unlock(&vector_lock);
-}
-
-
 static struct irq_chip ioapic_chip;
 
 static void ioapic_register_intr(int irq, unsigned long trigger)
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 6ae005ccaed83bc46a6666f1d65b80baf7737f8d..678090508a6240996aee5eedbdd5774cc86ab629 100644
@@ -83,7 +83,7 @@ static void __init MP_bus_info(struct mpc_config_bus *m)
        if (x86_quirks->mpc_oem_bus_info)
                x86_quirks->mpc_oem_bus_info(m, str);
        else
-               printk(KERN_INFO "Bus #%d is %s\n", m->mpc_busid, str);
+               apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->mpc_busid, str);
 
 #if MAX_MP_BUSSES < 256
        if (m->mpc_busid >= MAX_MP_BUSSES) {
@@ -154,7 +154,7 @@ static void __init MP_ioapic_info(struct mpc_config_ioapic *m)
 
 static void print_MP_intsrc_info(struct mpc_config_intsrc *m)
 {
-       printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x,"
+       apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
                " IRQ %02x, APIC ID %x, APIC INT %02x\n",
                m->mpc_irqtype, m->mpc_irqflag & 3,
                (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
@@ -163,7 +163,7 @@ static void print_MP_intsrc_info(struct mpc_config_intsrc *m)
 
 static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
 {
-       printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x,"
+       apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
                " IRQ %02x, APIC ID %x, APIC INT %02x\n",
                mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
                (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
@@ -235,7 +235,7 @@ static void __init MP_intsrc_info(struct mpc_config_intsrc *m)
 
 static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m)
 {
-       printk(KERN_INFO "Lint: type %d, pol %d, trig %d, bus %02x,"
+       apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x,"
                " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
                m->mpc_irqtype, m->mpc_irqflag & 3,
                (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
@@ -695,7 +695,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
        unsigned int *bp = phys_to_virt(base);
        struct intel_mp_floating *mpf;
 
-       printk(KERN_DEBUG "Scan SMP from %p for %ld bytes.\n", bp, length);
+       apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
+                       bp, length);
        BUILD_BUG_ON(sizeof(*mpf) != 16);
 
        while (length > 0) {
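
These conversions quieten the boot log because apic_printk() gates on the verbosity chosen by the "apic=" handler above; at the time it was essentially the following macro (a sketch, not part of this patch):

#define apic_printk(v, s, a...)                 \
do {                                            \
        if ((v) <= apic_verbosity)              \
                printk(s, ##a);                 \
} while (0)

So the bus and interrupt-source dumps now appear only when booting with apic=verbose or apic=debug.
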
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index b67a4b1d4eaefd1399ae3996932b061a12357fc7..02d19328525db264c3deac9793254274d4f0891d 100644
@@ -1350,7 +1350,7 @@ static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
  * Function for kdump case. Get the tce tables from first kernel
  * by reading the contents of the base address register of calgary iommu
  */
-static void get_tce_space_from_tar()
+static void get_tce_space_from_tar(void)
 {
        int bus;
        void __iomem *target;
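
The calgary one-liner is worth spelling out: in C, an empty parameter list "()" declares a function whose arguments go unchecked, whereas "(void)" declares a true zero-argument function. A minimal demonstration (compile with -Wstrict-prototypes to have the old style flagged):

static int old_style() { return 0; }     /* ()    : arguments unchecked */
static int new_style(void) { return 0; } /* (void): zero args, checked */

int main(void)
{
        old_style(42);          /* accepted; the compiler cannot check it */
        /* new_style(42); */    /* would be a compile-time error */
        return new_style();
}
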
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 2d888586385d2599dce70e99e1b4603714155e9f..68b48e3fbcbd92b7115ac90cb59cd9c1d3bc4054 100644
@@ -604,6 +604,14 @@ void __init setup_arch(char **cmdline_p)
        early_cpu_init();
        early_ioremap_init();
 
+#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
+       /*
+        * Must be before kernel pagetables are setup
+        * or fixmap area is touched.
+        */
+       vmi_init();
+#endif
+
        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
@@ -817,14 +825,6 @@ void __init setup_arch(char **cmdline_p)
        kvmclock_init();
 #endif
 
-#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
-       /*
-        * Must be after max_low_pfn is determined, and before kernel
-        * pagetables are setup.
-        */
-       vmi_init();
-#endif
-
        paravirt_pagetable_setup_start(swapper_pg_dir);
        paging_init();
        paravirt_pagetable_setup_done(swapper_pg_dir);
@@ -861,12 +861,6 @@ void __init setup_arch(char **cmdline_p)
        init_apic_mappings();
        ioapic_init_mappings();
 
-#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
-       if (def_to_bigsmp)
-               printk(KERN_WARNING "More than 8 CPUs detected and "
-                       "CONFIG_X86_PC cannot handle it.\nUse "
-                       "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
-#endif
        kvm_guest_init();
 
        e820_reserve_resources();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 332512767f4f0600d15ed473391528b20c4a5a72..91055d7fc1b0444d3e1446181216a06db2ce6978 100644
@@ -326,12 +326,16 @@ static void __cpuinit start_secondary(void *unused)
         * for which cpus receive the IPI. Holding this
         * lock helps us to not include this cpu in a currently in progress
         * smp_call_function().
+        *
+        * We need to hold vector_lock so that the set of online cpus
+        * does not change while we are assigning vectors to cpus.  Holding
+        * this lock ensures we don't half assign or remove an irq from a cpu.
         */
        ipi_call_lock_irq();
-#ifdef CONFIG_X86_IO_APIC
-       setup_vector_irq(smp_processor_id());
-#endif
+       lock_vector_lock();
+       __setup_vector_irq(smp_processor_id());
        cpu_set(smp_processor_id(), cpu_online_map);
+       unlock_vector_lock();
        ipi_call_unlock_irq();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
@@ -990,7 +994,17 @@ int __cpuinit native_cpu_up(unsigned int cpu)
        flush_tlb_all();
        low_mappings = 1;
 
+#ifdef CONFIG_X86_PC
+       if (def_to_bigsmp && apicid > 8) {
+               printk(KERN_WARNING
+                       "More than 8 CPUs detected - skipping them.\n"
+                       "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
+               err = -1;
+       } else
+               err = do_boot_cpu(apicid, cpu);
+#else
        err = do_boot_cpu(apicid, cpu);
+#endif
 
        zap_low_mappings();
        low_mappings = 0;
@@ -1336,7 +1350,9 @@ int __cpu_disable(void)
        remove_siblinginfo(cpu);
 
        /* It's now safe to remove this processor from the online map */
+       lock_vector_lock();
        remove_cpu_from_maps(cpu);
+       unlock_vector_lock();
        fixup_irqs(cpu_online_map);
        return 0;
 }
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 0a1b1a9d922df7f4380a40d4b210330dbc8db17a..6ca515d6db543b9a7739b9f2f9ab39d181ed3f9b 100644
@@ -37,6 +37,7 @@
 #include <asm/timer.h>
 #include <asm/vmi_time.h>
 #include <asm/kmap_types.h>
+#include <asm/setup.h>
 
 /* Convenient for calling VMI functions indirectly in the ROM */
 typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
@@ -683,7 +684,7 @@ void vmi_bringup(void)
 {
        /* We must establish the lowmem mapping for MMU ops to work */
        if (vmi_ops.set_linear_mapping)
-               vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
+               vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
 }
 
 /*
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 557b2abceef86f83058ff447be17a1da56942d44..d50302774fe2cf7d9796d9856b9c8515e69b5f54 100644
@@ -207,6 +207,9 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
        unsigned long addr;
        int i;
 
+       if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
+               return;
+
        pud = pud_offset(pgd, 0);
 
        for (addr = i = 0; i < PREALLOCATED_PMDS;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fcdd73f256256ccddc5a5d3887d8220a34d0107b..994da56fffed4e7c513345d86f93b3a9acd40238 100644
@@ -680,7 +680,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
 
 }
 
-const struct scsi_dh_devlist alua_dev_list[] = {
+static const struct scsi_dh_devlist alua_dev_list[] = {
        {"HP", "MSA VOLUME" },
        {"HP", "HSV101" },
        {"HP", "HSV111" },
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index aa46b131b20eb44eef41c17c3a086ed6935d24ed..b9d23e9e9a44dff3ba612bd90f3fbcb3db4a1392 100644
@@ -562,7 +562,7 @@ done:
        return result;
 }
 
-const struct scsi_dh_devlist clariion_dev_list[] = {
+static const struct scsi_dh_devlist clariion_dev_list[] = {
        {"DGC", "RAID"},
        {"DGC", "DISK"},
        {"DGC", "VRAID"},
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9c7a1f8ebb722df09e88c67b49235448ef006a60..a6a4ef3ad51c01e83421d22a11bafbc0f9eb87aa 100644
@@ -282,7 +282,7 @@ static int hp_sw_activate(struct scsi_device *sdev)
        return ret;
 }
 
-const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
+static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
        {"COMPAQ", "MSA1000 VOLUME"},
        {"COMPAQ", "HSV110"},
        {"HP", "HSV100"},
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b093a501f8ae673b980ca896d6d73d4ed407a78e..e7c7b4ebc1fe3c3cebbcce6c1342a2e54145e7f2 100644
@@ -574,7 +574,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
        return SCSI_RETURN_NOT_HANDLED;
 }
 
-const struct scsi_dh_devlist rdac_dev_list[] = {
+static const struct scsi_dh_devlist rdac_dev_list[] = {
        {"IBM", "1722"},
        {"IBM", "1724"},
        {"IBM", "1726"},
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 8dee32007500e3200451b9334b849f2362611cc7..0540ca27a4464060e9c230095f664cd9937b21a1 100644
@@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
                goto out;
        }
 
-       lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+       lock_map_acquire(&handle->h_lockdep_map);
 
 out:
        return handle;
@@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle)
                spin_unlock(&journal->j_state_lock);
        }
 
-       lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+       lock_map_release(&handle->h_lockdep_map);
 
        jbd_free_handle(handle);
        return err;
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 4f7cadbb19faa4396edeb4ff0be6840fc7a313af..e5d540588fa9609296f446c232278202bd1f2d6b 100644
@@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
                goto out;
        }
 
-       lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+       lock_map_acquire(&handle->h_lockdep_map);
 out:
        return handle;
 }
@@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle)
                spin_unlock(&journal->j_state_lock);
        }
 
-       lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+       lock_map_release(&handle->h_lockdep_map);
 
        jbd2_free_handle(handle);
        return err;
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index 7ed2bd7a7f51c3621b26acde91bdaa65f36d9f96..d4f2b0abe9294eefce100885a7c342bea885d0c5 100644
@@ -86,7 +86,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
        efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
-extern void *efi_ioremap(unsigned long addr, unsigned long size);
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
 
 #endif /* CONFIG_X86_32 */
 
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index 77ba51df56680fcd9e28b4529eb647c0eea07292..edd0b95f14d0df6dd9b4f4cfb01d02548d485c9b 100644
@@ -98,9 +98,17 @@ extern void (*const interrupt[NR_IRQS])(void);
 #else
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern spinlock_t vector_lock;
 #endif
-extern void setup_vector_irq(int cpu);
+
+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64)
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
+extern void __setup_vector_irq(int cpu);
+#else
+static inline void lock_vector_lock(void) {}
+static inline void unlock_vector_lock(void) {}
+static inline void __setup_vector_irq(int cpu) {}
+#endif
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
index 90b1d1f12f08d652d39fef01e241ed321f38c415..b95d167b7fb26d8697a1d1a56b2000648b042e8b 100644
 #define LAST_VM86_IRQ          15
 #define invalid_vm86_irq(irq)  ((irq) < 3 || (irq) > 15)
 
-#if !defined(CONFIG_X86_VOYAGER)
+#ifdef CONFIG_X86_64
+# if NR_CPUS < MAX_IO_APICS
+#  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
+# else
+#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
+# endif
+# define NR_IRQ_VECTORS NR_IRQS
+
+#elif !defined(CONFIG_X86_VOYAGER)
 
 # if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
 
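
To make the sizing concrete: assuming the usual x86_64 NR_VECTORS of 256, a NR_CPUS=8 build now gets NR_IRQS = 256 + 32 * 8 = 512, while huge NR_CPUS configurations are capped at 256 + 32 * MAX_IO_APICS by the second branch.
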
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2486eb4edbf146b77effbfe27c14689ea70db1de..331e5f1c2d8ec4439bc69294337dc1b00345f3d8 100644
@@ -89,6 +89,7 @@ struct lock_class {
 
        struct lockdep_subclass_key     *key;
        unsigned int                    subclass;
+       unsigned int                    dep_gen_id;
 
        /*
         * IRQ/softirq usage tracking bits:
@@ -189,6 +190,14 @@ struct lock_chain {
        u64                             chain_key;
 };
 
+#define MAX_LOCKDEP_KEYS_BITS          13
+/*
+ * Subtract one because we offset hlock->class_idx by 1 in order
+ * to make 0 mean no class. This avoids overflowing the class_idx
+ * bitfield and hitting the BUG in hlock_class().
+ */
+#define MAX_LOCKDEP_KEYS               ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+
 struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
@@ -205,14 +214,14 @@ struct held_lock {
         * with zero), here we store the previous hash value:
         */
        u64                             prev_chain_key;
-       struct lock_class               *class;
        unsigned long                   acquire_ip;
        struct lockdep_map              *instance;
-
+       struct lockdep_map              *nest_lock;
 #ifdef CONFIG_LOCK_STAT
        u64                             waittime_stamp;
        u64                             holdtime_stamp;
 #endif
+       unsigned int                    class_idx:MAX_LOCKDEP_KEYS_BITS;
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest ontop of process context chains, but we 'separate'
@@ -226,11 +235,11 @@ struct held_lock {
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
-       int                             irq_context;
-       int                             trylock;
-       int                             read;
-       int                             check;
-       int                             hardirqs_off;
+       unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+       unsigned int trylock:1;
+       unsigned int read:2;        /* see lock_acquire() comment */
+       unsigned int check:2;       /* see lock_acquire() comment */
+       unsigned int hardirqs_off:1;
 };
 
 /*
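
Packing the five bookkeeping ints into bitfields alongside the new 13-bit class_idx collapses them into a single word, which matters because every task carries an array of held_lock structures. A hedged userspace sketch of the saving (exact sizes are ABI-dependent):

#include <stdio.h>

struct old_flags { int irq_context, trylock, read, check, hardirqs_off; };
struct new_flags {
        unsigned int class_idx:13;
        unsigned int irq_context:2;
        unsigned int trylock:1;
        unsigned int read:2;
        unsigned int check:2;
        unsigned int hardirqs_off:1;
};

int main(void)
{
        /* typically 20 bytes vs 4: all 21 bits fit in one unsigned int */
        printf("%zu vs %zu\n", sizeof(struct old_flags),
               sizeof(struct new_flags));
        return 0;
}
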
@@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
  *   2: full validation
  */
 extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-                        int trylock, int read, int check, unsigned long ip);
+                        int trylock, int read, int check,
+                        struct lockdep_map *nest_lock, unsigned long ip);
 
 extern void lock_release(struct lockdep_map *lock, int nested,
                         unsigned long ip);
 
+extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
+                             unsigned long ip);
+
 # define INIT_LOCKDEP                          .lockdep_recursion = 0,
 
 #define lockdep_depth(tsk)     (debug_locks ? (tsk)->lockdep_depth : 0)
@@ -313,8 +326,9 @@ static inline void lockdep_on(void)
 {
 }
 
-# define lock_acquire(l, s, t, r, c, i)                do { } while (0)
+# define lock_acquire(l, s, t, r, c, n, i)     do { } while (0)
 # define lock_release(l, n, i)                 do { } while (0)
+# define lock_set_subclass(l, s, i)            do { } while (0)
 # define lockdep_init()                                do { } while (0)
 # define lockdep_info()                                do { } while (0)
 # define lockdep_init_map(lock, name, key, sub)        do { (void)(key); } while (0)
@@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define spin_acquire(l, s, t, i)             lock_acquire(l, s, t, 0, 2, i)
+#  define spin_acquire(l, s, t, i)             lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)     lock_acquire(l, s, t, 0, 2, n, i)
 # else
-#  define spin_acquire(l, s, t, i)             lock_acquire(l, s, t, 0, 1, i)
+#  define spin_acquire(l, s, t, i)             lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)     lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define spin_release(l, n, i)                 lock_release(l, n, i)
 #else
@@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define rwlock_acquire(l, s, t, i)           lock_acquire(l, s, t, 0, 2, i)
-#  define rwlock_acquire_read(l, s, t, i)      lock_acquire(l, s, t, 2, 2, i)
+#  define rwlock_acquire(l, s, t, i)           lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)      lock_acquire(l, s, t, 2, 2, NULL, i)
 # else
-#  define rwlock_acquire(l, s, t, i)           lock_acquire(l, s, t, 0, 1, i)
-#  define rwlock_acquire_read(l, s, t, i)      lock_acquire(l, s, t, 2, 1, i)
+#  define rwlock_acquire(l, s, t, i)           lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)      lock_acquire(l, s, t, 2, 1, NULL, i)
 # endif
 # define rwlock_release(l, n, i)               lock_release(l, n, i)
 #else
@@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define mutex_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 2, i)
+#  define mutex_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 2, NULL, i)
 # else
-#  define mutex_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 1, i)
+#  define mutex_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define mutex_release(l, n, i)                        lock_release(l, n, i)
 #else
@@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define rwsem_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 2, i)
-#  define rwsem_acquire_read(l, s, t, i)       lock_acquire(l, s, t, 1, 2, i)
+#  define rwsem_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)       lock_acquire(l, s, t, 1, 2, NULL, i)
 # else
-#  define rwsem_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 1, i)
-#  define rwsem_acquire_read(l, s, t, i)       lock_acquire(l, s, t, 1, 1, i)
+#  define rwsem_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)       lock_acquire(l, s, t, 1, 1, NULL, i)
 # endif
 # define rwsem_release(l, n, i)                        lock_release(l, n, i)
 #else
@@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 # define rwsem_release(l, n, i)                        do { } while (0)
 #endif
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define lock_map_acquire(l)          lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+# else
+#  define lock_map_acquire(l)          lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+# endif
+# define lock_map_release(l)                   lock_release(l, 1, _THIS_IP_)
+#else
+# define lock_map_acquire(l)                   do { } while (0)
+# define lock_map_release(l)                   do { } while (0)
+#endif
+
 #endif /* __LINUX_LOCKDEP_H */
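
The new lock_map_acquire()/lock_map_release() helpers replace the open-coded lock_acquire(..., 0, 0, 0, 2, ..., _THIS_IP_) calls seen in the jbd hunks above: they let code present a pseudo-lock (an object with a lockdep_map but no real lock word) to the dependency checker. A kernel-style sketch of annotating such an object (names are illustrative, and this is not standalone-runnable code):

static struct lock_class_key handle_key;
static struct lockdep_map handle_map;

static void handle_subsys_init(void)
{
        lockdep_init_map(&handle_map, "my_handle", &handle_key, 0);
}

static void handle_start(void)
{
        /* "acquire" the pseudo-lock: from here on, lockdep checks
         * orderings against every real lock taken while it is held */
        lock_map_acquire(&handle_map);
}

static void handle_stop(void)
{
        lock_map_release(&handle_map);
}
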
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 8c774905dcfec6c5a98d500d396b5dfdee97837e..4ab8436227276322042c5b7374e6d0c561e80486 100644
@@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu);
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern struct lockdep_map rcu_lock_map;
 # define rcu_read_acquire()    \
-                       lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+                       lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
 # define rcu_read_release()    lock_release(&rcu_lock_map, 1, _THIS_IP_)
 #else
 # define rcu_read_acquire()    do { } while (0)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5270d449ff9deb9036f0b4d7852c4877dc561262..5850bfb968a87079d767387169a0847e8530f217 100644
@@ -1551,16 +1551,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
-#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init(void)
-{
-}
-
-static inline u64 sched_clock_cpu(int cpu)
-{
-       return sched_clock();
-}
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
 
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
 {
 }
@@ -1572,28 +1566,11 @@ static inline void sched_clock_idle_sleep_event(void)
 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
-
-#ifdef CONFIG_NO_HZ
-static inline void sched_clock_tick_stop(int cpu)
-{
-}
-
-static inline void sched_clock_tick_start(int cpu)
-{
-}
-#endif
-
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
+#else
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
-#ifdef CONFIG_NO_HZ
-extern void sched_clock_tick_stop(int cpu);
-extern void sched_clock_tick_start(int cpu);
 #endif
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 61e5610ad165592c0605eb68330ff50d9e0b77f8..e0c0fccced46c4b09d654121423f76357407bc95 100644
@@ -183,8 +183,14 @@ do {                                                               \
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define spin_lock_nest_lock(lock, nest_lock)                          \
+        do {                                                           \
+                typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
+                _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);     \
+        } while (0)
 #else
 # define spin_lock_nested(lock, subclass) _spin_lock(lock)
+# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
 #endif
 
 #define write_lock(lock)               _write_lock(lock)
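
spin_lock_nest_lock() pairs with the check_deadlock() change in kernel/lockdep.c further down: holding the named outer lock licenses taking arbitrarily many locks of one class without triggering the recursive-deadlock report. A hedged sketch of the intended pattern (types and names are illustrative):

static DEFINE_SPINLOCK(all_objects_lock);       /* serializes full walks */

struct object {
        spinlock_t lock;
        /* ... */
};

static void lock_all_objects(struct object *obj, int n)
{
        int i;

        spin_lock(&all_objects_lock);
        for (i = 0; i < n; i++)         /* same class, many times over */
                spin_lock_nest_lock(&obj[i].lock, &all_objects_lock);
        /* all object locks held; caller releases in reverse order */
}
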
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8a2307ce729687f25a954969ae14df0b31748915..d79845d034b530372ff0049655179525a45dffc0 100644
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
 void __lockfunc _spin_lock(spinlock_t *lock)           __acquires(lock);
 void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
                                                        __acquires(lock);
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+                                                       __acquires(lock);
 void __lockfunc _read_lock(rwlock_t *lock)             __acquires(lock);
 void __lockfunc _write_lock(rwlock_t *lock)            __acquires(lock);
 void __lockfunc _spin_lock_bh(spinlock_t *lock)                __acquires(lock);
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 382dd5a8b2d7f5dd97359895358610f2f4866595..94fabd534b03d9347dc117b653204fa838283def 100644
@@ -55,4 +55,4 @@ config HZ
        default 1000 if HZ_1000
 
 config SCHED_HRTICK
-       def_bool HIGH_RES_TIMERS && USE_GENERIC_SMP_HELPERS
+       def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS)
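
Read as a truth table: a UP kernel (SMP=n) with HIGH_RES_TIMERS=y now gets SCHED_HRTICK=y via the !SMP arm, whereas the old expression required USE_GENERIC_SMP_HELPERS, which a UP build never sets, so hrtick was silently unavailable there.
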
diff --git a/kernel/cpu.c b/kernel/cpu.c
index e202a68d1cc180cf62ade78b025ec2329c8aabcd..c977c339f55933d3229e405d49b08226c21f966a 100644
@@ -349,6 +349,8 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));
 
+       cpu_set(cpu, cpu_active_map);
+
        /* Now call notifier in preparation. */
        raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
 
@@ -383,9 +385,6 @@ int __cpuinit cpu_up(unsigned int cpu)
 
        err = _cpu_up(cpu, 0);
 
-       if (cpu_online(cpu))
-               cpu_set(cpu, cpu_active_map);
-
 out:
        cpu_maps_update_done();
        return err;
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index d38a643629735b533c2e0ddff300a9881e1bf674..1aa91fd6b06ec341897646f2322e35920640f448 100644
@@ -124,6 +124,15 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 unsigned long nr_lock_classes;
 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
+static inline struct lock_class *hlock_class(struct held_lock *hlock)
+{
+       if (!hlock->class_idx) {
+               DEBUG_LOCKS_WARN_ON(1);
+               return NULL;
+       }
+       return lock_classes + hlock->class_idx - 1;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
@@ -222,7 +231,7 @@ static void lock_release_holdtime(struct held_lock *hlock)
 
        holdtime = sched_clock() - hlock->holdtime_stamp;
 
-       stats = get_lock_stats(hlock->class);
+       stats = get_lock_stats(hlock_class(hlock));
        if (hlock->read)
                lock_time_inc(&stats->read_holdtime, holdtime);
        else
@@ -372,6 +381,19 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;
 
+static unsigned int lockdep_dependency_gen_id;
+
+static bool lockdep_dependency_visit(struct lock_class *source,
+                                    unsigned int depth)
+{
+       if (!depth)
+               lockdep_dependency_gen_id++;
+       if (source->dep_gen_id == lockdep_dependency_gen_id)
+               return true;
+       source->dep_gen_id = lockdep_dependency_gen_id;
+       return false;
+}
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
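
lockdep_dependency_visit() is the classic generation-stamp trick: bump a global counter at the root of each graph walk and stamp nodes as they are reached, so detecting a revisit costs one compare and no per-walk clearing pass. A standalone userspace sketch of the same idea (two-successor nodes, purely illustrative):

#include <stdbool.h>
#include <stdio.h>

static unsigned int gen_id;

struct node { unsigned int dep_gen_id; struct node *next[2]; };

static bool visited(struct node *n, unsigned int depth)
{
        if (!depth)
                gen_id++;               /* new walk: fresh generation */
        if (n->dep_gen_id == gen_id)
                return true;            /* already seen in this walk */
        n->dep_gen_id = gen_id;
        return false;
}

static unsigned long count_deps(struct node *n, unsigned int depth)
{
        unsigned long ret = 1;

        if (!n || visited(n, depth))
                return 0;
        ret += count_deps(n->next[0], depth + 1);
        ret += count_deps(n->next[1], depth + 1);
        return ret;
}

int main(void)
{
        struct node a = {0}, b = {0};

        a.next[0] = a.next[1] = &b;     /* b reachable twice from a */
        printf("%lu\n", count_deps(&a, 0));     /* prints 2, not 3 */
        return 0;
}
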
@@ -505,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
 
 static void print_lock(struct held_lock *hlock)
 {
-       print_lock_name(hlock->class);
+       print_lock_name(hlock_class(hlock));
        printk(", at: ");
        print_ip_sym(hlock->acquire_ip);
 }
@@ -558,6 +580,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 {
        struct lock_list *entry;
 
+       if (lockdep_dependency_visit(class, depth))
+               return;
+
        if (DEBUG_LOCKS_WARN_ON(depth >= 20))
                return;
 
@@ -932,7 +957,7 @@ static noinline int print_circular_bug_tail(void)
        if (debug_locks_silent)
                return 0;
 
-       this.class = check_source->class;
+       this.class = hlock_class(check_source);
        if (!save_trace(&this.trace))
                return 0;
 
@@ -959,6 +984,67 @@ static int noinline print_infinite_recursion_bug(void)
        return 0;
 }
 
+unsigned long __lockdep_count_forward_deps(struct lock_class *class,
+                                          unsigned int depth)
+{
+       struct lock_list *entry;
+       unsigned long ret = 1;
+
+       if (lockdep_dependency_visit(class, depth))
+               return 0;
+
+       /*
+        * Recurse this class's dependency list:
+        */
+       list_for_each_entry(entry, &class->locks_after, entry)
+               ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+
+       return ret;
+}
+
+unsigned long lockdep_count_forward_deps(struct lock_class *class)
+{
+       unsigned long ret, flags;
+
+       local_irq_save(flags);
+       __raw_spin_lock(&lockdep_lock);
+       ret = __lockdep_count_forward_deps(class, 0);
+       __raw_spin_unlock(&lockdep_lock);
+       local_irq_restore(flags);
+
+       return ret;
+}
+
+unsigned long __lockdep_count_backward_deps(struct lock_class *class,
+                                           unsigned int depth)
+{
+       struct lock_list *entry;
+       unsigned long ret = 1;
+
+       if (lockdep_dependency_visit(class, depth))
+               return 0;
+       /*
+        * Recurse this class's dependency list:
+        */
+       list_for_each_entry(entry, &class->locks_before, entry)
+               ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+
+       return ret;
+}
+
+unsigned long lockdep_count_backward_deps(struct lock_class *class)
+{
+       unsigned long ret, flags;
+
+       local_irq_save(flags);
+       __raw_spin_lock(&lockdep_lock);
+       ret = __lockdep_count_backward_deps(class, 0);
+       __raw_spin_unlock(&lockdep_lock);
+       local_irq_restore(flags);
+
+       return ret;
+}
+
 /*
  * Prove that the dependency graph starting at <entry> can not
  * lead to <target>. Print an error and return 0 if it does.
@@ -968,6 +1054,9 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 {
        struct lock_list *entry;
 
+       if (lockdep_dependency_visit(source, depth))
+               return 1;
+
        debug_atomic_inc(&nr_cyclic_check_recursions);
        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
@@ -977,7 +1066,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
         * Check this lock's dependency list:
         */
        list_for_each_entry(entry, &source->locks_after, entry) {
-               if (entry->class == check_target->class)
+               if (entry->class == hlock_class(check_target))
                        return print_circular_bug_header(entry, depth+1);
                debug_atomic_inc(&nr_cyclic_checks);
                if (!check_noncircular(entry->class, depth+1))
@@ -1011,6 +1100,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
        struct lock_list *entry;
        int ret;
 
+       if (lockdep_dependency_visit(source, depth))
+               return 1;
+
        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
        if (depth >= RECURSION_LIMIT)
@@ -1050,6 +1142,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
        struct lock_list *entry;
        int ret;
 
+       if (lockdep_dependency_visit(source, depth))
+               return 1;
+
        if (!__raw_spin_is_locked(&lockdep_lock))
                return DEBUG_LOCKS_WARN_ON(1);
 
@@ -1064,6 +1159,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
                return 2;
        }
 
+       if (!source && debug_locks_off_graph_unlock()) {
+               WARN_ON(1);
+               return 0;
+       }
+
        /*
         * Check this lock's dependency list:
         */
@@ -1103,9 +1203,9 @@ print_bad_irq_dependency(struct task_struct *curr,
        printk("\nand this task is already holding:\n");
        print_lock(prev);
        printk("which would create a new lock dependency:\n");
-       print_lock_name(prev->class);
+       print_lock_name(hlock_class(prev));
        printk(" ->");
-       print_lock_name(next->class);
+       print_lock_name(hlock_class(next));
        printk("\n");
 
        printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
@@ -1146,12 +1246,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
        find_usage_bit = bit_backwards;
        /* fills in <backwards_match> */
-       ret = find_usage_backwards(prev->class, 0);
+       ret = find_usage_backwards(hlock_class(prev), 0);
        if (!ret || ret == 1)
                return ret;
 
        find_usage_bit = bit_forwards;
-       ret = find_usage_forwards(next->class, 0);
+       ret = find_usage_forwards(hlock_class(next), 0);
        if (!ret || ret == 1)
                return ret;
        /* ret == 2 */
@@ -1272,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
               struct lockdep_map *next_instance, int read)
 {
        struct held_lock *prev;
+       struct held_lock *nest = NULL;
        int i;
 
        for (i = 0; i < curr->lockdep_depth; i++) {
                prev = curr->held_locks + i;
-               if (prev->class != next->class)
+
+               if (prev->instance == next->nest_lock)
+                       nest = prev;
+
+               if (hlock_class(prev) != hlock_class(next))
                        continue;
+
                /*
                 * Allow read-after-read recursion of the same
                 * lock class (i.e. read_lock(lock)+read_lock(lock)):
                 */
                if ((read == 2) && prev->read)
                        return 2;
+
+               /*
+                * We're holding the nest_lock, which serializes this lock's
+                * nesting behaviour.
+                */
+               if (nest)
+                       return 2;
+
                return print_deadlock_bug(curr, prev, next);
        }
        return 1;
@@ -1329,7 +1443,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         */
        check_source = next;
        check_target = prev;
-       if (!(check_noncircular(next->class, 0)))
+       if (!(check_noncircular(hlock_class(next), 0)))
                return print_circular_bug_tail();
 
        if (!check_prev_add_irq(curr, prev, next))
@@ -1353,8 +1467,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         *  chains - the second one will be new, but L1 already has
         *  L2 added to its dependency list, due to the first chain.)
         */
-       list_for_each_entry(entry, &prev->class->locks_after, entry) {
-               if (entry->class == next->class) {
+       list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+               if (entry->class == hlock_class(next)) {
                        if (distance == 1)
                                entry->distance = 1;
                        return 2;
@@ -1365,26 +1479,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         * Ok, all validations passed, add the new lock
         * to the previous lock's dependency list:
         */
-       ret = add_lock_to_list(prev->class, next->class,
-                              &prev->class->locks_after, next->acquire_ip, distance);
+       ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
+                              &hlock_class(prev)->locks_after,
+                              next->acquire_ip, distance);
 
        if (!ret)
                return 0;
 
-       ret = add_lock_to_list(next->class, prev->class,
-                              &next->class->locks_before, next->acquire_ip, distance);
+       ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
+                              &hlock_class(next)->locks_before,
+                              next->acquire_ip, distance);
        if (!ret)
                return 0;
 
        /*
         * Debugging printouts:
         */
-       if (verbose(prev->class) || verbose(next->class)) {
+       if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
                graph_unlock();
                printk("\n new dependency: ");
-               print_lock_name(prev->class);
+               print_lock_name(hlock_class(prev));
                printk(" => ");
-               print_lock_name(next->class);
+               print_lock_name(hlock_class(next));
                printk("\n");
                dump_stack();
                return graph_lock();
@@ -1481,7 +1597,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
                                     struct held_lock *hlock,
                                     u64 chain_key)
 {
-       struct lock_class *class = hlock->class;
+       struct lock_class *class = hlock_class(hlock);
        struct list_head *hash_head = chainhashentry(chain_key);
        struct lock_chain *chain;
        struct held_lock *hlock_curr, *hlock_next;
@@ -1554,7 +1670,7 @@ cache_hit:
        if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
                chain->base = cn;
                for (j = 0; j < chain->depth - 1; j++, i++) {
-                       int lock_id = curr->held_locks[i].class - lock_classes;
+                       int lock_id = curr->held_locks[i].class_idx - 1;
                        chain_hlocks[chain->base + j] = lock_id;
                }
                chain_hlocks[chain->base + j] = class - lock_classes;
@@ -1650,7 +1766,7 @@ static void check_chain_key(struct task_struct *curr)
                        WARN_ON(1);
                        return;
                }
-               id = hlock->class - lock_classes;
+               id = hlock->class_idx - 1;
                if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
                        return;
 
@@ -1695,7 +1811,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
        print_lock(this);
 
        printk("{%s} state was registered at:\n", usage_str[prev_bit]);
-       print_stack_trace(this->class->usage_traces + prev_bit, 1);
+       print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
        print_irqtrace_events(curr);
        printk("\nother info that might help us debug this:\n");
@@ -1714,7 +1830,7 @@ static inline int
 valid_state(struct task_struct *curr, struct held_lock *this,
            enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
 {
-       if (unlikely(this->class->usage_mask & (1 << bad_bit)))
+       if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
                return print_usage_bug(curr, this, bad_bit, new_bit);
        return 1;
 }
@@ -1753,7 +1869,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
        lockdep_print_held_locks(curr);
 
        printk("\nthe first lock's dependencies:\n");
-       print_lock_dependencies(this->class, 0);
+       print_lock_dependencies(hlock_class(this), 0);
 
        printk("\nthe second lock's dependencies:\n");
        print_lock_dependencies(other, 0);
@@ -1776,7 +1892,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 
        find_usage_bit = bit;
        /* fills in <forwards_match> */
-       ret = find_usage_forwards(this->class, 0);
+       ret = find_usage_forwards(hlock_class(this), 0);
        if (!ret || ret == 1)
                return ret;
 
@@ -1795,7 +1911,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
        find_usage_bit = bit;
        /* fills in <backwards_match> */
-       ret = find_usage_backwards(this->class, 0);
+       ret = find_usage_backwards(hlock_class(this), 0);
        if (!ret || ret == 1)
                return ret;
 
@@ -1861,7 +1977,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                                LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
                        return 0;
 #endif
-               if (hardirq_verbose(this->class))
+               if (hardirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
        case LOCK_USED_IN_SOFTIRQ:
@@ -1886,7 +2002,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                                LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
                        return 0;
 #endif
-               if (softirq_verbose(this->class))
+               if (softirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
        case LOCK_USED_IN_HARDIRQ_READ:
@@ -1899,7 +2015,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                if (!check_usage_forwards(curr, this,
                                          LOCK_ENABLED_HARDIRQS, "hard"))
                        return 0;
-               if (hardirq_verbose(this->class))
+               if (hardirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
        case LOCK_USED_IN_SOFTIRQ_READ:
@@ -1912,7 +2028,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                if (!check_usage_forwards(curr, this,
                                          LOCK_ENABLED_SOFTIRQS, "soft"))
                        return 0;
-               if (softirq_verbose(this->class))
+               if (softirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
        case LOCK_ENABLED_HARDIRQS:
@@ -1938,7 +2054,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                                   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
                        return 0;
 #endif
-               if (hardirq_verbose(this->class))
+               if (hardirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
        case LOCK_ENABLED_SOFTIRQS:
@@ -1964,7 +2080,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                                   LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
                        return 0;
 #endif
-               if (softirq_verbose(this->class))
+               if (softirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
        case LOCK_ENABLED_HARDIRQS_READ:
@@ -1979,7 +2095,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                                           LOCK_USED_IN_HARDIRQ, "hard"))
                        return 0;
 #endif
-               if (hardirq_verbose(this->class))
+               if (hardirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
        case LOCK_ENABLED_SOFTIRQS_READ:
@@ -1994,7 +2110,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                                           LOCK_USED_IN_SOFTIRQ, "soft"))
                        return 0;
 #endif
-               if (softirq_verbose(this->class))
+               if (softirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
        default:
@@ -2310,7 +2426,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
         * If already set then do not dirty the cacheline,
         * nor do any checks:
         */
-       if (likely(this->class->usage_mask & new_mask))
+       if (likely(hlock_class(this)->usage_mask & new_mask))
                return 1;
 
        if (!graph_lock())
@@ -2318,14 +2434,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
        /*
         * Make sure we didn't race:
         */
-       if (unlikely(this->class->usage_mask & new_mask)) {
+       if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
                graph_unlock();
                return 1;
        }
 
-       this->class->usage_mask |= new_mask;
+       hlock_class(this)->usage_mask |= new_mask;
 
-       if (!save_trace(this->class->usage_traces + new_bit))
+       if (!save_trace(hlock_class(this)->usage_traces + new_bit))
                return 0;
 
        switch (new_bit) {
@@ -2405,7 +2521,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                          int trylock, int read, int check, int hardirqs_off,
-                         unsigned long ip)
+                         struct lockdep_map *nest_lock, unsigned long ip)
 {
        struct task_struct *curr = current;
        struct lock_class *class = NULL;
@@ -2459,10 +2575,12 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                return 0;
 
        hlock = curr->held_locks + depth;
-
-       hlock->class = class;
+       if (DEBUG_LOCKS_WARN_ON(!class))
+               return 0;
+       hlock->class_idx = class - lock_classes + 1;
        hlock->acquire_ip = ip;
        hlock->instance = lock;
+       hlock->nest_lock = nest_lock;
        hlock->trylock = trylock;
        hlock->read = read;
        hlock->check = check;
@@ -2574,6 +2692,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
        return 1;
 }
 
+static int
+__lock_set_subclass(struct lockdep_map *lock,
+                   unsigned int subclass, unsigned long ip)
+{
+       struct task_struct *curr = current;
+       struct held_lock *hlock, *prev_hlock;
+       struct lock_class *class;
+       unsigned int depth;
+       int i;
+
+       depth = curr->lockdep_depth;
+       if (DEBUG_LOCKS_WARN_ON(!depth))
+               return 0;
+
+       prev_hlock = NULL;
+       for (i = depth-1; i >= 0; i--) {
+               hlock = curr->held_locks + i;
+               /*
+                * We must not cross into another context:
+                */
+               if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+                       break;
+               if (hlock->instance == lock)
+                       goto found_it;
+               prev_hlock = hlock;
+       }
+       return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+       class = register_lock_class(lock, subclass, 0);
+       hlock->class_idx = class - lock_classes + 1;
+
+       curr->lockdep_depth = i;
+       curr->curr_chain_key = hlock->prev_chain_key;
+
+       for (; i < depth; i++) {
+               hlock = curr->held_locks + i;
+               if (!__lock_acquire(hlock->instance,
+                       hlock_class(hlock)->subclass, hlock->trylock,
+                               hlock->read, hlock->check, hlock->hardirqs_off,
+                               hlock->nest_lock, hlock->acquire_ip))
+                       return 0;
+       }
+
+       if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+               return 0;
+       return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks in a
  * potentially non-nested (out of order) manner. This is a
@@ -2624,9 +2791,9 @@ found_it:
        for (i++; i < depth; i++) {
                hlock = curr->held_locks + i;
                if (!__lock_acquire(hlock->instance,
-                       hlock->class->subclass, hlock->trylock,
+                       hlock_class(hlock)->subclass, hlock->trylock,
                                hlock->read, hlock->check, hlock->hardirqs_off,
-                               hlock->acquire_ip))
+                               hlock->nest_lock, hlock->acquire_ip))
                        return 0;
        }
 
@@ -2669,7 +2836,7 @@ static int lock_release_nested(struct task_struct *curr,
 
 #ifdef CONFIG_DEBUG_LOCKDEP
        hlock->prev_chain_key = 0;
-       hlock->class = NULL;
+       hlock->class_idx = 0;
        hlock->acquire_ip = 0;
        hlock->irq_context = 0;
 #endif
@@ -2738,18 +2905,36 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
+void
+lock_set_subclass(struct lockdep_map *lock,
+                 unsigned int subclass, unsigned long ip)
+{
+       unsigned long flags;
+
+       if (unlikely(current->lockdep_recursion))
+               return;
+
+       raw_local_irq_save(flags);
+       current->lockdep_recursion = 1;
+       check_flags(flags);
+       if (__lock_set_subclass(lock, subclass, ip))
+               check_chain_key(current);
+       current->lockdep_recursion = 0;
+       raw_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(lock_set_subclass);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
  */
 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-                         int trylock, int read, int check, unsigned long ip)
+                         int trylock, int read, int check,
+                         struct lockdep_map *nest_lock, unsigned long ip)
 {
        unsigned long flags;
 
-       if (unlikely(!lock_stat && !prove_locking))
-               return;
-
        if (unlikely(current->lockdep_recursion))
                return;
 
@@ -2758,7 +2943,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
        current->lockdep_recursion = 1;
        __lock_acquire(lock, subclass, trylock, read, check,
-                      irqs_disabled_flags(flags), ip);
+                      irqs_disabled_flags(flags), nest_lock, ip);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
@@ -2770,9 +2955,6 @@ void lock_release(struct lockdep_map *lock, int nested,
 {
        unsigned long flags;
 
-       if (unlikely(!lock_stat && !prove_locking))
-               return;
-
        if (unlikely(current->lockdep_recursion))
                return;
 
@@ -2845,9 +3027,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 found_it:
        hlock->waittime_stamp = sched_clock();
 
-       point = lock_contention_point(hlock->class, ip);
+       point = lock_contention_point(hlock_class(hlock), ip);
 
-       stats = get_lock_stats(hlock->class);
+       stats = get_lock_stats(hlock_class(hlock));
        if (point < ARRAY_SIZE(stats->contention_point))
                stats->contention_point[point]++;
        if (lock->cpu != smp_processor_id())
@@ -2893,7 +3075,7 @@ found_it:
                hlock->holdtime_stamp = now;
        }
 
-       stats = get_lock_stats(hlock->class);
+       stats = get_lock_stats(hlock_class(hlock));
        if (waittime) {
                if (hlock->read)
                        lock_time_inc(&stats->read_waittime, waittime);
@@ -2988,6 +3170,7 @@ static void zap_class(struct lock_class *class)
        list_del_rcu(&class->hash_entry);
        list_del_rcu(&class->lock_entry);
 
+       class->key = NULL;
 }
 
 static inline int within(const void *addr, void *start, unsigned long size)
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index c3600a091a2874952e97c149f21080ede90ded95..55db193d366dcd8489290883f411c4938f45df0e 100644
@@ -17,9 +17,6 @@
  */
 #define MAX_LOCKDEP_ENTRIES    8192UL
 
-#define MAX_LOCKDEP_KEYS_BITS  11
-#define MAX_LOCKDEP_KEYS       (1UL << MAX_LOCKDEP_KEYS_BITS)
-
 #define MAX_LOCKDEP_CHAINS_BITS        14
 #define MAX_LOCKDEP_CHAINS     (1UL << MAX_LOCKDEP_CHAINS_BITS)
 
@@ -53,6 +50,9 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned long lockdep_count_forward_deps(struct lock_class *);
+extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * Various lockdep statistics:
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 9b0e940e2545efe99012dd1526ef79a4739f02d2..fa19aee604c28233822701ac5f86bcb288ffe239 100644
@@ -63,34 +63,6 @@ static void l_stop(struct seq_file *m, void *v)
 {
 }
 
-static unsigned long count_forward_deps(struct lock_class *class)
-{
-       struct lock_list *entry;
-       unsigned long ret = 1;
-
-       /*
-        * Recurse this class's dependency list:
-        */
-       list_for_each_entry(entry, &class->locks_after, entry)
-               ret += count_forward_deps(entry->class);
-
-       return ret;
-}
-
-static unsigned long count_backward_deps(struct lock_class *class)
-{
-       struct lock_list *entry;
-       unsigned long ret = 1;
-
-       /*
-        * Recurse this class's dependency list:
-        */
-       list_for_each_entry(entry, &class->locks_before, entry)
-               ret += count_backward_deps(entry->class);
-
-       return ret;
-}
-
 static void print_name(struct seq_file *m, struct lock_class *class)
 {
        char str[128];
@@ -124,10 +96,10 @@ static int l_show(struct seq_file *m, void *v)
 #ifdef CONFIG_DEBUG_LOCKDEP
        seq_printf(m, " OPS:%8ld", class->ops);
 #endif
-       nr_forward_deps = count_forward_deps(class);
+       nr_forward_deps = lockdep_count_forward_deps(class);
        seq_printf(m, " FD:%5ld", nr_forward_deps);
 
-       nr_backward_deps = count_backward_deps(class);
+       nr_backward_deps = lockdep_count_backward_deps(class);
        seq_printf(m, " BD:%5ld", nr_backward_deps);
 
        get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -229,6 +201,9 @@ static int lc_show(struct seq_file *m, void *v)
 
        for (i = 0; i < chain->depth; i++) {
                class = lock_chain_get_class(chain, i);
+               if (!class->key)
+                       continue;
+
                seq_printf(m, "[%p] ", class->key);
                print_name(m, class);
                seq_puts(m, "\n");
@@ -350,7 +325,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
                        nr_hardirq_read_unsafe++;
 
-               sum_forward_deps += count_forward_deps(class);
+               sum_forward_deps += lockdep_count_forward_deps(class);
        }
 #ifdef CONFIG_DEBUG_LOCKDEP
        DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
index 9a21681aa80f82f0123917725f341a526d484c89..e36d5798cbff427fca02fd8c9a8fb6f615dbd3fe 100644 (file)
@@ -289,21 +289,29 @@ void do_schedule_next_timer(struct siginfo *info)
                else
                        schedule_next_timer(timr);
 
-               info->si_overrun = timr->it_overrun_last;
+               info->si_overrun += timr->it_overrun_last;
        }
 
        if (timr)
                unlock_timer(timr, flags);
 }
 
-int posix_timer_event(struct k_itimer *timr,int si_private)
+int posix_timer_event(struct k_itimer *timr, int si_private)
 {
-       memset(&timr->sigq->info, 0, sizeof(siginfo_t));
+       /*
+        * FIXME: if ->sigq is queued we can race with
+        * dequeue_signal()->do_schedule_next_timer().
+        *
+        * If dequeue_signal() sees the "right" value of
+        * si_sys_private it calls do_schedule_next_timer().
+        * We re-queue ->sigq and drop ->it_lock().
+        * do_schedule_next_timer() locks the timer
+        * and re-schedules it while ->sigq is pending.
+        * Not really bad, but not what we want.
+        */
        timr->sigq->info.si_sys_private = si_private;
-       /* Send signal to the process that owns this timer.*/
 
        timr->sigq->info.si_signo = timr->it_sigev_signo;
-       timr->sigq->info.si_errno = 0;
        timr->sigq->info.si_code = SI_TIMER;
        timr->sigq->info.si_tid = timr->it_id;
        timr->sigq->info.si_value = timr->it_sigev_value;
@@ -435,6 +443,7 @@ static struct k_itimer * alloc_posix_timer(void)
                kmem_cache_free(posix_timers_cache, tmr);
-               tmr = NULL;
+               return NULL;
        }
+       memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
        return tmr;
 }
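Note that the allocation-path memset has to land after a successful sigqueue_alloc(): placed behind the failure branch with tmr set to NULL, it would dereference a NULL tmr->sigq, hence the early return above. The overrun handling itself (accumulate in do_schedule_next_timer(), reset in the send_sigqueue() hunk further down) is what userspace observes through timer_getoverrun(). A self-contained demo of that semantic, offered as an illustration rather than part of the patch; build with `cc -o overrun overrun.c -lrt`:

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    int main(void)
    {
        sigset_t set;
        siginfo_t si;
        timer_t tid;
        struct sigevent sev;
        struct itimerspec its;
        struct timespec nap = { 0, 50 * 1000 * 1000 };  /* ~50ms */

        /* keep the signal blocked so expirations coalesce into one */
        sigemptyset(&set);
        sigaddset(&set, SIGRTMIN);
        sigprocmask(SIG_BLOCK, &set, NULL);

        memset(&sev, 0, sizeof(sev));
        sev.sigev_notify = SIGEV_SIGNAL;
        sev.sigev_signo = SIGRTMIN;
        if (timer_create(CLOCK_MONOTONIC, &sev, &tid))
            return 1;

        memset(&its, 0, sizeof(its));
        its.it_value.tv_nsec = 1000000;     /* first expiry after 1ms */
        its.it_interval.tv_nsec = 1000000;  /* then every 1ms */
        timer_settime(tid, 0, &its, NULL);

        nanosleep(&nap, NULL);              /* dozens of expirations pile up */

        sigwaitinfo(&set, &si);             /* consume the one pending signal */
        printf("overrun: %d\n", timer_getoverrun(tid));
        return 0;
    }

The hunks above make the kernel-side count accumulate while the signal stays queued and reset it on a fresh send, which is what this program expects to see.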
 
index 04160d277e7aeafe5b34e58da1bcb5a786b1696d..d601fb0406caefd39cb4eaf2dca25105f2ce3f0c 100644 (file)
@@ -600,7 +600,6 @@ struct rq {
        /* BKL stats */
        unsigned int bkl_count;
 #endif
-       struct lock_class_key rq_lock_key;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -834,7 +833,7 @@ static inline u64 global_rt_period(void)
 
 static inline u64 global_rt_runtime(void)
 {
-       if (sysctl_sched_rt_period < 0)
+       if (sysctl_sched_rt_runtime < 0)
                return RUNTIME_INF;
 
        return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
        } else {
                if (rq1 < rq2) {
                        spin_lock(&rq1->lock);
-                       spin_lock(&rq2->lock);
+                       spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
                } else {
                        spin_lock(&rq2->lock);
-                       spin_lock(&rq1->lock);
+                       spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
                }
        }
        update_rq_clock(rq1);
@@ -2805,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
                if (busiest < this_rq) {
                        spin_unlock(&this_rq->lock);
                        spin_lock(&busiest->lock);
-                       spin_lock(&this_rq->lock);
+                       spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
                        ret = 1;
                } else
-                       spin_lock(&busiest->lock);
+                       spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
        }
        return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+       __releases(busiest->lock)
+{
+       spin_unlock(&busiest->lock);
+       lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3637,7 +3643,7 @@ redo:
                ld_moved = move_tasks(this_rq, this_cpu, busiest,
                                        imbalance, sd, CPU_NEWLY_IDLE,
                                        &all_pinned);
-               spin_unlock(&busiest->lock);
+               double_unlock_balance(this_rq, busiest);
 
                if (unlikely(all_pinned)) {
                        cpu_clear(cpu_of(busiest), *cpus);
@@ -3752,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
                else
                        schedstat_inc(sd, alb_failed);
        }
-       spin_unlock(&target_rq->lock);
+       double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -8000,7 +8006,6 @@ void __init sched_init(void)
 
                rq = cpu_rq(i);
                spin_lock_init(&rq->lock);
-               lockdep_set_class(&rq->lock, &rq->rq_lock_key);
                rq->nr_running = 0;
                init_cfs_rq(&rq->cfs, rq);
                init_rt_rq(&rq->rt, rq);
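A note on what these hunks annotate: with the per-rq rq_lock_key gone, every runqueue lock sits in a single lockdep class, and double_rq_lock() documents its deadlock avoidance (always lock the lower address first) via spin_lock_nested(). The idiom itself, as a hedged userspace sketch with invented names:

    #include <pthread.h>
    #include <stdio.h>

    /* always take the lower-addressed lock first; two threads locking the
     * same pair in opposite argument order then can never deadlock */
    static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        if (a == b) {
            pthread_mutex_lock(a);
        } else if (a < b) {
            pthread_mutex_lock(a);
            pthread_mutex_lock(b);
        } else {
            pthread_mutex_lock(b);
            pthread_mutex_lock(a);
        }
    }

    static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        pthread_mutex_unlock(a);
        if (b != a)
            pthread_mutex_unlock(b);
    }

    int main(void)
    {
        pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

        lock_pair(&m1, &m2);    /* same order no matter how args are passed */
        unlock_pair(&m1, &m2);
        puts("ok");
        return 0;
    }

Unlock order is irrelevant for deadlock avoidance, which is why the new double_unlock_balance() only drops busiest->lock and then re-annotates this_rq's lock back to subclass 0 with lock_set_subclass().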
index 22ed55d1167f3b4aa2f1c820ab78f97842f2df1b..204991a0bfa7d707e6da0e5c27b593b81b42ddb8 100644 (file)
 #include <linux/ktime.h>
 #include <linux/module.h>
 
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ * This is the default implementation.
+ * Architectures and sub-architectures can override this.
+ */
+unsigned long long __attribute__((weak)) sched_clock(void)
+{
+       return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+}
 
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static __read_mostly int sched_clock_running;
 
-#define MULTI_SHIFT 15
-/* Max is double, Min is 1/2 */
-#define MAX_MULTI (2LL << MULTI_SHIFT)
-#define MIN_MULTI (1LL << (MULTI_SHIFT-1))
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 
 struct sched_clock_data {
        /*
@@ -49,14 +55,9 @@ struct sched_clock_data {
        raw_spinlock_t          lock;
 
        unsigned long           tick_jiffies;
-       u64                     prev_raw;
        u64                     tick_raw;
        u64                     tick_gtod;
        u64                     clock;
-       s64                     multi;
-#ifdef CONFIG_NO_HZ
-       int                     check_max;
-#endif
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
@@ -71,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
        return &per_cpu(sched_clock_data, cpu);
 }
 
-static __read_mostly int sched_clock_running;
-
 void sched_clock_init(void)
 {
        u64 ktime_now = ktime_to_ns(ktime_get());
@@ -84,90 +83,39 @@ void sched_clock_init(void)
 
                scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                scd->tick_jiffies = now_jiffies;
-               scd->prev_raw = 0;
                scd->tick_raw = 0;
                scd->tick_gtod = ktime_now;
                scd->clock = ktime_now;
-               scd->multi = 1 << MULTI_SHIFT;
-#ifdef CONFIG_NO_HZ
-               scd->check_max = 1;
-#endif
        }
 
        sched_clock_running = 1;
 }
 
-#ifdef CONFIG_NO_HZ
-/*
- * The dynamic ticks makes the delta jiffies inaccurate. This
- * prevents us from checking the maximum time update.
- * Disable the maximum check during stopped ticks.
- */
-void sched_clock_tick_stop(int cpu)
-{
-       struct sched_clock_data *scd = cpu_sdc(cpu);
-
-       scd->check_max = 0;
-}
-
-void sched_clock_tick_start(int cpu)
-{
-       struct sched_clock_data *scd = cpu_sdc(cpu);
-
-       scd->check_max = 1;
-}
-
-static int check_max(struct sched_clock_data *scd)
-{
-       return scd->check_max;
-}
-#else
-static int check_max(struct sched_clock_data *scd)
-{
-       return 1;
-}
-#endif /* CONFIG_NO_HZ */
-
 /*
  * update the percpu scd from the raw @now value
  *
  *  - filter out backward motion
  *  - use jiffies to generate a min,max window to clip the raw values
  */
-static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
+static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 {
        unsigned long now_jiffies = jiffies;
        long delta_jiffies = now_jiffies - scd->tick_jiffies;
        u64 clock = scd->clock;
        u64 min_clock, max_clock;
-       s64 delta = now - scd->prev_raw;
+       s64 delta = now - scd->tick_raw;
 
        WARN_ON_ONCE(!irqs_disabled());
-
-       /*
-        * At schedule tick the clock can be just under the gtod. We don't
-        * want to push it too prematurely.
-        */
-       min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
-       if (min_clock > TICK_NSEC)
-               min_clock -= TICK_NSEC / 2;
+       min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
 
        if (unlikely(delta < 0)) {
                clock++;
                goto out;
        }
 
-       /*
-        * The clock must stay within a jiffie of the gtod.
-        * But since we may be at the start of a jiffy or the end of one
-        * we add another jiffy buffer.
-        */
-       max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;
-
-       delta *= scd->multi;
-       delta >>= MULTI_SHIFT;
+       max_clock = min_clock + TICK_NSEC;
 
-       if (unlikely(clock + delta > max_clock) && check_max(scd)) {
+       if (unlikely(clock + delta > max_clock)) {
                if (clock < max_clock)
                        clock = max_clock;
                else
@@ -180,12 +128,10 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *tim
        if (unlikely(clock < min_clock))
                clock = min_clock;
 
-       if (time)
-               *time = clock;
-       else {
-               scd->prev_raw = now;
-               scd->clock = clock;
-       }
+       scd->tick_jiffies = now_jiffies;
+       scd->clock = clock;
+
+       return clock;
 }
 
 static void lock_double_clock(struct sched_clock_data *data1,
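The rewrite above drops the multiplier machinery entirely: __update_sched_clock() now applies the raw delta directly and clips the result into a one-tick window above gtod. A runnable restatement of just that clamp, hedged as a userspace sketch with invented names:

    #include <stdio.h>

    typedef unsigned long long u64;
    typedef long long s64;

    /* mirror of the new clamp: never move backwards, never leave the
     * [min_clock, min_clock + TICK_NSEC] window around the gtod tick */
    static u64 clamp_clock(u64 clock, s64 delta, u64 min_clock, u64 max_clock)
    {
        if (delta < 0)
            return clock + 1;       /* filter backward motion */

        if (clock + delta > max_clock) {
            if (clock < max_clock)
                clock = max_clock;
            else
                clock++;            /* already past the window: inch forward */
        } else {
            clock += delta;
        }

        if (clock < min_clock)
            clock = min_clock;      /* don't lag behind the tick */
        return clock;
    }

    int main(void)
    {
        /* a wild 10s jump in the raw clock is clipped to the 1ms window */
        printf("%llu\n", clamp_clock(1000, 10000000000LL, 1000, 1001000));
        return 0;
    }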
@@ -203,7 +149,7 @@ static void lock_double_clock(struct sched_clock_data *data1,
 u64 sched_clock_cpu(int cpu)
 {
        struct sched_clock_data *scd = cpu_sdc(cpu);
-       u64 now, clock;
+       u64 now, clock, this_clock, remote_clock;
 
        if (unlikely(!sched_clock_running))
                return 0ull;
@@ -212,43 +158,44 @@ u64 sched_clock_cpu(int cpu)
        now = sched_clock();
 
        if (cpu != raw_smp_processor_id()) {
-               /*
-                * in order to update a remote cpu's clock based on our
-                * unstable raw time rebase it against:
-                *   tick_raw           (offset between raw counters)
-                *   tick_gotd          (tick offset between cpus)
-                */
                struct sched_clock_data *my_scd = this_scd();
 
                lock_double_clock(scd, my_scd);
 
-               now -= my_scd->tick_raw;
-               now += scd->tick_raw;
+               this_clock = __update_sched_clock(my_scd, now);
+               remote_clock = scd->clock;
 
-               now += my_scd->tick_gtod;
-               now -= scd->tick_gtod;
+               /*
+                * Use the opportunity that we have both locks
+                * taken to couple the two clocks: we take the
+                * larger time as the latest time for both
+                * runqueues. (this creates monotonic movement)
+                */
+               if (likely(remote_clock < this_clock)) {
+                       clock = this_clock;
+                       scd->clock = clock;
+               } else {
+                       /*
+                        * Should be rare, but possible:
+                        */
+                       clock = remote_clock;
+                       my_scd->clock = remote_clock;
+               }
 
                __raw_spin_unlock(&my_scd->lock);
-
-               __update_sched_clock(scd, now, &clock);
-
-               __raw_spin_unlock(&scd->lock);
-
        } else {
                __raw_spin_lock(&scd->lock);
-               __update_sched_clock(scd, now, NULL);
-               clock = scd->clock;
-               __raw_spin_unlock(&scd->lock);
+               clock = __update_sched_clock(scd, now);
        }
 
+       __raw_spin_unlock(&scd->lock);
+
        return clock;
 }
 
 void sched_clock_tick(void)
 {
        struct sched_clock_data *scd = this_scd();
-       unsigned long now_jiffies = jiffies;
-       s64 mult, delta_gtod, delta_raw;
        u64 now, now_gtod;
 
        if (unlikely(!sched_clock_running))
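The remote-cpu path above no longer rebases raw counters; it updates the local clock and then couples the two by publishing the larger value, a simple way to keep observed time monotonic across runqueues. The rule in isolation, as a small illustrative sketch (names invented):

    #include <stdio.h>

    typedef unsigned long long u64;

    /* with both per-cpu clocks held, publish the larger value to both so
     * time never appears to run backwards when read from another cpu */
    static u64 couple_clocks(u64 *this_clock, u64 *remote_clock)
    {
        u64 clock = *this_clock > *remote_clock ? *this_clock : *remote_clock;

        *this_clock = clock;
        *remote_clock = clock;
        return clock;
    }

    int main(void)
    {
        u64 a = 100, b = 97;

        couple_clocks(&a, &b);
        printf("%llu %llu\n", a, b);    /* 100 100: the laggard jumps forward */
        return 0;
    }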
@@ -260,29 +207,14 @@ void sched_clock_tick(void)
        now = sched_clock();
 
        __raw_spin_lock(&scd->lock);
-       __update_sched_clock(scd, now, NULL);
+       __update_sched_clock(scd, now);
        /*
         * update tick_gtod after __update_sched_clock() because that will
         * already observe 1 new jiffy; adding a new tick_gtod to that would
         * increase the clock 2 jiffies.
         */
-       delta_gtod = now_gtod - scd->tick_gtod;
-       delta_raw = now - scd->tick_raw;
-
-       if ((long)delta_raw > 0) {
-               mult = delta_gtod << MULTI_SHIFT;
-               do_div(mult, delta_raw);
-               scd->multi = mult;
-               if (scd->multi > MAX_MULTI)
-                       scd->multi = MAX_MULTI;
-               else if (scd->multi < MIN_MULTI)
-                       scd->multi = MIN_MULTI;
-       } else
-               scd->multi = 1 << MULTI_SHIFT;
-
        scd->tick_raw = now;
        scd->tick_gtod = now_gtod;
-       scd->tick_jiffies = now_jiffies;
        __raw_spin_unlock(&scd->lock);
 }
 
@@ -301,7 +233,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
 void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
        struct sched_clock_data *scd = this_scd();
-       u64 now = sched_clock();
 
        /*
         * Override the previous timestamp and ignore all
@@ -310,27 +241,30 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
         * rq clock:
         */
        __raw_spin_lock(&scd->lock);
-       scd->prev_raw = now;
        scd->clock += delta_ns;
-       scd->multi = 1 << MULTI_SHIFT;
        __raw_spin_unlock(&scd->lock);
 
        touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#endif
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- * This is default implementation.
- * Architectures and sub-architectures can override this.
- */
-unsigned long long __attribute__((weak)) sched_clock(void)
+void sched_clock_init(void)
 {
-       return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+       sched_clock_running = 1;
 }
 
+u64 sched_clock_cpu(int cpu)
+{
+       if (unlikely(!sched_clock_running))
+               return 0;
+
+       return sched_clock();
+}
+
+#endif
+
 unsigned long long cpu_clock(int cpu)
 {
        unsigned long long clock;
index cf2cd6ce4cb25ad2bedc59b94205b33b24f8a9e9..fb8994c6d4bb4bbe90a71f89341baee3cc6e9806 100644 (file)
@@ -899,7 +899,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
                 * doesn't make sense. Rely on vruntime for fairness.
                 */
                if (rq->curr != p)
-                       delta = max(10000LL, delta);
+                       delta = max_t(s64, 10000LL, delta);
 
                hrtick_start(rq, delta);
        }
@@ -1442,18 +1442,23 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
        struct task_struct *p = NULL;
        struct sched_entity *se;
 
-       while (next != &cfs_rq->tasks) {
+       if (next == &cfs_rq->tasks)
+               return NULL;
+
+       /* Skip over entities that are not tasks */
+       do {
                se = list_entry(next, struct sched_entity, group_node);
                next = next->next;
+       } while (next != &cfs_rq->tasks && !entity_is_task(se));
 
-               /* Skip over entities that are not tasks */
-               if (entity_is_task(se)) {
-                       p = task_of(se);
-                       break;
-               }
-       }
+       if (next == &cfs_rq->tasks)
+               return NULL;
 
        cfs_rq->balance_iterator = next;
+
+       if (entity_is_task(se))
+               p = task_of(se);
+
        return p;
 }
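The restructured iterator consumes at least one list node per call and skips entities that are not tasks. A self-contained sketch of the same skip-and-advance pattern over a plain singly linked list (types and names invented; the kernel version additionally records the resume position in cfs_rq->balance_iterator):

    #include <stdio.h>

    struct entity { struct entity *next; int is_task; const char *name; };

    /* return the first task at or after *pos, advancing *pos past it;
     * NULL once the end marker is reached without finding one */
    static struct entity *next_task(struct entity **pos, struct entity *end)
    {
        struct entity *e;

        if (*pos == end)
            return NULL;

        do {
            e = *pos;
            *pos = e->next;
        } while (*pos != end && !e->is_task);

        return e->is_task ? e : NULL;
    }

    int main(void)
    {
        struct entity end = { NULL, 0, NULL };
        struct entity c = { &end, 1, "C" };
        struct entity b = { &c, 0, "B" };   /* group entity: skipped */
        struct entity a = { &b, 1, "A" };
        struct entity *pos = &a, *e;

        while ((e = next_task(&pos, &end)))
            printf("%s\n", e->name);        /* prints A then C */
        return 0;
    }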
 
index 908c04f9dad02d23df66fbc55a28f5619fc10081..6163e4cf885b90075fd2734427fac4f3f24eae1f 100644 (file)
@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                        break;
 
                /* try again */
-               spin_unlock(&lowest_rq->lock);
+               double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
        }
 
@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
 
        resched_task(lowest_rq->curr);
 
-       spin_unlock(&lowest_rq->lock);
+       double_unlock_balance(rq, lowest_rq);
 
        ret = 1;
 out:
@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
 
                }
  skip:
-               spin_unlock(&src_rq->lock);
+               double_unlock_balance(this_rq, src_rq);
        }
 
        return ret;
index 954f77d7e3bc2368f405fa2ab83aaf58d6280fb3..c539f60c6f41bc2b9e170b93bf66a8bca5e93ce3 100644 (file)
@@ -1304,6 +1304,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
                q->info.si_overrun++;
                goto out;
        }
+       q->info.si_overrun = 0;
 
        signalfd_notify(t, sig);
        pending = group ? &t->signal->shared_pending : &t->pending;
index 96fc7c0edc59d1f09ca56a500d90b0a8a212c7d7..e6084f6efb4d70210889c9dbb3e6a02931acc784 100644 (file)
@@ -260,6 +260,41 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
        generic_exec_single(cpu, data);
 }
 
+/* Dummy function */
+static void quiesce_dummy(void *unused)
+{
+}
+
+/*
+ * Ensure stack based data used in call function mask is safe to free.
+ *
+ * This is needed by smp_call_function_mask when using on-stack data, because
+ * a single call function queue is shared by all CPUs, and any CPU may pick up
+ * the data item on the queue at any time before it is deleted. So we need to
+ * ensure that all CPUs have transitioned through a quiescent state after
+ * this call.
+ *
+ * This is a very slow function, implemented by sending synchronous IPIs to
+ * all possible CPUs. For this reason, we have to alloc data rather than use
+ * stack based data even in the case of synchronous calls. The stack based
+ * data is then just used for deadlock/oom fallback which will be very rare.
+ *
+ * If a faster scheme can be made, we could go back to preferring stack based
+ * data -- the data allocation/free is non-zero cost.
+ */
+static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
+{
+       struct call_single_data data;
+       int cpu;
+
+       data.func = quiesce_dummy;
+       data.info = NULL;
+       data.flags = CSD_FLAG_WAIT;
+
+       for_each_cpu_mask(cpu, mask)
+               generic_exec_single(cpu, &data);
+}
+
 /**
  * smp_call_function_mask(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on.
@@ -285,6 +320,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
        cpumask_t allbutself;
        unsigned long flags;
        int cpu, num_cpus;
+       int slowpath = 0;
 
        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());
@@ -306,15 +342,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
                return smp_call_function_single(cpu, func, info, wait);
        }
 
-       if (!wait) {
-               data = kmalloc(sizeof(*data), GFP_ATOMIC);
-               if (data)
-                       data->csd.flags = CSD_FLAG_ALLOC;
-       }
-       if (!data) {
+       data = kmalloc(sizeof(*data), GFP_ATOMIC);
+       if (data) {
+               data->csd.flags = CSD_FLAG_ALLOC;
+               if (wait)
+                       data->csd.flags |= CSD_FLAG_WAIT;
+       } else {
                data = &d;
                data->csd.flags = CSD_FLAG_WAIT;
                wait = 1;
+               slowpath = 1;
        }
 
        spin_lock_init(&data->lock);
@@ -331,8 +368,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
        arch_send_call_function_ipi(mask);
 
        /* optionally wait for the CPUs to complete */
-       if (wait)
+       if (wait) {
                csd_flag_wait(&data->csd);
+               if (unlikely(slowpath))
+                       smp_call_function_mask_quiesce_stack(allbutself);
+       }
 
        return 0;
 }
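The comment block above explains the policy change; reduced to its shape, the call site now prefers heap data (freed by the receiver) and treats on-stack data as a forced-synchronous OOM fallback that must be quiesced before it goes out of scope. A hedged userspace outline of that shape, with the IPI machinery stubbed out and every name invented:

    #include <stdlib.h>

    struct msg { int flags; int payload; };
    #define MSG_ALLOC 1     /* receiver frees it */
    #define MSG_WAIT  2     /* sender must wait for completion */

    static void deliver(struct msg *m)   { (void)m; /* stand-in for the IPI path */ }
    static void wait_done(struct msg *m) { (void)m; /* stand-in for csd_flag_wait() */ }

    static void post(int payload, int wait)
    {
        struct msg stack_msg;
        struct msg *m = malloc(sizeof(*m));
        int slowpath = 0;

        if (m) {
            m->flags = MSG_ALLOC | (wait ? MSG_WAIT : 0);
        } else {
            /* OOM fallback: stack data forces a synchronous call */
            m = &stack_msg;
            m->flags = MSG_WAIT;
            wait = 1;
            slowpath = 1;
        }
        m->payload = payload;

        deliver(m);
        if (wait) {
            wait_done(m);
            if (slowpath) {
                /* a peer that peeked at the queue may still hold a
                 * reference: quiesce before stack_msg goes away */
            }
        }
    }

    int main(void) { post(42, 1); return 0; }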
index a1fb54c93cdd2381f23573748852a4a105e8ddd3..44baeea94ab906b06ff87d6bf936f8a19d71915d 100644 (file)
@@ -292,6 +292,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 }
 
 EXPORT_SYMBOL(_spin_lock_nested);
+
 unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 {
        unsigned long flags;
@@ -314,6 +315,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
 
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+                                    struct lockdep_map *nest_lock)
+{
+       preempt_disable();
+       spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+       LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+EXPORT_SYMBOL(_spin_lock_nest_lock);
+
 #endif
 
 void __lockfunc _spin_unlock(spinlock_t *lock)
index 825b4c00fe4436a1921d4f57a6cb6d9e37510d3d..f5da526424a9b7c5dcaf965cbf00a49f09642eb0 100644 (file)
@@ -289,7 +289,6 @@ void tick_nohz_stop_sched_tick(int inidle)
                        ts->tick_stopped = 1;
                        ts->idle_jiffies = last_jiffies;
                        rcu_enter_nohz();
-                       sched_clock_tick_stop(cpu);
                }
 
                /*
@@ -392,7 +391,6 @@ void tick_nohz_restart_sched_tick(void)
        select_nohz_load_balancer(0);
        now = ktime_get();
        tick_do_update_jiffies64(now);
-       sched_clock_tick_start(cpu);
        cpu_clear(cpu, nohz_cpu_mask);
 
        /*
index 4a26a1382df05febed4a94bce11be2bc2a46b648..4048e92aa04f21e7e105563fac7bf24e1be5411f 100644 (file)
@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
-               lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-               lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+               lock_map_acquire(&cwq->wq->lockdep_map);
+               lock_map_acquire(&lockdep_map);
                f(work);
-               lock_release(&lockdep_map, 1, _THIS_IP_);
-               lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+               lock_map_release(&lockdep_map);
+               lock_map_release(&cwq->wq->lockdep_map);
 
                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
        int cpu;
 
        might_sleep();
-       lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-       lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+       lock_map_acquire(&wq->lockdep_map);
+       lock_map_release(&wq->lockdep_map);
        for_each_cpu_mask_nr(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
        if (!cwq)
                return 0;
 
-       lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-       lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+       lock_map_acquire(&cwq->wq->lockdep_map);
+       lock_map_release(&cwq->wq->lockdep_map);
 
        prev = NULL;
        spin_lock_irq(&cwq->lock);
@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
 
        might_sleep();
 
-       lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-       lock_release(&work->lockdep_map, 1, _THIS_IP_);
+       lock_map_acquire(&work->lockdep_map);
+       lock_map_release(&work->lockdep_map);
 
        cwq = get_wq_data(work);
        if (!cwq)
@@ -872,8 +872,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
        if (cwq->thread == NULL)
                return;
 
-       lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-       lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+       lock_map_acquire(&cwq->wq->lockdep_map);
+       lock_map_release(&cwq->wq->lockdep_map);
 
        flush_cpu_workqueue(cwq);
        /*
index 0ef01d14727c03c9861175e010e6f7c75c0d693f..0218b4693dd8c250f83c741bf6ffbfbbca31f94b 100644 (file)
@@ -8,6 +8,7 @@
  *
  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
+#include <linux/kernel.h>
 #include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
@@ -37,6 +38,7 @@ int debug_locks_off(void)
 {
        if (xchg(&debug_locks, 0)) {
                if (!debug_locks_silent) {
+                       oops_in_progress = 1;
                        console_verbose();
                        return 1;
                }
index 971d0eda754a539d73d669fa8d91bd2459f10715..339cf5c4d5d8c3a82b04cf57fa6b973f5ad75408 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2273,14 +2273,14 @@ int install_special_mapping(struct mm_struct *mm,
 
 static DEFINE_MUTEX(mm_all_locks_mutex);
 
-static void vm_lock_anon_vma(struct anon_vma *anon_vma)
+static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 {
        if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
                /*
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               spin_lock(&anon_vma->lock);
+               spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
                /*
                 * We can safely modify head.next after taking the
                 * anon_vma->lock. If some other vma in this mm shares
@@ -2296,7 +2296,7 @@ static void vm_lock_anon_vma(struct anon_vma *anon_vma)
        }
 }
 
-static void vm_lock_mapping(struct address_space *mapping)
+static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 {
        if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
                /*
@@ -2310,7 +2310,7 @@ static void vm_lock_mapping(struct address_space *mapping)
                 */
                if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
                        BUG();
-               spin_lock(&mapping->i_mmap_lock);
+               spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
        }
 }
 
@@ -2358,11 +2358,17 @@ int mm_take_all_locks(struct mm_struct *mm)
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (signal_pending(current))
                        goto out_unlock;
-               if (vma->anon_vma)
-                       vm_lock_anon_vma(vma->anon_vma);
                if (vma->vm_file && vma->vm_file->f_mapping)
-                       vm_lock_mapping(vma->vm_file->f_mapping);
+                       vm_lock_mapping(mm, vma->vm_file->f_mapping);
+       }
+
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               if (signal_pending(current))
+                       goto out_unlock;
+               if (vma->anon_vma)
+                       vm_lock_anon_vma(mm, vma->anon_vma);
        }
+
        ret = 0;
 
 out_unlock:
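The reordering above splits mm_take_all_locks() into two clean phases, every i_mmap lock before any anon_vma lock, so lockdep (through the spin_lock_nest_lock() annotation validated against mmap_sem) sees at most one transition between the two classes. A hedged userspace sketch of that two-phase, one-outer-lock pattern, all names invented:

    #include <pthread.h>
    #include <stdio.h>

    #define N 4

    static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t class_a[N], class_b[N];  /* i_mmap / anon_vma stand-ins */

    static void init_locks(void)
    {
        int i;

        for (i = 0; i < N; i++) {
            pthread_mutex_init(&class_a[i], NULL);
            pthread_mutex_init(&class_b[i], NULL);
        }
    }

    static void take_all_locks(void)
    {
        int i;

        pthread_mutex_lock(&outer);     /* plays the role of mmap_sem */
        for (i = 0; i < N; i++)         /* phase 1: all of class A */
            pthread_mutex_lock(&class_a[i]);
        for (i = 0; i < N; i++)         /* phase 2: all of class B */
            pthread_mutex_lock(&class_b[i]);
    }

    static void drop_all_locks(void)
    {
        int i;

        for (i = 0; i < N; i++)
            pthread_mutex_unlock(&class_b[i]);
        for (i = 0; i < N; i++)
            pthread_mutex_unlock(&class_a[i]);
        pthread_mutex_unlock(&outer);
    }

    int main(void)
    {
        init_locks();
        take_all_locks();
        drop_all_locks();
        puts("ok");
        return 0;
    }

Holding one outer lock for the whole sequence is what lets the many same-class inner acquisitions be declared deadlock-free, rather than exhausting lockdep's subclass depth.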