From 55915a89c85409f000e6320ec242951c69c683e0 Mon Sep 17 00:00:00 2001
From: Maximilian Attems
Date: Thu, 24 Sep 2009 18:32:14 +0000
Subject: [PATCH] add stable 2.6.31.1

should make it suck less..

svn path=/dists/trunk/linux-2.6/; revision=14288
---
 debian/changelog | 1 +
 .../patches/bugfix/all/stable/2.6.31.1.patch | 2490 +++++++++++++++++
 debian/patches/series/base | 1 +
 3 files changed, 2492 insertions(+)
 create mode 100644 debian/patches/bugfix/all/stable/2.6.31.1.patch

diff --git a/debian/changelog b/debian/changelog
index 56e2a544a..585a01565 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -59,6 +59,7 @@ linux-2.6 (2.6.31-1~experimental.1) UNRELEASED; urgency=low
 * [ppc, sparc] Enable EFI_PARTITION. (closes: #540486)
 * Disable old USB_DEVICE_CLASS. (Closes: #510279)
 * Drop yaird initramfs generator support.
+ * Add stable release 2.6.31.1.

 [ Ben Hutchings ]
 * mga: remove unnecessary change from firmware-loading patch

diff --git a/debian/patches/bugfix/all/stable/2.6.31.1.patch b/debian/patches/bugfix/all/stable/2.6.31.1.patch
new file mode 100644
index 000000000..70618abfc
--- /dev/null
+++ b/debian/patches/bugfix/all/stable/2.6.31.1.patch
@@ -0,0 +1,2490 @@
+diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
+index a34954d..73cae57 100644
+--- a/arch/arm/mm/highmem.c
++++ b/arch/arm/mm/highmem.c
+@@ -40,11 +40,16 @@ void *kmap_atomic(struct page *page, enum km_type type)
+ {
+ unsigned int idx;
+ unsigned long vaddr;
++ void *kmap;
+
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
++ kmap = kmap_high_get(page);
++ if (kmap)
++ return kmap;
++
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+@@ -80,6 +85,9 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
+ #else
+ (void) idx; /* to kill a warning */
+ #endif
++ } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
++ /* this address was obtained through kmap_high_get() */
++ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+ }
+ pagefault_enable();
+ }
+diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
+index 98c104a..edab67e 100644
+--- a/arch/powerpc/include/asm/mmu-hash64.h
++++ b/arch/powerpc/include/asm/mmu-hash64.h
+@@ -41,6 +41,7 @@ extern char initial_stab[];
+
+ #define SLB_NUM_BOLTED 3
+ #define SLB_CACHE_ENTRIES 8
++#define SLB_MIN_SIZE 32
+
+ /* Bits in the SLB ESID word */
+ #define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */
+@@ -296,6 +297,7 @@ extern void slb_flush_and_rebolt(void);
+ extern void stab_initialize(unsigned long stab);
+
+ extern void slb_vmalloc_update(void);
++extern void slb_set_size(u16 size);
+ #endif /* __ASSEMBLY__ */
+
+ /*
+diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
+index d6a616a..ccc68b5 100644
+--- a/arch/powerpc/include/asm/pmc.h
++++ b/arch/powerpc/include/asm/pmc.h
+@@ -27,10 +27,22 @@ extern perf_irq_t perf_irq;
+
+ int reserve_pmc_hardware(perf_irq_t new_perf_irq);
+ void release_pmc_hardware(void);
++void ppc_enable_pmcs(void);
+
+ #ifdef CONFIG_PPC64
+-void power4_enable_pmcs(void);
+-void pasemi_enable_pmcs(void);
++#include <asm/lppaca.h>
++
++static inline void ppc_set_pmu_inuse(int inuse)
++{
++ get_lppaca()->pmcregs_in_use = inuse;
++}
++
++extern void power4_enable_pmcs(void);
++
++#else /* CONFIG_PPC64 */
++
++static inline void ppc_set_pmu_inuse(int inuse) { }
++
+ #endif
+
+ #endif /* __KERNEL__ */
+diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
+index 2419cc7..ed0ac4e 100644
+--- a/arch/powerpc/kernel/lparcfg.c
++++ b/arch/powerpc/kernel/lparcfg.c
+@@ -35,6 +35,7 @@
+ #include
+ #include
+ #include
++#include <asm/mmu.h>
+
+ #define MODULE_VERS "1.8"
+ #define MODULE_NAME "lparcfg"
+@@ -537,6 +538,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
+
+ seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
+
++ seq_printf(m, "slb_size=%d\n", mmu_slb_size);
++
+ return 0;
+ }
+
+diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
+index 70e1f57..7ceefaf 100644
+--- a/arch/powerpc/kernel/perf_counter.c
++++ b/arch/powerpc/kernel/perf_counter.c
+@@ -32,6 +32,9 @@ struct cpu_hw_counters {
+ unsigned long mmcr[3];
+ struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
+ u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
++ u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
++ unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
++ unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ };
+ DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
+
+@@ -62,7 +65,6 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+ {
+ return 0;
+ }
+-static inline void perf_set_pmu_inuse(int inuse) { }
+ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
+ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+ {
+@@ -93,11 +95,6 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+ return 0;
+ }
+
+-static inline void perf_set_pmu_inuse(int inuse)
+-{
+- get_lppaca()->pmcregs_in_use = inuse;
+-}
+-
+ /*
+ * The user wants a data address recorded.
+ * If we're not doing instruction sampling, give them the SDAR
+@@ -245,13 +242,11 @@ static void write_pmc(int idx, unsigned long val)
+ * and see if any combination of alternative codes is feasible.
+ * The feasible set is returned in event[].
+ */
+-static int power_check_constraints(u64 event[], unsigned int cflags[],
++static int power_check_constraints(struct cpu_hw_counters *cpuhw,
++ u64 event[], unsigned int cflags[],
+ int n_ev)
+ {
+ unsigned long mask, value, nv;
+- u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+- unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+- unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
+ int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
+ int i, j;
+@@ -266,21 +261,23 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
+ if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
+ && !ppmu->limited_pmc_event(event[i])) {
+ ppmu->get_alternatives(event[i], cflags[i],
+- alternatives[i]);
+- event[i] = alternatives[i][0];
++ cpuhw->alternatives[i]);
++ event[i] = cpuhw->alternatives[i][0];
+ }
+- if (ppmu->get_constraint(event[i], &amasks[i][0],
+- &avalues[i][0]))
++ if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0],
++ &cpuhw->avalues[i][0]))
+ return -1;
+ }
+ value = mask = 0;
+ for (i = 0; i < n_ev; ++i) {
+- nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
++ nv = (value | cpuhw->avalues[i][0]) +
++ (value & cpuhw->avalues[i][0] & addf);
+ if ((((nv + tadd) ^ value) & mask) != 0 ||
+- (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
++ (((nv + tadd) ^ cpuhw->avalues[i][0]) &
++ cpuhw->amasks[i][0]) != 0)
+ break;
+ value = nv;
+- mask |= amasks[i][0];
++ mask |= cpuhw->amasks[i][0];
+ }
+ if (i == n_ev)
+ return 0; /* all OK */
+@@ -291,10 +288,11 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
+ for (i = 0; i < n_ev; ++i) {
+ choice[i] = 0;
+ n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
+- alternatives[i]);
++ cpuhw->alternatives[i]);
+ for (j = 1; j < n_alt[i]; ++j)
+- ppmu->get_constraint(alternatives[i][j],
+- &amasks[i][j], &avalues[i][j]);
++ ppmu->get_constraint(cpuhw->alternatives[i][j],
++ &cpuhw->amasks[i][j],
++ &cpuhw->avalues[i][j]);
+ }
+
+ /* enumerate all possibilities and see if any will work */
+@@ -313,11 +311,11 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
+ * where k > j, will satisfy the constraints.
+ */
+ while (++j < n_alt[i]) {
+- nv = (value | avalues[i][j]) +
+- (value & avalues[i][j] & addf);
++ nv = (value | cpuhw->avalues[i][j]) +
++ (value & cpuhw->avalues[i][j] & addf);
+ if ((((nv + tadd) ^ value) & mask) == 0 &&
+- (((nv + tadd) ^ avalues[i][j])
+- & amasks[i][j]) == 0)
++ (((nv + tadd) ^ cpuhw->avalues[i][j])
++ & cpuhw->amasks[i][j]) == 0)
+ break;
+ }
+ if (j >= n_alt[i]) {
+@@ -339,7 +337,7 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
+ svalues[i] = value;
+ smasks[i] = mask;
+ value = nv;
+- mask |= amasks[i][j];
++ mask |= cpuhw->amasks[i][j];
+ ++i;
+ j = -1;
+ }
+@@ -347,7 +345,7 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
+
+ /* OK, we have a feasible combination, tell the caller the solution */
+ for (i = 0; i < n_ev; ++i)
+- event[i] = alternatives[i][choice[i]];
++ event[i] = cpuhw->alternatives[i][choice[i]];
+ return 0;
+ }
+
+@@ -531,8 +529,7 @@ void hw_perf_disable(void)
+ * Check if we ever enabled the PMU on this cpu.
+ */
+ if (!cpuhw->pmcs_enabled) {
+- if (ppc_md.enable_pmcs)
+- ppc_md.enable_pmcs();
++ ppc_enable_pmcs();
+ cpuhw->pmcs_enabled = 1;
+ }
+
+@@ -594,7 +591,7 @@ void hw_perf_enable(void)
+ mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+ mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+ if (cpuhw->n_counters == 0)
+- perf_set_pmu_inuse(0);
++ ppc_set_pmu_inuse(0);
+ goto out_enable;
+ }
+
+@@ -627,7 +624,7 @@ void hw_perf_enable(void)
+ * bit set and set the hardware counters to their initial values.
+ * Then unfreeze the counters.
+ */
+- perf_set_pmu_inuse(1);
++ ppc_set_pmu_inuse(1);
+ mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+ mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+ mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
+@@ -752,7 +749,7 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
+ return -EAGAIN;
+ if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
+ return -EAGAIN;
+- i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
++ i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
+ if (i < 0)
+ return -EAGAIN;
+ cpuhw->n_counters = n0 + n;
+@@ -807,7 +804,7 @@ static int power_pmu_enable(struct perf_counter *counter)
+ cpuhw->flags[n0] = counter->hw.counter_base;
+ if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
+ goto out;
+- if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
++ if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
+ goto out;
+
+ counter->hw.config = cpuhw->events[n0];
+@@ -1012,6 +1009,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+ unsigned int cflags[MAX_HWCOUNTERS];
+ int n;
+ int err;
++ struct cpu_hw_counters *cpuhw;
+
+ if (!ppmu)
+ return ERR_PTR(-ENXIO);
+@@ -1090,7 +1088,11 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+ cflags[n] = flags;
+ if (check_excludes(ctrs, cflags, n, 1))
+ return ERR_PTR(-EINVAL);
+- if (power_check_constraints(events, cflags, n + 1))
++
++ cpuhw = &get_cpu_var(cpu_hw_counters);
++ err = power_check_constraints(cpuhw, events, cflags, n + 1);
++ put_cpu_var(cpu_hw_counters);
++ if (err)
+ return ERR_PTR(-EINVAL);
+
+ counter->hw.config = events[n];
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index c434823..bf90361 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -39,6 +39,7 @@
+ #include
+ #include
+ #include
++#include <asm/mmu.h>
+
+ struct rtas_t rtas = {
+ .lock = __RAW_SPIN_LOCK_UNLOCKED
+@@ -713,6 +714,7 @@ static void rtas_percpu_suspend_me(void *info)
+ {
+ long rc = H_SUCCESS;
+ unsigned long msr_save;
++ u16 slb_size = mmu_slb_size;
+ int cpu;
+ struct rtas_suspend_me_data *data =
+ (struct rtas_suspend_me_data *)info;
+@@ -735,13 +737,16 @@ static void rtas_percpu_suspend_me(void *info)
+ /* All other cpus are in H_JOIN, this cpu does
+ * the suspend.
+ */
++ slb_set_size(SLB_MIN_SIZE);
+ printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n",
+ smp_processor_id());
+ data->error = rtas_call(data->token, 0, 1, NULL);
+
+- if (data->error)
++ if (data->error) {
+ printk(KERN_DEBUG "ibm,suspend-me returned %d\n",
+ data->error);
++ slb_set_size(slb_size);
++ }
+ } else {
+ printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
+ smp_processor_id(), rc);
+diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
+index f41aec8..956ab33 100644
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -17,6 +17,7 @@
+ #include
+ #include
+ #include
++#include <asm/pmc.h>
+
+ #include "cacheinfo.h"
+
+@@ -123,6 +124,8 @@ static DEFINE_PER_CPU(char, pmcs_enabled);
+
+ void ppc_enable_pmcs(void)
+ {
++ ppc_set_pmu_inuse(1);
++
+ /* Only need to enable them once */
+ if (__get_cpu_var(pmcs_enabled))
+ return;
+diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
+index 5b7038f..deb6193 100644
+--- a/arch/powerpc/mm/slb.c
++++ b/arch/powerpc/mm/slb.c
+@@ -240,14 +240,22 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
+ static inline void patch_slb_encoding(unsigned int *insn_addr,
+ unsigned int immed)
+ {
+- /* Assume the instruction had a "0" immediate value, just
+- * "or" in the new value
+- */
+- *insn_addr |= immed;
++ *insn_addr = (*insn_addr & 0xffff0000) | immed;
+ flush_icache_range((unsigned long)insn_addr, 4+
+ (unsigned long)insn_addr);
+ }
+
++void slb_set_size(u16 size)
++{
++ extern unsigned int *slb_compare_rr_to_size;
++
++ if (mmu_slb_size == size)
++ return;
++
++ mmu_slb_size = size;
++ patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
++}
++
+ void slb_initialize(void)
+ {
+ unsigned long linear_llp, vmalloc_llp, io_llp;
+diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
+index b6f1b13..2e2bbe1 100644
+--- a/arch/powerpc/platforms/pseries/reconfig.c
++++ b/arch/powerpc/platforms/pseries/reconfig.c
+@@ -20,6 +20,7 @@
+ #include
+ #include
+ #include
++#include <asm/mmu.h>
+
+
+
+@@ -439,9 +440,15 @@ static int do_update_property(char *buf, size_t bufsize)
+ if (!newprop)
+ return -ENOMEM;
+
++ if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size"))
++ slb_set_size(*(int *)value);
++
+ oldprop = of_find_property(np, name,NULL);
+- if (!oldprop)
++ if (!oldprop) {
++ if (strlen(name))
++ return prom_add_property(np, newprop);
+ return -ENODEV;
++ }
+
+ rc = prom_update_property(np, newprop, oldprop);
+ if (rc)
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 8d75ea2..ca5f2e1 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -223,10 +223,6 @@ static void pseries_lpar_enable_pmcs(void)
+ set = 1UL << 63;
+ reset = 0;
+ plpar_hcall_norets(H_PERFMON, set, reset);
+-
+- /* instruct hypervisor to maintain PMCs */
+- if (firmware_has_feature(FW_FEATURE_SPLPAR))
+- get_lppaca()->pmcregs_in_use = 1;
+ }
+
+ static void __init pseries_discover_pic(void)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index eabdc1c..68d16d2 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -618,6 +618,7 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
+ u32 error_code);
++bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
+
+ int kvm_pic_set_irq(void *opaque, int irq, int level);
+
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index c776826..e597ecc 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -403,7 +403,17 @@ extern unsigned long kernel_eflags;
+ extern asmlinkage void ignore_sysret(void);
+ #else /* X86_64 */
+ #ifdef CONFIG_CC_STACKPROTECTOR
+-DECLARE_PER_CPU(unsigned long, stack_canary);
++/*
++ * Make sure stack canary segment base is cached-aligned:
++ * "For Intel Atom processors, avoid non zero segment base address
++ * that is not aligned to cache line boundary at all cost."
++ * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
++ */
++struct stack_canary {
++ char __pad[20]; /* canary at %gs:20 */
++ unsigned long canary;
++};
++DECLARE_PER_CPU(struct stack_canary, stack_canary) ____cacheline_aligned;
+ #endif
+ #endif /* X86_64 */
+
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index c2d742c..decad97 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -78,14 +78,14 @@ static __always_inline void boot_init_stack_canary(void)
+ #ifdef CONFIG_X86_64
+ percpu_write(irq_stack_union.stack_canary, canary);
+ #else
+- percpu_write(stack_canary, canary);
++ percpu_write(stack_canary.canary, canary);
+ #endif
+ }
+
+ static inline void setup_stack_canary_segment(int cpu)
+ {
+ #ifdef CONFIG_X86_32
+- unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
++ unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
+ struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
+ struct desc_struct desc;
+
+diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
+index 643c59b..5bd119b 100644
+--- a/arch/x86/include/asm/system.h
++++ b/arch/x86/include/asm/system.h
+@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ "movl %P[task_canary](%[next]), %%ebx\n\t" \
+ "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
+ #define __switch_canary_oparam \
+- , [stack_canary] "=m" (per_cpu_var(stack_canary))
++ , [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
+ #define __switch_canary_iparam \
+ , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+ #else /* CC_STACKPROTECTOR */
+diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
+index 6c99f50..4607241 100644
+--- a/arch/x86/kernel/amd_iommu.c
++++ b/arch/x86/kernel/amd_iommu.c
+@@ -485,8 +485,6 @@ void amd_iommu_flush_all_devices(void)
+ int i;
+
+ for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+- if (amd_iommu_pd_table[i] == NULL)
+- continue;
+
+ iommu = amd_iommu_rlookup_table[i];
+ if (!iommu)
+diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
+index 8952a58..89174f8 100644
+--- a/arch/x86/kernel/apic/es7000_32.c
++++ b/arch/x86/kernel/apic/es7000_32.c
+@@ -167,7 +167,7 @@ static int es7000_apic_is_cluster(void)
+ {
+ /* MPENTIUMIII */
+ if (boot_cpu_data.x86 == 6 &&
+- (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11))
++ (boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11))
+ return 1;
+
+ return 0;
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 5ce60a8..e338b5c 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1043,7 +1043,7 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist);
+ #else /* CONFIG_X86_64 */
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+-DEFINE_PER_CPU(unsigned long, stack_canary);
++DEFINE_PER_CPU(struct stack_canary, stack_canary) ____cacheline_aligned;
+ #endif
+
+ /* Make sure %fs and %gs are initialized properly in idle threads */
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index cc827ac..7ffec6b 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -439,7 +439,6 @@ is386: movl $2,%ecx # set MP
+ jne 1f
+ movl $per_cpu__gdt_page,%eax
+ movl $per_cpu__stack_canary,%ecx
+- subl $20, %ecx
+ movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+ shrl $16, %ecx
+ movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index c664d51..63b0ec8 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -34,7 +34,6 @@
+ struct kvm_para_state {
+ u8 mmu_queue[MMU_QUEUE_SIZE];
+ int mmu_queue_len;
+- enum paravirt_lazy_mode mode;
+ };
+
+ static DEFINE_PER_CPU(struct kvm_para_state, para_state);
+@@ -77,7 +76,7 @@ static void kvm_deferred_mmu_op(void *buffer, int len)
+ {
+ struct kvm_para_state *state = kvm_para_state();
+
+- if (state->mode != PARAVIRT_LAZY_MMU) {
++ if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
+ kvm_mmu_op(buffer, len);
+ return;
+ }
+@@ -185,10 +184,7 @@ static void kvm_release_pt(unsigned long pfn)
+
+ static void kvm_enter_lazy_mmu(void)
+ {
+- struct kvm_para_state *state = kvm_para_state();
+-
+ paravirt_enter_lazy_mmu();
+- state->mode = paravirt_get_lazy_mode();
+ }
+
+ static void kvm_leave_lazy_mmu(void)
+@@ -197,7 +193,6 @@ static void kvm_leave_lazy_mmu(void)
+
+ mmu_queue_flush(state);
+ paravirt_leave_lazy_mmu();
+- state->mode = paravirt_get_lazy_mode();
+ }
+
+ static void __init paravirt_ops_setup(void)
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 223af43..e5efcdc 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -50,8 +50,8 @@ static unsigned long kvm_get_wallclock(void)
+ struct timespec ts;
+ int low, high;
+
+- low = (int)__pa(&wall_clock);
+- high = ((u64)__pa(&wall_clock) >> 32);
++ low = (int)__pa_symbol(&wall_clock);
++ high = ((u64)__pa_symbol(&wall_clock) >> 32);
+ native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
+
+ vcpu_time = &get_cpu_var(hv_clock);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index ae99d83..bb6277d 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -573,6 +573,15 @@ static void start_apic_timer(struct kvm_lapic *apic)
+
+ if (!apic->lapic_timer.period)
+ return;
++ /*
++ * Do not allow the guest to program periodic timers with small
++ * interval, since the hrtimers are not throttled by the host
++ * scheduler.
++ */
++ if (apic_lvtt_period(apic)) {
++ if (apic->lapic_timer.period < NSEC_PER_MSEC/2)
++ apic->lapic_timer.period = NSEC_PER_MSEC/2;
++ }
+
+ hrtimer_start(&apic->lapic_timer.timer,
+ ktime_add_ns(now, apic->lapic_timer.period),
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 0ef5bb2..a5cdb35 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -2633,7 +2633,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
+
+ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+ {
+- while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
++ while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES &&
++ !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
+ struct kvm_mmu_page *sp;
+
+ sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 29f9129..b5fa966 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1217,12 +1217,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
+ if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
+ /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
+ enabled */
+- min &= ~(CPU_BASED_CR3_LOAD_EXITING |
+- CPU_BASED_CR3_STORE_EXITING |
+- CPU_BASED_INVLPG_EXITING);
+- if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
+- &_cpu_based_exec_control) < 0)
+- return -EIO;
++ _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
++ CPU_BASED_CR3_STORE_EXITING |
++ CPU_BASED_INVLPG_EXITING);
+ rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
+ vmx_capability.ept, vmx_capability.vpid);
+ }
+@@ -2841,6 +2838,8 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ unsigned long val;
+ int dr, reg;
+
++ if (!kvm_require_cpl(vcpu, 0))
++ return 1;
+ dr = vmcs_readl(GUEST_DR7);
+ if (dr & DR7_GD) {
+ /*
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3d45290..3d36045 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -215,6 +215,19 @@ static void __queue_exception(struct kvm_vcpu *vcpu)
+ }
+
+ /*
++ * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
++ * a #GP and return false.
++ */
++bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
++{
++ if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
++ return true;
++ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
++ return false;
++}
++EXPORT_SYMBOL_GPL(kvm_require_cpl);
++
++/*
+ * Load the pae pdptrs. Return true is they are all valid.
+ */
+ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
+@@ -2898,6 +2911,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+ a3 &= 0xFFFFFFFF;
+ }
+
++ if (kvm_x86_ops->get_cpl(vcpu) != 0) {
++ ret = -KVM_EPERM;
++ goto out;
++ }
++
+ switch (nr) {
+ case KVM_HC_VAPIC_POLL_IRQ:
+ ret = 0;
+@@ -2909,6 +2927,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+ ret = -KVM_ENOSYS;
+ break;
+ }
++out:
+ kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+ ++vcpu->stat.hypercalls;
+ return r;
+diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
+index 616de46..ef4dfca 100644
+--- a/arch/x86/kvm/x86_emulate.c
++++ b/arch/x86/kvm/x86_emulate.c
+@@ -60,6 +60,7 @@
+ #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
+ #define SrcOne (7<<4) /* Implied '1' */
+ #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
++#define SrcImmU (9<<4) /* Immediate operand, unsigned */
+ #define SrcMask (0xf<<4)
+ /* Generic ModRM decode. */
+ #define ModRM (1<<8)
+@@ -195,7 +196,7 @@ static u32 opcode_table[256] = {
+ ByteOp | SrcImmUByte, SrcImmUByte,
+ /* 0xE8 - 0xEF */
+ SrcImm | Stack, SrcImm | ImplicitOps,
+- SrcImm | Src2Imm16, SrcImmByte | ImplicitOps,
++ SrcImmU | Src2Imm16, SrcImmByte | ImplicitOps,
+ SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
+ SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
+ /* 0xF0 - 0xF7 */
+@@ -1027,6 +1028,7 @@ done_prefixes:
+ c->src.type = OP_MEM;
+ break;
+ case SrcImm:
++ case SrcImmU:
+ c->src.type = OP_IMM;
+ c->src.ptr = (unsigned long *)c->eip;
+ c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+@@ -1044,6 +1046,19 @@ done_prefixes:
+ c->src.val = insn_fetch(s32, 4, c->eip);
+ break;
+ }
++ if ((c->d & SrcMask) == SrcImmU) {
++ switch (c->src.bytes) {
++ case 1:
++ c->src.val &= 0xff;
++ break;
++ case 2:
++ c->src.val &= 0xffff;
++ break;
++ case 4:
++ c->src.val &= 0xffffffff;
++ break;
++ }
++ }
+ break;
+ case SrcImmByte:
+ case SrcImmUByte:
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index 7e600c1..e245775 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -822,6 +822,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
+ {
+ struct cpa_data cpa;
+ int ret, cache, checkalias;
++ unsigned long baddr = 0;
+
+ /*
+ * Check, if we are requested to change a not supported
+@@ -853,6 +854,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
+ */
+ WARN_ON_ONCE(1);
+ }
++ /*
++ * Save address for cache flush. *addr is modified in the call
++ * to __change_page_attr_set_clr() below.
++ */
++ baddr = *addr;
+ }
+
+ /* Must avoid aliasing mappings in the highmem code */
+@@ -900,7 +906,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
+ cpa_flush_array(addr, numpages, cache,
+ cpa.flags, pages);
+ } else
+- cpa_flush_range(*addr, numpages, cache);
++ cpa_flush_range(baddr, numpages, cache);
+ } else
+ cpa_flush_all(cache);
+
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index d3aa2aa..b78c9c3 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -40,7 +40,12 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
+ {
+ struct request_list *rl = &q->rq;
+ unsigned long nr;
+- int ret = queue_var_store(&nr, page, count);
++ int ret;
++
++ if (!q->request_fn)
++ return -EINVAL;
++
++ ret = queue_var_store(&nr, page, count);
+ if (nr < BLKDEV_MIN_RQ)
+ nr = BLKDEV_MIN_RQ;
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 072ba5e..e71149b 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -709,7 +709,13 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
+ head = tf->device & 0xf;
+ sect = tf->lbal;
+
+- block = (cyl * dev->heads + head) * dev->sectors + sect;
++ if (!sect) {
++ ata_dev_printk(dev, KERN_WARNING, "device reported "
++ "invalid CHS sector 0\n");
++ sect = 1; /* oh well */
++ }
++
++ block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
+ }
+
+ return block;
+diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
+index c585577..dee0f1f 100644
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -2313,15 +2313,6 @@ static int agp_intel_resume(struct pci_dev *pdev)
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+ int ret_val;
+
+- pci_restore_state(pdev);
+-
+- /* We should restore our graphics device's config space,
+- * as host bridge (00:00) resumes before graphics device (02:00),
+- * then our access to its pci space can work right.
+- */
+- if (intel_private.pcidev)
+- pci_restore_state(intel_private.pcidev);
+-
+ if (bridge->driver == &intel_generic_driver)
+ intel_configure();
+ else if (bridge->driver == &intel_850_driver)
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index aec1931..0b73e4e 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -450,6 +450,12 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+ goto out_err;
+ }
+
++ /* Default timeouts */
++ chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
++ chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
++ chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
++ chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
++
+ if (request_locality(chip, 0) != 0) {
+ rc = -ENODEV;
+ goto out_err;
+@@ -457,12 +463,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+
+ vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
+
+- /* Default timeouts */
+- chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+- chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+- chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+- chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+-
+ dev_info(dev,
+ "1.2 TPM (device-id 0x%X, rev-id %d)\n",
+ vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
+diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
+index 6e186b1..652bd33 100644
+--- a/drivers/md/dm-log-userspace-base.c
++++ b/drivers/md/dm-log-userspace-base.c
+@@ -582,7 +582,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
+ break;
+ case STATUSTYPE_TABLE:
+ sz = 0;
+- table_args = strstr(lc->usr_argv_str, " ");
++ table_args = strchr(lc->usr_argv_str, ' ');
+ BUG_ON(!table_args); /* There will always be a ' ' */
+ table_args++;
+
+diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
+index b9ceddd..4ff665c 100644
+--- a/drivers/net/mlx4/eq.c
++++ b/drivers/net/mlx4/eq.c
+@@ -526,48 +526,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
+ iounmap(priv->clr_base);
+ }
+
+-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
+-{
+- struct mlx4_priv *priv = mlx4_priv(dev);
+- int ret;
+-
+- /*
+- * We assume that mapping one page is enough for the whole EQ
+- * context table. This is fine with all current HCAs, because
+- * we only use 32 EQs and each EQ uses 64 bytes of context
+- * memory, or 1 KB total.
+- */
+- priv->eq_table.icm_virt = icm_virt;
+- priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
+- if (!priv->eq_table.icm_page)
+- return -ENOMEM;
+- priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
+- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+- if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
+- __free_page(priv->eq_table.icm_page);
+- return -ENOMEM;
+- }
+-
+- ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
+- if (ret) {
+- pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
+- PCI_DMA_BIDIRECTIONAL);
+- __free_page(priv->eq_table.icm_page);
+- }
+-
+- return ret;
+-}
+-
+-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
+-{
+- struct mlx4_priv *priv = mlx4_priv(dev);
+-
+- mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
+- pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
+- PCI_DMA_BIDIRECTIONAL);
+- __free_page(priv->eq_table.icm_page);
+-}
+-
+ int mlx4_alloc_eq_table(struct mlx4_dev *dev)
+ {
+ struct mlx4_priv *priv = mlx4_priv(dev);
+diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
+index dac621b..8e8b79f 100644
+--- a/drivers/net/mlx4/main.c
++++ b/drivers/net/mlx4/main.c
+@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
+ goto err_unmap_aux;
+ }
+
+- err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
++ err = mlx4_init_icm_table(dev, &priv->eq_table.table,
++ init_hca->eqc_base, dev_cap->eqc_entry_sz,
++ dev->caps.num_eqs, dev->caps.num_eqs,
++ 0, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+ goto err_unmap_cmpt;
+@@ -668,7 +671,7 @@ err_unmap_mtt:
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+
+ err_unmap_eq:
+- mlx4_unmap_eq_icm(dev);
++ mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
+
+ err_unmap_cmpt:
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
+@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
++ mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
+- mlx4_unmap_eq_icm(dev);
+
+ mlx4_UNMAP_ICM_AUX(dev);
+ mlx4_free_icm(dev, priv->fw.aux_icm, 0);
+diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
+index 5bd79c2..bc72d6e 100644
+--- a/drivers/net/mlx4/mlx4.h
++++ b/drivers/net/mlx4/mlx4.h
+@@ -205,9 +205,7 @@ struct mlx4_eq_table {
+ void __iomem **uar_map;
+ u32 clr_mask;
+ struct mlx4_eq *eq;
+- u64 icm_virt;
+- struct page *icm_page;
+- dma_addr_t icm_dma;
++ struct mlx4_icm_table table;
+ struct mlx4_icm_table cmpt_table;
+ int have_irq;
+ u8 inta_pin;
+@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap,
+ struct mlx4_init_hca_param *init_hca);
+
+-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
+-void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
+-
+ int mlx4_cmd_init(struct mlx4_dev *dev);
+ void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
+diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
+index 029c1bc..ba6d225 100644
+--- a/drivers/net/wireless/ath/ath5k/base.c
++++ b/drivers/net/wireless/ath/ath5k/base.c
+@@ -2676,7 +2676,7 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
+ sc->curchan = chan;
+ sc->curband = &sc->sbands[chan->band];
+ }
+- ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true);
++ ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL);
+ if (ret) {
+ ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
+ goto err;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 06b9656..1073137 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1201,6 +1201,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
+ switch(dev->subsystem_device) {
+ case 0x00b8: /* Compaq Evo D510 CMT */
+ case 0x00b9: /* Compaq Evo D510 SFF */
++ case 0x00ba: /* Compaq Evo D510 USDT */
+ /* Motherboard doesn't have Host bridge
+ * subvendor/subdevice IDs and on-board VGA
+ * controller is disabled if an AGP card is
+@@ -2382,8 +2383,10 @@ static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
+ }
+
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
+
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
+
+ static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
+ {
+@@ -2492,6 +2495,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
+
+ #endif /* CONFIG_PCI_IOV */
+
+diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
+index 18066d5..af0afa1 100644
+--- a/drivers/ps3/ps3stor_lib.c
++++ b/drivers/ps3/ps3stor_lib.c
+@@ -23,6 +23,65 @@
+ #include
+ #include
+
++/*
++ * A workaround for flash memory I/O errors when the internal hard disk
++ * has not been formatted for OtherOS use. Delay disk close until flash
++ * memory is closed.
++ */
++
++static struct ps3_flash_workaround {
++ int flash_open;
++ int disk_open;
++ struct ps3_system_bus_device *disk_sbd;
++} ps3_flash_workaround;
++
++static int ps3stor_open_hv_device(struct ps3_system_bus_device *sbd)
++{
++ int error = ps3_open_hv_device(sbd);
++
++ if (error)
++ return error;
++
++ if (sbd->match_id == PS3_MATCH_ID_STOR_FLASH)
++ ps3_flash_workaround.flash_open = 1;
++
++ if (sbd->match_id == PS3_MATCH_ID_STOR_DISK)
++ ps3_flash_workaround.disk_open = 1;
++
++ return 0;
++}
++
++static int ps3stor_close_hv_device(struct ps3_system_bus_device *sbd)
++{
++ int error;
++
++ if (sbd->match_id == PS3_MATCH_ID_STOR_DISK
++ && ps3_flash_workaround.disk_open
++ && ps3_flash_workaround.flash_open) {
++ ps3_flash_workaround.disk_sbd = sbd;
++ return 0;
++ }
++
++ error = ps3_close_hv_device(sbd);
++
++ if (error)
++ return error;
++
++ if (sbd->match_id == PS3_MATCH_ID_STOR_DISK)
++ ps3_flash_workaround.disk_open = 0;
++
++ if (sbd->match_id == PS3_MATCH_ID_STOR_FLASH) {
++ ps3_flash_workaround.flash_open = 0;
++
++ if (ps3_flash_workaround.disk_sbd) {
++ ps3_close_hv_device(ps3_flash_workaround.disk_sbd);
++ ps3_flash_workaround.disk_open = 0;
++ ps3_flash_workaround.disk_sbd = NULL;
++ }
++ }
++
++ return 0;
++}
+
+ static int ps3stor_probe_access(struct ps3_storage_device *dev)
+ {
+@@ -90,7 +149,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
+ int error, res, alignment;
+ enum ps3_dma_page_size page_size;
+
+- error = ps3_open_hv_device(&dev->sbd);
++ error = ps3stor_open_hv_device(&dev->sbd);
+ if (error) {
+ dev_err(&dev->sbd.core,
+ "%s:%u: ps3_open_hv_device failed %d\n", __func__,
+@@ -166,7 +225,7 @@ fail_free_irq:
+ fail_sb_event_receive_port_destroy:
+ ps3_sb_event_receive_port_destroy(&dev->sbd, dev->irq);
+ fail_close_device:
+- ps3_close_hv_device(&dev->sbd);
++ ps3stor_close_hv_device(&dev->sbd);
+ fail:
+ return error;
+ }
+@@ -193,7 +252,7 @@ void ps3stor_teardown(struct ps3_storage_device *dev)
+ "%s:%u: destroy event receive port failed %d\n",
+ __func__, __LINE__, error);
+
+- error = ps3_close_hv_device(&dev->sbd);
++ error = ps3stor_close_hv_device(&dev->sbd);
+ if (error)
+ dev_err(&dev->sbd.core,
+ "%s:%u: ps3_close_hv_device failed %d\n", __func__,
+diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
+index 2742ae8..9ad38e8 100644
+--- a/drivers/scsi/libsrp.c
++++ b/drivers/scsi/libsrp.c
+@@ -124,6 +124,7 @@ static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
+ dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
+ kfree(ring[i]);
+ }
++ kfree(ring);
+ }
+
+ int srp_target_alloc(struct srp_target *target, struct device *dev,
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index 35a1386..2e4bc3d 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -94,7 +94,7 @@ _base_fault_reset_work(struct work_struct *work)
+ int rc;
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- if (ioc->ioc_reset_in_progress)
++ if (ioc->shost_recovery)
+ goto rearm_timer;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+@@ -1542,6 +1542,8 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
+ (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8,
+ ioc->bios_pg3.BiosVersion & 0x000000FF);
+
++ _base_display_dell_branding(ioc);
++
+ printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
+
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
+@@ -1554,8 +1556,6 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
+ i++;
+ }
+
+- _base_display_dell_branding(ioc);
+-
+ i = 0;
+ printk("), ");
+ printk("Capabilities=(");
+@@ -1627,6 +1627,9 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
+ u32 iounit_pg1_flags;
+
+ mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
++ if (ioc->ir_firmware)
++ mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
++ &ioc->manu_pg10);
+ mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
+@@ -3501,20 +3504,13 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
+ __func__));
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- if (ioc->ioc_reset_in_progress) {
++ if (ioc->shost_recovery) {
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ printk(MPT2SAS_ERR_FMT "%s: busy\n",
+ ioc->name, __func__);
+ return -EBUSY;
+ }
+- ioc->ioc_reset_in_progress = 1;
+ ioc->shost_recovery = 1;
+- if (ioc->shost->shost_state == SHOST_RUNNING) {
+- /* set back to SHOST_RUNNING in mpt2sas_scsih.c */
+- scsi_host_set_state(ioc->shost, SHOST_RECOVERY);
+- printk(MPT2SAS_INFO_FMT "putting controller into "
+- "SHOST_RECOVERY\n", ioc->name);
+- }
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ _base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
+@@ -3534,7 +3530,10 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
+ ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- ioc->ioc_reset_in_progress = 0;
++ ioc->shost_recovery = 0;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
++
++ if (!r)
++ _base_reset_handler(ioc, MPT2_IOC_RUNNING);
+ return r;
+ }
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
+index acdcff1..22f84d3 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
+@@ -119,6 +119,7 @@
+ #define MPT2_IOC_PRE_RESET 1 /* prior to host reset */
+ #define MPT2_IOC_AFTER_RESET 2 /* just after host reset */
+ #define MPT2_IOC_DONE_RESET 3 /* links re-initialized */
++#define MPT2_IOC_RUNNING 4 /* shost running */
+
+ /*
+ * logging format
+@@ -196,6 +197,38 @@ struct MPT2SAS_TARGET {
+ * @block: device is in SDEV_BLOCK state
+ * @tlr_snoop_check: flag used in determining whether to disable TLR
+ */
++
++/* OEM Identifiers */
++#define MFG10_OEM_ID_INVALID (0x00000000)
++#define MFG10_OEM_ID_DELL (0x00000001)
++#define MFG10_OEM_ID_FSC (0x00000002)
++#define MFG10_OEM_ID_SUN (0x00000003)
++#define MFG10_OEM_ID_IBM (0x00000004)
++
++/* GENERIC Flags 0*/
++#define MFG10_GF0_OCE_DISABLED (0x00000001)
++#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002)
++#define MFG10_GF0_R10_DISPLAY (0x00000004)
++#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
++#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
++
++/* OEM Specific Flags will come from OEM specific header files */
++typedef struct _MPI2_CONFIG_PAGE_MAN_10 {
++ MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
++ U8 OEMIdentifier; /* 04h */
++ U8 Reserved1; /* 05h */
++ U16 Reserved2; /* 08h */
++ U32 Reserved3; /* 0Ch */
++ U32 GenericFlags0; /* 10h */
++ U32 GenericFlags1; /* 14h */
++ U32 Reserved4; /* 18h */
++ U32 OEMSpecificFlags0; /* 1Ch */
++ U32 OEMSpecificFlags1; /* 20h */
++ U32 Reserved5[18]; /* 24h-60h*/
++} MPI2_CONFIG_PAGE_MAN_10,
++ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10,
++ Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t;
++
+ struct MPT2SAS_DEVICE {
+ struct MPT2SAS_TARGET *sas_target;
+ unsigned int lun;
+@@ -431,7 +464,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
+ * @fw_event_list: list of fw events
+ * @aen_event_read_flag: event log was read
+ * @broadcast_aen_busy: broadcast aen waiting to be serviced
+- * @ioc_reset_in_progress: host reset in progress
++ * @shost_recovery: host reset in progress
+ * @ioc_reset_in_progress_lock:
+ * @ioc_link_reset_in_progress: phy/hard reset in progress
+ * @ignore_loginfos: ignore loginfos during task managment
+@@ -460,6 +493,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
+ * @facts: static facts data
+ * @pfacts: static port facts data
+ * @manu_pg0: static manufacturing page 0
++ * @manu_pg10: static manufacturing page 10
+ * @bios_pg2: static bios page 2
+ * @bios_pg3: static bios page 3
+ * @ioc_pg8: static ioc page 8
+@@ -544,7 +578,6 @@ struct MPT2SAS_ADAPTER {
+ /* misc flags */
+ int aen_event_read_flag;
+ u8 broadcast_aen_busy;
+- u8 ioc_reset_in_progress;
+ u8 shost_recovery;
+ spinlock_t ioc_reset_in_progress_lock;
+ u8 ioc_link_reset_in_progress;
+@@ -663,6 +696,7 @@ struct MPT2SAS_ADAPTER {
+ dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
+ u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
++ Mpi2ManufacturingPage10_t manu_pg10;
+ u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
+ u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
+ };
+@@ -734,6 +768,8 @@ void mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 re
+ int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys);
+ int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
++int mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
++ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page);
+ int mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage2_t *config_page);
+ int mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+@@ -776,7 +812,6 @@ int mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
+ u16 *volume_handle);
+ int mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle,
+ u64 *wwid);
+-
+ /* ctl shared API */
+ extern struct device_attribute *mpt2sas_host_attrs[];
+ extern struct device_attribute *mpt2sas_dev_attrs[];
+@@ -802,5 +837,7 @@ void mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc, u16 h
+ u16 attached_handle, u8 phy_number, u8 link_rate);
+ extern struct sas_function_template mpt2sas_transport_functions;
+ extern struct scsi_transport_template *mpt2sas_transport_template;
++extern int scsi_internal_device_block(struct scsi_device *sdev);
++extern int scsi_internal_device_unblock(struct scsi_device *sdev);
+
+ #endif /* MPT2SAS_BASE_H_INCLUDED */
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
+index 6ddee16..b9f4d0f 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
+@@ -426,6 +426,67 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
+ }
+
+ /**
++ * mpt2sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
++ * @ioc: per adapter object
++ * @mpi_reply: reply mf payload returned from firmware
++ * @config_page: contents of the config page
++ * Context: sleep.
++ *
++ * Returns 0 for success, non-zero for failure.
++ */
++int
++mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
++ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page)
++{
++ Mpi2ConfigRequest_t mpi_request;
++ int r;
++ struct config_request mem;
++
++ memset(config_page, 0, sizeof(Mpi2ManufacturingPage10_t));
++ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
++ mpi_request.Function = MPI2_FUNCTION_CONFIG;
++ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
++ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
++ mpi_request.Header.PageNumber = 10;
++ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
++ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
++ r = _config_request(ioc, &mpi_request, mpi_reply,
++ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
++ if (r)
++ goto out;
++
++ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
++ mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
++ mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
++ mpi_request.Header.PageType = mpi_reply->Header.PageType;
++ mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
++ mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
++ if (mem.config_page_sz > ioc->config_page_sz) {
++ r = _config_alloc_config_dma_memory(ioc, &mem);
++ if (r)
++ goto out;
++ } else {
++ mem.config_page_dma = ioc->config_page_dma;
++ mem.config_page = ioc->config_page;
++ }
++ ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
++ MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
++ mem.config_page_dma);
++ r = _config_request(ioc, &mpi_request, mpi_reply,
++ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
++ if (!r)
++ memcpy(config_page, mem.config_page,
++ min_t(u16, mem.config_page_sz,
++ sizeof(Mpi2ManufacturingPage10_t)));
++
++ if (mem.config_page_sz > ioc->config_page_sz)
++ _config_free_config_dma_memory(ioc, &mem);
++
++ out:
++ return r;
++}
++
++/**
+ * mpt2sas_config_get_bios_pg2 - obtain bios page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+index 14e473d..c2a5101 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+@@ -1963,7 +1963,6 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
+ {
+ enum block_state state;
+ long ret = -EINVAL;
+- unsigned long flags;
+
+ state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING :
+ BLOCKING;
+@@ -1989,13 +1988,8 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
+ !ioc)
+ return -ENODEV;
+
+- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- if (ioc->shost_recovery) {
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
+- flags);
++ if (ioc->shost_recovery)
+ return -EAGAIN;
+- }
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
+ uarg = arg;
+@@ -2098,7 +2092,6 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
+ struct mpt2_ioctl_command karg;
+ struct MPT2SAS_ADAPTER *ioc;
+ enum block_state state;
+- unsigned long flags;
+
+ if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
+ return -EINVAL;
+@@ -2113,13 +2106,8 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
+ if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
+ return -ENODEV;
+
+- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- if (ioc->shost_recovery) {
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
+- flags);
++ if (ioc->shost_recovery)
+ return -EAGAIN;
+- }
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
+ karg.hdr.ioc_number = karg32.hdr.ioc_number;
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index 2e9a444..c7a0870 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -103,7 +103,6 @@ struct sense_info {
+ };
+
+
+-#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
+ /**
+ * struct fw_event_work - firmware event struct
+ * @list: link list framework
+@@ -1502,7 +1501,13 @@ _scsih_slave_configure(struct scsi_device *sdev)
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
+- r_level = "RAID1E";
++ if (ioc->manu_pg10.OEMIdentifier &&
++ (ioc->manu_pg10.GenericFlags0 &
++ MFG10_GF0_R10_DISPLAY) &&
++ !(raid_device->num_pds % 2))
++ r_level = "RAID10";
++ else
++ r_level = "RAID1E";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
+@@ -1786,17 +1791,18 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
+ u32 ioc_state;
+ unsigned long timeleft;
+ u8 VF_ID = 0;
+- unsigned long flags;
+
+- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED ||
+- ioc->shost_recovery) {
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
++ if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
++ printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
++ __func__, ioc->name);
++ return;
++ }
++
++ if (ioc->shost_recovery) {
+ printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return;
+ }
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
+ if (ioc_state & MPI2_DOORBELL_USED) {
+@@ -2222,7 +2228,7 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ MPT2SAS_INFO_FMT "SDEV_RUNNING: "
+ "handle(0x%04x)\n", ioc->name, handle));
+ sas_device_priv_data->block = 0;
+- scsi_device_set_state(sdev, SDEV_RUNNING);
++ scsi_internal_device_unblock(sdev);
+ }
+ }
+ }
+@@ -2251,7 +2257,7 @@ _scsih_block_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ MPT2SAS_INFO_FMT "SDEV_BLOCK: "
+ "handle(0x%04x)\n", ioc->name, handle));
+ sas_device_priv_data->block = 1;
+- scsi_device_set_state(sdev, SDEV_BLOCK);
++ scsi_internal_device_block(sdev);
+ }
+ }
+ }
+@@ -2327,6 +2333,7 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
+ u16 handle;
+ u16 reason_code;
+ u8 phy_number;
++ u8 link_rate;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+@@ -2337,6 +2344,11 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
+ _scsih_block_io_device(ioc, handle);
++ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
++ link_rate = event_data->PHY[i].LinkRate >> 4;
++ if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
++ _scsih_ublock_io_device(ioc, handle);
++ }
+ }
+ }
+
+@@ -2405,27 +2417,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
+ }
+
+ /**
+- * _scsih_queue_rescan - queue a topology rescan from user context
+- * @ioc: per adapter object
+- *
+- * Return nothing.
+- */
+-static void
+-_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
+-{
+- struct fw_event_work *fw_event;
+-
+- if (ioc->wait_for_port_enable_to_complete)
+- return;
+- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+- if (!fw_event)
+- return;
+- fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
+- fw_event->ioc = ioc;
+- _scsih_fw_event_add(ioc, fw_event);
+-}
+-
+-/**
+ * _scsih_flush_running_cmds - completing outstanding commands.
+ * @ioc: per adapter object
+ *
+@@ -2456,46 +2447,6 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
+ }
+
+ /**
+- * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
+- * @ioc: per adapter object
+- * @reset_phase: phase
+- *
+- * The handler for doing any required cleanup or initialization.
+- *
+- * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
+- * MPT2_IOC_DONE_RESET
+- *
+- * Return nothing.
+- */
+-void
+-mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
+-{
+- switch (reset_phase) {
+- case MPT2_IOC_PRE_RESET:
+- dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+- "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
+- _scsih_fw_event_off(ioc);
+- break;
+- case MPT2_IOC_AFTER_RESET:
+- dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+- "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
+- if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
+- ioc->tm_cmds.status |= MPT2_CMD_RESET;
+- mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
+- complete(&ioc->tm_cmds.done);
+- }
+- _scsih_fw_event_on(ioc);
+- _scsih_flush_running_cmds(ioc);
+- break;
+- case MPT2_IOC_DONE_RESET:
+- dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+- "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
+- _scsih_queue_rescan(ioc);
+- break;
+- }
+-}
+-
+-/**
+ * _scsih_setup_eedp - setup MPI request for EEDP transfer
+ * @scmd: pointer to scsi command object
+ * @mpi_request: pointer to the SCSI_IO reqest message frame
+@@ -2615,7 +2566,6 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+ Mpi2SCSIIORequest_t *mpi_request;
+ u32 mpi_control;
+ u16 smid;
+- unsigned long flags;
+
+ scmd->scsi_done = done;
+ sas_device_priv_data = scmd->device->hostdata;
+@@ -2634,13 +2584,10 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+ }
+
+ /* see if we are busy with task managment stuff */
+- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- if (sas_target_priv_data->tm_busy ||
+- ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
++ if (sas_target_priv_data->tm_busy)
++ return SCSI_MLQUEUE_DEVICE_BUSY;
++ else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ return SCSI_MLQUEUE_HOST_BUSY;
+- }
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mpi_control = MPI2_SCSIIO_CONTROL_READ;
+@@ -3436,6 +3383,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ if (!handle)
+ return -1;
+
++ if (ioc->shost_recovery)
++ return -1;
++
+ if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+@@ -3572,6 +3522,9 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+
++ if (ioc->shost_recovery)
++ return;
++
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+@@ -3743,6 +3696,8 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ mutex_unlock(&ioc->tm_cmds.mutex);
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
+ "done: handle(0x%04x)\n", ioc->name, device_handle));
++ if (ioc->shost_recovery)
++ goto out;
+ }
+
+ /* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
+@@ -3765,6 +3720,9 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+
+ out:
++
++ _scsih_ublock_io_device(ioc, handle);
++
+ mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
+ sas_device->parent_handle);
+
+@@ -3908,6 +3866,8 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
+ "expander event\n", ioc->name));
+ return;
+ }
++ if (ioc->shost_recovery)
++ return;
+ if (event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
+ continue;
+@@ -3942,10 +3902,6 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
+ link_rate_);
+ }
+ }
+- if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
+- if (link_rate_ >= MPI2_SAS_NEG_LINK_RATE_1_5)
+- _scsih_ublock_io_device(ioc, handle);
+- }
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) {
+ if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5)
+ break;
+@@ -5156,22 +5112,9 @@ static void
+ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
+ {
+ struct _sas_device *sas_device, *sas_device_next;
+- struct _sas_node *sas_expander, *sas_expander_next;
++ struct _sas_node *sas_expander;
+ struct _raid_device *raid_device, *raid_device_next;
+- unsigned long flags;
+
+- _scsih_search_responding_sas_devices(ioc);
+- _scsih_search_responding_raid_devices(ioc);
+- _scsih_search_responding_expanders(ioc);
+-
+- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- ioc->shost_recovery = 0;
+- if (ioc->shost->shost_state == SHOST_RECOVERY) {
+- printk(MPT2SAS_INFO_FMT "putting controller into "
+- "SHOST_RUNNING\n", ioc->name);
+- scsi_host_set_state(ioc->shost, SHOST_RUNNING);
+- }
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ list_for_each_entry_safe(sas_device, sas_device_next,
+ &ioc->sas_device_list, list) {
+@@ -5207,16 +5150,63 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+
+- list_for_each_entry_safe(sas_expander, sas_expander_next,
+- &ioc->sas_expander_list, list) {
++ retry_expander_search:
++ sas_expander = NULL;
++ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->responding) {
+ sas_expander->responding = 0;
+ continue;
+ }
+- printk("\tremoving expander: handle(0x%04x), "
+- " sas_addr(0x%016llx)\n", sas_expander->handle,
+- (unsigned long long)sas_expander->sas_address);
+ _scsih_expander_remove(ioc, sas_expander->handle);
++ goto retry_expander_search;
++ }
++}
++
++/**
++ * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
++ * @ioc: per adapter object
++ * @reset_phase: phase
++ *
++ * The handler for doing any required cleanup or initialization.
++ *
++ * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
++ * MPT2_IOC_DONE_RESET
++ *
++ * Return nothing.
++ */ ++void ++mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) ++{ ++ switch (reset_phase) { ++ case MPT2_IOC_PRE_RESET: ++ dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " ++ "MPT2_IOC_PRE_RESET\n", ioc->name, __func__)); ++ _scsih_fw_event_off(ioc); ++ break; ++ case MPT2_IOC_AFTER_RESET: ++ dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " ++ "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__)); ++ if (ioc->tm_cmds.status & MPT2_CMD_PENDING) { ++ ioc->tm_cmds.status |= MPT2_CMD_RESET; ++ mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid); ++ complete(&ioc->tm_cmds.done); ++ } ++ _scsih_fw_event_on(ioc); ++ _scsih_flush_running_cmds(ioc); ++ break; ++ case MPT2_IOC_DONE_RESET: ++ dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " ++ "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); ++ _scsih_sas_host_refresh(ioc, 0); ++ _scsih_search_responding_sas_devices(ioc); ++ _scsih_search_responding_raid_devices(ioc); ++ _scsih_search_responding_expanders(ioc); ++ break; ++ case MPT2_IOC_RUNNING: ++ dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " ++ "MPT2_IOC_RUNNING\n", ioc->name, __func__)); ++ _scsih_remove_unresponding_devices(ioc); ++ break; + } + } + +@@ -5236,14 +5226,6 @@ _firmware_event_work(struct work_struct *work) + unsigned long flags; + struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; + +- /* This is invoked by calling _scsih_queue_rescan(). */ +- if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) { +- _scsih_fw_event_free(ioc, fw_event); +- _scsih_sas_host_refresh(ioc, 1); +- _scsih_remove_unresponding_devices(ioc); +- return; +- } +- + /* the queue is being flushed so ignore this event */ + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (ioc->fw_events_off || ioc->remove_host) { +@@ -5253,13 +5235,10 @@ _firmware_event_work(struct work_struct *work) + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + +- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->shost_recovery) { +- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + _scsih_fw_event_requeue(ioc, fw_event, 1000); + return; + } +- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + + switch (fw_event->event) { + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: +@@ -5461,6 +5440,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, + if (!sas_device) + continue; + _scsih_remove_device(ioc, sas_device->handle); ++ if (ioc->shost_recovery) ++ return; + goto retry_device_search; + } + } +@@ -5482,6 +5463,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, + if (!expander_sibling) + continue; + _scsih_expander_remove(ioc, expander_sibling->handle); ++ if (ioc->shost_recovery) ++ return; + goto retry_expander_search; + } + } +diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c +index 686695b..a53086d 100644 +--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c ++++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c +@@ -140,11 +140,18 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle, + u32 device_info; + u32 ioc_status; + ++ if (ioc->shost_recovery) { ++ printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", ++ __func__, ioc->name); ++ return -EFAULT; ++ } ++ + if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ++ + ioc->name, __FILE__, __LINE__, __func__); +- return -1; ++ return -ENXIO; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & +@@ -153,7 +160,7 @@ 
_transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle, + printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)" + "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status, + __FILE__, __LINE__, __func__); +- return -1; ++ return -EIO; + } + + memset(identify, 0, sizeof(identify)); +@@ -288,21 +295,17 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, + void *psge; + u32 sgl_flags; + u8 issue_reset = 0; +- unsigned long flags; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + u64 *sas_address_le; + u16 wait_state_count; + +- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); +- if (ioc->ioc_reset_in_progress) { +- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); ++ if (ioc->shost_recovery) { + printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } +- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + + mutex_lock(&ioc->transport_cmds.mutex); + +@@ -806,6 +809,12 @@ mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc, + struct _sas_node *sas_node; + struct _sas_phy *mpt2sas_phy; + ++ if (ioc->shost_recovery) { ++ printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", ++ __func__, ioc->name); ++ return; ++ } ++ + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +@@ -1025,7 +1034,6 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, + void *psge; + u32 sgl_flags; + u8 issue_reset = 0; +- unsigned long flags; + dma_addr_t dma_addr_in = 0; + dma_addr_t dma_addr_out = 0; + u16 wait_state_count; +@@ -1045,14 +1053,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, + return -EINVAL; + } + +- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); +- if (ioc->ioc_reset_in_progress) { +- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); ++ if (ioc->shost_recovery) { + printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } +- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + + rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); + if (rc) +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index b7b9fec..a89c421 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -2021,6 +2021,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) + + sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", + sdp->removable ? "removable " : ""); ++ put_device(&sdkp->dev); + } + + /** +@@ -2106,6 +2107,7 @@ static int sd_probe(struct device *dev) + + get_device(&sdp->sdev_gendev); + ++ get_device(&sdkp->dev); /* prevent release before async_schedule */ + async_schedule(sd_probe_async, sdkp); + + return 0; +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 9230402..4968c4c 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -1811,7 +1811,7 @@ retry: + return 0; + out: + for (i = 0; i < k; i++) +- __free_pages(schp->pages[k], order); ++ __free_pages(schp->pages[i], order); + + if (--order >= 0) + goto retry; +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index b7c1603..7c1e65d 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -501,22 +501,22 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, + } + } + +- /* +- * Now fill out the bss section. 
First pad the last page up +- * to the page boundary, and then perform a mmap to make sure +- * that there are zero-mapped pages up to and including the +- * last bss page. +- */ +- if (padzero(elf_bss)) { +- error = -EFAULT; +- goto out_close; +- } ++ if (last_bss > elf_bss) { ++ /* ++ * Now fill out the bss section. First pad the last page up ++ * to the page boundary, and then perform a mmap to make sure ++ * that there are zero-mapped pages up to and including the ++ * last bss page. ++ */ ++ if (padzero(elf_bss)) { ++ error = -EFAULT; ++ goto out_close; ++ } + +- /* What we have mapped so far */ +- elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); ++ /* What we have mapped so far */ ++ elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); + +- /* Map the last of the bss segment */ +- if (last_bss > elf_bss) { ++ /* Map the last of the bss segment */ + down_write(¤t->mm->mmap_sem); + error = do_brk(elf_bss, last_bss - elf_bss); + up_write(¤t->mm->mmap_sem); +diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h +index 3ddce03..d731092 100644 +--- a/include/linux/kvm_para.h ++++ b/include/linux/kvm_para.h +@@ -13,6 +13,7 @@ + #define KVM_ENOSYS 1000 + #define KVM_EFAULT EFAULT + #define KVM_E2BIG E2BIG ++#define KVM_EPERM EPERM + + #define KVM_HC_VAPIC_POLL_IRQ 1 + #define KVM_HC_MMU_OP 2 +diff --git a/ipc/shm.c b/ipc/shm.c +index 1bc4701..30162a5 100644 +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -410,7 +410,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) + return error; + + no_id: +- if (shp->mlock_user) /* shmflg & SHM_HUGETLB case */ ++ if (is_file_hugepages(file) && shp->mlock_user) + user_shm_unlock(size, shp->mlock_user); + fput(file); + no_file: +diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c +index d7cbc57..3f49f53 100644 +--- a/kernel/perf_counter.c ++++ b/kernel/perf_counter.c +@@ -469,7 +469,8 @@ static void update_counter_times(struct perf_counter *counter) + struct perf_counter_context *ctx = counter->ctx; + u64 run_end; + +- if (counter->state < PERF_COUNTER_STATE_INACTIVE) ++ if (counter->state < PERF_COUNTER_STATE_INACTIVE || ++ counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE) + return; + + counter->total_time_enabled = ctx->time - counter->tstamp_enabled; +@@ -518,7 +519,7 @@ static void __perf_counter_disable(void *info) + */ + if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { + update_context_time(ctx); +- update_counter_times(counter); ++ update_group_times(counter); + if (counter == counter->group_leader) + group_sched_out(counter, cpuctx, ctx); + else +@@ -573,7 +574,7 @@ static void perf_counter_disable(struct perf_counter *counter) + * in, so we can change the state safely. + */ + if (counter->state == PERF_COUNTER_STATE_INACTIVE) { +- update_counter_times(counter); ++ update_group_times(counter); + counter->state = PERF_COUNTER_STATE_OFF; + } + +@@ -851,6 +852,27 @@ retry: + } + + /* ++ * Put a counter into inactive state and update time fields. ++ * Enabling the leader of a group effectively enables all ++ * the group members that aren't explicitly disabled, so we ++ * have to update their ->tstamp_enabled also. ++ * Note: this works for group members as well as group leaders ++ * since the non-leader members' sibling_lists will be empty. 
++ */ ++static void __perf_counter_mark_enabled(struct perf_counter *counter, ++ struct perf_counter_context *ctx) ++{ ++ struct perf_counter *sub; ++ ++ counter->state = PERF_COUNTER_STATE_INACTIVE; ++ counter->tstamp_enabled = ctx->time - counter->total_time_enabled; ++ list_for_each_entry(sub, &counter->sibling_list, list_entry) ++ if (sub->state >= PERF_COUNTER_STATE_INACTIVE) ++ sub->tstamp_enabled = ++ ctx->time - sub->total_time_enabled; ++} ++ ++/* + * Cross CPU call to enable a performance counter + */ + static void __perf_counter_enable(void *info) +@@ -877,8 +899,7 @@ static void __perf_counter_enable(void *info) + + if (counter->state >= PERF_COUNTER_STATE_INACTIVE) + goto unlock; +- counter->state = PERF_COUNTER_STATE_INACTIVE; +- counter->tstamp_enabled = ctx->time - counter->total_time_enabled; ++ __perf_counter_mark_enabled(counter, ctx); + + /* + * If the counter is in a group and isn't the group leader, +@@ -971,11 +992,9 @@ static void perf_counter_enable(struct perf_counter *counter) + * Since we have the lock this context can't be scheduled + * in, so we can change the state safely. + */ +- if (counter->state == PERF_COUNTER_STATE_OFF) { +- counter->state = PERF_COUNTER_STATE_INACTIVE; +- counter->tstamp_enabled = +- ctx->time - counter->total_time_enabled; +- } ++ if (counter->state == PERF_COUNTER_STATE_OFF) ++ __perf_counter_mark_enabled(counter, ctx); ++ + out: + spin_unlock_irq(&ctx->lock); + } +@@ -1479,9 +1498,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task) + counter->attr.enable_on_exec = 0; + if (counter->state >= PERF_COUNTER_STATE_INACTIVE) + continue; +- counter->state = PERF_COUNTER_STATE_INACTIVE; +- counter->tstamp_enabled = +- ctx->time - counter->total_time_enabled; ++ __perf_counter_mark_enabled(counter, ctx); + enabled = 1; + } + +@@ -4171,6 +4188,7 @@ static int perf_copy_attr(struct perf_counter_attr __user *uattr, + if (val) + goto err_size; + } ++ size = sizeof(*attr); + } + + ret = copy_from_user(attr, uattr, size); +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index 7e595ce..46c101a 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -97,7 +97,7 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev) + dev->bss_generation++; + } + +-static u8 *find_ie(u8 num, u8 *ies, size_t len) ++static u8 *find_ie(u8 num, u8 *ies, int len) + { + while (len > 2 && ies[0] != num) { + len -= ies[1] + 2; +diff --git a/sound/pci/cs46xx/cs46xx_lib.h b/sound/pci/cs46xx/cs46xx_lib.h +index 4eb55aa..b518949 100644 +--- a/sound/pci/cs46xx/cs46xx_lib.h ++++ b/sound/pci/cs46xx/cs46xx_lib.h +@@ -35,7 +35,7 @@ + + + #ifdef CONFIG_SND_CS46XX_NEW_DSP +-#define CS46XX_MIN_PERIOD_SIZE 1 ++#define CS46XX_MIN_PERIOD_SIZE 64 + #define CS46XX_MAX_PERIOD_SIZE 1024*1024 + #else + #define CS46XX_MIN_PERIOD_SIZE 2048 +diff --git a/sound/pci/oxygen/oxygen_io.c b/sound/pci/oxygen/oxygen_io.c +index c1eb923..09b2b2a 100644 +--- a/sound/pci/oxygen/oxygen_io.c ++++ b/sound/pci/oxygen/oxygen_io.c +@@ -215,17 +215,8 @@ EXPORT_SYMBOL(oxygen_write_spi); + + void oxygen_write_i2c(struct oxygen *chip, u8 device, u8 map, u8 data) + { +- unsigned long timeout; +- + /* should not need more than about 300 us */ +- timeout = jiffies + msecs_to_jiffies(1); +- do { +- if (!(oxygen_read16(chip, OXYGEN_2WIRE_BUS_STATUS) +- & OXYGEN_2WIRE_BUSY)) +- break; +- udelay(1); +- cond_resched(); +- } while (time_after_eq(timeout, jiffies)); ++ msleep(1); + + oxygen_write8(chip, OXYGEN_2WIRE_MAP, map); + oxygen_write8(chip, OXYGEN_2WIRE_DATA, data); +diff 
--git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c +index e7348d3..b3a2de8 100644 +--- a/sound/soc/codecs/wm8350.c ++++ b/sound/soc/codecs/wm8350.c +@@ -613,7 +613,7 @@ SOC_DAPM_SINGLE("Switch", WM8350_BEEP_VOLUME, 15, 1, 1); + + /* Out4 Capture Mux */ + static const struct snd_kcontrol_new wm8350_out4_capture_controls = +-SOC_DAPM_ENUM("Route", wm8350_enum[8]); ++SOC_DAPM_ENUM("Route", wm8350_enum[7]); + + static const struct snd_soc_dapm_widget wm8350_dapm_widgets[] = { + +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c +index b4b06c7..e874ad4 100644 +--- a/tools/perf/builtin-stat.c ++++ b/tools/perf/builtin-stat.c +@@ -82,19 +82,32 @@ static u64 runtime_cycles[MAX_RUN]; + static u64 event_res[MAX_RUN][MAX_COUNTERS][3]; + static u64 event_scaled[MAX_RUN][MAX_COUNTERS]; + +-static u64 event_res_avg[MAX_COUNTERS][3]; +-static u64 event_res_noise[MAX_COUNTERS][3]; ++struct stats ++{ ++ double sum; ++ double sum_sq; ++}; + +-static u64 event_scaled_avg[MAX_COUNTERS]; ++static double avg_stats(struct stats *stats) ++{ ++ return stats->sum / run_count; ++} + +-static u64 runtime_nsecs_avg; +-static u64 runtime_nsecs_noise; ++/* ++ * stddev = sqrt(1/N (\Sum n_i^2) - avg(n)^2) ++ */ ++static double stddev_stats(struct stats *stats) ++{ ++ double avg = stats->sum / run_count; + +-static u64 walltime_nsecs_avg; +-static u64 walltime_nsecs_noise; ++ return sqrt(stats->sum_sq/run_count - avg*avg); ++} + +-static u64 runtime_cycles_avg; +-static u64 runtime_cycles_noise; ++struct stats event_res_stats[MAX_COUNTERS][3]; ++struct stats event_scaled_stats[MAX_COUNTERS]; ++struct stats runtime_nsecs_stats; ++struct stats walltime_nsecs_stats; ++struct stats runtime_cycles_stats; + + #define MATCH_EVENT(t, c, counter) \ + (attrs[counter].type == PERF_TYPE_##t && \ +@@ -278,42 +291,37 @@ static int run_perf_stat(int argc __used, const char **argv) + return WEXITSTATUS(status); + } + +-static void print_noise(u64 *count, u64 *noise) ++static void print_noise(double avg, double stddev) + { + if (run_count > 1) +- fprintf(stderr, " ( +- %7.3f%% )", +- (double)noise[0]/(count[0]+1)*100.0); ++ fprintf(stderr, " ( +- %7.3f%% )", 100*stddev / avg); + } + +-static void nsec_printout(int counter, u64 *count, u64 *noise) ++static void nsec_printout(int counter, double avg, double stddev) + { +- double msecs = (double)count[0] / 1000000; ++ double msecs = avg / 1e6; + + fprintf(stderr, " %14.6f %-24s", msecs, event_name(counter)); + + if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) { +- if (walltime_nsecs_avg) +- fprintf(stderr, " # %10.3f CPUs ", +- (double)count[0] / (double)walltime_nsecs_avg); ++ fprintf(stderr, " # %10.3f CPUs ", ++ avg / avg_stats(&walltime_nsecs_stats)); + } +- print_noise(count, noise); ++ print_noise(avg, stddev); + } + +-static void abs_printout(int counter, u64 *count, u64 *noise) ++static void abs_printout(int counter, double avg, double stddev) + { +- fprintf(stderr, " %14Ld %-24s", count[0], event_name(counter)); ++ fprintf(stderr, " %14.0f %-24s", avg, event_name(counter)); + +- if (runtime_cycles_avg && +- MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) { ++ if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) { + fprintf(stderr, " # %10.3f IPC ", +- (double)count[0] / (double)runtime_cycles_avg); ++ avg / avg_stats(&runtime_cycles_stats)); + } else { +- if (runtime_nsecs_avg) { +- fprintf(stderr, " # %10.3f M/sec", +- (double)count[0]/runtime_nsecs_avg*1000.0); +- } ++ fprintf(stderr, " # %10.3f M/sec", ++ 1000.0 * avg / 
avg_stats(&runtime_nsecs_stats)); + } +- print_noise(count, noise); ++ print_noise(avg, stddev); + } + + /* +@@ -321,12 +329,12 @@ static void abs_printout(int counter, u64 *count, u64 *noise) + */ + static void print_counter(int counter) + { +- u64 *count, *noise; ++ double avg, stddev; + int scaled; + +- count = event_res_avg[counter]; +- noise = event_res_noise[counter]; +- scaled = event_scaled_avg[counter]; ++ avg = avg_stats(&event_res_stats[counter][0]); ++ stddev = stddev_stats(&event_res_stats[counter][0]); ++ scaled = avg_stats(&event_scaled_stats[counter]); + + if (scaled == -1) { + fprintf(stderr, " %14s %-24s\n", +@@ -335,36 +343,34 @@ static void print_counter(int counter) + } + + if (nsec_counter(counter)) +- nsec_printout(counter, count, noise); ++ nsec_printout(counter, avg, stddev); + else +- abs_printout(counter, count, noise); ++ abs_printout(counter, avg, stddev); ++ ++ if (scaled) { ++ double avg_enabled, avg_running; ++ ++ avg_enabled = avg_stats(&event_res_stats[counter][1]); ++ avg_running = avg_stats(&event_res_stats[counter][2]); + +- if (scaled) + fprintf(stderr, " (scaled from %.2f%%)", +- (double) count[2] / count[1] * 100); ++ 100 * avg_running / avg_enabled); ++ } + + fprintf(stderr, "\n"); + } + +-/* +- * normalize_noise noise values down to stddev: +- */ +-static void normalize_noise(u64 *val) ++static void update_stats(const char *name, int idx, struct stats *stats, u64 *val) + { +- double res; ++ double sq = *val; + +- res = (double)*val / (run_count * sqrt((double)run_count)); +- +- *val = (u64)res; +-} +- +-static void update_avg(const char *name, int idx, u64 *avg, u64 *val) +-{ +- *avg += *val; ++ stats->sum += *val; ++ stats->sum_sq += sq * sq; + + if (verbose > 1) + fprintf(stderr, "debug: %20s[%d]: %Ld\n", name, idx, *val); + } ++ + /* + * Calculate the averages and noises: + */ +@@ -376,61 +382,22 @@ static void calc_avg(void) + fprintf(stderr, "\n"); + + for (i = 0; i < run_count; i++) { +- update_avg("runtime", 0, &runtime_nsecs_avg, runtime_nsecs + i); +- update_avg("walltime", 0, &walltime_nsecs_avg, walltime_nsecs + i); +- update_avg("runtime_cycles", 0, &runtime_cycles_avg, runtime_cycles + i); ++ update_stats("runtime", 0, &runtime_nsecs_stats, runtime_nsecs + i); ++ update_stats("walltime", 0, &walltime_nsecs_stats, walltime_nsecs + i); ++ update_stats("runtime_cycles", 0, &runtime_cycles_stats, runtime_cycles + i); + + for (j = 0; j < nr_counters; j++) { +- update_avg("counter/0", j, +- event_res_avg[j]+0, event_res[i][j]+0); +- update_avg("counter/1", j, +- event_res_avg[j]+1, event_res[i][j]+1); +- update_avg("counter/2", j, +- event_res_avg[j]+2, event_res[i][j]+2); ++ update_stats("counter/0", j, ++ event_res_stats[j]+0, event_res[i][j]+0); ++ update_stats("counter/1", j, ++ event_res_stats[j]+1, event_res[i][j]+1); ++ update_stats("counter/2", j, ++ event_res_stats[j]+2, event_res[i][j]+2); + if (event_scaled[i][j] != (u64)-1) +- update_avg("scaled", j, +- event_scaled_avg + j, event_scaled[i]+j); +- else +- event_scaled_avg[j] = -1; ++ update_stats("scaled", j, ++ event_scaled_stats + j, event_scaled[i]+j); + } + } +- runtime_nsecs_avg /= run_count; +- walltime_nsecs_avg /= run_count; +- runtime_cycles_avg /= run_count; +- +- for (j = 0; j < nr_counters; j++) { +- event_res_avg[j][0] /= run_count; +- event_res_avg[j][1] /= run_count; +- event_res_avg[j][2] /= run_count; +- } +- +- for (i = 0; i < run_count; i++) { +- runtime_nsecs_noise += +- abs((s64)(runtime_nsecs[i] - runtime_nsecs_avg)); +- walltime_nsecs_noise += +- 
abs((s64)(walltime_nsecs[i] - walltime_nsecs_avg)); +- runtime_cycles_noise += +- abs((s64)(runtime_cycles[i] - runtime_cycles_avg)); +- +- for (j = 0; j < nr_counters; j++) { +- event_res_noise[j][0] += +- abs((s64)(event_res[i][j][0] - event_res_avg[j][0])); +- event_res_noise[j][1] += +- abs((s64)(event_res[i][j][1] - event_res_avg[j][1])); +- event_res_noise[j][2] += +- abs((s64)(event_res[i][j][2] - event_res_avg[j][2])); +- } +- } +- +- normalize_noise(&runtime_nsecs_noise); +- normalize_noise(&walltime_nsecs_noise); +- normalize_noise(&runtime_cycles_noise); +- +- for (j = 0; j < nr_counters; j++) { +- normalize_noise(&event_res_noise[j][0]); +- normalize_noise(&event_res_noise[j][1]); +- normalize_noise(&event_res_noise[j][2]); +- } + } + + static void print_stat(int argc, const char **argv) +@@ -457,10 +424,11 @@ static void print_stat(int argc, const char **argv) + + fprintf(stderr, "\n"); + fprintf(stderr, " %14.9f seconds time elapsed", +- (double)walltime_nsecs_avg/1e9); ++ avg_stats(&walltime_nsecs_stats)/1e9); + if (run_count > 1) { + fprintf(stderr, " ( +- %7.3f%% )", +- 100.0*(double)walltime_nsecs_noise/(double)walltime_nsecs_avg); ++ 100*stddev_stats(&walltime_nsecs_stats) / ++ avg_stats(&walltime_nsecs_stats)); + } + fprintf(stderr, "\n\n"); + } +diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c +index 1150c6d..5f39805 100644 +--- a/virt/kvm/ioapic.c ++++ b/virt/kvm/ioapic.c +@@ -188,6 +188,8 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) + if ((edge && old_irr != ioapic->irr) || + (!edge && !entry.fields.remote_irr)) + ret = ioapic_service(ioapic, irq); ++ else ++ ret = 0; /* report coalesced interrupt */ + } + } + return ret; diff --git a/debian/patches/series/base b/debian/patches/series/base index 538d35fb9..94a0b82e7 100644 --- a/debian/patches/series/base +++ b/debian/patches/series/base @@ -36,3 +36,4 @@ + bugfix/all/drivers-gpu-drm-r128-ioctl-add-init-test.patch + bugfix/x86/fix-alternatives-on-486.patch + bugfix/x86/fix-i8xx-agp-flush.patch ++ bugfix/all/stable/2.6.31.1.patch
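
Notes on selected hunks (review sketches only; the hunks above are the
authoritative text):

drivers/scsi/mpt2sas/mpt2sas_scsih.c, _scsih_qcmd(): besides dropping the
ioc_reset_in_progress_lock around what is now a plain flag test, the hunk
splits the busy cases. A task-management command in flight on one target
only stalls that device, so it returns SCSI_MLQUEUE_DEVICE_BUSY; a host
reset or link reset stalls the whole adapter, hence SCSI_MLQUEUE_HOST_BUSY,
and the midlayer requeues accordingly. The convention, sketched with
hypothetical predicates (target_is_busy/host_is_busy stand in for the
driver's private state):

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>

    bool target_is_busy(struct scsi_cmnd *scmd);  /* hypothetical */
    bool host_is_busy(struct scsi_cmnd *scmd);    /* hypothetical */

    static int my_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
    {
        if (target_is_busy(scmd))
            return SCSI_MLQUEUE_DEVICE_BUSY;  /* requeue this device only */
        if (host_is_busy(scmd))
            return SCSI_MLQUEUE_HOST_BUSY;    /* back off the whole host */

        /* ... build the MPI request and hand it to the firmware ... */
        return 0;
    }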
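
drivers/scsi/sd.c: the get_device()/put_device() pair closes a
use-after-free window between sd_probe() returning and the async part of
the probe running; nothing else guaranteed the sdkp object stayed alive
until the worker got scheduled. The rule is to take the reference before
handing the pointer to deferred work and drop it as the worker's last act.
Kernel-style outline, with my_disk and build_disk as hypothetical names
(only get_device/put_device/async_schedule are real interfaces):

    #include <linux/async.h>
    #include <linux/device.h>

    struct my_disk { struct device dev; };
    struct my_disk *build_disk(struct device *parent);  /* hypothetical */

    static void my_probe_async(void *data, async_cookie_t cookie)
    {
        struct my_disk *dk = data;

        /* ... slow second-stage probing that dereferences dk ... */

        put_device(&dk->dev);        /* balances the get in my_probe() */
    }

    static int my_probe(struct device *dev)
    {
        struct my_disk *dk = build_disk(dev);

        get_device(&dk->dev);        /* hold dk until the worker has run */
        async_schedule(my_probe_async, dk);
        return 0;
    }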
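
drivers/scsi/sg.c: a one-character fix with real consequences. When
allocation failed at iteration k, the unwind loop freed pages[k] -- the
slot the failed iteration never filled -- k times over, leaking every block
actually allocated and handing a stale pointer to __free_pages(). Freeing
pages[i] releases each successful allocation exactly once. The shape of
the allocate-then-unwind pattern, reduced to a sketch with hypothetical
names:

    #include <linux/errno.h>
    #include <linux/mm.h>

    static int alloc_blocks(struct page **pages, int n, gfp_t gfp, int order)
    {
        int i, k;

        for (k = 0; k < n; k++) {
            pages[k] = alloc_pages(gfp, order);
            if (!pages[k])
                goto unwind;
        }
        return 0;
    unwind:
        for (i = 0; i < k; i++)              /* free what was allocated, */
            __free_pages(pages[i], order);   /* one entry each, by i     */
        return -ENOMEM;
    }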
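
kernel/perf_counter.c, perf_copy_attr(): the ABI lets userland pass an
attr struct bigger than the kernel's, provided the surplus tail is all
zeroes. The loop above the hunk verifies exactly that, but the code then
fell through to copy_from_user() with the caller's size still in hand,
overrunning the kernel's on-stack attr. The added size = sizeof(*attr)
clamps the copy once the tail is known to be harmless. The general shape
of a size-negotiated copy, sketched with a hypothetical struct and tail
check:

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct my_attr { u64 fields[8]; };   /* hypothetical ABI struct */

    /* hypothetical: true if uattr bytes in [from, to) are non-zero */
    bool tail_is_nonzero(const void __user *uattr, u32 from, u32 to);

    static int copy_versioned(struct my_attr *attr,
                              const struct my_attr __user *uattr, u32 size)
    {
        if (size > sizeof(*attr)) {            /* newer userland */
            if (tail_is_nonzero(uattr, sizeof(*attr), size))
                return -E2BIG;                 /* unknown fields are set */
            size = sizeof(*attr);              /* the fix: clamp the copy */
        } else if (size < sizeof(*attr)) {     /* older userland */
            memset(attr, 0, sizeof(*attr));    /* zero-extend the rest */
        }
        return copy_from_user(attr, uattr, size) ? -EFAULT : 0;
    }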
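
net/wireless/scan.c, find_ie(): information elements are encoded as
{id, length, payload}, so the walk advances ies[1] + 2 bytes per step.
With len as size_t, a malformed length that overshoots the buffer drives
len -= ies[1] + 2 below zero, the unsigned value wraps to something
enormous, len > 2 stays true, and the parser reads far past the end of
remote-supplied data. As a signed int the underflow goes negative and the
exit tests catch it. Standalone demo of the same shape (not the kernel
function, though it follows it closely):

    #include <stddef.h>

    typedef unsigned char u8;

    static const u8 *ie_find(u8 num, const u8 *ies, int len)
    {
        while (len > 2 && ies[0] != num) {
            len -= ies[1] + 2;   /* may go negative on malformed input */
            ies += ies[1] + 2;
        }
        if (len < 2)
            return NULL;         /* a size_t len wraps huge, missing this */
        if (len < 2 + ies[1])
            return NULL;         /* claimed payload overruns the buffer */
        return ies;
    }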
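
tools/perf/builtin-stat.c: the avg/noise bookkeeping collapses into one
accumulator per series -- a running sum and sum of squares -- from which
mean and standard deviation fall out directly as avg = sum/N and
stddev = sqrt(sum_sq/N - avg^2). The helpers below mirror the hunk, except
that the run count is passed explicitly instead of read from the run_count
global:

    #include <math.h>

    struct stats { double sum, sum_sq; };

    static void update_stats(struct stats *s, double v)
    {
        s->sum    += v;
        s->sum_sq += v * v;
    }

    static double avg_stats(const struct stats *s, int n)
    {
        return s->sum / n;
    }

    /* stddev = sqrt(1/N * \Sum x_i^2 - avg^2) */
    static double stddev_stats(const struct stats *s, int n)
    {
        double avg = s->sum / n;

        return sqrt(s->sum_sq / n - avg * avg);
    }

One caveat worth knowing: this textbook one-pass formula cancels
catastrophically when the mean dwarfs the spread. For perf's run counts it
is fine, but Welford's online algorithm is the numerically robust
alternative if that ever bites.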
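
virt/kvm/ioapic.c: callers use the return value of kvm_ioapic_set_irq()
to spot coalesced interrupts, for instance so a periodic timer can
re-inject ticks that were never delivered. When the edge/remote-IRR test
declined to service the IRQ, the pre-fix code left ret at its initial
value, so a coalesced interrupt was reported as delivered. The added else
pins the convention down. Sketched with hypothetical helpers, keeping only
the return convention:

    struct irq_line;                                   /* hypothetical */
    int irq_should_service(struct irq_line *line, int level);
    int irq_service(struct irq_line *line);

    static int set_irq_status(struct irq_line *line, int level)
    {
        if (irq_should_service(line, level))
            return irq_service(line);   /* delivered to >= 1 destination */
        return 0;                       /* coalesced: report it as lost */
    }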