diff --git a/debian/changelog b/debian/changelog index a1dee76d3..e55d022e8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -20,6 +20,110 @@ linux-2.6 (2.6.29-4) UNRELEASED; urgency=low [ Stephen R. Marenka ] * [m68k] Add 2.6.29 patches. + [ Bastian Blank ] + * Add stable release 2.6.29.2: + - Bonding: fix zero address hole bug in arp_ip_target list + - skge: fix occasional BUG during MTU change + - scsi: mpt: suppress debugobjects warning + - hugetlbfs: return negative error code for bad mount option + - NFS: Fix the XDR iovec calculation in nfs3_xdr_setaclargs + - gso: Fix support for linear packets + - agp: zero pages before sending to userspace + - virtio: fix suspend when using virtio_balloon + - Revert "console ASCII glyph 1:1 mapping" + - Input: gameport - fix attach driver code + - x86, PAT: Remove page granularity tracking for vm_insert_pfn maps + - KVM: is_long_mode() should check for EFER.LMA + - KVM: VMX: Update necessary state when guest enters long mode + - KVM: fix kvm_vm_ioctl_deassign_device + - KVM: MMU: handle compound pages in kvm_is_mmio_pfn + - KVM: Reset PIT irq injection logic when the PIT IRQ is unmasked + - KVM: Interrupt mask notifiers for ioapic + - KVM: Add CONFIG_HAVE_KVM_IRQCHIP + - KVM: Fix missing smp tlb flush in invlpg + - USB: usb-storage: augment unusual_devs entry for Simple Tech/Datafab + - USB: fix oops in cdc-wdm in case of malformed descriptors + - USB: ftdi_sio: add vendor/project id for JETI specbos 1201 spectrometer + - usb gadget: fix ethernet link reports to ethtool + - x86: disable X86_PTRACE_BTS for now + - SCSI: sg: fix q->queue_lock on scsi_error_handler path + - SCSI: sg: avoid blk_put_request/blk_rq_unmap_user in interrupt + - SCSI: sg: fix races with ioctl(SG_IO) + - SCSI: sg: fix races during device removal + - mm: pass correct mm when growing stack + - pata_hpt37x: fix HPT370 DMA timeouts + - hpt366: fix HPT370 DMA timeouts + - powerpc: Fix data-corrupting bug in __futex_atomic_op + - ALSA: hda - Fix the cmd cache keys for amp verbs + - sfc: Match calls to netif_napi_add() and netif_napi_del() + - tty: Fix leak in ti-usb + - spi: spi_write_then_read() bugfixes + - add some long-missing capabilities to fs_mask + - hrtimer: fix rq->lock inversion (again) + - x86: fix broken irq migration logic while cleaning up multiple vectors + - sched: do not count frozen tasks toward load + - dm kcopyd: fix callback race + - dm kcopyd: prepare for callback race fix + - posix-timers: fix RLIMIT_CPU && setitimer(CPUCLOCK_PROF) + - posix-timers: fix RLIMIT_CPU && fork() + - posixtimers, sched: Fix posix clock monotonicity + - cap_prctl: don't set error to 0 at 'no_change' + - SCSI: libiscsi: fix iscsi pool error path + - SCSI: libiscsi: fix iscsi pool error path + - sparc64: Fix bug in ("sparc64: Flush TLB before releasing pages.") + - ALSA: hda - add missing comma in ad1884_slave_vols + - splice: fix deadlock in splicing to file + - netfilter: {ip, ip6, arp}_tables: fix incorrect loop detection + - kprobes: Fix locking imbalance in kretprobes + - acer-wmi: Blacklist Acer Aspire One + - crypto: shash - Fix unaligned calculation with short length + - net/netrom: Fix socket locking + - af_rose/x25: Sanity check the maximum user frame size + - dm table: fix upgrade mode race + - dm: path selector use module refcount directly + - dm target: use module refcount directly + - dm snapshot: avoid having two exceptions for the same chunk + - dm snapshot: avoid dropping lock in __find_pending_exception + - dm snapshot: refactor __find_pending_exception + 
- dm io: make sync_io uninterruptible
+ - dm raid1: switch read_record from kmalloc to slab to save memory
+ - vfs: skip I_CLEAR state inodes
+ - dm: preserve bi_io_vec when resubmitting bios
+ - ixgbe: Fix potential memory leak/driver panic issue while setting up Tx &
+ Rx ring parameters
+ - mm: do_xip_mapping_read: fix length calculation
+ - mm: define a UNIQUE value for AS_UNEVICTABLE flag
+ - sysctl: fix suid_dumpable and lease-break-time sysctls
+ - cpumask: fix slab corruption caused by alloc_cpumask_var_node()
+ - ide-atapi: start DMA after issuing a packet command
+ - ide: drivers/ide/ide-atapi.c needs <linux/scatterlist.h>
+ - V4L/DVB (10943): cx88: Prevent general protection fault on rmmod
+ - r8169: Reset IntrStatus after chip reset
+ - md/raid1 - don't assume newly allocated bvecs are initialised.
+ - SCSI: sg: fix iovec bugs introduced by the block layer conversion
+ - drm/i915: fix TV mode setting in property change
+ - drm/i915: only set TV mode when any property changed
+ - drm: Use pgprot_writecombine in GEM GTT mapping to get the right bits for
+ !PAT.
+ - drm/i915: check for -EINVAL from vm_insert_pfn
+ - drm/i915: Check for dev->primary->master before dereference.
+ - drm/i915: Sync crt hotplug detection with intel video driver
+ - drm/i915: Read the right SDVO register when detecting SVDO/HDMI.
+ - drm/i915: Change DCC tiling detection case to cover only mobile parts.
+ - dock: fix dereference after kfree()
+ - ACPI: cap off P-state transition latency from buggy BIOSes
+ - x86, setup: mark %esi as clobbered in E820 BIOS call
+ - tracing/core: fix early free of cpumasks
+ - rt2x00: Fix SLAB corruption during rmmod
+ - ext4: fix locking typo in mballoc which could cause soft lockup hangs
+ - ext4: fix typo which causes a memory leak on error path
+ - MIPS: Compat: Zero upper 32-bit of offset_high and offset_low.
+ - PCI/x86: detect host bridge config space size w/o using quirks
+ - ide: Fix code dealing with sleeping devices in do_ide_request()
+ - fbdev: fix info->lock deadlock in fbcon_event_notify()
+ - fbmem: fix fb_info->lock and mm->mmap_sem circular locking dependency
+ - security/smack: fix oops when setting a size 0 SMACK64 xattr
+ -- maximilian attems <maks@debian.org>  Fri, 17 Apr 2009 11:30:55 +0200
 linux-2.6 (2.6.29-3) unstable; urgency=low
diff --git a/debian/patches/bugfix/all/stable/2.6.29.2.patch b/debian/patches/bugfix/all/stable/2.6.29.2.patch
new file mode 100644
index 000000000..2eb04588d
--- /dev/null
+++ b/debian/patches/bugfix/all/stable/2.6.29.2.patch
@@ -0,0 +1,4593 @@
+diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
+index 5ede747..0876275 100644
+--- a/Documentation/networking/bonding.txt
++++ b/Documentation/networking/bonding.txt
+@@ -1242,7 +1242,7 @@ monitoring is enabled, and vice-versa.
+ To add ARP targets:
+ # echo +192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target
+ # echo +192.168.0.101 > /sys/class/net/bond0/bonding/arp_ip_target
+- NOTE: up to 10 target addresses may be specified.
++ NOTE: up to 16 target addresses may be specified.
+ + To remove an ARP target: + # echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target +diff --git a/Makefile b/Makefile +index cdb1133..0380c7e 100644 +diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig +index f833a0b..0a2d6b8 100644 +--- a/arch/ia64/kvm/Kconfig ++++ b/arch/ia64/kvm/Kconfig +@@ -4,6 +4,10 @@ + config HAVE_KVM + bool + ++config HAVE_KVM_IRQCHIP ++ bool ++ default y ++ + menuconfig VIRTUALIZATION + bool "Virtualization" + depends on HAVE_KVM || IA64 +diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c +index 1a86f84..5abcc7f 100644 +--- a/arch/mips/kernel/linux32.c ++++ b/arch/mips/kernel/linux32.c +@@ -134,9 +134,9 @@ SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy, + return sys_ftruncate(fd, merge_64(a2, a3)); + } + +-SYSCALL_DEFINE5(32_llseek, unsigned long, fd, unsigned long, offset_high, +- unsigned long, offset_low, loff_t __user *, result, +- unsigned long, origin) ++SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high, ++ unsigned int, offset_low, loff_t __user *, result, ++ unsigned int, origin) + { + return sys_llseek(fd, offset_high, offset_low, result, origin); + } +diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h +index 6d406c5..9696cc3 100644 +--- a/arch/powerpc/include/asm/futex.h ++++ b/arch/powerpc/include/asm/futex.h +@@ -27,7 +27,7 @@ + PPC_LONG "1b,4b,2b,4b\n" \ + ".previous" \ + : "=&r" (oldval), "=&r" (ret) \ +- : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \ ++ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ + : "cr0", "memory") + + static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +@@ -47,19 +47,19 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) + + switch (op) { + case FUTEX_OP_SET: +- __futex_atomic_op("", ret, oldval, uaddr, oparg); ++ __futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: +- __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg); ++ __futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: +- __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg); ++ __futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: +- __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg); ++ __futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_XOR: +- __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg); ++ __futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; +diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig +index 6dbdc48..03becdf 100644 +--- a/arch/powerpc/kvm/Kconfig ++++ b/arch/powerpc/kvm/Kconfig +@@ -2,6 +2,9 @@ + # KVM configuration + # + ++config HAVE_KVM_IRQCHIP ++ bool ++ + menuconfig VIRTUALIZATION + bool "Virtualization" + ---help--- +diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig +index e051cad..3e260b7 100644 +--- a/arch/s390/kvm/Kconfig ++++ b/arch/s390/kvm/Kconfig +@@ -4,6 +4,9 @@ + config HAVE_KVM + bool + ++config HAVE_KVM_IRQCHIP ++ bool ++ + menuconfig VIRTUALIZATION + bool "Virtualization" + default y +diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h +index 0aaa086..ee38e73 100644 +--- a/arch/sparc/include/asm/tlb_64.h ++++ b/arch/sparc/include/asm/tlb_64.h +@@ -57,9 +57,9 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned i + + static inline void 
tlb_flush_mmu(struct mmu_gather *mp)
+ {
++ if (!mp->fullmm)
++ flush_tlb_pending();
+ if (mp->need_flush) {
+- if (!mp->fullmm)
+- flush_tlb_pending();
+ free_pages_and_swap_cache(mp->pages, mp->pages_nr);
+ mp->pages_nr = 0;
+ mp->need_flush = 0;
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index c98d52e..6ed3aca 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -523,6 +523,7 @@ config X86_PTRACE_BTS
+ bool "Branch Trace Store"
+ default y
+ depends on X86_DEBUGCTLMSR
++ depends on BROKEN
+ help
+ This adds a ptrace interface to the hardware's branch trace store.
+
+diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
+index 8c3c25f..a99dbbe 100644
+--- a/arch/x86/boot/memory.c
++++ b/arch/x86/boot/memory.c
+@@ -27,13 +27,14 @@ static int detect_memory_e820(void)
+ do {
+ size = sizeof(struct e820entry);
+
+- /* Important: %edx is clobbered by some BIOSes,
+- so it must be either used for the error output
++ /* Important: %edx and %esi are clobbered by some BIOSes,
++ so they must be either used for the error output
+ or explicitly marked clobbered. */
+ asm("int $0x15; setc %0"
+ : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
+ "=m" (*desc)
+- : "D" (desc), "d" (SMAP), "a" (0xe820));
++ : "D" (desc), "d" (SMAP), "a" (0xe820)
++ : "esi");
+
+ /* BIOSes which terminate the chain with CF = 1 as opposed
+ to %ebx = 0 don't always report the SMAP signature on
+diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+index 4b1c319..89c676d 100644
+--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
++++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+@@ -680,6 +680,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ perf->states[i].transition_latency * 1000;
+ }
+
++ /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
++ if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
++ policy->cpuinfo.transition_latency > 20 * 1000) {
++ static int print_once;
++ policy->cpuinfo.transition_latency = 20 * 1000;
++ if (!print_once) {
++ print_once = 1;
++ printk(KERN_INFO "Capping off P-state tranision latency"
++ " at 20 uS\n");
++ }
++ }
++
+ data->max_freq = perf->states[0].core_frequency * 1000;
+ /* table init */
+ for (i=0; i<perf->state_count; i++) {
+diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
+index bc7ac4d..7086b24 100644
+--- a/arch/x86/kernel/io_apic.c
++++ b/arch/x86/kernel/io_apic.c
+@@ -2475,6 +2475,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
+ me = smp_processor_id();
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+ unsigned int irq;
++ unsigned int irr;
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+ irq = __get_cpu_var(vector_irq)[vector];
+@@ -2494,6 +2495,18 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
+ if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+ goto unlock;
+
++ irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
++ /*
++ * Check if the vector that needs to be cleanedup is
++ * registered at the cpu's IRR. If so, then this is not
++ * the best time to clean it up. Lets clean it up in the
++ * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
++ * to myself.
++ */ ++ if (irr & (1 << (vector % 32))) { ++ send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); ++ goto unlock; ++ } + __get_cpu_var(vector_irq)[vector] = -1; + cfg->move_cleanup_count--; + unlock: +diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig +index b81125f..0a303c3 100644 +--- a/arch/x86/kvm/Kconfig ++++ b/arch/x86/kvm/Kconfig +@@ -4,6 +4,10 @@ + config HAVE_KVM + bool + ++config HAVE_KVM_IRQCHIP ++ bool ++ default y ++ + menuconfig VIRTUALIZATION + bool "Virtualization" + depends on HAVE_KVM || X86 +diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c +index 72bd275..3dceaef 100644 +--- a/arch/x86/kvm/i8254.c ++++ b/arch/x86/kvm/i8254.c +@@ -536,6 +536,16 @@ void kvm_pit_reset(struct kvm_pit *pit) + pit->pit_state.irq_ack = 1; + } + ++static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask) ++{ ++ struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier); ++ ++ if (!mask) { ++ atomic_set(&pit->pit_state.pit_timer.pending, 0); ++ pit->pit_state.irq_ack = 1; ++ } ++} ++ + struct kvm_pit *kvm_create_pit(struct kvm *kvm) + { + struct kvm_pit *pit; +@@ -584,6 +594,9 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) + + kvm_pit_reset(pit); + ++ pit->mask_notifier.func = pit_mask_notifer; ++ kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); ++ + return pit; + } + +@@ -592,6 +605,8 @@ void kvm_free_pit(struct kvm *kvm) + struct hrtimer *timer; + + if (kvm->arch.vpit) { ++ kvm_unregister_irq_mask_notifier(kvm, 0, ++ &kvm->arch.vpit->mask_notifier); + mutex_lock(&kvm->arch.vpit->pit_state.lock); + timer = &kvm->arch.vpit->pit_state.pit_timer.timer; + hrtimer_cancel(timer); +diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h +index 4178022..0dfb936 100644 +--- a/arch/x86/kvm/i8254.h ++++ b/arch/x86/kvm/i8254.h +@@ -45,6 +45,7 @@ struct kvm_pit { + struct kvm *kvm; + struct kvm_kpit_state pit_state; + int irq_source_id; ++ struct kvm_irq_mask_notifier mask_notifier; + }; + + #define KVM_PIT_BASE_ADDRESS 0x40 +diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h +index 258e5d5..eaab214 100644 +--- a/arch/x86/kvm/mmu.h ++++ b/arch/x86/kvm/mmu.h +@@ -54,7 +54,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) + static inline int is_long_mode(struct kvm_vcpu *vcpu) + { + #ifdef CONFIG_X86_64 +- return vcpu->arch.shadow_efer & EFER_LME; ++ return vcpu->arch.shadow_efer & EFER_LMA; + #else + return 0; + #endif +diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h +index c95a67d..89addbd 100644 +--- a/arch/x86/kvm/paging_tmpl.h ++++ b/arch/x86/kvm/paging_tmpl.h +@@ -476,16 +476,20 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw, + if (level == PT_PAGE_TABLE_LEVEL || + ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) { + struct kvm_mmu_page *sp = page_header(__pa(sptep)); ++ int need_flush = 0; + + sw->pte_gpa = (sp->gfn << PAGE_SHIFT); + sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); + + if (is_shadow_present_pte(*sptep)) { ++ need_flush = 1; + rmap_remove(vcpu->kvm, sptep); + if (is_large_pte(*sptep)) + --vcpu->kvm->stat.lpages; + } + set_shadow_pte(sptep, shadow_trap_nonpresent_pte); ++ if (need_flush) ++ kvm_flush_remote_tlbs(vcpu->kvm); + return 1; + } + if (!is_shadow_present_pte(*sptep)) +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 90de444..898910c 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1433,6 +1433,29 @@ continue_rmode: + init_rmode(vcpu->kvm); + } + ++static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) ++{ ++ struct vcpu_vmx *vmx = 
to_vmx(vcpu); ++ struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); ++ ++ vcpu->arch.shadow_efer = efer; ++ if (!msr) ++ return; ++ if (efer & EFER_LMA) { ++ vmcs_write32(VM_ENTRY_CONTROLS, ++ vmcs_read32(VM_ENTRY_CONTROLS) | ++ VM_ENTRY_IA32E_MODE); ++ msr->data = efer; ++ } else { ++ vmcs_write32(VM_ENTRY_CONTROLS, ++ vmcs_read32(VM_ENTRY_CONTROLS) & ++ ~VM_ENTRY_IA32E_MODE); ++ ++ msr->data = efer & ~EFER_LME; ++ } ++ setup_msrs(vmx); ++} ++ + #ifdef CONFIG_X86_64 + + static void enter_lmode(struct kvm_vcpu *vcpu) +@@ -1447,13 +1470,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu) + (guest_tr_ar & ~AR_TYPE_MASK) + | AR_TYPE_BUSY_64_TSS); + } +- + vcpu->arch.shadow_efer |= EFER_LMA; +- +- find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME; +- vmcs_write32(VM_ENTRY_CONTROLS, +- vmcs_read32(VM_ENTRY_CONTROLS) +- | VM_ENTRY_IA32E_MODE); ++ vmx_set_efer(vcpu, vcpu->arch.shadow_efer); + } + + static void exit_lmode(struct kvm_vcpu *vcpu) +@@ -1612,30 +1630,6 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) + vmcs_writel(GUEST_CR4, hw_cr4); + } + +-static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) +-{ +- struct vcpu_vmx *vmx = to_vmx(vcpu); +- struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); +- +- vcpu->arch.shadow_efer = efer; +- if (!msr) +- return; +- if (efer & EFER_LMA) { +- vmcs_write32(VM_ENTRY_CONTROLS, +- vmcs_read32(VM_ENTRY_CONTROLS) | +- VM_ENTRY_IA32E_MODE); +- msr->data = efer; +- +- } else { +- vmcs_write32(VM_ENTRY_CONTROLS, +- vmcs_read32(VM_ENTRY_CONTROLS) & +- ~VM_ENTRY_IA32E_MODE); +- +- msr->data = efer & ~EFER_LME; +- } +- setup_msrs(vmx); +-} +- + static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) + { + struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; +diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c +index 21bc1f7..441489c 100644 +--- a/arch/x86/mm/pat.c ++++ b/arch/x86/mm/pat.c +@@ -713,29 +713,28 @@ static void free_pfn_range(u64 paddr, unsigned long size) + * + * If the vma has a linear pfn mapping for the entire range, we get the prot + * from pte and reserve the entire vma range with single reserve_pfn_range call. +- * Otherwise, we reserve the entire vma range, my ging through the PTEs page +- * by page to get physical address and protection. + */ + int track_pfn_vma_copy(struct vm_area_struct *vma) + { +- int retval = 0; +- unsigned long i, j; + resource_size_t paddr; + unsigned long prot; +- unsigned long vma_start = vma->vm_start; +- unsigned long vma_end = vma->vm_end; +- unsigned long vma_size = vma_end - vma_start; ++ unsigned long vma_size = vma->vm_end - vma->vm_start; + pgprot_t pgprot; + + if (!pat_enabled) + return 0; + ++ /* ++ * For now, only handle remap_pfn_range() vmas where ++ * is_linear_pfn_mapping() == TRUE. Handling of ++ * vm_insert_pfn() is TBD. ++ */ + if (is_linear_pfn_mapping(vma)) { + /* + * reserve the whole chunk covered by vma. We need the + * starting address and protection from pte. 
+ */ +- if (follow_phys(vma, vma_start, 0, &prot, &paddr)) { ++ if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { + WARN_ON_ONCE(1); + return -EINVAL; + } +@@ -743,28 +742,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) + return reserve_pfn_range(paddr, vma_size, &pgprot, 1); + } + +- /* reserve entire vma page by page, using pfn and prot from pte */ +- for (i = 0; i < vma_size; i += PAGE_SIZE) { +- if (follow_phys(vma, vma_start + i, 0, &prot, &paddr)) +- continue; +- +- pgprot = __pgprot(prot); +- retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1); +- if (retval) +- goto cleanup_ret; +- } + return 0; +- +-cleanup_ret: +- /* Reserve error: Cleanup partial reservation and return error */ +- for (j = 0; j < i; j += PAGE_SIZE) { +- if (follow_phys(vma, vma_start + j, 0, &prot, &paddr)) +- continue; +- +- free_pfn_range(paddr, PAGE_SIZE); +- } +- +- return retval; + } + + /* +@@ -774,50 +752,28 @@ cleanup_ret: + * prot is passed in as a parameter for the new mapping. If the vma has a + * linear pfn mapping for the entire range reserve the entire vma range with + * single reserve_pfn_range call. +- * Otherwise, we look t the pfn and size and reserve only the specified range +- * page by page. +- * +- * Note that this function can be called with caller trying to map only a +- * subrange/page inside the vma. + */ + int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long size) + { +- int retval = 0; +- unsigned long i, j; +- resource_size_t base_paddr; + resource_size_t paddr; +- unsigned long vma_start = vma->vm_start; +- unsigned long vma_end = vma->vm_end; +- unsigned long vma_size = vma_end - vma_start; ++ unsigned long vma_size = vma->vm_end - vma->vm_start; + + if (!pat_enabled) + return 0; + ++ /* ++ * For now, only handle remap_pfn_range() vmas where ++ * is_linear_pfn_mapping() == TRUE. Handling of ++ * vm_insert_pfn() is TBD. ++ */ + if (is_linear_pfn_mapping(vma)) { + /* reserve the whole chunk starting from vm_pgoff */ + paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; + return reserve_pfn_range(paddr, vma_size, prot, 0); + } + +- /* reserve page by page using pfn and size */ +- base_paddr = (resource_size_t)pfn << PAGE_SHIFT; +- for (i = 0; i < size; i += PAGE_SIZE) { +- paddr = base_paddr + i; +- retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0); +- if (retval) +- goto cleanup_ret; +- } + return 0; +- +-cleanup_ret: +- /* Reserve error: Cleanup partial reservation and return error */ +- for (j = 0; j < i; j += PAGE_SIZE) { +- paddr = base_paddr + j; +- free_pfn_range(paddr, PAGE_SIZE); +- } +- +- return retval; + } + + /* +@@ -828,39 +784,23 @@ cleanup_ret: + void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, + unsigned long size) + { +- unsigned long i; + resource_size_t paddr; +- unsigned long prot; +- unsigned long vma_start = vma->vm_start; +- unsigned long vma_end = vma->vm_end; +- unsigned long vma_size = vma_end - vma_start; ++ unsigned long vma_size = vma->vm_end - vma->vm_start; + + if (!pat_enabled) + return; + ++ /* ++ * For now, only handle remap_pfn_range() vmas where ++ * is_linear_pfn_mapping() == TRUE. Handling of ++ * vm_insert_pfn() is TBD. 
++ */ + if (is_linear_pfn_mapping(vma)) { + /* free the whole chunk starting from vm_pgoff */ + paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; + free_pfn_range(paddr, vma_size); + return; + } +- +- if (size != 0 && size != vma_size) { +- /* free page by page, using pfn and size */ +- paddr = (resource_size_t)pfn << PAGE_SHIFT; +- for (i = 0; i < size; i += PAGE_SIZE) { +- paddr = paddr + i; +- free_pfn_range(paddr, PAGE_SIZE); +- } +- } else { +- /* free entire vma, page by page, using the pfn from pte */ +- for (i = 0; i < vma_size; i += PAGE_SIZE) { +- if (follow_phys(vma, vma_start + i, 0, &prot, &paddr)) +- continue; +- +- free_pfn_range(paddr, PAGE_SIZE); +- } +- } + } + + pgprot_t pgprot_writecombine(pgprot_t prot) +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c +index 7d388d5..096b0ed 100644 +--- a/arch/x86/pci/fixup.c ++++ b/arch/x86/pci/fixup.c +@@ -495,26 +495,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015, + pci_siemens_interrupt_controller); + + /* +- * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have +- * 4096 bytes configuration space for each function of their processor +- * configuration space. +- */ +-static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev) +-{ +- dev->cfg_size = pci_cfg_space_size_ext(dev); +-} +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size); +- +-/* + * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from + * confusing the PCI engine: + */ +diff --git a/crypto/shash.c b/crypto/shash.c +index d5a2b61..6792a67 100644 +--- a/crypto/shash.c ++++ b/crypto/shash.c +@@ -82,6 +82,9 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, + u8 buf[shash_align_buffer_size(unaligned_len, alignmask)] + __attribute__ ((aligned)); + ++ if (unaligned_len > len) ++ unaligned_len = len; ++ + memcpy(buf, data, unaligned_len); + + return shash->update(desc, buf, unaligned_len) ?: +diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c +index 35094f2..8f62fa0 100644 +--- a/drivers/acpi/dock.c ++++ b/drivers/acpi/dock.c +@@ -1146,9 +1146,10 @@ static int __init dock_init(void) + static void __exit dock_exit(void) + { + struct dock_station *dock_station; ++ struct dock_station *tmp; + + unregister_acpi_bus_notifier(&dock_acpi_notifier); +- list_for_each_entry(dock_station, &dock_stations, sibiling) ++ list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling) + dock_remove(dock_station); + } + +diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c +index 4216399..233a5fd 100644 +--- a/drivers/ata/pata_hpt37x.c ++++ b/drivers/ata/pata_hpt37x.c +@@ -8,7 +8,7 @@ + * Copyright (C) 1999-2003 Andre Hedrick + * Portions Copyright (C) 2001 Sun Microsystems, Inc. 
+ * Portions Copyright (C) 2003 Red Hat Inc +- * Portions Copyright (C) 2005-2007 MontaVista Software, Inc. ++ * Portions Copyright (C) 2005-2009 MontaVista Software, Inc. + * + * TODO + * Look into engine reset on timeout errors. Should not be required. +@@ -24,7 +24,7 @@ + #include + + #define DRV_NAME "pata_hpt37x" +-#define DRV_VERSION "0.6.11" ++#define DRV_VERSION "0.6.12" + + struct hpt_clock { + u8 xfer_speed; +@@ -445,23 +445,6 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) + } + + /** +- * hpt370_bmdma_start - DMA engine begin +- * @qc: ATA command +- * +- * The 370 and 370A want us to reset the DMA engine each time we +- * use it. The 372 and later are fine. +- */ +- +-static void hpt370_bmdma_start(struct ata_queued_cmd *qc) +-{ +- struct ata_port *ap = qc->ap; +- struct pci_dev *pdev = to_pci_dev(ap->host->dev); +- pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); +- udelay(10); +- ata_bmdma_start(qc); +-} +- +-/** + * hpt370_bmdma_end - DMA engine stop + * @qc: ATA command + * +@@ -598,7 +581,6 @@ static struct scsi_host_template hpt37x_sht = { + static struct ata_port_operations hpt370_port_ops = { + .inherits = &ata_bmdma_port_ops, + +- .bmdma_start = hpt370_bmdma_start, + .bmdma_stop = hpt370_bmdma_stop, + + .mode_filter = hpt370_filter, +diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c +index 10d6cbd..2224b76 100644 +--- a/drivers/char/agp/generic.c ++++ b/drivers/char/agp/generic.c +@@ -1226,7 +1226,7 @@ int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *m + int i, ret = -ENOMEM; + + for (i = 0; i < num_pages; i++) { +- page = alloc_page(GFP_KERNEL | GFP_DMA32); ++ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); + /* agp_free_memory() needs gart address */ + if (page == NULL) + goto out; +@@ -1257,7 +1257,7 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge) + { + struct page * page; + +- page = alloc_page(GFP_KERNEL | GFP_DMA32); ++ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); + if (page == NULL) + return NULL; + +diff --git a/drivers/char/vt.c b/drivers/char/vt.c +index 7900bd6..60453ab 100644 +--- a/drivers/char/vt.c ++++ b/drivers/char/vt.c +@@ -2271,7 +2271,7 @@ rescan_last_byte: + continue; /* nothing to display */ + } + /* Glyph not found */ +- if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) { ++ if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) { + /* In legacy mode use the glyph we get by a 1:1 mapping. 
+ This would make absolutely no sense with Unicode in mind, + but do this for ASCII characters since a font may lack +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index 88d3368..7ee1ce1 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -505,7 +505,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) + struct drm_map *map = NULL; + struct drm_gem_object *obj; + struct drm_hash_item *hash; +- unsigned long prot; + int ret = 0; + + mutex_lock(&dev->struct_mutex); +@@ -538,11 +537,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) + vma->vm_ops = obj->dev->driver->gem_vm_ops; + vma->vm_private_data = map->handle; + /* FIXME: use pgprot_writecombine when available */ +- prot = pgprot_val(vma->vm_page_prot); +-#ifdef CONFIG_X86 +- prot |= _PAGE_CACHE_WC; +-#endif +- vma->vm_page_prot = __pgprot(prot); ++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + /* Take a ref for this mapping of the object, so that the fault + * handler can dereference the mmap offset's pointer to the object. +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c +index 6d21b9e..908d24e 100644 +--- a/drivers/gpu/drm/i915/i915_dma.c ++++ b/drivers/gpu/drm/i915/i915_dma.c +@@ -41,7 +41,6 @@ + int i915_wait_ring(struct drm_device * dev, int n, const char *caller) + { + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + drm_i915_ring_buffer_t *ring = &(dev_priv->ring); + u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; + u32 last_acthd = I915_READ(acthd_reg); +@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) + if (ring->space >= n) + return 0; + +- if (master_priv->sarea_priv) +- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; ++ if (dev->primary->master) { ++ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; ++ } ++ + + if (ring->head != last_head) + i = 0; +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 37427e4..fb6390a 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -603,6 +603,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + case -EAGAIN: + return VM_FAULT_OOM; + case -EFAULT: ++ case -EINVAL: + return VM_FAULT_SIGBUS; + default: + return VM_FAULT_NOPAGE; +diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c +index 7fb4191..4cce1ae 100644 +--- a/drivers/gpu/drm/i915/i915_gem_tiling.c ++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c +@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) + */ + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; +- } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) || +- IS_GM45(dev)) { ++ } else if (IS_MOBILE(dev)) { + uint32_t dcc; + +- /* On 915-945 and GM965, channel interleave by the CPU is +- * determined by DCC. The CPU will alternate based on bit 6 +- * in interleaved mode, and the GPU will then also alternate +- * on bit 6, 9, and 10 for X, but the CPU may also optionally +- * alternate based on bit 17 (XOR not disabled and XOR +- * bit == 17). ++ /* On mobile 9xx chipsets, channel interleave by the CPU is ++ * determined by DCC. For single-channel, neither the CPU ++ * nor the GPU do swizzling. 
For dual channel interleaved, ++ * the GPU's interleave is bit 9 and 10 for X tiled, and bit ++ * 9 for Y tiled. The CPU's interleave is independent, and ++ * can be based on either bit 11 (haven't seen this yet) or ++ * bit 17 (common). + */ + dcc = I915_READ(DCC); + switch (dcc & DCC_ADDRESSING_MODE_MASK) { +@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + break; + case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: +- if (IS_I915G(dev) || IS_I915GM(dev) || +- dcc & DCC_CHANNEL_XOR_DISABLE) { ++ if (dcc & DCC_CHANNEL_XOR_DISABLE) { ++ /* This is the base swizzling by the GPU for ++ * tiled buffers. ++ */ + swizzle_x = I915_BIT_6_SWIZZLE_9_10; + swizzle_y = I915_BIT_6_SWIZZLE_9; +- } else if ((IS_I965GM(dev) || IS_GM45(dev)) && +- (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { +- /* GM965/GM45 does either bit 11 or bit 17 +- * swizzling. +- */ ++ } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { ++ /* Bit 11 swizzling by the CPU in addition. */ + swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; + swizzle_y = I915_BIT_6_SWIZZLE_9_11; + } else { +- /* Bit 17 or perhaps other swizzling */ ++ /* Bit 17 swizzling by the CPU in addition. */ + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + } +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 90600d8..cc2938d 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -629,6 +629,22 @@ + #define TV_HOTPLUG_INT_EN (1 << 18) + #define CRT_HOTPLUG_INT_EN (1 << 9) + #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) ++#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) ++/* must use period 64 on GM45 according to docs */ ++#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8) ++#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7) ++#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7) ++#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5) ++#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5) ++#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5) ++#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5) ++#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5) ++#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4) ++#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) ++#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) ++#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) ++#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ ++ + + #define PORT_HOTPLUG_STAT 0x61114 + #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c +index dcaed34..61c108e 100644 +--- a/drivers/gpu/drm/i915/intel_crt.c ++++ b/drivers/gpu/drm/i915/intel_crt.c +@@ -133,20 +133,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- u32 temp; +- +- unsigned long timeout = jiffies + msecs_to_jiffies(1000); +- +- temp = I915_READ(PORT_HOTPLUG_EN); +- +- I915_WRITE(PORT_HOTPLUG_EN, +- temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5)); ++ u32 hotplug_en; ++ int i, tries = 0; ++ /* ++ * On 4 series desktop, CRT detect sequence need to be done twice ++ * to get a reliable result. 
++ */ + +- do { +- if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)) +- break; +- msleep(1); +- } while (time_after(timeout, jiffies)); ++ if (IS_G4X(dev) && !IS_GM45(dev)) ++ tries = 2; ++ else ++ tries = 1; ++ hotplug_en = I915_READ(PORT_HOTPLUG_EN); ++ hotplug_en &= ~(CRT_HOTPLUG_MASK); ++ hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; ++ ++ if (IS_GM45(dev)) ++ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; ++ ++ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; ++ ++ for (i = 0; i < tries ; i++) { ++ unsigned long timeout; ++ /* turn on the FORCE_DETECT */ ++ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); ++ timeout = jiffies + msecs_to_jiffies(1000); ++ /* wait for FORCE_DETECT to go off */ ++ do { ++ if (!(I915_READ(PORT_HOTPLUG_EN) & ++ CRT_HOTPLUG_FORCE_DETECT)) ++ break; ++ msleep(1); ++ } while (time_after(timeout, jiffies)); ++ } + + if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == + CRT_HOTPLUG_MONITOR_COLOR) +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index a283427..601a76f 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -1474,13 +1474,21 @@ static void intel_setup_outputs(struct drm_device *dev) + + if (IS_I9XX(dev)) { + int found; ++ u32 reg; + + if (I915_READ(SDVOB) & SDVO_DETECTED) { + found = intel_sdvo_init(dev, SDVOB); + if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) + intel_hdmi_init(dev, SDVOB); + } +- if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { ++ ++ /* Before G4X SDVOC doesn't have its own detect register */ ++ if (IS_G4X(dev)) ++ reg = SDVOC; ++ else ++ reg = SDVOB; ++ ++ if (I915_READ(reg) & SDVO_DETECTED) { + found = intel_sdvo_init(dev, SDVOC); + if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) + intel_hdmi_init(dev, SDVOC); +diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c +index 56485d6..b05cb67 100644 +--- a/drivers/gpu/drm/i915/intel_tv.c ++++ b/drivers/gpu/drm/i915/intel_tv.c +@@ -1558,33 +1558,49 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop + struct drm_device *dev = connector->dev; + struct intel_output *intel_output = to_intel_output(connector); + struct intel_tv_priv *tv_priv = intel_output->dev_priv; ++ struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_crtc *crtc = encoder->crtc; + int ret = 0; ++ bool changed = false; + + ret = drm_connector_property_set_value(connector, property, val); + if (ret < 0) + goto out; + +- if (property == dev->mode_config.tv_left_margin_property) ++ if (property == dev->mode_config.tv_left_margin_property && ++ tv_priv->margin[TV_MARGIN_LEFT] != val) { + tv_priv->margin[TV_MARGIN_LEFT] = val; +- else if (property == dev->mode_config.tv_right_margin_property) ++ changed = true; ++ } else if (property == dev->mode_config.tv_right_margin_property && ++ tv_priv->margin[TV_MARGIN_RIGHT] != val) { + tv_priv->margin[TV_MARGIN_RIGHT] = val; +- else if (property == dev->mode_config.tv_top_margin_property) ++ changed = true; ++ } else if (property == dev->mode_config.tv_top_margin_property && ++ tv_priv->margin[TV_MARGIN_TOP] != val) { + tv_priv->margin[TV_MARGIN_TOP] = val; +- else if (property == dev->mode_config.tv_bottom_margin_property) ++ changed = true; ++ } else if (property == dev->mode_config.tv_bottom_margin_property && ++ tv_priv->margin[TV_MARGIN_BOTTOM] != val) { + tv_priv->margin[TV_MARGIN_BOTTOM] = val; +- else if (property == dev->mode_config.tv_mode_property) { ++ changed = true; ++ } else if (property == 
dev->mode_config.tv_mode_property) {
+ if (val >= NUM_TV_MODES) {
+ ret = -EINVAL;
+ goto out;
+ }
++ if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
++ goto out;
++
+ tv_priv->tv_format = tv_modes[val].name;
+- intel_tv_mode_set(&intel_output->enc, NULL, NULL);
++ changed = true;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+- intel_tv_mode_set(&intel_output->enc, NULL, NULL);
++ if (changed && crtc)
++ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
++ crtc->y, crtc->fb);
+ out:
+ return ret;
+ }
+diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
+index 3eb9b5c..5ff6962 100644
+--- a/drivers/ide/hpt366.c
++++ b/drivers/ide/hpt366.c
+@@ -114,6 +114,8 @@
+ * the register setting lists into the table indexed by the clock selected
+ * - set the correct hwif->ultra_mask for each individual chip
+ * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards
++ * - stop resetting HPT370's state machine before each DMA transfer as that has
++ * caused more harm than good
+ * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
+ */
+
+@@ -133,7 +135,7 @@
+ #define DRV_NAME "hpt366"
+
+ /* various tuning parameters */
+-#define HPT_RESET_STATE_ENGINE
++#undef HPT_RESET_STATE_ENGINE
+ #undef HPT_DELAY_INTERRUPT
+
+ static const char *quirk_drives[] = {
+diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
+index e9d042d..53a9e8d 100644
+--- a/drivers/ide/ide-atapi.c
++++ b/drivers/ide/ide-atapi.c
+@@ -6,6 +6,8 @@
+ #include <linux/cdrom.h>
+ #include <linux/delay.h>
+ #include <linux/ide.h>
++#include <linux/scatterlist.h>
++
+ #include <scsi/scsi.h>
+
+ #ifdef DEBUG
+@@ -566,6 +568,10 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
+ : ide_pc_intr),
+ timeout, expiry);
+
++ /* Send the actual packet */
++ if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
++ hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
++
+ /* Begin DMA, if necessary */
+ if (dev_is_idecd(drive)) {
+ if (drive->dma)
+@@ -577,10 +583,6 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
+ }
+ }
+
+- /* Send the actual packet */
+- if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
+- hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
+-
+ return ide_started;
+ }
+
+diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
+index a9a6c20..af70777 100644
+--- a/drivers/ide/ide-io.c
++++ b/drivers/ide/ide-io.c
+@@ -736,11 +736,10 @@ repeat:
+ prev_port = hwif->host->cur_port;
+ hwif->rq = NULL;
+
+- if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
+- if (time_before(drive->sleep, jiffies)) {
+- ide_unlock_port(hwif);
+- goto plug_device;
+- }
++ if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
++ time_after(drive->sleep, jiffies)) {
++ ide_unlock_port(hwif);
++ goto plug_device;
+ }
+
+ if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
+diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
+index ebf4be5..2d175b5 100644
+--- a/drivers/input/gameport/gameport.c
++++ b/drivers/input/gameport/gameport.c
+@@ -50,9 +50,8 @@ static LIST_HEAD(gameport_list);
+
+ static struct bus_type gameport_bus;
+
+-static void gameport_add_driver(struct gameport_driver *drv);
+ static void gameport_add_port(struct gameport *gameport);
+-static void gameport_destroy_port(struct gameport *gameport);
++static void gameport_attach_driver(struct gameport_driver *drv);
+ static void gameport_reconnect_port(struct gameport *gameport);
+ static void gameport_disconnect_port(struct gameport *gameport);
+
+@@ -230,7 +229,6 @@ static void gameport_find_driver(struct gameport *gameport)
+
+ enum gameport_event_type {
+ GAMEPORT_REGISTER_PORT,
+-
GAMEPORT_REGISTER_DRIVER, + GAMEPORT_ATTACH_DRIVER, + }; + +@@ -374,8 +372,8 @@ static void gameport_handle_event(void) + gameport_add_port(event->object); + break; + +- case GAMEPORT_REGISTER_DRIVER: +- gameport_add_driver(event->object); ++ case GAMEPORT_ATTACH_DRIVER: ++ gameport_attach_driver(event->object); + break; + + default: +@@ -706,14 +704,14 @@ static int gameport_driver_remove(struct device *dev) + return 0; + } + +-static void gameport_add_driver(struct gameport_driver *drv) ++static void gameport_attach_driver(struct gameport_driver *drv) + { + int error; + +- error = driver_register(&drv->driver); ++ error = driver_attach(&drv->driver); + if (error) + printk(KERN_ERR +- "gameport: driver_register() failed for %s, error: %d\n", ++ "gameport: driver_attach() failed for %s, error: %d\n", + drv->driver.name, error); + } + +diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h +index d3ec217..3a8cfa2 100644 +--- a/drivers/md/dm-bio-record.h ++++ b/drivers/md/dm-bio-record.h +@@ -16,30 +16,56 @@ + * functions in this file help the target record and restore the + * original bio state. + */ ++ ++struct dm_bio_vec_details { ++#if PAGE_SIZE < 65536 ++ __u16 bv_len; ++ __u16 bv_offset; ++#else ++ unsigned bv_len; ++ unsigned bv_offset; ++#endif ++}; ++ + struct dm_bio_details { + sector_t bi_sector; + struct block_device *bi_bdev; + unsigned int bi_size; + unsigned short bi_idx; + unsigned long bi_flags; ++ struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES]; + }; + + static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) + { ++ unsigned i; ++ + bd->bi_sector = bio->bi_sector; + bd->bi_bdev = bio->bi_bdev; + bd->bi_size = bio->bi_size; + bd->bi_idx = bio->bi_idx; + bd->bi_flags = bio->bi_flags; ++ ++ for (i = 0; i < bio->bi_vcnt; i++) { ++ bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len; ++ bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset; ++ } + } + + static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) + { ++ unsigned i; ++ + bio->bi_sector = bd->bi_sector; + bio->bi_bdev = bd->bi_bdev; + bio->bi_size = bd->bi_size; + bio->bi_idx = bd->bi_idx; + bio->bi_flags = bd->bi_flags; ++ ++ for (i = 0; i < bio->bi_vcnt; i++) { ++ bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len; ++ bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset; ++ } + } + + #endif +diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c +index 36e2b5e..e73aabd 100644 +--- a/drivers/md/dm-io.c ++++ b/drivers/md/dm-io.c +@@ -370,16 +370,13 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + +- if (!atomic_read(&io.count) || signal_pending(current)) ++ if (!atomic_read(&io.count)) + break; + + io_schedule(); + } + set_current_state(TASK_RUNNING); + +- if (atomic_read(&io.count)) +- return -EINTR; +- + if (error_bits) + *error_bits = io.error_bits; + +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c +index 0a225da..3e3fc06 100644 +--- a/drivers/md/dm-kcopyd.c ++++ b/drivers/md/dm-kcopyd.c +@@ -297,7 +297,8 @@ static int run_complete_job(struct kcopyd_job *job) + dm_kcopyd_notify_fn fn = job->fn; + struct dm_kcopyd_client *kc = job->kc; + +- kcopyd_put_pages(kc, job->pages); ++ if (job->pages) ++ kcopyd_put_pages(kc, job->pages); + mempool_free(job, kc->job_pool); + fn(read_err, write_err, context); + +@@ -461,6 +462,7 @@ static void segment_complete(int read_err, unsigned long write_err, + sector_t progress = 0; + sector_t count = 0; + struct kcopyd_job 
*job = (struct kcopyd_job *) context; ++ struct dm_kcopyd_client *kc = job->kc; + + mutex_lock(&job->lock); + +@@ -490,7 +492,7 @@ static void segment_complete(int read_err, unsigned long write_err, + + if (count) { + int i; +- struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool, ++ struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool, + GFP_NOIO); + + *sub_job = *job; +@@ -509,13 +511,16 @@ static void segment_complete(int read_err, unsigned long write_err, + } else if (atomic_dec_and_test(&job->sub_jobs)) { + + /* +- * To avoid a race we must keep the job around +- * until after the notify function has completed. +- * Otherwise the client may try and stop the job +- * after we've completed. ++ * Queue the completion callback to the kcopyd thread. ++ * ++ * Some callers assume that all the completions are called ++ * from a single thread and don't race with each other. ++ * ++ * We must not call the callback directly here because this ++ * code may not be executing in the thread. + */ +- job->fn(read_err, write_err, job->context); +- mempool_free(job, job->kc->job_pool); ++ push(&kc->complete_jobs, job); ++ wake(kc); + } + } + +@@ -528,6 +533,8 @@ static void split_job(struct kcopyd_job *job) + { + int i; + ++ atomic_inc(&job->kc->nr_jobs); ++ + atomic_set(&job->sub_jobs, SPLIT_COUNT); + for (i = 0; i < SPLIT_COUNT; i++) + segment_complete(0, 0u, job); +diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c +index 96ea226..42c04f0 100644 +--- a/drivers/md/dm-path-selector.c ++++ b/drivers/md/dm-path-selector.c +@@ -17,9 +17,7 @@ + + struct ps_internal { + struct path_selector_type pst; +- + struct list_head list; +- long use; + }; + + #define pst_to_psi(__pst) container_of((__pst), struct ps_internal, pst) +@@ -45,12 +43,8 @@ static struct ps_internal *get_path_selector(const char *name) + + down_read(&_ps_lock); + psi = __find_path_selector_type(name); +- if (psi) { +- if ((psi->use == 0) && !try_module_get(psi->pst.module)) +- psi = NULL; +- else +- psi->use++; +- } ++ if (psi && !try_module_get(psi->pst.module)) ++ psi = NULL; + up_read(&_ps_lock); + + return psi; +@@ -84,11 +78,7 @@ void dm_put_path_selector(struct path_selector_type *pst) + if (!psi) + goto out; + +- if (--psi->use == 0) +- module_put(psi->pst.module); +- +- BUG_ON(psi->use < 0); +- ++ module_put(psi->pst.module); + out: + up_read(&_ps_lock); + } +@@ -136,11 +126,6 @@ int dm_unregister_path_selector(struct path_selector_type *pst) + return -EINVAL; + } + +- if (psi->use) { +- up_write(&_ps_lock); +- return -ETXTBSY; +- } +- + list_del(&psi->list); + + up_write(&_ps_lock); +diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c +index 4d6bc10..62d5948 100644 +--- a/drivers/md/dm-raid1.c ++++ b/drivers/md/dm-raid1.c +@@ -145,6 +145,8 @@ struct dm_raid1_read_record { + struct dm_bio_details details; + }; + ++static struct kmem_cache *_dm_raid1_read_record_cache; ++ + /* + * Every mirror should look like this one. 
+ */ +@@ -764,9 +766,9 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, + atomic_set(&ms->suspend, 0); + atomic_set(&ms->default_mirror, DEFAULT_MIRROR); + +- len = sizeof(struct dm_raid1_read_record); +- ms->read_record_pool = mempool_create_kmalloc_pool(MIN_READ_RECORDS, +- len); ++ ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS, ++ _dm_raid1_read_record_cache); ++ + if (!ms->read_record_pool) { + ti->error = "Error creating mirror read_record_pool"; + kfree(ms); +@@ -1279,16 +1281,31 @@ static int __init dm_mirror_init(void) + { + int r; + ++ _dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0); ++ if (!_dm_raid1_read_record_cache) { ++ DMERR("Can't allocate dm_raid1_read_record cache"); ++ r = -ENOMEM; ++ goto bad_cache; ++ } ++ + r = dm_register_target(&mirror_target); +- if (r < 0) ++ if (r < 0) { + DMERR("Failed to register mirror target"); ++ goto bad_target; ++ } ++ ++ return 0; + ++bad_target: ++ kmem_cache_destroy(_dm_raid1_read_record_cache); ++bad_cache: + return r; + } + + static void __exit dm_mirror_exit(void) + { + dm_unregister_target(&mirror_target); ++ kmem_cache_destroy(_dm_raid1_read_record_cache); + } + + /* Module hooks */ +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c +index 65ff82f..462750c 100644 +--- a/drivers/md/dm-snap.c ++++ b/drivers/md/dm-snap.c +@@ -972,6 +972,17 @@ static void start_copy(struct dm_snap_pending_exception *pe) + &src, 1, &dest, 0, copy_callback, pe); + } + ++static struct dm_snap_pending_exception * ++__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) ++{ ++ struct dm_snap_exception *e = lookup_exception(&s->pending, chunk); ++ ++ if (!e) ++ return NULL; ++ ++ return container_of(e, struct dm_snap_pending_exception, e); ++} ++ + /* + * Looks to see if this snapshot already has a pending exception + * for this chunk, otherwise it allocates a new one and inserts +@@ -981,40 +992,15 @@ static void start_copy(struct dm_snap_pending_exception *pe) + * this. + */ + static struct dm_snap_pending_exception * +-__find_pending_exception(struct dm_snapshot *s, struct bio *bio) ++__find_pending_exception(struct dm_snapshot *s, ++ struct dm_snap_pending_exception *pe, chunk_t chunk) + { +- struct dm_snap_exception *e; +- struct dm_snap_pending_exception *pe; +- chunk_t chunk = sector_to_chunk(s, bio->bi_sector); +- +- /* +- * Is there a pending exception for this already ? +- */ +- e = lookup_exception(&s->pending, chunk); +- if (e) { +- /* cast the exception to a pending exception */ +- pe = container_of(e, struct dm_snap_pending_exception, e); +- goto out; +- } +- +- /* +- * Create a new pending exception, we don't want +- * to hold the lock while we do this. +- */ +- up_write(&s->lock); +- pe = alloc_pending_exception(s); +- down_write(&s->lock); +- +- if (!s->valid) { +- free_pending_exception(pe); +- return NULL; +- } ++ struct dm_snap_pending_exception *pe2; + +- e = lookup_exception(&s->pending, chunk); +- if (e) { ++ pe2 = __lookup_pending_exception(s, chunk); ++ if (pe2) { + free_pending_exception(pe); +- pe = container_of(e, struct dm_snap_pending_exception, e); +- goto out; ++ return pe2; + } + + pe->e.old_chunk = chunk; +@@ -1032,7 +1018,6 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio) + get_pending_exception(pe); + insert_exception(&s->pending, &pe->e); + +- out: + return pe; + } + +@@ -1083,11 +1068,31 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, + * writeable. 
+ */ + if (bio_rw(bio) == WRITE) { +- pe = __find_pending_exception(s, bio); ++ pe = __lookup_pending_exception(s, chunk); + if (!pe) { +- __invalidate_snapshot(s, -ENOMEM); +- r = -EIO; +- goto out_unlock; ++ up_write(&s->lock); ++ pe = alloc_pending_exception(s); ++ down_write(&s->lock); ++ ++ if (!s->valid) { ++ free_pending_exception(pe); ++ r = -EIO; ++ goto out_unlock; ++ } ++ ++ e = lookup_exception(&s->complete, chunk); ++ if (e) { ++ free_pending_exception(pe); ++ remap_exception(s, e, bio, chunk); ++ goto out_unlock; ++ } ++ ++ pe = __find_pending_exception(s, pe, chunk); ++ if (!pe) { ++ __invalidate_snapshot(s, -ENOMEM); ++ r = -EIO; ++ goto out_unlock; ++ } + } + + remap_exception(s, &pe->e, bio, chunk); +@@ -1217,10 +1222,28 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) + if (e) + goto next_snapshot; + +- pe = __find_pending_exception(snap, bio); ++ pe = __lookup_pending_exception(snap, chunk); + if (!pe) { +- __invalidate_snapshot(snap, -ENOMEM); +- goto next_snapshot; ++ up_write(&snap->lock); ++ pe = alloc_pending_exception(snap); ++ down_write(&snap->lock); ++ ++ if (!snap->valid) { ++ free_pending_exception(pe); ++ goto next_snapshot; ++ } ++ ++ e = lookup_exception(&snap->complete, chunk); ++ if (e) { ++ free_pending_exception(pe); ++ goto next_snapshot; ++ } ++ ++ pe = __find_pending_exception(snap, pe, chunk); ++ if (!pe) { ++ __invalidate_snapshot(snap, -ENOMEM); ++ goto next_snapshot; ++ } + } + + if (!primary_pe) { +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index 2fd66c3..e8361b1 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -399,28 +399,30 @@ static int check_device_area(struct dm_dev_internal *dd, sector_t start, + } + + /* +- * This upgrades the mode on an already open dm_dev. Being ++ * This upgrades the mode on an already open dm_dev, being + * careful to leave things as they were if we fail to reopen the +- * device. ++ * device and not to touch the existing bdev field in case ++ * it is accessed concurrently inside dm_table_any_congested(). 
+ */ + static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, + struct mapped_device *md) + { + int r; +- struct dm_dev_internal dd_copy; +- dev_t dev = dd->dm_dev.bdev->bd_dev; ++ struct dm_dev_internal dd_new, dd_old; + +- dd_copy = *dd; ++ dd_new = dd_old = *dd; ++ ++ dd_new.dm_dev.mode |= new_mode; ++ dd_new.dm_dev.bdev = NULL; ++ ++ r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md); ++ if (r) ++ return r; + + dd->dm_dev.mode |= new_mode; +- dd->dm_dev.bdev = NULL; +- r = open_dev(dd, dev, md); +- if (!r) +- close_dev(&dd_copy, md); +- else +- *dd = dd_copy; ++ close_dev(&dd_old, md); + +- return r; ++ return 0; + } + + /* +diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c +index 7decf10..db72c94 100644 +--- a/drivers/md/dm-target.c ++++ b/drivers/md/dm-target.c +@@ -18,7 +18,6 @@ struct tt_internal { + struct target_type tt; + + struct list_head list; +- long use; + }; + + static LIST_HEAD(_targets); +@@ -44,12 +43,8 @@ static struct tt_internal *get_target_type(const char *name) + down_read(&_lock); + + ti = __find_target_type(name); +- if (ti) { +- if ((ti->use == 0) && !try_module_get(ti->tt.module)) +- ti = NULL; +- else +- ti->use++; +- } ++ if (ti && !try_module_get(ti->tt.module)) ++ ti = NULL; + + up_read(&_lock); + return ti; +@@ -77,10 +72,7 @@ void dm_put_target_type(struct target_type *t) + struct tt_internal *ti = (struct tt_internal *) t; + + down_read(&_lock); +- if (--ti->use == 0) +- module_put(ti->tt.module); +- +- BUG_ON(ti->use < 0); ++ module_put(ti->tt.module); + up_read(&_lock); + + return; +@@ -140,12 +132,6 @@ void dm_unregister_target(struct target_type *t) + BUG(); + } + +- if (ti->use) { +- DMCRIT("Attempt to unregister target still in use: %s", +- t->name); +- BUG(); +- } +- + list_del(&ti->list); + kfree(ti); + +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index e246642..4a25fa9 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -120,6 +120,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) + goto out_free_pages; + + bio->bi_io_vec[i].bv_page = page; ++ bio->bi_vcnt = i+1; + } + } + /* If not user-requests, copy the page pointers to all bios */ +@@ -135,9 +136,9 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) + return r1_bio; + + out_free_pages: +- for (i=0; i < RESYNC_PAGES ; i++) +- for (j=0 ; j < pi->raid_disks; j++) +- safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); ++ for (j=0 ; j < pi->raid_disks; j++) ++ for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++) ++ put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); + j = -1; + out_free_bio: + while ( ++j < pi->raid_disks ) +diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c +index 8683d10..5b107fa 100644 +--- a/drivers/media/video/cx88/cx88-input.c ++++ b/drivers/media/video/cx88/cx88-input.c +@@ -48,8 +48,7 @@ struct cx88_IR { + + /* poll external decoder */ + int polling; +- struct work_struct work; +- struct timer_list timer; ++ struct delayed_work work; + u32 gpio_addr; + u32 last_gpio; + u32 mask_keycode; +@@ -143,27 +142,19 @@ static void cx88_ir_handle_key(struct cx88_IR *ir) + } + } + +-static void ir_timer(unsigned long data) +-{ +- struct cx88_IR *ir = (struct cx88_IR *)data; +- +- schedule_work(&ir->work); +-} +- + static void cx88_ir_work(struct work_struct *work) + { +- struct cx88_IR *ir = container_of(work, struct cx88_IR, work); ++ struct cx88_IR *ir = container_of(work, struct cx88_IR, work.work); + + cx88_ir_handle_key(ir); +- mod_timer(&ir->timer, jiffies + 
++ schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
+ }
+
+ void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
+ {
+ if (ir->polling) {
+- setup_timer(&ir->timer, ir_timer, (unsigned long)ir);
+- INIT_WORK(&ir->work, cx88_ir_work);
+- schedule_work(&ir->work);
++ INIT_DELAYED_WORK(&ir->work, cx88_ir_work);
++ schedule_delayed_work(&ir->work, 0);
+ }
+ if (ir->sampling) {
+ core->pci_irqmask |= PCI_INT_IR_SMPINT;
+@@ -179,10 +170,8 @@ void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir)
+ core->pci_irqmask &= ~PCI_INT_IR_SMPINT;
+ }
+
+- if (ir->polling) {
+- del_timer_sync(&ir->timer);
+- flush_scheduled_work();
+- }
++ if (ir->polling)
++ cancel_delayed_work_sync(&ir->work);
+ }
+
+ /* ---------------------------------------------------------------------- */
+diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
+index ea3aafb..6fc789e 100644
+--- a/drivers/message/fusion/mptbase.c
++++ b/drivers/message/fusion/mptbase.c
+@@ -5934,7 +5934,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
+
+ /* Initalize the timer
+ */
+- init_timer(&pCfg->timer);
++ init_timer_on_stack(&pCfg->timer);
+ pCfg->timer.data = (unsigned long) ioc;
+ pCfg->timer.function = mpt_timer_expired;
+ pCfg->wait_done = 0;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 3d76686..87045f8 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2565,7 +2565,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
+
+ for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
+ if (!targets[i])
+- continue;
++ break;
+ pr_debug("basa: target %x\n", targets[i]);
+ if (list_empty(&bond->vlan_list)) {
+ pr_debug("basa: empty vlan: arp_send\n");
+@@ -2672,7 +2672,6 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
+ int i;
+ __be32 *targets = bond->params.arp_targets;
+
+- targets = bond->params.arp_targets;
+ for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
+ pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n",
+ &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip));
+@@ -3294,7 +3293,7 @@ static void bond_info_show_master(struct seq_file *seq)
+
+ for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) {
+ if (!bond->params.arp_targets[i])
+- continue;
++ break;
+ if (printed)
+ seq_printf(seq, ",");
+ seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
+index 18cf478..d287315 100644
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -684,17 +684,15 @@ static ssize_t bonding_store_arp_targets(struct device *d,
+ goto out;
+ }
+ /* look for an empty slot to put the target in, and check for dupes */
+- for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
++ for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
+ if (targets[i] == newtarget) { /* duplicate */
+ printk(KERN_ERR DRV_NAME
+ ": %s: ARP target %pI4 is already present\n",
+ bond->dev->name, &newtarget);
+- if (done)
+- targets[i] = 0;
+ ret = -EINVAL;
+ goto out;
+ }
+- if (targets[i] == 0 && !done) {
++ if (targets[i] == 0) {
+ printk(KERN_INFO DRV_NAME
+ ": %s: adding ARP target %pI4.\n",
+ bond->dev->name, &newtarget);
+@@ -720,12 +718,16 @@ static ssize_t bonding_store_arp_targets(struct device *d,
+ goto out;
+ }
+
+- for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
++ for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
+ if (targets[i] == newtarget) {
++ int j;
+ printk(KERN_INFO DRV_NAME
+ ": %s: removing ARP target %pI4.\n",
+ bond->dev->name, &newtarget);
+- targets[i] = 0;
++ for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++)
++ targets[j] = targets[j+1];
++
++ targets[j] = 0;
+ done = 1;
+ }
+ }
+diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
+index 67f87a7..090ada6 100644
+--- a/drivers/net/ixgbe/ixgbe_ethtool.c
++++ b/drivers/net/ixgbe/ixgbe_ethtool.c
+@@ -691,9 +691,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+- struct ixgbe_ring *temp_ring;
++ struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
+ int i, err;
+ u32 new_rx_count, new_tx_count;
++ bool need_update = false;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+@@ -712,80 +713,94 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
+ return 0;
+ }
+
+- temp_ring = kcalloc(adapter->num_tx_queues,
+- sizeof(struct ixgbe_ring), GFP_KERNEL);
+- if (!temp_ring)
+- return -ENOMEM;
+-
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ msleep(1);
+
+- if (new_tx_count != adapter->tx_ring->count) {
++ temp_tx_ring = kcalloc(adapter->num_tx_queues,
++ sizeof(struct ixgbe_ring), GFP_KERNEL);
++ if (!temp_tx_ring) {
++ err = -ENOMEM;
++ goto err_setup;
++ }
++
++ if (new_tx_count != adapter->tx_ring_count) {
++ memcpy(temp_tx_ring, adapter->tx_ring,
++ adapter->num_tx_queues * sizeof(struct ixgbe_ring));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+- temp_ring[i].count = new_tx_count;
+- err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
++ temp_tx_ring[i].count = new_tx_count;
++ err = ixgbe_setup_tx_resources(adapter,
++ &temp_tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbe_free_tx_resources(adapter,
+- &temp_ring[i]);
++ &temp_tx_ring[i]);
+ }
+ goto err_setup;
+ }
+- temp_ring[i].v_idx = adapter->tx_ring[i].v_idx;
++ temp_tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+ }
+- if (netif_running(netdev))
+- netdev->netdev_ops->ndo_stop(netdev);
+- ixgbe_reset_interrupt_capability(adapter);
+- ixgbe_napi_del_all(adapter);
+- INIT_LIST_HEAD(&netdev->napi_list);
+- kfree(adapter->tx_ring);
+- adapter->tx_ring = temp_ring;
+- temp_ring = NULL;
+- adapter->tx_ring_count = new_tx_count;
++ need_update = true;
+ }
+
+- temp_ring = kcalloc(adapter->num_rx_queues,
+- sizeof(struct ixgbe_ring), GFP_KERNEL);
+- if (!temp_ring) {
+- if (netif_running(netdev))
+- netdev->netdev_ops->ndo_open(netdev);
+- return -ENOMEM;
++ temp_rx_ring = kcalloc(adapter->num_rx_queues,
++ sizeof(struct ixgbe_ring), GFP_KERNEL);
++ if ((!temp_rx_ring) && (need_update)) {
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
++ kfree(temp_tx_ring);
++ err = -ENOMEM;
++ goto err_setup;
+ }
+
+- if (new_rx_count != adapter->rx_ring->count) {
++ if (new_rx_count != adapter->rx_ring_count) {
++ memcpy(temp_rx_ring, adapter->rx_ring,
++ adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+- temp_ring[i].count = new_rx_count;
+- err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
++ temp_rx_ring[i].count = new_rx_count;
++ err = ixgbe_setup_rx_resources(adapter,
++ &temp_rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbe_free_rx_resources(adapter,
+- &temp_ring[i]);
++ &temp_rx_ring[i]);
+ }
+ goto err_setup;
+ }
+- temp_ring[i].v_idx = adapter->rx_ring[i].v_idx;
++ temp_rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+ }
++ need_update = true;
++ }
++
++ /* if rings need to be updated, here's the place to do it in one shot */
++ if (need_update) {
+ if (netif_running(netdev))
+- netdev->netdev_ops->ndo_stop(netdev);
+- ixgbe_reset_interrupt_capability(adapter);
+- ixgbe_napi_del_all(adapter);
+- INIT_LIST_HEAD(&netdev->napi_list);
+- kfree(adapter->rx_ring);
+- adapter->rx_ring = temp_ring;
+- temp_ring = NULL;
+-
+- adapter->rx_ring_count = new_rx_count;
++ ixgbe_down(adapter);
++
++ /* tx */
++ if (new_tx_count != adapter->tx_ring_count) {
++ kfree(adapter->tx_ring);
++ adapter->tx_ring = temp_tx_ring;
++ temp_tx_ring = NULL;
++ adapter->tx_ring_count = new_tx_count;
++ }
++
++ /* rx */
++ if (new_rx_count != adapter->rx_ring_count) {
++ kfree(adapter->rx_ring);
++ adapter->rx_ring = temp_rx_ring;
++ temp_rx_ring = NULL;
++ adapter->rx_ring_count = new_rx_count;
++ }
+ }
+
+ /* success! */
+ err = 0;
+-err_setup:
+- ixgbe_init_interrupt_scheme(adapter);
+ if (netif_running(netdev))
+- netdev->netdev_ops->ndo_open(netdev);
++ ixgbe_up(adapter);
+
++err_setup:
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+ return err;
+ }
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
+index 43fedb9..9201e5a 100644
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -2075,8 +2075,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if (!tp->pcie_cap && netif_msg_probe(tp))
+ dev_info(&pdev->dev, "no PCI Express capability\n");
+
+- /* Unneeded ? Don't mess with Mrs. Murphy. */
+- rtl8169_irq_mask_and_ack(ioaddr);
++ RTL_W16(IntrMask, 0x0000);
+
+ /* Soft reset the chip. */
+ RTL_W8(ChipCmd, CmdReset);
+@@ -2088,6 +2087,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ msleep_interruptible(1);
+ }
+
++ RTL_W16(IntrStatus, 0xffff);
++
+ /* Identify chip attached to board */
+ rtl8169_get_mac_version(tp, ioaddr);
+
+diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
+index ab0e09b..655e9b2 100644
+--- a/drivers/net/sfc/efx.c
++++ b/drivers/net/sfc/efx.c
+@@ -424,10 +424,6 @@ static void efx_start_channel(struct efx_channel *channel)
+
+ EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
+
+- if (!(channel->efx->net_dev->flags & IFF_UP))
+- netif_napi_add(channel->napi_dev, &channel->napi_str,
+- efx_poll, napi_weight);
+-
+ /* The interrupt handler for this channel may set work_pending
+ * as soon as we enable it. Make sure it's cleared before
+ * then. Similarly, make sure it sees the enabled flag set.
+ */
+@@ -1273,6 +1269,8 @@ static int efx_init_napi(struct efx_nic *efx)
+
+ efx_for_each_channel(channel, efx) {
+ channel->napi_dev = efx->net_dev;
++ netif_napi_add(channel->napi_dev, &channel->napi_str,
++ efx_poll, napi_weight);
+ rc = efx_lro_init(&channel->lro_mgr, efx);
+ if (rc)
+ goto err;
+@@ -1289,6 +1287,8 @@ static void efx_fini_napi(struct efx_nic *efx)
+
+ efx_for_each_channel(channel, efx) {
+ efx_lro_fini(&channel->lro_mgr);
++ if (channel->napi_dev)
++ netif_napi_del(&channel->napi_str);
+ channel->napi_dev = NULL;
+ }
+ }
+diff --git a/drivers/net/skge.c b/drivers/net/skge.c
+index c9dbb06..2bbb44b 100644
+--- a/drivers/net/skge.c
++++ b/drivers/net/skge.c
+@@ -2674,7 +2674,7 @@ static int skge_down(struct net_device *dev)
+ if (netif_msg_ifdown(skge))
+ printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
+
+- netif_stop_queue(dev);
++ netif_tx_disable(dev);
+
+ if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
+ del_timer_sync(&skge->link_timer);
+@@ -2881,7 +2881,6 @@ static void skge_tx_clean(struct net_device *dev)
+ }
+
+ skge->tx_ring.to_clean = e;
+- netif_wake_queue(dev);
+ }
+
+ static void skge_tx_timeout(struct net_device *dev)
+@@ -2893,6 +2892,7 @@ static void skge_tx_timeout(struct net_device *dev)
+
+ skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
+ skge_tx_clean(dev);
++ netif_wake_queue(dev);
+ }
+
+ static int skge_change_mtu(struct net_device *dev, int new_mtu)
+diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
+index 39ecf3b..820fdb2 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00.h
++++ b/drivers/net/wireless/rt2x00/rt2x00.h
+@@ -687,8 +687,7 @@ struct rt2x00_dev {
+ */
+ #ifdef CONFIG_RT2X00_LIB_RFKILL
+ unsigned long rfkill_state;
+-#define RFKILL_STATE_ALLOCATED 1
+-#define RFKILL_STATE_REGISTERED 2
++#define RFKILL_STATE_REGISTERED 1
+ struct rfkill *rfkill;
+ struct delayed_work rfkill_work;
+ #endif /* CONFIG_RT2X00_LIB_RFKILL */
+diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
+index 87c0f2c..e694bb7 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -1105,7 +1105,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
+ * Register extra components.
+ */
+ rt2x00leds_register(rt2x00dev);
+- rt2x00rfkill_allocate(rt2x00dev);
+ rt2x00debug_register(rt2x00dev);
+
+ set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+@@ -1137,7 +1136,6 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
+ * Free extra components
+ */
+ rt2x00debug_deregister(rt2x00dev);
+- rt2x00rfkill_free(rt2x00dev);
+ rt2x00leds_unregister(rt2x00dev);
+
+ /*
+diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
+index 86cd26f..49309d4 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
++++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
+@@ -260,8 +260,6 @@ static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
+ #ifdef CONFIG_RT2X00_LIB_RFKILL
+ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev);
+ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev);
+-void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev);
+-void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev);
+ #else
+ static inline void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
+ {
+@@ -270,14 +268,6 @@ static inline void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
+ static inline void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
+ {
+ }
+-
+-static inline void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
+-{
+-}
+-
+-static inline void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
+-{
+-}
+ #endif /* CONFIG_RT2X00_LIB_RFKILL */
+
+ /*
+diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
+index 3298cae..08ffc6d 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00rfkill.c
++++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
+@@ -94,14 +94,50 @@ static void rt2x00rfkill_poll(struct work_struct *work)
+ &rt2x00dev->rfkill_work, RFKILL_POLL_INTERVAL);
+ }
+
++static int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
++{
++ struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy);
++
++ rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN);
++ if (!rt2x00dev->rfkill)
++ return -ENOMEM;
++
++ rt2x00dev->rfkill->name = rt2x00dev->ops->name;
++ rt2x00dev->rfkill->data = rt2x00dev;
++ rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio;
++ if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) {
++ rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state;
++ rt2x00dev->rfkill->state =
++ rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ?
++ RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
++ } else {
++ rt2x00dev->rfkill->state = RFKILL_STATE_UNBLOCKED;
++ }
++
++ INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll);
++
++ return 0;
++}
++
++static void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
++{
++ rfkill_free(rt2x00dev->rfkill);
++ rt2x00dev->rfkill = NULL;
++}
++
+ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
+ {
+- if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
+- test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
++ if (test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
++ return;
++
++ if (rt2x00rfkill_allocate(rt2x00dev)) {
++ ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n");
+ return;
++ }
+
+ if (rfkill_register(rt2x00dev->rfkill)) {
+ ERROR(rt2x00dev, "Failed to register rfkill handler.\n");
++ rt2x00rfkill_free(rt2x00dev);
+ return;
+ }
+
+@@ -117,8 +153,7 @@ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
+
+ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
+ {
+- if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
+- !test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
++ if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
+ return;
+
+ cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
+@@ -127,46 +162,3 @@ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
+
+ __clear_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state);
+ }
+-
+-void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
+-{
+- struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy);
+-
+- if (test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
+- return;
+-
+- rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN);
+- if (!rt2x00dev->rfkill) {
+- ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n");
+- return;
+- }
+-
+- __set_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state);
+-
+- rt2x00dev->rfkill->name = rt2x00dev->ops->name;
+- rt2x00dev->rfkill->data = rt2x00dev;
+- rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio;
+- if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) {
+- rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state;
+- rt2x00dev->rfkill->state =
+- rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ?
+- RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
+- } else {
+- rt2x00dev->rfkill->state = RFKILL_STATE_UNBLOCKED;
+- }
+-
+- INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll);
+-
+- return;
+-}
+-
+-void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
+-{
+- if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
+- return;
+-
+- cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
+-
+- rfkill_free(rt2x00dev->rfkill);
+- rt2x00dev->rfkill = NULL;
+-}
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 55ec44a..31cfd86 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -847,6 +847,11 @@ int pci_cfg_space_size(struct pci_dev *dev)
+ {
+ int pos;
+ u32 status;
++ u16 class;
++
++ class = dev->class >> 8;
++ if (class == PCI_CLASS_BRIDGE_HOST)
++ return pci_cfg_space_size_ext(dev);
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos) {
+@@ -936,7 +941,6 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
+ dev->multifunction = !!(hdr_type & 0x80);
+ dev->vendor = l & 0xffff;
+ dev->device = (l >> 16) & 0xffff;
+- dev->cfg_size = pci_cfg_space_size(dev);
+ dev->error_state = pci_channel_io_normal;
+ set_pcie_port_type(dev);
+
+@@ -952,6 +956,9 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
+ return NULL;
+ }
+
++ /* need to have dev->class ready */
++ dev->cfg_size = pci_cfg_space_size(dev);
++
+ return dev;
+ }
+
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index a6a42e8..60fbef2 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -225,6 +225,25 @@ static struct quirk_entry quirk_fujitsu_amilo_li_1718 = {
+ .wireless = 2,
+ };
+
++/* The Aspire One has a dummy ACPI-WMI interface - disable it */
++static struct dmi_system_id __devinitdata acer_blacklist[] = {
++ {
++ .ident = "Acer Aspire One (SSD)",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
++ },
++ },
++ {
++ .ident = "Acer Aspire One (HDD)",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
++ },
++ },
++ {}
++};
++
+ static struct dmi_system_id acer_quirks[] = {
+ {
+ .callback = dmi_matched,
+@@ -1254,6 +1273,12 @@ static int __init acer_wmi_init(void)
+
+ printk(ACER_INFO "Acer Laptop ACPI-WMI Extras\n");
+
++ if (dmi_check_system(acer_blacklist)) {
++ printk(ACER_INFO "Blacklisted hardware detected - "
++ "not loading\n");
++ return -ENODEV;
++ }
++
+ find_quirks();
+
+ /*
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 809d32d..ca4467c 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1944,12 +1944,14 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
+ num_arrays++;
+ q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+ if (q->pool == NULL)
+- goto enomem;
++ return -ENOMEM;
+
+ q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
+ GFP_KERNEL, NULL);
+- if (q->queue == ERR_PTR(-ENOMEM))
++ if (IS_ERR(q->queue)) {
++ q->queue = NULL;
+ goto enomem;
++ }
+
+ for (i = 0; i < max; i++) {
+ q->pool[i] = kzalloc(item_size, GFP_KERNEL);
+@@ -1979,8 +1981,7 @@ void iscsi_pool_free(struct iscsi_pool *q)
+
+ for (i = 0; i < q->max; i++)
+ kfree(q->pool[i]);
+- if (q->pool)
+- kfree(q->pool);
++ kfree(q->pool);
+ kfree(q->queue);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 516925d..5e390d2 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -101,6 +101,7 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
+ #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
+
+ static int sg_add(struct device *, struct class_interface *);
++static void sg_device_destroy(struct kref *kref);
+ static void sg_remove(struct device *, struct class_interface *);
+
+ static DEFINE_IDR(sg_index_idr);
+@@ -137,6 +138,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
+ volatile char done; /* 0->before bh, 1->before read, 2->read */
+ struct request *rq;
+ struct bio *bio;
++ struct execute_work ew;
+ } Sg_request;
+
+ typedef struct sg_fd { /* holds the state of a file descriptor */
+@@ -158,6 +160,8 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
+ char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
+ char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
+ char mmap_called; /* 0 -> mmap() never called on this fd */
++ struct kref f_ref;
++ struct execute_work ew;
+ } Sg_fd;
+
+ typedef struct sg_device { /* holds the state of each scsi generic device */
+@@ -171,6 +175,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
+ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
+ struct gendisk *disk;
+ struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg] */
++ struct kref d_ref;
+ } Sg_device;
+
+ static int sg_fasync(int fd, struct file *filp, int mode);
+@@ -185,7 +190,7 @@ static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
+ Sg_request * srp);
+ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
+ const char __user *buf, size_t count, int blocking,
+- int read_only, Sg_request **o_srp);
++ int read_only, int sg_io_owned, Sg_request **o_srp);
+ static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
+ unsigned char *cmnd, int timeout, int blocking);
+ static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
+@@ -194,13 +199,14 @@ static void sg_build_reserve(Sg_fd * sfp, int req_size);
+ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
+ static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
+ static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
+-static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
+-static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
++static void sg_remove_sfp(struct kref *);
+ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+ static Sg_request *sg_add_request(Sg_fd * sfp);
+ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
+ static int sg_res_in_use(Sg_fd * sfp);
++static Sg_device *sg_lookup_dev(int dev);
+ static Sg_device *sg_get_dev(int dev);
++static void sg_put_dev(Sg_device *sdp);
+ #ifdef CONFIG_SCSI_PROC_FS
+ static int sg_last_dev(void);
+ #endif
+@@ -237,22 +243,17 @@ sg_open(struct inode *inode, struct file *filp)
+ nonseekable_open(inode, filp);
+ SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
+ sdp = sg_get_dev(dev);
+- if ((!sdp) || (!sdp->device)) {
+- unlock_kernel();
+- return -ENXIO;
+- }
+- if (sdp->detached) {
+- unlock_kernel();
+- return -ENODEV;
++ if (IS_ERR(sdp)) {
++ retval = PTR_ERR(sdp);
++ sdp = NULL;
++ goto sg_put;
+ }
+
+ /* This driver's module count bumped by fops_get in <linux/fs.h> */
+ /* Prevent the device driver from vanishing while we sleep */
+ retval = scsi_device_get(sdp->device);
+- if (retval) {
+- unlock_kernel();
+- return retval;
+- }
++ if (retval)
++ goto sg_put;
+
+ if (!((flags & O_NONBLOCK) ||
+ scsi_block_when_processing_errors(sdp->device))) {
+@@ -303,16 +304,20 @@ sg_open(struct inode *inode, struct file *filp)
+ if ((sfp = sg_add_sfp(sdp, dev)))
+ filp->private_data = sfp;
+ else {
+- if (flags & O_EXCL)
++ if (flags & O_EXCL) {
+ sdp->exclude = 0; /* undo if error */
++ wake_up_interruptible(&sdp->o_excl_wait);
++ }
+ retval = -ENOMEM;
+ goto error_out;
+ }
+- unlock_kernel();
+- return 0;
+-
+- error_out:
+- scsi_device_put(sdp->device);
++ retval = 0;
++error_out:
++ if (retval)
++ scsi_device_put(sdp->device);
++sg_put:
++ if (sdp)
++ sg_put_dev(sdp);
+ unlock_kernel();
+ return retval;
+ }
+@@ -327,13 +332,13 @@ sg_release(struct inode *inode, struct file *filp)
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+ SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
+- if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
+- if (!sdp->detached) {
+- scsi_device_put(sdp->device);
+- }
+- sdp->exclude = 0;
+- wake_up_interruptible(&sdp->o_excl_wait);
+- }
++
++ sfp->closed = 1;
++
++ sdp->exclude = 0;
++ wake_up_interruptible(&sdp->o_excl_wait);
++
++ kref_put(&sfp->f_ref, sg_remove_sfp);
+ return 0;
+ }
+
+@@ -557,7 +562,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ return -EFAULT;
+ blocking = !(filp->f_flags & O_NONBLOCK);
+ if (old_hdr.reply_len < 0)
+- return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL);
++ return sg_new_write(sfp, filp, buf, count,
++ blocking, 0, 0, NULL);
+ if (count < (SZ_SG_HEADER + 6))
+ return -EIO; /* The minimum scsi command length is 6 bytes. */
+
+@@ -638,7 +644,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+
+ static ssize_t
+ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
+- size_t count, int blocking, int read_only,
++ size_t count, int blocking, int read_only, int sg_io_owned,
+ Sg_request **o_srp)
+ {
+ int k;
+@@ -658,6 +664,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
+ SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
+ return -EDOM;
+ }
++ srp->sg_io_owned = sg_io_owned;
+ hp = &srp->header;
+ if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
+ sg_remove_request(sfp, srp);
+@@ -755,24 +762,13 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
+ hp->duration = jiffies_to_msecs(jiffies);
+
+ srp->rq->timeout = timeout;
++ kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
+ blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
+ srp->rq, 1, sg_rq_end_io);
+ return 0;
+ }
+
+ static int
+-sg_srp_done(Sg_request *srp, Sg_fd *sfp)
+-{
+- unsigned long iflags;
+- int done;
+-
+- read_lock_irqsave(&sfp->rq_list_lock, iflags);
+- done = srp->done;
+- read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+- return done;
+-}
+-
+-static int
+ sg_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd_in, unsigned long arg)
+ {
+@@ -804,27 +800,26 @@ sg_ioctl(struct inode *inode, struct file *filp,
+ return -EFAULT;
+ result =
+ sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
+- blocking, read_only, &srp);
++ blocking, read_only, 1, &srp);
+ if (result < 0)
+ return result;
+- srp->sg_io_owned = 1;
+ while (1) {
+ result = 0; /* following macro to beat race condition */
+ __wait_event_interruptible(sfp->read_wait,
+- (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
+- result);
++ (srp->done || sdp->detached),
++ result);
+ if (sdp->detached)
+ return -ENODEV;
+- if (sfp->closed)
+- return 0; /* request packet dropped already */
+- if (0 == result)
++ write_lock_irq(&sfp->rq_list_lock);
++ if (srp->done) {
++ srp->done = 2;
++ write_unlock_irq(&sfp->rq_list_lock);
+ break;
++ }
+ srp->orphan = 1;
++ write_unlock_irq(&sfp->rq_list_lock);
+ return result; /* -ERESTARTSYS because signal hit process */
+ }
+- write_lock_irqsave(&sfp->rq_list_lock, iflags);
+- srp->done = 2;
+- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
+ return (result < 0) ? result : 0;
+ }
+@@ -1240,6 +1235,15 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ return 0;
+ }
+
++static void sg_rq_end_io_usercontext(struct work_struct *work)
++{
++ struct sg_request *srp = container_of(work, struct sg_request, ew.work);
++ struct sg_fd *sfp = srp->parentfp;
++
++ sg_finish_rem_req(srp);
++ kref_put(&sfp->f_ref, sg_remove_sfp);
++}
++
+ /*
+ * This function is a "bottom half" handler that is called by the mid
+ * level when a command is completed (or has failed).
+@@ -1247,24 +1251,23 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ static void sg_rq_end_io(struct request *rq, int uptodate)
+ {
+ struct sg_request *srp = rq->end_io_data;
+- Sg_device *sdp = NULL;
++ Sg_device *sdp;
+ Sg_fd *sfp;
+ unsigned long iflags;
+ unsigned int ms;
+ char *sense;
+- int result, resid;
++ int result, resid, done = 1;
+
+- if (NULL == srp) {
+- printk(KERN_ERR "sg_cmd_done: NULL request\n");
++ if (WARN_ON(srp->done != 0))
+ return;
+- }
++
+ sfp = srp->parentfp;
+- if (sfp)
+- sdp = sfp->parentdp;
+- if ((NULL == sdp) || sdp->detached) {
+- printk(KERN_INFO "sg_cmd_done: device detached\n");
++ if (WARN_ON(sfp == NULL))
+ return;
+- }
++
++ sdp = sfp->parentdp;
++ if (unlikely(sdp->detached))
++ printk(KERN_INFO "sg_rq_end_io: device detached\n");
+
+ sense = rq->sense;
+ result = rq->errors;
+@@ -1303,32 +1306,26 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
+ }
+ /* Rely on write phase to clean out srp status values, so no "else" */
+
+- if (sfp->closed) { /* whoops this fd already released, cleanup */
+- SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
+- sg_finish_rem_req(srp);
+- srp = NULL;
+- if (NULL == sfp->headrp) {
+- SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, final cleanup\n"));
+- if (0 == sg_remove_sfp(sdp, sfp)) { /* device still present */
+- scsi_device_put(sdp->device);
+- }
+- sfp = NULL;
+- }
+- } else if (srp && srp->orphan) {
++ write_lock_irqsave(&sfp->rq_list_lock, iflags);
++ if (unlikely(srp->orphan)) {
+ if (sfp->keep_orphan)
+ srp->sg_io_owned = 0;
+- else {
+- sg_finish_rem_req(srp);
+- srp = NULL;
+- }
++ else
++ done = 0;
+ }
+- if (sfp && srp) {
+- /* Now wake up any sg_read() that is waiting for this packet. */
+- kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
+- write_lock_irqsave(&sfp->rq_list_lock, iflags);
+- srp->done = 1;
++ srp->done = done;
++ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
++
++ if (likely(done)) {
++ /* Now wake up any sg_read() that is waiting for this
++ * packet.
++ */
+ wake_up_interruptible(&sfp->read_wait);
+- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
++ kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
++ kref_put(&sfp->f_ref, sg_remove_sfp);
++ } else {
++ INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
++ schedule_work(&srp->ew.work);
+ }
+ }
+
+@@ -1364,17 +1361,18 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
+ printk(KERN_WARNING "kmalloc Sg_device failure\n");
+ return ERR_PTR(-ENOMEM);
+ }
+- error = -ENOMEM;
++
+ if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
+ printk(KERN_WARNING "idr expansion Sg_device failure\n");
++ error = -ENOMEM;
+ goto out;
+ }
+
+ write_lock_irqsave(&sg_index_lock, iflags);
+- error = idr_get_new(&sg_index_idr, sdp, &k);
+- write_unlock_irqrestore(&sg_index_lock, iflags);
+
++ error = idr_get_new(&sg_index_idr, sdp, &k);
+ if (error) {
++ write_unlock_irqrestore(&sg_index_lock, iflags);
+ printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
+ error);
+ goto out;
+@@ -1391,6 +1389,9 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
+ init_waitqueue_head(&sdp->o_excl_wait);
+ sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
+ sdp->index = k;
++ kref_init(&sdp->d_ref);
++
++ write_unlock_irqrestore(&sg_index_lock, iflags);
+
+ error = 0;
+ out:
+@@ -1401,6 +1402,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
+ return sdp;
+
+ overflow:
++ idr_remove(&sg_index_idr, k);
++ write_unlock_irqrestore(&sg_index_lock, iflags);
+ sdev_printk(KERN_WARNING, scsidp,
+ "Unable to attach sg device type=%d, minor "
+ "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
+@@ -1488,49 +1491,46 @@ out:
+ return error;
+ }
+
+-static void
+-sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
++static void sg_device_destroy(struct kref *kref)
++{
++ struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
++ unsigned long flags;
++
++ /* CAUTION! Note that the device can still be found via idr_find()
++ * even though the refcount is 0. Therefore, do idr_remove() BEFORE
++ * any other cleanup.
++ */
++
++ write_lock_irqsave(&sg_index_lock, flags);
++ idr_remove(&sg_index_idr, sdp->index);
++ write_unlock_irqrestore(&sg_index_lock, flags);
++
++ SCSI_LOG_TIMEOUT(3,
++ printk("sg_device_destroy: %s\n",
++ sdp->disk->disk_name));
++
++ put_disk(sdp->disk);
++ kfree(sdp);
++}
++
++static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
+ {
+ struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
+ Sg_device *sdp = dev_get_drvdata(cl_dev);
+ unsigned long iflags;
+ Sg_fd *sfp;
+- Sg_fd *tsfp;
+- Sg_request *srp;
+- Sg_request *tsrp;
+- int delay;
+
+- if (!sdp)
++ if (!sdp || sdp->detached)
+ return;
+
+- delay = 0;
++ SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));
++
++ /* Need a write lock to set sdp->detached. */
+ write_lock_irqsave(&sg_index_lock, iflags);
+- if (sdp->headfp) {
+- sdp->detached = 1;
+- for (sfp = sdp->headfp; sfp; sfp = tsfp) {
+- tsfp = sfp->nextfp;
+- for (srp = sfp->headrp; srp; srp = tsrp) {
+- tsrp = srp->nextrp;
+- if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
+- sg_finish_rem_req(srp);
+- }
+- if (sfp->closed) {
+- scsi_device_put(sdp->device);
+- __sg_remove_sfp(sdp, sfp);
+- } else {
+- delay = 1;
+- wake_up_interruptible(&sfp->read_wait);
+- kill_fasync(&sfp->async_qp, SIGPOLL,
+- POLL_HUP);
+- }
+- }
+- SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d, dirty\n", sdp->index));
+- if (NULL == sdp->headfp) {
+- idr_remove(&sg_index_idr, sdp->index);
+- }
+- } else { /* nothing active, simple case */
+- SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d\n", sdp->index));
+- idr_remove(&sg_index_idr, sdp->index);
++ sdp->detached = 1;
++ for (sfp = sdp->headfp; sfp; sfp = sfp->nextfp) {
++ wake_up_interruptible(&sfp->read_wait);
++ kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
+ }
+ write_unlock_irqrestore(&sg_index_lock, iflags);
+
+@@ -1538,13 +1538,8 @@ sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
+ device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
+ cdev_del(sdp->cdev);
+ sdp->cdev = NULL;
+- put_disk(sdp->disk);
+- sdp->disk = NULL;
+- if (NULL == sdp->headfp)
+- kfree(sdp);
+
+- if (delay)
+- msleep(10); /* dirty detach so delay device destruction */
++ sg_put_dev(sdp);
+ }
+
+ module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
+@@ -1673,10 +1668,30 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
+ md->null_mapped = hp->dxferp ? 0 : 1;
+ }
+
+- if (iov_count)
+- res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
+- hp->dxfer_len, GFP_ATOMIC);
+- else
++ if (iov_count) {
++ int len, size = sizeof(struct sg_iovec) * iov_count;
++ struct iovec *iov;
++
++ iov = kmalloc(size, GFP_ATOMIC);
++ if (!iov)
++ return -ENOMEM;
++
++ if (copy_from_user(iov, hp->dxferp, size)) {
++ kfree(iov);
++ return -EFAULT;
++ }
++
++ len = iov_length(iov, iov_count);
++ if (hp->dxfer_len < len) {
++ iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
++ len = hp->dxfer_len;
++ }
++
++ res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
++ iov_count,
++ len, GFP_ATOMIC);
++ kfree(iov);
++ } else
+ res = blk_rq_map_user(q, rq, md, hp->dxferp,
+ hp->dxfer_len, GFP_ATOMIC);
+
+@@ -1941,22 +1956,6 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+ return resp;
+ }
+
+-#ifdef CONFIG_SCSI_PROC_FS
+-static Sg_request *
+-sg_get_nth_request(Sg_fd * sfp, int nth)
+-{
+- Sg_request *resp;
+- unsigned long iflags;
+- int k;
+-
+- read_lock_irqsave(&sfp->rq_list_lock, iflags);
+- for (k = 0, resp = sfp->headrp; resp && (k < nth);
+- ++k, resp = resp->nextrp) ;
+- read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+- return resp;
+-}
+-#endif
+-
+ /* always adds to end of list */
+ static Sg_request *
+ sg_add_request(Sg_fd * sfp)
+@@ -2032,22 +2031,6 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
+ return res;
+ }
+
+-#ifdef CONFIG_SCSI_PROC_FS
+-static Sg_fd *
+-sg_get_nth_sfp(Sg_device * sdp, int nth)
+-{
+- Sg_fd *resp;
+- unsigned long iflags;
+- int k;
+-
+- read_lock_irqsave(&sg_index_lock, iflags);
+- for (k = 0, resp = sdp->headfp; resp && (k < nth);
+- ++k, resp = resp->nextfp) ;
+- read_unlock_irqrestore(&sg_index_lock, iflags);
+- return resp;
+-}
+-#endif
+-
+ static Sg_fd *
+ sg_add_sfp(Sg_device * sdp, int dev)
+ {
+@@ -2062,6 +2045,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
+ init_waitqueue_head(&sfp->read_wait);
+ rwlock_init(&sfp->rq_list_lock);
+
++ kref_init(&sfp->f_ref);
+ sfp->timeout = SG_DEFAULT_TIMEOUT;
+ sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
+ sfp->force_packid = SG_DEF_FORCE_PACK_ID;
+@@ -2089,15 +2073,54 @@ sg_add_sfp(Sg_device * sdp, int dev)
+ sg_build_reserve(sfp, bufflen);
+ SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
+ sfp->reserve.bufflen, sfp->reserve.k_use_sg));
++
++ kref_get(&sdp->d_ref);
++ __module_get(THIS_MODULE);
+ return sfp;
+ }
+
+-static void
+-__sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
++static void sg_remove_sfp_usercontext(struct work_struct *work)
++{
++ struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
++ struct sg_device *sdp = sfp->parentdp;
++
++ /* Cleanup any responses which were never read(). */
++ while (sfp->headrp)
++ sg_finish_rem_req(sfp->headrp);
++
++ if (sfp->reserve.bufflen > 0) {
++ SCSI_LOG_TIMEOUT(6,
++ printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
++ (int) sfp->reserve.bufflen,
++ (int) sfp->reserve.k_use_sg));
++ sg_remove_scat(&sfp->reserve);
++ }
++
++ SCSI_LOG_TIMEOUT(6,
++ printk("sg_remove_sfp: %s, sfp=0x%p\n",
++ sdp->disk->disk_name,
++ sfp));
++ kfree(sfp);
++
++ scsi_device_put(sdp->device);
++ sg_put_dev(sdp);
++ module_put(THIS_MODULE);
++}
++
++static void sg_remove_sfp(struct kref *kref)
+ {
++ struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
++ struct sg_device *sdp = sfp->parentdp;
+ Sg_fd *fp;
+ Sg_fd *prev_fp;
++ unsigned long iflags;
+
++ /* CAUTION! Note that sfp can still be found by walking sdp->headfp
++ * even though the refcount is now 0. Therefore, unlink sfp from
++ * sdp->headfp BEFORE doing any other cleanup.
++ */
++
++ write_lock_irqsave(&sg_index_lock, iflags);
+ prev_fp = sdp->headfp;
+ if (sfp == prev_fp)
+ sdp->headfp = prev_fp->nextfp;
+@@ -2110,54 +2133,11 @@ __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
+ prev_fp = fp;
+ }
+ }
+- if (sfp->reserve.bufflen > 0) {
+- SCSI_LOG_TIMEOUT(6,
+- printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
+- (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
+- sg_remove_scat(&sfp->reserve);
+- }
+- sfp->parentdp = NULL;
+- SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp));
+- kfree(sfp);
+-}
+-
+-/* Returns 0 in normal case, 1 when detached and sdp object removed */
+-static int
+-sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
+-{
+- Sg_request *srp;
+- Sg_request *tsrp;
+- int dirty = 0;
+- int res = 0;
+-
+- for (srp = sfp->headrp; srp; srp = tsrp) {
+- tsrp = srp->nextrp;
+- if (sg_srp_done(srp, sfp))
+- sg_finish_rem_req(srp);
+- else
+- ++dirty;
+- }
+- if (0 == dirty) {
+- unsigned long iflags;
++ write_unlock_irqrestore(&sg_index_lock, iflags);
++ wake_up_interruptible(&sdp->o_excl_wait);
+
+- write_lock_irqsave(&sg_index_lock, iflags);
+- __sg_remove_sfp(sdp, sfp);
+- if (sdp->detached && (NULL == sdp->headfp)) {
+- idr_remove(&sg_index_idr, sdp->index);
+- kfree(sdp);
+- res = 1;
+- }
+- write_unlock_irqrestore(&sg_index_lock, iflags);
+- } else {
+- /* MOD_INC's to inhibit unloading sg and associated adapter driver */
+- /* only bump the access_count if we actually succeeded in
+- * throwing another counter on the host module */
+- scsi_device_get(sdp->device); /* XXX: retval ignored? */
+- sfp->closed = 1; /* flag dirty state on this fd */
+- SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
+- dirty));
+- }
+- return res;
++ INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
++ schedule_work(&sfp->ew.work);
+ }
+
+ static int
+@@ -2199,19 +2179,38 @@ sg_last_dev(void)
+ }
+ #endif
+
+-static Sg_device *
+-sg_get_dev(int dev)
++/* must be called with sg_index_lock held */
++static Sg_device *sg_lookup_dev(int dev)
+ {
+- Sg_device *sdp;
+- unsigned long iflags;
++ return idr_find(&sg_index_idr, dev);
++}
+
+- read_lock_irqsave(&sg_index_lock, iflags);
+- sdp = idr_find(&sg_index_idr, dev);
+- read_unlock_irqrestore(&sg_index_lock, iflags);
++static Sg_device *sg_get_dev(int dev)
++{
++ struct sg_device *sdp;
++ unsigned long flags;
++
++ read_lock_irqsave(&sg_index_lock, flags);
++ sdp = sg_lookup_dev(dev);
++ if (!sdp)
++ sdp = ERR_PTR(-ENXIO);
++ else if (sdp->detached) {
++ /* If sdp->detached, then the refcount may already be 0, in
++ * which case it would be a bug to do kref_get().
++ */
++ sdp = ERR_PTR(-ENODEV);
++ } else
++ kref_get(&sdp->d_ref);
++ read_unlock_irqrestore(&sg_index_lock, flags);
+
+ return sdp;
+ }
+
++static void sg_put_dev(struct sg_device *sdp)
++{
++ kref_put(&sdp->d_ref, sg_device_destroy);
++}
++
+ #ifdef CONFIG_SCSI_PROC_FS
+
+ static struct proc_dir_entry *sg_proc_sgp = NULL;
+@@ -2468,8 +2467,10 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
+ struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+ Sg_device *sdp;
+ struct scsi_device *scsidp;
++ unsigned long iflags;
+
+- sdp = it ? sg_get_dev(it->index) : NULL;
++ read_lock_irqsave(&sg_index_lock, iflags);
++ sdp = it ? sg_lookup_dev(it->index) : NULL;
+ if (sdp && (scsidp = sdp->device) && (!sdp->detached))
+ seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
+ scsidp->host->host_no, scsidp->channel,
+@@ -2480,6 +2481,7 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
+ (int) scsi_device_online(scsidp));
+ else
+ seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
++ read_unlock_irqrestore(&sg_index_lock, iflags);
+ return 0;
+ }
+
+@@ -2493,16 +2495,20 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
+ struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+ Sg_device *sdp;
+ struct scsi_device *scsidp;
++ unsigned long iflags;
+
+- sdp = it ? sg_get_dev(it->index) : NULL;
++ read_lock_irqsave(&sg_index_lock, iflags);
++ sdp = it ? sg_lookup_dev(it->index) : NULL;
+ if (sdp && (scsidp = sdp->device) && (!sdp->detached))
+ seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
+ scsidp->vendor, scsidp->model, scsidp->rev);
+ else
+ seq_printf(s, "\n");
++ read_unlock_irqrestore(&sg_index_lock, iflags);
+ return 0;
+ }
+
++/* must be called while holding sg_index_lock */
+ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ {
+ int k, m, new_interface, blen, usg;
+@@ -2512,7 +2518,8 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ const char * cp;
+ unsigned int ms;
+
+- for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
++ for (k = 0, fp = sdp->headfp; fp != NULL; ++k, fp = fp->nextfp) {
++ read_lock(&fp->rq_list_lock); /* irqs already disabled */
+ seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
+ "(res)sgat=%d low_dma=%d\n", k + 1,
+ jiffies_to_msecs(fp->timeout),
+@@ -2522,7 +2529,9 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
+ (int) fp->cmd_q, (int) fp->force_packid,
+ (int) fp->keep_orphan, (int) fp->closed);
+- for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
++ for (m = 0, srp = fp->headrp;
++ srp != NULL;
++ ++m, srp = srp->nextrp) {
+ hp = &srp->header;
+ new_interface = (hp->interface_id == '\0') ? 0 : 1;
+ if (srp->res_used) {
+@@ -2559,6 +2568,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ }
+ if (0 == m)
+ seq_printf(s, " No requests active\n");
++ read_unlock(&fp->rq_list_lock);
+ }
+ }
+
+@@ -2571,39 +2581,34 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
+ {
+ struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+ Sg_device *sdp;
++ unsigned long iflags;
+
+ if (it && (0 == it->index)) {
+ seq_printf(s, "max_active_device=%d(origin 1)\n",
+ (int)it->max);
+ seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
+ }
+- sdp = it ? sg_get_dev(it->index) : NULL;
+- if (sdp) {
+- struct scsi_device *scsidp = sdp->device;
+
+- if (NULL == scsidp) {
+- seq_printf(s, "device %d detached ??\n",
+- (int)it->index);
+- return 0;
+- }
++
++ read_lock_irqsave(&sg_index_lock, iflags);
++ sdp = it ? sg_lookup_dev(it->index) : NULL;
++ if (sdp && sdp->headfp) {
++ struct scsi_device *scsidp = sdp->device;
+
+- if (sg_get_nth_sfp(sdp, 0)) {
+- seq_printf(s, " >>> device=%s ",
+- sdp->disk->disk_name);
+- if (sdp->detached)
+- seq_printf(s, "detached pending close ");
+- else
+- seq_printf
+- (s, "scsi%d chan=%d id=%d lun=%d em=%d",
+- scsidp->host->host_no,
+- scsidp->channel, scsidp->id,
+- scsidp->lun,
+- scsidp->host->hostt->emulated);
+- seq_printf(s, " sg_tablesize=%d excl=%d\n",
+- sdp->sg_tablesize, sdp->exclude);
+- }
++ seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
++ if (sdp->detached)
++ seq_printf(s, "detached pending close ");
++ else
++ seq_printf
++ (s, "scsi%d chan=%d id=%d lun=%d em=%d",
++ scsidp->host->host_no,
++ scsidp->channel, scsidp->id,
++ scsidp->lun,
++ scsidp->host->hostt->emulated);
++ seq_printf(s, " sg_tablesize=%d excl=%d\n",
++ sdp->sg_tablesize, sdp->exclude);
+ sg_proc_debug_helper(s, sdp);
+ }
++ read_unlock_irqrestore(&sg_index_lock, iflags);
+ return 0;
+ }
+
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 643908b..8eba98c 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -658,7 +658,7 @@ int spi_write_then_read(struct spi_device *spi,
+
+ int status;
+ struct spi_message message;
+- struct spi_transfer x;
++ struct spi_transfer x[2];
+ u8 *local_buf;
+
+ /* Use preallocated DMA-safe buffer. We can't avoid copying here,
+@@ -669,9 +669,15 @@ int spi_write_then_read(struct spi_device *spi,
+ return -EINVAL;
+
+ spi_message_init(&message);
+- memset(&x, 0, sizeof x);
+- x.len = n_tx + n_rx;
+- spi_message_add_tail(&x, &message);
++ memset(x, 0, sizeof x);
++ if (n_tx) {
++ x[0].len = n_tx;
++ spi_message_add_tail(&x[0], &message);
++ }
++ if (n_rx) {
++ x[1].len = n_rx;
++ spi_message_add_tail(&x[1], &message);
++ }
+
+ /* ... unless someone else is using the pre-allocated buffer */
+ if (!mutex_trylock(&lock)) {
+@@ -682,15 +688,15 @@ int spi_write_then_read(struct spi_device *spi,
+ local_buf = buf;
+
+ memcpy(local_buf, txbuf, n_tx);
+- x.tx_buf = local_buf;
+- x.rx_buf = local_buf;
++ x[0].tx_buf = local_buf;
++ x[1].rx_buf = local_buf + n_tx;
+
+ /* do the i/o */
+ status = spi_sync(spi, &message);
+ if (status == 0)
+- memcpy(rxbuf, x.rx_buf + n_tx, n_rx);
++ memcpy(rxbuf, x[1].rx_buf, n_rx);
+
+- if (x.tx_buf == buf)
++ if (x[0].tx_buf == buf)
+ mutex_unlock(&lock);
+ else
+ kfree(local_buf);
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 3771d6e..34e6108 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -652,7 +652,7 @@ next_desc:
+
+ iface = &intf->altsetting[0];
+ ep = &iface->endpoint[0].desc;
+- if (!usb_endpoint_is_int_in(ep)) {
++ if (!ep || !usb_endpoint_is_int_in(ep)) {
+ rv = -EINVAL;
+ goto err;
+ }
+diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
+index 96d65ca..4007770 100644
+--- a/drivers/usb/gadget/u_ether.c
++++ b/drivers/usb/gadget/u_ether.c
+@@ -175,12 +175,6 @@ static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
+ strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
+ }
+
+-static u32 eth_get_link(struct net_device *net)
+-{
+- struct eth_dev *dev = netdev_priv(net);
+- return dev->gadget->speed != USB_SPEED_UNKNOWN;
+-}
+-
+ /* REVISIT can also support:
+ * - WOL (by tracking suspends and issuing remote wakeup)
+ * - msglevel (implies updated messaging)
+@@ -189,7 +183,7 @@ static u32 eth_get_link(struct net_device *net)
+
+ static struct ethtool_ops ops = {
+ .get_drvinfo = eth_get_drvinfo,
+- .get_link = eth_get_link
++ .get_link = ethtool_op_get_link,
+ };
+
+ static void defer_kevent(struct eth_dev *dev, int flag)
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index ae84c32..bb3143e 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -668,6 +668,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(DE_VID, WHT_PID) },
+ { USB_DEVICE(ADI_VID, ADI_GNICE_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++ { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
+ { }, /* Optional parameter entry */
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
+index daaf63d..c09f658 100644
+--- a/drivers/usb/serial/ftdi_sio.h
++++ b/drivers/usb/serial/ftdi_sio.h
+@@ -913,6 +913,13 @@
+ #define ADI_GNICE_PID 0xF000
+
+ /*
++ * JETI SPECTROMETER SPECBOS 1201
++ * http://www.jeti.com/products/sys/scb/scb1201.php
++ */
++#define JETI_VID 0x0c6c
++#define JETI_SPC1201_PID 0x04b2
++
++/*
+ * BmRequestType: 1100 0000b
+ * bRequest: FTDI_E2_READ
+ * wValue: 0
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 2620bf6..9c4c700 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -1215,20 +1215,22 @@ static void ti_bulk_in_callback(struct urb *urb)
+ }
+
+ tty = tty_port_tty_get(&port->port);
+- if (tty && urb->actual_length) {
+- usb_serial_debug_data(debug, dev, __func__,
+- urb->actual_length, urb->transfer_buffer);
+-
+- if (!tport->tp_is_open)
+- dbg("%s - port closed, dropping data", __func__);
+- else
+- ti_recv(&urb->dev->dev, tty,
++ if (tty) {
++ if (urb->actual_length) {
++ usb_serial_debug_data(debug, dev, __func__,
++ urb->actual_length, urb->transfer_buffer);
++
++ if (!tport->tp_is_open)
++ dbg("%s - port closed, dropping data",
++ __func__);
++ else
++ ti_recv(&urb->dev->dev, tty,
+ urb->transfer_buffer,
+ urb->actual_length);
+-
+- spin_lock(&tport->tp_lock);
+- tport->tp_icount.rx += urb->actual_length;
+- spin_unlock(&tport->tp_lock);
++ spin_lock(&tport->tp_lock);
++ tport->tp_icount.rx += urb->actual_length;
++ spin_unlock(&tport->tp_lock);
++ }
+ tty_kref_put(tty);
+ }
+
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index cfde74a..0f54399 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1218,12 +1218,14 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ),
+
+-/* Reported by Rauch Wolke */
++/* Reported by Rauch Wolke
++ * and augmented by binbin (Bugzilla #12882)
++ */
+ UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
+ "Simple Tech/Datafab",
+ "CF+SM Reader",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+- US_FL_IGNORE_RESIDUE ),
++ US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64 ),
+
+ /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
+ * to the USB storage specification in two ways:
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 1657b96..471a9a6 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -2263,9 +2263,12 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
+ }
+
+
++ if (!lock_fb_info(info))
++ return;
+ event.info = info;
+ event.data = &blank;
+ fb_notifier_call_chain(FB_EVENT_CONBLANK, &event);
++ unlock_fb_info(info);
+ }
+
+ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+@@ -2954,8 +2957,9 @@ static int fbcon_fb_unbind(int idx)
+
+ static int fbcon_fb_unregistered(struct fb_info *info)
+ {
+- int i, idx = info->node;
++ int i, idx;
+
++ idx = info->node;
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map[i] == idx)
+ con2fb_map[i] = -1;
+@@ -2979,13 +2983,12 @@ static int fbcon_fb_unregistered(struct fb_info *info)
+ }
+ }
+
+- if (!num_registered_fb)
+- unregister_con_driver(&fb_con);
+-
+-
+ if (primary_device == idx)
+ primary_device = -1;
+
++ if (!num_registered_fb)
++ unregister_con_driver(&fb_con);
++
+ return 0;
+ }
+
+@@ -3021,8 +3024,9 @@ static inline void fbcon_select_primary(struct fb_info *info)
+
+ static int fbcon_fb_registered(struct fb_info *info)
+ {
+- int ret = 0, i, idx = info->node;
++ int ret = 0, i, idx;
+
++ idx = info->node;
+ fbcon_select_primary(info);
+
+ if (info_idx == -1) {
+@@ -3124,7 +3128,7 @@ static void fbcon_get_requirement(struct fb_info *info,
+ }
+ }
+
+-static int fbcon_event_notify(struct notifier_block *self,
++static int fbcon_event_notify(struct notifier_block *self,
+ unsigned long action, void *data)
+ {
+ struct fb_event *event = data;
+@@ -3132,7 +3136,7 @@ static int fbcon_event_notify(struct notifier_block *self,
+ struct fb_videomode *mode;
+ struct fb_con2fbmap *con2fb;
+ struct fb_blit_caps *caps;
+- int ret = 0;
++ int idx, ret = 0;
+
+ /*
+ * ignore all events except driver registration and deregistration
+@@ -3160,7 +3164,8 @@ static int fbcon_event_notify(struct notifier_block *self,
+ ret = fbcon_mode_deleted(info, mode);
+ break;
+ case FB_EVENT_FB_UNBIND:
+- ret = fbcon_fb_unbind(info->node);
++ idx = info->node;
++ ret = fbcon_fb_unbind(idx);
+ break;
+ case FB_EVENT_FB_REGISTERED:
+ ret = fbcon_fb_registered(info);
+@@ -3188,7 +3193,6 @@ static int fbcon_event_notify(struct notifier_block *self,
+ fbcon_get_requirement(info, caps);
+ break;
+ }
+-
+ done:
+ return ret;
+ }
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index cfd9dce..1d6fb41 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1086,13 +1086,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ return -EINVAL;
+ con2fb.framebuffer = -1;
+ event.data = &con2fb;
+-
+ if (!lock_fb_info(info))
+ return -ENODEV;
+ event.info = info;
+ fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event);
+ unlock_fb_info(info);
+-
+ ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0;
+ break;
+ case FBIOPUT_CON2FBMAP:
+@@ -1112,8 +1110,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ if (!lock_fb_info(info))
+ return -ENODEV;
+ event.info = info;
+- ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP,
+- &event);
++ ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
+ unlock_fb_info(info);
+ break;
+ case FBIOBLANK:
+@@ -1519,7 +1516,10 @@ register_framebuffer(struct fb_info *fb_info)
+ registered_fb[i] = fb_info;
+
+ event.info = fb_info;
++ if (!lock_fb_info(fb_info))
++ return -ENODEV;
+ fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
++ unlock_fb_info(fb_info);
+ return 0;
+ }
+
+@@ -1553,8 +1553,12 @@ unregister_framebuffer(struct fb_info *fb_info)
+ goto done;
+ }
+
++
++ if (!lock_fb_info(fb_info))
++ return -ENODEV;
+ event.info = fb_info;
+ ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
++ unlock_fb_info(fb_info);
+
+ if (ret) {
+ ret = -EINVAL;
+@@ -1588,6 +1592,8 @@ void fb_set_suspend(struct fb_info *info, int state)
+ {
+ struct fb_event event;
+
++ if (!lock_fb_info(info))
++ return;
+ event.info = info;
+ if (state) {
+ fb_notifier_call_chain(FB_EVENT_SUSPEND, &event);
+@@ -1596,6 +1602,7 @@ void fb_set_suspend(struct fb_info *info, int state)
+ info->state = FBINFO_STATE_RUNNING;
+ fb_notifier_call_chain(FB_EVENT_RESUME, &event);
+ }
++ unlock_fb_info(info);
+ }
+
+ /**
+@@ -1665,8 +1672,11 @@ int fb_new_modelist(struct fb_info *info)
+ err = 1;
+
+ if (!list_empty(&info->modelist)) {
++ if (!lock_fb_info(info))
++ return -ENODEV;
+ event.info = info;
+ err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
++ unlock_fb_info(info);
+ }
+
+ return err;
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 5926826..9c76a06 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -190,7 +190,8 @@ static int balloon(void *_vballoon)
+ try_to_freeze();
+ wait_event_interruptible(vb->config_change,
+ (diff = towards_target(vb)) != 0
+- || kthread_should_stop());
++ || kthread_should_stop()
++ || freezing(current));
+ if (diff > 0)
+ fill_balloon(vb, diff);
+ else if (diff < 0)
+diff --git a/fs/dquot.c b/fs/dquot.c
+index bca3cac..5a0059d 100644
+--- a/fs/dquot.c
++++ b/fs/dquot.c
+@@ -793,7 +793,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
+ continue;
+ if (!dqinit_needed(inode, type))
+ continue;
+- if (inode->i_state & (I_FREEING|I_WILL_FREE))
++ if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+ continue;
+
+ __iget(inode);
+diff --git a/fs/drop_caches.c b/fs/drop_caches.c
+index 3e5637f..f7e66c0 100644
+--- a/fs/drop_caches.c
++++ b/fs/drop_caches.c
+@@ -18,7 +18,7 @@ static void drop_pagecache_sb(struct super_block *sb)
+
+ spin_lock(&inode_lock);
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+- if (inode->i_state & (I_FREEING|I_WILL_FREE))
(I_FREEING|I_WILL_FREE)) ++ if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) + continue; + if (inode->i_mapping->nrpages == 0) + continue; +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 9f61e62..27b3741 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -2693,7 +2693,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery) + i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int); + sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); + if (sbi->s_mb_maxs == NULL) { +- kfree(sbi->s_mb_maxs); ++ kfree(sbi->s_mb_offsets); + return -ENOMEM; + } + +@@ -4439,7 +4439,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) + pa_inode_list) { + spin_lock(&tmp_pa->pa_lock); + if (tmp_pa->pa_deleted) { +- spin_unlock(&pa->pa_lock); ++ spin_unlock(&tmp_pa->pa_lock); + continue; + } + if (!added && pa->pa_free < tmp_pa->pa_free) { +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c +index e3fe991..f81f9e7 100644 +--- a/fs/fs-writeback.c ++++ b/fs/fs-writeback.c +@@ -538,7 +538,8 @@ void generic_sync_sb_inodes(struct super_block *sb, + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { + struct address_space *mapping; + +- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ++ if (inode->i_state & ++ (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) + continue; + mapping = inode->i_mapping; + if (mapping->nrpages == 0) +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 9b800d9..c91a818 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -26,7 +26,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -842,7 +841,7 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig) + bad_val: + printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n", + args[0].from, p); +- return 1; ++ return -EINVAL; + } + + static int +diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c +index 6cdeacf..4bd49c1 100644 +--- a/fs/nfs/nfs3xdr.c ++++ b/fs/nfs/nfs3xdr.c +@@ -716,7 +716,8 @@ nfs3_xdr_setaclargs(struct rpc_rqst *req, __be32 *p, + if (args->npages != 0) + xdr_encode_pages(buf, args->pages, 0, args->len); + else +- req->rq_slen += args->len; ++ req->rq_slen = xdr_adjust_iovec(req->rq_svec, ++ p + XDR_QUADLEN(args->len)); + + err = nfsacl_encode(buf, base, args->inode, + (args->mask & NFS_ACL) ? +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c +index a5887df..8672b95 100644 +--- a/fs/ocfs2/file.c ++++ b/fs/ocfs2/file.c +@@ -1926,7 +1926,7 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, + out->f_path.dentry->d_name.len, + out->f_path.dentry->d_name.name); + +- inode_double_lock(inode, pipe->inode); ++ mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); + + ret = ocfs2_rw_lock(inode, 1); + if (ret < 0) { +@@ -1941,12 +1941,16 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, + goto out_unlock; + } + ++ if (pipe->inode) ++ mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD); + ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags); ++ if (pipe->inode) ++ mutex_unlock(&pipe->inode->i_mutex); + + out_unlock: + ocfs2_rw_unlock(inode, 1); + out: +- inode_double_unlock(inode, pipe->inode); ++ mutex_unlock(&inode->i_mutex); + + mlog_exit(ret); + return ret; +diff --git a/fs/splice.c b/fs/splice.c +index 4ed0ba4..4c1029a 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -736,10 +736,19 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, + * ->write_end. Most of the time, these expect i_mutex to + * be held. 
Since this may result in an ABBA deadlock with + * pipe->inode, we have to order lock acquiry here. ++ * ++ * Outer lock must be inode->i_mutex, as pipe_wait() will ++ * release and reacquire pipe->inode->i_mutex, AND inode must ++ * never be a pipe. + */ +- inode_double_lock(inode, pipe->inode); ++ WARN_ON(S_ISFIFO(inode->i_mode)); ++ mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); ++ if (pipe->inode) ++ mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD); + ret = __splice_from_pipe(pipe, &sd, actor); +- inode_double_unlock(inode, pipe->inode); ++ if (pipe->inode) ++ mutex_unlock(&pipe->inode->i_mutex); ++ mutex_unlock(&inode->i_mutex); + + return ret; + } +@@ -830,11 +839,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, + }; + ssize_t ret; + +- inode_double_lock(inode, pipe->inode); ++ WARN_ON(S_ISFIFO(inode->i_mode)); ++ mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); + ret = file_remove_suid(out); +- if (likely(!ret)) ++ if (likely(!ret)) { ++ if (pipe->inode) ++ mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD); + ret = __splice_from_pipe(pipe, &sd, pipe_to_file); +- inode_double_unlock(inode, pipe->inode); ++ if (pipe->inode) ++ mutex_unlock(&pipe->inode->i_mutex); ++ } ++ mutex_unlock(&inode->i_mutex); + if (ret > 0) { + unsigned long nr_pages; + +diff --git a/include/linux/capability.h b/include/linux/capability.h +index 4864a43..c302110 100644 +--- a/include/linux/capability.h ++++ b/include/linux/capability.h +@@ -377,7 +377,21 @@ struct cpu_vfs_cap_data { + #define CAP_FOR_EACH_U32(__capi) \ + for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi) + ++/* ++ * CAP_FS_MASK and CAP_NFSD_MASKS: ++ * ++ * The fs mask is all the privileges that fsuid==0 historically meant. ++ * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE. ++ * ++ * It has never meant setting security.* and trusted.* xattrs. ++ * ++ * We could also define fsmask as follows: ++ * 1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions ++ * 2. 
The security.* and trusted.* xattrs are fs-related MAC permissions ++ */ ++ + # define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \ ++ | CAP_TO_MASK(CAP_MKNOD) \ + | CAP_TO_MASK(CAP_DAC_OVERRIDE) \ + | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \ + | CAP_TO_MASK(CAP_FOWNER) \ +@@ -392,11 +406,12 @@ struct cpu_vfs_cap_data { + # define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }}) + # define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }}) + # define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }}) +-# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } }) ++# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ ++ | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \ ++ CAP_FS_MASK_B1 } }) + # define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ +- | CAP_TO_MASK(CAP_SYS_RESOURCE) \ +- | CAP_TO_MASK(CAP_MKNOD), \ +- CAP_FS_MASK_B1 } }) ++ | CAP_TO_MASK(CAP_SYS_RESOURCE), \ ++ CAP_FS_MASK_B1 } }) + + #endif /* _KERNEL_CAPABILITY_U32S != 2 */ + +diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h +index bd37078..0d2f7c8 100644 +--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h +@@ -336,6 +336,11 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, + const enum hrtimer_mode mode); + extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + unsigned long range_ns, const enum hrtimer_mode mode); ++extern int ++__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, ++ unsigned long delta_ns, ++ const enum hrtimer_mode mode, int wakeup); ++ + extern int hrtimer_cancel(struct hrtimer *timer); + extern int hrtimer_try_to_cancel(struct hrtimer *timer); + +diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h +index 9127f6b..564d1c0 100644 +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -274,6 +274,7 @@ extern void softirq_init(void); + #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) + extern void raise_softirq_irqoff(unsigned int nr); + extern void raise_softirq(unsigned int nr); ++extern void wakeup_softirqd(void); + + /* This is the worklist that queues up per-cpu softirq work. + * +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index bf6f703..552ef4f 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -127,6 +127,10 @@ struct kvm { + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; + #endif + ++#ifdef CONFIG_HAVE_KVM_IRQCHIP ++ struct hlist_head mask_notifier_list; ++#endif ++ + #ifdef KVM_ARCH_WANT_MMU_NOTIFIER + struct mmu_notifier mmu_notifier; + unsigned long mmu_notifier_seq; +@@ -321,6 +325,19 @@ struct kvm_assigned_dev_kernel { + struct pci_dev *dev; + struct kvm *kvm; + }; ++ ++struct kvm_irq_mask_notifier { ++ void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); ++ int irq; ++ struct hlist_node link; ++}; ++ ++void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, ++ struct kvm_irq_mask_notifier *kimn); ++void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, ++ struct kvm_irq_mask_notifier *kimn); ++void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask); ++ + void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); + void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi); + void kvm_register_irq_ack_notifier(struct kvm *kvm, +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h +index 01ca085..076a7dc 100644 +--- a/include/linux/pagemap.h ++++ b/include/linux/pagemap.h +@@ -18,9 +18,14 @@ + * Bits in mapping->flags. 
The lower __GFP_BITS_SHIFT bits are the page + * allocation mode flags. + */ +-#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ +-#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ +-#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */ ++enum mapping_flags { ++ AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */ ++ AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ ++ AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ ++#ifdef CONFIG_UNEVICTABLE_LRU ++ AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ ++#endif ++}; + + static inline void mapping_set_error(struct address_space *mapping, int error) + { +@@ -33,7 +38,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error) + } + + #ifdef CONFIG_UNEVICTABLE_LRU +-#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */ + + static inline void mapping_set_unevictable(struct address_space *mapping) + { +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 011db2f..f8af167 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -202,7 +202,8 @@ extern unsigned long long time_sync_thresh; + #define task_is_stopped_or_traced(task) \ + ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) + #define task_contributes_to_load(task) \ +- ((task->state & TASK_UNINTERRUPTIBLE) != 0) ++ ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ ++ (task->flags & PF_FROZEN) == 0) + + #define __set_task_state(tsk, state_value) \ + do { (tsk)->state = (state_value); } while (0) +diff --git a/kernel/fork.c b/kernel/fork.c +index 4854c2c..9b51a1b 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -808,6 +808,12 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) + sig->cputime_expires.virt_exp = cputime_zero; + sig->cputime_expires.sched_exp = 0; + ++ if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { ++ sig->cputime_expires.prof_exp = ++ secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur); ++ sig->cputimer.running = 1; ++ } ++ + /* The timer lists. */ + INIT_LIST_HEAD(&sig->cpu_timers[0]); + INIT_LIST_HEAD(&sig->cpu_timers[1]); +@@ -823,11 +829,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) + atomic_inc(&current->signal->live); + return 0; + } +- sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); +- +- if (sig) +- posix_cpu_timers_init_group(sig); + ++ sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); + tsk->signal = sig; + if (!sig) + return -ENOMEM; +@@ -865,6 +868,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) + memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); + task_unlock(current->group_leader); + ++ posix_cpu_timers_init_group(sig); ++ + acct_init_pacct(&sig->pacct); + + tty_audit_fork(sig); +diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c +index f394d2a..cb8a15c 100644 +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -651,14 +651,20 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer) + * and expiry check is done in the hrtimer_interrupt or in the softirq. 
+ */ + static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, +- struct hrtimer_clock_base *base) ++ struct hrtimer_clock_base *base, ++ int wakeup) + { + if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { +- spin_unlock(&base->cpu_base->lock); +- raise_softirq_irqoff(HRTIMER_SOFTIRQ); +- spin_lock(&base->cpu_base->lock); ++ if (wakeup) { ++ spin_unlock(&base->cpu_base->lock); ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); ++ spin_lock(&base->cpu_base->lock); ++ } else ++ __raise_softirq_irqoff(HRTIMER_SOFTIRQ); ++ + return 1; + } ++ + return 0; + } + +@@ -703,7 +709,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; } + static inline int hrtimer_switch_to_hres(void) { return 0; } + static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } + static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, +- struct hrtimer_clock_base *base) ++ struct hrtimer_clock_base *base, ++ int wakeup) + { + return 0; + } +@@ -886,20 +893,9 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) + return 0; + } + +-/** +- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU +- * @timer: the timer to be added +- * @tim: expiry time +- * @delta_ns: "slack" range for the timer +- * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL) +- * +- * Returns: +- * 0 on success +- * 1 when the timer was active +- */ +-int +-hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns, +- const enum hrtimer_mode mode) ++int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, ++ unsigned long delta_ns, const enum hrtimer_mode mode, ++ int wakeup) + { + struct hrtimer_clock_base *base, *new_base; + unsigned long flags; +@@ -940,12 +936,29 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n + * XXX send_remote_softirq() ? 
+ */ + if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) +- hrtimer_enqueue_reprogram(timer, new_base); ++ hrtimer_enqueue_reprogram(timer, new_base, wakeup); + + unlock_hrtimer_base(timer, &flags); + + return ret; + } ++ ++/** ++ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU ++ * @timer: the timer to be added ++ * @tim: expiry time ++ * @delta_ns: "slack" range for the timer ++ * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL) ++ * ++ * Returns: ++ * 0 on success ++ * 1 when the timer was active ++ */ ++int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, ++ unsigned long delta_ns, const enum hrtimer_mode mode) ++{ ++ return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1); ++} + EXPORT_SYMBOL_GPL(hrtimer_start_range_ns); + + /** +@@ -961,7 +974,7 @@ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns); + int + hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) + { +- return hrtimer_start_range_ns(timer, tim, 0, mode); ++ return __hrtimer_start_range_ns(timer, tim, 0, mode, 1); + } + EXPORT_SYMBOL_GPL(hrtimer_start); + +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 7ba8cd9..6589776 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -912,10 +912,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, + ri->rp = rp; + ri->task = current; + +- if (rp->entry_handler && rp->entry_handler(ri, regs)) { +- spin_unlock_irqrestore(&rp->lock, flags); ++ if (rp->entry_handler && rp->entry_handler(ri, regs)) + return 0; +- } + + arch_prepare_kretprobe(ri, regs); + +diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c +index e976e50..68647c1 100644 +--- a/kernel/posix-cpu-timers.c ++++ b/kernel/posix-cpu-timers.c +@@ -18,7 +18,7 @@ void update_rlimit_cpu(unsigned long rlim_new) + + cputime = secs_to_cputime(rlim_new); + if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || +- cputime_lt(current->signal->it_prof_expires, cputime)) { ++ cputime_gt(current->signal->it_prof_expires, cputime)) { + spin_lock_irq(&current->sighand->siglock); + set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); + spin_unlock_irq(&current->sighand->siglock); +@@ -224,7 +224,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, + cpu->cpu = virt_ticks(p); + break; + case CPUCLOCK_SCHED: +- cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p); ++ cpu->sched = task_sched_runtime(p); + break; + } + return 0; +@@ -305,18 +305,19 @@ static int cpu_clock_sample_group(const clockid_t which_clock, + { + struct task_cputime cputime; + +- thread_group_cputime(p, &cputime); + switch (CPUCLOCK_WHICH(which_clock)) { + default: + return -EINVAL; + case CPUCLOCK_PROF: ++ thread_group_cputime(p, &cputime); + cpu->cpu = cputime_add(cputime.utime, cputime.stime); + break; + case CPUCLOCK_VIRT: ++ thread_group_cputime(p, &cputime); + cpu->cpu = cputime.utime; + break; + case CPUCLOCK_SCHED: +- cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p); ++ cpu->sched = thread_group_sched_runtime(p); + break; + } + return 0; +diff --git a/kernel/sched.c b/kernel/sched.c +index 8e2558c..5e80629 100644 +--- a/kernel/sched.c ++++ b/kernel/sched.c +@@ -231,13 +231,20 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) + + spin_lock(&rt_b->rt_runtime_lock); + for (;;) { ++ unsigned long delta; ++ ktime_t soft, hard; ++ + if (hrtimer_active(&rt_b->rt_period_timer)) + break; + + now = hrtimer_cb_get_time(&rt_b->rt_period_timer); + 
hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period); +- hrtimer_start_expires(&rt_b->rt_period_timer, +- HRTIMER_MODE_ABS); ++ ++ soft = hrtimer_get_softexpires(&rt_b->rt_period_timer); ++ hard = hrtimer_get_expires(&rt_b->rt_period_timer); ++ delta = ktime_to_ns(ktime_sub(hard, soft)); ++ __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, ++ HRTIMER_MODE_ABS, 0); + } + spin_unlock(&rt_b->rt_runtime_lock); + } +@@ -1129,7 +1136,8 @@ static __init void init_hrtick(void) + */ + static void hrtick_start(struct rq *rq, u64 delay) + { +- hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL); ++ __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, ++ HRTIMER_MODE_REL, 0); + } + + static inline void init_hrtick(void) +@@ -4134,9 +4142,25 @@ DEFINE_PER_CPU(struct kernel_stat, kstat); + EXPORT_PER_CPU_SYMBOL(kstat); + + /* +- * Return any ns on the sched_clock that have not yet been banked in ++ * Return any ns on the sched_clock that have not yet been accounted in + * @p in case that task is currently running. ++ * ++ * Called with task_rq_lock() held on @rq. + */ ++static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) ++{ ++ u64 ns = 0; ++ ++ if (task_current(rq, p)) { ++ update_rq_clock(rq); ++ ns = rq->clock - p->se.exec_start; ++ if ((s64)ns < 0) ++ ns = 0; ++ } ++ ++ return ns; ++} ++ + unsigned long long task_delta_exec(struct task_struct *p) + { + unsigned long flags; +@@ -4144,16 +4168,49 @@ unsigned long long task_delta_exec(struct task_struct *p) + u64 ns = 0; + + rq = task_rq_lock(p, &flags); ++ ns = do_task_delta_exec(p, rq); ++ task_rq_unlock(rq, &flags); + +- if (task_current(rq, p)) { +- u64 delta_exec; ++ return ns; ++} + +- update_rq_clock(rq); +- delta_exec = rq->clock - p->se.exec_start; +- if ((s64)delta_exec > 0) +- ns = delta_exec; +- } ++/* ++ * Return accounted runtime for the task. ++ * In case the task is currently running, return the runtime plus current's ++ * pending runtime that have not been accounted yet. ++ */ ++unsigned long long task_sched_runtime(struct task_struct *p) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ u64 ns = 0; + ++ rq = task_rq_lock(p, &flags); ++ ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); ++ task_rq_unlock(rq, &flags); ++ ++ return ns; ++} ++ ++/* ++ * Return sum_exec_runtime for the thread group. ++ * In case the task is currently running, return the sum plus current's ++ * pending runtime that have not been accounted yet. ++ * ++ * Note that the thread group might have other running tasks as well, ++ * so the return value not includes other pending runtime that other ++ * running tasks might have. ++ */ ++unsigned long long thread_group_sched_runtime(struct task_struct *p) ++{ ++ struct task_cputime totals; ++ unsigned long flags; ++ struct rq *rq; ++ u64 ns; ++ ++ rq = task_rq_lock(p, &flags); ++ thread_group_cputime(p, &totals); ++ ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); + task_rq_unlock(rq, &flags); + + return ns; +diff --git a/kernel/softirq.c b/kernel/softirq.c +index 9041ea7..d2b183e 100644 +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); + * to the pending events, so lets the scheduler to balance + * the softirq load for us. 
+ */ +-static inline void wakeup_softirqd(void) ++void wakeup_softirqd(void) + { + /* Interrupts are disabled: no need to stop preemption */ + struct task_struct *tsk = __get_cpu_var(ksoftirqd); +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index c5ef44f..7755ae7 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -95,12 +95,9 @@ static int sixty = 60; + static int neg_one = -1; + #endif + +-#if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING) +-static int two = 2; +-#endif +- + static int zero; + static int one = 1; ++static int two = 2; + static unsigned long one_ul = 1; + static int one_hundred = 100; + +@@ -1373,10 +1370,7 @@ static struct ctl_table fs_table[] = { + .data = &lease_break_time, + .maxlen = sizeof(int), + .mode = 0644, +- .proc_handler = &proc_dointvec_minmax, +- .strategy = &sysctl_intvec, +- .extra1 = &zero, +- .extra2 = &two, ++ .proc_handler = &proc_dointvec, + }, + #endif + #ifdef CONFIG_AIO +@@ -1417,7 +1411,10 @@ static struct ctl_table fs_table[] = { + .data = &suid_dumpable, + .maxlen = sizeof(int), + .mode = 0644, +- .proc_handler = &proc_dointvec, ++ .proc_handler = &proc_dointvec_minmax, ++ .strategy = &sysctl_intvec, ++ .extra1 = &zero, ++ .extra2 = &two, + }, + #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) + { +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 17bb88d..b2387c0 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -3886,7 +3886,8 @@ __init static int tracer_alloc_buffers(void) + &trace_panic_notifier); + + register_die_notifier(&trace_die_notifier); +- ret = 0; ++ ++ return 0; + + out_free_cpumask: + free_cpumask_var(tracing_cpumask); +diff --git a/lib/cpumask.c b/lib/cpumask.c +index 3389e24..1f71b97 100644 +--- a/lib/cpumask.c ++++ b/lib/cpumask.c +@@ -109,10 +109,10 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) + #endif + /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */ + if (*mask) { ++ unsigned char *ptr = (unsigned char *)cpumask_bits(*mask); + unsigned int tail; + tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long); +- memset(cpumask_bits(*mask) + cpumask_size() - tail, +- 0, tail); ++ memset(ptr + cpumask_size() - tail, 0, tail); + } + + return *mask != NULL; +diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c +index 0c04615..427dfe3 100644 +--- a/mm/filemap_xip.c ++++ b/mm/filemap_xip.c +@@ -89,8 +89,8 @@ do_xip_mapping_read(struct address_space *mapping, + } + } + nr = nr - offset; +- if (nr > len) +- nr = len; ++ if (nr > len - copied) ++ nr = len - copied; + + error = mapping->a_ops->get_xip_mem(mapping, index, 0, + &xip_mem, &xip_pfn); +diff --git a/mm/mmap.c b/mm/mmap.c +index 00ced3e..f1aa6f9 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -1571,7 +1571,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + * Overcommit.. This must be the final test, as it will + * update security statistics. 
+ */ +- if (security_vm_enough_memory(grow)) ++ if (security_vm_enough_memory_mm(mm, grow)) + return -ENOMEM; + + /* Ok, everything looks good - let it rip */ +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index c6a6b16..eae6954 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -2496,7 +2496,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) + skb_network_header_len(skb)); + skb_copy_from_linear_data(skb, nskb->data, doffset); + +- if (pos >= offset + len) ++ if (fskb != skb_shinfo(skb)->frag_list) + continue; + + if (!sg) { +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c +index 7ea88b6..39879ae 100644 +--- a/net/ipv4/netfilter/arp_tables.c ++++ b/net/ipv4/netfilter/arp_tables.c +@@ -374,7 +374,9 @@ static int mark_source_chains(struct xt_table_info *newinfo, + && unconditional(&e->arp)) || visited) { + unsigned int oldpos, size; + +- if (t->verdict < -NF_MAX_VERDICT - 1) { ++ if ((strcmp(t->target.u.user.name, ++ ARPT_STANDARD_TARGET) == 0) && ++ t->verdict < -NF_MAX_VERDICT - 1) { + duprintf("mark_source_chains: bad " + "negative verdict (%i)\n", + t->verdict); +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c +index ef8b6ca..ec362a3 100644 +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -496,7 +496,9 @@ mark_source_chains(struct xt_table_info *newinfo, + && unconditional(&e->ip)) || visited) { + unsigned int oldpos, size; + +- if (t->verdict < -NF_MAX_VERDICT - 1) { ++ if ((strcmp(t->target.u.user.name, ++ IPT_STANDARD_TARGET) == 0) && ++ t->verdict < -NF_MAX_VERDICT - 1) { + duprintf("mark_source_chains: bad " + "negative verdict (%i)\n", + t->verdict); +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c +index a33485d..def375b 100644 +--- a/net/ipv6/netfilter/ip6_tables.c ++++ b/net/ipv6/netfilter/ip6_tables.c +@@ -525,7 +525,9 @@ mark_source_chains(struct xt_table_info *newinfo, + && unconditional(&e->ipv6)) || visited) { + unsigned int oldpos, size; + +- if (t->verdict < -NF_MAX_VERDICT - 1) { ++ if ((strcmp(t->target.u.user.name, ++ IP6T_STANDARD_TARGET) == 0) && ++ t->verdict < -NF_MAX_VERDICT - 1) { + duprintf("mark_source_chains: bad " + "negative verdict (%i)\n", + t->verdict); +diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c +index e9c05b8..dcce778 100644 +--- a/net/netrom/af_netrom.c ++++ b/net/netrom/af_netrom.c +@@ -1082,7 +1082,13 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock, + + SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n"); + +- /* Build a packet */ ++ /* Build a packet - the conventional user limit is 236 bytes. 
We can ++ do ludicrously large NetROM frames but must not overflow */ ++ if (len > 65536) { ++ err = -EMSGSIZE; ++ goto out; ++ } ++ + SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n"); + size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; + +diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c +index 0139264..5e75bbf 100644 +--- a/net/rose/af_rose.c ++++ b/net/rose/af_rose.c +@@ -1124,6 +1124,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, + + /* Build a packet */ + SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n"); ++ /* Sanity check the packet size */ ++ if (len > 65535) ++ return -EMSGSIZE; ++ + size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; + + if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) +diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c +index 9fc5b02..88d80f5 100644 +--- a/net/x25/af_x25.c ++++ b/net/x25/af_x25.c +@@ -1037,6 +1037,12 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, + sx25.sx25_addr = x25->dest_addr; + } + ++ /* Sanity check the packet size */ ++ if (len > 65535) { ++ rc = -EMSGSIZE; ++ goto out; ++ } ++ + SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n"); + + /* Build a packet */ +diff --git a/security/commoncap.c b/security/commoncap.c +index 7cd61a5..beac025 100644 +--- a/security/commoncap.c ++++ b/security/commoncap.c +@@ -916,7 +916,6 @@ changed: + return commit_creds(new); + + no_change: +- error = 0; + error: + abort_creds(new); + return error; +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c +index e7ded13..c1c5f36 100644 +--- a/security/smack/smack_lsm.c ++++ b/security/smack/smack_lsm.c +@@ -607,6 +607,8 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name, + strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) { + if (!capable(CAP_MAC_ADMIN)) + rc = -EPERM; ++ if (size == 0) ++ rc = -EINVAL; + } else + rc = cap_inode_setxattr(dentry, name, value, size, flags); + +@@ -1430,7 +1432,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name, + struct socket *sock; + int rc = 0; + +- if (value == NULL || size > SMK_LABELLEN) ++ if (value == NULL || size > SMK_LABELLEN || size == 0) + return -EACCES; + + sp = smk_import(value, size); +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c +index d03f992..cef1ce0 100644 +--- a/sound/pci/hda/hda_codec.c ++++ b/sound/pci/hda/hda_codec.c +@@ -2003,7 +2003,11 @@ int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid, + err = bus->ops.command(bus, res); + if (!err) { + struct hda_cache_head *c; +- u32 key = build_cmd_cache_key(nid, verb); ++ u32 key; ++ /* parm may contain the verb stuff for get/set amp */ ++ verb = verb | (parm >> 8); ++ parm &= 0xff; ++ key = build_cmd_cache_key(nid, verb); + c = get_alloc_hash(&codec->cmd_cache, key); + if (c) + c->val = parm; +diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c +index e486123..5a6d6d8 100644 +--- a/sound/pci/hda/patch_analog.c ++++ b/sound/pci/hda/patch_analog.c +@@ -3239,7 +3239,7 @@ static const char *ad1884_slave_vols[] = { + "Mic Playback Volume", + "CD Playback Volume", + "Internal Mic Playback Volume", +- "Docking Mic Playback Volume" ++ "Docking Mic Playback Volume", + "Beep Playback Volume", + "IEC958 Playback Volume", + NULL +diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c +index 23b81cf..e85a2bc 100644 +--- a/virt/kvm/ioapic.c ++++ b/virt/kvm/ioapic.c +@@ -101,6 +101,7 @@ static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int 
idx) + static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) + { + unsigned index; ++ bool mask_before, mask_after; + + switch (ioapic->ioregsel) { + case IOAPIC_REG_VERSION: +@@ -120,6 +121,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) + ioapic_debug("change redir index %x val %x\n", index, val); + if (index >= IOAPIC_NUM_PINS) + return; ++ mask_before = ioapic->redirtbl[index].fields.mask; + if (ioapic->ioregsel & 1) { + ioapic->redirtbl[index].bits &= 0xffffffff; + ioapic->redirtbl[index].bits |= (u64) val << 32; +@@ -128,6 +130,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) + ioapic->redirtbl[index].bits |= (u32) val; + ioapic->redirtbl[index].fields.remote_irr = 0; + } ++ mask_after = ioapic->redirtbl[index].fields.mask; ++ if (mask_before != mask_after) ++ kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); + if (ioapic->irr & (1 << index)) + ioapic_service(ioapic, index); + break; +@@ -426,3 +431,4 @@ int kvm_ioapic_init(struct kvm *kvm) + kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev); + return 0; + } ++ +diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c +index aa5d1e5..5162a41 100644 +--- a/virt/kvm/irq_comm.c ++++ b/virt/kvm/irq_comm.c +@@ -99,3 +99,27 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) + clear_bit(irq_source_id, &kvm->arch.irq_states[i]); + clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); + } ++ ++void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, ++ struct kvm_irq_mask_notifier *kimn) ++{ ++ kimn->irq = irq; ++ hlist_add_head(&kimn->link, &kvm->mask_notifier_list); ++} ++ ++void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, ++ struct kvm_irq_mask_notifier *kimn) ++{ ++ hlist_del(&kimn->link); ++} ++ ++void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask) ++{ ++ struct kvm_irq_mask_notifier *kimn; ++ struct hlist_node *n; ++ ++ hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link) ++ if (kimn->irq == irq) ++ kimn->func(kimn, mask); ++} ++ +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 29a667c..6723411 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -563,7 +563,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, + goto out; + } + +- if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) ++ if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) + kvm_deassign_device(kvm, match); + + kvm_free_assigned_device(kvm, match); +@@ -581,8 +581,10 @@ static inline int valid_vcpu(int n) + + inline int kvm_is_mmio_pfn(pfn_t pfn) + { +- if (pfn_valid(pfn)) +- return PageReserved(pfn_to_page(pfn)); ++ if (pfn_valid(pfn)) { ++ struct page *page = compound_head(pfn_to_page(pfn)); ++ return PageReserved(page); ++ } + + return true; + } +@@ -828,6 +830,9 @@ static struct kvm *kvm_create_vm(void) + + if (IS_ERR(kvm)) + goto out; ++#ifdef CONFIG_HAVE_KVM_IRQCHIP ++ INIT_HLIST_HEAD(&kvm->mask_notifier_list); ++#endif + + #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET + page = alloc_page(GFP_KERNEL | __GFP_ZERO); diff --git a/debian/patches/series/4 b/debian/patches/series/4 index dab46e427..719247344 100644 --- a/debian/patches/series/4 +++ b/debian/patches/series/4 @@ -5,3 +5,5 @@ + features/arm/g760a.patch + features/arm/allow-alternative-copy-user.patch + features/arm/alternative-copy-user.patch +- bugfix/mips/compat-zero-upper-32bits-of-offset_high-and-offset_low.patch ++ bugfix/all/stable/2.6.29.2.patch
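
The subtlest reasoning in this batch is in the fs/splice.c hunks above, where inode_double_lock() is replaced by an explicit lock order. For reference, here is that discipline reduced to a minimal, self-contained C sketch. It is illustrative only and not part of the patch; the function name splice_lock_order_sketch is hypothetical and error handling is omitted.

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/pipe_fs_i.h>

/*
 * Sketch of the lock order the splice fixes enforce: the output
 * inode's i_mutex is always the outer ("parent") lock, and the
 * pipe's i_mutex, when the pipe has one, is always nested inside
 * it as the "child" lock. pipe_wait() may drop and retake
 * pipe->inode->i_mutex, so the pipe mutex can never safely be the
 * outer lock, and the output inode must never itself be a pipe.
 */
static void splice_lock_order_sketch(struct inode *inode,
				     struct pipe_inode_info *pipe)
{
	WARN_ON(S_ISFIFO(inode->i_mode));
	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	if (pipe->inode)
		mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);

	/* ... move data from the pipe into the file here ... */

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);
	mutex_unlock(&inode->i_mutex);
}

Compared with the inode_double_lock() calls it replaces, the fixed parent/child ordering removes the ABBA inversion between the file inode and pipe->inode, which is why generic_file_splice_write(), splice_from_pipe() and the ocfs2 splice path above all take the two mutexes in exactly this sequence.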