From f46b35894b6f8711a47cdf4dadde5d57f9519732 Mon Sep 17 00:00:00 2001
From: Ben Hutchings
Date: Tue, 26 Jan 2010 00:32:43 +0000
Subject: [PATCH] Add stable release 2.6.32.6

svn path=/dists/trunk/linux-2.6/; revision=14999
---
 debian/changelog                              |    1 +
 .../patches/bugfix/all/stable/2.6.32.6.patch  | 1324 +++++++++++++++++
 debian/patches/series/6                       |    1 +
 3 files changed, 1326 insertions(+)
 create mode 100644 debian/patches/bugfix/all/stable/2.6.32.6.patch

diff --git a/debian/changelog b/debian/changelog
index 66ab9295d..8bec6ade4 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -45,6 +45,7 @@ linux-2.6 (2.6.32-6) UNRELEASED; urgency=low
     (CVE-2009-3939) (Closes: #562975)
   * Force distribution=UNRELEASED in debian/bin/test-patches so that it
     works in released source packages
+  * Add stable release 2.6.32.6

   [ Ian Campbell ]
   * xen: Enable up to 32G of guest memory on i386.
diff --git a/debian/patches/bugfix/all/stable/2.6.32.6.patch b/debian/patches/bugfix/all/stable/2.6.32.6.patch
new file mode 100644
index 000000000..705b8bd66
--- /dev/null
+++ b/debian/patches/bugfix/all/stable/2.6.32.6.patch
@@ -0,0 +1,1324 @@
+diff --git a/Makefile b/Makefile
+index 4ebd3f1..20da312 100644
+diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
+index 6a52d4b..f8590c5 100644
+--- a/arch/x86/kernel/cpuid.c
++++ b/arch/x86/kernel/cpuid.c
+@@ -192,7 +192,8 @@ static int __init cpuid_init(void)
+ int i, err = 0;
+ i = 0;
+
+- if (register_chrdev(CPUID_MAJOR, "cpu/cpuid", &cpuid_fops)) {
++ if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
++ "cpu/cpuid", &cpuid_fops)) {
+ printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
+ CPUID_MAJOR);
+ err = -EBUSY;
+@@ -221,7 +222,7 @@ out_class:
+ }
+ class_destroy(cpuid_class);
+ out_chrdev:
+- unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
++ __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
+ out:
+ return err;
+ }
+diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
+index 6a3cefc..b42e63b 100644
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -251,7 +251,7 @@ static int __init msr_init(void)
+ int i, err = 0;
+ i = 0;
+
+- if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
++ if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
+ printk(KERN_ERR "msr: unable to get major %d for msr\n",
+ MSR_MAJOR);
+ err = -EBUSY;
+@@ -279,7 +279,7 @@ out_class:
+ msr_device_destroy(i);
+ class_destroy(msr_class);
+ out_chrdev:
+- unregister_chrdev(MSR_MAJOR, "cpu/msr");
++ __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
+ out:
+ return err;
+ }
+diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
+index b22d13b..a672f12 100644
+--- a/arch/x86/pci/i386.c
++++ b/arch/x86/pci/i386.c
+@@ -282,6 +282,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ return -EINVAL;
+
+ prot = pgprot_val(vma->vm_page_prot);
++
++ /*
++ * Return error if pat is not enabled and write_combine is requested.
++ * Caller can followup with UC MINUS request and add a WC mtrr if there
++ * is a free mtrr slot.
++ */
++ if (!pat_enabled && write_combine)
++ return -EINVAL;
++
+ if (pat_enabled && write_combine)
+ prot |= _PAGE_CACHE_WC;
+ else if (pat_enabled || boot_cpu_data.x86 > 3)
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 66d4aa8..d5aa886 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -560,6 +560,28 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ EXPORT_SYMBOL(blk_stack_limits);
+
+ /**
++ * bdev_stack_limits - adjust queue limits for stacked drivers
++ * @t: the stacking driver limits (top device)
++ * @bdev: the component block_device (bottom)
++ * @start: first data sector within component device
++ *
++ * Description:
++ * Merges queue limits for a top device and a block_device. Returns
++ * 0 if alignment didn't change. Returns -1 if adding the bottom
++ * device caused misalignment.
++ */
++int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
++ sector_t start)
++{
++ struct request_queue *bq = bdev_get_queue(bdev);
++
++ start += get_start_sect(bdev);
++
++ return blk_stack_limits(t, &bq->limits, start << 9);
++}
++EXPORT_SYMBOL(bdev_stack_limits);
++
++/**
+ * disk_stack_limits - adjust queue limits for stacked drivers
+ * @disk: MD/DM gendisk (top)
+ * @bdev: the underlying block device (bottom)
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 7511029..f1670e0 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -201,14 +201,13 @@ unlock:
+ spin_unlock_irqrestore(&ec->curr_lock, flags);
+ }
+
+-static void acpi_ec_gpe_query(void *ec_cxt);
++static int acpi_ec_sync_query(struct acpi_ec *ec);
+
+-static int ec_check_sci(struct acpi_ec *ec, u8 state)
++static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
+ {
+ if (state & ACPI_EC_FLAG_SCI) {
+ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+- return acpi_os_execute(OSL_EC_BURST_HANDLER,
+- acpi_ec_gpe_query, ec);
++ return acpi_ec_sync_query(ec);
+ }
+ return 0;
+ }
+@@ -249,11 +248,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
+ {
+ unsigned long tmp;
+ int ret = 0;
+- pr_debug(PREFIX "transaction start\n");
+- /* disable GPE during transaction if storm is detected */
+- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+- acpi_disable_gpe(NULL, ec->gpe);
+- }
+ if (EC_FLAGS_MSI)
+ udelay(ACPI_EC_MSI_UDELAY);
+ /* start transaction */
+@@ -265,20 +259,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+ spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ ret = ec_poll(ec);
+- pr_debug(PREFIX "transaction end\n");
+ spin_lock_irqsave(&ec->curr_lock, tmp);
+ ec->curr = NULL;
+ spin_unlock_irqrestore(&ec->curr_lock, tmp);
+- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+- /* check if we received SCI during transaction */
+- ec_check_sci(ec, acpi_ec_read_status(ec));
+- /* it is safe to enable GPE outside of transaction */
+- acpi_enable_gpe(NULL, ec->gpe);
+- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+- pr_info(PREFIX "GPE storm detected, "
+- "transactions will use polling mode\n");
+- set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+- }
+ return ret;
+ }
+
+@@ -321,7 +304,26 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ status = -ETIME;
+ goto end;
+ }
++ pr_debug(PREFIX "transaction start\n");
++ /* disable GPE during transaction if storm is detected */
++ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
++ acpi_disable_gpe(NULL, ec->gpe);
++ }
++
+ status = acpi_ec_transaction_unlocked(ec, t);
++
++ /* check if we received SCI during transaction */
++ ec_check_sci_sync(ec, acpi_ec_read_status(ec));
++ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
++ msleep(1);
++ /* it is safe to enable GPE outside of transaction */
++ acpi_enable_gpe(NULL, ec->gpe);
++ } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
++ pr_info(PREFIX "GPE storm detected, "
++ "transactions will use polling mode\n");
++ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
++ }
++ pr_debug(PREFIX "transaction end\n");
+ end:
+ if (ec->global_lock)
+ acpi_release_global_lock(glk);
+@@ -443,7 +445,7 @@ int ec_transaction(u8 command,
+
+ EXPORT_SYMBOL(ec_transaction);
+
+-static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
++static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
+ {
+ int result;
+ u8 d;
+@@ -452,20 +454,16 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
+ .wlen = 0, .rlen = 1};
+ if (!ec || !data)
+ return -EINVAL;
+-
+ /*
+ * Query the EC to find out which _Qxx method we need to evaluate.
+ * Note that successful completion of the query causes the ACPI_EC_SCI
+ * bit to be cleared (and thus clearing the interrupt source).
+ */
+-
+- result = acpi_ec_transaction(ec, &t);
++ result = acpi_ec_transaction_unlocked(ec, &t);
+ if (result)
+ return result;
+-
+ if (!d)
+ return -ENODATA;
+-
+ *data = d;
+ return 0;
+ }
+@@ -509,43 +507,78 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
+
+ EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
+
+-static void acpi_ec_gpe_query(void *ec_cxt)
++static void acpi_ec_run(void *cxt)
+ {
+- struct acpi_ec *ec = ec_cxt;
+- u8 value = 0;
+- struct acpi_ec_query_handler *handler, copy;
+-
+- if (!ec || acpi_ec_query(ec, &value))
++ struct acpi_ec_query_handler *handler = cxt;
++ if (!handler)
+ return;
+- mutex_lock(&ec->lock);
++ pr_debug(PREFIX "start query execution\n");
++ if (handler->func)
++ handler->func(handler->data);
++ else if (handler->handle)
++ acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
++ pr_debug(PREFIX "stop query execution\n");
++ kfree(handler);
++}
++
++static int acpi_ec_sync_query(struct acpi_ec *ec)
++{
++ u8 value = 0;
++ int status;
++ struct acpi_ec_query_handler *handler, *copy;
++ if ((status = acpi_ec_query_unlocked(ec, &value)))
++ return status;
+ list_for_each_entry(handler, &ec->list, node) {
+ if (value == handler->query_bit) {
+ /* have custom handler for this bit */
+- memcpy(&copy, handler, sizeof(copy));
+- mutex_unlock(&ec->lock);
+- if (copy.func) {
+- copy.func(copy.data);
+- } else if (copy.handle) {
+- acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
+- }
+- return;
++ copy = kmalloc(sizeof(*handler), GFP_KERNEL);
++ if (!copy)
++ return -ENOMEM;
++ memcpy(copy, handler, sizeof(*copy));
++ pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
++ return acpi_os_execute(OSL_GPE_HANDLER,
++ acpi_ec_run, copy);
+ }
+ }
++ return 0;
++}
++
++static void acpi_ec_gpe_query(void *ec_cxt)
++{
++ struct acpi_ec *ec = ec_cxt;
++ if (!ec)
++ return;
++ mutex_lock(&ec->lock);
++ acpi_ec_sync_query(ec);
+ mutex_unlock(&ec->lock);
+ }
+
++static void acpi_ec_gpe_query(void *ec_cxt);
++
++static int ec_check_sci(struct acpi_ec *ec, u8 state)
++{
++ if (state & ACPI_EC_FLAG_SCI) {
++ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
++ pr_debug(PREFIX "push gpe query to the queue\n");
++ return acpi_os_execute(OSL_NOTIFY_HANDLER,
++ acpi_ec_gpe_query, ec);
++ }
++ }
++ return 0;
++}
++
+ static u32 acpi_ec_gpe_handler(void *data)
+ {
+ struct acpi_ec *ec = data;
+- u8 status;
+
+ pr_debug(PREFIX "~~~> interrupt\n");
+- status = acpi_ec_read_status(ec);
+
+- advance_transaction(ec, status);
+- if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
++ advance_transaction(ec, acpi_ec_read_status(ec));
++ if (ec_transaction_done(ec) &&
++ (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
+ wake_up(&ec->wait);
+- ec_check_sci(ec, status);
++ ec_check_sci(ec, acpi_ec_read_status(ec));
++ }
+ return ACPI_INTERRUPT_HANDLED;
+ }
+
+diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
+index d3400b2..dc52f75 100644
+--- a/drivers/char/nozomi.c
++++ b/drivers/char/nozomi.c
+@@ -1629,10 +1629,10 @@ static void ntty_close(struct tty_struct *tty, struct file *file)
+
+ dc->open_ttys--;
+ port->count--;
+- tty_port_tty_set(port, NULL);
+
+ if (port->count == 0) {
+ DBG1("close: %d", nport->token_dl);
++ tty_port_tty_set(port, NULL);
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ dc->last_ier &= ~(nport->token_dl);
+ writew(dc->last_ier, dc->reg_ier);
+diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
+index 59499ee..e919bd9 100644
+--- a/drivers/char/tty_io.c
++++ b/drivers/char/tty_io.c
+@@ -1930,8 +1930,8 @@ static int tty_fasync(int fd, struct file *filp, int on)
+ pid = task_pid(current);
+ type = PIDTYPE_PID;
+ }
+- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ retval = __f_setown(filp, pid, type, 0);
++ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ if (retval)
+ goto out;
+ } else {
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 083bec2..29e21d3 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -472,14 +472,63 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+ }
+
+ /**
+- * Don't check status code from this as it switches the bus back to the
+- * SDVO chips which defeats the purpose of doing a bus switch in the first
+- * place.
++ * Try to read the response after issuie the DDC switch command. But it
++ * is noted that we must do the action of reading response and issuing DDC
++ * switch command in one I2C transaction. Otherwise when we try to start
++ * another I2C transaction after issuing the DDC bus switch, it will be
++ * switched to the internal SDVO register.
+ */
+ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
+ u8 target)
+ {
+- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
++ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
++ u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
++ struct i2c_msg msgs[] = {
++ {
++ .addr = sdvo_priv->slave_addr >> 1,
++ .flags = 0,
++ .len = 2,
++ .buf = out_buf,
++ },
++ /* the following two are to read the response */
++ {
++ .addr = sdvo_priv->slave_addr >> 1,
++ .flags = 0,
++ .len = 1,
++ .buf = cmd_buf,
++ },
++ {
++ .addr = sdvo_priv->slave_addr >> 1,
++ .flags = I2C_M_RD,
++ .len = 1,
++ .buf = ret_value,
++ },
++ };
++
++ intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
++ &target, 1);
++ /* write the DDC switch command argument */
++ intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
++
++ out_buf[0] = SDVO_I2C_OPCODE;
++ out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
++ cmd_buf[0] = SDVO_I2C_CMD_STATUS;
++ cmd_buf[1] = 0;
++ ret_value[0] = 0;
++ ret_value[1] = 0;
++
++ ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
++ if (ret != 3) {
++ /* failure in I2C transfer */
++ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
++ return;
++ }
++ if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
++ DRM_DEBUG_KMS("DDC switch command returns response %d\n",
++ ret_value[0]);
++ return;
++ }
++ return;
+ }
+
+ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
+@@ -1589,6 +1638,32 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
+ edid = drm_get_edid(&intel_output->base,
+ intel_output->ddc_bus);
+
++ /* This is only applied to SDVO cards with multiple outputs */
++ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
++ uint8_t saved_ddc, temp_ddc;
++ saved_ddc = sdvo_priv->ddc_bus;
++ temp_ddc = sdvo_priv->ddc_bus >> 1;
++ /*
++ * Don't use the 1 as the argument of DDC bus switch to get
++ * the EDID. It is used for SDVO SPD ROM.
++ */
++ while(temp_ddc > 1) {
++ sdvo_priv->ddc_bus = temp_ddc;
++ edid = drm_get_edid(&intel_output->base,
++ intel_output->ddc_bus);
++ if (edid) {
++ /*
++ * When we can get the EDID, maybe it is the
++ * correct DDC bus. Update it.
++ */
++ sdvo_priv->ddc_bus = temp_ddc;
++ break;
++ }
++ temp_ddc >>= 1;
++ }
++ if (edid == NULL)
++ sdvo_priv->ddc_bus = saved_ddc;
++ }
+ /* when there is no edid and no monitor is connected with VGA
+ * port, try to use the CRT ddc to read the EDID for DVI-connector
+ */
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 1a6cb3c..e869128 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -499,16 +499,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+ return 0;
+ }
+
+- if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
+- DMWARN("%s: target device %s is misaligned: "
++ if (bdev_stack_limits(limits, bdev, start) < 0)
++ DMWARN("%s: adding target device %s caused an alignment inconsistency: "
+ "physical_block_size=%u, logical_block_size=%u, "
+ "alignment_offset=%u, start=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+ q->limits.physical_block_size,
+ q->limits.logical_block_size,
+ q->limits.alignment_offset,
+- (unsigned long long) start << 9);
+-
++ (unsigned long long) start << SECTOR_SHIFT);
+
+ /*
+ * Check if merge fn is supported.
+@@ -1025,9 +1024,9 @@ combine_limits:
+ * for the table.
+ */
+ if (blk_stack_limits(limits, &ti_limits, 0) < 0)
+- DMWARN("%s: target device "
++ DMWARN("%s: adding target device "
+ "(start sect %llu len %llu) "
+- "is misaligned",
++ "caused an alignment inconsistency",
+ dm_device_name(table->md),
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len);
+@@ -1079,15 +1078,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *limits)
+ {
+ /*
+- * Each target device in the table has a data area that should normally
+- * be aligned such that the DM device's alignment_offset is 0.
+- * FIXME: Propagate alignment_offsets up the stack and warn of
+- * sub-optimal or inconsistent settings.
+- */
+- limits->alignment_offset = 0;
+- limits->misaligned = 0;
+-
+- /*
+ * Copy table's limits to the DM device's request_queue
+ */
+ q->limits = *limits;
+diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
+index aa8f995..28b4625 100644
+--- a/drivers/media/video/gspca/sunplus.c
++++ b/drivers/media/video/gspca/sunplus.c
+@@ -705,7 +705,7 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
+ rc = spca504B_PollingDataReady(gspca_dev);
+
+ /* Init the cam width height with some values get on init ? */
+- reg_w_riv(dev, 0x31, 0, 0x04);
++ reg_w_riv(dev, 0x31, 0x04, 0);
+ spca504B_WaitCmdStatus(gspca_dev);
+ rc = spca504B_PollingDataReady(gspca_dev);
+ break;
+@@ -807,14 +807,14 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev)
+ default:
+ /* case BRIDGE_SPCA533: */
+ /* case BRIDGE_SPCA504B: */
+- reg_w_riv(dev, 0, 0x00, 0x21ad); /* hue */
+- reg_w_riv(dev, 0, 0x01, 0x21ac); /* sat/hue */
+- reg_w_riv(dev, 0, 0x00, 0x21a3); /* gamma */
++ reg_w_riv(dev, 0, 0x21ad, 0x00); /* hue */
++ reg_w_riv(dev, 0, 0x21ac, 0x01); /* sat/hue */
++ reg_w_riv(dev, 0, 0x21a3, 0x00); /* gamma */
+ break;
+ case BRIDGE_SPCA536:
+- reg_w_riv(dev, 0, 0x40, 0x20f5);
+- reg_w_riv(dev, 0, 0x01, 0x20f4);
+- reg_w_riv(dev, 0, 0x00, 0x2089);
++ reg_w_riv(dev, 0, 0x20f5, 0x40);
++ reg_w_riv(dev, 0, 0x20f4, 0x01);
++ reg_w_riv(dev, 0, 0x2089, 0x00);
+ break;
+ }
+ if (pollreg)
+@@ -888,11 +888,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
+ switch (sd->bridge) {
+ case BRIDGE_SPCA504B:
+ reg_w_riv(dev, 0x1d, 0x00, 0);
+- reg_w_riv(dev, 0, 0x01, 0x2306);
+- reg_w_riv(dev, 0, 0x00, 0x0d04);
+- reg_w_riv(dev, 0, 0x00, 0x2000);
+- reg_w_riv(dev, 0, 0x13, 0x2301);
+- reg_w_riv(dev, 0, 0x00, 0x2306);
++ reg_w_riv(dev, 0, 0x2306, 0x01);
++ reg_w_riv(dev, 0, 0x0d04, 0x00);
++ reg_w_riv(dev, 0, 0x2000, 0x00);
++ reg_w_riv(dev, 0, 0x2301, 0x13);
++ reg_w_riv(dev, 0, 0x2306, 0x00);
+ /* fall thru */
+ case BRIDGE_SPCA533:
+ spca504B_PollingDataReady(gspca_dev);
+@@ -1011,7 +1011,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
+ spca504B_WaitCmdStatus(gspca_dev);
+ break;
+ default:
+- reg_w_riv(dev, 0x31, 0, 0x04);
++ reg_w_riv(dev, 0x31, 0x04, 0);
+ spca504B_WaitCmdStatus(gspca_dev);
+ spca504B_PollingDataReady(gspca_dev);
+ break;
+diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
+index e9eae4a..1eac626 100644
+--- a/drivers/misc/enclosure.c
++++ b/drivers/misc/enclosure.c
+@@ -391,6 +391,7 @@ static const char *const enclosure_status [] = {
+ [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
+ [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
+ [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
++ [ENCLOSURE_STATUS_MAX] = NULL,
+ };
+
+ static const char *const enclosure_type [] = {
+diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
+index 0cce8a4..deac67e 100644
+--- a/drivers/serial/8250_pnp.c
++++ b/drivers/serial/8250_pnp.c
+@@ -328,15 +328,7 @@ static const struct pnp_device_id pnp_dev_table[] = {
+ /* U.S. Robotics 56K Voice INT PnP*/
+ { "USR9190", 0 },
+ /* Wacom tablets */
+- { "WACF004", 0 },
+- { "WACF005", 0 },
+- { "WACF006", 0 },
+- { "WACF007", 0 },
+- { "WACF008", 0 },
+- { "WACF009", 0 },
+- { "WACF00A", 0 },
+- { "WACF00B", 0 },
+- { "WACF00C", 0 },
++ { "WACFXXX", 0 },
+ /* Compaq touchscreen */
+ { "FPI2002", 0 },
+ /* Fujitsu Stylistic touchscreens */
+diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
+index f4c2657..43c57b7 100644
+--- a/drivers/staging/asus_oled/asus_oled.c
++++ b/drivers/staging/asus_oled/asus_oled.c
+@@ -194,9 +194,11 @@ static ssize_t set_enabled(struct device *dev, struct device_attribute *attr,
+ {
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct asus_oled_dev *odev = usb_get_intfdata(intf);
+- int temp = strict_strtoul(buf, 10, NULL);
++ unsigned long value;
++ if (strict_strtoul(buf, 10, &value))
++ return -EINVAL;
+
+- enable_oled(odev, temp);
++ enable_oled(odev, value);
+
+ return count;
+ }
+@@ -207,10 +209,12 @@ static ssize_t class_set_enabled(struct device *device,
+ {
+ struct asus_oled_dev *odev =
+ (struct asus_oled_dev *) dev_get_drvdata(device);
++ unsigned long value;
+
+- int temp = strict_strtoul(buf, 10, NULL);
++ if (strict_strtoul(buf, 10, &value))
++ return -EINVAL;
+
+- enable_oled(odev, temp);
++ enable_oled(odev, value);
+
+ return count;
+ }
+diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
+index c5b6613..c2809f2 100644
+--- a/drivers/staging/hv/Hv.c
++++ b/drivers/staging/hv/Hv.c
+@@ -386,7 +386,7 @@ u16 HvSignalEvent(void)
+ * retrieve the initialized message and event pages. Otherwise, we create and
+ * initialize the message and event pages.
+ */
+-int HvSynicInit(u32 irqVector)
++void HvSynicInit(void *irqarg)
+ {
+ u64 version;
+ union hv_synic_simp simp;
+@@ -394,13 +394,14 @@ int HvSynicInit(u32 irqVector)
+ union hv_synic_sint sharedSint;
+ union hv_synic_scontrol sctrl;
+ u64 guestID;
+- int ret = 0;
++ u32 irqVector = *((u32 *)(irqarg));
++ int cpu = smp_processor_id();
+
+ DPRINT_ENTER(VMBUS);
+
+ if (!gHvContext.HypercallPage) {
+ DPRINT_EXIT(VMBUS);
+- return ret;
++ return;
+ }
+
+ /* Check the version */
+@@ -425,27 +426,27 @@ int HvSynicInit(u32 irqVector)
+ */
+ rdmsrl(HV_X64_MSR_GUEST_OS_ID, guestID);
+ if (guestID == HV_LINUX_GUEST_ID) {
+- gHvContext.synICMessagePage[0] =
++ gHvContext.synICMessagePage[cpu] =
+ phys_to_virt(simp.BaseSimpGpa << PAGE_SHIFT);
+- gHvContext.synICEventPage[0] =
++ gHvContext.synICEventPage[cpu] =
+ phys_to_virt(siefp.BaseSiefpGpa << PAGE_SHIFT);
+ } else {
+ DPRINT_ERR(VMBUS, "unknown guest id!!");
+ goto Cleanup;
+ }
+ DPRINT_DBG(VMBUS, "MAPPED: Simp: %p, Sifep: %p",
+- gHvContext.synICMessagePage[0],
+- gHvContext.synICEventPage[0]);
++ gHvContext.synICMessagePage[cpu],
++ gHvContext.synICEventPage[cpu]);
+ } else {
+- gHvContext.synICMessagePage[0] = osd_PageAlloc(1);
+- if (gHvContext.synICMessagePage[0] == NULL) {
++ gHvContext.synICMessagePage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
++ if (gHvContext.synICMessagePage[cpu] == NULL) {
+ DPRINT_ERR(VMBUS,
+ "unable to allocate SYNIC message page!!");
+ goto Cleanup;
+ }
+
+- gHvContext.synICEventPage[0] = osd_PageAlloc(1);
+- if (gHvContext.synICEventPage[0] == NULL) {
++ gHvContext.synICEventPage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
++ if (gHvContext.synICEventPage[cpu] == NULL) {
+ DPRINT_ERR(VMBUS,
+ "unable to allocate SYNIC event page!!");
+ goto Cleanup;
+@@ -454,7 +455,7 @@ int HvSynicInit(u32 irqVector)
+ /* Setup the Synic's message page */
+ rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
+ simp.SimpEnabled = 1;
+- simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[0])
++ simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[cpu])
+ >> PAGE_SHIFT;
+
+ DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx",
+@@ -465,7 +466,7 @@ int HvSynicInit(u32 irqVector)
+ /* Setup the Synic's event page */
+ rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
+ siefp.SiefpEnabled = 1;
+- siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[0])
++ siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[cpu])
+ >> PAGE_SHIFT;
+
+ DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx",
+@@ -501,32 +502,30 @@ int HvSynicInit(u32 irqVector)
+
+ DPRINT_EXIT(VMBUS);
+
+- return ret;
++ return;
+
+ Cleanup:
+- ret = -1;
+-
+ if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
+- if (gHvContext.synICEventPage[0])
+- osd_PageFree(gHvContext.synICEventPage[0], 1);
++ if (gHvContext.synICEventPage[cpu])
++ osd_PageFree(gHvContext.synICEventPage[cpu], 1);
+
+- if (gHvContext.synICMessagePage[0])
+- osd_PageFree(gHvContext.synICMessagePage[0], 1);
++ if (gHvContext.synICMessagePage[cpu])
++ osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
+ }
+
+ DPRINT_EXIT(VMBUS);
+-
+- return ret;
++ return;
+ }
+
+ /**
+ * HvSynicCleanup - Cleanup routine for HvSynicInit().
+ */
+-void HvSynicCleanup(void)
++void HvSynicCleanup(void *arg)
+ {
+ union hv_synic_sint sharedSint;
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
++ int cpu = smp_processor_id();
+
+ DPRINT_ENTER(VMBUS);
+
+@@ -539,6 +538,7 @@ void HvSynicCleanup(void)
+
+ sharedSint.Masked = 1;
+
++ /* Need to correctly cleanup in the case of SMP!!! */
+ /* Disable the interrupt */
+ wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
+@@ -560,8 +560,8 @@ void HvSynicCleanup(void)
+
+ wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
+
+- osd_PageFree(gHvContext.synICMessagePage[0], 1);
+- osd_PageFree(gHvContext.synICEventPage[0], 1);
++ osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
++ osd_PageFree(gHvContext.synICEventPage[cpu], 1);
+ }
+
+ DPRINT_EXIT(VMBUS);
+diff --git a/drivers/staging/hv/Hv.h b/drivers/staging/hv/Hv.h
+index 5379e4b..fce4b5c 100644
+--- a/drivers/staging/hv/Hv.h
++++ b/drivers/staging/hv/Hv.h
+@@ -93,7 +93,7 @@ static const struct hv_guid VMBUS_SERVICE_ID = {
+ },
+ };
+
+-#define MAX_NUM_CPUS 1
++#define MAX_NUM_CPUS 32
+
+
+ struct hv_input_signal_event_buffer {
+@@ -137,8 +137,8 @@ extern u16 HvPostMessage(union hv_connection_id connectionId,
+
+ extern u16 HvSignalEvent(void);
+
+-extern int HvSynicInit(u32 irqVector);
++extern void HvSynicInit(void *irqarg);
+
+-extern void HvSynicCleanup(void);
++extern void HvSynicCleanup(void *arg);
+
+ #endif /* __HV_H__ */
+diff --git a/drivers/staging/hv/Vmbus.c b/drivers/staging/hv/Vmbus.c
+index a4dd06f..35a023e 100644
+--- a/drivers/staging/hv/Vmbus.c
++++ b/drivers/staging/hv/Vmbus.c
+@@ -129,7 +129,7 @@ static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
+
+ /* strcpy(dev->name, "vmbus"); */
+ /* SynIC setup... */
+- ret = HvSynicInit(*irqvector);
++ on_each_cpu(HvSynicInit, (void *)irqvector, 1);
+
+ /* Connect to VMBus in the root partition */
+ ret = VmbusConnect();
+@@ -150,7 +150,7 @@ static int VmbusOnDeviceRemove(struct hv_device *dev)
+ DPRINT_ENTER(VMBUS);
+ VmbusChannelReleaseUnattachedChannels();
+ VmbusDisconnect();
+- HvSynicCleanup();
++ on_each_cpu(HvSynicCleanup, NULL, 1);
+ DPRINT_EXIT(VMBUS);
+
+ return ret;
+@@ -173,7 +173,8 @@ static void VmbusOnCleanup(struct hv_driver *drv)
+ */
+ static void VmbusOnMsgDPC(struct hv_driver *drv)
+ {
+- void *page_addr = gHvContext.synICMessagePage[0];
++ int cpu = smp_processor_id();
++ void *page_addr = gHvContext.synICMessagePage[cpu];
+ struct hv_message *msg = (struct hv_message *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ struct hv_message *copied;
+@@ -230,11 +231,12 @@ static void VmbusOnEventDPC(struct hv_driver *drv)
+ static int VmbusOnISR(struct hv_driver *drv)
+ {
+ int ret = 0;
++ int cpu = smp_processor_id();
+ void *page_addr;
+ struct hv_message *msg;
+ union hv_synic_event_flags *event;
+
+- page_addr = gHvContext.synICMessagePage[0];
++ page_addr = gHvContext.synICMessagePage[cpu];
+ msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
+
+ DPRINT_ENTER(VMBUS);
+@@ -248,7 +250,7 @@ static int VmbusOnISR(struct hv_driver *drv)
+ }
+
+ /* TODO: Check if there are events to be process */
+- page_addr = gHvContext.synICEventPage[0];
++ page_addr = gHvContext.synICEventPage[cpu];
+ event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
+
+ /* Since we are a child, we only need to check bit 0 */
+diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
+index 96f1171..355dffc 100644
+--- a/drivers/usb/core/devices.c
++++ b/drivers/usb/core/devices.c
+@@ -494,7 +494,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
+ return 0;
+ /* allocate 2^1 pages = 8K (on i386);
+ * should be more than enough for one device */
+- pages_start = (char *)__get_free_pages(GFP_KERNEL, 1);
++ pages_start = (char *)__get_free_pages(GFP_NOIO, 1);
+ if (!pages_start)
+ return -ENOMEM;
+
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 8b0c235..1a7d54b 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3286,6 +3286,9 @@ static void hub_events(void)
+ USB_PORT_FEAT_C_SUSPEND);
+ udev = hdev->children[i-1];
+ if (udev) {
++ /* TRSMRCY = 10 msec */
++ msleep(10);
++
+ usb_lock_device(udev);
+ ret = remote_wakeup(hdev->
+ children[i-1]);
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index da718e8..980a8d2 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -911,11 +911,11 @@ char *usb_cache_string(struct usb_device *udev, int index)
+ if (index <= 0)
+ return NULL;
+
+- buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL);
++ buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
+ if (buf) {
+ len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
+ if (len > 0) {
+- smallbuf = kmalloc(++len, GFP_KERNEL);
++ smallbuf = kmalloc(++len, GFP_NOIO);
+ if (!smallbuf)
+ return buf;
+ memcpy(smallbuf, buf, len);
+@@ -1682,7 +1682,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
+ if (cp) {
+ nintf = cp->desc.bNumInterfaces;
+ new_interfaces = kmalloc(nintf * sizeof(*new_interfaces),
+- GFP_KERNEL);
++ GFP_NOIO);
+ if (!new_interfaces) {
+ dev_err(&dev->dev, "Out of memory\n");
+ return -ENOMEM;
+@@ -1691,7 +1691,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
+ for (; n < nintf; ++n) {
+ new_interfaces[n] = kzalloc(
+ sizeof(struct usb_interface),
+- GFP_KERNEL);
++ GFP_NOIO);
+ if (!new_interfaces[n]) {
+ dev_err(&dev->dev, "Out of memory\n");
+ ret = -ENOMEM;
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index 8752e55..fcdcad4 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -115,6 +115,12 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
+ case USB_SPEED_HIGH:
+ speed = "480";
+ break;
++ case USB_SPEED_VARIABLE:
++ speed = "480";
++ break;
++ case USB_SPEED_SUPER:
++ speed = "5000";
++ break;
+ default:
+ speed = "unknown";
+ }
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index f5f5601..e18c677 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -785,9 +785,10 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+
+ /* start 20 msec resume signaling from this port,
+ * and make khubd collect PORT_STAT_C_SUSPEND to
+- * stop that signaling.
++ * stop that signaling. Use 5 ms extra for safety,
++ * like usb_port_resume() does.
+ */
+- ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
++ ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
+ ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
+ mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
+ }
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 1b6f1c0..698f461 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -120,9 +120,26 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
+ del_timer_sync(&ehci->watchdog);
+ del_timer_sync(&ehci->iaa_watchdog);
+
+- port = HCS_N_PORTS (ehci->hcs_params);
+ spin_lock_irq (&ehci->lock);
+
++ /* Once the controller is stopped, port resumes that are already
++ * in progress won't complete. Hence if remote wakeup is enabled
++ * for the root hub and any ports are in the middle of a resume or
++ * remote wakeup, we must fail the suspend.
++ */
++ if (hcd->self.root_hub->do_remote_wakeup) {
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ if (ehci->reset_done[port] != 0) {
++ spin_unlock_irq(&ehci->lock);
++ ehci_dbg(ehci, "suspend failed because "
++ "port %d is resuming\n",
++ port + 1);
++ return -EBUSY;
++ }
++ }
++ }
++
+ /* stop schedules, clean any completed work */
+ if (HC_IS_RUNNING(hcd->state)) {
+ ehci_quiesce (ehci);
+@@ -138,6 +155,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
+ */
+ ehci->bus_suspended = 0;
+ ehci->owned_ports = 0;
++ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *reg = &ehci->regs->port_status [port];
+ u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index 139a2cc..c0d4b39 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -827,9 +827,10 @@ qh_make (
+ * But interval 1 scheduling is simpler, and
+ * includes high bandwidth.
+ */
+- dbg ("intr period %d uframes, NYET!",
+- urb->interval);
+- goto done;
++ urb->interval = 1;
++ } else if (qh->period > ehci->periodic_size) {
++ qh->period = ehci->periodic_size;
++ urb->interval = qh->period << 3;
+ }
+ } else {
+ int think_time;
+@@ -852,6 +853,10 @@ qh_make (
+ usb_calc_bus_time (urb->dev->speed,
+ is_input, 0, max_packet (maxp)));
+ qh->period = urb->interval;
++ if (qh->period > ehci->periodic_size) {
++ qh->period = ehci->periodic_size;
++ urb->interval = qh->period;
++ }
+ }
+ }
+
+diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
+index 5cd0e48..99cd00f 100644
+--- a/drivers/usb/host/uhci-hcd.c
++++ b/drivers/usb/host/uhci-hcd.c
+@@ -749,7 +749,20 @@ static int uhci_rh_suspend(struct usb_hcd *hcd)
+ spin_lock_irq(&uhci->lock);
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ rc = -ESHUTDOWN;
+- else if (!uhci->dead)
++ else if (uhci->dead)
++ ; /* Dead controllers tell no tales */
++
++ /* Once the controller is stopped, port resumes that are already
++ * in progress won't complete. Hence if remote wakeup is enabled
++ * for the root hub and any ports are in the middle of a resume or
++ * remote wakeup, we must fail the suspend.
++ */
++ else if (hcd->self.root_hub->do_remote_wakeup &&
++ uhci->resuming_ports) {
++ dev_dbg(uhci_dev(uhci), "suspend failed because a port "
++ "is resuming\n");
++ rc = -EBUSY;
++ } else
+ suspend_rh(uhci, UHCI_RH_SUSPENDED);
+ spin_unlock_irq(&uhci->lock);
+ return rc;
+diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
+index 885b585..8270055 100644
+--- a/drivers/usb/host/uhci-hub.c
++++ b/drivers/usb/host/uhci-hub.c
+@@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
+ /* Port received a wakeup request */
+ set_bit(port, &uhci->resuming_ports);
+ uhci->ports_timeout = jiffies +
+- msecs_to_jiffies(20);
++ msecs_to_jiffies(25);
+
+ /* Make sure we see the port again
+ * after the resuming period is over. */
+diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
+index bbe005c..e0fb294 100644
+--- a/drivers/usb/serial/generic.c
++++ b/drivers/usb/serial/generic.c
+@@ -489,6 +489,8 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
+ dbg("%s - port %d", __func__, port->number);
+
+ if (port->serial->type->max_in_flight_urbs) {
++ kfree(urb->transfer_buffer);
++
+ spin_lock_irqsave(&port->lock, flags);
+ --port->urbs_in_flight;
+ port->tx_bytes_flight -= urb->transfer_buffer_length;
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 64a0a2c..c932f90 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1807,13 +1807,6 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_GO_SLOW ),
+
+-/* Reported by Rohan Hart */
+-UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
+- "INTOVA",
+- "Pixtreme",
+- US_SC_DEVICE, US_PR_DEVICE, NULL,
+- US_FL_FIX_CAPACITY ),
+-
+ /* Reported by Frederic Marchal
+ * Mio Moov 330
+ */
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index 716c8d7..33197fa 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -430,7 +430,8 @@ static void adjust_quirks(struct us_data *us)
+ u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
+ u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
+ unsigned f = 0;
+- unsigned int mask = (US_FL_SANE_SENSE | US_FL_FIX_CAPACITY |
++ unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
++ US_FL_FIX_CAPACITY |
+ US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
+ US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
+ US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index fbb6e5e..7cb0a59 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -1748,7 +1748,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
+ char *cipher_name, size_t *key_size)
+ {
+ char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
+- char *full_alg_name;
++ char *full_alg_name = NULL;
+ int rc;
+
+ *key_tfm = NULL;
+@@ -1763,7 +1763,6 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
+ if (rc)
+ goto out;
+ *key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
+- kfree(full_alg_name);
+ if (IS_ERR(*key_tfm)) {
+ rc = PTR_ERR(*key_tfm);
+ printk(KERN_ERR "Unable to allocate crypto cipher with name "
+@@ -1786,6 +1785,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
+ goto out;
+ }
+ out:
++ kfree(full_alg_name);
+ return rc;
+ }
+
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index 9e94405..1744f17 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -191,13 +191,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
+ | ECRYPTFS_ENCRYPTED);
+ }
+ mutex_unlock(&crypt_stat->cs_mutex);
+- if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
+- && !(file->f_flags & O_RDONLY)) {
+- rc = -EPERM;
+- printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
+- "file must hence be opened RO\n", __func__);
+- goto out;
+- }
+ if (!ecryptfs_inode_to_private(inode)->lower_file) {
+ rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
+ if (rc) {
+@@ -208,6 +201,13 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
+ goto out;
+ }
+ }
++ if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
++ && !(file->f_flags & O_RDONLY)) {
++ rc = -EPERM;
++ printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
++ "file must hence be opened RO\n", __func__);
++ goto out;
++ }
+ ecryptfs_set_file_lower(
+ file, ecryptfs_inode_to_private(inode)->lower_file);
+ if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index adf99c6..912b8ff 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -942,6 +942,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+ extern void blk_set_default_limits(struct queue_limits *lim);
+ extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ sector_t offset);
++extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
++ sector_t offset);
+ extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+ sector_t offset);
+ extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
+index 90d1c21..9a33c5f 100644
+--- a/include/linux/enclosure.h
++++ b/include/linux/enclosure.h
+@@ -42,6 +42,8 @@ enum enclosure_status {
+ ENCLOSURE_STATUS_NOT_INSTALLED,
+ ENCLOSURE_STATUS_UNKNOWN,
+ ENCLOSURE_STATUS_UNAVAILABLE,
++ /* last element for counting purposes */
++ ENCLOSURE_STATUS_MAX
+ };
+
+ /* SFF-8485 activity light settings */
+diff --git a/kernel/perf_event.c b/kernel/perf_event.c
+index 6eee915..413d101 100644
+--- a/kernel/perf_event.c
++++ b/kernel/perf_event.c
+@@ -1359,6 +1359,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
+ if (event->state != PERF_EVENT_STATE_ACTIVE)
+ continue;
+
++ if (event->cpu != -1 && event->cpu != smp_processor_id())
++ continue;
++
+ hwc = &event->hw;
+
+ interrupts = hwc->interrupts;
+@@ -3226,6 +3229,12 @@ static void perf_event_task_output(struct perf_event *event,
+
+ static int perf_event_task_match(struct perf_event *event)
+ {
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return 0;
++
++ if (event->cpu != -1 && event->cpu != smp_processor_id())
++ return 0;
++
+ if (event->attr.comm || event->attr.mmap || event->attr.task)
+ return 1;
+
+@@ -3255,13 +3264,13 @@ static void perf_event_task_event(struct perf_task_event *task_event)
+
+ cpuctx = &get_cpu_var(perf_cpu_context);
+ perf_event_task_ctx(&cpuctx->ctx, task_event);
+- put_cpu_var(perf_cpu_context);
+
+ rcu_read_lock();
+ if (!ctx)
+ ctx = rcu_dereference(task_event->task->perf_event_ctxp);
+ if (ctx)
+ perf_event_task_ctx(ctx, task_event);
++ put_cpu_var(perf_cpu_context);
+ rcu_read_unlock();
+ }
+
+@@ -3338,6 +3347,12 @@ static void perf_event_comm_output(struct perf_event *event,
+
+ static int perf_event_comm_match(struct perf_event *event)
+ {
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return 0;
++
++ if (event->cpu != -1 && event->cpu != smp_processor_id())
++ return 0;
++
+ if (event->attr.comm)
+ return 1;
+
+@@ -3378,7 +3393,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
+
+ cpuctx = &get_cpu_var(perf_cpu_context);
+ perf_event_comm_ctx(&cpuctx->ctx, comm_event);
+- put_cpu_var(perf_cpu_context);
+
+ rcu_read_lock();
+ /*
+@@ -3388,6 +3402,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
+ ctx = rcu_dereference(current->perf_event_ctxp);
+ if (ctx)
+ perf_event_comm_ctx(ctx, comm_event);
++ put_cpu_var(perf_cpu_context);
+ rcu_read_unlock();
+ }
+
+@@ -3462,6 +3477,12 @@ static void perf_event_mmap_output(struct perf_event *event,
+ static int perf_event_mmap_match(struct perf_event *event,
+ struct perf_mmap_event *mmap_event)
+ {
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return 0;
++
++ if (event->cpu != -1 && event->cpu != smp_processor_id())
++ return 0;
++
+ if (event->attr.mmap)
+ return 1;
+
+@@ -3539,7 +3560,6 @@ got_name:
+
+ cpuctx = &get_cpu_var(perf_cpu_context);
+ perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
+- put_cpu_var(perf_cpu_context);
+
+ rcu_read_lock();
+ /*
+@@ -3549,6 +3569,7 @@ got_name:
+ ctx = rcu_dereference(current->perf_event_ctxp);
+ if (ctx)
+ perf_event_mmap_ctx(ctx, mmap_event);
++ put_cpu_var(perf_cpu_context);
+ rcu_read_unlock();
+
+ kfree(buf);
+@@ -3811,6 +3832,9 @@ static int perf_swevent_match(struct perf_event *event,
+ enum perf_type_id type,
+ u32 event_id, struct pt_regs *regs)
+ {
++ if (event->cpu != -1 && event->cpu != smp_processor_id())
++ return 0;
++
+ if (!perf_swevent_is_counting(event))
+ return 0;
+
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 7758726..a3a99d3 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -555,10 +555,8 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
+ }
+ rcu_read_unlock();
+
+- if (nr) {
+- BUG_ON(nr > atomic_read(&vmap_lazy_nr));
++ if (nr)
+ atomic_sub(nr, &vmap_lazy_nr);
+- }
+
+ if (nr || force_flush)
+ flush_tlb_kernel_range(*start, *end);
+diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
+index e8a510d..4101afe 100644
+--- a/tools/perf/builtin-timechart.c
++++ b/tools/perf/builtin-timechart.c
+@@ -275,7 +275,7 @@ static u64 cpus_pstate_state[MAX_CPUS];
+ static int
+ process_comm_event(event_t *event)
+ {
+- pid_set_comm(event->comm.pid, event->comm.comm);
++ pid_set_comm(event->comm.tid, event->comm.comm);
+ return 0;
+ }
+ static int
diff --git a/debian/patches/series/6 b/debian/patches/series/6
index 4e83688a1..66f85b383 100644
--- a/debian/patches/series/6
+++ b/debian/patches/series/6
@@ -29,3 +29,4 @@
+ bugfix/all/e1000-enhance-fragment-detection.patch
+ bugfix/all/e1000e-enhance-fragment-detection.patch
+ bugfix/all/stable/2.6.32.5.patch
++ bugfix/all/stable/2.6.32.6.patch
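
Editorial note on the block-layer change quoted above (this text is not part of the patch and must be dropped before applying it): the blk-settings.c hunk exports a new bdev_stack_limits() helper so that stacking drivers such as MD/DM can merge queue limits from a component block_device without folding in the partition offset themselves; the helper adds get_start_sect(bdev) to the start sector (in 512-byte units) before delegating to blk_stack_limits(). A minimal sketch of a caller, modelled on the dm_set_device_limits() hunk above; the my_target_stack_limits() name is invented here purely for illustration:

	/* Sketch only: merge a component device's limits into a stacked
	 * target's limits. As in the dm-table.c hunk, a negative return
	 * from bdev_stack_limits() only reports an alignment
	 * inconsistency; the limits themselves are still merged.
	 */
	static int my_target_stack_limits(struct queue_limits *limits,
					  struct block_device *bdev,
					  sector_t start)
	{
		char b[BDEVNAME_SIZE];

		if (bdev_stack_limits(limits, bdev, start) < 0)
			printk(KERN_WARNING "my_target: adding %s caused "
			       "an alignment inconsistency\n",
			       bdevname(bdev, b));
		return 0;
	}

Passing the raw start sector rather than (start << 9), and letting the helper account for the partition offset, is exactly the change the dm-table.c hunk makes to dm_set_device_limits().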