Update to 3.2.25

Drop one bug fix that is now included upstream.
Refresh various other patches that had more or less serious conflicts.

svn path=/dists/sid/linux/; revision=19304
Ben Hutchings 2012-08-03 01:37:42 +00:00
parent 01a5ea1aab
commit 1d65a00c89
6 changed files with 438 additions and 432 deletions

debian/changelog

@@ -1,4 +1,4 @@
linux (3.2.24-1) UNRELEASED; urgency=low
linux (3.2.25-1) UNRELEASED; urgency=low
* New upstream stable update:
http://www.kernel.org/pub/linux/kernel/v3.x/ChangeLog-3.2.24
@@ -27,6 +27,18 @@ linux (3.2.24-1) UNRELEASED; urgency=low
- bnx2x: fix panic when TX ring is full
- eCryptfs: Gracefully refuse miscdev file ops on inherited/passed files
- ACPI / PM: Make acpi_pm_device_sleep_state() follow the specification
http://www.kernel.org/pub/linux/kernel/v3.x/ChangeLog-3.2.25
- mm: Fix various performance problems, particularly affecting use of
transparent hugepages (Closes: #675493)
- target: Add range checking to UNMAP emulation
- target: Fix reading of data length fields for UNMAP commands
- target: Fix possible integer underflow in UNMAP emulation
- target: Check number of unmap descriptors against our limit
- ext4: don't let i_reserved_meta_blocks go negative
- ext4: undo ext4_calc_metadata_amount if we fail to claim space
- locks: fix checking of fcntl_setlease argument
- drm/radeon: fix bo creation retry path
- Btrfs: call the ordered free operation without any locks held
[ Ben Hutchings ]
* linux-image: Include package version in utsname version string


@@ -1,29 +0,0 @@
From: Jan Kara <jack@suse.cz>
Date: Tue, 10 Jul 2012 17:58:04 +0200
Subject: udf: Improve table length check to avoid possible overflow
When a partition table length is corrupted to be close to 1 << 32, the
check for its length may overflow on 32-bit systems and we will think
the length is valid. Later on the kernel can crash trying to read beyond
end of buffer. Fix the check to avoid possible overflow.
CC: stable@vger.kernel.org
Reported-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Jan Kara <jack@suse.cz>
---
fs/udf/super.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 8a75838..dcbf987 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1340,7 +1340,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
BUG_ON(ident != TAG_IDENT_LVD);
lvd = (struct logicalVolDesc *)bh->b_data;
table_len = le32_to_cpu(lvd->mapTableLength);
- if (sizeof(*lvd) + table_len > sb->s_blocksize) {
+ if (table_len > sb->s_blocksize - sizeof(*lvd)) {
udf_err(sb, "error loading logical volume descriptor: "
"Partition table too long (%u > %lu)\n", table_len,
sb->s_blocksize - sizeof(*lvd));
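
To make the wraparound concrete, here is a small standalone C sketch (not kernel code): the block size, descriptor size and corrupted length below are made-up values, and uint32_t is used to force the 32-bit unsigned arithmetic that a 32-bit kernel performs for sizeof(*lvd) + table_len.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t blocksize = 2048;              /* illustrative UDF block size */
	uint32_t lvd_size  = 440;               /* stand-in for sizeof(*lvd) */
	uint32_t table_len = UINT32_MAX - 100;  /* corrupted on-disk length */

	/* Old check: the addition wraps in 32-bit unsigned arithmetic
	 * (440 + (2^32 - 101) == 339), so the huge length slips past. */
	if (lvd_size + table_len > blocksize)
		puts("old check: rejected");
	else
		puts("old check: accepted despite corruption (overflow)");

	/* New check: the subtraction cannot wrap because lvd_size is
	 * smaller than blocksize here, so the corrupted length is caught. */
	if (table_len > blocksize - lvd_size)
		puts("new check: rejected");
	else
		puts("new check: accepted");

	return 0;
}

On a 64-bit build the addition is done in 64 bits and does not wrap, which is why the description singles out 32-bit systems.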


@@ -27,22 +27,24 @@ this and loose ~250 lines of code.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[bwh: Adjust to apply on top of commit
6575820221f7a4dd6eadecf7bf83cdd154335eda ('workqueue: perform cpu down
operations from low priority cpu_notifier()'), cherry-picked in 3.2.25]
---
include/linux/cpu.h | 6 +-
include/linux/workqueue.h | 5 +-
kernel/workqueue.c | 556 ++++++++++++---------------------------------
3 files changed, 152 insertions(+), 415 deletions(-)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c46ec3e..72e90bb 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -66,8 +66,10 @@ enum {
@@ -73,9 +73,10 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
- /* prepare workqueues for other notifiers */
- CPU_PRI_WORKQUEUE = 5,
- /* bring up workqueues before normal notifiers and down after */
- CPU_PRI_WORKQUEUE_UP = 5,
- CPU_PRI_WORKQUEUE_DOWN = -5,
+
+ CPU_PRI_WORKQUEUE_ACTIVE = 5, /* prepare workqueues for others */
+ CPU_PRI_NORMAL = 0,
@@ -50,8 +52,6 @@ index c46ec3e..72e90bb 100644
};
#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index e228ca9..3d8ac9d 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -254,9 +254,10 @@ enum {
@@ -67,8 +67,6 @@ index e228ca9..3d8ac9d 100644
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5d23c05b..8daede8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
@@ -87,9 +85,7 @@ index 5d23c05b..8daede8 100644
- WORKER_REBIND = 1 << 5, /* mom is home, come back */
- WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
- WORKER_UNBOUND = 1 << 7, /* worker is unbound */
+ WORKER_CPU_INTENSIVE = 1 << 4, /* cpu intensive */
+ WORKER_UNBOUND = 1 << 5, /* worker is unbound */
-
- WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
- WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
-
@@ -99,6 +95,9 @@ index 5d23c05b..8daede8 100644
- TRUSTEE_BUTCHER = 2, /* butcher workers */
- TRUSTEE_RELEASE = 3, /* release workers */
- TRUSTEE_DONE = 4, /* trustee is done */
+ WORKER_CPU_INTENSIVE = 1 << 4, /* cpu intensive */
+ WORKER_UNBOUND = 1 << 5, /* worker is unbound */
+
+ WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
@@ -131,7 +130,7 @@ index 5d23c05b..8daede8 100644
} ____cacheline_aligned_in_smp;
/*
@@ -974,13 +961,38 @@ static bool is_chained_work(struct workqueue_struct *wq)
@@ -974,13 +961,38 @@ static bool is_chained_work(struct workq
return false;
}
@@ -173,7 +172,7 @@ index 5d23c05b..8daede8 100644
unsigned long flags;
debug_work_activate(work);
@@ -1026,27 +1038,32 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
@@ -1026,27 +1038,32 @@ static void __queue_work(unsigned int cp
spin_lock_irqsave(&gcwq->lock, flags);
}
@@ -223,7 +222,7 @@ index 5d23c05b..8daede8 100644
}
/**
@@ -1063,34 +1080,19 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
@@ -1063,34 +1080,19 @@ int queue_work(struct workqueue_struct *
{
int ret;
@@ -261,7 +260,7 @@ index 5d23c05b..8daede8 100644
}
EXPORT_SYMBOL_GPL(queue_work_on);
@@ -1136,6 +1138,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
@@ -1136,6 +1138,8 @@ int queue_delayed_work_on(int cpu, struc
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
@@ -270,7 +269,7 @@ index 5d23c05b..8daede8 100644
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
unsigned int lcpu;
@@ -1201,12 +1205,13 @@ static void worker_enter_idle(struct worker *worker)
@@ -1201,12 +1205,13 @@ static void worker_enter_idle(struct wor
/* idle_list is LIFO */
list_add(&worker->entry, &gcwq->idle_list);
@@ -322,7 +321,7 @@ index 5d23c05b..8daede8 100644
/* on creation a worker is in !idle && prep state */
worker->flags = WORKER_PREP;
}
@@ -1663,13 +1650,6 @@ static bool manage_workers(struct worker *worker)
@@ -1663,13 +1650,6 @@ static bool manage_workers(struct worker
gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
@@ -336,7 +335,7 @@ index 5d23c05b..8daede8 100644
return ret;
}
@@ -3209,171 +3189,71 @@ EXPORT_SYMBOL_GPL(work_busy);
@@ -3209,366 +3189,42 @@ EXPORT_SYMBOL_GPL(work_busy);
* gcwqs serve mix of short, long and very long running works making
* blocked draining impractical.
*
@@ -381,8 +380,8 @@ index 5d23c05b..8daede8 100644
- * | | ^
- * | CPU is back online v return workers |
- * ----------------> RELEASE --------------
*/
- */
-
-/**
- * trustee_wait_event_timeout - timed event wait for trustee
- * @cond: condition to wait for
@@ -411,15 +410,7 @@ index 5d23c05b..8daede8 100644
- } \
- gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
-})
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct global_cwq *gcwq = get_gcwq(cpu);
+ struct worker *uninitialized_var(new_worker);
+ unsigned long flags;
-
-/**
- * trustee_wait_event - event wait for trustee
- * @cond: condition to wait for
@@ -439,8 +430,7 @@ index 5d23c05b..8daede8 100644
- __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
- __ret1 < 0 ? -1 : 0; \
-})
+ action &= ~CPU_TASKS_FROZEN;
-
-static int __cpuinit trustee_thread(void *__gcwq)
-{
- struct global_cwq *gcwq = __gcwq;
@@ -449,18 +439,9 @@ index 5d23c05b..8daede8 100644
- struct hlist_node *pos;
- long rc;
- int i;
+ switch (action) {
+ case CPU_UP_PREPARE:
+ BUG_ON(gcwq->first_idle);
+ new_worker = create_worker(gcwq, false);
+ if (!new_worker)
+ return NOTIFY_BAD;
+ }
-
- BUG_ON(gcwq->cpu != smp_processor_id());
+ /* some are called w/ irq disabled, don't disturb irq status */
+ spin_lock_irqsave(&gcwq->lock, flags);
-
- spin_lock_irq(&gcwq->lock);
- /*
- * Claim the manager position and make all workers rogue.
@@ -470,34 +451,15 @@ index 5d23c05b..8daede8 100644
- BUG_ON(gcwq->cpu != smp_processor_id());
- rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
- BUG_ON(rc < 0);
+ switch (action) {
+ case CPU_UP_PREPARE:
+ BUG_ON(gcwq->first_idle);
+ gcwq->first_idle = new_worker;
+ break;
-
- gcwq->flags |= GCWQ_MANAGING_WORKERS;
+ case CPU_UP_CANCELED:
+ destroy_worker(gcwq->first_idle);
+ gcwq->first_idle = NULL;
+ break;
-
- list_for_each_entry(worker, &gcwq->idle_list, entry)
- worker->flags |= WORKER_ROGUE;
+ case CPU_ONLINE:
+ spin_unlock_irq(&gcwq->lock);
+ kthread_bind(gcwq->first_idle->task, cpu);
+ spin_lock_irq(&gcwq->lock);
+ gcwq->flags |= GCWQ_MANAGE_WORKERS;
+ start_worker(gcwq->first_idle);
+ gcwq->first_idle = NULL;
+ break;
+ }
-
- for_each_busy_worker(worker, i, pos, gcwq)
- worker->flags |= WORKER_ROGUE;
+ spin_unlock_irqrestore(&gcwq->lock, flags);
-
- /*
- * Call schedule() so that we cross rq->lock and thus can
- * guarantee sched callbacks see the rogue flag. This is
@@ -507,9 +469,7 @@ index 5d23c05b..8daede8 100644
- spin_unlock_irq(&gcwq->lock);
- schedule();
- spin_lock_irq(&gcwq->lock);
+ return notifier_from_errno(0);
+}
-
- /*
- * Sched callbacks are disabled now. Zap nr_running. After
- * this, nr_running stays zero and need_more_worker() and
@@ -517,18 +477,11 @@ index 5d23c05b..8daede8 100644
- * not empty.
- */
- atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
+static void flush_gcwq(struct global_cwq *gcwq)
+{
+ struct work_struct *work, *nw;
+ struct worker *worker, *n;
+ LIST_HEAD(non_affine_works);
-
- spin_unlock_irq(&gcwq->lock);
- del_timer_sync(&gcwq->idle_timer);
spin_lock_irq(&gcwq->lock);
+ list_for_each_entry_safe(work, nw, &gcwq->worklist, entry) {
+ struct workqueue_struct *wq = get_work_cwq(work)->wq;
- spin_lock_irq(&gcwq->lock);
-
- /*
- * We're now in charge. Notify and proceed to drain. We need
- * to keep the gcwq running during the whole CPU down
@@ -537,10 +490,7 @@ index 5d23c05b..8daede8 100644
- */
- gcwq->trustee_state = TRUSTEE_IN_CHARGE;
- wake_up_all(&gcwq->trustee_wait);
+ if (wq->flags & WQ_NON_AFFINE)
+ list_move(&work->entry, &non_affine_works);
+ }
-
- /*
- * The original cpu is in the process of dying and may go away
- * anytime now. When that happens, we and all workers would
@@ -554,28 +504,29 @@ index 5d23c05b..8daede8 100644
- while (gcwq->nr_workers != gcwq->nr_idle ||
- gcwq->flags & GCWQ_FREEZING ||
- gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
+ while (!list_empty(&gcwq->worklist)) {
int nr_works = 0;
list_for_each_entry(work, &gcwq->worklist, entry) {
@@ -3387,200 +3267,55 @@ static int __cpuinit trustee_thread(void *__gcwq)
wake_up_process(worker->task);
}
+ spin_unlock_irq(&gcwq->lock);
+
if (need_to_create_worker(gcwq)) {
- int nr_works = 0;
-
- list_for_each_entry(work, &gcwq->worklist, entry) {
- send_mayday(work);
- nr_works++;
- }
-
- list_for_each_entry(worker, &gcwq->idle_list, entry) {
- if (!nr_works--)
- break;
- wake_up_process(worker->task);
- }
-
- if (need_to_create_worker(gcwq)) {
- spin_unlock_irq(&gcwq->lock);
- worker = create_worker(gcwq, false);
- spin_lock_irq(&gcwq->lock);
- if (worker) {
- worker->flags |= WORKER_ROGUE;
+ worker = create_worker(gcwq, true);
+ if (worker)
start_worker(worker);
- start_worker(worker);
- }
}
- }
-
- /* give a breather */
- if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
- break;
@@ -601,14 +552,10 @@ index 5d23c05b..8daede8 100644
- * currently scheduled works by scheduling the rebind_work.
- */
- WARN_ON(!list_empty(&gcwq->idle_list));
+ wait_event_timeout(gcwq->idle_wait,
+ gcwq->nr_idle == gcwq->nr_workers, HZ/10);
-
- for_each_busy_worker(worker, i, pos, gcwq) {
- struct work_struct *rebind_work = &worker->rebind_work;
+ spin_lock_irq(&gcwq->lock);
+ }
-
- /*
- * Rebind_work may race with future cpu hotplug
- * operations. Use a separate flag to mark that
@@ -616,22 +563,18 @@ index 5d23c05b..8daede8 100644
- */
- worker->flags |= WORKER_REBIND;
- worker->flags &= ~WORKER_ROGUE;
+ WARN_ON(gcwq->nr_workers != gcwq->nr_idle);
-
- /* queue rebind_work, wq doesn't matter, use the default one */
- if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
- work_data_bits(rebind_work)))
- continue;
+ list_for_each_entry_safe(worker, n, &gcwq->idle_list, entry)
+ destroy_worker(worker);
-
- debug_work_activate(rebind_work);
- insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
- worker->scheduled.next,
- work_color_to_flags(WORK_NO_COLOR));
- }
+ WARN_ON(gcwq->nr_workers || gcwq->nr_idle);
-
- /* relinquish manager role */
- gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
-
@@ -639,10 +582,10 @@ index 5d23c05b..8daede8 100644
- gcwq->trustee = NULL;
- gcwq->trustee_state = TRUSTEE_DONE;
- wake_up_all(&gcwq->trustee_wait);
spin_unlock_irq(&gcwq->lock);
- spin_unlock_irq(&gcwq->lock);
- return 0;
-}
-
-/**
- * wait_trustee_state - wait for trustee to enter the specified state
- * @gcwq: gcwq the trustee of interest belongs to
@@ -653,7 +596,7 @@ index 5d23c05b..8daede8 100644
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times. To be used by cpu_callback.
- */
*/
-static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
-__releases(&gcwq->lock)
-__acquires(&gcwq->lock)
@@ -665,30 +608,23 @@ index 5d23c05b..8daede8 100644
- gcwq->trustee_state == state ||
- gcwq->trustee_state == TRUSTEE_DONE);
- spin_lock_irq(&gcwq->lock);
+ gcwq = get_gcwq(get_cpu());
+ spin_lock_irq(&gcwq->lock);
+ list_for_each_entry_safe(work, nw, &non_affine_works, entry) {
+ list_del_init(&work->entry);
+ ___queue_work(get_work_cwq(work)->wq, gcwq, work);
}
+ spin_unlock_irq(&gcwq->lock);
+ put_cpu();
}
- }
-}
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct global_cwq *gcwq = get_gcwq(cpu);
- struct task_struct *new_trustee = NULL;
- struct worker *uninitialized_var(new_worker);
- unsigned long flags;
struct worker *uninitialized_var(new_worker);
unsigned long flags;
action &= ~CPU_TASKS_FROZEN;
- switch (action) {
switch (action) {
- case CPU_DOWN_PREPARE:
- new_trustee = kthread_create(trustee_thread, gcwq,
- "workqueue_trustee/%d\n", cpu);
@@ -696,13 +632,14 @@ index 5d23c05b..8daede8 100644
- return notifier_from_errno(PTR_ERR(new_trustee));
- kthread_bind(new_trustee, cpu);
- /* fall through */
- case CPU_UP_PREPARE:
- BUG_ON(gcwq->first_idle);
- new_worker = create_worker(gcwq, false);
case CPU_UP_PREPARE:
BUG_ON(gcwq->first_idle);
new_worker = create_worker(gcwq, false);
- if (!new_worker) {
- if (new_trustee)
- kthread_stop(new_trustee);
- return NOTIFY_BAD;
+ if (!new_worker)
return NOTIFY_BAD;
- }
- break;
- case CPU_POST_DEAD:
@ -723,12 +660,12 @@ index 5d23c05b..8daede8 100644
- gcwq->flags |= GCWQ_DISASSOCIATED;
- default:
- goto out;
- }
-
- /* some are called w/ irq disabled, don't disturb irq status */
- spin_lock_irqsave(&gcwq->lock, flags);
-
- switch (action) {
}
/* some are called w/ irq disabled, don't disturb irq status */
spin_lock_irqsave(&gcwq->lock, flags);
switch (action) {
- case CPU_DOWN_PREPARE:
- /* initialize trustee and tell it to acquire the gcwq */
- BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
@@ -737,26 +674,21 @@ index 5d23c05b..8daede8 100644
- wake_up_process(gcwq->trustee);
- wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
- /* fall through */
- case CPU_UP_PREPARE:
- BUG_ON(gcwq->first_idle);
- gcwq->first_idle = new_worker;
- break;
+ switch (action) {
+ case CPU_DOWN_PREPARE:
+ flush_gcwq(gcwq);
+ break;
+ }
case CPU_UP_PREPARE:
BUG_ON(gcwq->first_idle);
gcwq->first_idle = new_worker;
break;
- case CPU_POST_DEAD:
- gcwq->trustee_state = TRUSTEE_BUTCHER;
- /* fall through */
- case CPU_UP_CANCELED:
- destroy_worker(gcwq->first_idle);
- gcwq->first_idle = NULL;
- break;
case CPU_UP_CANCELED:
destroy_worker(gcwq->first_idle);
gcwq->first_idle = NULL;
break;
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
case CPU_ONLINE:
- gcwq->flags &= ~GCWQ_DISASSOCIATED;
- if (gcwq->trustee_state != TRUSTEE_DONE) {
- gcwq->trustee_state = TRUSTEE_RELEASE;
@@ -769,32 +701,137 @@ index 5d23c05b..8daede8 100644
- * Put the first_idle in and request a real manager to
- * take a look.
- */
- spin_unlock_irq(&gcwq->lock);
- kthread_bind(gcwq->first_idle->task, cpu);
- spin_lock_irq(&gcwq->lock);
- gcwq->flags |= GCWQ_MANAGE_WORKERS;
- start_worker(gcwq->first_idle);
- gcwq->first_idle = NULL;
- break;
- }
-
- spin_unlock_irqrestore(&gcwq->lock, flags);
-
spin_unlock_irq(&gcwq->lock);
kthread_bind(gcwq->first_idle->task, cpu);
spin_lock_irq(&gcwq->lock);
@@ -3580,43 +3236,87 @@ static int __devinit workqueue_cpu_callb
spin_unlock_irqrestore(&gcwq->lock, flags);
-out:
return notifier_from_errno(0);
}
@@ -3777,7 +3512,8 @@ static int __init init_workqueues(void)
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static void flush_gcwq(struct global_cwq *gcwq)
{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- case CPU_UP_CANCELED:
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- return workqueue_cpu_callback(nfb, action, hcpu);
+ struct work_struct *work, *nw;
+ struct worker *worker, *n;
+ LIST_HEAD(non_affine_works);
+
+ spin_lock_irq(&gcwq->lock);
+ list_for_each_entry_safe(work, nw, &gcwq->worklist, entry) {
+ struct workqueue_struct *wq = get_work_cwq(work)->wq;
+
+ if (wq->flags & WQ_NON_AFFINE)
+ list_move(&work->entry, &non_affine_works);
+ }
+
+ while (!list_empty(&gcwq->worklist)) {
+ int nr_works = 0;
+
+ list_for_each_entry(work, &gcwq->worklist, entry) {
+ send_mayday(work);
+ nr_works++;
+ }
+
+ list_for_each_entry(worker, &gcwq->idle_list, entry) {
+ if (!nr_works--)
+ break;
+ wake_up_process(worker->task);
+ }
+
+ spin_unlock_irq(&gcwq->lock);
+
+ if (need_to_create_worker(gcwq)) {
+ worker = create_worker(gcwq, true);
+ if (worker)
+ start_worker(worker);
+ }
+
+ wait_event_timeout(gcwq->idle_wait,
+ gcwq->nr_idle == gcwq->nr_workers, HZ/10);
+
+ spin_lock_irq(&gcwq->lock);
}
- return NOTIFY_OK;
+
+ WARN_ON(gcwq->nr_workers != gcwq->nr_idle);
+
+ list_for_each_entry_safe(worker, n, &gcwq->idle_list, entry)
+ destroy_worker(worker);
+
+ WARN_ON(gcwq->nr_workers || gcwq->nr_idle);
+
+ spin_unlock_irq(&gcwq->lock);
+
+ gcwq = get_gcwq(get_cpu());
+ spin_lock_irq(&gcwq->lock);
+ list_for_each_entry_safe(work, nw, &non_affine_works, entry) {
+ list_del_init(&work->entry);
+ ___queue_work(get_work_cwq(work)->wq, gcwq, work);
+ }
+ spin_unlock_irq(&gcwq->lock);
+ put_cpu();
}
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+ unsigned long action,
+ void *hcpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
- case CPU_DYING:
- case CPU_POST_DEAD:
- return workqueue_cpu_callback(nfb, action, hcpu);
- }
- return NOTIFY_OK;
+ unsigned int cpu = (unsigned long)hcpu;
+ struct global_cwq *gcwq = get_gcwq(cpu);
+
+ action &= ~CPU_TASKS_FROZEN;
+
+ switch (action) {
+ case CPU_DOWN_PREPARE:
+ flush_gcwq(gcwq);
+ break;
+ }
+
+
+ return notifier_from_errno(0);
}
#ifdef CONFIG_SMP
@@ -3812,8 +3512,8 @@ static int __init init_workqueues(void)
unsigned int cpu;
int i;
- cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
- cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
- cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
+ cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_ACTIVE);
+ hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_INACTIVE);
/* initialize gcwqs */
for_each_gcwq_cpu(cpu) {
@@ -3800,9 +3536,7 @@ static int __init init_workqueues(void)
@@ -3836,9 +3536,7 @@ static int __init init_workqueues(void)
(unsigned long)gcwq);
ida_init(&gcwq->worker_ida);
@ -805,6 +842,3 @@ index 5d23c05b..8daede8 100644
}
/* create the initial worker */
--
1.7.10
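
The cpu.h hunk near the top of this refreshed patch splits the workqueue CPU-notifier priority into "active" and "inactive" levels, and init_workqueues() now registers its up and down callbacks at those levels, so workqueues come up before ordinary priority-0 notifiers and go down after them. Below is a minimal userspace model of that ordering idea only (an assumption-laden sketch, not kernel code): it assumes the kernel convention that a notifier chain is walked from highest to lowest priority, and it assumes CPU_PRI_WORKQUEUE_INACTIVE is negative, since its value is not visible in the hunk above.

#include <stdio.h>

struct fake_notifier {
	const char *name;
	int priority;           /* higher value == called earlier */
};

/* Kept sorted by descending priority, like the kernel's notifier chain. */
static const struct fake_notifier chain[] = {
	{ "workqueue bring-up  (CPU_PRI_WORKQUEUE_ACTIVE, +5)",          5 },
	{ "ordinary subsystem  (priority 0)",                            0 },
	{ "workqueue tear-down (CPU_PRI_WORKQUEUE_INACTIVE, assumed -5)", -5 },
};

static void run_chain(const char *event)
{
	/* The chain is walked in the same priority order for every event;
	 * each callback simply ignores events it does not handle. */
	printf("%s:\n", event);
	for (size_t i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
		printf("  -> %s\n", chain[i].name);
}

int main(void)
{
	run_chain("CPU_UP_PREPARE / CPU_ONLINE");
	run_chain("CPU_DOWN_PREPARE");
	return 0;
}

Because the chain is walked in the same order for every event, giving the bring-up handler a positive priority and the tear-down handler a negative one is enough to bracket the priority-0 subsystems on both hotplug paths.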


@@ -9,6 +9,7 @@ Acked-by: Gertjan van Wingerde <gwingerde@gmail.com>
Acked-by: Ivo van Doorn <IvDoorn@gmail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
[bwh: Adjust context to apply after 3.2.25]
---
drivers/net/wireless/rt2x00/rt2800.h | 1 +
drivers/net/wireless/rt2x00/rt2800lib.c | 155 ++++++++++++++++++++++++++-----
@@ -17,8 +18,6 @@ Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
drivers/net/wireless/rt2x00/rt2x00.h | 1 +
5 files changed, 148 insertions(+), 26 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4778620347c4..9efdaafb11e5 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -68,6 +68,7 @@
@@ -29,11 +28,9 @@ index 4778620347c4..9efdaafb11e5 100644
#define RF5390 0x5390
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 1ff428ba060e..1896cbf912ad 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -402,7 +402,8 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
@@ -402,7 +402,8 @@ int rt2800_load_firmware(struct rt2x00_d
if (rt2x00_is_pci(rt2x00dev)) {
if (rt2x00_rt(rt2x00dev, RT3572) ||
@@ -43,7 +40,7 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
@@ -1904,7 +1905,8 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
@@ -1904,7 +1905,8 @@ static void rt2800_config_channel_rf53xx
r55_nonbt_rev[idx]);
rt2800_rfcsr_write(rt2x00dev, 59,
r59_nonbt_rev[idx]);
@@ -53,7 +50,7 @@ index 1ff428ba060e..1896cbf912ad 100644
static const char r59_non_bt[] = {0x8f, 0x8f,
0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
@@ -1951,6 +1953,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
@@ -1951,6 +1953,7 @@ static void rt2800_config_channel(struct
else if (rt2x00_rf(rt2x00dev, RF3052))
rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
else if (rt2x00_rf(rt2x00dev, RF5370) ||
@@ -61,7 +58,7 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2x00_rf(rt2x00dev, RF5390))
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
else
@@ -1965,7 +1968,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
@@ -1965,7 +1968,8 @@ static void rt2800_config_channel(struct
rt2800_bbp_write(rt2x00dev, 86, 0);
if (rf->channel <= 14) {
@@ -71,7 +68,7 @@ index 1ff428ba060e..1896cbf912ad 100644
if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
&rt2x00dev->cap_flags)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
@@ -2495,7 +2499,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
@@ -2495,7 +2499,8 @@ static u8 rt2800_get_default_vgc(struct
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
@@ -81,7 +78,7 @@ index 1ff428ba060e..1896cbf912ad 100644
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@@ -2630,7 +2635,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -2630,7 +2635,8 @@ static int rt2800_init_registers(struct
} else if (rt2x00_rt(rt2x00dev, RT3572)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -91,7 +88,7 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -3006,7 +3012,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
@@ -3006,7 +3012,8 @@ static int rt2800_init_bbp(struct rt2x00
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
@@ -101,7 +98,7 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2800_bbp_read(rt2x00dev, 4, &value);
rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
rt2800_bbp_write(rt2x00dev, 4, value);
@@ -3014,19 +3021,22 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
@@ -3014,19 +3021,22 @@ static int rt2800_init_bbp(struct rt2x00
if (rt2800_is_305x_soc(rt2x00dev) ||
rt2x00_rt(rt2x00dev, RT3572) ||
@@ -127,7 +124,7 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
@@ -3044,7 +3054,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
@@ -3044,7 +3054,8 @@ static int rt2800_init_bbp(struct rt2x00
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT3572) ||
@@ -137,7 +134,7 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -3056,64 +3067,88 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
@@ -3056,64 +3067,88 @@ static int rt2800_init_bbp(struct rt2x00
}
rt2800_bbp_write(rt2x00dev, 82, 0x62);
@@ -234,7 +231,7 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2800_bbp_read(rt2x00dev, 138, &value);
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
@@ -3125,7 +3160,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
@@ -3125,7 +3160,8 @@ static int rt2800_init_bbp(struct rt2x00
rt2800_bbp_write(rt2x00dev, 138, value);
}
@@ -244,7 +241,7 @@ index 1ff428ba060e..1896cbf912ad 100644
int ant, div_mode;
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
@@ -3251,13 +3287,15 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
@@ -3251,13 +3287,15 @@ static int rt2800_init_rfcsr(struct rt2x
!rt2x00_rt(rt2x00dev, RT3390) &&
!rt2x00_rt(rt2x00dev, RT3572) &&
!rt2x00_rt(rt2x00dev, RT5390) &&
@@ -261,7 +258,7 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
@@ -3475,6 +3513,66 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
@@ -3475,6 +3513,66 @@ static int rt2800_init_rfcsr(struct rt2x
rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
@@ -328,7 +325,7 @@ index 1ff428ba060e..1896cbf912ad 100644
}
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -3542,7 +3640,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
@@ -3542,7 +3640,8 @@ static int rt2800_init_rfcsr(struct rt2x
rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
}
@@ -338,7 +335,7 @@ index 1ff428ba060e..1896cbf912ad 100644
/*
* Set back to initial state
*/
@@ -3570,7 +3669,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
@@ -3570,7 +3669,8 @@ static int rt2800_init_rfcsr(struct rt2x
rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
@@ -348,7 +345,7 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
if (rt2x00_rt(rt2x00dev, RT3070) ||
@@ -3638,7 +3738,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
@@ -3638,7 +3738,8 @@ static int rt2800_init_rfcsr(struct rt2x
rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
}
@@ -358,7 +355,7 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
@@ -3922,7 +4023,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
@@ -3922,7 +4023,8 @@ int rt2800_init_eeprom(struct rt2x00_dev
* RT53xx: defined in "EEPROM_CHIP_ID" field
*/
rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
@@ -368,7 +365,7 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
else
value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -3938,7 +4040,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
@@ -3938,7 +4040,8 @@ int rt2800_init_eeprom(struct rt2x00_dev
!rt2x00_rt(rt2x00dev, RT3090) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
!rt2x00_rt(rt2x00dev, RT3572) &&
@@ -378,7 +375,7 @@ index 1ff428ba060e..1896cbf912ad 100644
ERROR(rt2x00dev, "Invalid RT chipset 0x%04x detected.\n", rt2x00dev->chip.rt);
return -ENODEV;
}
@@ -3955,6 +4058,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
@@ -3955,6 +4058,7 @@ int rt2800_init_eeprom(struct rt2x00_dev
case RF3052:
case RF3320:
case RF5370:
@@ -386,7 +383,7 @@ index 1ff428ba060e..1896cbf912ad 100644
case RF5390:
break;
default:
@@ -4261,6 +4365,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
@@ -4261,6 +4365,7 @@ int rt2800_probe_hw_mode(struct rt2x00_d
rt2x00_rf(rt2x00dev, RF3022) ||
rt2x00_rf(rt2x00dev, RF3320) ||
rt2x00_rf(rt2x00dev, RF5370) ||
@@ -394,11 +391,9 @@ index 1ff428ba060e..1896cbf912ad 100644
rt2x00_rf(rt2x00dev, RF5390)) {
spec->num_channels = 14;
spec->channels = rf_vals_3x;
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 837b460d4055..bf0f83cf3738 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -480,7 +480,8 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -480,7 +480,8 @@ static int rt2800pci_init_registers(stru
if (rt2x00_is_pcie(rt2x00dev) &&
(rt2x00_rt(rt2x00dev, RT3572) ||
@@ -408,11 +403,9 @@ index 837b460d4055..bf0f83cf3738 100644
rt2x00pci_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 0ffa1119acd1..d241ac6d7477 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1119,12 +1119,26 @@ static struct usb_device_id rt2800usb_device_table[] = {
@@ -1133,15 +1133,29 @@ static struct usb_device_id rt2800usb_de
{ USB_DEVICE(0x5a57, 0x0284) },
#endif
#ifdef CONFIG_RT2800USB_RT53XX
@@ -424,6 +417,9 @@ index 0ffa1119acd1..d241ac6d7477 100644
/* Azurewave */
{ USB_DEVICE(0x13d3, 0x3329) },
{ USB_DEVICE(0x13d3, 0x3365) },
/* D-Link */
{ USB_DEVICE(0x2001, 0x3c1c) },
{ USB_DEVICE(0x2001, 0x3c1d) },
+ /* LG innotek */
+ { USB_DEVICE(0x043e, 0x7a22) },
+ /* Panasonic */
@@ -439,8 +435,6 @@ index 0ffa1119acd1..d241ac6d7477 100644
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 99ff12d0c29d..845dce5c997a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -192,6 +192,7 @@ struct rt2x00_chip {
@ -451,6 +445,3 @@ index 99ff12d0c29d..845dce5c997a 100644
u16 rf;
u16 rev;
--
1.7.10.2


@@ -343,8 +343,6 @@ features/arm/net-drop-NET-dependency-from-HAVE_BPF_JIT.patch
# Until next ABI bump
debian/driver-core-avoid-ABI-change-for-removal-of-__must_check.patch
bugfix/all/udf-Improve-table-length-check-to-avoid-possible-underflow.patch
# nouveau update to support Fermi (NVC0+) acceleration
features/all/fermi-accel/drm-nouveau-ttm-always-do-buffer-moves-on-kernel-cha.patch
features/all/fermi-accel/drm-nouveau-remove-subchannel-names-from-places-wher.patch