Update to 4.14.10

Salvatore Bonaccorso 2018-01-05 12:38:05 +01:00
parent 572b685574
commit 304afa4414
4 changed files with 3 additions and 110 deletions

debian/changelog

@@ -1,8 +1,9 @@
-linux (4.14.9-1) UNRELEASED; urgency=medium
+linux (4.14.10-1) UNRELEASED; urgency=medium

   * New upstream stable update:
     https://www.kernel.org/pub/linux/kernel/v4.x/ChangeLog-4.14.8
     https://www.kernel.org/pub/linux/kernel/v4.x/ChangeLog-4.14.9
+    https://www.kernel.org/pub/linux/kernel/v4.x/ChangeLog-4.14.10

   [ Ben Hutchings ]
   * e1000e: Fix e1000_check_for_copper_link_ich8lan return value.

@@ -103,5 +103,5 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 +	struct work_struct work;
 +#endif
  	union {
-		call_single_data_t csd;
+		struct __call_single_data csd;
 		u64 fifo_time;

debian/patches/features/all/rt/crypto-mcryptd-protect-the-per-CPU-queue-with-a-lock.patch (deleted)

@@ -1,107 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 30 Nov 2017 13:03:09 +0100
Subject: [PATCH] crypto: mcryptd: protect the per-CPU queue with a lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.14/older/patches-4.14.8-rt9.tar.xz
mcryptd_enqueue_request() grabs the per-CPU queue struct and protects
access to it with disabled preemption. Then it schedules a worker on the
same CPU. The worker in mcryptd_queue_worker() guards access to the same
per-CPU variable with disabled preemption.

If we take CPU hotplug into account, it is possible that between
queue_work_on() and the actual invocation of the worker the CPU goes
down and the worker will be scheduled on _another_ CPU. There the
preempt_disable() protection does not work anymore. The easiest fix is
to add a spin_lock() to guard access to the list.

Another detail: mcryptd_queue_worker() does not process more than
MCRYPTD_BATCH requests in a row. If there are still items left, it
invokes queue_work() to proceed with more later. I would suggest simply
dropping that check, because it does not use a system workqueue and the
workqueue is already marked as "CPU_INTENSIVE"; if preemption is
required then the scheduler should do it. However, if queue_work() is
used, the work item is marked as CPU unbound. That means it will try to
run on the local CPU but it may run on another CPU as well, especially
with CONFIG_DEBUG_WQ_FORCE_RR_CPU=y. Again, the preempt_disable() won't
work here, but the lock which was introduced will help.

In order to keep the work item on the local CPU (and avoid round-robin
placement) I changed it to queue_work_on().

Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
crypto/mcryptd.c | 23 ++++++++++-------------
include/crypto/mcryptd.h | 1 +
2 files changed, 11 insertions(+), 13 deletions(-)
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcr
pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+ spin_lock_init(&cpu_queue->q_lock);
}
return 0;
}
@@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struc
int cpu, err;
struct mcryptd_cpu_queue *cpu_queue;
- cpu = get_cpu();
- cpu_queue = this_cpu_ptr(queue->cpu_queue);
- rctx->tag.cpu = cpu;
+ cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+ spin_lock(&cpu_queue->q_lock);
+ cpu = smp_processor_id();
+ rctx->tag.cpu = smp_processor_id();
err = crypto_enqueue_request(&cpu_queue->queue, request);
pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
cpu, cpu_queue, request);
+ spin_unlock(&cpu_queue->q_lock);
queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
- put_cpu();
return err;
}
@@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct
cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
i = 0;
while (i < MCRYPTD_BATCH || single_task_running()) {
- /*
- * preempt_disable/enable is used to prevent
- * being preempted by mcryptd_enqueue_request()
- */
- local_bh_disable();
- preempt_disable();
+
+ spin_lock_bh(&cpu_queue->q_lock);
backlog = crypto_get_backlog(&cpu_queue->queue);
req = crypto_dequeue_request(&cpu_queue->queue);
- preempt_enable();
- local_bh_enable();
+ spin_unlock_bh(&cpu_queue->q_lock);
if (!req) {
mcryptd_opportunistic_flush();
@@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct
++i;
}
if (cpu_queue->queue.qlen)
- queue_work(kcrypto_wq, &cpu_queue->work);
+ queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
}
void mcryptd_flusher(struct work_struct *__work)
--- a/include/crypto/mcryptd.h
+++ b/include/crypto/mcryptd.h
@@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mc
struct mcryptd_cpu_queue {
struct crypto_queue queue;
+ spinlock_t q_lock;
struct work_struct work;
};

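The hunks above show the fix as carried in the RT patch queue; it is dropped from the Debian series here, presumably because the same change is included in the 4.14.10 stable update. The core idea is to stop relying on get_cpu()/preempt_disable() to protect the per-CPU queue and instead guard it with a spinlock, with queue_work_on() keeping the worker on the CPU that owns the queue. The following is a minimal, self-contained sketch of that pattern, not the mcryptd code itself; the names (demo_item, demo_cpu_queue, demo_queues, demo_enqueue, demo_worker) and the kfree() stand-in for per-item processing are purely illustrative.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_item {
	struct list_head node;
};

struct demo_cpu_queue {
	struct list_head items;
	spinlock_t q_lock;		/* guards items */
	struct work_struct work;
};

static DEFINE_PER_CPU(struct demo_cpu_queue, demo_queues);

static void demo_worker(struct work_struct *work);

static void demo_init_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct demo_cpu_queue *q = per_cpu_ptr(&demo_queues, cpu);

		INIT_LIST_HEAD(&q->items);
		spin_lock_init(&q->q_lock);
		INIT_WORK(&q->work, demo_worker);
	}
}

static void demo_enqueue(struct demo_item *item)
{
	struct demo_cpu_queue *q;
	int cpu;

	/* No get_cpu()/put_cpu(): the lock, not disabled preemption,
	 * serialises against the worker, even if the worker later runs
	 * on another CPU after hotplug. */
	q = raw_cpu_ptr(&demo_queues);
	spin_lock(&q->q_lock);
	cpu = smp_processor_id();
	list_add_tail(&item->node, &q->items);
	spin_unlock(&q->q_lock);

	/* Pin the work item to the CPU that owns this queue instead of
	 * letting queue_work() pick an unbound CPU. */
	queue_work_on(cpu, system_wq, &q->work);
}

static void demo_worker(struct work_struct *work)
{
	struct demo_cpu_queue *q = container_of(work, struct demo_cpu_queue, work);
	struct demo_item *item;

	for (;;) {
		/* The _bh variant keeps softirqs off across the dequeue,
		 * mirroring the local_bh_disable() the patch removes. */
		spin_lock_bh(&q->q_lock);
		item = list_first_entry_or_null(&q->items, struct demo_item, node);
		if (item)
			list_del(&item->node);
		spin_unlock_bh(&q->q_lock);

		if (!item)
			break;
		kfree(item);	/* stand-in for real per-item processing */
	}
}

The batching detail from the description is intentionally omitted: a real implementation may stop after a fixed number of items and requeue itself with queue_work_on(smp_processor_id(), ...), exactly as the patch does for MCRYPTD_BATCH.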

@@ -87,7 +87,6 @@ features/all/rt/Bluetooth-avoid-recursive-locking-in-hci_send_to_cha.patch
features/all/rt/iommu-iova-Use-raw_cpu_ptr-instead-of-get_cpu_ptr-fo.patch
features/all/rt/greybus-audio-don-t-inclide-rwlock.h-directly.patch
features/all/rt/xen-9pfs-don-t-inclide-rwlock.h-directly.patch
-features/all/rt/crypto-mcryptd-protect-the-per-CPU-queue-with-a-lock.patch
features/all/rt/tty-goldfish-Enable-earlycon-only-if-built-in.patch
features/all/rt/drm-i915-properly-init-lockdep-class.patch