2020-09-04 20:10:21 +00:00
|
|
|
From d23194475f30cbe5bc9dd84735a66cec5954972d Mon Sep 17 00:00:00 2001
|
|
|
|
Message-Id: <d23194475f30cbe5bc9dd84735a66cec5954972d.1599166691.git.zanussi@kernel.org>
|
|
|
|
In-Reply-To: <56457dc415803c8abc5acb513ada877a79596f05.1599166690.git.zanussi@kernel.org>
|
|
|
|
References: <56457dc415803c8abc5acb513ada877a79596f05.1599166690.git.zanussi@kernel.org>
|
2018-08-27 14:32:32 +00:00
|
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
Date: Fri, 3 Jul 2009 08:44:56 -0500
|
2020-09-04 20:10:21 +00:00
|
|
|
Subject: [PATCH 259/333] signals: Allow rt tasks to cache one sigqueue struct
|
|
|
|
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.142-rt63.tar.xz
|
2018-08-27 14:32:32 +00:00
|
|
|
|
|
|
|
To avoid allocation, allow rt tasks to cache one sigqueue struct in
|
|
|
|
the task struct.
|
|
|
|
|
|
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
---
|
2019-04-08 23:49:20 +00:00
|
|
|
include/linux/sched.h | 2 ++
|
|
|
|
include/linux/signal.h | 1 +
|
|
|
|
kernel/exit.c | 2 +-
|
|
|
|
kernel/fork.c | 1 +
|
|
|
|
kernel/signal.c | 69 +++++++++++++++++++++++++++++++++++++++---
|
2018-08-27 14:32:32 +00:00
|
|
|
5 files changed, 70 insertions(+), 5 deletions(-)
|
|
|
|
|
2020-04-21 20:13:52 +00:00
|
|
|
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
2020-06-22 13:14:16 +00:00
|
|
|
index 852b1221cd40..bdae37091808 100644
|
2018-08-27 14:32:32 +00:00
|
|
|
--- a/include/linux/sched.h
|
|
|
|
+++ b/include/linux/sched.h
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -895,6 +895,8 @@ struct task_struct {
|
2018-08-27 14:32:32 +00:00
|
|
|
/* Signal handlers: */
|
|
|
|
struct signal_struct *signal;
|
|
|
|
struct sighand_struct *sighand;
|
|
|
|
+ struct sigqueue *sigqueue_cache;
|
|
|
|
+
|
|
|
|
sigset_t blocked;
|
|
|
|
sigset_t real_blocked;
|
|
|
|
/* Restored if set_restore_sigmask() was used: */
|
2020-04-21 20:13:52 +00:00
|
|
|
diff --git a/include/linux/signal.h b/include/linux/signal.h
|
|
|
|
index 0be5ce2375cb..6495fda18c2c 100644
|
2018-08-27 14:32:32 +00:00
|
|
|
--- a/include/linux/signal.h
|
|
|
|
+++ b/include/linux/signal.h
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -245,6 +245,7 @@ static inline void init_sigpending(struct sigpending *sig)
|
2018-08-27 14:32:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
extern void flush_sigqueue(struct sigpending *queue);
|
|
|
|
+extern void flush_task_sigqueue(struct task_struct *tsk);
|
|
|
|
|
|
|
|
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
|
|
|
|
static inline int valid_signal(unsigned long sig)
|
2020-04-21 20:13:52 +00:00
|
|
|
diff --git a/kernel/exit.c b/kernel/exit.c
|
2020-07-15 20:05:29 +00:00
|
|
|
index eeaafd4064c9..7b30795f19ef 100644
|
2018-08-27 14:32:32 +00:00
|
|
|
--- a/kernel/exit.c
|
|
|
|
+++ b/kernel/exit.c
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -160,7 +160,7 @@ static void __exit_signal(struct task_struct *tsk)
|
2018-08-27 14:32:32 +00:00
|
|
|
* Do this under ->siglock, we can race with another thread
|
|
|
|
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
|
|
|
|
*/
|
|
|
|
- flush_sigqueue(&tsk->pending);
|
|
|
|
+ flush_task_sigqueue(tsk);
|
|
|
|
tsk->sighand = NULL;
|
|
|
|
spin_unlock(&sighand->siglock);
|
|
|
|
|
2020-04-21 20:13:52 +00:00
|
|
|
diff --git a/kernel/fork.c b/kernel/fork.c
|
|
|
|
index ecec0f8bef7e..234e0ca9a74b 100644
|
2018-08-27 14:32:32 +00:00
|
|
|
--- a/kernel/fork.c
|
|
|
|
+++ b/kernel/fork.c
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -1832,6 +1832,7 @@ static __latent_entropy struct task_struct *copy_process(
|
2018-08-27 14:32:32 +00:00
|
|
|
spin_lock_init(&p->alloc_lock);
|
|
|
|
|
|
|
|
init_sigpending(&p->pending);
|
|
|
|
+ p->sigqueue_cache = NULL;
|
|
|
|
|
|
|
|
p->utime = p->stime = p->gtime = 0;
|
|
|
|
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
|
2020-04-21 20:13:52 +00:00
|
|
|
diff --git a/kernel/signal.c b/kernel/signal.c
|
2020-06-22 13:14:16 +00:00
|
|
|
index 6f7191a72103..45748993f777 100644
|
2018-08-27 14:32:32 +00:00
|
|
|
--- a/kernel/signal.c
|
|
|
|
+++ b/kernel/signal.c
|
|
|
|
@@ -19,6 +19,7 @@
|
|
|
|
#include <linux/sched/task.h>
|
|
|
|
#include <linux/sched/task_stack.h>
|
|
|
|
#include <linux/sched/cputime.h>
|
|
|
|
+#include <linux/sched/rt.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/tty.h>
|
|
|
|
#include <linux/binfmts.h>
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -397,13 +398,30 @@ void task_join_group_stop(struct task_struct *task)
|
2018-10-30 12:40:05 +00:00
|
|
|
}
|
2018-08-27 14:32:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
+static inline struct sigqueue *get_task_cache(struct task_struct *t)
|
|
|
|
+{
|
|
|
|
+ struct sigqueue *q = t->sigqueue_cache;
|
|
|
|
+
|
|
|
|
+ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
|
|
|
|
+ return NULL;
|
|
|
|
+ return q;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
|
|
|
|
+{
|
|
|
|
+ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
|
|
|
|
+ return 0;
|
|
|
|
+ return 1;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
/*
|
|
|
|
* allocate a new signal queue record
|
|
|
|
* - this may be called without locks if and only if t == current, otherwise an
|
|
|
|
* appropriate lock must be held to stop the target task from exiting
|
|
|
|
*/
|
|
|
|
static struct sigqueue *
|
|
|
|
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
|
|
|
|
+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
|
|
|
|
+ int override_rlimit, int fromslab)
|
|
|
|
{
|
|
|
|
struct sigqueue *q = NULL;
|
|
|
|
struct user_struct *user;
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -425,7 +443,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
|
2020-04-09 19:55:05 +00:00
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
|
2018-08-27 14:32:32 +00:00
|
|
|
- q = kmem_cache_alloc(sigqueue_cachep, flags);
|
|
|
|
+ if (!fromslab)
|
|
|
|
+ q = get_task_cache(t);
|
|
|
|
+ if (!q)
|
|
|
|
+ q = kmem_cache_alloc(sigqueue_cachep, flags);
|
|
|
|
} else {
|
|
|
|
print_dropped_signal(sig);
|
|
|
|
}
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -442,6 +463,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
|
2018-08-27 14:32:32 +00:00
|
|
|
return q;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static struct sigqueue *
|
|
|
|
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
|
|
|
|
+ int override_rlimit)
|
|
|
|
+{
|
|
|
|
+ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static void __sigqueue_free(struct sigqueue *q)
|
|
|
|
{
|
|
|
|
if (q->flags & SIGQUEUE_PREALLOC)
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -451,6 +479,21 @@ static void __sigqueue_free(struct sigqueue *q)
|
2018-08-27 14:32:32 +00:00
|
|
|
kmem_cache_free(sigqueue_cachep, q);
|
|
|
|
}
|
|
|
|
|
|
|
|
+static void sigqueue_free_current(struct sigqueue *q)
|
|
|
|
+{
|
|
|
|
+ struct user_struct *up;
|
|
|
|
+
|
|
|
|
+ if (q->flags & SIGQUEUE_PREALLOC)
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ up = q->user;
|
|
|
|
+ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
|
|
|
|
+ atomic_dec(&up->sigpending);
|
|
|
|
+ free_uid(up);
|
|
|
|
+ } else
|
|
|
|
+ __sigqueue_free(q);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
void flush_sigqueue(struct sigpending *queue)
|
|
|
|
{
|
|
|
|
struct sigqueue *q;
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -463,6 +506,21 @@ void flush_sigqueue(struct sigpending *queue)
|
|
|
|
}
|
2018-08-27 14:32:32 +00:00
|
|
|
}
|
|
|
|
|
2020-04-21 20:13:52 +00:00
|
|
|
+/*
|
2018-08-27 14:32:32 +00:00
|
|
|
+ * Called from __exit_signal. Flush tsk->pending and
|
|
|
|
+ * tsk->sigqueue_cache
|
|
|
|
+ */
|
|
|
|
+void flush_task_sigqueue(struct task_struct *tsk)
|
|
|
|
+{
|
|
|
|
+ struct sigqueue *q;
|
|
|
|
+
|
|
|
|
+ flush_sigqueue(&tsk->pending);
|
|
|
|
+
|
|
|
|
+ q = get_task_cache(tsk);
|
|
|
|
+ if (q)
|
|
|
|
+ kmem_cache_free(sigqueue_cachep, q);
|
|
|
|
+}
|
|
|
|
+
|
2020-04-21 20:13:52 +00:00
|
|
|
/*
|
2018-08-27 14:32:32 +00:00
|
|
|
* Flush all pending signals for this kthread.
|
|
|
|
*/
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -586,7 +644,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
|
2018-08-27 14:32:32 +00:00
|
|
|
(info->si_code == SI_TIMER) &&
|
|
|
|
(info->si_sys_private);
|
|
|
|
|
|
|
|
- __sigqueue_free(first);
|
|
|
|
+ sigqueue_free_current(first);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Ok, it wasn't in the queue. This must be
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -623,6 +681,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
|
2018-08-27 14:32:32 +00:00
|
|
|
bool resched_timer = false;
|
|
|
|
int signr;
|
|
|
|
|
|
|
|
+ WARN_ON_ONCE(tsk != current);
|
|
|
|
+
|
|
|
|
/* We only dequeue private signals from ourselves, we don't let
|
|
|
|
* signalfd steal them
|
|
|
|
*/
|
2020-04-21 20:13:52 +00:00
|
|
|
@@ -1761,7 +1821,8 @@ EXPORT_SYMBOL(kill_pid);
|
2018-08-27 14:32:32 +00:00
|
|
|
*/
|
|
|
|
struct sigqueue *sigqueue_alloc(void)
|
|
|
|
{
|
|
|
|
- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
|
|
|
|
+ /* Preallocated sigqueue objects always from the slabcache ! */
|
|
|
|
+ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
|
|
|
|
|
|
|
|
if (q)
|
|
|
|
q->flags |= SIGQUEUE_PREALLOC;
|
2020-04-21 20:13:52 +00:00
|
|
|
--
|
2020-06-22 13:14:16 +00:00
|
|
|
2.17.1
|
2020-04-21 20:13:52 +00:00
|
|
|
|