Subject: [PATCH 1/2] sched: Move __task_rq_{,un}lock() to kernel/sched/sched.h
From: Kirill Tkhai <ktkhai@parallels.com>
Date: Tue, 17 Feb 2015 13:46:51 +0300
Place them in sched.h, because dl_task_timer() needs them. Also remove the lockdep check (lockdep_assert_held(&p->pi_lock)), which does not fit this function: dl_task_timer() is called without p->pi_lock held.
Similar to the idea of Josh Poimboeuf for task_rq_{,un}lock(): https://lkml.org/lkml/2015/2/9/476
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
CC: Josh Poimboeuf <jpoimboe@redhat.com>
---
 kernel/sched/core.c     |   28 ----------------------------
 kernel/sched/deadline.c |   12 +++---------
 kernel/sched/sched.h    |   26 ++++++++++++++++++++++++++
 3 files changed, 29 insertions(+), 37 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0466e61..fc12a1d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -307,28 +307,6 @@ __read_mostly int scheduler_running;
 int sysctl_sched_rt_runtime = 950000;
 
 /*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	lockdep_assert_held(&p->pi_lock);
-
-	for (;;) {
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-/*
  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
  */
 static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
@@ -351,12 +329,6 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
-static void __task_rq_unlock(struct rq *rq)
-	__releases(rq->lock)
-{
-	raw_spin_unlock(&rq->lock);
-}
-
 static inline void
 task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
 	__releases(rq->lock)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 29ae704..e008b51 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -512,14 +512,8 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 						     dl_timer);
 	struct task_struct *p = dl_task_of(dl_se);
 	struct rq *rq;
-again:
-	rq = task_rq(p);
-	raw_spin_lock(&rq->lock);
-	if (rq != task_rq(p) || task_on_rq_migrating(p)) {
-		/* Task was move{d,ing}, retry */
-		raw_spin_unlock(&rq->lock);
-		goto again;
-	}
+
+	rq = __task_rq_lock(p);
 
 	/*
 	 * We need to take care of several possible races here:
@@ -574,7 +568,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		push_dl_task(rq);
 #endif
 unlock:
-	raw_spin_unlock(&rq->lock);
+	__task_rq_unlock(rq);
 
 	return HRTIMER_NORESTART;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0870db2..f65f57c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1020,6 +1020,32 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 	return p->on_rq == TASK_ON_RQ_MIGRATING;
 }
 
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	for (;;) {
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+static inline void __task_rq_unlock(struct rq *rq)
+	__releases(rq->lock)
+{
+	raw_spin_unlock(&rq->lock);
+}
+
 #ifndef prepare_arch_switch
 # define prepare_arch_switch(next)	do { } while (0)
 #endif
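
For illustration, a minimal sketch of how a caller that holds no p->pi_lock
(such as dl_task_timer() above) would use the pair now that it is visible
from sched.h; frob_task_rq() is a hypothetical name, not part of this patch:

	static void frob_task_rq(struct task_struct *p)
	{
		struct rq *rq;

		/* Spins until @p is stably attached to a single rq */
		rq = __task_rq_lock(p);

		/* ... @p cannot be migrated off this rq while rq->lock is held ... */

		__task_rq_unlock(rq);
	}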