kernel/futex.c +3 −6

@@ -1029,7 +1029,6 @@ static inline
 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
                            struct futex_hash_bucket *hb)
 {
-        drop_futex_key_refs(&q->key);
         get_futex_key_refs(key);
         q->key = *key;

@@ -1227,6 +1226,7 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
                  */
                 if (ret == 1) {
                         WARN_ON(pi_state);
+                        drop_count++;
                         task_count++;
                         ret = get_futex_value_locked(&curval2, uaddr2);
                         if (!ret)

@@ -1305,6 +1305,7 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
                         if (ret == 1) {
                                 /* We got the lock. */
                                 requeue_pi_wake_futex(this, &key2, hb2);
+                                drop_count++;
                                 continue;
                         } else if (ret) {
                                 /* -EDEADLK */

@@ -2126,7 +2127,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
                 plist_del(&q->list, &q->list.plist);
 
                 /* Handle spurious wakeups gracefully */
-                ret = -EAGAIN;
+                ret = -EWOULDBLOCK;
                 if (timeout && !timeout->task)
                         ret = -ETIMEDOUT;
                 else if (signal_pending(current))

@@ -2207,7 +2208,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
         debug_rt_mutex_init_waiter(&rt_waiter);
         rt_waiter.task = NULL;
 
-retry:
         key2 = FUTEX_KEY_INIT;
         ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
         if (unlikely(ret != 0))

@@ -2302,9 +2302,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
 out_key2:
         put_futex_key(fshared, &key2);
 
-        /* Spurious wakeup ? */
-        if (ret == -EAGAIN)
-                goto retry;
 out:
         if (to) {
                 hrtimer_cancel(&to->timer);
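Two threads run through these hunks: requeue_pi_wake_futex() no longer drops the source key's reference itself, with futex_requeue() instead counting each requeue in drop_count and settling the references afterward; and spurious wakeups are no longer retried inside futex_wait_requeue_pi() (the retry label is gone), since handle_early_requeue_pi_wakeup() now reports -EWOULDBLOCK and leaves the decision to the caller. Below is a minimal userspace sketch of the deferred-drop pattern only; every struct and helper name is invented for illustration, not kernel API:

```c
/* Userspace model of the deferred-drop pattern; names invented. */
#include <assert.h>
#include <stdio.h>

struct key {
        int refs;
};

static void get_key(struct key *k)  { k->refs++; }
static void drop_key(struct key *k) { assert(k->refs > 0); k->refs--; }

struct waiter {
        struct key *key;
};

/*
 * Like requeue_pi_wake_futex() after this change: take a reference on
 * the destination key only; the source key's drop is deferred to the
 * caller via drop_count.
 */
static void requeue(struct waiter *w, struct key *dst, int *drop_count)
{
        get_key(dst);
        w->key = dst;
        (*drop_count)++;        /* old reference released later, in one place */
}

int main(void)
{
        struct key src = { .refs = 1 }, dst = { .refs = 0 };
        struct waiter w = { .key = &src };
        int drop_count = 0;

        requeue(&w, &dst, &drop_count);

        /* the caller settles all deferred drops once, outside the hot path */
        while (drop_count-- > 0)
                drop_key(&src);

        printf("src.refs = %d, dst.refs = %d\n", src.refs, dst.refs); /* 0, 1 */
        return 0;
}
```

Settling the drops in one place keeps the per-waiter requeue step cheap and makes the reference accounting auditable at a single point in futex_requeue().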
kernel/rcutree.c +25 −6

@@ -61,7 +61,7 @@ static struct lock_class_key rcu_root_class;
                 NUM_RCU_LVL_2, \
                 NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
         }, \
-        .signaled = RCU_SIGNAL_INIT, \
+        .signaled = RCU_GP_IDLE, \
         .gpnum = -300, \
         .completed = -300, \
         .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \

@@ -663,10 +663,13 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
                 rcu_preempt_check_blocked_tasks(rnp);
                 rnp->qsmask = rnp->qsmaskinit;
                 rnp->gpnum = rsp->gpnum;
-                spin_unlock(&rnp->lock);        /* irqs already disabled. */
+                spin_unlock(&rnp->lock);        /* irqs remain disabled. */
         }
 
+        rnp = rcu_get_root(rsp);
+        spin_lock(&rnp->lock);          /* irqs already disabled. */
         rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+        spin_unlock(&rnp->lock);        /* irqs remain disabled. */
         spin_unlock_irqrestore(&rsp->onofflock, flags);
 }

@@ -708,6 +711,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 {
         WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
         rsp->completed = rsp->gpnum;
+        rsp->signaled = RCU_GP_IDLE;
         rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
         rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
 }

@@ -915,7 +919,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
                         spin_unlock(&rnp->lock); /* irqs remain disabled. */
                         break;
                 }
-                rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+                /*
+                 * If there was a task blocking the current grace period,
+                 * and if all CPUs have checked in, we need to propagate
+                 * the quiescent state up the rcu_node hierarchy.  But that
+                 * is inconvenient at the moment due to deadlock issues if
+                 * this should end the current grace period.  So set the
+                 * offlined CPU's bit in ->qsmask in order to force the
+                 * next force_quiescent_state() invocation to clean up this
+                 * mess in a deadlock-free manner.
+                 */
+                if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+                        rnp->qsmask |= mask;
+
                 mask = rnp->grpmask;
                 spin_unlock(&rnp->lock); /* irqs remain disabled. */
                 rnp = rnp->parent;

@@ -1151,9 +1168,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
         }
         spin_unlock(&rnp->lock);
         switch (signaled) {
+        case RCU_GP_IDLE:
         case RCU_GP_INIT:
 
-                break; /* grace period still initializing, ignore. */
+                break; /* grace period idle or initializing, ignore. */
 
         case RCU_SAVE_DYNTICK:

@@ -1167,7 +1185,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
                 /* Update state, record completion counter. */
                 spin_lock(&rnp->lock);
-                if (lastcomp == rsp->completed) {
+                if (lastcomp == rsp->completed &&
+                    rsp->signaled == RCU_SAVE_DYNTICK) {
                         rsp->signaled = RCU_FORCE_QS;
                         dyntick_record_completed(rsp, lastcomp);
                 }
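Together with the rcutree.h hunk below, these changes give ->signaled an explicit idle state: it is reset to RCU_GP_IDLE when the last quiescent state is reported, and the SAVE_DYNTICK-to-FORCE_QS step now happens only if the state is still RCU_SAVE_DYNTICK when rechecked under the lock. A toy C model of the resulting state machine (shortened names, no locking, purely illustrative):

```c
/* Toy model of the four-state ->signaled machine; not kernel code. */
#include <stdio.h>

enum fqs_state { GP_IDLE, GP_INIT, SAVE_DYNTICK, FORCE_QS };

static const char *const state_name[] = {
        "GP_IDLE", "GP_INIT", "SAVE_DYNTICK", "FORCE_QS",
};

/* What one force_quiescent_state() pass may do for a snapshotted state. */
static enum fqs_state fqs_step(enum fqs_state cur)
{
        switch (cur) {
        case GP_IDLE:           /* no grace period in progress... */
        case GP_INIT:           /* ...or one still initializing: ignore */
                return cur;
        case SAVE_DYNTICK:      /* dyntick state scanned: advance once */
                return FORCE_QS;
        case FORCE_QS:          /* keep forcing until the GP completes */
                return FORCE_QS;
        }
        return cur;
}

int main(void)
{
        /* rcu_start_gp() would take GP_IDLE through GP_INIT to SAVE_DYNTICK */
        enum fqs_state s = SAVE_DYNTICK;
        int i;

        for (i = 0; i < 3; i++) {
                enum fqs_state next = fqs_step(s);

                printf("%s -> %s\n", state_name[s], state_name[next]);
                s = next;
        }
        /* cpu_quiet_msk_finish() would then reset the state to GP_IDLE */
        return 0;
}
```

The recheck in the last hunk matters because `signaled` was snapshotted before rnp->lock was dropped: without it, a belated forcing pass with a stale snapshot could move a grace period that has since completed (or been re-initialized) into RCU_FORCE_QS.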
kernel/rcutree.h +7 −6

@@ -201,9 +201,10 @@ struct rcu_data {
 };
 
 /* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT             0       /* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK        1       /* Need to scan dyntick state. */
-#define RCU_FORCE_QS            2       /* Need to force quiescent state. */
+#define RCU_GP_IDLE             0       /* No grace period in progress. */
+#define RCU_GP_INIT             1       /* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK        2       /* Need to scan dyntick state. */
+#define RCU_FORCE_QS            3       /* Need to force quiescent state. */
 #ifdef CONFIG_NO_HZ
 #define RCU_SIGNAL_INIT         RCU_SAVE_DYNTICK
 #else /* #ifdef CONFIG_NO_HZ */

@@ -306,7 +307,7 @@ static void rcu_print_task_stall(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                      struct rcu_node *rnp,
-                                      struct rcu_data *rdp);
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+                                     struct rcu_node *rnp,
+                                     struct rcu_data *rdp);
 static void rcu_preempt_offline_cpu(int cpu);

kernel/rcutree_plugin.h +17 −8

@@ -304,21 +304,25 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  * parent is to remove the need for rcu_read_unlock_special() to
  * make more than two attempts to acquire the target rcu_node's lock.
  *
+ * Returns 1 if there was previously a task blocking the current grace
+ * period on the specified rcu_node structure.
+ *
  * The caller must hold rnp->lock with irqs disabled.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                      struct rcu_node *rnp,
-                                      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+                                     struct rcu_node *rnp,
+                                     struct rcu_data *rdp)
 {
         int i;
         struct list_head *lp;
         struct list_head *lp_root;
+        int retval = rcu_preempted_readers(rnp);
         struct rcu_node *rnp_root = rcu_get_root(rsp);
         struct task_struct *tp;
 
         if (rnp == rnp_root) {
                 WARN_ONCE(1, "Last CPU thought to be offlined?");
-                return;  /* Shouldn't happen: at least one CPU online. */
+                return 0;  /* Shouldn't happen: at least one CPU online. */
         }
         WARN_ON_ONCE(rnp != rdp->mynode &&
                      (!list_empty(&rnp->blocked_tasks[0]) ||

@@ -342,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
                         spin_unlock(&rnp_root->lock); /* irqs remain disabled */
                 }
         }
+
+        return retval;
 }
 
 /*

@@ -532,12 +538,15 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 /*
  * Because preemptable RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections.
+ * tasks that were blocked within RCU read-side critical sections, and
+ * such non-existent tasks cannot possibly have been blocking the current
+ * grace period.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                      struct rcu_node *rnp,
-                                      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+                                     struct rcu_node *rnp,
+                                     struct rcu_data *rdp)
 {
+        return 0;
 }
 
 /*
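rcu_preempt_offline_tasks() now reports whether a migrated task was blocking the current grace period, and __rcu_offline_cpu() (in the rcutree.c hunk above) uses that to leave the offlined CPU's ->qsmask bit set for the next force_quiescent_state() pass instead of ending the grace period in a deadlock-prone context. A standalone sketch of just that decision, with plain integers standing in for the kernel structures and every name invented:

```c
/* Standalone model of the offline-tasks decision; all names invented. */
#include <stdio.h>

/*
 * Stands in for rcu_preempt_offline_tasks(): returns 1 if some reader
 * was blocking the current grace period on the node being emptied.
 */
static int offline_tasks(int blocked_readers)
{
        return blocked_readers != 0;
}

int main(void)
{
        unsigned long qsmask = 0;         /* every CPU has checked in */
        unsigned long mask = 1UL << 3;    /* bit of the CPU going offline */
        int blocked_readers = 1;          /* one migrated blocked reader */

        /*
         * Ending the grace period right here would have to propagate the
         * quiescent state up the tree with locks already held, so instead
         * re-mark the offlined CPU as non-quiescent and let the next
         * force_quiescent_state() pass finish the job without deadlock.
         */
        if (offline_tasks(blocked_readers) && !qsmask)
                qsmask |= mask;

        printf("qsmask = %#lx\n", qsmask);  /* 0x8: cleanup deferred */
        return 0;
}
```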
kernel/user.c +1 −1

@@ -330,9 +330,9 @@ static void cleanup_user_struct(struct work_struct *w)
  */
 static void free_user(struct user_struct *up, unsigned long flags)
 {
-        spin_unlock_irqrestore(&uidhash_lock, flags);
         INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
         schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
+        spin_unlock_irqrestore(&uidhash_lock, flags);
 }
 
 #else   /* CONFIG_USER_SCHED && CONFIG_SYSFS */
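This one-line move keeps uidhash_lock held across INIT_DELAYED_WORK()/schedule_delayed_work(), closing the window in which the lock was dropped before the deferred cleanup was armed. A rough pthreads analogue of the ordering, assuming a simplified refcount scheme (all names invented, not the kernel code):

```c
/* Rough pthreads analogue of the ordering fix; all names invented. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

struct user {
        int refs;
        int cleanup_armed;      /* stands in for the delayed work item */
};

/* Called with hash_lock held, as free_user() is with uidhash_lock. */
static void free_user(struct user *up)
{
        up->cleanup_armed = 1;            /* INIT_DELAYED_WORK + schedule */
        pthread_mutex_unlock(&hash_lock); /* drop the lock only afterward */
}

static void put_user(struct user *up)
{
        pthread_mutex_lock(&hash_lock);
        if (--up->refs == 0)
                free_user(up);            /* releases hash_lock */
        else
                pthread_mutex_unlock(&hash_lock);
}

int main(void)
{
        struct user u = { .refs = 1, .cleanup_armed = 0 };

        put_user(&u);
        printf("cleanup_armed = %d\n", u.cleanup_armed);
        return 0;
}
```

With the unlock last, anyone serialized on the lock observes either a fully armed cleanup or none at all, never the half-initialized in-between state.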