rcu: Eliminate ->onoff_mutex from rcu_state structure
Because RCU grace-period initialization need no longer exclude CPU-hotplug operations, this commit eliminates the ->onoff_mutex and its uses. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
This commit is contained in:
parent
0aa04b055e
commit
c199068913
@ -101,7 +101,6 @@ struct rcu_state sname##_state = { \
|
|||||||
.orphan_nxttail = &sname##_state.orphan_nxtlist, \
|
.orphan_nxttail = &sname##_state.orphan_nxtlist, \
|
||||||
.orphan_donetail = &sname##_state.orphan_donelist, \
|
.orphan_donetail = &sname##_state.orphan_donelist, \
|
||||||
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
|
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
|
||||||
.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
|
|
||||||
.name = RCU_STATE_NAME(sname), \
|
.name = RCU_STATE_NAME(sname), \
|
||||||
.abbr = sabbr, \
|
.abbr = sabbr, \
|
||||||
}; \
|
}; \
|
||||||
@ -1754,10 +1753,6 @@ static int rcu_gp_init(struct rcu_state *rsp)
|
|||||||
trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
|
trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
|
||||||
raw_spin_unlock_irq(&rnp->lock);
|
raw_spin_unlock_irq(&rnp->lock);
|
||||||
|
|
||||||
/* Exclude any concurrent CPU-hotplug operations. */
|
|
||||||
mutex_lock(&rsp->onoff_mutex);
|
|
||||||
smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Apply per-leaf buffered online and offline operations to the
|
* Apply per-leaf buffered online and offline operations to the
|
||||||
* rcu_node tree. Note that this new grace period need not wait
|
* rcu_node tree. Note that this new grace period need not wait
|
||||||
@ -1844,7 +1839,6 @@ static int rcu_gp_init(struct rcu_state *rsp)
|
|||||||
schedule_timeout_uninterruptible(gp_init_delay);
|
schedule_timeout_uninterruptible(gp_init_delay);
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_unlock(&rsp->onoff_mutex);
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2498,9 +2492,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
|
|||||||
/* Adjust any no-longer-needed kthreads. */
|
/* Adjust any no-longer-needed kthreads. */
|
||||||
rcu_boost_kthread_setaffinity(rnp, -1);
|
rcu_boost_kthread_setaffinity(rnp, -1);
|
||||||
|
|
||||||
/* Exclude any attempts to start a new grace period. */
|
|
||||||
mutex_lock(&rsp->onoff_mutex);
|
|
||||||
|
|
||||||
/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
|
/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
|
||||||
raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
|
raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
|
||||||
rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
|
rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
|
||||||
@ -2517,7 +2508,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
|
|||||||
WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
|
WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
|
||||||
"rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
|
"rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
|
||||||
cpu, rdp->qlen, rdp->nxtlist);
|
cpu, rdp->qlen, rdp->nxtlist);
|
||||||
mutex_unlock(&rsp->onoff_mutex);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#else /* #ifdef CONFIG_HOTPLUG_CPU */
|
#else /* #ifdef CONFIG_HOTPLUG_CPU */
|
||||||
@ -3700,9 +3690,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
|
|||||||
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
||||||
struct rcu_node *rnp = rcu_get_root(rsp);
|
struct rcu_node *rnp = rcu_get_root(rsp);
|
||||||
|
|
||||||
/* Exclude new grace periods. */
|
|
||||||
mutex_lock(&rsp->onoff_mutex);
|
|
||||||
|
|
||||||
/* Set up local state, ensuring consistent view of global state. */
|
/* Set up local state, ensuring consistent view of global state. */
|
||||||
raw_spin_lock_irqsave(&rnp->lock, flags);
|
raw_spin_lock_irqsave(&rnp->lock, flags);
|
||||||
rdp->beenonline = 1; /* We have now been online. */
|
rdp->beenonline = 1; /* We have now been online. */
|
||||||
@ -3733,8 +3720,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
|
|||||||
rdp->qs_pending = false;
|
rdp->qs_pending = false;
|
||||||
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
|
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
|
||||||
raw_spin_unlock_irqrestore(&rnp->lock, flags);
|
raw_spin_unlock_irqrestore(&rnp->lock, flags);
|
||||||
|
|
||||||
mutex_unlock(&rsp->onoff_mutex);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void rcu_prepare_cpu(int cpu)
|
static void rcu_prepare_cpu(int cpu)
|
||||||
|
@ -456,8 +456,6 @@ struct rcu_state {
|
|||||||
long qlen; /* Total number of callbacks. */
|
long qlen; /* Total number of callbacks. */
|
||||||
/* End of fields guarded by orphan_lock. */
|
/* End of fields guarded by orphan_lock. */
|
||||||
|
|
||||||
struct mutex onoff_mutex; /* Coordinate hotplug & GPs. */
|
|
||||||
|
|
||||||
struct mutex barrier_mutex; /* Guards barrier fields. */
|
struct mutex barrier_mutex; /* Guards barrier fields. */
|
||||||
atomic_t barrier_cpu_count; /* # CPUs waiting on. */
|
atomic_t barrier_cpu_count; /* # CPUs waiting on. */
|
||||||
struct completion barrier_completion; /* Wake at barrier end. */
|
struct completion barrier_completion; /* Wake at barrier end. */
|
||||||
|
Loading…
Reference in New Issue
Block a user