workqueue: improve destroy_workqueue() debuggability
Now that the worklist is global, having works pending after wq destruction can easily lead to oops, and destroy_workqueue() has several BUG_ON()s to catch these cases. Unfortunately, a BUG_ON() doesn't tell much about how the work became pending after the final flush_workqueue().

This patch adds WQ_DYING, which is set before the final flush begins. If a work is requested to be queued on a dying workqueue, WARN_ON_ONCE() is triggered and the request is ignored. This clearly indicates which caller is trying to queue a work on a dying workqueue and keeps the system working in most cases.

The locking rule comment is updated so that the 'I' rule includes modifying the field from the destruction path.

Signed-off-by: Tejun Heo <tj@kernel.org>
commit e41e704bc4
parent 972fa1c531
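To make the failure mode concrete, here is a minimal, hypothetical sketch of the kind of bug this patch makes visible: a self-requeueing work item racing against workqueue destruction. All identifiers (example_wq, example_work, etc.) are illustrative only and are not part of the patch.

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;	/* hypothetical */
	static struct work_struct example_work;

	static void example_work_fn(struct work_struct *work)
	{
		/* ... deferred processing ... */

		/* Buggy: unconditionally re-arms itself, so a work can become
		 * pending again after destroy_workqueue()'s final flush. */
		queue_work(example_wq, &example_work);
	}

	static int example_init(void)
	{
		example_wq = alloc_workqueue("example", 0, 0);
		if (!example_wq)
			return -ENOMEM;
		INIT_WORK(&example_work, example_work_fn);
		queue_work(example_wq, &example_work);
		return 0;
	}

	static void example_exit(void)
	{
		/*
		 * destroy_workqueue() now sets WQ_DYING before the final
		 * flush_workqueue().  A re-queue attempt coming from
		 * example_work_fn() hits the WARN_ON_ONCE() in __queue_work(),
		 * producing a backtrace that names the offending caller, and
		 * the request is dropped instead of leaving a pending work
		 * behind to trip a BUG_ON() later.
		 */
		destroy_workqueue(example_wq);
	}

A driver like this would still need to stop re-arming the work (for example via a "stopping" flag checked in the work function) before tearing the workqueue down; the new WARN_ON_ONCE() simply points straight at the call site that forgot to.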
@@ -241,6 +241,8 @@ enum {
 	WQ_HIGHPRI		= 1 << 4,	/* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5,	/* cpu instensive workqueue */
 
+	WQ_DYING		= 1 << 6,	/* internal: workqueue is dying */
+
 	WQ_MAX_ACTIVE		= 512,		/* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,		/* 4 * #cpus for unbound wq */
 	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
@@ -87,7 +87,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected. Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -944,6 +945,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
+	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+		return;
+
 	/* determine gcwq to use */
 	if (!(wq->flags & WQ_UNBOUND)) {
 		struct global_cwq *last_gcwq;
@@ -2828,6 +2832,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
 	unsigned int cpu;
 
+	wq->flags |= WQ_DYING;
 	flush_workqueue(wq);
 
 	/*