-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c        |  2 +-
-rw-r--r--  block/blk-core.c                        |  4 ++--
-rw-r--r--  block/blk-mq.c                          |  4 ++--
-rw-r--r--  drivers/thermal/x86_pkg_temp_thermal.c  |  2 +-
-rw-r--r--  fs/aio.c                                | 10 +++++-----
-rw-r--r--  include/linux/blkdev.h                  |  2 +-
-rw-r--r--  include/linux/cgroup-defs.h             |  4 ++--
 7 files changed, 14 insertions, 14 deletions
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a080b49390197..430a4ec078113 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -42,7 +42,7 @@
#include <linux/irq_work.h>
#include <linux/export.h>
#include <linux/jiffies.h>
-#include <linux/work-simple.h>
+#include <linux/swork.h>
#include <asm/processor.h>
#include <asm/traps.h>
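The mce.c hunk above, and the thermal, aio and cgroup hunks below, only rename an include: the out-of-tree "simple work" header moves from work-simple.h to swork.h. As a rough illustration of what that header provides, here is a minimal sketch assuming the -rt simple-work API (INIT_SWORK, swork_get/swork_put, swork_queue; names taken from the -rt patch set, not from this diff):

#include <linux/swork.h>

static struct swork_event my_event;

static void my_event_fn(struct swork_event *event)
{
	/* runs in process context from the swork kernel thread */
}

static int __init my_init(void)
{
	int err = swork_get();		/* bring up the worker thread */

	if (err)
		return err;
	INIT_SWORK(&my_event, my_event_fn);
	return 0;
}

static void my_trigger(void)		/* callable from a context that cannot sleep */
{
	swork_queue(&my_event);
}

static void __exit my_exit(void)
{
	swork_put();			/* drop our reference on the worker */
}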
diff --git a/block/blk-core.c b/block/blk-core.c
index 4c8cefd2c0199..2f7afb98e8e0e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -680,7 +680,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);
- swait_wake_all(&q->mq_freeze_wq);
+ swake_up_all(&q->mq_freeze_wq);
}
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
@@ -742,7 +742,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
- init_swait_head(&q->mq_freeze_wq);
+ init_swait_queue_head(&q->mq_freeze_wq);
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
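The blk-core.c change above, the blk-mq.c hunks below, and the blkdev.h hunk further down all track the rename of the simple-waitqueue API to the names that went upstream: struct swait_head becomes struct swait_queue_head, init_swait_head() becomes init_swait_queue_head(), and swait_wake_all() becomes swake_up_all(). A minimal sketch of the renamed interface in the freeze/unfreeze pattern used here (illustrative only, not code from this diff; swait_event() is the mainline companion macro assumed for the waiting side):

#include <linux/swait.h>

static struct swait_queue_head freeze_wq;
static bool frozen;

static void freeze_init(void)
{
	init_swait_queue_head(&freeze_wq);	/* was init_swait_head() */
}

static void wait_until_unfrozen(void)
{
	/* sleep until the condition holds; simple waitqueues keep the
	 * wake-up side short and usable from raw-spinlock context on -rt */
	swait_event(freeze_wq, !frozen);
}

static void unfreeze(void)
{
	frozen = false;
	swake_up_all(&freeze_wq);		/* was swait_wake_all() */
}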
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3c70813cb6b63..7cdf19e4aaea9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -130,7 +130,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
WARN_ON_ONCE(freeze_depth < 0);
if (!freeze_depth) {
percpu_ref_reinit(&q->q_usage_counter);
- swait_wake_all(&q->mq_freeze_wq);
+ swake_up_all(&q->mq_freeze_wq);
}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
@@ -149,7 +149,7 @@ void blk_mq_wake_waiters(struct request_queue *q)
* dying, we need to ensure that processes currently waiting on
* the queue are notified as well.
*/
- swait_wake_all(&q->mq_freeze_wq);
+ swake_up_all(&q->mq_freeze_wq);
}
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index a774f0c8d22bf..e03fa17b8670e 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -29,7 +29,7 @@
#include <linux/pm.h>
#include <linux/thermal.h>
#include <linux/debugfs.h>
-#include <linux/work-simple.h>
+#include <linux/swork.h>
#include <asm/cpu_device_id.h>
#include <asm/mce.h>
diff --git a/fs/aio.c b/fs/aio.c
index 14af015402882..2d770b979ab05 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -40,7 +40,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
-#include <linux/work-simple.h>
+#include <linux/swork.h>
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
@@ -1314,10 +1314,10 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
* Create an aio_context capable of receiving at least nr_events.
* ctxp must not point to an aio_context that already exists, and
* must be initialized to 0 prior to the call. On successful
- * creation of the aio_context, *ctxp is filled in with the resulting
+ * creation of the aio_context, *ctxp is filled in with the resulting
* handle. May fail with -EINVAL if *ctxp is not initialized,
- * if the specified nr_events exceeds internal limits. May fail
- * with -EAGAIN if the specified nr_events exceeds the user's limit
+ * if the specified nr_events exceeds internal limits. May fail
+ * with -EAGAIN if the specified nr_events exceeds the user's limit
* of available events. May fail with -ENOMEM if insufficient kernel
* resources are available. May fail with -EFAULT if an invalid
* pointer is passed for ctxp. Will fail with -ENOSYS if not
@@ -1354,7 +1354,7 @@ out:
}
/* sys_io_destroy:
- * Destroy the aio_context specified. May cancel any outstanding
+ * Destroy the aio_context specified. May cancel any outstanding
* AIOs and block on completion. Will fail with -ENOSYS if not
* implemented. May fail with -EINVAL if the context pointed to
* is invalid.
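The fs/aio.c hunks above appear to adjust only whitespace in the io_setup()/io_destroy() documentation; the contract they describe is unchanged. Purely as a reading aid, a small userspace sketch of that contract using the raw syscalls (hypothetical example, not part of the patch):

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	aio_context_t ctx = 0;	/* must be zero before io_setup(), else -EINVAL */

	if (syscall(SYS_io_setup, 128, &ctx) < 0) {
		/* EINVAL: ctx not zeroed or nr_events over internal limits;
		 * EAGAIN: nr_events exceeds the caller's limit; ENOMEM; ... */
		fprintf(stderr, "io_setup: %s\n", strerror(errno));
		return 1;
	}

	/* ... io_submit()/io_getevents() would go here ... */

	/* may cancel outstanding AIOs and block on their completion */
	if (syscall(SYS_io_destroy, ctx) < 0)
		fprintf(stderr, "io_destroy: %s\n", strerror(errno));
	return 0;
}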
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2366b9cec665f..77fed1b9afcd6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -456,7 +456,7 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
- struct swait_head mq_freeze_wq;
+ struct swait_queue_head mq_freeze_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 634111447aea5..f3d8e072c014c 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,8 +16,8 @@
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
-#include <linux/work-simple.h>
-
+#include <linux/swait.h>
+#include <linux/swork.h>
#ifdef CONFIG_CGROUPS
struct cgroup;