summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2016-05-13 13:02:32 +0200
committerSebastian Andrzej Siewior <bigeasy@linutronix.de>2016-05-13 13:02:32 +0200
commit651cb42b4c2bdc1326a06204a30ef5d4e143f2aa (patch)
tree4919dd0b1a570e9dbd1937fb3b456e59e3272ced
parenta52e3317edc58fd536845127ccd9e2e4174261d3 (diff)
download4.8-rt-patches-651cb42b4c2bdc1326a06204a30ef5d4e143f2aa.tar.gz
[ANNOUNCE] 4.6-rc7-rt1v4.6-rc7-rt1-patches
Dear RT folks! I'm pleased to announce the v4.6-rc7-rt1 patch set. I tested it on my AMD A10, 64bit. Had a few runs on ARM, nothing exploded so far. Changes since v4.4.9-rt17: - rebase to v4.6-rc7 - RWLOCKS and SPINLOCKS used to be cacheline aligned on RT. Now there are no more which is the same behaviour as in upstream. Known issues (inherited from v4.4-RT): - CPU hotplug got a little better but can deadlock. You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6-rc7-rt1 The RT patch against 4.6-rc7 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6-rc7-rt1.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6-rc7-rt1.tar.xz Sebastian Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--patches/0001-clk-at91-make-use-of-syscon-to-share-PMC-registers-i.patch121
-rw-r--r--patches/0001-wait.-ch-Introduce-the-simple-waitqueue-swait-implem.patch360
-rw-r--r--patches/0002-clk-at91-make-use-of-syscon-regmap-internally.patch3252
-rw-r--r--patches/0002-kbuild-Add-option-to-turn-incompatible-pointer-check.patch52
-rw-r--r--patches/0003-KVM-Use-simple-waitqueue-for-vcpu-wq.patch420
-rw-r--r--patches/0003-clk-at91-remove-IRQ-handling-and-use-polling.patch1033
-rw-r--r--patches/0004-clk-at91-pmc-merge-at91_pmc_init-in-atmel_pmc_probe.patch70
-rw-r--r--patches/0004-rcu-Do-not-call-rcu_nocb_gp_cleanup-while-holding-rn.patch190
-rw-r--r--patches/0005-clk-at91-pmc-move-pmc-structures-to-C-file.patch52
-rw-r--r--patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch2
-rw-r--r--patches/0005-rcu-Use-simple-wait-queues-where-possible-in-rcutree.patch310
-rw-r--r--patches/0006-ARM-at91-pm-simply-call-at91_pm_init.patch41
-rw-r--r--patches/0007-ARM-at91-pm-find-and-remap-the-pmc.patch90
-rw-r--r--patches/0008-ARM-at91-pm-move-idle-functions-to-pm.c.patch201
-rw-r--r--patches/0009-ARM-at91-remove-useless-includes-and-function-protot.patch30
-rw-r--r--patches/0010-usb-gadget-atmel-access-the-PMC-using-regmap.patch75
-rw-r--r--patches/0011-clk-at91-pmc-drop-at91_pmc_base.patch69
-rw-r--r--patches/0012-clk-at91-pmc-remove-useless-capacities-handling.patch155
-rw-r--r--patches/0013-clk-at91-remove-useless-includes.patch129
-rw-r--r--patches/ARM-imx-always-use-TWD-on-IMX6Q.patch2
-rw-r--r--patches/HACK-printk-drop-the-logbuf_lock-more-often.patch21
-rw-r--r--patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch10
-rw-r--r--patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch4
-rw-r--r--patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch2
-rw-r--r--patches/arch-arm64-Add-lazy-preempt-support.patch18
-rw-r--r--patches/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch2
-rw-r--r--patches/arm-convert-boot-lock-to-raw.patch10
-rw-r--r--patches/arm-preempt-lazy-support.patch2
-rw-r--r--patches/arm64-xen--Make-XEN-depend-on-non-rt.patch4
-rw-r--r--patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch2
-rw-r--r--patches/block-blk-mq-use-swait.patch8
-rw-r--r--patches/block-mq-don-t-complete-requests-via-IPI.patch10
-rw-r--r--patches/block-mq-drop-per-ctx-cpu_lock.patch124
-rw-r--r--patches/block-mq-drop-preempt-disable.patch6
-rw-r--r--patches/block-mq-use-cpu_light.patch68
-rw-r--r--patches/block-shorten-interrupt-disabled-regions.patch10
-rw-r--r--patches/bug-rt-dependend-variants.patch2
-rw-r--r--patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch2
-rw-r--r--patches/cgroups-use-simple-wait-in-css_release.patch8
-rw-r--r--patches/completion-use-simple-wait-queues.patch46
-rw-r--r--patches/cond-resched-softirq-rt.patch6
-rw-r--r--patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch2
-rw-r--r--patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch12
-rw-r--r--patches/cpu-rt-rework-cpu-down.patch28
-rw-r--r--patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch12
-rw-r--r--patches/cpu_down_move_migrate_enable_back.patch18
-rw-r--r--patches/cpumask-disable-offstack-on-rt.patch2
-rw-r--r--patches/crypto-ccp-remove-rwlocks_types.h.patch22
-rw-r--r--patches/dm-make-rt-aware.patch4
-rw-r--r--patches/drivers-cpuidle-coupled-fix-warning-cpuidle_coupled_.patch25
-rw-r--r--patches/drivers-media-vsp1_video-fix-compile-error.patch32
-rw-r--r--patches/drivers-net-fix-livelock-issues.patch4
-rw-r--r--patches/drivers-net-vortex-fix-locking-issues.patch2
-rw-r--r--patches/drivers-tty-pl011-irq-disable-madness.patch6
-rw-r--r--patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch2
-rw-r--r--patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch8
-rw-r--r--patches/epoll-use-get-cpu-light.patch2
-rw-r--r--patches/f2fs_Mutex_cant_be_used_by_down_write_nest_lock().patch66
-rw-r--r--patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch8
-rw-r--r--patches/fs-jbd-replace-bh_state-lock.patch2
-rw-r--r--patches/fs-ntfs-disable-interrupt-non-rt.patch22
-rw-r--r--patches/fs-replace-bh_uptodate_lock-for-rt.patch12
-rw-r--r--patches/ftrace-migrate-disable-tracing.patch8
-rw-r--r--patches/genirq-Add-default-affinity-mask-command-line-option.patch67
-rw-r--r--patches/genirq-disable-irqpoll-on-rt.patch4
-rw-r--r--patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch14
-rw-r--r--patches/genirq-force-threading.patch2
-rw-r--r--patches/genirq-update-irq_set_irqchip_state-documentation.patch2
-rw-r--r--patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch2
-rw-r--r--patches/hotplug-light-get-online-cpus.patch51
-rw-r--r--patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch2
-rw-r--r--patches/hotplug-use-migrate-disable.patch14
-rw-r--r--patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch4
-rw-r--r--patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch28
-rw-r--r--patches/hrtimers-prepare-full-preemption.patch8
-rw-r--r--patches/hwlatdetect.patch2
-rw-r--r--patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch2
-rw-r--r--patches/ide-use-nort-local-irq-variants.patch4
-rw-r--r--patches/infiniband-mellanox-ib-use-nort-irq.patch4
-rw-r--r--patches/infiniband-ulp-ipoib-remove-pkey_mutex.patch25
-rw-r--r--patches/introduce_migrate_disable_cpu_light.patch18
-rw-r--r--patches/iommu-amd--Use-WARN_ON_NORT.patch4
-rw-r--r--patches/ipc-sem-rework-semaphore-wakeups.patch8
-rw-r--r--patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch4
-rw-r--r--patches/irqwork-push_most_work_into_softirq_context.patch22
-rw-r--r--patches/jump-label-rt.patch10
-rw-r--r--patches/kconfig-disable-a-few-options-rt.patch2
-rw-r--r--patches/kernel-SRCU-provide-a-static-initializer.patch12
-rw-r--r--patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch16
-rw-r--r--patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch22
-rw-r--r--patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch4
-rw-r--r--patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch2
-rw-r--r--patches/kernel-sched-fix-preempt_disable_ip-recodring-for-pr.patch106
-rw-r--r--patches/kernel-stop_machine-partly-revert-stop_machine-Use-r.patch152
-rw-r--r--patches/kgb-serial-hackaround.patch4
-rw-r--r--patches/kvm-rt-change-async-pagefault-code-locking-for-PREEM.patch157
-rw-r--r--patches/latency-hist.patch16
-rw-r--r--patches/local-irq-rt-depending-variants.patch2
-rw-r--r--patches/localversion.patch4
-rw-r--r--patches/lockdep-no-softirq-accounting-on-rt.patch4
-rw-r--r--patches/md-raid5-percpu-handling-rt-aware.patch6
-rw-r--r--patches/mips-disable-highmem-on-rt.patch2
-rw-r--r--patches/mm-convert-swap-to-percpu-locked.patch90
-rw-r--r--patches/mm-disable-sloub-rt.patch4
-rw-r--r--patches/mm-enable-slub.patch139
-rw-r--r--patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch4
-rw-r--r--patches/mm-memcontrol-do_not_disable_irq.patch85
-rw-r--r--patches/mm-page-alloc-use-local-lock-on-target-cpu.patch2
-rw-r--r--patches/mm-page_alloc-reduce-lock-sections-further.patch25
-rw-r--r--patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch40
-rw-r--r--patches/mm-protect-activate-switch-mm.patch2
-rw-r--r--patches/mm-rt-kmap-atomic-scheduling.patch2
-rw-r--r--patches/mm-vmalloc-use-get-cpu-light.patch10
-rw-r--r--patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch51
-rw-r--r--patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch4
-rw-r--r--patches/move_sched_delayed_work_to_helper.patch2
-rw-r--r--patches/net-another-local-irq-disable-alloc-atomic-headache.patch8
-rw-r--r--patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch2
-rw-r--r--patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch72
-rw-r--r--patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch2
-rw-r--r--patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch6
-rw-r--r--patches/net-make-devnet_rename_seq-a-mutex.patch14
-rw-r--r--patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch14
-rw-r--r--patches/net-prevent-abba-deadlock.patch2
-rw-r--r--patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch4
-rw-r--r--patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch2
-rw-r--r--patches/net-tx-action-avoid-livelock-on-rt.patch4
-rw-r--r--patches/net-use-cpu-chill.patch10
-rw-r--r--patches/net-wireless-warn-nort.patch2
-rw-r--r--patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch2
-rw-r--r--patches/oleg-signal-rt-fix.patch4
-rw-r--r--patches/panic-change-nmi_panic-from-macro-to-function.patch112
-rw-r--r--patches/panic-disable-random-on-rt.patch2
-rw-r--r--patches/panic-x86-Allow-CPUs-to-save-registers-even-if-loopi.patch245
-rw-r--r--patches/panic-x86-Fix-re-entrance-problem-due-to-panic-on-NM.patch188
-rw-r--r--patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch20
-rw-r--r--patches/pci-access-use-__wake_up_all_locked.patch2
-rw-r--r--patches/perf-make-swevent-hrtimer-irqsafe.patch2
-rw-r--r--patches/peter_zijlstra-frob-rcu.patch2
-rw-r--r--patches/peterz-srcu-crypto-chain.patch4
-rw-r--r--patches/ping-sysrq.patch2
-rw-r--r--patches/posix-timers-thread-posix-cpu-timers-on-rt.patch12
-rw-r--r--patches/power-use-generic-rwsem-on-rt.patch2
-rw-r--r--patches/powerpc-preempt-lazy-support.patch14
-rw-r--r--patches/preempt-lazy-check-preempt_schedule.patch6
-rw-r--r--patches/preempt-lazy-support.patch73
-rw-r--r--patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch2
-rw-r--r--patches/printk-kill.patch14
-rw-r--r--patches/printk-rt-aware.patch50
-rw-r--r--patches/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch2
-rw-r--r--patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch22
-rw-r--r--patches/radix-tree-rt-aware.patch10
-rw-r--r--patches/random-make-it-work-on-rt.patch10
-rw-r--r--patches/rcu-Eliminate-softirq-processing-from-rcutree.patch26
-rw-r--r--patches/rcu-disable-more-spots-of-rcu_bh.patch12
-rw-r--r--patches/rcu-disable-rcu-fast-no-hz-on-rt.patch2
-rw-r--r--patches/rcu-make-RCU_BOOST-default-on-RT.patch4
-rw-r--r--patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch46
-rw-r--r--patches/rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch4
-rw-r--r--patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch2
-rw-r--r--patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch2
-rw-r--r--patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch4
-rw-r--r--patches/rt-add-rt-locks.patch37
-rw-r--r--patches/rt-introduce-cpu-chill.patch2
-rw-r--r--patches/rt-local-irq-lock.patch24
-rw-r--r--patches/rtmutex-Make-wait_lock-irq-safe.patch597
-rw-r--r--patches/rtmutex-futex-prepare-rt.patch18
-rw-r--r--patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch2
-rw-r--r--patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch2
-rw-r--r--patches/sched-cputime-Clarify-vtime-symbols-and-document-the.patch89
-rw-r--r--patches/sched-cputime-Convert-vtime_seqlock-to-seqcount.patch206
-rw-r--r--patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch2
-rw-r--r--patches/sched-delay-put-task.patch10
-rw-r--r--patches/sched-disable-rt-group-sched-on-rt.patch2
-rw-r--r--patches/sched-limit-nr-migrate.patch2
-rw-r--r--patches/sched-might-sleep-do-not-account-rcu-depth.patch6
-rw-r--r--patches/sched-mmdrop-delayed.patch14
-rw-r--r--patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch50
-rw-r--r--patches/sched-rt-mutex-wakeup.patch10
-rw-r--r--patches/sched-ttwu-ensure-success-return-is-correct.patch2
-rw-r--r--patches/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch2
-rw-r--r--patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch4
-rw-r--r--patches/seqlock-prevent-rt-starvation.patch2
-rw-r--r--patches/series54
-rw-r--r--patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch6
-rw-r--r--patches/skbufhead-raw-lock.patch16
-rw-r--r--patches/slub-disable-SLUB_CPU_PARTIAL.patch2
-rw-r--r--patches/slub-enable-irqs-for-no-wait.patch4
-rw-r--r--patches/softirq-disable-softirq-stacks-for-rt.patch6
-rw-r--r--patches/softirq-preempt-fix-3-re.patch86
-rw-r--r--patches/softirq-split-locks.patch24
-rw-r--r--patches/sparc64-use-generic-rwsem-spinlocks-rt.patch2
-rw-r--r--patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch48
-rw-r--r--patches/stop-machine-raw-lock.patch126
-rw-r--r--patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch10
-rw-r--r--patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch6
-rw-r--r--patches/suspend-prevernt-might-sleep-splats.patch2
-rw-r--r--patches/sysfs-realtime-entry.patch6
-rw-r--r--patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch6
-rw-r--r--patches/tasklist-lock-fix-section-conflict.patch55
-rw-r--r--patches/timekeeping-split-jiffies-lock.patch4
-rw-r--r--patches/timers-preempt-rt-support.patch4
-rw-r--r--patches/timers-prepare-for-full-preemption.patch6
-rw-r--r--patches/tracing-account-for-preempt-off-in-preempt_schedule.patch2
-rw-r--r--patches/tracing-writeback-Replace-cgroup-path-to-cgroup-ino.patch392
-rw-r--r--patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch2
-rw-r--r--patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch4
-rw-r--r--patches/usb-use-_nort-in-giveback.patch2
-rw-r--r--patches/work-queue-work-around-irqsafe-timer-optimization.patch2
-rw-r--r--patches/work-simple-Simple-work-queue-implemenation.patch2
-rw-r--r--patches/workqueue-distangle-from-rq-lock.patch45
-rw-r--r--patches/workqueue-prevent-deadlock-stall.patch26
-rw-r--r--patches/workqueue-use-locallock.patch22
-rw-r--r--patches/workqueue-use-rcu.patch61
-rw-r--r--patches/x86-UV-raw_spinlock-conversion.patch2
-rw-r--r--patches/x86-kvm-require-const-tsc-for-rt.patch2
-rw-r--r--patches/x86-mce-timer-hrtimer.patch22
-rw-r--r--patches/x86-mce-use-swait-queue-for-mce-wakeups.patch6
-rw-r--r--patches/x86-preempt-lazy.patch10
-rw-r--r--patches/x86-use-gen-rwsem-spinlocks-rt.patch2
220 files changed, 1229 insertions, 10567 deletions
diff --git a/patches/0001-clk-at91-make-use-of-syscon-to-share-PMC-registers-i.patch b/patches/0001-clk-at91-make-use-of-syscon-to-share-PMC-registers-i.patch
deleted file mode 100644
index 62ad706a6aba34..00000000000000
--- a/patches/0001-clk-at91-make-use-of-syscon-to-share-PMC-registers-i.patch
+++ /dev/null
@@ -1,121 +0,0 @@
-From: Boris Brezillon <boris.brezillon@free-electrons.com>
-Date: Fri, 5 Sep 2014 09:54:13 +0200
-Subject: [PATCH 01/13] clk: at91: make use of syscon to share PMC registers in
- several drivers
-
-The PMC block is providing several functionnalities:
- - system clk management
- - cpuidle
- - platform suspend
-
-Replace the void __iomem *regs field by a regmap (retrieved using syscon)
-so that we can later share the regmap across several drivers without
-exporting a new specific API or a global void __iomem * variable.
-
-Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Acked-by: Stephen Boyd <sboyd@codeaurora.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/mach-at91/Kconfig | 1 +
- drivers/clk/at91/pmc.c | 12 ++++++++----
- drivers/clk/at91/pmc.h | 11 ++++++++---
- 3 files changed, 17 insertions(+), 7 deletions(-)
-
---- a/arch/arm/mach-at91/Kconfig
-+++ b/arch/arm/mach-at91/Kconfig
-@@ -99,6 +99,7 @@ config HAVE_AT91_USB_CLK
- config COMMON_CLK_AT91
- bool
- select COMMON_CLK
-+ select MFD_SYSCON
-
- config HAVE_AT91_SMD
- bool
---- a/drivers/clk/at91/pmc.c
-+++ b/drivers/clk/at91/pmc.c
-@@ -19,6 +19,7 @@
- #include <linux/irqchip/chained_irq.h>
- #include <linux/irqdomain.h>
- #include <linux/of_irq.h>
-+#include <linux/mfd/syscon.h>
-
- #include <asm/proc-fns.h>
-
-@@ -223,6 +224,7 @@ static const struct at91_pmc_caps sama5d
- };
-
- static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
-+ struct regmap *regmap,
- void __iomem *regbase, int virq,
- const struct at91_pmc_caps *caps)
- {
-@@ -238,7 +240,7 @@ static struct at91_pmc *__init at91_pmc_
- return NULL;
-
- spin_lock_init(&pmc->lock);
-- pmc->regbase = regbase;
-+ pmc->regmap = regmap;
- pmc->virq = virq;
- pmc->caps = caps;
-
-@@ -394,16 +396,18 @@ static void __init of_at91_pmc_setup(str
- void (*clk_setup)(struct device_node *, struct at91_pmc *);
- const struct of_device_id *clk_id;
- void __iomem *regbase = of_iomap(np, 0);
-+ struct regmap *regmap;
- int virq;
-
-- if (!regbase)
-- return;
-+ regmap = syscon_node_to_regmap(np);
-+ if (IS_ERR(regmap))
-+ panic("Could not retrieve syscon regmap");
-
- virq = irq_of_parse_and_map(np, 0);
- if (!virq)
- return;
-
-- pmc = at91_pmc_init(np, regbase, virq, caps);
-+ pmc = at91_pmc_init(np, regmap, regbase, virq, caps);
- if (!pmc)
- return;
- for_each_child_of_node(np, childnp) {
---- a/drivers/clk/at91/pmc.h
-+++ b/drivers/clk/at91/pmc.h
-@@ -14,6 +14,7 @@
-
- #include <linux/io.h>
- #include <linux/irqdomain.h>
-+#include <linux/regmap.h>
- #include <linux/spinlock.h>
-
- struct clk_range {
-@@ -28,7 +29,7 @@ struct at91_pmc_caps {
- };
-
- struct at91_pmc {
-- void __iomem *regbase;
-+ struct regmap *regmap;
- int virq;
- spinlock_t lock;
- const struct at91_pmc_caps *caps;
-@@ -48,12 +49,16 @@ static inline void pmc_unlock(struct at9
-
- static inline u32 pmc_read(struct at91_pmc *pmc, int offset)
- {
-- return readl(pmc->regbase + offset);
-+ unsigned int ret = 0;
-+
-+ regmap_read(pmc->regmap, offset, &ret);
-+
-+ return ret;
- }
-
- static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value)
- {
-- writel(value, pmc->regbase + offset);
-+ regmap_write(pmc->regmap, offset, value);
- }
-
- int of_at91_get_clk_range(struct device_node *np, const char *propname,
diff --git a/patches/0001-wait.-ch-Introduce-the-simple-waitqueue-swait-implem.patch b/patches/0001-wait.-ch-Introduce-the-simple-waitqueue-swait-implem.patch
deleted file mode 100644
index d0dade7f816510..00000000000000
--- a/patches/0001-wait.-ch-Introduce-the-simple-waitqueue-swait-implem.patch
+++ /dev/null
@@ -1,360 +0,0 @@
-From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
-Date: Fri, 19 Feb 2016 09:46:37 +0100
-Subject: [PATCH 1/5] wait.[ch]: Introduce the simple waitqueue (swait)
- implementation
-
-The existing wait queue support has support for custom wake up call
-backs, wake flags, wake key (passed to call back) and exclusive
-flags that allow wakers to be tagged as exclusive, for limiting
-the number of wakers.
-
-In a lot of cases, none of these features are used, and hence we
-can benefit from a slimmed down version that lowers memory overhead
-and reduces runtime overhead.
-
-The concept originated from -rt, where waitqueues are a constant
-source of trouble, as we can't convert the head lock to a raw
-spinlock due to fancy and long lasting callbacks.
-
-With the removal of custom callbacks, we can use a raw lock for
-queue list manipulations, hence allowing the simple wait support
-to be used in -rt.
-
-[Patch is from PeterZ which is based on Thomas version. Commit message is
- written by Paul G.
- Daniel: - Fixed some compile issues
- - Added non-lazy implementation of swake_up_locked as suggested
- by Boqun Feng.]
-
-Originally-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Cc: linux-rt-users@vger.kernel.org
-Cc: Boqun Feng <boqun.feng@gmail.com>
-Cc: Marcelo Tosatti <mtosatti@redhat.com>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Link: http://lkml.kernel.org/r/1455871601-27484-2-git-send-email-wagi@monom.org
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/linux/swait.h | 172 ++++++++++++++++++++++++++++++++++++++++++++++++++
- kernel/sched/Makefile | 2
- kernel/sched/swait.c | 123 +++++++++++++++++++++++++++++++++++
- 3 files changed, 296 insertions(+), 1 deletion(-)
- create mode 100644 include/linux/swait.h
- create mode 100644 kernel/sched/swait.c
-
---- /dev/null
-+++ b/include/linux/swait.h
-@@ -0,0 +1,172 @@
-+#ifndef _LINUX_SWAIT_H
-+#define _LINUX_SWAIT_H
-+
-+#include <linux/list.h>
-+#include <linux/stddef.h>
-+#include <linux/spinlock.h>
-+#include <asm/current.h>
-+
-+/*
-+ * Simple wait queues
-+ *
-+ * While these are very similar to the other/complex wait queues (wait.h) the
-+ * most important difference is that the simple waitqueue allows for
-+ * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
-+ * times.
-+ *
-+ * In order to make this so, we had to drop a fair number of features of the
-+ * other waitqueue code; notably:
-+ *
-+ * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
-+ * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
-+ * sleeper state.
-+ *
-+ * - the exclusive mode; because this requires preserving the list order
-+ * and this is hard.
-+ *
-+ * - custom wake functions; because you cannot give any guarantees about
-+ * random code.
-+ *
-+ * As a side effect of this; the data structures are slimmer.
-+ *
-+ * One would recommend using this wait queue where possible.
-+ */
-+
-+struct task_struct;
-+
-+struct swait_queue_head {
-+ raw_spinlock_t lock;
-+ struct list_head task_list;
-+};
-+
-+struct swait_queue {
-+ struct task_struct *task;
-+ struct list_head task_list;
-+};
-+
-+#define __SWAITQUEUE_INITIALIZER(name) { \
-+ .task = current, \
-+ .task_list = LIST_HEAD_INIT((name).task_list), \
-+}
-+
-+#define DECLARE_SWAITQUEUE(name) \
-+ struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)
-+
-+#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \
-+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
-+ .task_list = LIST_HEAD_INIT((name).task_list), \
-+}
-+
-+#define DECLARE_SWAIT_QUEUE_HEAD(name) \
-+ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)
-+
-+extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
-+ struct lock_class_key *key);
-+
-+#define init_swait_queue_head(q) \
-+ do { \
-+ static struct lock_class_key __key; \
-+ __init_swait_queue_head((q), #q, &__key); \
-+ } while (0)
-+
-+#ifdef CONFIG_LOCKDEP
-+# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
-+ ({ init_swait_queue_head(&name); name; })
-+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
-+ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
-+#else
-+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
-+ DECLARE_SWAIT_QUEUE_HEAD(name)
-+#endif
-+
-+static inline int swait_active(struct swait_queue_head *q)
-+{
-+ return !list_empty(&q->task_list);
-+}
-+
-+extern void swake_up(struct swait_queue_head *q);
-+extern void swake_up_all(struct swait_queue_head *q);
-+extern void swake_up_locked(struct swait_queue_head *q);
-+
-+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-+extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
-+extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
-+
-+extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
-+extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
-+
-+/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
-+#define ___swait_event(wq, condition, state, ret, cmd) \
-+({ \
-+ struct swait_queue __wait; \
-+ long __ret = ret; \
-+ \
-+ INIT_LIST_HEAD(&__wait.task_list); \
-+ for (;;) { \
-+ long __int = prepare_to_swait_event(&wq, &__wait, state);\
-+ \
-+ if (condition) \
-+ break; \
-+ \
-+ if (___wait_is_interruptible(state) && __int) { \
-+ __ret = __int; \
-+ break; \
-+ } \
-+ \
-+ cmd; \
-+ } \
-+ finish_swait(&wq, &__wait); \
-+ __ret; \
-+})
-+
-+#define __swait_event(wq, condition) \
-+ (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
-+ schedule())
-+
-+#define swait_event(wq, condition) \
-+do { \
-+ if (condition) \
-+ break; \
-+ __swait_event(wq, condition); \
-+} while (0)
-+
-+#define __swait_event_timeout(wq, condition, timeout) \
-+ ___swait_event(wq, ___wait_cond_timeout(condition), \
-+ TASK_UNINTERRUPTIBLE, timeout, \
-+ __ret = schedule_timeout(__ret))
-+
-+#define swait_event_timeout(wq, condition, timeout) \
-+({ \
-+ long __ret = timeout; \
-+ if (!___wait_cond_timeout(condition)) \
-+ __ret = __swait_event_timeout(wq, condition, timeout); \
-+ __ret; \
-+})
-+
-+#define __swait_event_interruptible(wq, condition) \
-+ ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
-+ schedule())
-+
-+#define swait_event_interruptible(wq, condition) \
-+({ \
-+ int __ret = 0; \
-+ if (!(condition)) \
-+ __ret = __swait_event_interruptible(wq, condition); \
-+ __ret; \
-+})
-+
-+#define __swait_event_interruptible_timeout(wq, condition, timeout) \
-+ ___swait_event(wq, ___wait_cond_timeout(condition), \
-+ TASK_INTERRUPTIBLE, timeout, \
-+ __ret = schedule_timeout(__ret))
-+
-+#define swait_event_interruptible_timeout(wq, condition, timeout) \
-+({ \
-+ long __ret = timeout; \
-+ if (!___wait_cond_timeout(condition)) \
-+ __ret = __swait_event_interruptible_timeout(wq, \
-+ condition, timeout); \
-+ __ret; \
-+})
-+
-+#endif /* _LINUX_SWAIT_H */
---- a/kernel/sched/Makefile
-+++ b/kernel/sched/Makefile
-@@ -13,7 +13,7 @@ endif
-
- obj-y += core.o loadavg.o clock.o cputime.o
- obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
--obj-y += wait.o completion.o idle.o
-+obj-y += wait.o swait.o completion.o idle.o
- obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
- obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
- obj-$(CONFIG_SCHEDSTATS) += stats.o
---- /dev/null
-+++ b/kernel/sched/swait.c
-@@ -0,0 +1,123 @@
-+#include <linux/sched.h>
-+#include <linux/swait.h>
-+
-+void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
-+ struct lock_class_key *key)
-+{
-+ raw_spin_lock_init(&q->lock);
-+ lockdep_set_class_and_name(&q->lock, key, name);
-+ INIT_LIST_HEAD(&q->task_list);
-+}
-+EXPORT_SYMBOL(__init_swait_queue_head);
-+
-+/*
-+ * The thing about the wake_up_state() return value; I think we can ignore it.
-+ *
-+ * If for some reason it would return 0, that means the previously waiting
-+ * task is already running, so it will observe condition true (or has already).
-+ */
-+void swake_up_locked(struct swait_queue_head *q)
-+{
-+ struct swait_queue *curr;
-+
-+ if (list_empty(&q->task_list))
-+ return;
-+
-+ curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
-+ wake_up_process(curr->task);
-+ list_del_init(&curr->task_list);
-+}
-+EXPORT_SYMBOL(swake_up_locked);
-+
-+void swake_up(struct swait_queue_head *q)
-+{
-+ unsigned long flags;
-+
-+ if (!swait_active(q))
-+ return;
-+
-+ raw_spin_lock_irqsave(&q->lock, flags);
-+ swake_up_locked(q);
-+ raw_spin_unlock_irqrestore(&q->lock, flags);
-+}
-+EXPORT_SYMBOL(swake_up);
-+
-+/*
-+ * Does not allow usage from IRQ disabled, since we must be able to
-+ * release IRQs to guarantee bounded hold time.
-+ */
-+void swake_up_all(struct swait_queue_head *q)
-+{
-+ struct swait_queue *curr;
-+ LIST_HEAD(tmp);
-+
-+ if (!swait_active(q))
-+ return;
-+
-+ raw_spin_lock_irq(&q->lock);
-+ list_splice_init(&q->task_list, &tmp);
-+ while (!list_empty(&tmp)) {
-+ curr = list_first_entry(&tmp, typeof(*curr), task_list);
-+
-+ wake_up_state(curr->task, TASK_NORMAL);
-+ list_del_init(&curr->task_list);
-+
-+ if (list_empty(&tmp))
-+ break;
-+
-+ raw_spin_unlock_irq(&q->lock);
-+ raw_spin_lock_irq(&q->lock);
-+ }
-+ raw_spin_unlock_irq(&q->lock);
-+}
-+EXPORT_SYMBOL(swake_up_all);
-+
-+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
-+{
-+ wait->task = current;
-+ if (list_empty(&wait->task_list))
-+ list_add(&wait->task_list, &q->task_list);
-+}
-+
-+void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
-+{
-+ unsigned long flags;
-+
-+ raw_spin_lock_irqsave(&q->lock, flags);
-+ __prepare_to_swait(q, wait);
-+ set_current_state(state);
-+ raw_spin_unlock_irqrestore(&q->lock, flags);
-+}
-+EXPORT_SYMBOL(prepare_to_swait);
-+
-+long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
-+{
-+ if (signal_pending_state(state, current))
-+ return -ERESTARTSYS;
-+
-+ prepare_to_swait(q, wait, state);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(prepare_to_swait_event);
-+
-+void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
-+{
-+ __set_current_state(TASK_RUNNING);
-+ if (!list_empty(&wait->task_list))
-+ list_del_init(&wait->task_list);
-+}
-+
-+void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
-+{
-+ unsigned long flags;
-+
-+ __set_current_state(TASK_RUNNING);
-+
-+ if (!list_empty_careful(&wait->task_list)) {
-+ raw_spin_lock_irqsave(&q->lock, flags);
-+ list_del_init(&wait->task_list);
-+ raw_spin_unlock_irqrestore(&q->lock, flags);
-+ }
-+}
-+EXPORT_SYMBOL(finish_swait);
diff --git a/patches/0002-clk-at91-make-use-of-syscon-regmap-internally.patch b/patches/0002-clk-at91-make-use-of-syscon-regmap-internally.patch
deleted file mode 100644
index ab73e058c4d1d8..00000000000000
--- a/patches/0002-clk-at91-make-use-of-syscon-regmap-internally.patch
+++ /dev/null
@@ -1,3252 +0,0 @@
-From: Boris Brezillon <boris.brezillon@free-electrons.com>
-Date: Sun, 7 Sep 2014 08:14:29 +0200
-Subject: [PATCH 02/13] clk: at91: make use of syscon/regmap internally
-
-Use the regmap coming from syscon to access the registers instead of using
-pmc_read/pmc_write. This allows to avoid passing the at91_pmc structure to
-the child nodes of the PMC.
-
-The final benefit is to have each clock register itself instead of having
-to iterate over the children.
-
-Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
-Acked-by: Stephen Boyd <sboyd@codeaurora.org>
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clk/at91/clk-generated.c | 91 +++++++------
- drivers/clk/at91/clk-h32mx.c | 32 ++--
- drivers/clk/at91/clk-main.c | 248 +++++++++++++++++++++++-------------
- drivers/clk/at91/clk-master.c | 68 ++++++---
- drivers/clk/at91/clk-peripheral.c | 134 +++++++++++--------
- drivers/clk/at91/clk-pll.c | 119 ++++++++++-------
- drivers/clk/at91/clk-plldiv.c | 42 ++----
- drivers/clk/at91/clk-programmable.c | 92 +++++++------
- drivers/clk/at91/clk-slow.c | 26 ++-
- drivers/clk/at91/clk-smd.c | 54 ++++---
- drivers/clk/at91/clk-system.c | 60 +++++---
- drivers/clk/at91/clk-usb.c | 121 +++++++++--------
- drivers/clk/at91/clk-utmi.c | 53 ++++---
- drivers/clk/at91/pmc.c | 155 +---------------------
- drivers/clk/at91/pmc.h | 89 ------------
- 15 files changed, 691 insertions(+), 693 deletions(-)
-
---- a/drivers/clk/at91/clk-generated.c
-+++ b/drivers/clk/at91/clk-generated.c
-@@ -17,6 +17,8 @@
- #include <linux/of.h>
- #include <linux/of_address.h>
- #include <linux/io.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-
- #include "pmc.h"
-
-@@ -28,8 +30,9 @@
-
- struct clk_generated {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- struct clk_range range;
-+ spinlock_t *lock;
- u32 id;
- u32 gckdiv;
- u8 parent_id;
-@@ -41,49 +44,52 @@ struct clk_generated {
- static int clk_generated_enable(struct clk_hw *hw)
- {
- struct clk_generated *gck = to_clk_generated(hw);
-- struct at91_pmc *pmc = gck->pmc;
-- u32 tmp;
-+ unsigned long flags;
-
- pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
- __func__, gck->gckdiv, gck->parent_id);
-
-- pmc_lock(pmc);
-- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
-- tmp = pmc_read(pmc, AT91_PMC_PCR) &
-- ~(AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK);
-- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_GCKCSS(gck->parent_id)
-- | AT91_PMC_PCR_CMD
-- | AT91_PMC_PCR_GCKDIV(gck->gckdiv)
-- | AT91_PMC_PCR_GCKEN);
-- pmc_unlock(pmc);
-+ spin_lock_irqsave(gck->lock, flags);
-+ regmap_write(gck->regmap, AT91_PMC_PCR,
-+ (gck->id & AT91_PMC_PCR_PID_MASK));
-+ regmap_update_bits(gck->regmap, AT91_PMC_PCR,
-+ AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK |
-+ AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
-+ AT91_PMC_PCR_GCKCSS(gck->parent_id) |
-+ AT91_PMC_PCR_CMD |
-+ AT91_PMC_PCR_GCKDIV(gck->gckdiv) |
-+ AT91_PMC_PCR_GCKEN);
-+ spin_unlock_irqrestore(gck->lock, flags);
- return 0;
- }
-
- static void clk_generated_disable(struct clk_hw *hw)
- {
- struct clk_generated *gck = to_clk_generated(hw);
-- struct at91_pmc *pmc = gck->pmc;
-- u32 tmp;
-+ unsigned long flags;
-
-- pmc_lock(pmc);
-- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
-- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_GCKEN;
-- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD);
-- pmc_unlock(pmc);
-+ spin_lock_irqsave(gck->lock, flags);
-+ regmap_write(gck->regmap, AT91_PMC_PCR,
-+ (gck->id & AT91_PMC_PCR_PID_MASK));
-+ regmap_update_bits(gck->regmap, AT91_PMC_PCR,
-+ AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
-+ AT91_PMC_PCR_CMD);
-+ spin_unlock_irqrestore(gck->lock, flags);
- }
-
- static int clk_generated_is_enabled(struct clk_hw *hw)
- {
- struct clk_generated *gck = to_clk_generated(hw);
-- struct at91_pmc *pmc = gck->pmc;
-- int ret;
-+ unsigned long flags;
-+ unsigned int status;
-
-- pmc_lock(pmc);
-- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
-- ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_GCKEN);
-- pmc_unlock(pmc);
-+ spin_lock_irqsave(gck->lock, flags);
-+ regmap_write(gck->regmap, AT91_PMC_PCR,
-+ (gck->id & AT91_PMC_PCR_PID_MASK));
-+ regmap_read(gck->regmap, AT91_PMC_PCR, &status);
-+ spin_unlock_irqrestore(gck->lock, flags);
-
-- return ret;
-+ return status & AT91_PMC_PCR_GCKEN ? 1 : 0;
- }
-
- static unsigned long
-@@ -214,13 +220,14 @@ static const struct clk_ops generated_op
- */
- static void clk_generated_startup(struct clk_generated *gck)
- {
-- struct at91_pmc *pmc = gck->pmc;
- u32 tmp;
-+ unsigned long flags;
-
-- pmc_lock(pmc);
-- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
-- tmp = pmc_read(pmc, AT91_PMC_PCR);
-- pmc_unlock(pmc);
-+ spin_lock_irqsave(gck->lock, flags);
-+ regmap_write(gck->regmap, AT91_PMC_PCR,
-+ (gck->id & AT91_PMC_PCR_PID_MASK));
-+ regmap_read(gck->regmap, AT91_PMC_PCR, &tmp);
-+ spin_unlock_irqrestore(gck->lock, flags);
-
- gck->parent_id = (tmp & AT91_PMC_PCR_GCKCSS_MASK)
- >> AT91_PMC_PCR_GCKCSS_OFFSET;
-@@ -229,8 +236,8 @@ static void clk_generated_startup(struct
- }
-
- static struct clk * __init
--at91_clk_register_generated(struct at91_pmc *pmc, const char *name,
-- const char **parent_names, u8 num_parents,
-+at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const char
-+ *name, const char **parent_names, u8 num_parents,
- u8 id, const struct clk_range *range)
- {
- struct clk_generated *gck;
-@@ -249,7 +256,8 @@ at91_clk_register_generated(struct at91_
-
- gck->id = id;
- gck->hw.init = &init;
-- gck->pmc = pmc;
-+ gck->regmap = regmap;
-+ gck->lock = lock;
- gck->range = *range;
-
- clk = clk_register(NULL, &gck->hw);
-@@ -261,8 +269,7 @@ at91_clk_register_generated(struct at91_
- return clk;
- }
-
--void __init of_sama5d2_clk_generated_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+void __init of_sama5d2_clk_generated_setup(struct device_node *np)
- {
- int num;
- u32 id;
-@@ -272,6 +279,7 @@ void __init of_sama5d2_clk_generated_set
- const char *parent_names[GENERATED_SOURCE_MAX];
- struct device_node *gcknp;
- struct clk_range range = CLK_RANGE(0, 0);
-+ struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > GENERATED_SOURCE_MAX)
-@@ -283,6 +291,10 @@ void __init of_sama5d2_clk_generated_set
- if (!num || num > PERIPHERAL_MAX)
- return;
-
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
- for_each_child_of_node(np, gcknp) {
- if (of_property_read_u32(gcknp, "reg", &id))
- continue;
-@@ -296,11 +308,14 @@ void __init of_sama5d2_clk_generated_set
- of_at91_get_clk_range(gcknp, "atmel,clk-output-range",
- &range);
-
-- clk = at91_clk_register_generated(pmc, name, parent_names,
-- num_parents, id, &range);
-+ clk = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
-+ parent_names, num_parents,
-+ id, &range);
- if (IS_ERR(clk))
- continue;
-
- of_clk_add_provider(gcknp, of_clk_src_simple_get, clk);
- }
- }
-+CLK_OF_DECLARE(of_sama5d2_clk_generated_setup, "atmel,sama5d2-clk-generated",
-+ of_sama5d2_clk_generated_setup);
---- a/drivers/clk/at91/clk-h32mx.c
-+++ b/drivers/clk/at91/clk-h32mx.c
-@@ -24,6 +24,8 @@
- #include <linux/irq.h>
- #include <linux/sched.h>
- #include <linux/wait.h>
-+#include <linux/regmap.h>
-+#include <linux/mfd/syscon.h>
-
- #include "pmc.h"
-
-@@ -31,7 +33,7 @@
-
- struct clk_sama5d4_h32mx {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- };
-
- #define to_clk_sama5d4_h32mx(hw) container_of(hw, struct clk_sama5d4_h32mx, hw)
-@@ -40,8 +42,10 @@ static unsigned long clk_sama5d4_h32mx_r
- unsigned long parent_rate)
- {
- struct clk_sama5d4_h32mx *h32mxclk = to_clk_sama5d4_h32mx(hw);
-+ unsigned int mckr;
-
-- if (pmc_read(h32mxclk->pmc, AT91_PMC_MCKR) & AT91_PMC_H32MXDIV)
-+ regmap_read(h32mxclk->regmap, AT91_PMC_MCKR, &mckr);
-+ if (mckr & AT91_PMC_H32MXDIV)
- return parent_rate / 2;
-
- if (parent_rate > H32MX_MAX_FREQ)
-@@ -70,18 +74,16 @@ static int clk_sama5d4_h32mx_set_rate(st
- unsigned long parent_rate)
- {
- struct clk_sama5d4_h32mx *h32mxclk = to_clk_sama5d4_h32mx(hw);
-- struct at91_pmc *pmc = h32mxclk->pmc;
-- u32 tmp;
-+ u32 mckr = 0;
-
- if (parent_rate != rate && (parent_rate / 2) != rate)
- return -EINVAL;
-
-- pmc_lock(pmc);
-- tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_H32MXDIV;
- if ((parent_rate / 2) == rate)
-- tmp |= AT91_PMC_H32MXDIV;
-- pmc_write(pmc, AT91_PMC_MCKR, tmp);
-- pmc_unlock(pmc);
-+ mckr = AT91_PMC_H32MXDIV;
-+
-+ regmap_update_bits(h32mxclk->regmap, AT91_PMC_MCKR,
-+ AT91_PMC_H32MXDIV, mckr);
-
- return 0;
- }
-@@ -92,14 +94,18 @@ static const struct clk_ops h32mx_ops =
- .set_rate = clk_sama5d4_h32mx_set_rate,
- };
-
--void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np)
- {
- struct clk_sama5d4_h32mx *h32mxclk;
- struct clk_init_data init;
- const char *parent_name;
-+ struct regmap *regmap;
- struct clk *clk;
-
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
- h32mxclk = kzalloc(sizeof(*h32mxclk), GFP_KERNEL);
- if (!h32mxclk)
- return;
-@@ -113,7 +119,7 @@ void __init of_sama5d4_clk_h32mx_setup(s
- init.flags = CLK_SET_RATE_GATE;
-
- h32mxclk->hw.init = &init;
-- h32mxclk->pmc = pmc;
-+ h32mxclk->regmap = regmap;
-
- clk = clk_register(NULL, &h32mxclk->hw);
- if (!clk) {
-@@ -123,3 +129,5 @@ void __init of_sama5d4_clk_h32mx_setup(s
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- }
-+CLK_OF_DECLARE(of_sama5d4_clk_h32mx_setup, "atmel,sama5d4-clk-h32mx",
-+ of_sama5d4_clk_h32mx_setup);
---- a/drivers/clk/at91/clk-main.c
-+++ b/drivers/clk/at91/clk-main.c
-@@ -18,6 +18,8 @@
- #include <linux/io.h>
- #include <linux/interrupt.h>
- #include <linux/irq.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
- #include <linux/sched.h>
- #include <linux/wait.h>
-
-@@ -34,7 +36,7 @@
-
- struct clk_main_osc {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- unsigned int irq;
- wait_queue_head_t wait;
- };
-@@ -43,7 +45,7 @@ struct clk_main_osc {
-
- struct clk_main_rc_osc {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- unsigned int irq;
- wait_queue_head_t wait;
- unsigned long frequency;
-@@ -54,14 +56,14 @@ struct clk_main_rc_osc {
-
- struct clk_rm9200_main {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- };
-
- #define to_clk_rm9200_main(hw) container_of(hw, struct clk_rm9200_main, hw)
-
- struct clk_sam9x5_main {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- unsigned int irq;
- wait_queue_head_t wait;
- u8 parent;
-@@ -79,25 +81,36 @@ static irqreturn_t clk_main_osc_irq_hand
- return IRQ_HANDLED;
- }
-
-+static inline bool clk_main_osc_ready(struct regmap *regmap)
-+{
-+ unsigned int status;
-+
-+ regmap_read(regmap, AT91_PMC_SR, &status);
-+
-+ return status & AT91_PMC_MOSCS;
-+}
-+
- static int clk_main_osc_prepare(struct clk_hw *hw)
- {
- struct clk_main_osc *osc = to_clk_main_osc(hw);
-- struct at91_pmc *pmc = osc->pmc;
-+ struct regmap *regmap = osc->regmap;
- u32 tmp;
-
-- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
-+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
-+ tmp &= ~MOR_KEY_MASK;
-+
- if (tmp & AT91_PMC_OSCBYPASS)
- return 0;
-
- if (!(tmp & AT91_PMC_MOSCEN)) {
- tmp |= AT91_PMC_MOSCEN | AT91_PMC_KEY;
-- pmc_write(pmc, AT91_CKGR_MOR, tmp);
-+ regmap_write(regmap, AT91_CKGR_MOR, tmp);
- }
-
-- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS)) {
-+ while (!clk_main_osc_ready(regmap)) {
- enable_irq(osc->irq);
- wait_event(osc->wait,
-- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS);
-+ clk_main_osc_ready(regmap));
- }
-
- return 0;
-@@ -106,9 +119,10 @@ static int clk_main_osc_prepare(struct c
- static void clk_main_osc_unprepare(struct clk_hw *hw)
- {
- struct clk_main_osc *osc = to_clk_main_osc(hw);
-- struct at91_pmc *pmc = osc->pmc;
-- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
-+ struct regmap *regmap = osc->regmap;
-+ u32 tmp;
-
-+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
- if (tmp & AT91_PMC_OSCBYPASS)
- return;
-
-@@ -116,20 +130,22 @@ static void clk_main_osc_unprepare(struc
- return;
-
- tmp &= ~(AT91_PMC_KEY | AT91_PMC_MOSCEN);
-- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
-+ regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
- }
-
- static int clk_main_osc_is_prepared(struct clk_hw *hw)
- {
- struct clk_main_osc *osc = to_clk_main_osc(hw);
-- struct at91_pmc *pmc = osc->pmc;
-- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
-+ struct regmap *regmap = osc->regmap;
-+ u32 tmp, status;
-
-+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
- if (tmp & AT91_PMC_OSCBYPASS)
- return 1;
-
-- return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS) &&
-- (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN));
-+ regmap_read(regmap, AT91_PMC_SR, &status);
-+
-+ return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
- }
-
- static const struct clk_ops main_osc_ops = {
-@@ -139,7 +155,7 @@ static const struct clk_ops main_osc_ops
- };
-
- static struct clk * __init
--at91_clk_register_main_osc(struct at91_pmc *pmc,
-+at91_clk_register_main_osc(struct regmap *regmap,
- unsigned int irq,
- const char *name,
- const char *parent_name,
-@@ -150,7 +166,7 @@ at91_clk_register_main_osc(struct at91_p
- struct clk *clk = NULL;
- struct clk_init_data init;
-
-- if (!pmc || !irq || !name || !parent_name)
-+ if (!irq || !name || !parent_name)
- return ERR_PTR(-EINVAL);
-
- osc = kzalloc(sizeof(*osc), GFP_KERNEL);
-@@ -164,7 +180,7 @@ at91_clk_register_main_osc(struct at91_p
- init.flags = CLK_IGNORE_UNUSED;
-
- osc->hw.init = &init;
-- osc->pmc = pmc;
-+ osc->regmap = regmap;
- osc->irq = irq;
-
- init_waitqueue_head(&osc->wait);
-@@ -177,10 +193,10 @@ at91_clk_register_main_osc(struct at91_p
- }
-
- if (bypass)
-- pmc_write(pmc, AT91_CKGR_MOR,
-- (pmc_read(pmc, AT91_CKGR_MOR) &
-- ~(MOR_KEY_MASK | AT91_PMC_MOSCEN)) |
-- AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
-+ regmap_update_bits(regmap,
-+ AT91_CKGR_MOR, MOR_KEY_MASK |
-+ AT91_PMC_MOSCEN,
-+ AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
-
- clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk)) {
-@@ -191,29 +207,35 @@ at91_clk_register_main_osc(struct at91_p
- return clk;
- }
-
--void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np)
- {
- struct clk *clk;
- unsigned int irq;
- const char *name = np->name;
- const char *parent_name;
-+ struct regmap *regmap;
- bool bypass;
-
- of_property_read_string(np, "clock-output-names", &name);
- bypass = of_property_read_bool(np, "atmel,osc-bypass");
- parent_name = of_clk_get_parent_name(np, 0);
-
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
- return;
-
-- clk = at91_clk_register_main_osc(pmc, irq, name, parent_name, bypass);
-+ clk = at91_clk_register_main_osc(regmap, irq, name, parent_name, bypass);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- }
-+CLK_OF_DECLARE(at91rm9200_clk_main_osc, "atmel,at91rm9200-clk-main-osc",
-+ of_at91rm9200_clk_main_osc_setup);
-
- static irqreturn_t clk_main_rc_osc_irq_handler(int irq, void *dev_id)
- {
-@@ -225,23 +247,32 @@ static irqreturn_t clk_main_rc_osc_irq_h
- return IRQ_HANDLED;
- }
-
-+static bool clk_main_rc_osc_ready(struct regmap *regmap)
-+{
-+ unsigned int status;
-+
-+ regmap_read(regmap, AT91_PMC_SR, &status);
-+
-+ return status & AT91_PMC_MOSCRCS;
-+}
-+
- static int clk_main_rc_osc_prepare(struct clk_hw *hw)
- {
- struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
-- struct at91_pmc *pmc = osc->pmc;
-- u32 tmp;
-+ struct regmap *regmap = osc->regmap;
-+ unsigned int mor;
-
-- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
-+ regmap_read(regmap, AT91_CKGR_MOR, &mor);
-
-- if (!(tmp & AT91_PMC_MOSCRCEN)) {
-- tmp |= AT91_PMC_MOSCRCEN | AT91_PMC_KEY;
-- pmc_write(pmc, AT91_CKGR_MOR, tmp);
-- }
-+ if (!(mor & AT91_PMC_MOSCRCEN))
-+ regmap_update_bits(regmap, AT91_CKGR_MOR,
-+ MOR_KEY_MASK | AT91_PMC_MOSCRCEN,
-+ AT91_PMC_MOSCRCEN | AT91_PMC_KEY);
-
-- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS)) {
-+ while (!clk_main_rc_osc_ready(regmap)) {
- enable_irq(osc->irq);
- wait_event(osc->wait,
-- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS);
-+ clk_main_rc_osc_ready(regmap));
- }
-
- return 0;
-@@ -250,23 +281,28 @@ static int clk_main_rc_osc_prepare(struc
- static void clk_main_rc_osc_unprepare(struct clk_hw *hw)
- {
- struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
-- struct at91_pmc *pmc = osc->pmc;
-- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
-+ struct regmap *regmap = osc->regmap;
-+ unsigned int mor;
-
-- if (!(tmp & AT91_PMC_MOSCRCEN))
-+ regmap_read(regmap, AT91_CKGR_MOR, &mor);
-+
-+ if (!(mor & AT91_PMC_MOSCRCEN))
- return;
-
-- tmp &= ~(MOR_KEY_MASK | AT91_PMC_MOSCRCEN);
-- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
-+ regmap_update_bits(regmap, AT91_CKGR_MOR,
-+ MOR_KEY_MASK | AT91_PMC_MOSCRCEN, AT91_PMC_KEY);
- }
-
- static int clk_main_rc_osc_is_prepared(struct clk_hw *hw)
- {
- struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
-- struct at91_pmc *pmc = osc->pmc;
-+ struct regmap *regmap = osc->regmap;
-+ unsigned int mor, status;
-+
-+ regmap_read(regmap, AT91_CKGR_MOR, &mor);
-+ regmap_read(regmap, AT91_PMC_SR, &status);
-
-- return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS) &&
-- (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCRCEN));
-+ return (mor & AT91_PMC_MOSCRCEN) && (status & AT91_PMC_MOSCRCS);
- }
-
- static unsigned long clk_main_rc_osc_recalc_rate(struct clk_hw *hw,
-@@ -294,7 +330,7 @@ static const struct clk_ops main_rc_osc_
- };
-
- static struct clk * __init
--at91_clk_register_main_rc_osc(struct at91_pmc *pmc,
-+at91_clk_register_main_rc_osc(struct regmap *regmap,
- unsigned int irq,
- const char *name,
- u32 frequency, u32 accuracy)
-@@ -304,7 +340,7 @@ at91_clk_register_main_rc_osc(struct at9
- struct clk *clk = NULL;
- struct clk_init_data init;
-
-- if (!pmc || !irq || !name || !frequency)
-+ if (!name || !frequency)
- return ERR_PTR(-EINVAL);
-
- osc = kzalloc(sizeof(*osc), GFP_KERNEL);
-@@ -318,7 +354,7 @@ at91_clk_register_main_rc_osc(struct at9
- init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED;
-
- osc->hw.init = &init;
-- osc->pmc = pmc;
-+ osc->regmap = regmap;
- osc->irq = irq;
- osc->frequency = frequency;
- osc->accuracy = accuracy;
-@@ -339,14 +375,14 @@ at91_clk_register_main_rc_osc(struct at9
- return clk;
- }
-
--void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np)
- {
- struct clk *clk;
- unsigned int irq;
- u32 frequency = 0;
- u32 accuracy = 0;
- const char *name = np->name;
-+ struct regmap *regmap;
-
- of_property_read_string(np, "clock-output-names", &name);
- of_property_read_u32(np, "clock-frequency", &frequency);
-@@ -356,25 +392,31 @@ void __init of_at91sam9x5_clk_main_rc_os
- if (!irq)
- return;
-
-- clk = at91_clk_register_main_rc_osc(pmc, irq, name, frequency,
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
-+ clk = at91_clk_register_main_rc_osc(regmap, irq, name, frequency,
- accuracy);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- }
-+CLK_OF_DECLARE(at91sam9x5_clk_main_rc_osc, "atmel,at91sam9x5-clk-main-rc-osc",
-+ of_at91sam9x5_clk_main_rc_osc_setup);
-
-
--static int clk_main_probe_frequency(struct at91_pmc *pmc)
-+static int clk_main_probe_frequency(struct regmap *regmap)
- {
- unsigned long prep_time, timeout;
-- u32 tmp;
-+ unsigned int mcfr;
-
- timeout = jiffies + usecs_to_jiffies(MAINFRDY_TIMEOUT);
- do {
- prep_time = jiffies;
-- tmp = pmc_read(pmc, AT91_CKGR_MCFR);
-- if (tmp & AT91_PMC_MAINRDY)
-+ regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
-+ if (mcfr & AT91_PMC_MAINRDY)
- return 0;
- usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
- } while (time_before(prep_time, timeout));
-@@ -382,34 +424,37 @@ static int clk_main_probe_frequency(stru
- return -ETIMEDOUT;
- }
-
--static unsigned long clk_main_recalc_rate(struct at91_pmc *pmc,
-+static unsigned long clk_main_recalc_rate(struct regmap *regmap,
- unsigned long parent_rate)
- {
-- u32 tmp;
-+ unsigned int mcfr;
-
- if (parent_rate)
- return parent_rate;
-
- pr_warn("Main crystal frequency not set, using approximate value\n");
-- tmp = pmc_read(pmc, AT91_CKGR_MCFR);
-- if (!(tmp & AT91_PMC_MAINRDY))
-+ regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
-+ if (!(mcfr & AT91_PMC_MAINRDY))
- return 0;
-
-- return ((tmp & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV;
-+ return ((mcfr & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV;
- }
-
- static int clk_rm9200_main_prepare(struct clk_hw *hw)
- {
- struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
-
-- return clk_main_probe_frequency(clkmain->pmc);
-+ return clk_main_probe_frequency(clkmain->regmap);
- }
-
- static int clk_rm9200_main_is_prepared(struct clk_hw *hw)
- {
- struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
-+ unsigned int status;
-+
-+ regmap_read(clkmain->regmap, AT91_CKGR_MCFR, &status);
-
-- return !!(pmc_read(clkmain->pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINRDY);
-+ return status & AT91_PMC_MAINRDY ? 1 : 0;
- }
-
- static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw,
-@@ -417,7 +462,7 @@ static unsigned long clk_rm9200_main_rec
- {
- struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
-
-- return clk_main_recalc_rate(clkmain->pmc, parent_rate);
-+ return clk_main_recalc_rate(clkmain->regmap, parent_rate);
- }
-
- static const struct clk_ops rm9200_main_ops = {
-@@ -427,7 +472,7 @@ static const struct clk_ops rm9200_main_
- };
-
- static struct clk * __init
--at91_clk_register_rm9200_main(struct at91_pmc *pmc,
-+at91_clk_register_rm9200_main(struct regmap *regmap,
- const char *name,
- const char *parent_name)
- {
-@@ -435,7 +480,7 @@ at91_clk_register_rm9200_main(struct at9
- struct clk *clk = NULL;
- struct clk_init_data init;
-
-- if (!pmc || !name)
-+ if (!name)
- return ERR_PTR(-EINVAL);
-
- if (!parent_name)
-@@ -452,7 +497,7 @@ at91_clk_register_rm9200_main(struct at9
- init.flags = 0;
-
- clkmain->hw.init = &init;
-- clkmain->pmc = pmc;
-+ clkmain->regmap = regmap;
-
- clk = clk_register(NULL, &clkmain->hw);
- if (IS_ERR(clk))
-@@ -461,22 +506,28 @@ at91_clk_register_rm9200_main(struct at9
- return clk;
- }
-
--void __init of_at91rm9200_clk_main_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91rm9200_clk_main_setup(struct device_node *np)
- {
- struct clk *clk;
- const char *parent_name;
- const char *name = np->name;
-+ struct regmap *regmap;
-
- parent_name = of_clk_get_parent_name(np, 0);
- of_property_read_string(np, "clock-output-names", &name);
-
-- clk = at91_clk_register_rm9200_main(pmc, name, parent_name);
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
-+ clk = at91_clk_register_rm9200_main(regmap, name, parent_name);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- }
-+CLK_OF_DECLARE(at91rm9200_clk_main, "atmel,at91rm9200-clk-main",
-+ of_at91rm9200_clk_main_setup);
-
- static irqreturn_t clk_sam9x5_main_irq_handler(int irq, void *dev_id)
- {
-@@ -488,25 +539,34 @@ static irqreturn_t clk_sam9x5_main_irq_h
- return IRQ_HANDLED;
- }
-
-+static inline bool clk_sam9x5_main_ready(struct regmap *regmap)
-+{
-+ unsigned int status;
-+
-+ regmap_read(regmap, AT91_PMC_SR, &status);
-+
-+ return status & AT91_PMC_MOSCSELS ? 1 : 0;
-+}
-+
- static int clk_sam9x5_main_prepare(struct clk_hw *hw)
- {
- struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
-- struct at91_pmc *pmc = clkmain->pmc;
-+ struct regmap *regmap = clkmain->regmap;
-
-- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) {
-+ while (!clk_sam9x5_main_ready(regmap)) {
- enable_irq(clkmain->irq);
- wait_event(clkmain->wait,
-- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
-+ clk_sam9x5_main_ready(regmap));
- }
-
-- return clk_main_probe_frequency(pmc);
-+ return clk_main_probe_frequency(regmap);
- }
-
- static int clk_sam9x5_main_is_prepared(struct clk_hw *hw)
- {
- struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
-
-- return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
-+ return clk_sam9x5_main_ready(clkmain->regmap);
- }
-
- static unsigned long clk_sam9x5_main_recalc_rate(struct clk_hw *hw,
-@@ -514,29 +574,30 @@ static unsigned long clk_sam9x5_main_rec
- {
- struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
-
-- return clk_main_recalc_rate(clkmain->pmc, parent_rate);
-+ return clk_main_recalc_rate(clkmain->regmap, parent_rate);
- }
-
- static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
- {
- struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
-- struct at91_pmc *pmc = clkmain->pmc;
-- u32 tmp;
-+ struct regmap *regmap = clkmain->regmap;
-+ unsigned int tmp;
-
- if (index > 1)
- return -EINVAL;
-
-- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
-+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
-+ tmp &= ~MOR_KEY_MASK;
-
- if (index && !(tmp & AT91_PMC_MOSCSEL))
-- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
-+ regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
- else if (!index && (tmp & AT91_PMC_MOSCSEL))
-- pmc_write(pmc, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
-+ regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
-
-- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) {
-+ while (!clk_sam9x5_main_ready(regmap)) {
- enable_irq(clkmain->irq);
- wait_event(clkmain->wait,
-- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
-+ clk_sam9x5_main_ready(regmap));
- }
-
- return 0;
-@@ -545,8 +606,11 @@ static int clk_sam9x5_main_set_parent(st
- static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
- {
- struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
-+ unsigned int status;
-
-- return !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN);
-+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
-+
-+ return status & AT91_PMC_MOSCEN ? 1 : 0;
- }
-
- static const struct clk_ops sam9x5_main_ops = {
-@@ -558,7 +622,7 @@ static const struct clk_ops sam9x5_main_
- };
-
- static struct clk * __init
--at91_clk_register_sam9x5_main(struct at91_pmc *pmc,
-+at91_clk_register_sam9x5_main(struct regmap *regmap,
- unsigned int irq,
- const char *name,
- const char **parent_names,
-@@ -568,8 +632,9 @@ at91_clk_register_sam9x5_main(struct at9
- struct clk_sam9x5_main *clkmain;
- struct clk *clk = NULL;
- struct clk_init_data init;
-+ unsigned int status;
-
-- if (!pmc || !irq || !name)
-+ if (!name)
- return ERR_PTR(-EINVAL);
-
- if (!parent_names || !num_parents)
-@@ -586,10 +651,10 @@ at91_clk_register_sam9x5_main(struct at9
- init.flags = CLK_SET_PARENT_GATE;
-
- clkmain->hw.init = &init;
-- clkmain->pmc = pmc;
-+ clkmain->regmap = regmap;
- clkmain->irq = irq;
-- clkmain->parent = !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) &
-- AT91_PMC_MOSCEN);
-+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
-+ clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
- init_waitqueue_head(&clkmain->wait);
- irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN);
- ret = request_irq(clkmain->irq, clk_sam9x5_main_irq_handler,
-@@ -606,20 +671,23 @@ at91_clk_register_sam9x5_main(struct at9
- return clk;
- }
-
--void __init of_at91sam9x5_clk_main_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9x5_clk_main_setup(struct device_node *np)
- {
- struct clk *clk;
- const char *parent_names[2];
- int num_parents;
- unsigned int irq;
- const char *name = np->name;
-+ struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > 2)
- return;
-
- of_clk_parent_fill(np, parent_names, num_parents);
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-
- of_property_read_string(np, "clock-output-names", &name);
-
-@@ -627,10 +695,12 @@ void __init of_at91sam9x5_clk_main_setup
- if (!irq)
- return;
-
-- clk = at91_clk_register_sam9x5_main(pmc, irq, name, parent_names,
-+ clk = at91_clk_register_sam9x5_main(regmap, irq, name, parent_names,
- num_parents);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- }
-+CLK_OF_DECLARE(at91sam9x5_clk_main, "atmel,at91sam9x5-clk-main",
-+ of_at91sam9x5_clk_main_setup);
---- a/drivers/clk/at91/clk-master.c
-+++ b/drivers/clk/at91/clk-master.c
-@@ -19,6 +19,8 @@
- #include <linux/sched.h>
- #include <linux/interrupt.h>
- #include <linux/irq.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-
- #include "pmc.h"
-
-@@ -44,7 +46,7 @@ struct clk_master_layout {
-
- struct clk_master {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- unsigned int irq;
- wait_queue_head_t wait;
- const struct clk_master_layout *layout;
-@@ -60,15 +62,24 @@ static irqreturn_t clk_master_irq_handle
-
- return IRQ_HANDLED;
- }
-+
-+static inline bool clk_master_ready(struct regmap *regmap)
-+{
-+ unsigned int status;
-+
-+ regmap_read(regmap, AT91_PMC_SR, &status);
-+
-+ return status & AT91_PMC_MCKRDY ? 1 : 0;
-+}
-+
- static int clk_master_prepare(struct clk_hw *hw)
- {
- struct clk_master *master = to_clk_master(hw);
-- struct at91_pmc *pmc = master->pmc;
-
-- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY)) {
-+ while (!clk_master_ready(master->regmap)) {
- enable_irq(master->irq);
- wait_event(master->wait,
-- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
-+ clk_master_ready(master->regmap));
- }
-
- return 0;
-@@ -78,7 +89,7 @@ static int clk_master_is_prepared(struct
- {
- struct clk_master *master = to_clk_master(hw);
-
-- return !!(pmc_read(master->pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
-+ return clk_master_ready(master->regmap);
- }
-
- static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
-@@ -88,18 +99,16 @@ static unsigned long clk_master_recalc_r
- u8 div;
- unsigned long rate = parent_rate;
- struct clk_master *master = to_clk_master(hw);
-- struct at91_pmc *pmc = master->pmc;
- const struct clk_master_layout *layout = master->layout;
- const struct clk_master_characteristics *characteristics =
- master->characteristics;
-- u32 tmp;
-+ unsigned int mckr;
-
-- pmc_lock(pmc);
-- tmp = pmc_read(pmc, AT91_PMC_MCKR) & layout->mask;
-- pmc_unlock(pmc);
-+ regmap_read(master->regmap, AT91_PMC_MCKR, &mckr);
-+ mckr &= layout->mask;
-
-- pres = (tmp >> layout->pres_shift) & MASTER_PRES_MASK;
-- div = (tmp >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
-+ pres = (mckr >> layout->pres_shift) & MASTER_PRES_MASK;
-+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
-
- if (characteristics->have_div3_pres && pres == MASTER_PRES_MAX)
- rate /= 3;
-@@ -119,9 +128,11 @@ static unsigned long clk_master_recalc_r
- static u8 clk_master_get_parent(struct clk_hw *hw)
- {
- struct clk_master *master = to_clk_master(hw);
-- struct at91_pmc *pmc = master->pmc;
-+ unsigned int mckr;
-+
-+ regmap_read(master->regmap, AT91_PMC_MCKR, &mckr);
-
-- return pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_CSS;
-+ return mckr & AT91_PMC_CSS;
- }
-
- static const struct clk_ops master_ops = {
-@@ -132,7 +143,7 @@ static const struct clk_ops master_ops =
- };
-
- static struct clk * __init
--at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
-+at91_clk_register_master(struct regmap *regmap, unsigned int irq,
- const char *name, int num_parents,
- const char **parent_names,
- const struct clk_master_layout *layout,
-@@ -143,7 +154,7 @@ at91_clk_register_master(struct at91_pmc
- struct clk *clk = NULL;
- struct clk_init_data init;
-
-- if (!pmc || !irq || !name || !num_parents || !parent_names)
-+ if (!name || !num_parents || !parent_names)
- return ERR_PTR(-EINVAL);
-
- master = kzalloc(sizeof(*master), GFP_KERNEL);
-@@ -159,7 +170,7 @@ at91_clk_register_master(struct at91_pmc
- master->hw.init = &init;
- master->layout = layout;
- master->characteristics = characteristics;
-- master->pmc = pmc;
-+ master->regmap = regmap;
- master->irq = irq;
- init_waitqueue_head(&master->wait);
- irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
-@@ -217,7 +228,7 @@ of_at91_clk_master_get_characteristics(s
- }
-
- static void __init
--of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
-+of_at91_clk_master_setup(struct device_node *np,
- const struct clk_master_layout *layout)
- {
- struct clk *clk;
-@@ -226,6 +237,7 @@ of_at91_clk_master_setup(struct device_n
- const char *parent_names[MASTER_SOURCE_MAX];
- const char *name = np->name;
- struct clk_master_characteristics *characteristics;
-+ struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX)
-@@ -239,11 +251,15 @@ of_at91_clk_master_setup(struct device_n
- if (!characteristics)
- return;
-
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
- goto out_free_characteristics;
-
-- clk = at91_clk_register_master(pmc, irq, name, num_parents,
-+ clk = at91_clk_register_master(regmap, irq, name, num_parents,
- parent_names, layout,
- characteristics);
- if (IS_ERR(clk))
-@@ -256,14 +272,16 @@ of_at91_clk_master_setup(struct device_n
- kfree(characteristics);
- }
-
--void __init of_at91rm9200_clk_master_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91rm9200_clk_master_setup(struct device_node *np)
- {
-- of_at91_clk_master_setup(np, pmc, &at91rm9200_master_layout);
-+ of_at91_clk_master_setup(np, &at91rm9200_master_layout);
- }
-+CLK_OF_DECLARE(at91rm9200_clk_master, "atmel,at91rm9200-clk-master",
-+ of_at91rm9200_clk_master_setup);
-
--void __init of_at91sam9x5_clk_master_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9x5_clk_master_setup(struct device_node *np)
- {
-- of_at91_clk_master_setup(np, pmc, &at91sam9x5_master_layout);
-+ of_at91_clk_master_setup(np, &at91sam9x5_master_layout);
- }
-+CLK_OF_DECLARE(at91sam9x5_clk_master, "atmel,at91sam9x5-clk-master",
-+ of_at91sam9x5_clk_master_setup);
---- a/drivers/clk/at91/clk-peripheral.c
-+++ b/drivers/clk/at91/clk-peripheral.c
-@@ -14,9 +14,13 @@
- #include <linux/of.h>
- #include <linux/of_address.h>
- #include <linux/io.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-
- #include "pmc.h"
-
-+DEFINE_SPINLOCK(pmc_pcr_lock);
-+
- #define PERIPHERAL_MAX 64
-
- #define PERIPHERAL_AT91RM9200 0
-@@ -33,7 +37,7 @@
-
- struct clk_peripheral {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- u32 id;
- };
-
-@@ -41,8 +45,9 @@ struct clk_peripheral {
-
- struct clk_sam9x5_peripheral {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- struct clk_range range;
-+ spinlock_t *lock;
- u32 id;
- u32 div;
- bool auto_div;
-@@ -54,7 +59,6 @@ struct clk_sam9x5_peripheral {
- static int clk_peripheral_enable(struct clk_hw *hw)
- {
- struct clk_peripheral *periph = to_clk_peripheral(hw);
-- struct at91_pmc *pmc = periph->pmc;
- int offset = AT91_PMC_PCER;
- u32 id = periph->id;
-
-@@ -62,14 +66,14 @@ static int clk_peripheral_enable(struct
- return 0;
- if (id > PERIPHERAL_ID_MAX)
- offset = AT91_PMC_PCER1;
-- pmc_write(pmc, offset, PERIPHERAL_MASK(id));
-+ regmap_write(periph->regmap, offset, PERIPHERAL_MASK(id));
-+
- return 0;
- }
-
- static void clk_peripheral_disable(struct clk_hw *hw)
- {
- struct clk_peripheral *periph = to_clk_peripheral(hw);
-- struct at91_pmc *pmc = periph->pmc;
- int offset = AT91_PMC_PCDR;
- u32 id = periph->id;
-
-@@ -77,21 +81,23 @@ static void clk_peripheral_disable(struc
- return;
- if (id > PERIPHERAL_ID_MAX)
- offset = AT91_PMC_PCDR1;
-- pmc_write(pmc, offset, PERIPHERAL_MASK(id));
-+ regmap_write(periph->regmap, offset, PERIPHERAL_MASK(id));
- }
-
- static int clk_peripheral_is_enabled(struct clk_hw *hw)
- {
- struct clk_peripheral *periph = to_clk_peripheral(hw);
-- struct at91_pmc *pmc = periph->pmc;
- int offset = AT91_PMC_PCSR;
-+ unsigned int status;
- u32 id = periph->id;
-
- if (id < PERIPHERAL_ID_MIN)
- return 1;
- if (id > PERIPHERAL_ID_MAX)
- offset = AT91_PMC_PCSR1;
-- return !!(pmc_read(pmc, offset) & PERIPHERAL_MASK(id));
-+ regmap_read(periph->regmap, offset, &status);
-+
-+ return status & PERIPHERAL_MASK(id) ? 1 : 0;
- }
-
- static const struct clk_ops peripheral_ops = {
-@@ -101,14 +107,14 @@ static const struct clk_ops peripheral_o
- };
-
- static struct clk * __init
--at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name,
-+at91_clk_register_peripheral(struct regmap *regmap, const char *name,
- const char *parent_name, u32 id)
- {
- struct clk_peripheral *periph;
- struct clk *clk = NULL;
- struct clk_init_data init;
-
-- if (!pmc || !name || !parent_name || id > PERIPHERAL_ID_MAX)
-+ if (!name || !parent_name || id > PERIPHERAL_ID_MAX)
- return ERR_PTR(-EINVAL);
-
- periph = kzalloc(sizeof(*periph), GFP_KERNEL);
-@@ -123,7 +129,7 @@ at91_clk_register_peripheral(struct at91
-
- periph->id = id;
- periph->hw.init = &init;
-- periph->pmc = pmc;
-+ periph->regmap = regmap;
-
- clk = clk_register(NULL, &periph->hw);
- if (IS_ERR(clk))
-@@ -160,53 +166,58 @@ static void clk_sam9x5_peripheral_autodi
- static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
- {
- struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
-- struct at91_pmc *pmc = periph->pmc;
-- u32 tmp;
-+ unsigned long flags;
-
- if (periph->id < PERIPHERAL_ID_MIN)
- return 0;
-
-- pmc_lock(pmc);
-- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
-- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_DIV_MASK;
-- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_DIV(periph->div)
-- | AT91_PMC_PCR_CMD
-- | AT91_PMC_PCR_EN);
-- pmc_unlock(pmc);
-+ spin_lock_irqsave(periph->lock, flags);
-+ regmap_write(periph->regmap, AT91_PMC_PCR,
-+ (periph->id & AT91_PMC_PCR_PID_MASK));
-+ regmap_update_bits(periph->regmap, AT91_PMC_PCR,
-+ AT91_PMC_PCR_DIV_MASK | AT91_PMC_PCR_CMD |
-+ AT91_PMC_PCR_EN,
-+ AT91_PMC_PCR_DIV(periph->div) |
-+ AT91_PMC_PCR_CMD |
-+ AT91_PMC_PCR_EN);
-+ spin_unlock_irqrestore(periph->lock, flags);
-+
- return 0;
- }
-
- static void clk_sam9x5_peripheral_disable(struct clk_hw *hw)
- {
- struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
-- struct at91_pmc *pmc = periph->pmc;
-- u32 tmp;
-+ unsigned long flags;
-
- if (periph->id < PERIPHERAL_ID_MIN)
- return;
-
-- pmc_lock(pmc);
-- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
-- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_EN;
-- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD);
-- pmc_unlock(pmc);
-+ spin_lock_irqsave(periph->lock, flags);
-+ regmap_write(periph->regmap, AT91_PMC_PCR,
-+ (periph->id & AT91_PMC_PCR_PID_MASK));
-+ regmap_update_bits(periph->regmap, AT91_PMC_PCR,
-+ AT91_PMC_PCR_EN | AT91_PMC_PCR_CMD,
-+ AT91_PMC_PCR_CMD);
-+ spin_unlock_irqrestore(periph->lock, flags);
- }
-
- static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
- {
- struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
-- struct at91_pmc *pmc = periph->pmc;
-- int ret;
-+ unsigned long flags;
-+ unsigned int status;
-
- if (periph->id < PERIPHERAL_ID_MIN)
- return 1;
-
-- pmc_lock(pmc);
-- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
-- ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_EN);
-- pmc_unlock(pmc);
-+ spin_lock_irqsave(periph->lock, flags);
-+ regmap_write(periph->regmap, AT91_PMC_PCR,
-+ (periph->id & AT91_PMC_PCR_PID_MASK));
-+ regmap_read(periph->regmap, AT91_PMC_PCR, &status);
-+ spin_unlock_irqrestore(periph->lock, flags);
-
-- return ret;
-+ return status & AT91_PMC_PCR_EN ? 1 : 0;
- }
-
- static unsigned long
-@@ -214,19 +225,20 @@ clk_sam9x5_peripheral_recalc_rate(struct
- unsigned long parent_rate)
- {
- struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
-- struct at91_pmc *pmc = periph->pmc;
-- u32 tmp;
-+ unsigned long flags;
-+ unsigned int status;
-
- if (periph->id < PERIPHERAL_ID_MIN)
- return parent_rate;
-
-- pmc_lock(pmc);
-- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
-- tmp = pmc_read(pmc, AT91_PMC_PCR);
-- pmc_unlock(pmc);
-+ spin_lock_irqsave(periph->lock, flags);
-+ regmap_write(periph->regmap, AT91_PMC_PCR,
-+ (periph->id & AT91_PMC_PCR_PID_MASK));
-+ regmap_read(periph->regmap, AT91_PMC_PCR, &status);
-+ spin_unlock_irqrestore(periph->lock, flags);
-
-- if (tmp & AT91_PMC_PCR_EN) {
-- periph->div = PERIPHERAL_RSHIFT(tmp);
-+ if (status & AT91_PMC_PCR_EN) {
-+ periph->div = PERIPHERAL_RSHIFT(status);
- periph->auto_div = false;
- } else {
- clk_sam9x5_peripheral_autodiv(periph);
-@@ -318,15 +330,15 @@ static const struct clk_ops sam9x5_perip
- };
-
- static struct clk * __init
--at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name,
-- const char *parent_name, u32 id,
-- const struct clk_range *range)
-+at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
-+ const char *name, const char *parent_name,
-+ u32 id, const struct clk_range *range)
- {
- struct clk_sam9x5_peripheral *periph;
- struct clk *clk = NULL;
- struct clk_init_data init;
-
-- if (!pmc || !name || !parent_name)
-+ if (!name || !parent_name)
- return ERR_PTR(-EINVAL);
-
- periph = kzalloc(sizeof(*periph), GFP_KERNEL);
-@@ -342,7 +354,8 @@ at91_clk_register_sam9x5_peripheral(stru
- periph->id = id;
- periph->hw.init = &init;
- periph->div = 0;
-- periph->pmc = pmc;
-+ periph->regmap = regmap;
-+ periph->lock = lock;
- periph->auto_div = true;
- periph->range = *range;
-
-@@ -356,7 +369,7 @@ at91_clk_register_sam9x5_peripheral(stru
- }
-
- static void __init
--of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
-+of_at91_clk_periph_setup(struct device_node *np, u8 type)
- {
- int num;
- u32 id;
-@@ -364,6 +377,7 @@ of_at91_clk_periph_setup(struct device_n
- const char *parent_name;
- const char *name;
- struct device_node *periphclknp;
-+ struct regmap *regmap;
-
- parent_name = of_clk_get_parent_name(np, 0);
- if (!parent_name)
-@@ -373,6 +387,10 @@ of_at91_clk_periph_setup(struct device_n
- if (!num || num > PERIPHERAL_MAX)
- return;
-
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
- for_each_child_of_node(np, periphclknp) {
- if (of_property_read_u32(periphclknp, "reg", &id))
- continue;
-@@ -384,7 +402,7 @@ of_at91_clk_periph_setup(struct device_n
- name = periphclknp->name;
-
- if (type == PERIPHERAL_AT91RM9200) {
-- clk = at91_clk_register_peripheral(pmc, name,
-+ clk = at91_clk_register_peripheral(regmap, name,
- parent_name, id);
- } else {
- struct clk_range range = CLK_RANGE(0, 0);
-@@ -393,7 +411,9 @@ of_at91_clk_periph_setup(struct device_n
- "atmel,clk-output-range",
- &range);
-
-- clk = at91_clk_register_sam9x5_peripheral(pmc, name,
-+ clk = at91_clk_register_sam9x5_peripheral(regmap,
-+ &pmc_pcr_lock,
-+ name,
- parent_name,
- id, &range);
- }
-@@ -405,14 +425,16 @@ of_at91_clk_periph_setup(struct device_n
- }
- }
-
--void __init of_at91rm9200_clk_periph_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91rm9200_clk_periph_setup(struct device_node *np)
- {
-- of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91RM9200);
-+ of_at91_clk_periph_setup(np, PERIPHERAL_AT91RM9200);
- }
-+CLK_OF_DECLARE(at91rm9200_clk_periph, "atmel,at91rm9200-clk-peripheral",
-+ of_at91rm9200_clk_periph_setup);
-
--void __init of_at91sam9x5_clk_periph_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9x5_clk_periph_setup(struct device_node *np)
- {
-- of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91SAM9X5);
-+ of_at91_clk_periph_setup(np, PERIPHERAL_AT91SAM9X5);
- }
-+CLK_OF_DECLARE(at91sam9x5_clk_periph, "atmel,at91sam9x5-clk-peripheral",
-+ of_at91sam9x5_clk_periph_setup);
---- a/drivers/clk/at91/clk-pll.c
-+++ b/drivers/clk/at91/clk-pll.c
-@@ -20,6 +20,8 @@
- #include <linux/sched.h>
- #include <linux/interrupt.h>
- #include <linux/irq.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-
- #include "pmc.h"
-
-@@ -58,7 +60,7 @@ struct clk_pll_layout {
-
- struct clk_pll {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- unsigned int irq;
- wait_queue_head_t wait;
- u8 id;
-@@ -79,10 +81,19 @@ static irqreturn_t clk_pll_irq_handler(i
- return IRQ_HANDLED;
- }
-
-+static inline bool clk_pll_ready(struct regmap *regmap, int id)
-+{
-+ unsigned int status;
-+
-+ regmap_read(regmap, AT91_PMC_SR, &status);
-+
-+ return status & PLL_STATUS_MASK(id) ? 1 : 0;
-+}
-+
- static int clk_pll_prepare(struct clk_hw *hw)
- {
- struct clk_pll *pll = to_clk_pll(hw);
-- struct at91_pmc *pmc = pll->pmc;
-+ struct regmap *regmap = pll->regmap;
- const struct clk_pll_layout *layout = pll->layout;
- const struct clk_pll_characteristics *characteristics =
- pll->characteristics;
-@@ -90,38 +101,36 @@ static int clk_pll_prepare(struct clk_hw
- u32 mask = PLL_STATUS_MASK(id);
- int offset = PLL_REG(id);
- u8 out = 0;
-- u32 pllr, icpr;
-+ unsigned int pllr;
-+ unsigned int status;
- u8 div;
- u16 mul;
-
-- pllr = pmc_read(pmc, offset);
-+ regmap_read(regmap, offset, &pllr);
- div = PLL_DIV(pllr);
- mul = PLL_MUL(pllr, layout);
-
-- if ((pmc_read(pmc, AT91_PMC_SR) & mask) &&
-+ regmap_read(regmap, AT91_PMC_SR, &status);
-+ if ((status & mask) &&
- (div == pll->div && mul == pll->mul))
- return 0;
-
- if (characteristics->out)
- out = characteristics->out[pll->range];
-- if (characteristics->icpll) {
-- icpr = pmc_read(pmc, AT91_PMC_PLLICPR) & ~PLL_ICPR_MASK(id);
-- icpr |= (characteristics->icpll[pll->range] <<
-- PLL_ICPR_SHIFT(id));
-- pmc_write(pmc, AT91_PMC_PLLICPR, icpr);
-- }
-
-- pllr &= ~layout->pllr_mask;
-- pllr |= layout->pllr_mask &
-- (pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) |
-- (out << PLL_OUT_SHIFT) |
-- ((pll->mul & layout->mul_mask) << layout->mul_shift));
-- pmc_write(pmc, offset, pllr);
-+ if (characteristics->icpll)
-+ regmap_update_bits(regmap, AT91_PMC_PLLICPR, PLL_ICPR_MASK(id),
-+ characteristics->icpll[pll->range] << PLL_ICPR_SHIFT(id));
-+
-+ regmap_update_bits(regmap, offset, layout->pllr_mask,
-+ pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) |
-+ (out << PLL_OUT_SHIFT) |
-+ ((pll->mul & layout->mul_mask) << layout->mul_shift));
-
-- while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
-+ while (!clk_pll_ready(regmap, pll->id)) {
- enable_irq(pll->irq);
- wait_event(pll->wait,
-- pmc_read(pmc, AT91_PMC_SR) & mask);
-+ clk_pll_ready(regmap, pll->id));
- }
-
- return 0;
-@@ -130,32 +139,35 @@ static int clk_pll_prepare(struct clk_hw
- static int clk_pll_is_prepared(struct clk_hw *hw)
- {
- struct clk_pll *pll = to_clk_pll(hw);
-- struct at91_pmc *pmc = pll->pmc;
-
-- return !!(pmc_read(pmc, AT91_PMC_SR) &
-- PLL_STATUS_MASK(pll->id));
-+ return clk_pll_ready(pll->regmap, pll->id);
- }
-
- static void clk_pll_unprepare(struct clk_hw *hw)
- {
- struct clk_pll *pll = to_clk_pll(hw);
-- struct at91_pmc *pmc = pll->pmc;
-- const struct clk_pll_layout *layout = pll->layout;
-- int offset = PLL_REG(pll->id);
-- u32 tmp = pmc_read(pmc, offset) & ~(layout->pllr_mask);
-+ unsigned int mask = pll->layout->pllr_mask;
-
-- pmc_write(pmc, offset, tmp);
-+ regmap_update_bits(pll->regmap, PLL_REG(pll->id), mask, ~mask);
- }
-
- static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
- {
- struct clk_pll *pll = to_clk_pll(hw);
-+ unsigned int pllr;
-+ u16 mul;
-+ u8 div;
-+
-+ regmap_read(pll->regmap, PLL_REG(pll->id), &pllr);
-+
-+ div = PLL_DIV(pllr);
-+ mul = PLL_MUL(pllr, pll->layout);
-
-- if (!pll->div || !pll->mul)
-+ if (!div || !mul)
- return 0;
-
-- return (parent_rate / pll->div) * (pll->mul + 1);
-+ return (parent_rate / div) * (mul + 1);
- }
-
- static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
-@@ -308,7 +320,7 @@ static const struct clk_ops pll_ops = {
- };
-
- static struct clk * __init
--at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
-+at91_clk_register_pll(struct regmap *regmap, unsigned int irq, const char *name,
- const char *parent_name, u8 id,
- const struct clk_pll_layout *layout,
- const struct clk_pll_characteristics *characteristics)
-@@ -318,7 +330,7 @@ at91_clk_register_pll(struct at91_pmc *p
- struct clk_init_data init;
- int ret;
- int offset = PLL_REG(id);
-- u32 tmp;
-+ unsigned int pllr;
-
- if (id > PLL_MAX_ID)
- return ERR_PTR(-EINVAL);
-@@ -337,11 +349,11 @@ at91_clk_register_pll(struct at91_pmc *p
- pll->hw.init = &init;
- pll->layout = layout;
- pll->characteristics = characteristics;
-- pll->pmc = pmc;
-+ pll->regmap = regmap;
- pll->irq = irq;
-- tmp = pmc_read(pmc, offset) & layout->pllr_mask;
-- pll->div = PLL_DIV(tmp);
-- pll->mul = PLL_MUL(tmp, layout);
-+ regmap_read(regmap, offset, &pllr);
-+ pll->div = PLL_DIV(pllr);
-+ pll->mul = PLL_MUL(pllr, layout);
- init_waitqueue_head(&pll->wait);
- irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
- ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
-@@ -483,12 +495,13 @@ of_at91_clk_pll_get_characteristics(stru
- }
-
- static void __init
--of_at91_clk_pll_setup(struct device_node *np, struct at91_pmc *pmc,
-+of_at91_clk_pll_setup(struct device_node *np,
- const struct clk_pll_layout *layout)
- {
- u32 id;
- unsigned int irq;
- struct clk *clk;
-+ struct regmap *regmap;
- const char *parent_name;
- const char *name = np->name;
- struct clk_pll_characteristics *characteristics;
-@@ -500,6 +513,10 @@ of_at91_clk_pll_setup(struct device_node
-
- of_property_read_string(np, "clock-output-names", &name);
-
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
- characteristics = of_at91_clk_pll_get_characteristics(np);
- if (!characteristics)
- return;
-@@ -508,7 +525,7 @@ of_at91_clk_pll_setup(struct device_node
- if (!irq)
- return;
-
-- clk = at91_clk_register_pll(pmc, irq, name, parent_name, id, layout,
-+ clk = at91_clk_register_pll(regmap, irq, name, parent_name, id, layout,
- characteristics);
- if (IS_ERR(clk))
- goto out_free_characteristics;
-@@ -520,26 +537,30 @@ of_at91_clk_pll_setup(struct device_node
- kfree(characteristics);
- }
-
--void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91rm9200_clk_pll_setup(struct device_node *np)
- {
-- of_at91_clk_pll_setup(np, pmc, &at91rm9200_pll_layout);
-+ of_at91_clk_pll_setup(np, &at91rm9200_pll_layout);
- }
-+CLK_OF_DECLARE(at91rm9200_clk_pll, "atmel,at91rm9200-clk-pll",
-+ of_at91rm9200_clk_pll_setup);
-
--void __init of_at91sam9g45_clk_pll_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9g45_clk_pll_setup(struct device_node *np)
- {
-- of_at91_clk_pll_setup(np, pmc, &at91sam9g45_pll_layout);
-+ of_at91_clk_pll_setup(np, &at91sam9g45_pll_layout);
- }
-+CLK_OF_DECLARE(at91sam9g45_clk_pll, "atmel,at91sam9g45-clk-pll",
-+ of_at91sam9g45_clk_pll_setup);
-
--void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np)
- {
-- of_at91_clk_pll_setup(np, pmc, &at91sam9g20_pllb_layout);
-+ of_at91_clk_pll_setup(np, &at91sam9g20_pllb_layout);
- }
-+CLK_OF_DECLARE(at91sam9g20_clk_pllb, "atmel,at91sam9g20-clk-pllb",
-+ of_at91sam9g20_clk_pllb_setup);
-
--void __init of_sama5d3_clk_pll_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_sama5d3_clk_pll_setup(struct device_node *np)
- {
-- of_at91_clk_pll_setup(np, pmc, &sama5d3_pll_layout);
-+ of_at91_clk_pll_setup(np, &sama5d3_pll_layout);
- }
-+CLK_OF_DECLARE(sama5d3_clk_pll, "atmel,sama5d3-clk-pll",
-+ of_sama5d3_clk_pll_setup);
---- a/drivers/clk/at91/clk-plldiv.c
-+++ b/drivers/clk/at91/clk-plldiv.c
-@@ -14,6 +14,8 @@
- #include <linux/of.h>
- #include <linux/of_address.h>
- #include <linux/io.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-
- #include "pmc.h"
-
-@@ -21,16 +23,18 @@
-
- struct clk_plldiv {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- };
-
- static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
- {
- struct clk_plldiv *plldiv = to_clk_plldiv(hw);
-- struct at91_pmc *pmc = plldiv->pmc;
-+ unsigned int mckr;
-
-- if (pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_PLLADIV2)
-+ regmap_read(plldiv->regmap, AT91_PMC_MCKR, &mckr);
-+
-+ if (mckr & AT91_PMC_PLLADIV2)
- return parent_rate / 2;
-
- return parent_rate;
-@@ -57,18 +61,12 @@ static int clk_plldiv_set_rate(struct cl
- unsigned long parent_rate)
- {
- struct clk_plldiv *plldiv = to_clk_plldiv(hw);
-- struct at91_pmc *pmc = plldiv->pmc;
-- u32 tmp;
-
-- if (parent_rate != rate && (parent_rate / 2) != rate)
-+ if ((parent_rate != rate) && (parent_rate / 2 != rate))
- return -EINVAL;
-
-- pmc_lock(pmc);
-- tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_PLLADIV2;
-- if ((parent_rate / 2) == rate)
-- tmp |= AT91_PMC_PLLADIV2;
-- pmc_write(pmc, AT91_PMC_MCKR, tmp);
-- pmc_unlock(pmc);
-+ regmap_update_bits(plldiv->regmap, AT91_PMC_MCKR, AT91_PMC_PLLADIV2,
-+ parent_rate != rate ? AT91_PMC_PLLADIV2 : 0);
-
- return 0;
- }
-@@ -80,7 +78,7 @@ static const struct clk_ops plldiv_ops =
- };
-
- static struct clk * __init
--at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name,
-+at91_clk_register_plldiv(struct regmap *regmap, const char *name,
- const char *parent_name)
- {
- struct clk_plldiv *plldiv;
-@@ -98,7 +96,7 @@ at91_clk_register_plldiv(struct at91_pmc
- init.flags = CLK_SET_RATE_GATE;
-
- plldiv->hw.init = &init;
-- plldiv->pmc = pmc;
-+ plldiv->regmap = regmap;
-
- clk = clk_register(NULL, &plldiv->hw);
-
-@@ -109,27 +107,27 @@ at91_clk_register_plldiv(struct at91_pmc
- }
-
- static void __init
--of_at91_clk_plldiv_setup(struct device_node *np, struct at91_pmc *pmc)
-+of_at91sam9x5_clk_plldiv_setup(struct device_node *np)
- {
- struct clk *clk;
- const char *parent_name;
- const char *name = np->name;
-+ struct regmap *regmap;
-
- parent_name = of_clk_get_parent_name(np, 0);
-
- of_property_read_string(np, "clock-output-names", &name);
-
-- clk = at91_clk_register_plldiv(pmc, name, parent_name);
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-
-+ clk = at91_clk_register_plldiv(regmap, name, parent_name);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- return;
- }
--
--void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
-- struct at91_pmc *pmc)
--{
-- of_at91_clk_plldiv_setup(np, pmc);
--}
-+CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv",
-+ of_at91sam9x5_clk_plldiv_setup);
---- a/drivers/clk/at91/clk-programmable.c
-+++ b/drivers/clk/at91/clk-programmable.c
-@@ -16,6 +16,8 @@
- #include <linux/io.h>
- #include <linux/wait.h>
- #include <linux/sched.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-
- #include "pmc.h"
-
-@@ -24,6 +26,7 @@
-
- #define PROG_STATUS_MASK(id) (1 << ((id) + 8))
- #define PROG_PRES_MASK 0x7
-+#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK)
- #define PROG_MAX_RM9200_CSS 3
-
- struct clk_programmable_layout {
-@@ -34,7 +37,7 @@ struct clk_programmable_layout {
-
- struct clk_programmable {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- u8 id;
- const struct clk_programmable_layout *layout;
- };
-@@ -44,14 +47,12 @@ struct clk_programmable {
- static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
- {
-- u32 pres;
- struct clk_programmable *prog = to_clk_programmable(hw);
-- struct at91_pmc *pmc = prog->pmc;
-- const struct clk_programmable_layout *layout = prog->layout;
-+ unsigned int pckr;
-+
-+ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
-
-- pres = (pmc_read(pmc, AT91_PMC_PCKR(prog->id)) >> layout->pres_shift) &
-- PROG_PRES_MASK;
-- return parent_rate >> pres;
-+ return parent_rate >> PROG_PRES(prog->layout, pckr);
- }
-
- static int clk_programmable_determine_rate(struct clk_hw *hw,
-@@ -101,36 +102,36 @@ static int clk_programmable_set_parent(s
- {
- struct clk_programmable *prog = to_clk_programmable(hw);
- const struct clk_programmable_layout *layout = prog->layout;
-- struct at91_pmc *pmc = prog->pmc;
-- u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & ~layout->css_mask;
-+ unsigned int mask = layout->css_mask;
-+ unsigned int pckr = 0;
-
- if (layout->have_slck_mck)
-- tmp &= AT91_PMC_CSSMCK_MCK;
-+ mask |= AT91_PMC_CSSMCK_MCK;
-
- if (index > layout->css_mask) {
-- if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) {
-- tmp |= AT91_PMC_CSSMCK_MCK;
-- return 0;
-- } else {
-+ if (index > PROG_MAX_RM9200_CSS && !layout->have_slck_mck)
- return -EINVAL;
-- }
-+
-+ pckr |= AT91_PMC_CSSMCK_MCK;
- }
-
-- pmc_write(pmc, AT91_PMC_PCKR(prog->id), tmp | index);
-+ regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id), mask, pckr);
-+
- return 0;
- }
-
- static u8 clk_programmable_get_parent(struct clk_hw *hw)
- {
-- u32 tmp;
-- u8 ret;
- struct clk_programmable *prog = to_clk_programmable(hw);
-- struct at91_pmc *pmc = prog->pmc;
- const struct clk_programmable_layout *layout = prog->layout;
-+ unsigned int pckr;
-+ u8 ret;
-+
-+ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
-+
-+ ret = pckr & layout->css_mask;
-
-- tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
-- ret = tmp & layout->css_mask;
-- if (layout->have_slck_mck && (tmp & AT91_PMC_CSSMCK_MCK) && !ret)
-+ if (layout->have_slck_mck && (pckr & AT91_PMC_CSSMCK_MCK) && !ret)
- ret = PROG_MAX_RM9200_CSS + 1;
-
- return ret;
-@@ -140,26 +141,27 @@ static int clk_programmable_set_rate(str
- unsigned long parent_rate)
- {
- struct clk_programmable *prog = to_clk_programmable(hw);
-- struct at91_pmc *pmc = prog->pmc;
- const struct clk_programmable_layout *layout = prog->layout;
- unsigned long div = parent_rate / rate;
-+ unsigned int pckr;
- int shift = 0;
-- u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) &
-- ~(PROG_PRES_MASK << layout->pres_shift);
-+
-+ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
-
- if (!div)
- return -EINVAL;
-
- shift = fls(div) - 1;
-
-- if (div != (1<<shift))
-+ if (div != (1 << shift))
- return -EINVAL;
-
- if (shift >= PROG_PRES_MASK)
- return -EINVAL;
-
-- pmc_write(pmc, AT91_PMC_PCKR(prog->id),
-- tmp | (shift << layout->pres_shift));
-+ regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id),
-+ PROG_PRES_MASK << layout->pres_shift,
-+ shift << layout->pres_shift);
-
- return 0;
- }
-@@ -173,7 +175,7 @@ static const struct clk_ops programmable
- };
-
- static struct clk * __init
--at91_clk_register_programmable(struct at91_pmc *pmc,
-+at91_clk_register_programmable(struct regmap *regmap,
- const char *name, const char **parent_names,
- u8 num_parents, u8 id,
- const struct clk_programmable_layout *layout)
-@@ -198,7 +200,7 @@ at91_clk_register_programmable(struct at
- prog->id = id;
- prog->layout = layout;
- prog->hw.init = &init;
-- prog->pmc = pmc;
-+ prog->regmap = regmap;
-
- clk = clk_register(NULL, &prog->hw);
- if (IS_ERR(clk))
-@@ -226,7 +228,7 @@ static const struct clk_programmable_lay
- };
-
- static void __init
--of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
-+of_at91_clk_prog_setup(struct device_node *np,
- const struct clk_programmable_layout *layout)
- {
- int num;
-@@ -236,6 +238,7 @@ of_at91_clk_prog_setup(struct device_nod
- const char *parent_names[PROG_SOURCE_MAX];
- const char *name;
- struct device_node *progclknp;
-+ struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX)
-@@ -247,6 +250,10 @@ of_at91_clk_prog_setup(struct device_nod
- if (!num || num > (PROG_ID_MAX + 1))
- return;
-
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
- for_each_child_of_node(np, progclknp) {
- if (of_property_read_u32(progclknp, "reg", &id))
- continue;
-@@ -254,7 +261,7 @@ of_at91_clk_prog_setup(struct device_nod
- if (of_property_read_string(np, "clock-output-names", &name))
- name = progclknp->name;
-
-- clk = at91_clk_register_programmable(pmc, name,
-+ clk = at91_clk_register_programmable(regmap, name,
- parent_names, num_parents,
- id, layout);
- if (IS_ERR(clk))
-@@ -265,20 +272,23 @@ of_at91_clk_prog_setup(struct device_nod
- }
-
-
--void __init of_at91rm9200_clk_prog_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91rm9200_clk_prog_setup(struct device_node *np)
- {
-- of_at91_clk_prog_setup(np, pmc, &at91rm9200_programmable_layout);
-+ of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout);
- }
-+CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable",
-+ of_at91rm9200_clk_prog_setup);
-
--void __init of_at91sam9g45_clk_prog_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np)
- {
-- of_at91_clk_prog_setup(np, pmc, &at91sam9g45_programmable_layout);
-+ of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout);
- }
-+CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable",
-+ of_at91sam9g45_clk_prog_setup);
-
--void __init of_at91sam9x5_clk_prog_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np)
- {
-- of_at91_clk_prog_setup(np, pmc, &at91sam9x5_programmable_layout);
-+ of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout);
- }
-+CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable",
-+ of_at91sam9x5_clk_prog_setup);
---- a/drivers/clk/at91/clk-slow.c
-+++ b/drivers/clk/at91/clk-slow.c
-@@ -22,6 +22,8 @@
- #include <linux/io.h>
- #include <linux/interrupt.h>
- #include <linux/irq.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
- #include <linux/sched.h>
- #include <linux/wait.h>
-
-@@ -59,7 +61,7 @@ struct clk_slow_rc_osc {
-
- struct clk_sam9260_slow {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- };
-
- #define to_clk_sam9260_slow(hw) container_of(hw, struct clk_sam9260_slow, hw)
-@@ -393,8 +395,11 @@ void __init of_at91sam9x5_clk_slow_setup
- static u8 clk_sam9260_slow_get_parent(struct clk_hw *hw)
- {
- struct clk_sam9260_slow *slowck = to_clk_sam9260_slow(hw);
-+ unsigned int status;
-
-- return !!(pmc_read(slowck->pmc, AT91_PMC_SR) & AT91_PMC_OSCSEL);
-+ regmap_read(slowck->regmap, AT91_PMC_SR, &status);
-+
-+ return status & AT91_PMC_OSCSEL ? 1 : 0;
- }
-
- static const struct clk_ops sam9260_slow_ops = {
-@@ -402,7 +407,7 @@ static const struct clk_ops sam9260_slow
- };
-
- static struct clk * __init
--at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
-+at91_clk_register_sam9260_slow(struct regmap *regmap,
- const char *name,
- const char **parent_names,
- int num_parents)
-@@ -411,7 +416,7 @@ at91_clk_register_sam9260_slow(struct at
- struct clk *clk = NULL;
- struct clk_init_data init;
-
-- if (!pmc || !name)
-+ if (!name)
- return ERR_PTR(-EINVAL);
-
- if (!parent_names || !num_parents)
-@@ -428,7 +433,7 @@ at91_clk_register_sam9260_slow(struct at
- init.flags = 0;
-
- slowck->hw.init = &init;
-- slowck->pmc = pmc;
-+ slowck->regmap = regmap;
-
- clk = clk_register(NULL, &slowck->hw);
- if (IS_ERR(clk))
-@@ -439,29 +444,34 @@ at91_clk_register_sam9260_slow(struct at
- return clk;
- }
-
--void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9260_clk_slow_setup(struct device_node *np)
- {
- struct clk *clk;
- const char *parent_names[2];
- int num_parents;
- const char *name = np->name;
-+ struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents != 2)
- return;
-
- of_clk_parent_fill(np, parent_names, num_parents);
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-
- of_property_read_string(np, "clock-output-names", &name);
-
-- clk = at91_clk_register_sam9260_slow(pmc, name, parent_names,
-+ clk = at91_clk_register_sam9260_slow(regmap, name, parent_names,
- num_parents);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- }
-+CLK_OF_DECLARE(at91sam9260_clk_slow, "atmel,at91sam9260-clk-slow",
-+ of_at91sam9260_clk_slow_setup);
-
- /*
- * FIXME: All slow clk users are not properly claiming it (get + prepare +
---- a/drivers/clk/at91/clk-smd.c
-+++ b/drivers/clk/at91/clk-smd.c
-@@ -14,6 +14,8 @@
- #include <linux/of.h>
- #include <linux/of_address.h>
- #include <linux/io.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-
- #include "pmc.h"
-
-@@ -24,7 +26,7 @@
-
- struct at91sam9x5_clk_smd {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- };
-
- #define to_at91sam9x5_clk_smd(hw) \
-@@ -33,13 +35,13 @@ struct at91sam9x5_clk_smd {
- static unsigned long at91sam9x5_clk_smd_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
- {
-- u32 tmp;
-- u8 smddiv;
- struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
-- struct at91_pmc *pmc = smd->pmc;
-+ unsigned int smdr;
-+ u8 smddiv;
-+
-+ regmap_read(smd->regmap, AT91_PMC_SMD, &smdr);
-+ smddiv = (smdr & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT;
-
-- tmp = pmc_read(pmc, AT91_PMC_SMD);
-- smddiv = (tmp & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT;
- return parent_rate / (smddiv + 1);
- }
-
-@@ -67,40 +69,38 @@ static long at91sam9x5_clk_smd_round_rat
-
- static int at91sam9x5_clk_smd_set_parent(struct clk_hw *hw, u8 index)
- {
-- u32 tmp;
- struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
-- struct at91_pmc *pmc = smd->pmc;
-
- if (index > 1)
- return -EINVAL;
-- tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMDS;
-- if (index)
-- tmp |= AT91_PMC_SMDS;
-- pmc_write(pmc, AT91_PMC_SMD, tmp);
-+
-+ regmap_update_bits(smd->regmap, AT91_PMC_SMD, AT91_PMC_SMDS,
-+ index ? AT91_PMC_SMDS : 0);
-+
- return 0;
- }
-
- static u8 at91sam9x5_clk_smd_get_parent(struct clk_hw *hw)
- {
- struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
-- struct at91_pmc *pmc = smd->pmc;
-+ unsigned int smdr;
-
-- return pmc_read(pmc, AT91_PMC_SMD) & AT91_PMC_SMDS;
-+ regmap_read(smd->regmap, AT91_PMC_SMD, &smdr);
-+
-+ return smdr & AT91_PMC_SMDS;
- }
-
- static int at91sam9x5_clk_smd_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
- {
-- u32 tmp;
- struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
-- struct at91_pmc *pmc = smd->pmc;
- unsigned long div = parent_rate / rate;
-
- if (parent_rate % rate || div < 1 || div > (SMD_MAX_DIV + 1))
- return -EINVAL;
-- tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMD_DIV;
-- tmp |= (div - 1) << SMD_DIV_SHIFT;
-- pmc_write(pmc, AT91_PMC_SMD, tmp);
-+
-+ regmap_update_bits(smd->regmap, AT91_PMC_SMD, AT91_PMC_SMD_DIV,
-+ (div - 1) << SMD_DIV_SHIFT);
-
- return 0;
- }
-@@ -114,7 +114,7 @@ static const struct clk_ops at91sam9x5_s
- };
-
- static struct clk * __init
--at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name,
-+at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name,
- const char **parent_names, u8 num_parents)
- {
- struct at91sam9x5_clk_smd *smd;
-@@ -132,7 +132,7 @@ at91sam9x5_clk_register_smd(struct at91_
- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
-
- smd->hw.init = &init;
-- smd->pmc = pmc;
-+ smd->regmap = regmap;
-
- clk = clk_register(NULL, &smd->hw);
- if (IS_ERR(clk))
-@@ -141,13 +141,13 @@ at91sam9x5_clk_register_smd(struct at91_
- return clk;
- }
-
--void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np)
- {
- struct clk *clk;
- int num_parents;
- const char *parent_names[SMD_SOURCE_MAX];
- const char *name = np->name;
-+ struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX)
-@@ -157,10 +157,16 @@ void __init of_at91sam9x5_clk_smd_setup(
-
- of_property_read_string(np, "clock-output-names", &name);
-
-- clk = at91sam9x5_clk_register_smd(pmc, name, parent_names,
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
-+ clk = at91sam9x5_clk_register_smd(regmap, name, parent_names,
- num_parents);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- }
-+CLK_OF_DECLARE(at91sam9x5_clk_smd, "atmel,at91sam9x5-clk-smd",
-+ of_at91sam9x5_clk_smd_setup);
---- a/drivers/clk/at91/clk-system.c
-+++ b/drivers/clk/at91/clk-system.c
-@@ -19,6 +19,8 @@
- #include <linux/interrupt.h>
- #include <linux/wait.h>
- #include <linux/sched.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-
- #include "pmc.h"
-
-@@ -29,7 +31,7 @@
- #define to_clk_system(hw) container_of(hw, struct clk_system, hw)
- struct clk_system {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- unsigned int irq;
- wait_queue_head_t wait;
- u8 id;
-@@ -49,24 +51,32 @@ static irqreturn_t clk_system_irq_handle
- return IRQ_HANDLED;
- }
-
-+static inline bool clk_system_ready(struct regmap *regmap, int id)
-+{
-+ unsigned int status;
-+
-+ regmap_read(regmap, AT91_PMC_SR, &status);
-+
-+ return status & (1 << id) ? 1 : 0;
-+}
-+
- static int clk_system_prepare(struct clk_hw *hw)
- {
- struct clk_system *sys = to_clk_system(hw);
-- struct at91_pmc *pmc = sys->pmc;
-- u32 mask = 1 << sys->id;
-
-- pmc_write(pmc, AT91_PMC_SCER, mask);
-+ regmap_write(sys->regmap, AT91_PMC_SCER, 1 << sys->id);
-
- if (!is_pck(sys->id))
- return 0;
-
-- while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
-+ while (!clk_system_ready(sys->regmap, sys->id)) {
- if (sys->irq) {
- enable_irq(sys->irq);
- wait_event(sys->wait,
-- pmc_read(pmc, AT91_PMC_SR) & mask);
-- } else
-+ clk_system_ready(sys->regmap, sys->id));
-+ } else {
- cpu_relax();
-+ }
- }
- return 0;
- }
-@@ -74,23 +84,26 @@ static int clk_system_prepare(struct clk
- static void clk_system_unprepare(struct clk_hw *hw)
- {
- struct clk_system *sys = to_clk_system(hw);
-- struct at91_pmc *pmc = sys->pmc;
-
-- pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id);
-+ regmap_write(sys->regmap, AT91_PMC_SCDR, 1 << sys->id);
- }
-
- static int clk_system_is_prepared(struct clk_hw *hw)
- {
- struct clk_system *sys = to_clk_system(hw);
-- struct at91_pmc *pmc = sys->pmc;
-+ unsigned int status;
-+
-+ regmap_read(sys->regmap, AT91_PMC_SCSR, &status);
-
-- if (!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id)))
-+ if (!(status & (1 << sys->id)))
- return 0;
-
- if (!is_pck(sys->id))
- return 1;
-
-- return !!(pmc_read(pmc, AT91_PMC_SR) & (1 << sys->id));
-+ regmap_read(sys->regmap, AT91_PMC_SR, &status);
-+
-+ return status & (1 << sys->id) ? 1 : 0;
- }
-
- static const struct clk_ops system_ops = {
-@@ -100,7 +113,7 @@ static const struct clk_ops system_ops =
- };
-
- static struct clk * __init
--at91_clk_register_system(struct at91_pmc *pmc, const char *name,
-+at91_clk_register_system(struct regmap *regmap, const char *name,
- const char *parent_name, u8 id, int irq)
- {
- struct clk_system *sys;
-@@ -123,7 +136,7 @@ at91_clk_register_system(struct at91_pmc
-
- sys->id = id;
- sys->hw.init = &init;
-- sys->pmc = pmc;
-+ sys->regmap = regmap;
- sys->irq = irq;
- if (irq) {
- init_waitqueue_head(&sys->wait);
-@@ -146,8 +159,7 @@ at91_clk_register_system(struct at91_pmc
- return clk;
- }
-
--static void __init
--of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
-+static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
- {
- int num;
- int irq = 0;
-@@ -156,11 +168,16 @@ of_at91_clk_sys_setup(struct device_node
- const char *name;
- struct device_node *sysclknp;
- const char *parent_name;
-+ struct regmap *regmap;
-
- num = of_get_child_count(np);
- if (num > (SYSTEM_MAX_ID + 1))
- return;
-
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
- for_each_child_of_node(np, sysclknp) {
- if (of_property_read_u32(sysclknp, "reg", &id))
- continue;
-@@ -173,16 +190,13 @@ of_at91_clk_sys_setup(struct device_node
-
- parent_name = of_clk_get_parent_name(sysclknp, 0);
-
-- clk = at91_clk_register_system(pmc, name, parent_name, id, irq);
-+ clk = at91_clk_register_system(regmap, name, parent_name, id,
-+ irq);
- if (IS_ERR(clk))
- continue;
-
- of_clk_add_provider(sysclknp, of_clk_src_simple_get, clk);
- }
- }
--
--void __init of_at91rm9200_clk_sys_setup(struct device_node *np,
-- struct at91_pmc *pmc)
--{
-- of_at91_clk_sys_setup(np, pmc);
--}
-+CLK_OF_DECLARE(at91rm9200_clk_sys, "atmel,at91rm9200-clk-system",
-+ of_at91rm9200_clk_sys_setup);
---- a/drivers/clk/at91/clk-usb.c
-+++ b/drivers/clk/at91/clk-usb.c
-@@ -14,6 +14,8 @@
- #include <linux/of.h>
- #include <linux/of_address.h>
- #include <linux/io.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-
- #include "pmc.h"
-
-@@ -27,7 +29,7 @@
-
- struct at91sam9x5_clk_usb {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- };
-
- #define to_at91sam9x5_clk_usb(hw) \
-@@ -35,7 +37,7 @@ struct at91sam9x5_clk_usb {
-
- struct at91rm9200_clk_usb {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- u32 divisors[4];
- };
-
-@@ -45,13 +47,12 @@ struct at91rm9200_clk_usb {
- static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
- {
-- u32 tmp;
-- u8 usbdiv;
- struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
-- struct at91_pmc *pmc = usb->pmc;
-+ unsigned int usbr;
-+ u8 usbdiv;
-
-- tmp = pmc_read(pmc, AT91_PMC_USB);
-- usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
-+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr);
-+ usbdiv = (usbr & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
-
- return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
- }
-@@ -109,33 +110,31 @@ static int at91sam9x5_clk_usb_determine_
-
- static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
- {
-- u32 tmp;
- struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
-- struct at91_pmc *pmc = usb->pmc;
-
- if (index > 1)
- return -EINVAL;
-- tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS;
-- if (index)
-- tmp |= AT91_PMC_USBS;
-- pmc_write(pmc, AT91_PMC_USB, tmp);
-+
-+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS,
-+ index ? AT91_PMC_USBS : 0);
-+
- return 0;
- }
-
- static u8 at91sam9x5_clk_usb_get_parent(struct clk_hw *hw)
- {
- struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
-- struct at91_pmc *pmc = usb->pmc;
-+ unsigned int usbr;
-
-- return pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS;
-+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr);
-+
-+ return usbr & AT91_PMC_USBS;
- }
-
- static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
- {
-- u32 tmp;
- struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
-- struct at91_pmc *pmc = usb->pmc;
- unsigned long div;
-
- if (!rate)
-@@ -145,9 +144,8 @@ static int at91sam9x5_clk_usb_set_rate(s
- if (div > SAM9X5_USB_MAX_DIV + 1 || !div)
- return -EINVAL;
-
-- tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
-- tmp |= (div - 1) << SAM9X5_USB_DIV_SHIFT;
-- pmc_write(pmc, AT91_PMC_USB, tmp);
-+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_OHCIUSBDIV,
-+ (div - 1) << SAM9X5_USB_DIV_SHIFT);
-
- return 0;
- }
-@@ -163,28 +161,28 @@ static const struct clk_ops at91sam9x5_u
- static int at91sam9n12_clk_usb_enable(struct clk_hw *hw)
- {
- struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
-- struct at91_pmc *pmc = usb->pmc;
-
-- pmc_write(pmc, AT91_PMC_USB,
-- pmc_read(pmc, AT91_PMC_USB) | AT91_PMC_USBS);
-+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS,
-+ AT91_PMC_USBS);
-+
- return 0;
- }
-
- static void at91sam9n12_clk_usb_disable(struct clk_hw *hw)
- {
- struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
-- struct at91_pmc *pmc = usb->pmc;
-
-- pmc_write(pmc, AT91_PMC_USB,
-- pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS);
-+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS, 0);
- }
-
- static int at91sam9n12_clk_usb_is_enabled(struct clk_hw *hw)
- {
- struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
-- struct at91_pmc *pmc = usb->pmc;
-+ unsigned int usbr;
-
-- return !!(pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS);
-+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr);
-+
-+ return usbr & AT91_PMC_USBS;
- }
-
- static const struct clk_ops at91sam9n12_usb_ops = {
-@@ -197,7 +195,7 @@ static const struct clk_ops at91sam9n12_
- };
-
- static struct clk * __init
--at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
-+at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
- const char **parent_names, u8 num_parents)
- {
- struct at91sam9x5_clk_usb *usb;
-@@ -216,7 +214,7 @@ at91sam9x5_clk_register_usb(struct at91_
- CLK_SET_RATE_PARENT;
-
- usb->hw.init = &init;
-- usb->pmc = pmc;
-+ usb->regmap = regmap;
-
- clk = clk_register(NULL, &usb->hw);
- if (IS_ERR(clk))
-@@ -226,7 +224,7 @@ at91sam9x5_clk_register_usb(struct at91_
- }
-
- static struct clk * __init
--at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
-+at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name,
- const char *parent_name)
- {
- struct at91sam9x5_clk_usb *usb;
-@@ -244,7 +242,7 @@ at91sam9n12_clk_register_usb(struct at91
- init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
-
- usb->hw.init = &init;
-- usb->pmc = pmc;
-+ usb->regmap = regmap;
-
- clk = clk_register(NULL, &usb->hw);
- if (IS_ERR(clk))
-@@ -257,12 +255,12 @@ static unsigned long at91rm9200_clk_usb_
- unsigned long parent_rate)
- {
- struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
-- struct at91_pmc *pmc = usb->pmc;
-- u32 tmp;
-+ unsigned int pllbr;
- u8 usbdiv;
-
-- tmp = pmc_read(pmc, AT91_CKGR_PLLBR);
-- usbdiv = (tmp & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT;
-+ regmap_read(usb->regmap, AT91_CKGR_PLLBR, &pllbr);
-+
-+ usbdiv = (pllbr & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT;
- if (usb->divisors[usbdiv])
- return parent_rate / usb->divisors[usbdiv];
-
-@@ -310,10 +308,8 @@ static long at91rm9200_clk_usb_round_rat
- static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
- {
-- u32 tmp;
- int i;
- struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
-- struct at91_pmc *pmc = usb->pmc;
- unsigned long div;
-
- if (!rate)
-@@ -323,10 +319,10 @@ static int at91rm9200_clk_usb_set_rate(s
-
- for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
- if (usb->divisors[i] == div) {
-- tmp = pmc_read(pmc, AT91_CKGR_PLLBR) &
-- ~AT91_PMC_USBDIV;
-- tmp |= i << RM9200_USB_DIV_SHIFT;
-- pmc_write(pmc, AT91_CKGR_PLLBR, tmp);
-+ regmap_update_bits(usb->regmap, AT91_CKGR_PLLBR,
-+ AT91_PMC_USBDIV,
-+ i << RM9200_USB_DIV_SHIFT);
-+
- return 0;
- }
- }
-@@ -341,7 +337,7 @@ static const struct clk_ops at91rm9200_u
- };
-
- static struct clk * __init
--at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name,
-+at91rm9200_clk_register_usb(struct regmap *regmap, const char *name,
- const char *parent_name, const u32 *divisors)
- {
- struct at91rm9200_clk_usb *usb;
-@@ -359,7 +355,7 @@ at91rm9200_clk_register_usb(struct at91_
- init.flags = CLK_SET_RATE_PARENT;
-
- usb->hw.init = &init;
-- usb->pmc = pmc;
-+ usb->regmap = regmap;
- memcpy(usb->divisors, divisors, sizeof(usb->divisors));
-
- clk = clk_register(NULL, &usb->hw);
-@@ -369,13 +365,13 @@ at91rm9200_clk_register_usb(struct at91_
- return clk;
- }
-
--void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np)
- {
- struct clk *clk;
- int num_parents;
- const char *parent_names[USB_SOURCE_MAX];
- const char *name = np->name;
-+ struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > USB_SOURCE_MAX)
-@@ -385,19 +381,26 @@ void __init of_at91sam9x5_clk_usb_setup(
-
- of_property_read_string(np, "clock-output-names", &name);
-
-- clk = at91sam9x5_clk_register_usb(pmc, name, parent_names, num_parents);
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
-+ clk = at91sam9x5_clk_register_usb(regmap, name, parent_names,
-+ num_parents);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- }
-+CLK_OF_DECLARE(at91sam9x5_clk_usb, "atmel,at91sam9x5-clk-usb",
-+ of_at91sam9x5_clk_usb_setup);
-
--void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np)
- {
- struct clk *clk;
- const char *parent_name;
- const char *name = np->name;
-+ struct regmap *regmap;
-
- parent_name = of_clk_get_parent_name(np, 0);
- if (!parent_name)
-@@ -405,20 +408,26 @@ void __init of_at91sam9n12_clk_usb_setup
-
- of_property_read_string(np, "clock-output-names", &name);
-
-- clk = at91sam9n12_clk_register_usb(pmc, name, parent_name);
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
-+ clk = at91sam9n12_clk_register_usb(regmap, name, parent_name);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- }
-+CLK_OF_DECLARE(at91sam9n12_clk_usb, "atmel,at91sam9n12-clk-usb",
-+ of_at91sam9n12_clk_usb_setup);
-
--void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
-- struct at91_pmc *pmc)
-+static void __init of_at91rm9200_clk_usb_setup(struct device_node *np)
- {
- struct clk *clk;
- const char *parent_name;
- const char *name = np->name;
- u32 divisors[4] = {0, 0, 0, 0};
-+ struct regmap *regmap;
-
- parent_name = of_clk_get_parent_name(np, 0);
- if (!parent_name)
-@@ -430,9 +439,15 @@ void __init of_at91rm9200_clk_usb_setup(
-
- of_property_read_string(np, "clock-output-names", &name);
-
-- clk = at91rm9200_clk_register_usb(pmc, name, parent_name, divisors);
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
-+ clk = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- }
-+CLK_OF_DECLARE(at91rm9200_clk_usb, "atmel,at91rm9200-clk-usb",
-+ of_at91rm9200_clk_usb_setup);
---- a/drivers/clk/at91/clk-utmi.c
-+++ b/drivers/clk/at91/clk-utmi.c
-@@ -19,6 +19,8 @@
- #include <linux/io.h>
- #include <linux/sched.h>
- #include <linux/wait.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-
- #include "pmc.h"
-
-@@ -26,7 +28,7 @@
-
- struct clk_utmi {
- struct clk_hw hw;
-- struct at91_pmc *pmc;
-+ struct regmap *regmap;
- unsigned int irq;
- wait_queue_head_t wait;
- };
-@@ -43,19 +45,27 @@ static irqreturn_t clk_utmi_irq_handler(
- return IRQ_HANDLED;
- }
-
-+static inline bool clk_utmi_ready(struct regmap *regmap)
-+{
-+ unsigned int status;
-+
-+ regmap_read(regmap, AT91_PMC_SR, &status);
-+
-+ return status & AT91_PMC_LOCKU;
-+}
-+
- static int clk_utmi_prepare(struct clk_hw *hw)
- {
- struct clk_utmi *utmi = to_clk_utmi(hw);
-- struct at91_pmc *pmc = utmi->pmc;
-- u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) | AT91_PMC_UPLLEN |
-- AT91_PMC_UPLLCOUNT | AT91_PMC_BIASEN;
-+ unsigned int uckr = AT91_PMC_UPLLEN | AT91_PMC_UPLLCOUNT |
-+ AT91_PMC_BIASEN;
-
-- pmc_write(pmc, AT91_CKGR_UCKR, tmp);
-+ regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, uckr, uckr);
-
-- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU)) {
-+ while (!clk_utmi_ready(utmi->regmap)) {
- enable_irq(utmi->irq);
- wait_event(utmi->wait,
-- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
-+ clk_utmi_ready(utmi->regmap));
- }
-
- return 0;
-@@ -64,18 +74,15 @@ static int clk_utmi_prepare(struct clk_h
- static int clk_utmi_is_prepared(struct clk_hw *hw)
- {
- struct clk_utmi *utmi = to_clk_utmi(hw);
-- struct at91_pmc *pmc = utmi->pmc;
-
-- return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
-+ return clk_utmi_ready(utmi->regmap);
- }
-
- static void clk_utmi_unprepare(struct clk_hw *hw)
- {
- struct clk_utmi *utmi = to_clk_utmi(hw);
-- struct at91_pmc *pmc = utmi->pmc;
-- u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) & ~AT91_PMC_UPLLEN;
-
-- pmc_write(pmc, AT91_CKGR_UCKR, tmp);
-+ regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, AT91_PMC_UPLLEN, 0);
- }
-
- static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw,
-@@ -93,7 +100,7 @@ static const struct clk_ops utmi_ops = {
- };
-
- static struct clk * __init
--at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
-+at91_clk_register_utmi(struct regmap *regmap, unsigned int irq,
- const char *name, const char *parent_name)
- {
- int ret;
-@@ -112,7 +119,7 @@ at91_clk_register_utmi(struct at91_pmc *
- init.flags = CLK_SET_RATE_GATE;
-
- utmi->hw.init = &init;
-- utmi->pmc = pmc;
-+ utmi->regmap = regmap;
- utmi->irq = irq;
- init_waitqueue_head(&utmi->wait);
- irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
-@@ -132,13 +139,13 @@ at91_clk_register_utmi(struct at91_pmc *
- return clk;
- }
-
--static void __init
--of_at91_clk_utmi_setup(struct device_node *np, struct at91_pmc *pmc)
-+static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
- {
- unsigned int irq;
- struct clk *clk;
- const char *parent_name;
- const char *name = np->name;
-+ struct regmap *regmap;
-
- parent_name = of_clk_get_parent_name(np, 0);
-
-@@ -148,16 +155,16 @@ of_at91_clk_utmi_setup(struct device_nod
- if (!irq)
- return;
-
-- clk = at91_clk_register_utmi(pmc, irq, name, parent_name);
-+ regmap = syscon_node_to_regmap(of_get_parent(np));
-+ if (IS_ERR(regmap))
-+ return;
-+
-+ clk = at91_clk_register_utmi(regmap, irq, name, parent_name);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- return;
- }
--
--void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np,
-- struct at91_pmc *pmc)
--{
-- of_at91_clk_utmi_setup(np, pmc);
--}
-+CLK_OF_DECLARE(at91sam9x5_clk_utmi, "atmel,at91sam9x5-clk-utmi",
-+ of_at91sam9x5_clk_utmi_setup);
---- a/drivers/clk/at91/pmc.c
-+++ b/drivers/clk/at91/pmc.c
-@@ -20,6 +20,7 @@
- #include <linux/irqdomain.h>
- #include <linux/of_irq.h>
- #include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-
- #include <asm/proc-fns.h>
-
-@@ -70,14 +71,14 @@ static void pmc_irq_mask(struct irq_data
- {
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
-- pmc_write(pmc, AT91_PMC_IDR, 1 << d->hwirq);
-+ regmap_write(pmc->regmap, AT91_PMC_IDR, 1 << d->hwirq);
- }
-
- static void pmc_irq_unmask(struct irq_data *d)
- {
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
-- pmc_write(pmc, AT91_PMC_IER, 1 << d->hwirq);
-+ regmap_write(pmc->regmap, AT91_PMC_IER, 1 << d->hwirq);
- }
-
- static int pmc_irq_set_type(struct irq_data *d, unsigned type)
-@@ -94,15 +95,15 @@ static void pmc_irq_suspend(struct irq_d
- {
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
-- pmc->imr = pmc_read(pmc, AT91_PMC_IMR);
-- pmc_write(pmc, AT91_PMC_IDR, pmc->imr);
-+ regmap_read(pmc->regmap, AT91_PMC_IMR, &pmc->imr);
-+ regmap_write(pmc->regmap, AT91_PMC_IDR, pmc->imr);
- }
-
- static void pmc_irq_resume(struct irq_data *d)
- {
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
-- pmc_write(pmc, AT91_PMC_IER, pmc->imr);
-+ regmap_write(pmc->regmap, AT91_PMC_IER, pmc->imr);
- }
-
- static struct irq_chip pmc_irq = {
-@@ -161,10 +162,14 @@ static const struct irq_domain_ops pmc_i
- static irqreturn_t pmc_irq_handler(int irq, void *data)
- {
- struct at91_pmc *pmc = (struct at91_pmc *)data;
-+ unsigned int tmpsr, imr;
- unsigned long sr;
- int n;
-
-- sr = pmc_read(pmc, AT91_PMC_SR) & pmc_read(pmc, AT91_PMC_IMR);
-+ regmap_read(pmc->regmap, AT91_PMC_SR, &tmpsr);
-+ regmap_read(pmc->regmap, AT91_PMC_IMR, &imr);
-+
-+ sr = tmpsr & imr;
- if (!sr)
- return IRQ_NONE;
-
-@@ -239,17 +244,15 @@ static struct at91_pmc *__init at91_pmc_
- if (!pmc)
- return NULL;
-
-- spin_lock_init(&pmc->lock);
- pmc->regmap = regmap;
- pmc->virq = virq;
- pmc->caps = caps;
-
- pmc->irqdomain = irq_domain_add_linear(np, 32, &pmc_irq_ops, pmc);
--
- if (!pmc->irqdomain)
- goto out_free_pmc;
-
-- pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
-+ regmap_write(pmc->regmap, AT91_PMC_IDR, 0xffffffff);
- if (request_irq(pmc->virq, pmc_irq_handler,
- IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
- goto out_remove_irqdomain;
-@@ -264,137 +267,10 @@ static struct at91_pmc *__init at91_pmc_
- return NULL;
- }
-
--static const struct of_device_id pmc_clk_ids[] __initconst = {
-- /* Slow oscillator */
-- {
-- .compatible = "atmel,at91sam9260-clk-slow",
-- .data = of_at91sam9260_clk_slow_setup,
-- },
-- /* Main clock */
-- {
-- .compatible = "atmel,at91rm9200-clk-main-osc",
-- .data = of_at91rm9200_clk_main_osc_setup,
-- },
-- {
-- .compatible = "atmel,at91sam9x5-clk-main-rc-osc",
-- .data = of_at91sam9x5_clk_main_rc_osc_setup,
-- },
-- {
-- .compatible = "atmel,at91rm9200-clk-main",
-- .data = of_at91rm9200_clk_main_setup,
-- },
-- {
-- .compatible = "atmel,at91sam9x5-clk-main",
-- .data = of_at91sam9x5_clk_main_setup,
-- },
-- /* PLL clocks */
-- {
-- .compatible = "atmel,at91rm9200-clk-pll",
-- .data = of_at91rm9200_clk_pll_setup,
-- },
-- {
-- .compatible = "atmel,at91sam9g45-clk-pll",
-- .data = of_at91sam9g45_clk_pll_setup,
-- },
-- {
-- .compatible = "atmel,at91sam9g20-clk-pllb",
-- .data = of_at91sam9g20_clk_pllb_setup,
-- },
-- {
-- .compatible = "atmel,sama5d3-clk-pll",
-- .data = of_sama5d3_clk_pll_setup,
-- },
-- {
-- .compatible = "atmel,at91sam9x5-clk-plldiv",
-- .data = of_at91sam9x5_clk_plldiv_setup,
-- },
-- /* Master clock */
-- {
-- .compatible = "atmel,at91rm9200-clk-master",
-- .data = of_at91rm9200_clk_master_setup,
-- },
-- {
-- .compatible = "atmel,at91sam9x5-clk-master",
-- .data = of_at91sam9x5_clk_master_setup,
-- },
-- /* System clocks */
-- {
-- .compatible = "atmel,at91rm9200-clk-system",
-- .data = of_at91rm9200_clk_sys_setup,
-- },
-- /* Peripheral clocks */
-- {
-- .compatible = "atmel,at91rm9200-clk-peripheral",
-- .data = of_at91rm9200_clk_periph_setup,
-- },
-- {
-- .compatible = "atmel,at91sam9x5-clk-peripheral",
-- .data = of_at91sam9x5_clk_periph_setup,
-- },
-- /* Programmable clocks */
-- {
-- .compatible = "atmel,at91rm9200-clk-programmable",
-- .data = of_at91rm9200_clk_prog_setup,
-- },
-- {
-- .compatible = "atmel,at91sam9g45-clk-programmable",
-- .data = of_at91sam9g45_clk_prog_setup,
-- },
-- {
-- .compatible = "atmel,at91sam9x5-clk-programmable",
-- .data = of_at91sam9x5_clk_prog_setup,
-- },
-- /* UTMI clock */
--#if defined(CONFIG_HAVE_AT91_UTMI)
-- {
-- .compatible = "atmel,at91sam9x5-clk-utmi",
-- .data = of_at91sam9x5_clk_utmi_setup,
-- },
--#endif
-- /* USB clock */
--#if defined(CONFIG_HAVE_AT91_USB_CLK)
-- {
-- .compatible = "atmel,at91rm9200-clk-usb",
-- .data = of_at91rm9200_clk_usb_setup,
-- },
-- {
-- .compatible = "atmel,at91sam9x5-clk-usb",
-- .data = of_at91sam9x5_clk_usb_setup,
-- },
-- {
-- .compatible = "atmel,at91sam9n12-clk-usb",
-- .data = of_at91sam9n12_clk_usb_setup,
-- },
--#endif
-- /* SMD clock */
--#if defined(CONFIG_HAVE_AT91_SMD)
-- {
-- .compatible = "atmel,at91sam9x5-clk-smd",
-- .data = of_at91sam9x5_clk_smd_setup,
-- },
--#endif
--#if defined(CONFIG_HAVE_AT91_H32MX)
-- {
-- .compatible = "atmel,sama5d4-clk-h32mx",
-- .data = of_sama5d4_clk_h32mx_setup,
-- },
--#endif
--#if defined(CONFIG_HAVE_AT91_GENERATED_CLK)
-- {
-- .compatible = "atmel,sama5d2-clk-generated",
-- .data = of_sama5d2_clk_generated_setup,
-- },
--#endif
-- { /*sentinel*/ }
--};
--
- static void __init of_at91_pmc_setup(struct device_node *np,
- const struct at91_pmc_caps *caps)
- {
- struct at91_pmc *pmc;
-- struct device_node *childnp;
-- void (*clk_setup)(struct device_node *, struct at91_pmc *);
-- const struct of_device_id *clk_id;
- void __iomem *regbase = of_iomap(np, 0);
- struct regmap *regmap;
- int virq;
-@@ -410,13 +286,6 @@ static void __init of_at91_pmc_setup(str
- pmc = at91_pmc_init(np, regmap, regbase, virq, caps);
- if (!pmc)
- return;
-- for_each_child_of_node(np, childnp) {
-- clk_id = of_match_node(pmc_clk_ids, childnp);
-- if (!clk_id)
-- continue;
-- clk_setup = clk_id->data;
-- clk_setup(childnp, pmc);
-- }
- }
-
- static void __init of_at91rm9200_pmc_setup(struct device_node *np)
---- a/drivers/clk/at91/pmc.h
-+++ b/drivers/clk/at91/pmc.h
-@@ -17,6 +17,8 @@
- #include <linux/regmap.h>
- #include <linux/spinlock.h>
-
-+extern spinlock_t pmc_pcr_lock;
-+
- struct clk_range {
- unsigned long min;
- unsigned long max;
-@@ -31,99 +33,12 @@ struct at91_pmc_caps {
- struct at91_pmc {
- struct regmap *regmap;
- int virq;
-- spinlock_t lock;
- const struct at91_pmc_caps *caps;
- struct irq_domain *irqdomain;
- u32 imr;
- };
-
--static inline void pmc_lock(struct at91_pmc *pmc)
--{
-- spin_lock(&pmc->lock);
--}
--
--static inline void pmc_unlock(struct at91_pmc *pmc)
--{
-- spin_unlock(&pmc->lock);
--}
--
--static inline u32 pmc_read(struct at91_pmc *pmc, int offset)
--{
-- unsigned int ret = 0;
--
-- regmap_read(pmc->regmap, offset, &ret);
--
-- return ret;
--}
--
--static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value)
--{
-- regmap_write(pmc->regmap, offset, value);
--}
--
- int of_at91_get_clk_range(struct device_node *np, const char *propname,
- struct clk_range *range);
-
--void of_at91sam9260_clk_slow_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
--void of_at91rm9200_clk_main_osc_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91rm9200_clk_main_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91sam9x5_clk_main_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
--void of_at91rm9200_clk_pll_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91sam9g45_clk_pll_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91sam9g20_clk_pllb_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_sama5d3_clk_pll_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
--void of_at91rm9200_clk_master_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91sam9x5_clk_master_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
--void of_at91rm9200_clk_sys_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
--void of_at91rm9200_clk_periph_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91sam9x5_clk_periph_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
--void of_at91rm9200_clk_prog_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91sam9g45_clk_prog_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91sam9x5_clk_prog_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
--void of_at91sam9x5_clk_utmi_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
--void of_at91rm9200_clk_usb_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91sam9x5_clk_usb_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--void of_at91sam9n12_clk_usb_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
--void of_at91sam9x5_clk_smd_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
--void of_sama5d4_clk_h32mx_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
--void of_sama5d2_clk_generated_setup(struct device_node *np,
-- struct at91_pmc *pmc);
--
- #endif /* __PMC_H_ */
diff --git a/patches/0002-kbuild-Add-option-to-turn-incompatible-pointer-check.patch b/patches/0002-kbuild-Add-option-to-turn-incompatible-pointer-check.patch
deleted file mode 100644
index e77f4c506af276..00000000000000
--- a/patches/0002-kbuild-Add-option-to-turn-incompatible-pointer-check.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From: Daniel Wagner <daniel.wagner@bmw-carit.de>
-Date: Fri, 19 Feb 2016 09:46:38 +0100
-Subject: [PATCH 2/5] kbuild: Add option to turn incompatible pointer check
- into error
-
-With the introduction of the simple wait API we have two very
-similar APIs in the kernel. For example wake_up() and swake_up()
-is only one character away. Although the compiler will warn
-happily the wrong usage it keeps on going an even links the kernel.
-Thomas and Peter would rather like to see early missuses reported
-as error early on.
-
-In a first attempt we tried to wrap all swait and wait calls
-into a macro which has an compile time type assertion. The result
-was pretty ugly and wasn't able to catch all wrong usages.
-woken_wake_function(), autoremove_wake_function() and wake_bit_function()
-are assigned as function pointers. Wrapping them with a macro around is
-not possible. Prefixing them with '_' was also not a real option
-because there some users in the kernel which do use them as well.
-All in all this attempt looked to intrusive and too ugly.
-
-An alternative is to turn the pointer type check into an error which
-catches wrong type uses. Obviously not only the swait/wait ones. That
-isn't a bad thing either.
-
-Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Cc: linux-rt-users@vger.kernel.org
-Cc: Boqun Feng <boqun.feng@gmail.com>
-Cc: Marcelo Tosatti <mtosatti@redhat.com>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Link: http://lkml.kernel.org/r/1455871601-27484-3-git-send-email-wagi@monom.org
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- Makefile | 3 +++
- 1 file changed, 3 insertions(+)
-
---- a/Makefile
-+++ b/Makefile
-@@ -767,6 +767,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wer
- # Prohibit date/time macros, which would make the build non-deterministic
- KBUILD_CFLAGS += $(call cc-option,-Werror=date-time)
-
-+# enforce correct pointer usage
-+KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
-+
- # use the deterministic mode of AR if available
- KBUILD_ARFLAGS := $(call ar-option,D)
-
diff --git a/patches/0003-KVM-Use-simple-waitqueue-for-vcpu-wq.patch b/patches/0003-KVM-Use-simple-waitqueue-for-vcpu-wq.patch
deleted file mode 100644
index f54be20e69f248..00000000000000
--- a/patches/0003-KVM-Use-simple-waitqueue-for-vcpu-wq.patch
+++ /dev/null
@@ -1,420 +0,0 @@
-From: Marcelo Tosatti <mtosatti@redhat.com>
-Date: Fri, 19 Feb 2016 09:46:39 +0100
-Subject: [PATCH 3/5] KVM: Use simple waitqueue for vcpu->wq
-
-The problem:
-
-On -rt, an emulated LAPIC timer instances has the following path:
-
-1) hard interrupt
-2) ksoftirqd is scheduled
-3) ksoftirqd wakes up vcpu thread
-4) vcpu thread is scheduled
-
-This extra context switch introduces unnecessary latency in the
-LAPIC path for a KVM guest.
-
-The solution:
-
-Allow waking up vcpu thread from hardirq context,
-thus avoiding the need for ksoftirqd to be scheduled.
-
-Normal waitqueues make use of spinlocks, which on -RT
-are sleepable locks. Therefore, waking up a waitqueue
-waiter involves locking a sleeping lock, which
-is not allowed from hard interrupt context.
-
-cyclictest command line:
-
-This patch reduces the average latency in my tests from 14us to 11us.
-
-Daniel writes:
-Paolo asked for numbers from kvm-unit-tests/tscdeadline_latency
-benchmark on mainline. The test was run 1000 times on
-tip/sched/core 4.4.0-rc8-01134-g0905f04:
-
- ./x86-run x86/tscdeadline_latency.flat -cpu host
-
-with idle=poll.
-
-The test seems not to deliver really stable numbers though most of
-them are smaller. Paolo write:
-
-"Anything above ~10000 cycles means that the host went to C1 or
-lower---the number means more or less nothing in that case.
-
-The mean shows an improvement indeed."
-
-Before:
-
- min max mean std
-count 1000.000000 1000.000000 1000.000000 1000.000000
-mean 5162.596000 2019270.084000 5824.491541 20681.645558
-std 75.431231 622607.723969 89.575700 6492.272062
-min 4466.000000 23928.000000 5537.926500 585.864966
-25% 5163.000000 1613252.750000 5790.132275 16683.745433
-50% 5175.000000 2281919.000000 5834.654000 23151.990026
-75% 5190.000000 2382865.750000 5861.412950 24148.206168
-max 5228.000000 4175158.000000 6254.827300 46481.048691
-
-After
- min max mean std
-count 1000.000000 1000.00000 1000.000000 1000.000000
-mean 5143.511000 2076886.10300 5813.312474 21207.357565
-std 77.668322 610413.09583 86.541500 6331.915127
-min 4427.000000 25103.00000 5529.756600 559.187707
-25% 5148.000000 1691272.75000 5784.889825 17473.518244
-50% 5160.000000 2308328.50000 5832.025000 23464.837068
-75% 5172.000000 2393037.75000 5853.177675 24223.969976
-max 5222.000000 3922458.00000 6186.720500 42520.379830
-
-[Patch was originaly based on the swait implementation found in the -rt
- tree. Daniel ported it to mainline's version and gathered the
- benchmark numbers for tscdeadline_latency test.]
-
-Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Cc: linux-rt-users@vger.kernel.org
-Cc: Boqun Feng <boqun.feng@gmail.com>
-Cc: Marcelo Tosatti <mtosatti@redhat.com>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Link: http://lkml.kernel.org/r/1455871601-27484-4-git-send-email-wagi@monom.org
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- arch/arm/kvm/arm.c | 8 ++++----
- arch/arm/kvm/psci.c | 4 ++--
- arch/mips/kvm/mips.c | 8 ++++----
- arch/powerpc/include/asm/kvm_host.h | 4 ++--
- arch/powerpc/kvm/book3s_hv.c | 23 +++++++++++------------
- arch/s390/include/asm/kvm_host.h | 2 +-
- arch/s390/kvm/interrupt.c | 4 ++--
- arch/x86/kvm/lapic.c | 6 +++---
- include/linux/kvm_host.h | 5 +++--
- virt/kvm/async_pf.c | 4 ++--
- virt/kvm/kvm_main.c | 17 ++++++++---------
- 11 files changed, 42 insertions(+), 43 deletions(-)
-
---- a/arch/arm/kvm/arm.c
-+++ b/arch/arm/kvm/arm.c
-@@ -498,18 +498,18 @@ static void kvm_arm_resume_guest(struct
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
-- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
-+ struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
- vcpu->arch.pause = false;
-- wake_up_interruptible(wq);
-+ swake_up(wq);
- }
- }
-
- static void vcpu_sleep(struct kvm_vcpu *vcpu)
- {
-- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
-+ struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
-- wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
-+ swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
- (!vcpu->arch.pause)));
- }
-
---- a/arch/arm/kvm/psci.c
-+++ b/arch/arm/kvm/psci.c
-@@ -70,7 +70,7 @@ static unsigned long kvm_psci_vcpu_on(st
- {
- struct kvm *kvm = source_vcpu->kvm;
- struct kvm_vcpu *vcpu = NULL;
-- wait_queue_head_t *wq;
-+ struct swait_queue_head *wq;
- unsigned long cpu_id;
- unsigned long context_id;
- phys_addr_t target_pc;
-@@ -119,7 +119,7 @@ static unsigned long kvm_psci_vcpu_on(st
- smp_mb(); /* Make sure the above is visible */
-
- wq = kvm_arch_vcpu_wq(vcpu);
-- wake_up_interruptible(wq);
-+ swake_up(wq);
-
- return PSCI_RET_SUCCESS;
- }
---- a/arch/mips/kvm/mips.c
-+++ b/arch/mips/kvm/mips.c
-@@ -445,8 +445,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_
-
- dvcpu->arch.wait = 0;
-
-- if (waitqueue_active(&dvcpu->wq))
-- wake_up_interruptible(&dvcpu->wq);
-+ if (swait_active(&dvcpu->wq))
-+ swake_up(&dvcpu->wq);
-
- return 0;
- }
-@@ -1174,8 +1174,8 @@ static void kvm_mips_comparecount_func(u
- kvm_mips_callbacks->queue_timer_int(vcpu);
-
- vcpu->arch.wait = 0;
-- if (waitqueue_active(&vcpu->wq))
-- wake_up_interruptible(&vcpu->wq);
-+ if (swait_active(&vcpu->wq))
-+ swake_up(&vcpu->wq);
- }
-
- /* low level hrtimer wake routine */
---- a/arch/powerpc/include/asm/kvm_host.h
-+++ b/arch/powerpc/include/asm/kvm_host.h
-@@ -286,7 +286,7 @@ struct kvmppc_vcore {
- struct list_head runnable_threads;
- struct list_head preempt_list;
- spinlock_t lock;
-- wait_queue_head_t wq;
-+ struct swait_queue_head wq;
- spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
- u64 stolen_tb;
- u64 preempt_tb;
-@@ -626,7 +626,7 @@ struct kvm_vcpu_arch {
- u8 prodded;
- u32 last_inst;
-
-- wait_queue_head_t *wqp;
-+ struct swait_queue_head *wqp;
- struct kvmppc_vcore *vcore;
- int ret;
- int trap;
---- a/arch/powerpc/kvm/book3s_hv.c
-+++ b/arch/powerpc/kvm/book3s_hv.c
-@@ -114,11 +114,11 @@ static bool kvmppc_ipi_thread(int cpu)
- static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
- {
- int cpu;
-- wait_queue_head_t *wqp;
-+ struct swait_queue_head *wqp;
-
- wqp = kvm_arch_vcpu_wq(vcpu);
-- if (waitqueue_active(wqp)) {
-- wake_up_interruptible(wqp);
-+ if (swait_active(wqp)) {
-+ swake_up(wqp);
- ++vcpu->stat.halt_wakeup;
- }
-
-@@ -707,8 +707,8 @@ int kvmppc_pseries_do_hcall(struct kvm_v
- tvcpu->arch.prodded = 1;
- smp_mb();
- if (vcpu->arch.ceded) {
-- if (waitqueue_active(&vcpu->wq)) {
-- wake_up_interruptible(&vcpu->wq);
-+ if (swait_active(&vcpu->wq)) {
-+ swake_up(&vcpu->wq);
- vcpu->stat.halt_wakeup++;
- }
- }
-@@ -1447,7 +1447,7 @@ static struct kvmppc_vcore *kvmppc_vcore
- INIT_LIST_HEAD(&vcore->runnable_threads);
- spin_lock_init(&vcore->lock);
- spin_lock_init(&vcore->stoltb_lock);
-- init_waitqueue_head(&vcore->wq);
-+ init_swait_queue_head(&vcore->wq);
- vcore->preempt_tb = TB_NIL;
- vcore->lpcr = kvm->arch.lpcr;
- vcore->first_vcpuid = core * threads_per_subcore;
-@@ -2519,10 +2519,9 @@ static void kvmppc_vcore_blocked(struct
- {
- struct kvm_vcpu *vcpu;
- int do_sleep = 1;
-+ DECLARE_SWAITQUEUE(wait);
-
-- DEFINE_WAIT(wait);
--
-- prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
-+ prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
-
- /*
- * Check one last time for pending exceptions and ceded state after
-@@ -2536,7 +2535,7 @@ static void kvmppc_vcore_blocked(struct
- }
-
- if (!do_sleep) {
-- finish_wait(&vc->wq, &wait);
-+ finish_swait(&vc->wq, &wait);
- return;
- }
-
-@@ -2544,7 +2543,7 @@ static void kvmppc_vcore_blocked(struct
- trace_kvmppc_vcore_blocked(vc, 0);
- spin_unlock(&vc->lock);
- schedule();
-- finish_wait(&vc->wq, &wait);
-+ finish_swait(&vc->wq, &wait);
- spin_lock(&vc->lock);
- vc->vcore_state = VCORE_INACTIVE;
- trace_kvmppc_vcore_blocked(vc, 1);
-@@ -2600,7 +2599,7 @@ static int kvmppc_run_vcpu(struct kvm_ru
- kvmppc_start_thread(vcpu, vc);
- trace_kvm_guest_enter(vcpu);
- } else if (vc->vcore_state == VCORE_SLEEPING) {
-- wake_up(&vc->wq);
-+ swake_up(&vc->wq);
- }
-
- }
---- a/arch/s390/include/asm/kvm_host.h
-+++ b/arch/s390/include/asm/kvm_host.h
-@@ -427,7 +427,7 @@ struct kvm_s390_irq_payload {
- struct kvm_s390_local_interrupt {
- spinlock_t lock;
- struct kvm_s390_float_interrupt *float_int;
-- wait_queue_head_t *wq;
-+ struct swait_queue_head *wq;
- atomic_t *cpuflags;
- DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
- struct kvm_s390_irq_payload irq;
---- a/arch/s390/kvm/interrupt.c
-+++ b/arch/s390/kvm/interrupt.c
-@@ -868,13 +868,13 @@ int kvm_s390_handle_wait(struct kvm_vcpu
-
- void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
- {
-- if (waitqueue_active(&vcpu->wq)) {
-+ if (swait_active(&vcpu->wq)) {
- /*
- * The vcpu gave up the cpu voluntarily, mark it as a good
- * yield-candidate.
- */
- vcpu->preempted = true;
-- wake_up_interruptible(&vcpu->wq);
-+ swake_up(&vcpu->wq);
- vcpu->stat.halt_wakeup++;
- }
- }
---- a/arch/x86/kvm/lapic.c
-+++ b/arch/x86/kvm/lapic.c
-@@ -1195,7 +1195,7 @@ static void apic_update_lvtt(struct kvm_
- static void apic_timer_expired(struct kvm_lapic *apic)
- {
- struct kvm_vcpu *vcpu = apic->vcpu;
-- wait_queue_head_t *q = &vcpu->wq;
-+ struct swait_queue_head *q = &vcpu->wq;
- struct kvm_timer *ktimer = &apic->lapic_timer;
-
- if (atomic_read(&apic->lapic_timer.pending))
-@@ -1204,8 +1204,8 @@ static void apic_timer_expired(struct kv
- atomic_inc(&apic->lapic_timer.pending);
- kvm_set_pending_timer(vcpu);
-
-- if (waitqueue_active(q))
-- wake_up_interruptible(q);
-+ if (swait_active(q))
-+ swake_up(q);
-
- if (apic_lvtt_tscdeadline(apic))
- ktimer->expired_tscdeadline = ktimer->tscdeadline;
---- a/include/linux/kvm_host.h
-+++ b/include/linux/kvm_host.h
-@@ -25,6 +25,7 @@
- #include <linux/irqflags.h>
- #include <linux/context_tracking.h>
- #include <linux/irqbypass.h>
-+#include <linux/swait.h>
- #include <asm/signal.h>
-
- #include <linux/kvm.h>
-@@ -243,7 +244,7 @@ struct kvm_vcpu {
- int fpu_active;
- int guest_fpu_loaded, guest_xcr0_loaded;
- unsigned char fpu_counter;
-- wait_queue_head_t wq;
-+ struct swait_queue_head wq;
- struct pid *pid;
- int sigset_active;
- sigset_t sigset;
-@@ -794,7 +795,7 @@ static inline bool kvm_arch_has_assigned
- }
- #endif
-
--static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
-+static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
- {
- #ifdef __KVM_HAVE_ARCH_WQP
- return vcpu->arch.wqp;
---- a/virt/kvm/async_pf.c
-+++ b/virt/kvm/async_pf.c
-@@ -98,8 +98,8 @@ static void async_pf_execute(struct work
- * This memory barrier pairs with prepare_to_wait's set_current_state()
- */
- smp_mb();
-- if (waitqueue_active(&vcpu->wq))
-- wake_up_interruptible(&vcpu->wq);
-+ if (swait_active(&vcpu->wq))
-+ swake_up(&vcpu->wq);
-
- mmput(mm);
- kvm_put_kvm(vcpu->kvm);
---- a/virt/kvm/kvm_main.c
-+++ b/virt/kvm/kvm_main.c
-@@ -226,8 +226,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu,
- vcpu->kvm = kvm;
- vcpu->vcpu_id = id;
- vcpu->pid = NULL;
-- vcpu->halt_poll_ns = 0;
-- init_waitqueue_head(&vcpu->wq);
-+ init_swait_queue_head(&vcpu->wq);
- kvm_async_pf_vcpu_init(vcpu);
-
- vcpu->pre_pcpu = -1;
-@@ -2003,7 +2002,7 @@ static int kvm_vcpu_check_block(struct k
- void kvm_vcpu_block(struct kvm_vcpu *vcpu)
- {
- ktime_t start, cur;
-- DEFINE_WAIT(wait);
-+ DECLARE_SWAITQUEUE(wait);
- bool waited = false;
- u64 block_ns;
-
-@@ -2028,7 +2027,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
- kvm_arch_vcpu_blocking(vcpu);
-
- for (;;) {
-- prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
-+ prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
-
- if (kvm_vcpu_check_block(vcpu) < 0)
- break;
-@@ -2037,7 +2036,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
- schedule();
- }
-
-- finish_wait(&vcpu->wq, &wait);
-+ finish_swait(&vcpu->wq, &wait);
- cur = ktime_get();
-
- kvm_arch_vcpu_unblocking(vcpu);
-@@ -2069,11 +2068,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu
- {
- int me;
- int cpu = vcpu->cpu;
-- wait_queue_head_t *wqp;
-+ struct swait_queue_head *wqp;
-
- wqp = kvm_arch_vcpu_wq(vcpu);
-- if (waitqueue_active(wqp)) {
-- wake_up_interruptible(wqp);
-+ if (swait_active(wqp)) {
-+ swake_up(wqp);
- ++vcpu->stat.halt_wakeup;
- }
-
-@@ -2174,7 +2173,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *m
- continue;
- if (vcpu == me)
- continue;
-- if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
-+ if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
- continue;
- if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
- continue;
diff --git a/patches/0003-clk-at91-remove-IRQ-handling-and-use-polling.patch b/patches/0003-clk-at91-remove-IRQ-handling-and-use-polling.patch
deleted file mode 100644
index b64bd5cb07ba9e..00000000000000
--- a/patches/0003-clk-at91-remove-IRQ-handling-and-use-polling.patch
+++ /dev/null
@@ -1,1033 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Date: Wed, 16 Sep 2015 23:47:39 +0200
-Subject: [PATCH 03/13] clk: at91: remove IRQ handling and use polling
-
-The AT91 clock drivers make use of IRQs to avoid polling when waiting for
-some clocks to be enabled. Unfortunately, this leads to a crash when those
-IRQs are threaded (which happens when using preempt-rt) because they are
-registered before thread creation is possible.
-
-Use polling on those clocks instead to avoid the problem.
-
-Acked-by: Stephen Boyd <sboyd@codeaurora.org>
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clk/at91/clk-main.c | 138 ++++------------------------------------
- drivers/clk/at91/clk-master.c | 46 +------------
- drivers/clk/at91/clk-pll.c | 47 +------------
- drivers/clk/at91/clk-system.c | 56 +---------------
- drivers/clk/at91/clk-utmi.c | 49 +-------------
- drivers/clk/at91/pmc.c | 144 ------------------------------------------
- drivers/clk/at91/pmc.h | 3
- 7 files changed, 37 insertions(+), 446 deletions(-)
-
---- a/drivers/clk/at91/clk-main.c
-+++ b/drivers/clk/at91/clk-main.c
-@@ -13,15 +13,8 @@
- #include <linux/clk/at91_pmc.h>
- #include <linux/delay.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/of_irq.h>
--#include <linux/io.h>
--#include <linux/interrupt.h>
--#include <linux/irq.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
--#include <linux/sched.h>
--#include <linux/wait.h>
-
- #include "pmc.h"
-
-@@ -37,8 +30,6 @@
- struct clk_main_osc {
- struct clk_hw hw;
- struct regmap *regmap;
-- unsigned int irq;
-- wait_queue_head_t wait;
- };
-
- #define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw)
-@@ -46,8 +37,6 @@ struct clk_main_osc {
- struct clk_main_rc_osc {
- struct clk_hw hw;
- struct regmap *regmap;
-- unsigned int irq;
-- wait_queue_head_t wait;
- unsigned long frequency;
- unsigned long accuracy;
- };
-@@ -64,23 +53,11 @@ struct clk_rm9200_main {
- struct clk_sam9x5_main {
- struct clk_hw hw;
- struct regmap *regmap;
-- unsigned int irq;
-- wait_queue_head_t wait;
- u8 parent;
- };
-
- #define to_clk_sam9x5_main(hw) container_of(hw, struct clk_sam9x5_main, hw)
-
--static irqreturn_t clk_main_osc_irq_handler(int irq, void *dev_id)
--{
-- struct clk_main_osc *osc = dev_id;
--
-- wake_up(&osc->wait);
-- disable_irq_nosync(osc->irq);
--
-- return IRQ_HANDLED;
--}
--
- static inline bool clk_main_osc_ready(struct regmap *regmap)
- {
- unsigned int status;
-@@ -107,11 +84,8 @@ static int clk_main_osc_prepare(struct c
- regmap_write(regmap, AT91_CKGR_MOR, tmp);
- }
-
-- while (!clk_main_osc_ready(regmap)) {
-- enable_irq(osc->irq);
-- wait_event(osc->wait,
-- clk_main_osc_ready(regmap));
-- }
-+ while (!clk_main_osc_ready(regmap))
-+ cpu_relax();
-
- return 0;
- }
-@@ -156,17 +130,15 @@ static const struct clk_ops main_osc_ops
-
- static struct clk * __init
- at91_clk_register_main_osc(struct regmap *regmap,
-- unsigned int irq,
- const char *name,
- const char *parent_name,
- bool bypass)
- {
-- int ret;
- struct clk_main_osc *osc;
- struct clk *clk = NULL;
- struct clk_init_data init;
-
-- if (!irq || !name || !parent_name)
-+ if (!name || !parent_name)
- return ERR_PTR(-EINVAL);
-
- osc = kzalloc(sizeof(*osc), GFP_KERNEL);
-@@ -181,16 +153,6 @@ at91_clk_register_main_osc(struct regmap
-
- osc->hw.init = &init;
- osc->regmap = regmap;
-- osc->irq = irq;
--
-- init_waitqueue_head(&osc->wait);
-- irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
-- ret = request_irq(osc->irq, clk_main_osc_irq_handler,
-- IRQF_TRIGGER_HIGH, name, osc);
-- if (ret) {
-- kfree(osc);
-- return ERR_PTR(ret);
-- }
-
- if (bypass)
- regmap_update_bits(regmap,
-@@ -199,10 +161,8 @@ at91_clk_register_main_osc(struct regmap
- AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
-
- clk = clk_register(NULL, &osc->hw);
-- if (IS_ERR(clk)) {
-- free_irq(irq, osc);
-+ if (IS_ERR(clk))
- kfree(osc);
-- }
-
- return clk;
- }
-@@ -210,7 +170,6 @@ at91_clk_register_main_osc(struct regmap
- static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np)
- {
- struct clk *clk;
-- unsigned int irq;
- const char *name = np->name;
- const char *parent_name;
- struct regmap *regmap;
-@@ -224,11 +183,7 @@ static void __init of_at91rm9200_clk_mai
- if (IS_ERR(regmap))
- return;
-
-- irq = irq_of_parse_and_map(np, 0);
-- if (!irq)
-- return;
--
-- clk = at91_clk_register_main_osc(regmap, irq, name, parent_name, bypass);
-+ clk = at91_clk_register_main_osc(regmap, name, parent_name, bypass);
- if (IS_ERR(clk))
- return;
-
-@@ -237,16 +192,6 @@ static void __init of_at91rm9200_clk_mai
- CLK_OF_DECLARE(at91rm9200_clk_main_osc, "atmel,at91rm9200-clk-main-osc",
- of_at91rm9200_clk_main_osc_setup);
-
--static irqreturn_t clk_main_rc_osc_irq_handler(int irq, void *dev_id)
--{
-- struct clk_main_rc_osc *osc = dev_id;
--
-- wake_up(&osc->wait);
-- disable_irq_nosync(osc->irq);
--
-- return IRQ_HANDLED;
--}
--
- static bool clk_main_rc_osc_ready(struct regmap *regmap)
- {
- unsigned int status;
-@@ -269,11 +214,8 @@ static int clk_main_rc_osc_prepare(struc
- MOR_KEY_MASK | AT91_PMC_MOSCRCEN,
- AT91_PMC_MOSCRCEN | AT91_PMC_KEY);
-
-- while (!clk_main_rc_osc_ready(regmap)) {
-- enable_irq(osc->irq);
-- wait_event(osc->wait,
-- clk_main_rc_osc_ready(regmap));
-- }
-+ while (!clk_main_rc_osc_ready(regmap))
-+ cpu_relax();
-
- return 0;
- }
-@@ -331,11 +273,9 @@ static const struct clk_ops main_rc_osc_
-
- static struct clk * __init
- at91_clk_register_main_rc_osc(struct regmap *regmap,
-- unsigned int irq,
- const char *name,
- u32 frequency, u32 accuracy)
- {
-- int ret;
- struct clk_main_rc_osc *osc;
- struct clk *clk = NULL;
- struct clk_init_data init;
-@@ -355,22 +295,12 @@ at91_clk_register_main_rc_osc(struct reg
-
- osc->hw.init = &init;
- osc->regmap = regmap;
-- osc->irq = irq;
- osc->frequency = frequency;
- osc->accuracy = accuracy;
-
-- init_waitqueue_head(&osc->wait);
-- irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
-- ret = request_irq(osc->irq, clk_main_rc_osc_irq_handler,
-- IRQF_TRIGGER_HIGH, name, osc);
-- if (ret)
-- return ERR_PTR(ret);
--
- clk = clk_register(NULL, &osc->hw);
-- if (IS_ERR(clk)) {
-- free_irq(irq, osc);
-+ if (IS_ERR(clk))
- kfree(osc);
-- }
-
- return clk;
- }
-@@ -378,7 +308,6 @@ at91_clk_register_main_rc_osc(struct reg
- static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np)
- {
- struct clk *clk;
-- unsigned int irq;
- u32 frequency = 0;
- u32 accuracy = 0;
- const char *name = np->name;
-@@ -388,16 +317,11 @@ static void __init of_at91sam9x5_clk_mai
- of_property_read_u32(np, "clock-frequency", &frequency);
- of_property_read_u32(np, "clock-accuracy", &accuracy);
-
-- irq = irq_of_parse_and_map(np, 0);
-- if (!irq)
-- return;
--
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
-- clk = at91_clk_register_main_rc_osc(regmap, irq, name, frequency,
-- accuracy);
-+ clk = at91_clk_register_main_rc_osc(regmap, name, frequency, accuracy);
- if (IS_ERR(clk))
- return;
-
-@@ -529,16 +453,6 @@ static void __init of_at91rm9200_clk_mai
- CLK_OF_DECLARE(at91rm9200_clk_main, "atmel,at91rm9200-clk-main",
- of_at91rm9200_clk_main_setup);
-
--static irqreturn_t clk_sam9x5_main_irq_handler(int irq, void *dev_id)
--{
-- struct clk_sam9x5_main *clkmain = dev_id;
--
-- wake_up(&clkmain->wait);
-- disable_irq_nosync(clkmain->irq);
--
-- return IRQ_HANDLED;
--}
--
- static inline bool clk_sam9x5_main_ready(struct regmap *regmap)
- {
- unsigned int status;
-@@ -553,11 +467,8 @@ static int clk_sam9x5_main_prepare(struc
- struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
- struct regmap *regmap = clkmain->regmap;
-
-- while (!clk_sam9x5_main_ready(regmap)) {
-- enable_irq(clkmain->irq);
-- wait_event(clkmain->wait,
-- clk_sam9x5_main_ready(regmap));
-- }
-+ while (!clk_sam9x5_main_ready(regmap))
-+ cpu_relax();
-
- return clk_main_probe_frequency(regmap);
- }
-@@ -594,11 +505,8 @@ static int clk_sam9x5_main_set_parent(st
- else if (!index && (tmp & AT91_PMC_MOSCSEL))
- regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
-
-- while (!clk_sam9x5_main_ready(regmap)) {
-- enable_irq(clkmain->irq);
-- wait_event(clkmain->wait,
-- clk_sam9x5_main_ready(regmap));
-- }
-+ while (!clk_sam9x5_main_ready(regmap))
-+ cpu_relax();
-
- return 0;
- }
-@@ -623,12 +531,10 @@ static const struct clk_ops sam9x5_main_
-
- static struct clk * __init
- at91_clk_register_sam9x5_main(struct regmap *regmap,
-- unsigned int irq,
- const char *name,
- const char **parent_names,
- int num_parents)
- {
-- int ret;
- struct clk_sam9x5_main *clkmain;
- struct clk *clk = NULL;
- struct clk_init_data init;
-@@ -652,21 +558,12 @@ at91_clk_register_sam9x5_main(struct reg
-
- clkmain->hw.init = &init;
- clkmain->regmap = regmap;
-- clkmain->irq = irq;
- regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
- clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
-- init_waitqueue_head(&clkmain->wait);
-- irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN);
-- ret = request_irq(clkmain->irq, clk_sam9x5_main_irq_handler,
-- IRQF_TRIGGER_HIGH, name, clkmain);
-- if (ret)
-- return ERR_PTR(ret);
-
- clk = clk_register(NULL, &clkmain->hw);
-- if (IS_ERR(clk)) {
-- free_irq(clkmain->irq, clkmain);
-+ if (IS_ERR(clk))
- kfree(clkmain);
-- }
-
- return clk;
- }
-@@ -676,7 +573,6 @@ static void __init of_at91sam9x5_clk_mai
- struct clk *clk;
- const char *parent_names[2];
- int num_parents;
-- unsigned int irq;
- const char *name = np->name;
- struct regmap *regmap;
-
-@@ -691,11 +587,7 @@ static void __init of_at91sam9x5_clk_mai
-
- of_property_read_string(np, "clock-output-names", &name);
-
-- irq = irq_of_parse_and_map(np, 0);
-- if (!irq)
-- return;
--
-- clk = at91_clk_register_sam9x5_main(regmap, irq, name, parent_names,
-+ clk = at91_clk_register_sam9x5_main(regmap, name, parent_names,
- num_parents);
- if (IS_ERR(clk))
- return;
---- a/drivers/clk/at91/clk-master.c
-+++ b/drivers/clk/at91/clk-master.c
-@@ -12,13 +12,6 @@
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/of_irq.h>
--#include <linux/io.h>
--#include <linux/wait.h>
--#include <linux/sched.h>
--#include <linux/interrupt.h>
--#include <linux/irq.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
-@@ -47,22 +40,10 @@ struct clk_master_layout {
- struct clk_master {
- struct clk_hw hw;
- struct regmap *regmap;
-- unsigned int irq;
-- wait_queue_head_t wait;
- const struct clk_master_layout *layout;
- const struct clk_master_characteristics *characteristics;
- };
-
--static irqreturn_t clk_master_irq_handler(int irq, void *dev_id)
--{
-- struct clk_master *master = (struct clk_master *)dev_id;
--
-- wake_up(&master->wait);
-- disable_irq_nosync(master->irq);
--
-- return IRQ_HANDLED;
--}
--
- static inline bool clk_master_ready(struct regmap *regmap)
- {
- unsigned int status;
-@@ -76,11 +57,8 @@ static int clk_master_prepare(struct clk
- {
- struct clk_master *master = to_clk_master(hw);
-
-- while (!clk_master_ready(master->regmap)) {
-- enable_irq(master->irq);
-- wait_event(master->wait,
-- clk_master_ready(master->regmap));
-- }
-+ while (!clk_master_ready(master->regmap))
-+ cpu_relax();
-
- return 0;
- }
-@@ -143,13 +121,12 @@ static const struct clk_ops master_ops =
- };
-
- static struct clk * __init
--at91_clk_register_master(struct regmap *regmap, unsigned int irq,
-+at91_clk_register_master(struct regmap *regmap,
- const char *name, int num_parents,
- const char **parent_names,
- const struct clk_master_layout *layout,
- const struct clk_master_characteristics *characteristics)
- {
-- int ret;
- struct clk_master *master;
- struct clk *clk = NULL;
- struct clk_init_data init;
-@@ -171,19 +148,9 @@ at91_clk_register_master(struct regmap *
- master->layout = layout;
- master->characteristics = characteristics;
- master->regmap = regmap;
-- master->irq = irq;
-- init_waitqueue_head(&master->wait);
-- irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
-- ret = request_irq(master->irq, clk_master_irq_handler,
-- IRQF_TRIGGER_HIGH, "clk-master", master);
-- if (ret) {
-- kfree(master);
-- return ERR_PTR(ret);
-- }
-
- clk = clk_register(NULL, &master->hw);
- if (IS_ERR(clk)) {
-- free_irq(master->irq, master);
- kfree(master);
- }
-
-@@ -233,7 +200,6 @@ of_at91_clk_master_setup(struct device_n
- {
- struct clk *clk;
- int num_parents;
-- unsigned int irq;
- const char *parent_names[MASTER_SOURCE_MAX];
- const char *name = np->name;
- struct clk_master_characteristics *characteristics;
-@@ -255,11 +221,7 @@ of_at91_clk_master_setup(struct device_n
- if (IS_ERR(regmap))
- return;
-
-- irq = irq_of_parse_and_map(np, 0);
-- if (!irq)
-- goto out_free_characteristics;
--
-- clk = at91_clk_register_master(regmap, irq, name, num_parents,
-+ clk = at91_clk_register_master(regmap, name, num_parents,
- parent_names, layout,
- characteristics);
- if (IS_ERR(clk))
---- a/drivers/clk/at91/clk-pll.c
-+++ b/drivers/clk/at91/clk-pll.c
-@@ -12,14 +12,6 @@
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/of_irq.h>
--#include <linux/io.h>
--#include <linux/kernel.h>
--#include <linux/wait.h>
--#include <linux/sched.h>
--#include <linux/interrupt.h>
--#include <linux/irq.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
-@@ -61,8 +53,6 @@ struct clk_pll_layout {
- struct clk_pll {
- struct clk_hw hw;
- struct regmap *regmap;
-- unsigned int irq;
-- wait_queue_head_t wait;
- u8 id;
- u8 div;
- u8 range;
-@@ -71,16 +61,6 @@ struct clk_pll {
- const struct clk_pll_characteristics *characteristics;
- };
-
--static irqreturn_t clk_pll_irq_handler(int irq, void *dev_id)
--{
-- struct clk_pll *pll = (struct clk_pll *)dev_id;
--
-- wake_up(&pll->wait);
-- disable_irq_nosync(pll->irq);
--
-- return IRQ_HANDLED;
--}
--
- static inline bool clk_pll_ready(struct regmap *regmap, int id)
- {
- unsigned int status;
-@@ -127,11 +107,8 @@ static int clk_pll_prepare(struct clk_hw
- (out << PLL_OUT_SHIFT) |
- ((pll->mul & layout->mul_mask) << layout->mul_shift));
-
-- while (!clk_pll_ready(regmap, pll->id)) {
-- enable_irq(pll->irq);
-- wait_event(pll->wait,
-- clk_pll_ready(regmap, pll->id));
-- }
-+ while (!clk_pll_ready(regmap, pll->id))
-+ cpu_relax();
-
- return 0;
- }
-@@ -320,7 +297,7 @@ static const struct clk_ops pll_ops = {
- };
-
- static struct clk * __init
--at91_clk_register_pll(struct regmap *regmap, unsigned int irq, const char *name,
-+at91_clk_register_pll(struct regmap *regmap, const char *name,
- const char *parent_name, u8 id,
- const struct clk_pll_layout *layout,
- const struct clk_pll_characteristics *characteristics)
-@@ -328,7 +305,6 @@ at91_clk_register_pll(struct regmap *reg
- struct clk_pll *pll;
- struct clk *clk = NULL;
- struct clk_init_data init;
-- int ret;
- int offset = PLL_REG(id);
- unsigned int pllr;
-
-@@ -350,22 +326,12 @@ at91_clk_register_pll(struct regmap *reg
- pll->layout = layout;
- pll->characteristics = characteristics;
- pll->regmap = regmap;
-- pll->irq = irq;
- regmap_read(regmap, offset, &pllr);
- pll->div = PLL_DIV(pllr);
- pll->mul = PLL_MUL(pllr, layout);
-- init_waitqueue_head(&pll->wait);
-- irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
-- ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
-- id ? "clk-pllb" : "clk-plla", pll);
-- if (ret) {
-- kfree(pll);
-- return ERR_PTR(ret);
-- }
-
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
-- free_irq(pll->irq, pll);
- kfree(pll);
- }
-
-@@ -499,7 +465,6 @@ of_at91_clk_pll_setup(struct device_node
- const struct clk_pll_layout *layout)
- {
- u32 id;
-- unsigned int irq;
- struct clk *clk;
- struct regmap *regmap;
- const char *parent_name;
-@@ -521,11 +486,7 @@ of_at91_clk_pll_setup(struct device_node
- if (!characteristics)
- return;
-
-- irq = irq_of_parse_and_map(np, 0);
-- if (!irq)
-- return;
--
-- clk = at91_clk_register_pll(regmap, irq, name, parent_name, id, layout,
-+ clk = at91_clk_register_pll(regmap, name, parent_name, id, layout,
- characteristics);
- if (IS_ERR(clk))
- goto out_free_characteristics;
---- a/drivers/clk/at91/clk-system.c
-+++ b/drivers/clk/at91/clk-system.c
-@@ -12,13 +12,6 @@
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/io.h>
--#include <linux/irq.h>
--#include <linux/of_irq.h>
--#include <linux/interrupt.h>
--#include <linux/wait.h>
--#include <linux/sched.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
-@@ -32,8 +25,6 @@
- struct clk_system {
- struct clk_hw hw;
- struct regmap *regmap;
-- unsigned int irq;
-- wait_queue_head_t wait;
- u8 id;
- };
-
-@@ -41,15 +32,6 @@ static inline int is_pck(int id)
- {
- return (id >= 8) && (id <= 15);
- }
--static irqreturn_t clk_system_irq_handler(int irq, void *dev_id)
--{
-- struct clk_system *sys = (struct clk_system *)dev_id;
--
-- wake_up(&sys->wait);
-- disable_irq_nosync(sys->irq);
--
-- return IRQ_HANDLED;
--}
-
- static inline bool clk_system_ready(struct regmap *regmap, int id)
- {
-@@ -69,15 +51,9 @@ static int clk_system_prepare(struct clk
- if (!is_pck(sys->id))
- return 0;
-
-- while (!clk_system_ready(sys->regmap, sys->id)) {
-- if (sys->irq) {
-- enable_irq(sys->irq);
-- wait_event(sys->wait,
-- clk_system_ready(sys->regmap, sys->id));
-- } else {
-- cpu_relax();
-- }
-- }
-+ while (!clk_system_ready(sys->regmap, sys->id))
-+ cpu_relax();
-+
- return 0;
- }
-
-@@ -114,12 +90,11 @@ static const struct clk_ops system_ops =
-
- static struct clk * __init
- at91_clk_register_system(struct regmap *regmap, const char *name,
-- const char *parent_name, u8 id, int irq)
-+ const char *parent_name, u8 id)
- {
- struct clk_system *sys;
- struct clk *clk = NULL;
- struct clk_init_data init;
-- int ret;
-
- if (!parent_name || id > SYSTEM_MAX_ID)
- return ERR_PTR(-EINVAL);
-@@ -137,24 +112,10 @@ at91_clk_register_system(struct regmap *
- sys->id = id;
- sys->hw.init = &init;
- sys->regmap = regmap;
-- sys->irq = irq;
-- if (irq) {
-- init_waitqueue_head(&sys->wait);
-- irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
-- ret = request_irq(sys->irq, clk_system_irq_handler,
-- IRQF_TRIGGER_HIGH, name, sys);
-- if (ret) {
-- kfree(sys);
-- return ERR_PTR(ret);
-- }
-- }
-
- clk = clk_register(NULL, &sys->hw);
-- if (IS_ERR(clk)) {
-- if (irq)
-- free_irq(sys->irq, sys);
-+ if (IS_ERR(clk))
- kfree(sys);
-- }
-
- return clk;
- }
-@@ -162,7 +123,6 @@ at91_clk_register_system(struct regmap *
- static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
- {
- int num;
-- int irq = 0;
- u32 id;
- struct clk *clk;
- const char *name;
-@@ -185,13 +145,9 @@ static void __init of_at91rm9200_clk_sys
- if (of_property_read_string(np, "clock-output-names", &name))
- name = sysclknp->name;
-
-- if (is_pck(id))
-- irq = irq_of_parse_and_map(sysclknp, 0);
--
- parent_name = of_clk_get_parent_name(sysclknp, 0);
-
-- clk = at91_clk_register_system(regmap, name, parent_name, id,
-- irq);
-+ clk = at91_clk_register_system(regmap, name, parent_name, id);
- if (IS_ERR(clk))
- continue;
-
---- a/drivers/clk/at91/clk-utmi.c
-+++ b/drivers/clk/at91/clk-utmi.c
-@@ -11,14 +11,7 @@
- #include <linux/clk-provider.h>
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
--#include <linux/interrupt.h>
--#include <linux/irq.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/of_irq.h>
--#include <linux/io.h>
--#include <linux/sched.h>
--#include <linux/wait.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
-@@ -29,22 +22,10 @@
- struct clk_utmi {
- struct clk_hw hw;
- struct regmap *regmap;
-- unsigned int irq;
-- wait_queue_head_t wait;
- };
-
- #define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw)
-
--static irqreturn_t clk_utmi_irq_handler(int irq, void *dev_id)
--{
-- struct clk_utmi *utmi = (struct clk_utmi *)dev_id;
--
-- wake_up(&utmi->wait);
-- disable_irq_nosync(utmi->irq);
--
-- return IRQ_HANDLED;
--}
--
- static inline bool clk_utmi_ready(struct regmap *regmap)
- {
- unsigned int status;
-@@ -62,11 +43,8 @@ static int clk_utmi_prepare(struct clk_h
-
- regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, uckr, uckr);
-
-- while (!clk_utmi_ready(utmi->regmap)) {
-- enable_irq(utmi->irq);
-- wait_event(utmi->wait,
-- clk_utmi_ready(utmi->regmap));
-- }
-+ while (!clk_utmi_ready(utmi->regmap))
-+ cpu_relax();
-
- return 0;
- }
-@@ -100,10 +78,9 @@ static const struct clk_ops utmi_ops = {
- };
-
- static struct clk * __init
--at91_clk_register_utmi(struct regmap *regmap, unsigned int irq,
-+at91_clk_register_utmi(struct regmap *regmap,
- const char *name, const char *parent_name)
- {
-- int ret;
- struct clk_utmi *utmi;
- struct clk *clk = NULL;
- struct clk_init_data init;
-@@ -120,28 +97,16 @@ at91_clk_register_utmi(struct regmap *re
-
- utmi->hw.init = &init;
- utmi->regmap = regmap;
-- utmi->irq = irq;
-- init_waitqueue_head(&utmi->wait);
-- irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
-- ret = request_irq(utmi->irq, clk_utmi_irq_handler,
-- IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
-- if (ret) {
-- kfree(utmi);
-- return ERR_PTR(ret);
-- }
-
- clk = clk_register(NULL, &utmi->hw);
-- if (IS_ERR(clk)) {
-- free_irq(utmi->irq, utmi);
-+ if (IS_ERR(clk))
- kfree(utmi);
-- }
-
- return clk;
- }
-
- static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
- {
-- unsigned int irq;
- struct clk *clk;
- const char *parent_name;
- const char *name = np->name;
-@@ -151,15 +116,11 @@ static void __init of_at91sam9x5_clk_utm
-
- of_property_read_string(np, "clock-output-names", &name);
-
-- irq = irq_of_parse_and_map(np, 0);
-- if (!irq)
-- return;
--
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
-- clk = at91_clk_register_utmi(regmap, irq, name, parent_name);
-+ clk = at91_clk_register_utmi(regmap, name, parent_name);
- if (IS_ERR(clk))
- return;
-
---- a/drivers/clk/at91/pmc.c
-+++ b/drivers/clk/at91/pmc.c
-@@ -13,12 +13,6 @@
- #include <linux/clk/at91_pmc.h>
- #include <linux/of.h>
- #include <linux/of_address.h>
--#include <linux/io.h>
--#include <linux/interrupt.h>
--#include <linux/irq.h>
--#include <linux/irqchip/chained_irq.h>
--#include <linux/irqdomain.h>
--#include <linux/of_irq.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
-@@ -67,118 +61,6 @@ int of_at91_get_clk_range(struct device_
- }
- EXPORT_SYMBOL_GPL(of_at91_get_clk_range);
-
--static void pmc_irq_mask(struct irq_data *d)
--{
-- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
--
-- regmap_write(pmc->regmap, AT91_PMC_IDR, 1 << d->hwirq);
--}
--
--static void pmc_irq_unmask(struct irq_data *d)
--{
-- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
--
-- regmap_write(pmc->regmap, AT91_PMC_IER, 1 << d->hwirq);
--}
--
--static int pmc_irq_set_type(struct irq_data *d, unsigned type)
--{
-- if (type != IRQ_TYPE_LEVEL_HIGH) {
-- pr_warn("PMC: type not supported (support only IRQ_TYPE_LEVEL_HIGH type)\n");
-- return -EINVAL;
-- }
--
-- return 0;
--}
--
--static void pmc_irq_suspend(struct irq_data *d)
--{
-- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
--
-- regmap_read(pmc->regmap, AT91_PMC_IMR, &pmc->imr);
-- regmap_write(pmc->regmap, AT91_PMC_IDR, pmc->imr);
--}
--
--static void pmc_irq_resume(struct irq_data *d)
--{
-- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
--
-- regmap_write(pmc->regmap, AT91_PMC_IER, pmc->imr);
--}
--
--static struct irq_chip pmc_irq = {
-- .name = "PMC",
-- .irq_disable = pmc_irq_mask,
-- .irq_mask = pmc_irq_mask,
-- .irq_unmask = pmc_irq_unmask,
-- .irq_set_type = pmc_irq_set_type,
-- .irq_suspend = pmc_irq_suspend,
-- .irq_resume = pmc_irq_resume,
--};
--
--static struct lock_class_key pmc_lock_class;
--
--static int pmc_irq_map(struct irq_domain *h, unsigned int virq,
-- irq_hw_number_t hw)
--{
-- struct at91_pmc *pmc = h->host_data;
--
-- irq_set_lockdep_class(virq, &pmc_lock_class);
--
-- irq_set_chip_and_handler(virq, &pmc_irq,
-- handle_level_irq);
-- irq_set_chip_data(virq, pmc);
--
-- return 0;
--}
--
--static int pmc_irq_domain_xlate(struct irq_domain *d,
-- struct device_node *ctrlr,
-- const u32 *intspec, unsigned int intsize,
-- irq_hw_number_t *out_hwirq,
-- unsigned int *out_type)
--{
-- struct at91_pmc *pmc = d->host_data;
-- const struct at91_pmc_caps *caps = pmc->caps;
--
-- if (WARN_ON(intsize < 1))
-- return -EINVAL;
--
-- *out_hwirq = intspec[0];
--
-- if (!(caps->available_irqs & (1 << *out_hwirq)))
-- return -EINVAL;
--
-- *out_type = IRQ_TYPE_LEVEL_HIGH;
--
-- return 0;
--}
--
--static const struct irq_domain_ops pmc_irq_ops = {
-- .map = pmc_irq_map,
-- .xlate = pmc_irq_domain_xlate,
--};
--
--static irqreturn_t pmc_irq_handler(int irq, void *data)
--{
-- struct at91_pmc *pmc = (struct at91_pmc *)data;
-- unsigned int tmpsr, imr;
-- unsigned long sr;
-- int n;
--
-- regmap_read(pmc->regmap, AT91_PMC_SR, &tmpsr);
-- regmap_read(pmc->regmap, AT91_PMC_IMR, &imr);
--
-- sr = tmpsr & imr;
-- if (!sr)
-- return IRQ_NONE;
--
-- for_each_set_bit(n, &sr, BITS_PER_LONG)
-- generic_handle_irq(irq_find_mapping(pmc->irqdomain, n));
--
-- return IRQ_HANDLED;
--}
--
- static const struct at91_pmc_caps at91rm9200_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
-@@ -230,12 +112,12 @@ static const struct at91_pmc_caps sama5d
-
- static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
- struct regmap *regmap,
-- void __iomem *regbase, int virq,
-+ void __iomem *regbase,
- const struct at91_pmc_caps *caps)
- {
- struct at91_pmc *pmc;
-
-- if (!regbase || !virq || !caps)
-+ if (!regbase || !caps)
- return NULL;
-
- at91_pmc_base = regbase;
-@@ -245,26 +127,11 @@ static struct at91_pmc *__init at91_pmc_
- return NULL;
-
- pmc->regmap = regmap;
-- pmc->virq = virq;
- pmc->caps = caps;
-
-- pmc->irqdomain = irq_domain_add_linear(np, 32, &pmc_irq_ops, pmc);
-- if (!pmc->irqdomain)
-- goto out_free_pmc;
--
- regmap_write(pmc->regmap, AT91_PMC_IDR, 0xffffffff);
-- if (request_irq(pmc->virq, pmc_irq_handler,
-- IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
-- goto out_remove_irqdomain;
-
- return pmc;
--
--out_remove_irqdomain:
-- irq_domain_remove(pmc->irqdomain);
--out_free_pmc:
-- kfree(pmc);
--
-- return NULL;
- }
-
- static void __init of_at91_pmc_setup(struct device_node *np,
-@@ -273,17 +140,12 @@ static void __init of_at91_pmc_setup(str
- struct at91_pmc *pmc;
- void __iomem *regbase = of_iomap(np, 0);
- struct regmap *regmap;
-- int virq;
-
- regmap = syscon_node_to_regmap(np);
- if (IS_ERR(regmap))
- panic("Could not retrieve syscon regmap");
-
-- virq = irq_of_parse_and_map(np, 0);
-- if (!virq)
-- return;
--
-- pmc = at91_pmc_init(np, regmap, regbase, virq, caps);
-+ pmc = at91_pmc_init(np, regmap, regbase, caps);
- if (!pmc)
- return;
- }
---- a/drivers/clk/at91/pmc.h
-+++ b/drivers/clk/at91/pmc.h
-@@ -32,10 +32,7 @@ struct at91_pmc_caps {
-
- struct at91_pmc {
- struct regmap *regmap;
-- int virq;
- const struct at91_pmc_caps *caps;
-- struct irq_domain *irqdomain;
-- u32 imr;
- };
-
- int of_at91_get_clk_range(struct device_node *np, const char *propname,
diff --git a/patches/0004-clk-at91-pmc-merge-at91_pmc_init-in-atmel_pmc_probe.patch b/patches/0004-clk-at91-pmc-merge-at91_pmc_init-in-atmel_pmc_probe.patch
deleted file mode 100644
index 24f18b5e5d3cc2..00000000000000
--- a/patches/0004-clk-at91-pmc-merge-at91_pmc_init-in-atmel_pmc_probe.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Date: Wed, 27 Jan 2016 14:59:47 +0100
-Subject: [PATCH 04/13] clk: at91: pmc: merge at91_pmc_init in atmel_pmc_probe
-
-at91_pmc_init() doesn't do much anymore, merge it in atmel_pmc_probe().
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
-Acked-by: Stephen Boyd <sboyd@codeaurora.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clk/at91/pmc.c | 34 +++++++++-------------------------
- 1 file changed, 9 insertions(+), 25 deletions(-)
-
---- a/drivers/clk/at91/pmc.c
-+++ b/drivers/clk/at91/pmc.c
-@@ -110,30 +110,6 @@ static const struct at91_pmc_caps sama5d
- AT91_PMC_CFDEV,
- };
-
--static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
-- struct regmap *regmap,
-- void __iomem *regbase,
-- const struct at91_pmc_caps *caps)
--{
-- struct at91_pmc *pmc;
--
-- if (!regbase || !caps)
-- return NULL;
--
-- at91_pmc_base = regbase;
--
-- pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
-- if (!pmc)
-- return NULL;
--
-- pmc->regmap = regmap;
-- pmc->caps = caps;
--
-- regmap_write(pmc->regmap, AT91_PMC_IDR, 0xffffffff);
--
-- return pmc;
--}
--
- static void __init of_at91_pmc_setup(struct device_node *np,
- const struct at91_pmc_caps *caps)
- {
-@@ -141,13 +117,21 @@ static void __init of_at91_pmc_setup(str
- void __iomem *regbase = of_iomap(np, 0);
- struct regmap *regmap;
-
-+ at91_pmc_base = regbase;
-+
- regmap = syscon_node_to_regmap(np);
- if (IS_ERR(regmap))
- panic("Could not retrieve syscon regmap");
-
-- pmc = at91_pmc_init(np, regmap, regbase, caps);
-+ pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
- if (!pmc)
- return;
-+
-+ pmc->regmap = regmap;
-+ pmc->caps = caps;
-+
-+ regmap_write(pmc->regmap, AT91_PMC_IDR, 0xffffffff);
-+
- }
-
- static void __init of_at91rm9200_pmc_setup(struct device_node *np)
diff --git a/patches/0004-rcu-Do-not-call-rcu_nocb_gp_cleanup-while-holding-rn.patch b/patches/0004-rcu-Do-not-call-rcu_nocb_gp_cleanup-while-holding-rn.patch
deleted file mode 100644
index 67e5440001be66..00000000000000
--- a/patches/0004-rcu-Do-not-call-rcu_nocb_gp_cleanup-while-holding-rn.patch
+++ /dev/null
@@ -1,190 +0,0 @@
-From: Daniel Wagner <daniel.wagner@bmw-carit.de>
-Date: Fri, 19 Feb 2016 09:46:40 +0100
-Subject: [PATCH 4/5] rcu: Do not call rcu_nocb_gp_cleanup() while holding
- rnp->lock
-
-rcu_nocb_gp_cleanup() is called while holding rnp->lock. Currently,
-this is okay because the wake_up_all() in rcu_nocb_gp_cleanup() will
-not enable the IRQs. lockdep is happy.
-
-By switching over using swait this is not true anymore. swake_up_all()
-enables the IRQs while processing the waiters. __do_softirq() can now
-run and will eventually call rcu_process_callbacks() which wants to
-grap nrp->lock.
-
-Let's move the rcu_nocb_gp_cleanup() call outside the lock before we
-switch over to swait.
-
-If we would hold the rnp->lock and use swait, lockdep reports
-following:
-
- =================================
- [ INFO: inconsistent lock state ]
- 4.2.0-rc5-00025-g9a73ba0 #136 Not tainted
- ---------------------------------
- inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
- rcu_preempt/8 [HC0[0]:SC0[0]:HE1:SE1] takes:
- (rcu_node_1){+.?...}, at: [<ffffffff811387c7>] rcu_gp_kthread+0xb97/0xeb0
- {IN-SOFTIRQ-W} state was registered at:
- [<ffffffff81109b9f>] __lock_acquire+0xd5f/0x21e0
- [<ffffffff8110be0f>] lock_acquire+0xdf/0x2b0
- [<ffffffff81841cc9>] _raw_spin_lock_irqsave+0x59/0xa0
- [<ffffffff81136991>] rcu_process_callbacks+0x141/0x3c0
- [<ffffffff810b1a9d>] __do_softirq+0x14d/0x670
- [<ffffffff810b2214>] irq_exit+0x104/0x110
- [<ffffffff81844e96>] smp_apic_timer_interrupt+0x46/0x60
- [<ffffffff81842e70>] apic_timer_interrupt+0x70/0x80
- [<ffffffff810dba66>] rq_attach_root+0xa6/0x100
- [<ffffffff810dbc2d>] cpu_attach_domain+0x16d/0x650
- [<ffffffff810e4b42>] build_sched_domains+0x942/0xb00
- [<ffffffff821777c2>] sched_init_smp+0x509/0x5c1
- [<ffffffff821551e3>] kernel_init_freeable+0x172/0x28f
- [<ffffffff8182cdce>] kernel_init+0xe/0xe0
- [<ffffffff8184231f>] ret_from_fork+0x3f/0x70
- irq event stamp: 76
- hardirqs last enabled at (75): [<ffffffff81841330>] _raw_spin_unlock_irq+0x30/0x60
- hardirqs last disabled at (76): [<ffffffff8184116f>] _raw_spin_lock_irq+0x1f/0x90
- softirqs last enabled at (0): [<ffffffff810a8df2>] copy_process.part.26+0x602/0x1cf0
- softirqs last disabled at (0): [< (null)>] (null)
- other info that might help us debug this:
- Possible unsafe locking scenario:
- CPU0
- ----
- lock(rcu_node_1);
- <Interrupt>
- lock(rcu_node_1);
- *** DEADLOCK ***
- 1 lock held by rcu_preempt/8:
- #0: (rcu_node_1){+.?...}, at: [<ffffffff811387c7>] rcu_gp_kthread+0xb97/0xeb0
- stack backtrace:
- CPU: 0 PID: 8 Comm: rcu_preempt Not tainted 4.2.0-rc5-00025-g9a73ba0 #136
- Hardware name: Dell Inc. PowerEdge R820/066N7P, BIOS 2.0.20 01/16/2014
- 0000000000000000 000000006d7e67d8 ffff881fb081fbd8 ffffffff818379e0
- 0000000000000000 ffff881fb0812a00 ffff881fb081fc38 ffffffff8110813b
- 0000000000000000 0000000000000001 ffff881f00000001 ffffffff8102fa4f
- Call Trace:
- [<ffffffff818379e0>] dump_stack+0x4f/0x7b
- [<ffffffff8110813b>] print_usage_bug+0x1db/0x1e0
- [<ffffffff8102fa4f>] ? save_stack_trace+0x2f/0x50
- [<ffffffff811087ad>] mark_lock+0x66d/0x6e0
- [<ffffffff81107790>] ? check_usage_forwards+0x150/0x150
- [<ffffffff81108898>] mark_held_locks+0x78/0xa0
- [<ffffffff81841330>] ? _raw_spin_unlock_irq+0x30/0x60
- [<ffffffff81108a28>] trace_hardirqs_on_caller+0x168/0x220
- [<ffffffff81108aed>] trace_hardirqs_on+0xd/0x10
- [<ffffffff81841330>] _raw_spin_unlock_irq+0x30/0x60
- [<ffffffff810fd1c7>] swake_up_all+0xb7/0xe0
- [<ffffffff811386e1>] rcu_gp_kthread+0xab1/0xeb0
- [<ffffffff811089bf>] ? trace_hardirqs_on_caller+0xff/0x220
- [<ffffffff81841341>] ? _raw_spin_unlock_irq+0x41/0x60
- [<ffffffff81137c30>] ? rcu_barrier+0x20/0x20
- [<ffffffff810d2014>] kthread+0x104/0x120
- [<ffffffff81841330>] ? _raw_spin_unlock_irq+0x30/0x60
- [<ffffffff810d1f10>] ? kthread_create_on_node+0x260/0x260
- [<ffffffff8184231f>] ret_from_fork+0x3f/0x70
- [<ffffffff810d1f10>] ? kthread_create_on_node+0x260/0x260
-
-Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Cc: linux-rt-users@vger.kernel.org
-Cc: Boqun Feng <boqun.feng@gmail.com>
-Cc: Marcelo Tosatti <mtosatti@redhat.com>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Link: http://lkml.kernel.org/r/1455871601-27484-5-git-send-email-wagi@monom.org
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/rcu/tree.c | 4 +++-
- kernel/rcu/tree.h | 3 ++-
- kernel/rcu/tree_plugin.h | 16 +++++++++++++---
- 3 files changed, 18 insertions(+), 5 deletions(-)
-
---- a/kernel/rcu/tree.c
-+++ b/kernel/rcu/tree.c
-@@ -1590,7 +1590,6 @@ static int rcu_future_gp_cleanup(struct
- int needmore;
- struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
-
-- rcu_nocb_gp_cleanup(rsp, rnp);
- rnp->need_future_gp[c & 0x1] = 0;
- needmore = rnp->need_future_gp[(c + 1) & 0x1];
- trace_rcu_future_gp(rnp, rdp, c,
-@@ -1991,6 +1990,7 @@ static void rcu_gp_cleanup(struct rcu_st
- int nocb = 0;
- struct rcu_data *rdp;
- struct rcu_node *rnp = rcu_get_root(rsp);
-+ wait_queue_head_t *sq;
-
- WRITE_ONCE(rsp->gp_activity, jiffies);
- raw_spin_lock_irq(&rnp->lock);
-@@ -2029,7 +2029,9 @@ static void rcu_gp_cleanup(struct rcu_st
- needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
- /* smp_mb() provided by prior unlock-lock pair. */
- nocb += rcu_future_gp_cleanup(rsp, rnp);
-+ sq = rcu_nocb_gp_get(rnp);
- raw_spin_unlock_irq(&rnp->lock);
-+ rcu_nocb_gp_cleanup(sq);
- cond_resched_rcu_qs();
- WRITE_ONCE(rsp->gp_activity, jiffies);
- rcu_gp_slow(rsp, gp_cleanup_delay);
---- a/kernel/rcu/tree.h
-+++ b/kernel/rcu/tree.h
-@@ -607,7 +607,8 @@ static void zero_cpu_stall_ticks(struct
- static void increment_cpu_stall_ticks(void);
- static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
- static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
--static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
-+static wait_queue_head_t *rcu_nocb_gp_get(struct rcu_node *rnp);
-+static void rcu_nocb_gp_cleanup(wait_queue_head_t *sq);
- static void rcu_init_one_nocb(struct rcu_node *rnp);
- static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
- bool lazy, unsigned long flags);
---- a/kernel/rcu/tree_plugin.h
-+++ b/kernel/rcu/tree_plugin.h
-@@ -1822,9 +1822,9 @@ early_param("rcu_nocb_poll", parse_rcu_n
- * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
- * grace period.
- */
--static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
-+static void rcu_nocb_gp_cleanup(wait_queue_head_t *sq)
- {
-- wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
-+ wake_up_all(sq);
- }
-
- /*
-@@ -1840,6 +1840,11 @@ static void rcu_nocb_gp_set(struct rcu_n
- rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
- }
-
-+static wait_queue_head_t *rcu_nocb_gp_get(struct rcu_node *rnp)
-+{
-+ return &rnp->nocb_gp_wq[rnp->completed & 0x1];
-+}
-+
- static void rcu_init_one_nocb(struct rcu_node *rnp)
- {
- init_waitqueue_head(&rnp->nocb_gp_wq[0]);
-@@ -2514,7 +2519,7 @@ static bool rcu_nocb_cpu_needs_barrier(s
- return false;
- }
-
--static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
-+static void rcu_nocb_gp_cleanup(wait_queue_head_t *sq)
- {
- }
-
-@@ -2522,6 +2527,11 @@ static void rcu_nocb_gp_set(struct rcu_n
- {
- }
-
-+static wait_queue_head_t *rcu_nocb_gp_get(struct rcu_node *rnp)
-+{
-+ return NULL;
-+}
-+
- static void rcu_init_one_nocb(struct rcu_node *rnp)
- {
- }
diff --git a/patches/0005-clk-at91-pmc-move-pmc-structures-to-C-file.patch b/patches/0005-clk-at91-pmc-move-pmc-structures-to-C-file.patch
deleted file mode 100644
index 294e4cdd676457..00000000000000
--- a/patches/0005-clk-at91-pmc-move-pmc-structures-to-C-file.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Date: Thu, 17 Sep 2015 15:26:46 +0200
-Subject: [PATCH 05/13] clk: at91: pmc: move pmc structures to C file
-
-pmc.c is now the only user of struct at91_pmc*, move their definition in
-the C file.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
-Acked-by: Stephen Boyd <sboyd@codeaurora.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clk/at91/pmc.c | 9 +++++++++
- drivers/clk/at91/pmc.h | 9 ---------
- 2 files changed, 9 insertions(+), 9 deletions(-)
-
---- a/drivers/clk/at91/pmc.c
-+++ b/drivers/clk/at91/pmc.c
-@@ -20,6 +20,15 @@
-
- #include "pmc.h"
-
-+struct at91_pmc_caps {
-+ u32 available_irqs;
-+};
-+
-+struct at91_pmc {
-+ struct regmap *regmap;
-+ const struct at91_pmc_caps *caps;
-+};
-+
- void __iomem *at91_pmc_base;
- EXPORT_SYMBOL_GPL(at91_pmc_base);
-
---- a/drivers/clk/at91/pmc.h
-+++ b/drivers/clk/at91/pmc.h
-@@ -26,15 +26,6 @@ struct clk_range {
-
- #define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,}
-
--struct at91_pmc_caps {
-- u32 available_irqs;
--};
--
--struct at91_pmc {
-- struct regmap *regmap;
-- const struct at91_pmc_caps *caps;
--};
--
- int of_at91_get_clk_range(struct device_node *np, const char *propname,
- struct clk_range *range);
-
diff --git a/patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
index 6bec5562dd2704..b560ddf9703a7a 100644
--- a/patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+++ b/patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -815,7 +815,9 @@ void exit_pi_state_list(struct task_stru
+@@ -866,7 +866,9 @@ void exit_pi_state_list(struct task_stru
* task still owns the PI-state:
*/
if (head->next != next) {
diff --git a/patches/0005-rcu-Use-simple-wait-queues-where-possible-in-rcutree.patch b/patches/0005-rcu-Use-simple-wait-queues-where-possible-in-rcutree.patch
deleted file mode 100644
index cb37231ba67eb9..00000000000000
--- a/patches/0005-rcu-Use-simple-wait-queues-where-possible-in-rcutree.patch
+++ /dev/null
@@ -1,310 +0,0 @@
-From: Paul Gortmaker <paul.gortmaker@windriver.com>
-Date: Fri, 19 Feb 2016 09:46:41 +0100
-Subject: [PATCH 5/5] rcu: Use simple wait queues where possible in rcutree
-
-As of commit dae6e64d2bcfd ("rcu: Introduce proper blocking to no-CBs kthreads
-GP waits") the RCU subsystem started making use of wait queues.
-
-Here we convert all additions of RCU wait queues to use simple wait queues,
-since they don't need the extra overhead of the full wait queue features.
-
-Originally this was done for RT kernels[1], since we would get things like...
-
- BUG: sleeping function called from invalid context at kernel/rtmutex.c:659
- in_atomic(): 1, irqs_disabled(): 1, pid: 8, name: rcu_preempt
- Pid: 8, comm: rcu_preempt Not tainted
- Call Trace:
- [<ffffffff8106c8d0>] __might_sleep+0xd0/0xf0
- [<ffffffff817d77b4>] rt_spin_lock+0x24/0x50
- [<ffffffff8106fcf6>] __wake_up+0x36/0x70
- [<ffffffff810c4542>] rcu_gp_kthread+0x4d2/0x680
- [<ffffffff8105f910>] ? __init_waitqueue_head+0x50/0x50
- [<ffffffff810c4070>] ? rcu_gp_fqs+0x80/0x80
- [<ffffffff8105eabb>] kthread+0xdb/0xe0
- [<ffffffff8106b912>] ? finish_task_switch+0x52/0x100
- [<ffffffff817e0754>] kernel_thread_helper+0x4/0x10
- [<ffffffff8105e9e0>] ? __init_kthread_worker+0x60/0x60
- [<ffffffff817e0750>] ? gs_change+0xb/0xb
-
-...and hence simple wait queues were deployed on RT out of necessity
-(as simple wait uses a raw lock), but mainline might as well take
-advantage of the more streamline support as well.
-
-[1] This is a carry forward of work from v3.10-rt; the original conversion
-was by Thomas on an earlier -rt version, and Sebastian extended it to
-additional post-3.10 added RCU waiters; here I've added a commit log and
-unified the RCU changes into one, and uprev'd it to match mainline RCU.
-
-Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Cc: linux-rt-users@vger.kernel.org
-Cc: Boqun Feng <boqun.feng@gmail.com>
-Cc: Marcelo Tosatti <mtosatti@redhat.com>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Link: http://lkml.kernel.org/r/1455871601-27484-6-git-send-email-wagi@monom.org
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/rcu/tree.c | 22 +++++++++++-----------
- kernel/rcu/tree.h | 13 +++++++------
- kernel/rcu/tree_plugin.h | 26 +++++++++++++-------------
- 3 files changed, 31 insertions(+), 30 deletions(-)
-
---- a/kernel/rcu/tree.c
-+++ b/kernel/rcu/tree.c
-@@ -1610,7 +1610,7 @@ static void rcu_gp_kthread_wake(struct r
- !READ_ONCE(rsp->gp_flags) ||
- !rsp->gp_kthread)
- return;
-- wake_up(&rsp->gp_wq);
-+ swake_up(&rsp->gp_wq);
- }
-
- /*
-@@ -1990,7 +1990,7 @@ static void rcu_gp_cleanup(struct rcu_st
- int nocb = 0;
- struct rcu_data *rdp;
- struct rcu_node *rnp = rcu_get_root(rsp);
-- wait_queue_head_t *sq;
-+ struct swait_queue_head *sq;
-
- WRITE_ONCE(rsp->gp_activity, jiffies);
- raw_spin_lock_irq(&rnp->lock);
-@@ -2078,7 +2078,7 @@ static int __noreturn rcu_gp_kthread(voi
- READ_ONCE(rsp->gpnum),
- TPS("reqwait"));
- rsp->gp_state = RCU_GP_WAIT_GPS;
-- wait_event_interruptible(rsp->gp_wq,
-+ swait_event_interruptible(rsp->gp_wq,
- READ_ONCE(rsp->gp_flags) &
- RCU_GP_FLAG_INIT);
- rsp->gp_state = RCU_GP_DONE_GPS;
-@@ -2108,7 +2108,7 @@ static int __noreturn rcu_gp_kthread(voi
- READ_ONCE(rsp->gpnum),
- TPS("fqswait"));
- rsp->gp_state = RCU_GP_WAIT_FQS;
-- ret = wait_event_interruptible_timeout(rsp->gp_wq,
-+ ret = swait_event_interruptible_timeout(rsp->gp_wq,
- rcu_gp_fqs_check_wake(rsp, &gf), j);
- rsp->gp_state = RCU_GP_DOING_FQS;
- /* Locking provides needed memory barriers. */
-@@ -2232,7 +2232,7 @@ static void rcu_report_qs_rsp(struct rcu
- WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
- WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
- raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
-- rcu_gp_kthread_wake(rsp);
-+ swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
- }
-
- /*
-@@ -2893,7 +2893,7 @@ static void force_quiescent_state(struct
- }
- WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
- raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
-- rcu_gp_kthread_wake(rsp);
-+ swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
- }
-
- /*
-@@ -3526,7 +3526,7 @@ static void __rcu_report_exp_rnp(struct
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- if (wake) {
- smp_mb(); /* EGP done before wake_up(). */
-- wake_up(&rsp->expedited_wq);
-+ swake_up(&rsp->expedited_wq);
- }
- break;
- }
-@@ -3783,7 +3783,7 @@ static void synchronize_sched_expedited_
- jiffies_start = jiffies;
-
- for (;;) {
-- ret = wait_event_interruptible_timeout(
-+ ret = swait_event_timeout(
- rsp->expedited_wq,
- sync_rcu_preempt_exp_done(rnp_root),
- jiffies_stall);
-@@ -3791,7 +3791,7 @@ static void synchronize_sched_expedited_
- return;
- if (ret < 0) {
- /* Hit a signal, disable CPU stall warnings. */
-- wait_event(rsp->expedited_wq,
-+ swait_event(rsp->expedited_wq,
- sync_rcu_preempt_exp_done(rnp_root));
- return;
- }
-@@ -4457,8 +4457,8 @@ static void __init rcu_init_one(struct r
- }
- }
-
-- init_waitqueue_head(&rsp->gp_wq);
-- init_waitqueue_head(&rsp->expedited_wq);
-+ init_swait_queue_head(&rsp->gp_wq);
-+ init_swait_queue_head(&rsp->expedited_wq);
- rnp = rsp->level[rcu_num_lvls - 1];
- for_each_possible_cpu(i) {
- while (i > rnp->grphi)
---- a/kernel/rcu/tree.h
-+++ b/kernel/rcu/tree.h
-@@ -27,6 +27,7 @@
- #include <linux/threads.h>
- #include <linux/cpumask.h>
- #include <linux/seqlock.h>
-+#include <linux/swait.h>
- #include <linux/stop_machine.h>
-
- /*
-@@ -241,7 +242,7 @@ struct rcu_node {
- /* Refused to boost: not sure why, though. */
- /* This can happen due to race conditions. */
- #ifdef CONFIG_RCU_NOCB_CPU
-- wait_queue_head_t nocb_gp_wq[2];
-+ struct swait_queue_head nocb_gp_wq[2];
- /* Place for rcu_nocb_kthread() to wait GP. */
- #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
- int need_future_gp[2];
-@@ -393,7 +394,7 @@ struct rcu_data {
- atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
- struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
- struct rcu_head **nocb_follower_tail;
-- wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
-+ struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
- struct task_struct *nocb_kthread;
- int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
-
-@@ -472,7 +473,7 @@ struct rcu_state {
- unsigned long gpnum; /* Current gp number. */
- unsigned long completed; /* # of last completed gp. */
- struct task_struct *gp_kthread; /* Task for grace periods. */
-- wait_queue_head_t gp_wq; /* Where GP task waits. */
-+ struct swait_queue_head gp_wq; /* Where GP task waits. */
- short gp_flags; /* Commands for GP task. */
- short gp_state; /* GP kthread sleep state. */
-
-@@ -504,7 +505,7 @@ struct rcu_state {
- atomic_long_t expedited_workdone3; /* # done by others #3. */
- atomic_long_t expedited_normal; /* # fallbacks to normal. */
- atomic_t expedited_need_qs; /* # CPUs left to check in. */
-- wait_queue_head_t expedited_wq; /* Wait for check-ins. */
-+ struct swait_queue_head expedited_wq; /* Wait for check-ins. */
- int ncpus_snap; /* # CPUs seen last time. */
-
- unsigned long jiffies_force_qs; /* Time at which to invoke */
-@@ -607,8 +608,8 @@ static void zero_cpu_stall_ticks(struct
- static void increment_cpu_stall_ticks(void);
- static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
- static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
--static wait_queue_head_t *rcu_nocb_gp_get(struct rcu_node *rnp);
--static void rcu_nocb_gp_cleanup(wait_queue_head_t *sq);
-+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
-+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
- static void rcu_init_one_nocb(struct rcu_node *rnp);
- static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
- bool lazy, unsigned long flags);
---- a/kernel/rcu/tree_plugin.h
-+++ b/kernel/rcu/tree_plugin.h
-@@ -1822,9 +1822,9 @@ early_param("rcu_nocb_poll", parse_rcu_n
- * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
- * grace period.
- */
--static void rcu_nocb_gp_cleanup(wait_queue_head_t *sq)
-+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
- {
-- wake_up_all(sq);
-+ swake_up_all(sq);
- }
-
- /*
-@@ -1840,15 +1840,15 @@ static void rcu_nocb_gp_set(struct rcu_n
- rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
- }
-
--static wait_queue_head_t *rcu_nocb_gp_get(struct rcu_node *rnp)
-+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
- {
- return &rnp->nocb_gp_wq[rnp->completed & 0x1];
- }
-
- static void rcu_init_one_nocb(struct rcu_node *rnp)
- {
-- init_waitqueue_head(&rnp->nocb_gp_wq[0]);
-- init_waitqueue_head(&rnp->nocb_gp_wq[1]);
-+ init_swait_queue_head(&rnp->nocb_gp_wq[0]);
-+ init_swait_queue_head(&rnp->nocb_gp_wq[1]);
- }
-
- #ifndef CONFIG_RCU_NOCB_CPU_ALL
-@@ -1873,7 +1873,7 @@ static void wake_nocb_leader(struct rcu_
- if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
- /* Prior smp_mb__after_atomic() orders against prior enqueue. */
- WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
-- wake_up(&rdp_leader->nocb_wq);
-+ swake_up(&rdp_leader->nocb_wq);
- }
- }
-
-@@ -2086,7 +2086,7 @@ static void rcu_nocb_wait_gp(struct rcu_
- */
- trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
- for (;;) {
-- wait_event_interruptible(
-+ swait_event_interruptible(
- rnp->nocb_gp_wq[c & 0x1],
- (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
- if (likely(d))
-@@ -2114,7 +2114,7 @@ static void nocb_leader_wait(struct rcu_
- /* Wait for callbacks to appear. */
- if (!rcu_nocb_poll) {
- trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
-- wait_event_interruptible(my_rdp->nocb_wq,
-+ swait_event_interruptible(my_rdp->nocb_wq,
- !READ_ONCE(my_rdp->nocb_leader_sleep));
- /* Memory barrier handled by smp_mb() calls below and repoll. */
- } else if (firsttime) {
-@@ -2189,7 +2189,7 @@ static void nocb_leader_wait(struct rcu_
- * List was empty, wake up the follower.
- * Memory barriers supplied by atomic_long_add().
- */
-- wake_up(&rdp->nocb_wq);
-+ swake_up(&rdp->nocb_wq);
- }
- }
-
-@@ -2210,7 +2210,7 @@ static void nocb_follower_wait(struct rc
- if (!rcu_nocb_poll) {
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
- "FollowerSleep");
-- wait_event_interruptible(rdp->nocb_wq,
-+ swait_event_interruptible(rdp->nocb_wq,
- READ_ONCE(rdp->nocb_follower_head));
- } else if (firsttime) {
- /* Don't drown trace log with "Poll"! */
-@@ -2369,7 +2369,7 @@ void __init rcu_init_nohz(void)
- static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
- {
- rdp->nocb_tail = &rdp->nocb_head;
-- init_waitqueue_head(&rdp->nocb_wq);
-+ init_swait_queue_head(&rdp->nocb_wq);
- rdp->nocb_follower_tail = &rdp->nocb_follower_head;
- }
-
-@@ -2519,7 +2519,7 @@ static bool rcu_nocb_cpu_needs_barrier(s
- return false;
- }
-
--static void rcu_nocb_gp_cleanup(wait_queue_head_t *sq)
-+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
- {
- }
-
-@@ -2527,7 +2527,7 @@ static void rcu_nocb_gp_set(struct rcu_n
- {
- }
-
--static wait_queue_head_t *rcu_nocb_gp_get(struct rcu_node *rnp)
-+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
- {
- return NULL;
- }
diff --git a/patches/0006-ARM-at91-pm-simply-call-at91_pm_init.patch b/patches/0006-ARM-at91-pm-simply-call-at91_pm_init.patch
deleted file mode 100644
index cb5d3f2ae01539..00000000000000
--- a/patches/0006-ARM-at91-pm-simply-call-at91_pm_init.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Date: Wed, 30 Sep 2015 01:08:33 +0200
-Subject: [PATCH 06/13] ARM: at91: pm: simply call at91_pm_init
-
-at91_pm_init() doesn't return a value, as is the case for its callers,
-simply call it instead of returning its non-existent return value.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/mach-at91/pm.c | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
---- a/arch/arm/mach-at91/pm.c
-+++ b/arch/arm/mach-at91/pm.c
-@@ -432,7 +432,7 @@ void __init at91sam9260_pm_init(void)
- at91_dt_ramc();
- at91_pm_data.memctrl = AT91_MEMCTRL_SDRAMC;
- at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
-- return at91_pm_init();
-+ at91_pm_init();
- }
-
- void __init at91sam9g45_pm_init(void)
-@@ -440,7 +440,7 @@ void __init at91sam9g45_pm_init(void)
- at91_dt_ramc();
- at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP;
- at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
-- return at91_pm_init();
-+ at91_pm_init();
- }
-
- void __init at91sam9x5_pm_init(void)
-@@ -448,5 +448,5 @@ void __init at91sam9x5_pm_init(void)
- at91_dt_ramc();
- at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
- at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
-- return at91_pm_init();
-+ at91_pm_init();
- }
diff --git a/patches/0007-ARM-at91-pm-find-and-remap-the-pmc.patch b/patches/0007-ARM-at91-pm-find-and-remap-the-pmc.patch
deleted file mode 100644
index 5564ca952fb2f9..00000000000000
--- a/patches/0007-ARM-at91-pm-find-and-remap-the-pmc.patch
+++ /dev/null
@@ -1,90 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Date: Wed, 30 Sep 2015 01:31:34 +0200
-Subject: [PATCH 07/13] ARM: at91: pm: find and remap the pmc
-
-To avoid relying on at91_pmc_read(), find the pmc node and remap it
-locally.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/mach-at91/pm.c | 33 +++++++++++++++++++++++++++------
- 1 file changed, 27 insertions(+), 6 deletions(-)
-
---- a/arch/arm/mach-at91/pm.c
-+++ b/arch/arm/mach-at91/pm.c
-@@ -35,6 +35,8 @@
- #include "generic.h"
- #include "pm.h"
-
-+static void __iomem *pmc;
-+
- /*
- * FIXME: this is needed to communicate between the pinctrl driver and
- * the PM implementation in the machine. Possibly part of the PM
-@@ -87,7 +89,7 @@ static int at91_pm_verify_clocks(void)
- unsigned long scsr;
- int i;
-
-- scsr = at91_pmc_read(AT91_PMC_SCSR);
-+ scsr = readl(pmc + AT91_PMC_SCSR);
-
- /* USB must not be using PLLB */
- if ((scsr & at91_pm_data.uhp_udp_mask) != 0) {
-@@ -101,8 +103,7 @@ static int at91_pm_verify_clocks(void)
-
- if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
- continue;
--
-- css = at91_pmc_read(AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
-+ css = readl(pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
- if (css != AT91_PMC_CSS_SLOW) {
- pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
- return 0;
-@@ -145,8 +146,8 @@ static void at91_pm_suspend(suspend_stat
- flush_cache_all();
- outer_disable();
-
-- at91_suspend_sram_fn(at91_pmc_base, at91_ramc_base[0],
-- at91_ramc_base[1], pm_data);
-+ at91_suspend_sram_fn(pmc, at91_ramc_base[0],
-+ at91_ramc_base[1], pm_data);
-
- outer_resume();
- }
-@@ -399,13 +400,33 @@ static void __init at91_pm_sram_init(voi
- &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
- }
-
-+static const struct of_device_id atmel_pmc_ids[] __initconst = {
-+ { .compatible = "atmel,at91rm9200-pmc" },
-+ { .compatible = "atmel,at91sam9260-pmc" },
-+ { .compatible = "atmel,at91sam9g45-pmc" },
-+ { .compatible = "atmel,at91sam9n12-pmc" },
-+ { .compatible = "atmel,at91sam9x5-pmc" },
-+ { .compatible = "atmel,sama5d3-pmc" },
-+ { .compatible = "atmel,sama5d2-pmc" },
-+ { /* sentinel */ },
-+};
-+
- static void __init at91_pm_init(void)
- {
-- at91_pm_sram_init();
-+ struct device_node *pmc_np;
-
- if (at91_cpuidle_device.dev.platform_data)
- platform_device_register(&at91_cpuidle_device);
-
-+ pmc_np = of_find_matching_node(NULL, atmel_pmc_ids);
-+ pmc = of_iomap(pmc_np, 0);
-+ if (!pmc) {
-+ pr_err("AT91: PM not supported, PMC not found\n");
-+ return;
-+ }
-+
-+ at91_pm_sram_init();
-+
- if (at91_suspend_sram_fn)
- suspend_set_ops(&at91_pm_ops);
- else
diff --git a/patches/0008-ARM-at91-pm-move-idle-functions-to-pm.c.patch b/patches/0008-ARM-at91-pm-move-idle-functions-to-pm.c.patch
deleted file mode 100644
index 1d016641f0d6d7..00000000000000
--- a/patches/0008-ARM-at91-pm-move-idle-functions-to-pm.c.patch
+++ /dev/null
@@ -1,201 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Date: Wed, 30 Sep 2015 01:58:40 +0200
-Subject: [PATCH 08/13] ARM: at91: pm: move idle functions to pm.c
-
-Avoid using code from clk/at91 for PM.
-This also has the bonus effect of setting arm_pm_idle for sama5 platforms.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/mach-at91/at91rm9200.c | 2 --
- arch/arm/mach-at91/at91sam9.c | 2 --
- arch/arm/mach-at91/generic.h | 6 ++----
- arch/arm/mach-at91/pm.c | 37 ++++++++++++++++++++++++++++++++-----
- arch/arm/mach-at91/sama5.c | 2 +-
- drivers/clk/at91/pmc.c | 15 ---------------
- 6 files changed, 35 insertions(+), 29 deletions(-)
-
---- a/arch/arm/mach-at91/at91rm9200.c
-+++ b/arch/arm/mach-at91/at91rm9200.c
-@@ -12,7 +12,6 @@
- #include <linux/of_platform.h>
-
- #include <asm/mach/arch.h>
--#include <asm/system_misc.h>
-
- #include "generic.h"
- #include "soc.h"
-@@ -33,7 +32,6 @@ static void __init at91rm9200_dt_device_
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
-
-- arm_pm_idle = at91rm9200_idle;
- at91rm9200_pm_init();
- }
-
---- a/arch/arm/mach-at91/at91sam9.c
-+++ b/arch/arm/mach-at91/at91sam9.c
-@@ -62,8 +62,6 @@ static void __init at91sam9_common_init(
- soc_dev = soc_device_to_device(soc);
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
--
-- arm_pm_idle = at91sam9_idle;
- }
-
- static void __init at91sam9_dt_device_init(void)
---- a/arch/arm/mach-at91/generic.h
-+++ b/arch/arm/mach-at91/generic.h
-@@ -18,20 +18,18 @@
- extern void __init at91_map_io(void);
- extern void __init at91_alt_map_io(void);
-
--/* idle */
--extern void at91rm9200_idle(void);
--extern void at91sam9_idle(void);
--
- #ifdef CONFIG_PM
- extern void __init at91rm9200_pm_init(void);
- extern void __init at91sam9260_pm_init(void);
- extern void __init at91sam9g45_pm_init(void);
- extern void __init at91sam9x5_pm_init(void);
-+extern void __init sama5_pm_init(void);
- #else
- static inline void __init at91rm9200_pm_init(void) { }
- static inline void __init at91sam9260_pm_init(void) { }
- static inline void __init at91sam9g45_pm_init(void) { }
- static inline void __init at91sam9x5_pm_init(void) { }
-+static inline void __init sama5_pm_init(void) { }
- #endif
-
- #endif /* _AT91_GENERIC_H */
---- a/arch/arm/mach-at91/pm.c
-+++ b/arch/arm/mach-at91/pm.c
-@@ -31,6 +31,7 @@
- #include <asm/mach/irq.h>
- #include <asm/fncpy.h>
- #include <asm/cacheflush.h>
-+#include <asm/system_misc.h>
-
- #include "generic.h"
- #include "pm.h"
-@@ -354,6 +355,21 @@ static __init void at91_dt_ramc(void)
- at91_pm_set_standby(standby);
- }
-
-+void at91rm9200_idle(void)
-+{
-+ /*
-+ * Disable the processor clock. The processor will be automatically
-+ * re-enabled by an interrupt or by a reset.
-+ */
-+ writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
-+}
-+
-+void at91sam9_idle(void)
-+{
-+ writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
-+ cpu_do_idle();
-+}
-+
- static void __init at91_pm_sram_init(void)
- {
- struct gen_pool *sram_pool;
-@@ -411,7 +427,7 @@ static const struct of_device_id atmel_p
- { /* sentinel */ },
- };
-
--static void __init at91_pm_init(void)
-+static void __init at91_pm_init(void (*pm_idle)(void))
- {
- struct device_node *pmc_np;
-
-@@ -425,6 +441,9 @@ static void __init at91_pm_init(void)
- return;
- }
-
-+ if (pm_idle)
-+ arm_pm_idle = pm_idle;
-+
- at91_pm_sram_init();
-
- if (at91_suspend_sram_fn)
-@@ -445,7 +464,7 @@ void __init at91rm9200_pm_init(void)
- at91_pm_data.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP;
- at91_pm_data.memctrl = AT91_MEMCTRL_MC;
-
-- at91_pm_init();
-+ at91_pm_init(at91rm9200_idle);
- }
-
- void __init at91sam9260_pm_init(void)
-@@ -453,7 +472,7 @@ void __init at91sam9260_pm_init(void)
- at91_dt_ramc();
- at91_pm_data.memctrl = AT91_MEMCTRL_SDRAMC;
- at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
-- at91_pm_init();
-+ at91_pm_init(at91sam9_idle);
- }
-
- void __init at91sam9g45_pm_init(void)
-@@ -461,7 +480,7 @@ void __init at91sam9g45_pm_init(void)
- at91_dt_ramc();
- at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP;
- at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
-- at91_pm_init();
-+ at91_pm_init(at91sam9_idle);
- }
-
- void __init at91sam9x5_pm_init(void)
-@@ -469,5 +488,13 @@ void __init at91sam9x5_pm_init(void)
- at91_dt_ramc();
- at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
- at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
-- at91_pm_init();
-+ at91_pm_init(at91sam9_idle);
-+}
-+
-+void __init sama5_pm_init(void)
-+{
-+ at91_dt_ramc();
-+ at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
-+ at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
-+ at91_pm_init(NULL);
- }
---- a/arch/arm/mach-at91/sama5.c
-+++ b/arch/arm/mach-at91/sama5.c
-@@ -51,7 +51,7 @@ static void __init sama5_dt_device_init(
- soc_dev = soc_device_to_device(soc);
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
-- at91sam9x5_pm_init();
-+ sama5_pm_init();
- }
-
- static const char *const sama5_dt_board_compat[] __initconst = {
---- a/drivers/clk/at91/pmc.c
-+++ b/drivers/clk/at91/pmc.c
-@@ -32,21 +32,6 @@ struct at91_pmc {
- void __iomem *at91_pmc_base;
- EXPORT_SYMBOL_GPL(at91_pmc_base);
-
--void at91rm9200_idle(void)
--{
-- /*
-- * Disable the processor clock. The processor will be automatically
-- * re-enabled by an interrupt or by a reset.
-- */
-- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK);
--}
--
--void at91sam9_idle(void)
--{
-- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK);
-- cpu_do_idle();
--}
--
- int of_at91_get_clk_range(struct device_node *np, const char *propname,
- struct clk_range *range)
- {
diff --git a/patches/0009-ARM-at91-remove-useless-includes-and-function-protot.patch b/patches/0009-ARM-at91-remove-useless-includes-and-function-protot.patch
deleted file mode 100644
index 4fac07bd3bfc5c..00000000000000
--- a/patches/0009-ARM-at91-remove-useless-includes-and-function-protot.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Date: Wed, 30 Sep 2015 02:01:20 +0200
-Subject: [PATCH 09/13] ARM: at91: remove useless includes and function
- prototypes
-
-Remove leftover from the previous cleanup
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/mach-at91/generic.h | 7 -------
- 1 file changed, 7 deletions(-)
-
---- a/arch/arm/mach-at91/generic.h
-+++ b/arch/arm/mach-at91/generic.h
-@@ -11,13 +11,6 @@
- #ifndef _AT91_GENERIC_H
- #define _AT91_GENERIC_H
-
--#include <linux/of.h>
--#include <linux/reboot.h>
--
-- /* Map io */
--extern void __init at91_map_io(void);
--extern void __init at91_alt_map_io(void);
--
- #ifdef CONFIG_PM
- extern void __init at91rm9200_pm_init(void);
- extern void __init at91sam9260_pm_init(void);
diff --git a/patches/0010-usb-gadget-atmel-access-the-PMC-using-regmap.patch b/patches/0010-usb-gadget-atmel-access-the-PMC-using-regmap.patch
deleted file mode 100644
index 379a98ba87f968..00000000000000
--- a/patches/0010-usb-gadget-atmel-access-the-PMC-using-regmap.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Date: Wed, 30 Sep 2015 12:57:10 +0200
-Subject: [PATCH 10/13] usb: gadget: atmel: access the PMC using regmap
-
-Use regmap to access the PMC to avoid using at91_pmc_read and
-at91_pmc_write.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
-Acked-by: Felipe Balbi <balbi@ti.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/usb/gadget/udc/atmel_usba_udc.c | 20 ++++++++++----------
- drivers/usb/gadget/udc/atmel_usba_udc.h | 2 ++
- 2 files changed, 12 insertions(+), 10 deletions(-)
-
---- a/drivers/usb/gadget/udc/atmel_usba_udc.c
-+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
-@@ -17,7 +17,9 @@
- #include <linux/device.h>
- #include <linux/dma-mapping.h>
- #include <linux/list.h>
-+#include <linux/mfd/syscon.h>
- #include <linux/platform_device.h>
-+#include <linux/regmap.h>
- #include <linux/usb/ch9.h>
- #include <linux/usb/gadget.h>
- #include <linux/usb/atmel_usba_udc.h>
-@@ -1888,20 +1890,15 @@ static int atmel_usba_stop(struct usb_ga
- #ifdef CONFIG_OF
- static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on)
- {
-- unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
--
-- if (is_on)
-- at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
-- else
-- at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
-+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
-+ is_on ? AT91_PMC_BIASEN : 0);
- }
-
- static void at91sam9g45_pulse_bias(struct usba_udc *udc)
- {
-- unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
--
-- at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
-- at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
-+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 0);
-+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
-+ AT91_PMC_BIASEN);
- }
-
- static const struct usba_udc_errata at91sam9rl_errata = {
-@@ -1938,6 +1935,9 @@ static struct usba_ep * atmel_udc_of_ini
- return ERR_PTR(-EINVAL);
-
- udc->errata = match->data;
-+ udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
-+ if (udc->errata && IS_ERR(udc->pmc))
-+ return ERR_CAST(udc->pmc);
-
- udc->num_ep = 0;
-
---- a/drivers/usb/gadget/udc/atmel_usba_udc.h
-+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
-@@ -354,6 +354,8 @@ struct usba_udc {
- struct dentry *debugfs_root;
- struct dentry *debugfs_regs;
- #endif
-+
-+ struct regmap *pmc;
- };
-
- static inline struct usba_ep *to_usba_ep(struct usb_ep *ep)
diff --git a/patches/0011-clk-at91-pmc-drop-at91_pmc_base.patch b/patches/0011-clk-at91-pmc-drop-at91_pmc_base.patch
deleted file mode 100644
index 39addf9379a132..00000000000000
--- a/patches/0011-clk-at91-pmc-drop-at91_pmc_base.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Date: Wed, 30 Sep 2015 13:02:01 +0200
-Subject: [PATCH 11/13] clk: at91: pmc: drop at91_pmc_base
-
-at91_pmc_base is not used anymore, remove it along with at91_pmc_read and
-at91_pmc_write.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
-Acked-by: Stephen Boyd <sboyd@codeaurora.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clk/at91/pmc.c | 7 -------
- include/linux/clk/at91_pmc.h | 12 ------------
- 2 files changed, 19 deletions(-)
-
---- a/drivers/clk/at91/pmc.c
-+++ b/drivers/clk/at91/pmc.c
-@@ -12,7 +12,6 @@
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
-@@ -29,9 +28,6 @@ struct at91_pmc {
- const struct at91_pmc_caps *caps;
- };
-
--void __iomem *at91_pmc_base;
--EXPORT_SYMBOL_GPL(at91_pmc_base);
--
- int of_at91_get_clk_range(struct device_node *np, const char *propname,
- struct clk_range *range)
- {
-@@ -108,11 +104,8 @@ static void __init of_at91_pmc_setup(str
- const struct at91_pmc_caps *caps)
- {
- struct at91_pmc *pmc;
-- void __iomem *regbase = of_iomap(np, 0);
- struct regmap *regmap;
-
-- at91_pmc_base = regbase;
--
- regmap = syscon_node_to_regmap(np);
- if (IS_ERR(regmap))
- panic("Could not retrieve syscon regmap");
---- a/include/linux/clk/at91_pmc.h
-+++ b/include/linux/clk/at91_pmc.h
-@@ -16,18 +16,6 @@
- #ifndef AT91_PMC_H
- #define AT91_PMC_H
-
--#ifndef __ASSEMBLY__
--extern void __iomem *at91_pmc_base;
--
--#define at91_pmc_read(field) \
-- readl_relaxed(at91_pmc_base + field)
--
--#define at91_pmc_write(field, value) \
-- writel_relaxed(value, at91_pmc_base + field)
--#else
--.extern at91_pmc_base
--#endif
--
- #define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
- #define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */
-
diff --git a/patches/0012-clk-at91-pmc-remove-useless-capacities-handling.patch b/patches/0012-clk-at91-pmc-remove-useless-capacities-handling.patch
deleted file mode 100644
index b54c5bf6fdb5b7..00000000000000
--- a/patches/0012-clk-at91-pmc-remove-useless-capacities-handling.patch
+++ /dev/null
@@ -1,155 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Date: Wed, 27 Jan 2016 15:05:55 +0100
-Subject: [PATCH 12/13] clk: at91: pmc: remove useless capacities handling
-
-Capacities only handles interrupts and they are not use anymore. Remove the
-whole initialisation.
-
-Acked-by: Stephen Boyd <sboyd@codeaurora.org>
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clk/at91/pmc.c | 128 -------------------------------------------------
- 1 file changed, 128 deletions(-)
-
---- a/drivers/clk/at91/pmc.c
-+++ b/drivers/clk/at91/pmc.c
-@@ -19,15 +19,6 @@
-
- #include "pmc.h"
-
--struct at91_pmc_caps {
-- u32 available_irqs;
--};
--
--struct at91_pmc {
-- struct regmap *regmap;
-- const struct at91_pmc_caps *caps;
--};
--
- int of_at91_get_clk_range(struct device_node *np, const char *propname,
- struct clk_range *range)
- {
-@@ -50,122 +41,3 @@ int of_at91_get_clk_range(struct device_
- return 0;
- }
- EXPORT_SYMBOL_GPL(of_at91_get_clk_range);
--
--static const struct at91_pmc_caps at91rm9200_caps = {
-- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
-- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
-- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
-- AT91_PMC_PCK3RDY,
--};
--
--static const struct at91_pmc_caps at91sam9260_caps = {
-- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
-- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
-- AT91_PMC_PCK1RDY,
--};
--
--static const struct at91_pmc_caps at91sam9g45_caps = {
-- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
-- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
-- AT91_PMC_PCK1RDY,
--};
--
--static const struct at91_pmc_caps at91sam9n12_caps = {
-- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
-- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
-- AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
-- AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
--};
--
--static const struct at91_pmc_caps at91sam9x5_caps = {
-- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
-- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
-- AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
-- AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
--};
--
--static const struct at91_pmc_caps sama5d2_caps = {
-- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
-- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
-- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
-- AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS |
-- AT91_PMC_CFDEV | AT91_PMC_GCKRDY,
--};
--
--static const struct at91_pmc_caps sama5d3_caps = {
-- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
-- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
-- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
-- AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS |
-- AT91_PMC_CFDEV,
--};
--
--static void __init of_at91_pmc_setup(struct device_node *np,
-- const struct at91_pmc_caps *caps)
--{
-- struct at91_pmc *pmc;
-- struct regmap *regmap;
--
-- regmap = syscon_node_to_regmap(np);
-- if (IS_ERR(regmap))
-- panic("Could not retrieve syscon regmap");
--
-- pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
-- if (!pmc)
-- return;
--
-- pmc->regmap = regmap;
-- pmc->caps = caps;
--
-- regmap_write(pmc->regmap, AT91_PMC_IDR, 0xffffffff);
--
--}
--
--static void __init of_at91rm9200_pmc_setup(struct device_node *np)
--{
-- of_at91_pmc_setup(np, &at91rm9200_caps);
--}
--CLK_OF_DECLARE(at91rm9200_clk_pmc, "atmel,at91rm9200-pmc",
-- of_at91rm9200_pmc_setup);
--
--static void __init of_at91sam9260_pmc_setup(struct device_node *np)
--{
-- of_at91_pmc_setup(np, &at91sam9260_caps);
--}
--CLK_OF_DECLARE(at91sam9260_clk_pmc, "atmel,at91sam9260-pmc",
-- of_at91sam9260_pmc_setup);
--
--static void __init of_at91sam9g45_pmc_setup(struct device_node *np)
--{
-- of_at91_pmc_setup(np, &at91sam9g45_caps);
--}
--CLK_OF_DECLARE(at91sam9g45_clk_pmc, "atmel,at91sam9g45-pmc",
-- of_at91sam9g45_pmc_setup);
--
--static void __init of_at91sam9n12_pmc_setup(struct device_node *np)
--{
-- of_at91_pmc_setup(np, &at91sam9n12_caps);
--}
--CLK_OF_DECLARE(at91sam9n12_clk_pmc, "atmel,at91sam9n12-pmc",
-- of_at91sam9n12_pmc_setup);
--
--static void __init of_at91sam9x5_pmc_setup(struct device_node *np)
--{
-- of_at91_pmc_setup(np, &at91sam9x5_caps);
--}
--CLK_OF_DECLARE(at91sam9x5_clk_pmc, "atmel,at91sam9x5-pmc",
-- of_at91sam9x5_pmc_setup);
--
--static void __init of_sama5d2_pmc_setup(struct device_node *np)
--{
-- of_at91_pmc_setup(np, &sama5d2_caps);
--}
--CLK_OF_DECLARE(sama5d2_clk_pmc, "atmel,sama5d2-pmc",
-- of_sama5d2_pmc_setup);
--
--static void __init of_sama5d3_pmc_setup(struct device_node *np)
--{
-- of_at91_pmc_setup(np, &sama5d3_caps);
--}
--CLK_OF_DECLARE(sama5d3_clk_pmc, "atmel,sama5d3-pmc",
-- of_sama5d3_pmc_setup);
diff --git a/patches/0013-clk-at91-remove-useless-includes.patch b/patches/0013-clk-at91-remove-useless-includes.patch
deleted file mode 100644
index 4329c9e237d4a9..00000000000000
--- a/patches/0013-clk-at91-remove-useless-includes.patch
+++ /dev/null
@@ -1,129 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Date: Wed, 27 Jan 2016 15:17:37 +0100
-Subject: [PATCH 13/13] clk: at91: remove useless includes
-
-Over time, some includes were copy pasted from other clocks drivers but are
-not necessary.
-
-Acked-by: Stephen Boyd <sboyd@codeaurora.org>
-Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clk/at91/clk-generated.c | 2 --
- drivers/clk/at91/clk-h32mx.c | 8 --------
- drivers/clk/at91/clk-peripheral.c | 2 --
- drivers/clk/at91/clk-plldiv.c | 2 --
- drivers/clk/at91/clk-programmable.c | 4 ----
- drivers/clk/at91/clk-slow.c | 8 --------
- drivers/clk/at91/clk-smd.c | 2 --
- drivers/clk/at91/clk-usb.c | 2 --
- 8 files changed, 30 deletions(-)
-
---- a/drivers/clk/at91/clk-generated.c
-+++ b/drivers/clk/at91/clk-generated.c
-@@ -15,8 +15,6 @@
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/io.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
---- a/drivers/clk/at91/clk-h32mx.c
-+++ b/drivers/clk/at91/clk-h32mx.c
-@@ -15,15 +15,7 @@
- #include <linux/clk-provider.h>
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
--#include <linux/delay.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/of_irq.h>
--#include <linux/io.h>
--#include <linux/interrupt.h>
--#include <linux/irq.h>
--#include <linux/sched.h>
--#include <linux/wait.h>
- #include <linux/regmap.h>
- #include <linux/mfd/syscon.h>
-
---- a/drivers/clk/at91/clk-peripheral.c
-+++ b/drivers/clk/at91/clk-peripheral.c
-@@ -12,8 +12,6 @@
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/io.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
---- a/drivers/clk/at91/clk-plldiv.c
-+++ b/drivers/clk/at91/clk-plldiv.c
-@@ -12,8 +12,6 @@
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/io.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
---- a/drivers/clk/at91/clk-programmable.c
-+++ b/drivers/clk/at91/clk-programmable.c
-@@ -12,10 +12,6 @@
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/io.h>
--#include <linux/wait.h>
--#include <linux/sched.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
---- a/drivers/clk/at91/clk-slow.c
-+++ b/drivers/clk/at91/clk-slow.c
-@@ -13,19 +13,11 @@
- #include <linux/clk.h>
- #include <linux/clk-provider.h>
- #include <linux/clkdev.h>
--#include <linux/slab.h>
- #include <linux/clk/at91_pmc.h>
- #include <linux/delay.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/of_irq.h>
--#include <linux/io.h>
--#include <linux/interrupt.h>
--#include <linux/irq.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
--#include <linux/sched.h>
--#include <linux/wait.h>
-
- #include "pmc.h"
- #include "sckc.h"
---- a/drivers/clk/at91/clk-smd.c
-+++ b/drivers/clk/at91/clk-smd.c
-@@ -12,8 +12,6 @@
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/io.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
---- a/drivers/clk/at91/clk-usb.c
-+++ b/drivers/clk/at91/clk-usb.c
-@@ -12,8 +12,6 @@
- #include <linux/clkdev.h>
- #include <linux/clk/at91_pmc.h>
- #include <linux/of.h>
--#include <linux/of_address.h>
--#include <linux/io.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
-
diff --git a/patches/ARM-imx-always-use-TWD-on-IMX6Q.patch b/patches/ARM-imx-always-use-TWD-on-IMX6Q.patch
index 22055a030d93ec..32a5205eb2951b 100644
--- a/patches/ARM-imx-always-use-TWD-on-IMX6Q.patch
+++ b/patches/ARM-imx-always-use-TWD-on-IMX6Q.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
-@@ -524,7 +524,7 @@ config SOC_IMX6Q
+@@ -526,7 +526,7 @@ config SOC_IMX6Q
bool "i.MX6 Quad/DualLite support"
select ARM_ERRATA_764369 if SMP
select HAVE_ARM_SCU if SMP
diff --git a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
index d490e62263c5fe..8780a1b733a84c 100644
--- a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -7,12 +7,12 @@ with a "full" buffer after executing "dmesg" on the shell.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/printk/printk.c | 27 ++++++++++++++++++++++++++-
- 1 file changed, 26 insertions(+), 1 deletion(-)
+ kernel/printk/printk.c | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1262,6 +1262,7 @@ static int syslog_print_all(char __user
+@@ -1268,6 +1268,7 @@ static int syslog_print_all(char __user
{
char *text;
int len = 0;
@@ -20,11 +20,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
-@@ -1273,7 +1274,14 @@ static int syslog_print_all(char __user
+@@ -1279,6 +1280,14 @@ static int syslog_print_all(char __user
u64 seq;
u32 idx;
enum log_flags prev;
--
+ int num_msg;
+try_again:
+ attempts++;
@@ -33,10 +32,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ goto out;
+ }
+ num_msg = 0;
- if (clear_seq < log_first_seq) {
- /* messages are gone, move to first available one */
- clear_seq = log_first_seq;
-@@ -1294,6 +1302,14 @@ static int syslog_print_all(char __user
+
+ /*
+ * Find first record that fits, including all following records,
+@@ -1294,6 +1303,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
@@ -51,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* move first record forward until length fits into the buffer */
-@@ -1307,6 +1323,14 @@ static int syslog_print_all(char __user
+@@ -1307,6 +1324,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
@@ -66,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* last message fitting into this dump */
-@@ -1347,6 +1371,7 @@ static int syslog_print_all(char __user
+@@ -1347,6 +1372,7 @@ static int syslog_print_all(char __user
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
diff --git a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index ab153a020324a7..f32ba0d4f3ab27 100644
--- a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -22,17 +22,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
-@@ -568,7 +568,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -581,7 +581,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
- preempt_disable();
+ migrate_disable();
+ kvm_pmu_flush_hwstate(vcpu);
kvm_timer_flush_hwstate(vcpu);
kvm_vgic_flush_hwstate(vcpu);
-
-@@ -587,7 +587,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
- local_irq_enable();
+@@ -602,7 +602,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+ kvm_pmu_sync_hwstate(vcpu);
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
- preempt_enable();
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
}
-@@ -641,7 +641,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -658,7 +658,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_vgic_sync_hwstate(vcpu);
diff --git a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch b/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
index 0439c87b572aa2..f2452555eb90de 100644
--- a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
+++ b/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
@@ -15,9 +15,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
-@@ -1801,6 +1801,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+@@ -1870,6 +1870,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
+ HRTIMER_MODE_ABS_PINNED);
apic->lapic_timer.timer.function = apic_timer_fn;
+ apic->lapic_timer.timer.irqsafe = 1;
diff --git a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
index 7ce92eec479232..aab925b93c201f 100644
--- a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+++ b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
@@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
-@@ -156,7 +156,7 @@ void acpi_ut_mutex_terminate(void)
+@@ -145,7 +145,7 @@ void acpi_ut_mutex_terminate(void)
/* Delete the spinlocks */
acpi_os_delete_lock(acpi_gbl_gpe_lock);
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index 0b4207ff9c66cc..e8c39a43f44f23 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -19,7 +19,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -76,6 +76,7 @@ config ARM64
+@@ -81,6 +81,7 @@ config ARM64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE
@@ -37,7 +37,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
int cpu; /* cpu */
};
-@@ -103,6 +104,7 @@ static inline struct thread_info *curren
+@@ -109,6 +110,7 @@ static inline struct thread_info *curren
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
@@ -45,7 +45,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
-@@ -118,6 +120,7 @@ static inline struct thread_info *curren
+@@ -124,6 +126,7 @@ static inline struct thread_info *curren
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
@@ -55,7 +55,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
-@@ -35,6 +35,7 @@ int main(void)
+@@ -36,6 +36,7 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -65,9 +65,9 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
-@@ -363,11 +363,16 @@ ENDPROC(el1_sync)
+@@ -411,11 +411,16 @@ ENDPROC(el1_sync)
+
#ifdef CONFIG_PREEMPT
- get_thread_info tsk
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
- cbnz w24, 1f // preempt count != 0
+ cbnz w24, 2f // preempt count != 0
@@ -85,7 +85,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
-@@ -381,6 +386,7 @@ ENDPROC(el1_irq)
+@@ -429,6 +434,7 @@ ENDPROC(el1_irq)
1: bl preempt_schedule_irq // irq en/disable is done inside
ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
@@ -93,11 +93,11 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
ret x24
#endif
-@@ -625,6 +631,7 @@ ENDPROC(cpu_switch_to)
+@@ -675,6 +681,7 @@ ENDPROC(cpu_switch_to)
*/
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
+ tbnz x1, #TIF_NEED_RESCHED_LAZY, work_resched
/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
- ldr x2, [sp, #S_PSTATE]
mov x0, sp // 'regs'
+ enable_irq // enable interrupts for do_notify_resume()
diff --git a/patches/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch b/patches/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch
index cf5ff52f9bdefc..cc3e344a5ed206 100644
--- a/patches/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch
+++ b/patches/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
-@@ -129,7 +129,8 @@ static inline struct thread_info *curren
+@@ -135,7 +135,8 @@ static inline struct thread_info *curren
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
diff --git a/patches/arm-convert-boot-lock-to-raw.patch b/patches/arm-convert-boot-lock-to-raw.patch
index 06a82869f033ed..5ca037db214b8c 100644
--- a/patches/arm-convert-boot-lock-to-raw.patch
+++ b/patches/arm-convert-boot-lock-to-raw.patch
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
-@@ -230,7 +230,7 @@ static void __iomem *scu_base_addr(void)
+@@ -229,7 +229,7 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)(S5P_VA_SCU);
}
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void exynos_secondary_init(unsigned int cpu)
{
-@@ -243,8 +243,8 @@ static void exynos_secondary_init(unsign
+@@ -242,8 +242,8 @@ static void exynos_secondary_init(unsign
/*
* Synchronise with the boot thread.
*/
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
-@@ -308,7 +308,7 @@ static int exynos_boot_secondary(unsigne
+@@ -307,7 +307,7 @@ static int exynos_boot_secondary(unsigne
* Set synchronisation state between this boot processor
* and the secondary one
*/
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The secondary processor is waiting to be released from
-@@ -335,7 +335,7 @@ static int exynos_boot_secondary(unsigne
+@@ -334,7 +334,7 @@ static int exynos_boot_secondary(unsigne
if (timeout == 0) {
printk(KERN_ERR "cpu1 power enable failed");
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return -ETIMEDOUT;
}
}
-@@ -381,7 +381,7 @@ static int exynos_boot_secondary(unsigne
+@@ -380,7 +380,7 @@ static int exynos_boot_secondary(unsigne
* calibrations, then wait for it to finish
*/
fail:
diff --git a/patches/arm-preempt-lazy-support.patch b/patches/arm-preempt-lazy-support.patch
index dd96770ef9a8f2..f26f3c4963ccd1 100644
--- a/patches/arm-preempt-lazy-support.patch
+++ b/patches/arm-preempt-lazy-support.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -68,6 +68,7 @@ config ARM
+@@ -71,6 +71,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
index f78656dafe92fa..cbc0c40a1b953f 100644
--- a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
+++ b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -12,12 +12,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -562,7 +562,7 @@ config XEN_DOM0
+@@ -624,7 +624,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64"
- depends on ARM64 && OF
+ depends on ARM64 && OF && !PREEMPT_RT_FULL
select SWIOTLB_XEN
+ select PARAVIRT
help
- Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
diff --git a/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch b/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
index f22ada1312563c..fa08979005bcdd 100644
--- a/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
+++ b/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -1640,7 +1640,7 @@ static int blk_mq_hctx_notify(void *data
+@@ -1641,7 +1641,7 @@ static int blk_mq_hctx_notify(void *data
{
struct blk_mq_hw_ctx *hctx = data;
diff --git a/patches/block-blk-mq-use-swait.patch b/patches/block-blk-mq-use-swait.patch
index 84da6e0c5adbd9..b699a8b0bc2418 100644
--- a/patches/block-blk-mq-use-swait.patch
+++ b/patches/block-blk-mq-use-swait.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -660,7 +660,7 @@ int blk_queue_enter(struct request_queue
- if (!gfpflags_allow_blocking(gfp))
+ if (nowait)
return -EBUSY;
- ret = wait_event_interruptible(q->mq_freeze_wq,
@@ -62,8 +62,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ swake_up_all(&q->mq_freeze_wq);
}
- struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
-@@ -742,7 +742,7 @@ struct request_queue *blk_alloc_queue_no
+ static void blk_rq_timed_out_timer(unsigned long data)
+@@ -749,7 +749,7 @@ struct request_queue *blk_alloc_queue_no
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -456,7 +456,7 @@ struct request_queue {
+@@ -458,7 +458,7 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
diff --git a/patches/block-mq-don-t-complete-requests-via-IPI.patch b/patches/block-mq-don-t-complete-requests-via-IPI.patch
index 160cf27b79b302..8412a0941fb61f 100644
--- a/patches/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/patches/block-mq-don-t-complete-requests-via-IPI.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
-@@ -325,6 +328,17 @@ void blk_mq_end_request(struct request *
+@@ -323,6 +326,17 @@ void blk_mq_end_request(struct request *
}
EXPORT_SYMBOL(blk_mq_end_request);
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
-@@ -332,6 +346,8 @@ static void __blk_mq_complete_request_re
+@@ -330,6 +344,8 @@ static void __blk_mq_complete_request_re
rq->q->softirq_done_fn(rq);
}
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void blk_mq_ipi_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
-@@ -348,10 +364,14 @@ static void blk_mq_ipi_complete_request(
+@@ -346,10 +362,14 @@ static void blk_mq_ipi_complete_request(
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
-@@ -212,6 +212,7 @@ static inline u16 blk_mq_unique_tag_to_t
+@@ -218,6 +218,7 @@ static inline u16 blk_mq_unique_tag_to_t
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void blk_mq_start_request(struct request *rq);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -89,6 +89,7 @@ struct request {
+@@ -90,6 +90,7 @@ struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
diff --git a/patches/block-mq-drop-per-ctx-cpu_lock.patch b/patches/block-mq-drop-per-ctx-cpu_lock.patch
deleted file mode 100644
index 53e9ccbe966441..00000000000000
--- a/patches/block-mq-drop-per-ctx-cpu_lock.patch
+++ /dev/null
@@ -1,124 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 18 Feb 2015 18:37:26 +0100
-Subject: block/mq: drop per ctx cpu_lock
-
-While converting the get_cpu() to get_cpu_light() I added a cpu lock to
-ensure the same code is not invoked twice on the same CPU. And now I run
-into this:
-
-| kernel BUG at kernel/locking/rtmutex.c:996!
-| invalid opcode: 0000 [#1] PREEMPT SMP
-| CPU0: 13 PID: 75 Comm: kworker/u258:0 Tainted: G I 3.18.7-rt1.5+ #12
-| Workqueue: writeback bdi_writeback_workfn (flush-8:0)
-| task: ffff88023742a620 ti: ffff88023743c000 task.ti: ffff88023743c000
-| RIP: 0010:[<ffffffff81523cc0>] [<ffffffff81523cc0>] rt_spin_lock_slowlock+0x280/0x2d0
-| Call Trace:
-| [<ffffffff815254e7>] rt_spin_lock+0x27/0x60
-taking the same lock again
-|
-| [<ffffffff8127c771>] blk_mq_insert_requests+0x51/0x130
-| [<ffffffff8127d4a9>] blk_mq_flush_plug_list+0x129/0x140
-| [<ffffffff81272461>] blk_flush_plug_list+0xd1/0x250
-| [<ffffffff81522075>] schedule+0x75/0xa0
-| [<ffffffff8152474d>] do_nanosleep+0xdd/0x180
-| [<ffffffff810c8312>] __hrtimer_nanosleep+0xd2/0x1c0
-| [<ffffffff810c8456>] cpu_chill+0x56/0x80
-| [<ffffffff8107c13d>] try_to_grab_pending+0x1bd/0x390
-| [<ffffffff8107c431>] cancel_delayed_work+0x21/0x170
-| [<ffffffff81279a98>] blk_mq_stop_hw_queue+0x18/0x40
-| [<ffffffffa000ac6f>] scsi_queue_rq+0x7f/0x830 [scsi_mod]
-| [<ffffffff8127b0de>] __blk_mq_run_hw_queue+0x1ee/0x360
-| [<ffffffff8127b528>] blk_mq_map_request+0x108/0x190
-take the lock ^^^
-|
-| [<ffffffff8127c8d2>] blk_sq_make_request+0x82/0x350
-| [<ffffffff8126f6c0>] generic_make_request+0xd0/0x120
-| [<ffffffff8126f788>] submit_bio+0x78/0x190
-| [<ffffffff811bd537>] _submit_bh+0x117/0x180
-| [<ffffffff811bf528>] __block_write_full_page.constprop.38+0x138/0x3f0
-| [<ffffffff811bf880>] block_write_full_page+0xa0/0xe0
-| [<ffffffff811c02b3>] blkdev_writepage+0x13/0x20
-| [<ffffffff81127b25>] __writepage+0x15/0x40
-| [<ffffffff8112873b>] write_cache_pages+0x1fb/0x440
-| [<ffffffff811289be>] generic_writepages+0x3e/0x60
-| [<ffffffff8112a17c>] do_writepages+0x1c/0x30
-| [<ffffffff811b3603>] __writeback_single_inode+0x33/0x140
-| [<ffffffff811b462d>] writeback_sb_inodes+0x2bd/0x490
-| [<ffffffff811b4897>] __writeback_inodes_wb+0x97/0xd0
-| [<ffffffff811b4a9b>] wb_writeback+0x1cb/0x210
-| [<ffffffff811b505b>] bdi_writeback_workfn+0x25b/0x380
-| [<ffffffff8107b50b>] process_one_work+0x1bb/0x490
-| [<ffffffff8107c7ab>] worker_thread+0x6b/0x4f0
-| [<ffffffff81081863>] kthread+0xe3/0x100
-| [<ffffffff8152627c>] ret_from_fork+0x7c/0xb0
-
-After looking at this for a while it seems that it is save if blk_mq_ctx is
-used multiple times, the in struct lock protects the access.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- block/blk-mq.c | 4 ----
- block/blk-mq.h | 8 --------
- 2 files changed, 12 deletions(-)
-
---- a/block/blk-mq.c
-+++ b/block/blk-mq.c
-@@ -1405,9 +1405,7 @@ static blk_qc_t blk_sq_make_request(stru
- blk_mq_put_ctx(data.ctx);
-
- if (request_count >= BLK_MAX_REQUEST_COUNT) {
-- spin_unlock(&data.ctx->cpu_lock);
- blk_flush_plug_list(plug, false);
-- spin_lock(&data.ctx->cpu_lock);
- trace_block_plug(q);
- }
-
-@@ -1609,7 +1607,6 @@ static int blk_mq_hctx_cpu_offline(struc
- blk_mq_hctx_clear_pending(hctx, ctx);
- }
- spin_unlock(&ctx->lock);
-- __blk_mq_put_ctx(ctx);
-
- if (list_empty(&tmp))
- return NOTIFY_OK;
-@@ -1803,7 +1800,6 @@ static void blk_mq_init_cpu_queues(struc
- memset(__ctx, 0, sizeof(*__ctx));
- __ctx->cpu = i;
- spin_lock_init(&__ctx->lock);
-- spin_lock_init(&__ctx->cpu_lock);
- INIT_LIST_HEAD(&__ctx->rq_list);
- __ctx->queue = q;
-
---- a/block/blk-mq.h
-+++ b/block/blk-mq.h
-@@ -9,7 +9,6 @@ struct blk_mq_ctx {
- struct list_head rq_list;
- } ____cacheline_aligned_in_smp;
-
-- spinlock_t cpu_lock;
- unsigned int cpu;
- unsigned int index_hw;
-
-@@ -78,7 +77,6 @@ static inline struct blk_mq_ctx *__blk_m
- struct blk_mq_ctx *ctx;
-
- ctx = per_cpu_ptr(q->queue_ctx, cpu);
-- spin_lock(&ctx->cpu_lock);
- return ctx;
- }
-
-@@ -93,14 +91,8 @@ static inline struct blk_mq_ctx *blk_mq_
- return __blk_mq_get_ctx(q, get_cpu_light());
- }
-
--static void __blk_mq_put_ctx(struct blk_mq_ctx *ctx)
--{
-- spin_unlock(&ctx->cpu_lock);
--}
--
- static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
- {
-- __blk_mq_put_ctx(ctx);
- put_cpu_light();
- }
-
diff --git a/patches/block-mq-drop-preempt-disable.patch b/patches/block-mq-drop-preempt-disable.patch
index 354aeb8c78de8d..8a250ea7892d2a 100644
--- a/patches/block-mq-drop-preempt-disable.patch
+++ b/patches/block-mq-drop-preempt-disable.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -343,7 +343,7 @@ static void blk_mq_ipi_complete_request(
+@@ -341,7 +341,7 @@ static void blk_mq_ipi_complete_request(
return;
}
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
-@@ -355,7 +355,7 @@ static void blk_mq_ipi_complete_request(
+@@ -353,7 +353,7 @@ static void blk_mq_ipi_complete_request(
} else {
rq->q->softirq_done_fn(rq);
}
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void __blk_mq_complete_request(struct request *rq)
-@@ -862,14 +862,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
+@@ -868,14 +868,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
return;
if (!async) {
diff --git a/patches/block-mq-use-cpu_light.patch b/patches/block-mq-use-cpu_light.patch
index 1d62870fcba643..f94316592e172f 100644
--- a/patches/block-mq-use-cpu_light.patch
+++ b/patches/block-mq-use-cpu_light.patch
@@ -3,86 +3,26 @@ Date: Wed, 9 Apr 2014 10:37:23 +0200
Subject: block: mq: use cpu_light()
there is a might sleep splat because get_cpu() disables preemption and
-later we grab a lock. As a workaround for this we use get_cpu_light()
-and an additional lock to prevent taking the same ctx.
-
-There is a lock member in the ctx already but there some functions which do ++
-on the member and this works with irq off but on RT we would need the extra lock.
+later we grab a lock. As a workaround for this we use get_cpu_light().
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- block/blk-mq.c | 4 ++++
- block/blk-mq.h | 17 ++++++++++++++---
- 2 files changed, 18 insertions(+), 3 deletions(-)
+ block/blk-mq.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
---- a/block/blk-mq.c
-+++ b/block/blk-mq.c
-@@ -1385,7 +1385,9 @@ static blk_qc_t blk_sq_make_request(stru
- blk_mq_put_ctx(data.ctx);
-
- if (request_count >= BLK_MAX_REQUEST_COUNT) {
-+ spin_unlock(&data.ctx->cpu_lock);
- blk_flush_plug_list(plug, false);
-+ spin_lock(&data.ctx->cpu_lock);
- trace_block_plug(q);
- }
-
-@@ -1587,6 +1589,7 @@ static int blk_mq_hctx_cpu_offline(struc
- blk_mq_hctx_clear_pending(hctx, ctx);
- }
- spin_unlock(&ctx->lock);
-+ __blk_mq_put_ctx(ctx);
-
- if (list_empty(&tmp))
- return NOTIFY_OK;
-@@ -1780,6 +1783,7 @@ static void blk_mq_init_cpu_queues(struc
- memset(__ctx, 0, sizeof(*__ctx));
- __ctx->cpu = i;
- spin_lock_init(&__ctx->lock);
-+ spin_lock_init(&__ctx->cpu_lock);
- INIT_LIST_HEAD(&__ctx->rq_list);
- __ctx->queue = q;
-
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
-@@ -9,6 +9,7 @@ struct blk_mq_ctx {
- struct list_head rq_list;
- } ____cacheline_aligned_in_smp;
-
-+ spinlock_t cpu_lock;
- unsigned int cpu;
- unsigned int index_hw;
-
-@@ -74,7 +75,11 @@ struct blk_align_bitmap {
- static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
- unsigned int cpu)
- {
-- return per_cpu_ptr(q->queue_ctx, cpu);
-+ struct blk_mq_ctx *ctx;
-+
-+ ctx = per_cpu_ptr(q->queue_ctx, cpu);
-+ spin_lock(&ctx->cpu_lock);
-+ return ctx;
- }
-
- /*
-@@ -85,12 +90,18 @@ static inline struct blk_mq_ctx *__blk_m
+@@ -86,12 +86,12 @@ static inline struct blk_mq_ctx *__blk_m
*/
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
- return __blk_mq_get_ctx(q, get_cpu());
+ return __blk_mq_get_ctx(q, get_cpu_light());
-+}
-+
-+static void __blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-+{
-+ spin_unlock(&ctx->cpu_lock);
}
static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
- put_cpu();
-+ __blk_mq_put_ctx(ctx);
+ put_cpu_light();
}
diff --git a/patches/block-shorten-interrupt-disabled-regions.patch b/patches/block-shorten-interrupt-disabled-regions.patch
index 6422d01853a6a5..325baa85b80df4 100644
--- a/patches/block-shorten-interrupt-disabled-regions.patch
+++ b/patches/block-shorten-interrupt-disabled-regions.patch
@@ -47,7 +47,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -3198,7 +3198,7 @@ static void queue_unplugged(struct reque
+@@ -3209,7 +3209,7 @@ static void queue_unplugged(struct reque
blk_run_queue_async(q);
else
__blk_run_queue(q);
@@ -56,7 +56,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3246,7 +3246,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3257,7 +3257,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@@ -64,7 +64,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
-@@ -3266,11 +3265,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3277,11 +3276,6 @@ void blk_flush_plug_list(struct blk_plug
q = NULL;
depth = 0;
@@ -76,7 +76,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
-@@ -3283,7 +3277,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3294,7 +3288,7 @@ void blk_flush_plug_list(struct blk_plug
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
@@ -85,7 +85,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
/*
-@@ -3310,8 +3304,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3321,8 +3315,6 @@ void blk_flush_plug_list(struct blk_plug
*/
if (q)
queue_unplugged(q, depth, from_schedule);
diff --git a/patches/bug-rt-dependend-variants.patch b/patches/bug-rt-dependend-variants.patch
index 85d0be51e82cdb..e9be7176843791 100644
--- a/patches/bug-rt-dependend-variants.patch
+++ b/patches/bug-rt-dependend-variants.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
-@@ -206,6 +206,20 @@ extern void warn_slowpath_null(const cha
+@@ -215,6 +215,20 @@ void __warn(const char *file, int line,
# define WARN_ON_SMP(x) ({0;})
#endif
diff --git a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index c57144635c9d9f..a61c501b4e50b7 100644
--- a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1939,14 +1939,17 @@ static void drain_local_stock(struct wor
+@@ -1828,14 +1828,17 @@ static void drain_local_stock(struct wor
*/
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch
index 6982596fd9746b..70d47b6bcf02bc 100644
--- a/patches/cgroups-use-simple-wait-in-css_release.patch
+++ b/patches/cgroups-use-simple-wait-in-css_release.patch
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_CGROUPS
-@@ -142,6 +143,7 @@ struct cgroup_subsys_state {
+@@ -137,6 +138,7 @@ struct cgroup_subsys_state {
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
-@@ -4737,10 +4737,10 @@ static void css_free_rcu_fn(struct rcu_h
+@@ -4943,10 +4943,10 @@ static void css_free_rcu_fn(struct rcu_h
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -4779,8 +4779,8 @@ static void css_release(struct percpu_re
+@@ -4987,8 +4987,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5396,6 +5396,7 @@ static int __init cgroup_wq_init(void)
+@@ -5631,6 +5631,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index 2346eb9572ef57..ace719e619dc6b 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -8,22 +8,22 @@ contention on the waitqueue lock.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- drivers/net/wireless/orinoco/orinoco_usb.c | 2 -
- drivers/usb/gadget/function/f_fs.c | 2 -
- drivers/usb/gadget/legacy/inode.c | 4 +--
- include/linux/completion.h | 9 +++-----
- include/linux/suspend.h | 6 +++++
- include/linux/swait.h | 1
- include/linux/uprobes.h | 1
- kernel/power/hibernate.c | 7 ++++++
- kernel/power/suspend.c | 5 ++++
- kernel/sched/completion.c | 32 ++++++++++++++---------------
- kernel/sched/core.c | 10 +++++++--
- kernel/sched/swait.c | 20 ++++++++++++++++++
+ drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 2 -
+ drivers/usb/gadget/function/f_fs.c | 2 -
+ drivers/usb/gadget/legacy/inode.c | 4 +-
+ include/linux/completion.h | 9 ++---
+ include/linux/suspend.h | 6 +++
+ include/linux/swait.h | 1
+ include/linux/uprobes.h | 1
+ kernel/power/hibernate.c | 7 ++++
+ kernel/power/suspend.c | 5 +++
+ kernel/sched/completion.c | 32 ++++++++++----------
+ kernel/sched/core.c | 10 +++++-
+ kernel/sched/swait.c | 20 ++++++++++++
12 files changed, 72 insertions(+), 27 deletions(-)
---- a/drivers/net/wireless/orinoco/orinoco_usb.c
-+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
+--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
++++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ez
while (!ctx->done.done && msecs--)
udelay(1000);
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
break;
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1404,7 +1404,7 @@ static void ffs_data_put(struct ffs_data
+@@ -1393,7 +1393,7 @@ static void ffs_data_put(struct ffs_data
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
-@@ -345,7 +345,7 @@ ep_io (struct ep_data *epdata, void *buf
+@@ -346,7 +346,7 @@ ep_io (struct ep_data *epdata, void *buf
spin_unlock_irq (&epdata->dev->lock);
if (likely (value == 0)) {
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (value != 0) {
spin_lock_irq (&epdata->dev->lock);
if (likely (epdata->ep != NULL)) {
-@@ -354,7 +354,7 @@ ep_io (struct ep_data *epdata, void *buf
+@@ -355,7 +355,7 @@ ep_io (struct ep_data *epdata, void *buf
usb_ep_dequeue (epdata->ep, epdata->req);
spin_unlock_irq (&epdata->dev->lock);
@@ -166,7 +166,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
-@@ -522,6 +522,8 @@ static int enter_state(suspend_state_t s
+@@ -521,6 +521,8 @@ static int enter_state(suspend_state_t s
return error;
}
@@ -175,7 +175,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* pm_suspend - Externally visible function for suspending the system.
* @state: System sleep state to enter.
-@@ -536,6 +538,8 @@ int pm_suspend(suspend_state_t state)
+@@ -535,6 +537,8 @@ int pm_suspend(suspend_state_t state)
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
@@ -184,7 +184,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
error = enter_state(state);
if (error) {
suspend_stats.fail++;
-@@ -543,6 +547,7 @@ int pm_suspend(suspend_state_t state)
+@@ -542,6 +546,7 @@ int pm_suspend(suspend_state_t state)
} else {
suspend_stats.success++;
}
@@ -286,7 +286,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3092,7 +3092,10 @@ void migrate_disable(void)
+@@ -3141,7 +3141,10 @@ void migrate_disable(void)
}
#ifdef CONFIG_SCHED_DEBUG
@@ -298,7 +298,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -3119,7 +3122,10 @@ void migrate_enable(void)
+@@ -3168,7 +3171,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
@@ -338,7 +338,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ }
+ if (pm_in_action)
+ return;
-+ WARN(wakes > 2, "complate_all() with %d waiters\n", wakes);
++ WARN(wakes > 2, "complete_all() with %d waiters\n", wakes);
+}
+EXPORT_SYMBOL(swake_up_all_locked);
+
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index 85a937b884755b..c2e160c596f3de 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2987,12 +2987,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -3029,12 +3029,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4760,6 +4760,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -4812,6 +4812,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -4773,6 +4774,7 @@ int __sched __cond_resched_softirq(void)
+@@ -4825,6 +4826,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch b/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
index d03d541aae4fd8..44973e92aa7397 100644
--- a/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
+++ b/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -110,6 +110,14 @@ struct hotplug_pcp {
+@@ -187,6 +187,14 @@ struct hotplug_pcp {
int grab_lock;
struct completion synced;
#ifdef CONFIG_PREEMPT_RT_FULL
diff --git a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
index 4dd05bfd9dc301..2bf54bfe456795 100644
--- a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+++ b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -60,10 +60,16 @@ static int cpu_hotplug_disabled;
+@@ -137,10 +137,16 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
-@@ -76,12 +82,26 @@ static struct {
+@@ -153,12 +159,26 @@ static struct {
} cpu_hotplug = {
.active_writer = NULL,
.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
-@@ -118,8 +138,8 @@ void pin_current_cpu(void)
+@@ -195,8 +215,8 @@ void pin_current_cpu(void)
return;
}
preempt_enable();
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_disable();
goto retry;
}
-@@ -192,9 +212,9 @@ void get_online_cpus(void)
+@@ -269,9 +289,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(get_online_cpus);
-@@ -247,11 +267,11 @@ void cpu_hotplug_begin(void)
+@@ -324,11 +344,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
@@ -105,7 +105,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
-@@ -260,7 +280,7 @@ void cpu_hotplug_begin(void)
+@@ -337,7 +357,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index 7d3cca283425ad..a928a4574d10ba 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2288,6 +2288,10 @@ extern void do_set_cpus_allowed(struct t
+@@ -2325,6 +2325,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
-@@ -2300,6 +2304,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -2337,6 +2341,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_NO_HZ_COMMON
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -60,16 +60,10 @@ static int cpu_hotplug_disabled;
+@@ -137,16 +137,10 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
-@@ -81,27 +75,13 @@ static struct {
+@@ -158,27 +152,13 @@ static struct {
#endif
} cpu_hotplug = {
.active_writer = NULL,
@@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
-@@ -109,12 +89,42 @@ static struct {
+@@ -186,12 +166,42 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -168,7 +168,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
-@@ -128,18 +138,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -205,18 +215,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
void pin_current_cpu(void)
{
struct hotplug_pcp *hp;
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_disable();
goto retry;
}
-@@ -160,26 +191,84 @@ void unpin_current_cpu(void)
+@@ -237,26 +268,84 @@ void unpin_current_cpu(void)
wake_up_process(hp->unplug);
}
@@ -304,7 +304,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
-@@ -187,23 +276,83 @@ static int sync_unplug_thread(void *data
+@@ -264,23 +353,83 @@ static int sync_unplug_thread(void *data
static int cpu_unplug_begin(unsigned int cpu)
{
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
@@ -395,7 +395,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void get_online_cpus(void)
-@@ -212,9 +361,9 @@ void get_online_cpus(void)
+@@ -289,9 +438,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
@@ -407,7 +407,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(get_online_cpus);
-@@ -267,11 +416,11 @@ void cpu_hotplug_begin(void)
+@@ -344,11 +493,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
@@ -421,7 +421,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
-@@ -280,7 +429,7 @@ void cpu_hotplug_begin(void)
+@@ -357,7 +506,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
@@ -430,8 +430,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpuhp_lock_release();
}
-@@ -516,6 +665,9 @@ static int _cpu_down(unsigned int cpu, i
-
+@@ -838,6 +987,9 @@ static int takedown_cpu(unsigned int cpu
+ kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
smpboot_park_threads(cpu);
+ /* Notifiers are done. Don't let any more tasks pin this CPU. */
@@ -442,7 +442,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* interrupt affinities.
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1211,6 +1211,84 @@ void do_set_cpus_allowed(struct task_str
+@@ -1091,6 +1091,84 @@ void do_set_cpus_allowed(struct task_str
enqueue_task(rq, p, ENQUEUE_RESTORE);
}
diff --git a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
index 889033e6f1526b..936b653fd3ac9f 100644
--- a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
+++ b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1675,12 +1675,13 @@ void hrtimer_init_sleeper(struct hrtimer
+@@ -1669,12 +1669,13 @@ void hrtimer_init_sleeper(struct hrtimer
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hrtimer_start_expires(&t->timer, mode);
if (likely(t->task))
-@@ -1722,7 +1723,8 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1716,7 +1717,8 @@ long __sched hrtimer_nanosleep_restart(s
HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
rmtp = restart->nanosleep.rmtp;
-@@ -1739,8 +1741,10 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1733,8 +1735,10 @@ long __sched hrtimer_nanosleep_restart(s
return ret;
}
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct restart_block *restart;
struct hrtimer_sleeper t;
-@@ -1753,7 +1757,7 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1747,7 +1751,7 @@ long hrtimer_nanosleep(struct timespec *
hrtimer_init_on_stack(&t.timer, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
/* Absolute timers do not update the rmtp value and restart: */
-@@ -1780,6 +1784,12 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1774,6 +1778,12 @@ long hrtimer_nanosleep(struct timespec *
return ret;
}
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
-@@ -1806,7 +1816,8 @@ void cpu_chill(void)
+@@ -1800,7 +1810,8 @@ void cpu_chill(void)
unsigned int freeze_flag = current->flags & PF_NOFREEZE;
current->flags |= PF_NOFREEZE;
diff --git a/patches/cpu_down_move_migrate_enable_back.patch b/patches/cpu_down_move_migrate_enable_back.patch
index 40d0699a8260e1..17aa462e194560 100644
--- a/patches/cpu_down_move_migrate_enable_back.patch
+++ b/patches/cpu_down_move_migrate_enable_back.patch
@@ -34,19 +34,19 @@ Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -657,6 +657,7 @@ static int _cpu_down(unsigned int cpu, i
- err = -EBUSY;
+@@ -1125,6 +1125,7 @@ static int __ref _cpu_down(unsigned int
goto restore_cpus;
}
-+ migrate_enable();
++ migrate_enable();
cpu_hotplug_begin();
- err = cpu_unplug_begin(cpu);
-@@ -741,7 +742,6 @@ static int _cpu_down(unsigned int cpu, i
- out_release:
+ ret = cpu_unplug_begin(cpu);
+ if (ret) {
+@@ -1172,7 +1173,6 @@ static int __ref _cpu_down(unsigned int
cpu_unplug_done(cpu);
out_cancel:
-- migrate_enable();
cpu_hotplug_done();
- if (!err)
- cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
+- migrate_enable();
+ /* This post dead nonsense must die */
+ if (!ret && hasdied)
+ cpu_notify_nofail(CPU_POST_DEAD, cpu);
diff --git a/patches/cpumask-disable-offstack-on-rt.patch b/patches/cpumask-disable-offstack-on-rt.patch
index bf7781364cd18a..834b6217b924af 100644
--- a/patches/cpumask-disable-offstack-on-rt.patch
+++ b/patches/cpumask-disable-offstack-on-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -851,7 +851,7 @@ config IOMMU_HELPER
+@@ -892,7 +892,7 @@ config IOMMU_HELPER
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
diff --git a/patches/crypto-ccp-remove-rwlocks_types.h.patch b/patches/crypto-ccp-remove-rwlocks_types.h.patch
new file mode 100644
index 00000000000000..e8ebfe3addffc3
--- /dev/null
+++ b/patches/crypto-ccp-remove-rwlocks_types.h.patch
@@ -0,0 +1,22 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 11 May 2016 11:56:18 +0200
+Subject: crypto/ccp: remove rwlocks_types.h
+
+Users of rwlocks should include spinlock.h instead including this
+header file. The current users of rwlocks_types.h are internal.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/crypto/ccp/ccp-dev.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/crypto/ccp/ccp-dev.c
++++ b/drivers/crypto/ccp/ccp-dev.c
+@@ -16,7 +16,6 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/spinlock.h>
+-#include <linux/rwlock_types.h>
+ #include <linux/types.h>
+ #include <linux/mutex.h>
+ #include <linux/delay.h>
diff --git a/patches/dm-make-rt-aware.patch b/patches/dm-make-rt-aware.patch
index 7bf6cbb9bab796..2bbb6804e0ca14 100644
--- a/patches/dm-make-rt-aware.patch
+++ b/patches/dm-make-rt-aware.patch
@@ -15,12 +15,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
-@@ -2127,7 +2127,7 @@ static void dm_request_fn(struct request
+@@ -2187,7 +2187,7 @@ static void dm_request_fn(struct request
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
queue_kthread_work(&md->kworker, &tio->work);
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
}
+ }
- goto out;
diff --git a/patches/drivers-cpuidle-coupled-fix-warning-cpuidle_coupled_.patch b/patches/drivers-cpuidle-coupled-fix-warning-cpuidle_coupled_.patch
deleted file mode 100644
index 5c3d5cd9d58c1b..00000000000000
--- a/patches/drivers-cpuidle-coupled-fix-warning-cpuidle_coupled_.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From: Anders Roxell <anders.roxell@linaro.org>
-Date: Fri, 15 Jan 2016 20:21:12 +0100
-Subject: drivers/cpuidle: coupled: fix warning cpuidle_coupled_lock
-
-Used multi_v7_defconfig+PREEMPT_RT_FULL=y and this caused a compilation
-warning without this fix:
-../drivers/cpuidle/coupled.c:122:21: warning: 'cpuidle_coupled_lock'
-defined but not used [-Wunused-variable]
-
-Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/cpuidle/coupled.c | 1 -
- 1 file changed, 1 deletion(-)
-
---- a/drivers/cpuidle/coupled.c
-+++ b/drivers/cpuidle/coupled.c
-@@ -119,7 +119,6 @@ struct cpuidle_coupled {
-
- #define CPUIDLE_COUPLED_NOT_IDLE (-1)
-
--static DEFINE_MUTEX(cpuidle_coupled_lock);
- static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
-
- /*
diff --git a/patches/drivers-media-vsp1_video-fix-compile-error.patch b/patches/drivers-media-vsp1_video-fix-compile-error.patch
deleted file mode 100644
index 57d677d1b5d3fa..00000000000000
--- a/patches/drivers-media-vsp1_video-fix-compile-error.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From: Anders Roxell <anders.roxell@linaro.org>
-Date: Fri, 15 Jan 2016 01:09:43 +0100
-Subject: drivers/media: vsp1_video: fix compile error
-
-This was found with the -RT patch enabled, but the fix should apply to
-non-RT also.
-
-Compilation error without this fix:
-../drivers/media/platform/vsp1/vsp1_video.c: In function
-'vsp1_pipeline_stopped':
-../drivers/media/platform/vsp1/vsp1_video.c:524:2: error: expected
-expression before 'do'
- spin_unlock_irqrestore(&pipe->irqlock, flags);
- ^
-
-Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/media/platform/vsp1/vsp1_video.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/media/platform/vsp1/vsp1_video.c
-+++ b/drivers/media/platform/vsp1/vsp1_video.c
-@@ -520,7 +520,7 @@ static bool vsp1_pipeline_stopped(struct
- bool stopped;
-
- spin_lock_irqsave(&pipe->irqlock, flags);
-- stopped = pipe->state == VSP1_PIPELINE_STOPPED,
-+ stopped = pipe->state == VSP1_PIPELINE_STOPPED;
- spin_unlock_irqrestore(&pipe->irqlock, flags);
-
- return stopped;
diff --git a/patches/drivers-net-fix-livelock-issues.patch b/patches/drivers-net-fix-livelock-issues.patch
index 06d2a8006144e9..80d97450f9ab88 100644
--- a/patches/drivers-net-fix-livelock-issues.patch
+++ b/patches/drivers-net-fix-livelock-issues.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
-@@ -2221,11 +2221,7 @@ static netdev_tx_t atl1c_xmit_frame(stru
+@@ -2217,11 +2217,7 @@ static netdev_tx_t atl1c_xmit_frame(stru
}
tpd_req = atl1c_cal_tpd_req(skb);
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
-@@ -174,11 +174,7 @@ static int rionet_start_xmit(struct sk_b
+@@ -179,11 +179,7 @@ static int rionet_start_xmit(struct sk_b
unsigned long flags;
int add_num = 1;
diff --git a/patches/drivers-net-vortex-fix-locking-issues.patch b/patches/drivers-net-vortex-fix-locking-issues.patch
index 0c6dde7e2729a3..543333ebb59a42 100644
--- a/patches/drivers-net-vortex-fix-locking-issues.patch
+++ b/patches/drivers-net-vortex-fix-locking-issues.patch
@@ -31,7 +31,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
#endif
-@@ -1916,12 +1916,12 @@ static void vortex_tx_timeout(struct net
+@@ -1910,12 +1910,12 @@ static void vortex_tx_timeout(struct net
* Block interrupts because vortex_interrupt does a bare spin_lock()
*/
unsigned long flags;
diff --git a/patches/drivers-tty-pl011-irq-disable-madness.patch b/patches/drivers-tty-pl011-irq-disable-madness.patch
index 34857d9124878f..72116fbff9b6c4 100644
--- a/patches/drivers-tty-pl011-irq-disable-madness.patch
+++ b/patches/drivers-tty-pl011-irq-disable-madness.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
-@@ -2067,13 +2067,19 @@ pl011_console_write(struct console *co,
+@@ -2166,13 +2166,19 @@ pl011_console_write(struct console *co,
clk_enable(uap->clk);
@@ -35,8 +35,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* First save the CR then disable the interrupts
-@@ -2098,8 +2104,7 @@ pl011_console_write(struct console *co,
- writew(old_cr, uap->port.membase + UART011_CR);
+@@ -2196,8 +2202,7 @@ pl011_console_write(struct console *co,
+ pl011_write(old_cr, uap, REG_CR);
if (locked)
- spin_unlock(&uap->port.lock);
diff --git a/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
index bcd215a49ffcf4..50bfea3ba1fddc 100644
--- a/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
+++ b/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-@@ -1264,7 +1264,9 @@ i915_gem_ringbuffer_submission(struct i9
+@@ -1314,7 +1314,9 @@ i915_gem_ringbuffer_submission(struct i9
if (ret)
return ret;
diff --git a/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
index 983f346b8e2800..371961b14ab507 100644
--- a/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
+++ b/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -812,6 +812,7 @@ static int i915_get_crtc_scanoutpos(stru
+@@ -830,6 +830,7 @@ static int i915_get_crtc_scanoutpos(stru
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
-@@ -863,6 +864,7 @@ static int i915_get_crtc_scanoutpos(stru
+@@ -881,6 +882,7 @@ static int i915_get_crtc_scanoutpos(stru
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
-@@ -1862,6 +1862,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1863,6 +1863,7 @@ int radeon_get_crtc_scanoutpos(struct dr
struct radeon_device *rdev = dev->dev_private;
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
-@@ -1954,6 +1955,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1955,6 +1956,7 @@ int radeon_get_crtc_scanoutpos(struct dr
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
diff --git a/patches/epoll-use-get-cpu-light.patch b/patches/epoll-use-get-cpu-light.patch
index f75d1701a8ab20..2824ec2c2a6390 100644
--- a/patches/epoll-use-get-cpu-light.patch
+++ b/patches/epoll-use-get-cpu-light.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
-@@ -505,12 +505,12 @@ static int ep_poll_wakeup_proc(void *pri
+@@ -510,12 +510,12 @@ static int ep_poll_wakeup_proc(void *pri
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
diff --git a/patches/f2fs_Mutex_cant_be_used_by_down_write_nest_lock().patch b/patches/f2fs_Mutex_cant_be_used_by_down_write_nest_lock().patch
deleted file mode 100644
index 070c842d6c2edb..00000000000000
--- a/patches/f2fs_Mutex_cant_be_used_by_down_write_nest_lock().patch
+++ /dev/null
@@ -1,66 +0,0 @@
-Subject: f2fs: Mutex can't be used by down_write_nest_lock()
-From: Yang Shi <yang.shi@linaro.org>
-Date: Fri, 26 Feb 2016 16:25:25 -0800
-
-fsf2_lock_all() calls down_write_nest_lock() to acquire a rw_sem and check
-a mutex, but down_write_nest_lock() is designed for two rw_sem accoring to the
-comment in include/linux/rwsem.h. And, other than f2fs, it is just called in
-mm/mmap.c with two rwsem.
-
-So, it looks it is used wrongly by f2fs. And, it causes the below compile
-warning on -rt kernel too.
-
-In file included from fs/f2fs/xattr.c:25:0:
-fs/f2fs/f2fs.h: In function 'f2fs_lock_all':
-fs/f2fs/f2fs.h:962:34: warning: passing argument 2 of 'down_write_nest_lock' from
- incompatible pointer type [-Wincompatible-pointer-types]
- f2fs_down_write(&sbi->cp_rwsem, &sbi->cp_mutex);
- ^
-
-The nest annotation was anyway bogus as nested annotations for lockdep are
-only required if one nests two locks of the same lock class, which is not the
-case here.
-
-Signed-off-by: Yang Shi <yang.shi@linaro.org>
-Cc: cm224.lee@samsung.com
-Cc: chao2.yu@samsung.com
-Cc: linaro-kernel@lists.linaro.org
-Cc: linux-rt-users@vger.kernel.org
-Cc: bigeasy@linutronix.de
-Cc: rostedt@goodmis.org
-Cc: linux-f2fs-devel@lists.sourceforge.net
-Cc: linux-fsdevel@vger.kernel.org
-Cc: jaegeuk@kernel.org
-Link: http://lkml.kernel.org/r/1456532725-4126-1-git-send-email-yang.shi@linaro.org
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- fs/f2fs/f2fs.h | 4 +---
- 1 file changed, 1 insertion(+), 3 deletions(-)
-
---- a/fs/f2fs/f2fs.h
-+++ b/fs/f2fs/f2fs.h
-@@ -24,7 +24,6 @@
-
- #ifdef CONFIG_F2FS_CHECK_FS
- #define f2fs_bug_on(sbi, condition) BUG_ON(condition)
--#define f2fs_down_write(x, y) down_write_nest_lock(x, y)
- #else
- #define f2fs_bug_on(sbi, condition) \
- do { \
-@@ -33,7 +32,6 @@
- set_sbi_flag(sbi, SBI_NEED_FSCK); \
- } \
- } while (0)
--#define f2fs_down_write(x, y) down_write(x)
- #endif
-
- /*
-@@ -959,7 +957,7 @@ static inline void f2fs_unlock_op(struct
-
- static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
- {
-- f2fs_down_write(&sbi->cp_rwsem, &sbi->cp_mutex);
-+ down_write(&sbi->cp_rwsem);
- }
-
- static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
diff --git a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index ead334bd603c2c..134a6140b2b797 100644
--- a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -17,17 +17,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
-@@ -34,6 +34,7 @@
+@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
+#include <linux/delay.h>
#include <asm/current.h>
- #include <asm/uaccess.h>
+ #include <linux/uaccess.h>
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
-@@ -150,7 +150,7 @@ static struct dentry *get_next_positive_
+@@ -148,7 +148,7 @@ static struct dentry *get_next_positive_
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return dentry; /* try again with same dentry */
}
-@@ -2391,7 +2392,7 @@ void d_delete(struct dentry * dentry)
+@@ -2316,7 +2317,7 @@ void d_delete(struct dentry * dentry)
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
diff --git a/patches/fs-jbd-replace-bh_state-lock.patch b/patches/fs-jbd-replace-bh_state-lock.patch
index 4496a7ad9a3ce0..764a557c72bd52 100644
--- a/patches/fs-jbd-replace-bh_state-lock.patch
+++ b/patches/fs-jbd-replace-bh_state-lock.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
-@@ -352,32 +352,56 @@ static inline struct journal_head *bh2jh
+@@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
diff --git a/patches/fs-ntfs-disable-interrupt-non-rt.patch b/patches/fs-ntfs-disable-interrupt-non-rt.patch
index 49203174e40860..811c199e5922cf 100644
--- a/patches/fs-ntfs-disable-interrupt-non-rt.patch
+++ b/patches/fs-ntfs-disable-interrupt-non-rt.patch
@@ -36,13 +36,29 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- fs/ntfs/aops.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
+ fs/ntfs/aops.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
+@@ -92,13 +92,13 @@ static void ntfs_end_buffer_async_read(s
+ ofs = 0;
+ if (file_ofs < init_size)
+ ofs = init_size - file_ofs;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ kaddr = kmap_atomic(page);
+ memset(kaddr + bh_offset(bh) + ofs, 0,
+ bh->b_size - ofs);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ } else {
+ clear_buffer_uptodate(bh);
@@ -143,13 +143,13 @@ static void ntfs_end_buffer_async_read(s
- recs = PAGE_CACHE_SIZE / rec_size;
+ recs = PAGE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
- local_irq_save(flags);
diff --git a/patches/fs-replace-bh_uptodate_lock-for-rt.patch b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
index 109d35cb70758f..153a6283046e76 100644
--- a/patches/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -305,8 +305,7 @@ static void end_buffer_async_read(struct
+@@ -300,8 +300,7 @@ static void end_buffer_async_read(struct
* decide that the page is now completely done.
*/
first = page_buffers(page);
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
-@@ -319,8 +318,7 @@ static void end_buffer_async_read(struct
+@@ -314,8 +313,7 @@ static void end_buffer_async_read(struct
}
tmp = tmp->b_this_page;
} while (tmp != bh);
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If none of the buffers had errors and they are all
-@@ -332,9 +330,7 @@ static void end_buffer_async_read(struct
+@@ -327,9 +325,7 @@ static void end_buffer_async_read(struct
return;
still_busy:
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -362,8 +358,7 @@ void end_buffer_async_write(struct buffe
+@@ -357,8 +353,7 @@ void end_buffer_async_write(struct buffe
}
first = page_buffers(page);
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_write(bh);
unlock_buffer(bh);
-@@ -375,15 +370,12 @@ void end_buffer_async_write(struct buffe
+@@ -370,15 +365,12 @@ void end_buffer_async_write(struct buffe
}
tmp = tmp->b_this_page;
}
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(end_buffer_async_write);
-@@ -3325,6 +3317,7 @@ struct buffer_head *alloc_buffer_head(gf
+@@ -3314,6 +3306,7 @@ struct buffer_head *alloc_buffer_head(gf
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index b43658e4996739..aef8584cc7785f 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
-@@ -66,6 +66,8 @@ struct trace_entry {
+@@ -56,6 +56,8 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1663,6 +1663,8 @@ tracing_generic_entry_update(struct trac
+@@ -1669,6 +1669,8 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -2560,9 +2562,10 @@ static void print_lat_help_header(struct
+@@ -2566,9 +2568,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
-@@ -428,6 +428,11 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -432,6 +432,11 @@ int trace_print_lat_fmt(struct trace_seq
else
trace_seq_putc(s, '.');
diff --git a/patches/genirq-Add-default-affinity-mask-command-line-option.patch b/patches/genirq-Add-default-affinity-mask-command-line-option.patch
deleted file mode 100644
index f370b37fe7b856..00000000000000
--- a/patches/genirq-Add-default-affinity-mask-command-line-option.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 25 May 2012 16:59:47 +0200
-Subject: genirq: Add default affinity mask command line option
-
-If we isolate CPUs, then we don't want random device interrupts on them. Even
-w/o the user space irq balancer enabled we can end up with irqs on non boot
-cpus and chasing newly requested interrupts is a tedious task.
-
-Allow to restrict the default irq affinity mask.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- Documentation/kernel-parameters.txt | 9 +++++++++
- kernel/irq/irqdesc.c | 21 +++++++++++++++++++--
- 2 files changed, 28 insertions(+), 2 deletions(-)
-
---- a/Documentation/kernel-parameters.txt
-+++ b/Documentation/kernel-parameters.txt
-@@ -1629,6 +1629,15 @@ bytes respectively. Such letter suffixes
- ip= [IP_PNP]
- See Documentation/filesystems/nfs/nfsroot.txt.
-
-+ irqaffinity= [SMP] Set the default irq affinity mask
-+ Format:
-+ <cpu number>,...,<cpu number>
-+ or
-+ <cpu number>-<cpu number>
-+ (must be a positive range in ascending order)
-+ or a mixture
-+ <cpu number>,...,<cpu number>-<cpu number>
-+
- irqfixup [HW]
- When an interrupt is not handled search all handlers
- for it. Intended to get systems with badly broken
---- a/kernel/irq/irqdesc.c
-+++ b/kernel/irq/irqdesc.c
-@@ -24,10 +24,27 @@
- static struct lock_class_key irq_desc_lock_class;
-
- #if defined(CONFIG_SMP)
-+static int __init irq_affinity_setup(char *str)
-+{
-+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-+ cpulist_parse(str, irq_default_affinity);
-+ /*
-+ * Set at least the boot cpu. We don't want to end up with
-+ * bugreports caused by random comandline masks
-+ */
-+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-+ return 1;
-+}
-+__setup("irqaffinity=", irq_affinity_setup);
-+
- static void __init init_irq_default_affinity(void)
- {
-- alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-- cpumask_setall(irq_default_affinity);
-+#ifdef CONFIG_CPUMASK_OFFSTACK
-+ if (!irq_default_affinity)
-+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-+#endif
-+ if (cpumask_empty(irq_default_affinity))
-+ cpumask_setall(irq_default_affinity);
- }
- #else
- static void __init init_irq_default_affinity(void)
diff --git a/patches/genirq-disable-irqpoll-on-rt.patch b/patches/genirq-disable-irqpoll-on-rt.patch
index 9aa92515d4d2a3..4b0751e6c2192d 100644
--- a/patches/genirq-disable-irqpoll-on-rt.patch
+++ b/patches/genirq-disable-irqpoll-on-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
-@@ -444,6 +444,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
+@@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
static int __init irqfixup_setup(char *str)
{
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
-@@ -456,6 +460,10 @@ module_param(irqfixup, int, 0644);
+@@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
diff --git a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
index ad6cdadcf876cb..471619ef97fb24 100644
--- a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+++ b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -206,6 +206,7 @@ extern void resume_device_irqs(void);
+@@ -217,6 +217,7 @@ extern void resume_device_irqs(void);
* @irq: Interrupt to which notification applies
* @kref: Reference count, for internal use
* @work: Work item, for internal use
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @notify: Function to be called on change. This will be
* called in process context.
* @release: Function to be called on release. This will be
-@@ -217,6 +218,7 @@ struct irq_affinity_notify {
+@@ -228,6 +229,7 @@ struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
struct work_struct work;
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -183,6 +183,62 @@ static inline void
+@@ -181,6 +181,62 @@ static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif
@@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
-@@ -222,7 +278,17 @@ int irq_set_affinity_locked(struct irq_d
+@@ -220,7 +276,17 @@ int irq_set_affinity_locked(struct irq_d
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
irqd_set(data, IRQD_AFFINITY_SET);
-@@ -260,10 +326,8 @@ int irq_set_affinity_hint(unsigned int i
+@@ -258,10 +324,8 @@ int irq_set_affinity_hint(unsigned int i
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
-@@ -285,6 +349,13 @@ static void irq_affinity_notify(struct w
+@@ -283,6 +347,13 @@ static void irq_affinity_notify(struct w
kref_put(&notify->kref, notify->release);
}
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
-@@ -314,6 +385,8 @@ irq_set_affinity_notifier(unsigned int i
+@@ -312,6 +383,8 @@ irq_set_affinity_notifier(unsigned int i
notify->irq = irq;
kref_init(&notify->kref);
INIT_WORK(&notify->work, irq_affinity_notify);
diff --git a/patches/genirq-force-threading.patch b/patches/genirq-force-threading.patch
index bbfcbec08ded9d..497fa443068271 100644
--- a/patches/genirq-force-threading.patch
+++ b/patches/genirq-force-threading.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -379,9 +379,13 @@ extern int irq_set_irqchip_state(unsigne
+@@ -390,9 +390,13 @@ extern int irq_set_irqchip_state(unsigne
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
diff --git a/patches/genirq-update-irq_set_irqchip_state-documentation.patch b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
index 2e4440a13fe1d2..8c6e6f3a57e0c1 100644
--- a/patches/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -2054,7 +2054,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2084,7 +2084,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
diff --git a/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch b/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
index 47896518324131..9ef86507abe35e 100644
--- a/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
+++ b/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -268,7 +268,7 @@ static int sync_unplug_thread(void *data
+@@ -345,7 +345,7 @@ static int sync_unplug_thread(void *data
* we don't want any more work on this CPU.
*/
current->flags &= ~PF_NO_SETAFFINITY;
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 8f69f74689e9c3..2053bd9b9f074e 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -13,12 +13,12 @@ tasks on the cpu which should be brought down.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/cpu.h | 7 +--
- kernel/cpu.c | 119 +++++++++++++++++++++++++++++++++++++++++++++++++++-
- 2 files changed, 122 insertions(+), 4 deletions(-)
+ kernel/cpu.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 122 insertions(+), 3 deletions(-)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -222,9 +222,6 @@ static inline void smpboot_thread_init(v
+@@ -221,9 +221,6 @@ static inline void cpu_notifier_register
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
-@@ -234,6 +231,8 @@ extern void get_online_cpus(void);
+@@ -233,6 +230,8 @@ extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
-@@ -251,6 +250,8 @@ static inline void cpu_hotplug_done(void
+@@ -250,6 +249,8 @@ static inline void cpu_hotplug_done(void
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* These aren't inline functions due to a GCC bug. */
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -89,6 +89,100 @@ static struct {
+@@ -166,6 +166,100 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -149,24 +149,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void get_online_cpus(void)
{
-@@ -338,13 +432,14 @@ static int take_cpu_down(void *_param)
- /* Requires cpu_add_remove_lock to be held */
- static int _cpu_down(unsigned int cpu, int tasks_frozen)
- {
-- int err, nr_calls = 0;
-+ int mycpu, err, nr_calls = 0;
- void *hcpu = (void *)(long)cpu;
- unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
- struct take_cpu_down_param tcd_param = {
- .mod = mod,
- .hcpu = hcpu,
- };
+@@ -807,6 +901,8 @@ static int __ref _cpu_down(unsigned int
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ int prev_state, ret = 0;
+ bool hasdied = false;
++ int mycpu;
+ cpumask_var_t cpumask;
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -352,7 +447,27 @@ static int _cpu_down(unsigned int cpu, i
- if (!cpu_online(cpu))
+@@ -814,7 +910,27 @@ static int __ref _cpu_down(unsigned int
+ if (!cpu_present(cpu))
return -EINVAL;
+ /* Move the downtaker off the unplug cpu */
@@ -185,20 +178,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ preempt_enable();
+
cpu_hotplug_begin();
-+ err = cpu_unplug_begin(cpu);
-+ if (err) {
++ ret = cpu_unplug_begin(cpu);
++ if (ret) {
+ printk("cpu_unplug_begin(%d) failed\n", cpu);
+ goto out_cancel;
+ }
- err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
- if (err) {
-@@ -424,6 +539,8 @@ static int _cpu_down(unsigned int cpu, i
- check_for_tasks(cpu);
+ cpuhp_tasks_frozen = tasks_frozen;
+
+@@ -853,6 +969,8 @@ static int __ref _cpu_down(unsigned int
- out_release:
+ hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
+ out:
+ cpu_unplug_done(cpu);
+out_cancel:
cpu_hotplug_done();
- if (!err)
- cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
+ /* This post dead nonsense must die */
+ if (!ret && hasdied)
diff --git a/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
index 5da8b011d622d4..217c46e3dd7020 100644
--- a/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
+++ b/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -168,7 +168,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -245,7 +245,7 @@ static int cpu_unplug_begin(unsigned int
struct task_struct *tsk;
init_completion(&hp->synced);
diff --git a/patches/hotplug-use-migrate-disable.patch b/patches/hotplug-use-migrate-disable.patch
index a9175682f6a6ce..5182f0db5489c7 100644
--- a/patches/hotplug-use-migrate-disable.patch
+++ b/patches/hotplug-use-migrate-disable.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -455,14 +455,13 @@ static int _cpu_down(unsigned int cpu, i
+@@ -918,14 +918,13 @@ static int __ref _cpu_down(unsigned int
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
@@ -28,12 +28,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- preempt_enable();
cpu_hotplug_begin();
- err = cpu_unplug_begin(cpu);
-@@ -543,6 +542,7 @@ static int _cpu_down(unsigned int cpu, i
- out_release:
+ ret = cpu_unplug_begin(cpu);
+@@ -974,6 +973,7 @@ static int __ref _cpu_down(unsigned int
cpu_unplug_done(cpu);
out_cancel:
-+ migrate_enable();
cpu_hotplug_done();
- if (!err)
- cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
++ migrate_enable();
+ /* This post dead nonsense must die */
+ if (!ret && hasdied)
+ cpu_notify_nofail(CPU_POST_DEAD, cpu);
diff --git a/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
index ef0a92332bd471..85434fd685cd1b 100644
--- a/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
+++ b/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/freezer.h>
#include <asm/uaccess.h>
-@@ -713,6 +714,44 @@ static void clock_was_set_work(struct wo
+@@ -707,6 +708,44 @@ static void clock_was_set_work(struct wo
static DECLARE_WORK(hrtimer_work, clock_was_set_work);
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Called from timekeeping and resume code to reprogramm the hrtimer
* interrupt device on all cpus.
-@@ -721,6 +760,7 @@ void clock_was_set_delayed(void)
+@@ -715,6 +754,7 @@ void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
diff --git a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
index 249be2f4905752..2698c67604bc9a 100644
--- a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -66,7 +66,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
unsigned int clock_was_set_seq;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -438,6 +438,7 @@ static void init_rq_hrtick(struct rq *rq
+@@ -306,6 +306,7 @@ static void init_rq_hrtick(struct rq *rq
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
@@ -86,7 +86,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -730,11 +730,8 @@ static inline int hrtimer_is_hres_enable
+@@ -724,11 +724,8 @@ static inline int hrtimer_is_hres_enable
static inline void hrtimer_switch_to_hres(void) { }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
@@ -100,7 +100,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
-@@ -883,7 +880,7 @@ void hrtimer_wait_for_timer(const struct
+@@ -877,7 +874,7 @@ void hrtimer_wait_for_timer(const struct
{
struct hrtimer_clock_base *base = timer->base;
@@ -109,7 +109,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
wait_event(base->cpu_base->wait,
!(hrtimer_callback_running(timer)));
}
-@@ -933,6 +930,11 @@ static void __remove_hrtimer(struct hrti
+@@ -927,6 +924,11 @@ static void __remove_hrtimer(struct hrti
if (!(state & HRTIMER_STATE_ENQUEUED))
return;
@@ -121,7 +121,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
-@@ -1173,6 +1175,7 @@ static void __hrtimer_init(struct hrtime
+@@ -1167,6 +1169,7 @@ static void __hrtimer_init(struct hrtime
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
@@ -129,7 +129,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
-@@ -1213,6 +1216,7 @@ bool hrtimer_active(const struct hrtimer
+@@ -1207,6 +1210,7 @@ bool hrtimer_active(const struct hrtimer
seq = raw_read_seqcount_begin(&cpu_base->seq);
if (timer->state != HRTIMER_STATE_INACTIVE ||
@@ -137,7 +137,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
cpu_base->running == timer)
return true;
-@@ -1311,12 +1315,112 @@ static void __run_hrtimer(struct hrtimer
+@@ -1305,12 +1309,112 @@ static void __run_hrtimer(struct hrtimer
cpu_base->running = NULL;
}
@@ -250,7 +250,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
for (; active; base++, active >>= 1) {
struct timerqueue_node *node;
-@@ -1356,9 +1460,14 @@ static void __hrtimer_run_queues(struct
+@@ -1350,9 +1454,14 @@ static void __hrtimer_run_queues(struct
if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
break;
@@ -266,7 +266,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
#ifdef CONFIG_HIGH_RES_TIMERS
-@@ -1500,8 +1609,6 @@ void hrtimer_run_queues(void)
+@@ -1494,8 +1603,6 @@ void hrtimer_run_queues(void)
now = hrtimer_update_base(cpu_base);
__hrtimer_run_queues(cpu_base, now);
raw_spin_unlock(&cpu_base->lock);
@@ -275,7 +275,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
/*
-@@ -1523,6 +1630,7 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1517,6 +1624,7 @@ static enum hrtimer_restart hrtimer_wake
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
@@ -283,7 +283,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1657,6 +1765,7 @@ static void init_hrtimers_cpu(int cpu)
+@@ -1651,6 +1759,7 @@ static void init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -291,7 +291,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
cpu_base->cpu = cpu;
-@@ -1761,11 +1870,21 @@ static struct notifier_block hrtimers_nb
+@@ -1755,11 +1864,21 @@ static struct notifier_block hrtimers_nb
.notifier_call = hrtimer_cpu_notify,
};
@@ -315,7 +315,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/**
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -1105,6 +1105,7 @@ void tick_setup_sched_timer(void)
+@@ -1213,6 +1213,7 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -325,7 +325,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/* Get the next period (per cpu) */
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
-@@ -507,6 +507,7 @@ static void watchdog_enable(unsigned int
+@@ -523,6 +523,7 @@ static void watchdog_enable(unsigned int
/* kick off the timer for the hardlockup detector */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
diff --git a/patches/hrtimers-prepare-full-preemption.patch b/patches/hrtimers-prepare-full-preemption.patch
index 6a5d72b47681cb..4fd28e70b5a4fa 100644
--- a/patches/hrtimers-prepare-full-preemption.patch
+++ b/patches/hrtimers-prepare-full-preemption.patch
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -866,6 +866,32 @@ u64 hrtimer_forward(struct hrtimer *time
+@@ -860,6 +860,32 @@ u64 hrtimer_forward(struct hrtimer *time
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
-@@ -1083,7 +1109,7 @@ int hrtimer_cancel(struct hrtimer *timer
+@@ -1077,7 +1103,7 @@ int hrtimer_cancel(struct hrtimer *timer
if (ret >= 0)
return ret;
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1474,6 +1500,8 @@ void hrtimer_run_queues(void)
+@@ -1468,6 +1494,8 @@ void hrtimer_run_queues(void)
now = hrtimer_update_base(cpu_base);
__hrtimer_run_queues(cpu_base, now);
raw_spin_unlock(&cpu_base->lock);
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1633,6 +1661,9 @@ static void init_hrtimers_cpu(int cpu)
+@@ -1627,6 +1655,9 @@ static void init_hrtimers_cpu(int cpu)
cpu_base->cpu = cpu;
hrtimer_init_hres(cpu_base);
diff --git a/patches/hwlatdetect.patch b/patches/hwlatdetect.patch
index bac7d137204976..5c1859eefeb32e 100644
--- a/patches/hwlatdetect.patch
+++ b/patches/hwlatdetect.patch
@@ -84,7 +84,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
+consumed by reading from the "sample" (pipe) debugfs file interface.
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
-@@ -121,6 +121,35 @@ config IBM_ASM
+@@ -122,6 +122,35 @@ config IBM_ASM
for information on the specific driver level and support statement
for your IBM server.
diff --git a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index 92064d31e96d0c..263c1c0fda88ba 100644
--- a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -11376,7 +11376,7 @@ void intel_check_page_flip(struct drm_de
+@@ -11476,7 +11476,7 @@ void intel_check_page_flip(struct drm_de
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
diff --git a/patches/ide-use-nort-local-irq-variants.patch b/patches/ide-use-nort-local-irq-variants.patch
index 11e3df947f0e14..7bbc795cca9b04 100644
--- a/patches/ide-use-nort-local-irq-variants.patch
+++ b/patches/ide-use-nort-local-irq-variants.patch
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
-@@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *h
+@@ -1236,7 +1236,7 @@ static int init_dma_hpt366(ide_hwif_t *h
dma_old = inb(base + 2);
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
dma_new = dma_old;
pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
-@@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *h
+@@ -1247,7 +1247,7 @@ static int init_dma_hpt366(ide_hwif_t *h
if (dma_new != dma_old)
outb(dma_new, base + 2);
diff --git a/patches/infiniband-mellanox-ib-use-nort-irq.patch b/patches/infiniband-mellanox-ib-use-nort-irq.patch
index 01e88049f6c31f..06a8b945da1914 100644
--- a/patches/infiniband-mellanox-ib-use-nort-irq.patch
+++ b/patches/infiniband-mellanox-ib-use-nort-irq.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-@@ -857,7 +857,7 @@ void ipoib_mcast_restart_task(struct wor
+@@ -883,7 +883,7 @@ void ipoib_mcast_restart_task(struct wor
ipoib_dbg_mcast(priv, "restarting multicast task\n");
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
netif_addr_lock(dev);
spin_lock(&priv->lock);
-@@ -939,7 +939,7 @@ void ipoib_mcast_restart_task(struct wor
+@@ -965,7 +965,7 @@ void ipoib_mcast_restart_task(struct wor
spin_unlock(&priv->lock);
netif_addr_unlock(dev);
diff --git a/patches/infiniband-ulp-ipoib-remove-pkey_mutex.patch b/patches/infiniband-ulp-ipoib-remove-pkey_mutex.patch
new file mode 100644
index 00000000000000..5e135a65aef8c9
--- /dev/null
+++ b/patches/infiniband-ulp-ipoib-remove-pkey_mutex.patch
@@ -0,0 +1,25 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 11 May 2016 11:52:23 +0200
+Subject: infiniband/ulp/ipoib: remove pkey_mutex
+
+The last user of pkey_mutex was removed in db84f8803759 ("IB/ipoib: Use
+P_Key change event instead of P_Key polling mechanism") but the lock
+remained.
+This patch removes it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/infiniband/ulp/ipoib/ipoib_ib.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -51,8 +51,6 @@ MODULE_PARM_DESC(data_debug_level,
+ "Enable data path debug tracing if > 0");
+ #endif
+
+-static DEFINE_MUTEX(pkey_mutex);
+-
+ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
+ struct ib_pd *pd, struct ib_ah_attr *attr)
+ {
diff --git a/patches/introduce_migrate_disable_cpu_light.patch b/patches/introduce_migrate_disable_cpu_light.patch
index ca6b01bd3c38c2..b5816a3b2b13e8 100644
--- a/patches/introduce_migrate_disable_cpu_light.patch
+++ b/patches/introduce_migrate_disable_cpu_light.patch
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -222,6 +222,9 @@ static inline void smpboot_thread_init(v
+@@ -221,6 +221,9 @@ static inline void cpu_notifier_register
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT_NOTIFIERS
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1412,6 +1412,12 @@ struct task_struct {
+@@ -1429,6 +1429,12 @@ struct task_struct {
#endif
unsigned int policy;
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -1838,14 +1844,6 @@ extern int arch_task_struct_size __read_
+@@ -1875,14 +1881,6 @@ extern int arch_task_struct_size __read_
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
-@@ -3122,6 +3120,31 @@ static inline void set_task_cpu(struct t
+@@ -3164,6 +3162,31 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* boot command line:
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1171,6 +1171,11 @@ void do_set_cpus_allowed(struct task_str
+@@ -1051,6 +1051,11 @@ void do_set_cpus_allowed(struct task_str
lockdep_assert_held(&p->pi_lock);
@@ -162,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
queued = task_on_rq_queued(p);
running = task_current(rq, p);
-@@ -1232,7 +1237,7 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1112,7 +1117,7 @@ static int __set_cpus_allowed_ptr(struct
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
@@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -3012,6 +3017,69 @@ static inline void schedule_debug(struct
+@@ -3061,6 +3066,69 @@ static inline void schedule_debug(struct
schedstat_inc(this_rq(), sched_count);
}
@@ -243,7 +243,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -251,6 +251,9 @@ void print_rt_rq(struct seq_file *m, int
+@@ -559,6 +559,9 @@ void print_rt_rq(struct seq_file *m, int
P(rt_throttled);
PN(rt_time);
PN(rt_runtime);
@@ -253,7 +253,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#undef PN
#undef P
-@@ -635,6 +638,10 @@ void proc_sched_show_task(struct task_st
+@@ -954,6 +957,10 @@ void proc_sched_show_task(struct task_st
#endif
P(policy);
P(prio);
diff --git a/patches/iommu-amd--Use-WARN_ON_NORT.patch b/patches/iommu-amd--Use-WARN_ON_NORT.patch
index 3bde9d093027cf..aeed6219085eea 100644
--- a/patches/iommu-amd--Use-WARN_ON_NORT.patch
+++ b/patches/iommu-amd--Use-WARN_ON_NORT.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
-@@ -2017,10 +2017,10 @@ static int __attach_device(struct iommu_
+@@ -2165,10 +2165,10 @@ static int __attach_device(struct iommu_
int ret;
/*
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* lock domain */
spin_lock(&domain->lock);
-@@ -2183,10 +2183,10 @@ static void __detach_device(struct iommu
+@@ -2331,10 +2331,10 @@ static void __detach_device(struct iommu
struct protection_domain *domain;
/*
diff --git a/patches/ipc-sem-rework-semaphore-wakeups.patch b/patches/ipc-sem-rework-semaphore-wakeups.patch
index aaece82917bfe8..9dcc32866a81b5 100644
--- a/patches/ipc-sem-rework-semaphore-wakeups.patch
+++ b/patches/ipc-sem-rework-semaphore-wakeups.patch
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/ipc/sem.c
+++ b/ipc/sem.c
-@@ -690,6 +690,13 @@ static int perform_atomic_semop(struct s
+@@ -697,6 +697,13 @@ static int perform_atomic_semop(struct s
static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
{
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (list_empty(pt)) {
/*
* Hold preempt off so that we don't get preempted and have the
-@@ -701,6 +708,7 @@ static void wake_up_sem_queue_prepare(st
+@@ -708,6 +715,7 @@ static void wake_up_sem_queue_prepare(st
q->pid = error;
list_add_tail(&q->list, pt);
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -714,6 +722,7 @@ static void wake_up_sem_queue_prepare(st
+@@ -721,6 +729,7 @@ static void wake_up_sem_queue_prepare(st
*/
static void wake_up_sem_queue_do(struct list_head *pt)
{
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct sem_queue *q, *t;
int did_something;
-@@ -726,6 +735,7 @@ static void wake_up_sem_queue_do(struct
+@@ -733,6 +742,7 @@ static void wake_up_sem_queue_do(struct
}
if (did_something)
preempt_enable();
diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index 586090ceb7b62c..a3228348855fe3 100644
--- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -940,7 +940,15 @@ irq_forced_thread_fn(struct irq_desc *de
+@@ -938,7 +938,15 @@ irq_forced_thread_fn(struct irq_desc *de
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -1390,6 +1398,9 @@ static int
+@@ -1388,6 +1396,9 @@ static int
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index fe30b1f69222ab..b79d7896641c6b 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -22,9 +22,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/irq_work.h | 1 +
kernel/irq_work.c | 47 ++++++++++++++++++++++++++++++++++-------------
kernel/sched/rt.c | 1 +
- kernel/time/tick-sched.c | 6 ++++++
+ kernel/time/tick-sched.c | 1 +
kernel/time/timer.c | 6 +++++-
- 5 files changed, 47 insertions(+), 14 deletions(-)
+ 5 files changed, 42 insertions(+), 14 deletions(-)
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -94,6 +94,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
+@@ -102,6 +102,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
rt_rq->push_cpu = nr_cpu_ids;
raw_spin_lock_init(&rt_rq->push_lock);
init_irq_work(&rt_rq->push_work, push_irq_work_func);
@@ -153,22 +153,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* We start is dequeued state, because no RT tasks are queued */
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -181,6 +181,11 @@ static bool can_stop_full_tick(void)
- return false;
- }
-
-+ if (!arch_irq_work_has_interrupt()) {
-+ trace_tick_stop(0, "missing irq work interrupt\n");
-+ return false;
-+ }
-+
- /* sched_clock_tick() needs us? */
- #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
- /*
-@@ -209,6 +214,7 @@ static void nohz_full_kick_work_func(str
+@@ -217,6 +217,7 @@ static void nohz_full_kick_func(struct i
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
- .func = nohz_full_kick_work_func,
+ .func = nohz_full_kick_func,
+ .flags = IRQ_WORK_HARD_IRQ,
};
diff --git a/patches/jump-label-rt.patch b/patches/jump-label-rt.patch
index 45dec0e56269db..4643147ee9bc4d 100644
--- a/patches/jump-label-rt.patch
+++ b/patches/jump-label-rt.patch
@@ -24,12 +24,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -33,7 +33,7 @@ config ARM
+@@ -35,7 +35,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
-- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
-+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !PREEMPT_RT_BASE
- select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
+- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
++ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
+ select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
- select HAVE_ARCH_TRACEHOOK
diff --git a/patches/kconfig-disable-a-few-options-rt.patch b/patches/kconfig-disable-a-few-options-rt.patch
index 872e2aff7fffe5..bf0c195d4ba80f 100644
--- a/patches/kconfig-disable-a-few-options-rt.patch
+++ b/patches/kconfig-disable-a-few-options-rt.patch
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
help
--- a/mm/Kconfig
+++ b/mm/Kconfig
-@@ -392,7 +392,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+@@ -391,7 +391,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
config TRANSPARENT_HUGEPAGE
bool "Transparent Hugepage Support"
diff --git a/patches/kernel-SRCU-provide-a-static-initializer.patch b/patches/kernel-SRCU-provide-a-static-initializer.patch
index e73dad9895ba89..92b19ed986f3a7 100644
--- a/patches/kernel-SRCU-provide-a-static-initializer.patch
+++ b/patches/kernel-SRCU-provide-a-static-initializer.patch
@@ -38,8 +38,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * often but notifier_blocks will seldom be removed.
*/
- typedef int (*notifier_fn_t)(struct notifier_block *nb,
-@@ -88,7 +86,7 @@ struct srcu_notifier_head {
+ struct notifier_block;
+@@ -90,7 +88,7 @@ struct srcu_notifier_head {
(name)->head = NULL; \
} while (0)
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
-@@ -101,7 +99,13 @@ extern void srcu_init_notifier_head(stru
+@@ -103,7 +101,13 @@ extern void srcu_init_notifier_head(stru
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
-@@ -113,6 +117,18 @@ extern void srcu_init_notifier_head(stru
+@@ -115,6 +119,18 @@ extern void srcu_init_notifier_head(stru
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
-@@ -182,12 +198,12 @@ static inline int notifier_to_errno(int
+@@ -184,12 +200,12 @@ static inline int notifier_to_errno(int
/*
* Declared notifiers so far. I can imagine quite a few more chains
@@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
.running = false, \
.batch_queue = RCU_BATCH_INIT(name.batch_queue), \
-@@ -104,7 +104,7 @@ void process_srcu(struct work_struct *wo
+@@ -119,7 +119,7 @@ void process_srcu(struct work_struct *wo
*/
#define __DEFINE_SRCU(name, is_static) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
diff --git a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
index 4bf7247eb49c80..7197bd82884bb7 100644
--- a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+++ b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -109,6 +109,7 @@ struct hotplug_pcp {
+@@ -186,6 +186,7 @@ struct hotplug_pcp {
int refcount;
int grab_lock;
struct completion synced;
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT_RT_FULL
/*
* Note, on PREEMPT_RT, the hotplug lock must save the state of
-@@ -212,6 +213,7 @@ static int sync_unplug_thread(void *data
+@@ -289,6 +290,7 @@ static int sync_unplug_thread(void *data
{
struct hotplug_pcp *hp = data;
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_disable();
hp->unplug = current;
wait_for_pinned_cpus(hp);
-@@ -277,6 +279,14 @@ static void __cpu_unplug_sync(struct hot
+@@ -354,6 +356,14 @@ static void __cpu_unplug_sync(struct hot
wait_for_completion(&hp->synced);
}
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
-@@ -300,6 +310,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -377,6 +387,7 @@ static int cpu_unplug_begin(unsigned int
tell_sched_cpu_down_begin(cpu);
init_completion(&hp->synced);
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
if (IS_ERR(hp->sync_tsk)) {
-@@ -315,8 +326,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -392,8 +403,7 @@ static int cpu_unplug_begin(unsigned int
* wait for tasks that are going to enter these sections and
* we must not have them block.
*/
@@ -75,11 +75,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -671,6 +681,7 @@ static int _cpu_down(unsigned int cpu, i
+@@ -991,6 +1001,7 @@ static int takedown_cpu(unsigned int cpu
else
synchronize_rcu();
+ __cpu_unplug_wait(cpu);
+ /* Park the smpboot threads */
+ kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
smpboot_park_threads(cpu);
-
- /* Notifiers are done. Don't let any more tasks pin this CPU. */
diff --git a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
index e8720e3d0e40c3..e43e3b1cbba217 100644
--- a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
+++ b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
@@ -15,15 +15,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -629,6 +629,7 @@ static int _cpu_down(unsigned int cpu, i
- .hcpu = hcpu,
- };
+@@ -1096,6 +1096,7 @@ static int __ref _cpu_down(unsigned int
+ bool hasdied = false;
+ int mycpu;
cpumask_var_t cpumask;
+ cpumask_var_t cpumask_org;
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -639,6 +640,12 @@ static int _cpu_down(unsigned int cpu, i
+@@ -1106,6 +1107,12 @@ static int __ref _cpu_down(unsigned int
/* Move the downtaker off the unplug cpu */
if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
return -ENOMEM;
@@ -36,23 +36,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
-@@ -647,7 +654,8 @@ static int _cpu_down(unsigned int cpu, i
+@@ -1114,7 +1121,8 @@ static int __ref _cpu_down(unsigned int
if (mycpu == cpu) {
printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
migrate_enable();
- return -EBUSY;
-+ err = -EBUSY;
++ ret = -EBUSY;
+ goto restore_cpus;
}
cpu_hotplug_begin();
-@@ -737,6 +745,9 @@ static int _cpu_down(unsigned int cpu, i
- cpu_hotplug_done();
- if (!err)
- cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
+@@ -1168,6 +1176,9 @@ static int __ref _cpu_down(unsigned int
+ /* This post dead nonsense must die */
+ if (!ret && hasdied)
+ cpu_notify_nofail(CPU_POST_DEAD, cpu);
+restore_cpus:
+ set_cpus_allowed_ptr(current, cpumask_org);
+ free_cpumask_var(cpumask_org);
- return err;
+ return ret;
}
diff --git a/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch b/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
index f18c3e980d9508..5ddcd9a719b110 100644
--- a/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
+++ b/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3068,7 +3068,7 @@ void migrate_disable(void)
+@@ -3117,7 +3117,7 @@ void migrate_disable(void)
{
struct task_struct *p = current;
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic++;
#endif
-@@ -3095,7 +3095,7 @@ void migrate_enable(void)
+@@ -3144,7 +3144,7 @@ void migrate_enable(void)
{
struct task_struct *p = current;
diff --git a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
index 03daa83a38f1f4..f19b2a6acf1a03 100644
--- a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
+++ b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -802,6 +802,7 @@ static void __perf_mux_hrtimer_init(stru
+@@ -963,6 +963,7 @@ static void __perf_mux_hrtimer_init(stru
raw_spin_lock_init(&cpuctx->hrtimer_lock);
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
timer->function = perf_mux_hrtimer_handler;
diff --git a/patches/kernel-sched-fix-preempt_disable_ip-recodring-for-pr.patch b/patches/kernel-sched-fix-preempt_disable_ip-recodring-for-pr.patch
deleted file mode 100644
index 6201723cc7afd7..00000000000000
--- a/patches/kernel-sched-fix-preempt_disable_ip-recodring-for-pr.patch
+++ /dev/null
@@ -1,106 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 11 Feb 2016 22:06:28 +0100
-Subject: kernel: sched: Fix preempt_disable_ip recodring for preempt_disable()
-
-preempt_disable() invokes preempt_count_add() which saves the caller in
-current->preempt_disable_ip. It uses CALLER_ADDR1 which does not look for its
-caller but for the parent of the caller. Which means we get the correct caller
-for something like spin_lock() unless the architectures inlines those
-invocations. It is always wrong for preempt_disable() or local_bh_disable().
-
-This patch makes the function get_parent_ip() which tries CALLER_ADDR0,1,2 if
-the former is a locking function. This seems to record the preempt_disable()
-caller properly for preempt_disable() itself as well as for get_cpu_var() or
-local_bh_disable().
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/ftrace.h | 12 ++++++++++++
- include/linux/sched.h | 2 --
- kernel/sched/core.c | 14 ++------------
- kernel/softirq.c | 4 ++--
- 4 files changed, 16 insertions(+), 16 deletions(-)
-
---- a/include/linux/ftrace.h
-+++ b/include/linux/ftrace.h
-@@ -694,6 +694,18 @@ static inline void __ftrace_enabled_rest
- #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
- #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
-
-+static inline unsigned long get_lock_parent_ip(void)
-+{
-+ unsigned long addr = CALLER_ADDR0;
-+
-+ if (!in_lock_functions(addr))
-+ return addr;
-+ addr = CALLER_ADDR1;
-+ if (!in_lock_functions(addr))
-+ return addr;
-+ return CALLER_ADDR2;
-+}
-+
- #ifdef CONFIG_IRQSOFF_TRACER
- extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
- extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -182,8 +182,6 @@ extern void update_cpu_load_nohz(void);
- static inline void update_cpu_load_nohz(void) { }
- #endif
-
--extern unsigned long get_parent_ip(unsigned long addr);
--
- extern void dump_cpu_task(int cpu);
-
- struct seq_file;
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -2910,16 +2910,6 @@ u64 scheduler_tick_max_deferment(void)
- }
- #endif
-
--notrace unsigned long get_parent_ip(unsigned long addr)
--{
-- if (in_lock_functions(addr)) {
-- addr = CALLER_ADDR2;
-- if (in_lock_functions(addr))
-- addr = CALLER_ADDR3;
-- }
-- return addr;
--}
--
- #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
- defined(CONFIG_PREEMPT_TRACER))
-
-@@ -2941,7 +2931,7 @@ void preempt_count_add(int val)
- PREEMPT_MASK - 10);
- #endif
- if (preempt_count() == val) {
-- unsigned long ip = get_parent_ip(CALLER_ADDR1);
-+ unsigned long ip = get_lock_parent_ip();
- #ifdef CONFIG_DEBUG_PREEMPT
- current->preempt_disable_ip = ip;
- #endif
-@@ -2968,7 +2958,7 @@ void preempt_count_sub(int val)
- #endif
-
- if (preempt_count() == val)
-- trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
-+ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
- __preempt_count_sub(val);
- }
- EXPORT_SYMBOL(preempt_count_sub);
---- a/kernel/softirq.c
-+++ b/kernel/softirq.c
-@@ -116,9 +116,9 @@ void __local_bh_disable_ip(unsigned long
-
- if (preempt_count() == cnt) {
- #ifdef CONFIG_DEBUG_PREEMPT
-- current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1);
-+ current->preempt_disable_ip = get_lock_parent_ip();
- #endif
-- trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
-+ trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
- }
- }
- EXPORT_SYMBOL(__local_bh_disable_ip);
diff --git a/patches/kernel-stop_machine-partly-revert-stop_machine-Use-r.patch b/patches/kernel-stop_machine-partly-revert-stop_machine-Use-r.patch
deleted file mode 100644
index 71d204ddd5d0e3..00000000000000
--- a/patches/kernel-stop_machine-partly-revert-stop_machine-Use-r.patch
+++ /dev/null
@@ -1,152 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 10 Feb 2016 18:25:16 +0100
-Subject: kernel/stop_machine: partly revert "stop_machine: Use raw
- spinlocks"
-
-With completion using swait and so rawlocks we don't need this anymore.
-Further, bisect thinks this patch is responsible for:
-
-|BUG: unable to handle kernel NULL pointer dereference at (null)
-|IP: [<ffffffff81082123>] sched_cpu_active+0x53/0x70
-|PGD 0
-|Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC
-|Dumping ftrace buffer:
-| (ftrace buffer empty)
-|Modules linked in:
-|CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.4.1+ #330
-|Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS Debian-1.8.2-1 04/01/2014
-|task: ffff88013ae64b00 ti: ffff88013ae74000 task.ti: ffff88013ae74000
-|RIP: 0010:[<ffffffff81082123>] [<ffffffff81082123>] sched_cpu_active+0x53/0x70
-|RSP: 0000:ffff88013ae77eb8 EFLAGS: 00010082
-|RAX: 0000000000000001 RBX: ffffffff81c2cf20 RCX: 0000001050fb52fb
-|RDX: 0000001050fb52fb RSI: 000000105117ca1e RDI: 00000000001c7723
-|RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000001
-|R10: 0000000000000000 R11: 0000000000000001 R12: 00000000ffffffff
-|R13: ffffffff81c2cee0 R14: 0000000000000000 R15: 0000000000000001
-|FS: 0000000000000000(0000) GS:ffff88013b200000(0000) knlGS:0000000000000000
-|CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
-|CR2: 0000000000000000 CR3: 0000000001c09000 CR4: 00000000000006e0
-|Stack:
-| ffffffff810c446d ffff88013ae77f00 ffffffff8107d8dd 000000000000000a
-| 0000000000000001 0000000000000000 0000000000000000 0000000000000000
-| 0000000000000000 ffff88013ae77f10 ffffffff8107d90e ffff88013ae77f20
-|Call Trace:
-| [<ffffffff810c446d>] ? debug_lockdep_rcu_enabled+0x1d/0x20
-| [<ffffffff8107d8dd>] ? notifier_call_chain+0x5d/0x80
-| [<ffffffff8107d90e>] ? __raw_notifier_call_chain+0xe/0x10
-| [<ffffffff810598a3>] ? cpu_notify+0x23/0x40
-| [<ffffffff8105a7b8>] ? notify_cpu_starting+0x28/0x30
-
-during hotplug. The rawlocks need to remain however.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/stop_machine.c | 40 ++++++++--------------------------------
- 1 file changed, 8 insertions(+), 32 deletions(-)
-
---- a/kernel/stop_machine.c
-+++ b/kernel/stop_machine.c
-@@ -30,7 +30,7 @@ struct cpu_stop_done {
- atomic_t nr_todo; /* nr left to execute */
- bool executed; /* actually executed? */
- int ret; /* collected return value */
-- struct task_struct *waiter; /* woken when nr_todo reaches 0 */
-+ struct completion completion; /* fired if nr_todo reaches 0 */
- };
-
- /* the actual stopper, one per every possible cpu, enabled on online cpus */
-@@ -59,7 +59,7 @@ static void cpu_stop_init_done(struct cp
- {
- memset(done, 0, sizeof(*done));
- atomic_set(&done->nr_todo, nr_todo);
-- done->waiter = current;
-+ init_completion(&done->completion);
- }
-
- /* signal completion unless @done is NULL */
-@@ -68,10 +68,8 @@ static void cpu_stop_signal_done(struct
- if (done) {
- if (executed)
- done->executed = true;
-- if (atomic_dec_and_test(&done->nr_todo)) {
-- wake_up_process(done->waiter);
-- done->waiter = NULL;
-- }
-+ if (atomic_dec_and_test(&done->nr_todo))
-+ complete(&done->completion);
- }
- }
-
-@@ -96,22 +94,6 @@ static void cpu_stop_queue_work(unsigned
- raw_spin_unlock_irqrestore(&stopper->lock, flags);
- }
-
--static void wait_for_stop_done(struct cpu_stop_done *done)
--{
-- set_current_state(TASK_UNINTERRUPTIBLE);
-- while (atomic_read(&done->nr_todo)) {
-- schedule();
-- set_current_state(TASK_UNINTERRUPTIBLE);
-- }
-- /*
-- * We need to wait until cpu_stop_signal_done() has cleared
-- * done->waiter.
-- */
-- while (done->waiter)
-- cpu_relax();
-- set_current_state(TASK_RUNNING);
--}
--
- /**
- * stop_one_cpu - stop a cpu
- * @cpu: cpu to stop
-@@ -143,7 +125,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s
-
- cpu_stop_init_done(&done, 1);
- cpu_stop_queue_work(cpu, &work);
-- wait_for_stop_done(&done);
-+ wait_for_completion(&done.completion);
- return done.executed ? done.ret : -ENOENT;
- }
-
-@@ -302,7 +284,7 @@ int stop_two_cpus(unsigned int cpu1, uns
-
- preempt_enable_nort();
-
-- wait_for_stop_done(&done);
-+ wait_for_completion(&done.completion);
-
- return done.executed ? done.ret : -ENOENT;
- }
-@@ -364,7 +346,7 @@ static int __stop_cpus(const struct cpum
-
- cpu_stop_init_done(&done, cpumask_weight(cpumask));
- queue_stop_cpus_work(cpumask, fn, arg, &done, false);
-- wait_for_stop_done(&done);
-+ wait_for_completion(&done.completion);
- return done.executed ? done.ret : -ENOENT;
- }
-
-@@ -495,13 +477,7 @@ static void cpu_stopper_thread(unsigned
- kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
- ksym_buf), arg);
-
-- /*
-- * Make sure that the wakeup and setting done->waiter
-- * to NULL is atomic.
-- */
-- local_irq_disable();
- cpu_stop_signal_done(done, true);
-- local_irq_enable();
- goto repeat;
- }
- }
-@@ -663,7 +639,7 @@ int stop_machine_from_inactive_cpu(cpu_s
- ret = multi_cpu_stop(&msdata);
-
- /* Busy wait for completion. */
-- while (atomic_read(&done.nr_todo))
-+ while (!completion_done(&done.completion))
- cpu_relax();
-
- mutex_unlock(&stop_cpus_mutex);
diff --git a/patches/kgb-serial-hackaround.patch b/patches/kgb-serial-hackaround.patch
index 46fe574c195d90..3b0c43ea71e407 100644
--- a/patches/kgb-serial-hackaround.patch
+++ b/patches/kgb-serial-hackaround.patch
@@ -32,8 +32,8 @@ Jason.
+#include <linux/kdb.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
-
-@@ -2845,6 +2846,8 @@ void serial8250_console_write(struct uar
+ #include <linux/timer.h>
+@@ -3094,6 +3095,8 @@ void serial8250_console_write(struct uar
if (port->sysrq || oops_in_progress)
locked = 0;
diff --git a/patches/kvm-rt-change-async-pagefault-code-locking-for-PREEM.patch b/patches/kvm-rt-change-async-pagefault-code-locking-for-PREEM.patch
deleted file mode 100644
index 3ac4693d127542..00000000000000
--- a/patches/kvm-rt-change-async-pagefault-code-locking-for-PREEM.patch
+++ /dev/null
@@ -1,157 +0,0 @@
-From: Rik van Riel <riel@redhat.com>
-Date: Mon, 21 Mar 2016 15:13:27 +0100
-Subject: [PATCH] kvm, rt: change async pagefault code locking for PREEMPT_RT
-
-The async pagefault wake code can run from the idle task in exception
-context, so everything here needs to be made non-preemptible.
-
-Conversion to a simple wait queue and raw spinlock does the trick.
-
-Signed-off-by: Rik van Riel <riel@redhat.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/kvm.c | 37 +++++++++++++++++++------------------
- 1 file changed, 19 insertions(+), 18 deletions(-)
-
---- a/arch/x86/kernel/kvm.c
-+++ b/arch/x86/kernel/kvm.c
-@@ -36,6 +36,7 @@
- #include <linux/kprobes.h>
- #include <linux/debugfs.h>
- #include <linux/nmi.h>
-+#include <linux/swait.h>
- #include <asm/timer.h>
- #include <asm/cpu.h>
- #include <asm/traps.h>
-@@ -91,14 +92,14 @@ static void kvm_io_delay(void)
-
- struct kvm_task_sleep_node {
- struct hlist_node link;
-- wait_queue_head_t wq;
-+ struct swait_queue_head wq;
- u32 token;
- int cpu;
- bool halted;
- };
-
- static struct kvm_task_sleep_head {
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- struct hlist_head list;
- } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
-
-@@ -122,17 +123,17 @@ void kvm_async_pf_task_wait(u32 token)
- u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
- struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
- struct kvm_task_sleep_node n, *e;
-- DEFINE_WAIT(wait);
-+ DECLARE_SWAITQUEUE(wait);
-
- rcu_irq_enter();
-
-- spin_lock(&b->lock);
-+ raw_spin_lock(&b->lock);
- e = _find_apf_task(b, token);
- if (e) {
- /* dummy entry exist -> wake up was delivered ahead of PF */
- hlist_del(&e->link);
- kfree(e);
-- spin_unlock(&b->lock);
-+ raw_spin_unlock(&b->lock);
-
- rcu_irq_exit();
- return;
-@@ -141,13 +142,13 @@ void kvm_async_pf_task_wait(u32 token)
- n.token = token;
- n.cpu = smp_processor_id();
- n.halted = is_idle_task(current) || preempt_count() > 1;
-- init_waitqueue_head(&n.wq);
-+ init_swait_queue_head(&n.wq);
- hlist_add_head(&n.link, &b->list);
-- spin_unlock(&b->lock);
-+ raw_spin_unlock(&b->lock);
-
- for (;;) {
- if (!n.halted)
-- prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
-+ prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
- if (hlist_unhashed(&n.link))
- break;
-
-@@ -166,7 +167,7 @@ void kvm_async_pf_task_wait(u32 token)
- }
- }
- if (!n.halted)
-- finish_wait(&n.wq, &wait);
-+ finish_swait(&n.wq, &wait);
-
- rcu_irq_exit();
- return;
-@@ -178,8 +179,8 @@ static void apf_task_wake_one(struct kvm
- hlist_del_init(&n->link);
- if (n->halted)
- smp_send_reschedule(n->cpu);
-- else if (waitqueue_active(&n->wq))
-- wake_up(&n->wq);
-+ else if (swait_active(&n->wq))
-+ swake_up(&n->wq);
- }
-
- static void apf_task_wake_all(void)
-@@ -189,14 +190,14 @@ static void apf_task_wake_all(void)
- for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
- struct hlist_node *p, *next;
- struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
-- spin_lock(&b->lock);
-+ raw_spin_lock(&b->lock);
- hlist_for_each_safe(p, next, &b->list) {
- struct kvm_task_sleep_node *n =
- hlist_entry(p, typeof(*n), link);
- if (n->cpu == smp_processor_id())
- apf_task_wake_one(n);
- }
-- spin_unlock(&b->lock);
-+ raw_spin_unlock(&b->lock);
- }
- }
-
-@@ -212,7 +213,7 @@ void kvm_async_pf_task_wake(u32 token)
- }
-
- again:
-- spin_lock(&b->lock);
-+ raw_spin_lock(&b->lock);
- n = _find_apf_task(b, token);
- if (!n) {
- /*
-@@ -225,17 +226,17 @@ void kvm_async_pf_task_wake(u32 token)
- * Allocation failed! Busy wait while other cpu
- * handles async PF.
- */
-- spin_unlock(&b->lock);
-+ raw_spin_unlock(&b->lock);
- cpu_relax();
- goto again;
- }
- n->token = token;
- n->cpu = smp_processor_id();
-- init_waitqueue_head(&n->wq);
-+ init_swait_queue_head(&n->wq);
- hlist_add_head(&n->link, &b->list);
- } else
- apf_task_wake_one(n);
-- spin_unlock(&b->lock);
-+ raw_spin_unlock(&b->lock);
- return;
- }
- EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
-@@ -486,7 +487,7 @@ void __init kvm_guest_init(void)
- paravirt_ops_setup();
- register_reboot_notifier(&kvm_pv_reboot_nb);
- for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
-- spin_lock_init(&async_pf_sleepers[i].lock);
-+ raw_spin_lock_init(&async_pf_sleepers[i].lock);
- if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
- x86_init.irqs.trap_init = kvm_apf_trap_init;
-
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index a02f651f429391..ce45e4c337e494 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -236,19 +236,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int start_pid;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1796,6 +1796,12 @@ struct task_struct {
- unsigned long trace;
+@@ -1821,6 +1821,12 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
+ #endif /* CONFIG_TRACING */
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ u64 preempt_timestamp_hist;
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ long timer_offset;
+#endif
+#endif
- #endif /* CONFIG_TRACING */
- #ifdef CONFIG_MEMCG
- struct mem_cgroup *memcg_in_oom;
+ #ifdef CONFIG_KCOV
+ /* Coverage collection mode enabled for this task (0 if disabled). */
+ enum kcov_mode kcov_mode;
--- /dev/null
+++ b/include/trace/events/hist.h
@@ -0,0 +1,73 @@
@@ -367,7 +367,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "tick-internal.h"
-@@ -1001,7 +1002,16 @@ void hrtimer_start_range_ns(struct hrtim
+@@ -995,7 +996,16 @@ void hrtimer_start_range_ns(struct hrtim
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
timer_stats_hrtimer_set_start_info(timer);
@@ -384,7 +384,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
leftmost = enqueue_hrtimer(timer, new_base);
if (!leftmost)
goto unlock;
-@@ -1275,6 +1285,8 @@ static void __run_hrtimer(struct hrtimer
+@@ -1269,6 +1279,8 @@ static void __run_hrtimer(struct hrtimer
cpu_base->running = NULL;
}
@@ -393,7 +393,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
struct hrtimer_clock_base *base = cpu_base->clock_base;
-@@ -1294,6 +1306,15 @@ static void __hrtimer_run_queues(struct
+@@ -1288,6 +1300,15 @@ static void __hrtimer_run_queues(struct
timer = container_of(node, struct hrtimer, node);
diff --git a/patches/local-irq-rt-depending-variants.patch b/patches/local-irq-rt-depending-variants.patch
index 87c63ed46c9068..af811d03f39051 100644
--- a/patches/local-irq-rt-depending-variants.patch
+++ b/patches/local-irq-rt-depending-variants.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -186,7 +186,7 @@ extern void devm_free_irq(struct device
+@@ -196,7 +196,7 @@ extern void devm_free_irq(struct device
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
diff --git a/patches/localversion.patch b/patches/localversion.patch
index d061f44f02ca0f..a02382e6df7098 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -1,4 +1,4 @@
-Subject: v4.4.9-rt17
+Subject: Add localversion for -RT release
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt17
++-rt1
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index 818b66bff1e09a..dbf16445867799 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined(CONFIG_IRQSOFF_TRACER) || \
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -3525,6 +3525,7 @@ static void check_flags(unsigned long fl
+@@ -3648,6 +3648,7 @@ static void check_flags(unsigned long fl
}
}
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -3539,6 +3540,7 @@ static void check_flags(unsigned long fl
+@@ -3662,6 +3663,7 @@ static void check_flags(unsigned long fl
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
diff --git a/patches/md-raid5-percpu-handling-rt-aware.patch b/patches/md-raid5-percpu-handling-rt-aware.patch
index a6ce854361cc4f..d23329601c0cd8 100644
--- a/patches/md-raid5-percpu-handling-rt-aware.patch
+++ b/patches/md-raid5-percpu-handling-rt-aware.patch
@@ -20,7 +20,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -1920,8 +1920,9 @@ static void raid_run_ops(struct stripe_h
+@@ -1918,8 +1918,9 @@ static void raid_run_ops(struct stripe_h
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -31,7 +31,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -1977,7 +1978,8 @@ static void raid_run_ops(struct stripe_h
+@@ -1975,7 +1976,8 @@ static void raid_run_ops(struct stripe_h
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -41,7 +41,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
}
static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
-@@ -6414,6 +6416,7 @@ static int raid5_alloc_percpu(struct r5c
+@@ -6415,6 +6417,7 @@ static int raid5_alloc_percpu(struct r5c
__func__, cpu);
break;
}
diff --git a/patches/mips-disable-highmem-on-rt.patch b/patches/mips-disable-highmem-on-rt.patch
index f456b2566a591f..88fba7dd83f597 100644
--- a/patches/mips-disable-highmem-on-rt.patch
+++ b/patches/mips-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2410,7 +2410,7 @@ config CPU_R4400_WORKAROUNDS
+@@ -2416,7 +2416,7 @@ config CPU_R4400_WORKAROUNDS
#
config HIGHMEM
bool "High Memory Support"
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index d880226f039636..c93537f60add37 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -9,12 +9,54 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- mm/swap.c | 34 ++++++++++++++++++++--------------
- 1 file changed, 20 insertions(+), 14 deletions(-)
+ include/linux/swap.h | 1 +
+ mm/compaction.c | 6 ++++--
+ mm/page_alloc.c | 2 ++
+ mm/swap.c | 39 +++++++++++++++++++++++----------------
+ 4 files changed, 30 insertions(+), 18 deletions(-)
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -297,6 +297,7 @@ extern unsigned long nr_free_pagecache_p
+
+
+ /* linux/mm/swap.c */
++DECLARE_LOCAL_IRQ_LOCK(swapvec_lock);
+ extern void lru_cache_add(struct page *);
+ extern void lru_cache_add_anon(struct page *page);
+ extern void lru_cache_add_file(struct page *page);
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1414,10 +1414,12 @@ static int compact_zone(struct zone *zon
+ cc->migrate_pfn & ~((1UL << cc->order) - 1);
+
+ if (cc->last_migrated_pfn < current_block_start) {
+- cpu = get_cpu();
++ cpu = get_cpu_light();
++ local_lock_irq(swapvec_lock);
+ lru_add_drain_cpu(cpu);
++ local_unlock_irq(swapvec_lock);
+ drain_local_pages(zone);
+- put_cpu();
++ put_cpu_light();
+ /* No more flushing until we migrate again */
+ cc->last_migrated_pfn = 0;
+ }
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6274,7 +6274,9 @@ static int page_alloc_cpu_notify(struct
+ int cpu = (unsigned long)hcpu;
+
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
++ local_lock_irq_on(swapvec_lock, cpu);
+ lru_add_drain_cpu(cpu);
++ local_unlock_irq_on(swapvec_lock, cpu);
+ drain_pages(cpu);
+
+ /*
--- a/mm/swap.c
+++ b/mm/swap.c
-@@ -31,6 +31,7 @@
+@@ -32,6 +32,7 @@
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
@@ -22,20 +64,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
-@@ -46,6 +47,9 @@ static DEFINE_PER_CPU(struct pagevec, lr
- static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
+@@ -48,6 +49,9 @@ static DEFINE_PER_CPU(struct pagevec, lr
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
+ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
-+static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
++DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
+
/*
* This path almost never happens for VM activity - pages are normally
* freed via pagevecs. But it gets used by networking.
-@@ -481,11 +485,11 @@ void rotate_reclaimable_page(struct page
+@@ -237,11 +241,11 @@ void rotate_reclaimable_page(struct page
unsigned long flags;
- page_cache_get(page);
+ get_page(page);
- local_irq_save(flags);
+ local_lock_irqsave(rotate_lock, flags);
pvec = this_cpu_ptr(&lru_rotate_pvecs);
@@ -46,7 +88,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -536,12 +540,13 @@ static bool need_activate_page_drain(int
+@@ -292,12 +296,13 @@ static bool need_activate_page_drain(int
void activate_page(struct page *page)
{
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -54,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct pagevec *pvec = &get_locked_var(swapvec_lock,
+ activate_page_pvecs);
- page_cache_get(page);
+ get_page(page);
if (!pagevec_add(pvec, page))
pagevec_lru_move_fn(pvec, __activate_page, NULL);
- put_cpu_var(activate_page_pvecs);
@@ -62,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -567,7 +572,7 @@ void activate_page(struct page *page)
+@@ -323,7 +328,7 @@ void activate_page(struct page *page)
static void __lru_cache_activate_page(struct page *page)
{
@@ -71,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int i;
/*
-@@ -589,7 +594,7 @@ static void __lru_cache_activate_page(st
+@@ -345,7 +350,7 @@ static void __lru_cache_activate_page(st
}
}
@@ -80,14 +122,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -630,13 +635,13 @@ EXPORT_SYMBOL(mark_page_accessed);
+@@ -387,13 +392,13 @@ EXPORT_SYMBOL(mark_page_accessed);
static void __lru_cache_add(struct page *page)
{
- struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
- page_cache_get(page);
+ get_page(page);
if (!pagevec_space(pvec))
__pagevec_lru_add(pvec);
pagevec_add(pvec, page);
@@ -96,7 +138,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -816,9 +821,9 @@ void lru_add_drain_cpu(int cpu)
+@@ -591,9 +596,9 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
@@ -108,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -846,18 +851,19 @@ void deactivate_file_page(struct page *p
+@@ -625,11 +630,12 @@ void deactivate_file_page(struct page *p
return;
if (likely(get_page_unless_zero(page))) {
@@ -123,6 +165,22 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
+@@ -644,19 +650,20 @@ void deactivate_file_page(struct page *p
+ void deactivate_page(struct page *page)
+ {
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
+- struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
++ struct pagevec *pvec = &get_locked_var(swapvec_lock,
++ lru_deactivate_pvecs);
+
+ get_page(page);
+ if (!pagevec_add(pvec, page))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+- put_cpu_var(lru_deactivate_pvecs);
++ put_locked_var(swapvec_lock, lru_deactivate_pvecs);
+ }
+ }
+
void lru_add_drain(void)
{
- lru_add_drain_cpu(get_cpu());
diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch
index d2ad7c35e96430..bd7edc658cadb0 100644
--- a/patches/mm-disable-sloub-rt.patch
+++ b/patches/mm-disable-sloub-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1719,6 +1719,7 @@ choice
+@@ -1717,6 +1717,7 @@ choice
config SLAB
bool "SLAB"
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
help
The regular slab allocator that is established and known to work
well in all environments. It organizes cache hot objects in
-@@ -1737,6 +1738,7 @@ config SLUB
+@@ -1735,6 +1736,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/patches/mm-enable-slub.patch b/patches/mm-enable-slub.patch
index 4d3a46dd9db197..7413036b5898a8 100644
--- a/patches/mm-enable-slub.patch
+++ b/patches/mm-enable-slub.patch
@@ -8,12 +8,12 @@ move the freeing out of the lock held region.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
mm/slab.h | 4 +
- mm/slub.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
- 2 files changed, 102 insertions(+), 27 deletions(-)
+ mm/slub.c | 134 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
+ 2 files changed, 109 insertions(+), 29 deletions(-)
--- a/mm/slab.h
+++ b/mm/slab.h
-@@ -324,7 +324,11 @@ static inline struct kmem_cache *cache_f
+@@ -415,7 +415,11 @@ static inline void slab_post_alloc_hook(
* The slab lists for all objects.
*/
struct kmem_cache_node {
@@ -27,25 +27,25 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct list_head slabs_partial; /* partial list first, better asm code */
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1075,7 +1075,7 @@ static noinline struct kmem_cache_node *
- void *object = head;
- int cnt = 0;
+@@ -1143,7 +1143,7 @@ static noinline int free_debug_processin
+ unsigned long uninitialized_var(flags);
+ int ret = 0;
-- spin_lock_irqsave(&n->list_lock, *flags);
-+ raw_spin_lock_irqsave(&n->list_lock, *flags);
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
slab_lock(page);
- if (!check_slab(s, page))
-@@ -1136,7 +1136,7 @@ static noinline struct kmem_cache_node *
+ if (s->flags & SLAB_CONSISTENCY_CHECKS) {
+@@ -1178,7 +1178,7 @@ static noinline int free_debug_processin
+ bulk_cnt, cnt);
- fail:
slab_unlock(page);
-- spin_unlock_irqrestore(&n->list_lock, *flags);
-+ raw_spin_unlock_irqrestore(&n->list_lock, *flags);
- slab_fix(s, "Object at 0x%p not freed", object);
- return NULL;
- }
-@@ -1263,6 +1263,12 @@ static inline void dec_slabs_node(struct
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ if (!ret)
+ slab_fix(s, "Object at 0x%p not freed", object);
+ return ret;
+@@ -1306,6 +1306,12 @@ static inline void dec_slabs_node(struct
#endif /* CONFIG_SLUB_DEBUG */
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
-@@ -1402,7 +1408,11 @@ static struct page *allocate_slab(struct
+@@ -1415,7 +1421,11 @@ static struct page *allocate_slab(struct
flags &= gfp_allowed_mask;
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
flags |= s->allocflags;
-@@ -1473,7 +1483,11 @@ static struct page *allocate_slab(struct
+@@ -1486,7 +1496,11 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
@@ -82,8 +82,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_disable();
if (!page)
return NULL;
-@@ -1529,6 +1543,16 @@ static void __free_slab(struct kmem_cach
- __free_kmem_pages(page, order);
+@@ -1543,6 +1557,16 @@ static void __free_slab(struct kmem_cach
+ __free_pages(page, order);
}
+static void free_delayed(struct list_head *h)
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-@@ -1560,6 +1584,12 @@ static void free_slab(struct kmem_cache
+@@ -1574,6 +1598,12 @@ static void free_slab(struct kmem_cache
}
call_rcu(head, rcu_free_slab);
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else
__free_slab(s, page);
}
-@@ -1673,7 +1703,7 @@ static void *get_partial_node(struct kme
+@@ -1681,7 +1711,7 @@ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
-@@ -1698,7 +1728,7 @@ static void *get_partial_node(struct kme
+@@ -1706,7 +1736,7 @@ static void *get_partial_node(struct kme
break;
}
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return object;
}
-@@ -1944,7 +1974,7 @@ static void deactivate_slab(struct kmem_
+@@ -1952,7 +1982,7 @@ static void deactivate_slab(struct kmem_
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -139,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
m = M_FULL;
-@@ -1955,7 +1985,7 @@ static void deactivate_slab(struct kmem_
+@@ -1963,7 +1993,7 @@ static void deactivate_slab(struct kmem_
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -1990,7 +2020,7 @@ static void deactivate_slab(struct kmem_
+@@ -1998,7 +2028,7 @@ static void deactivate_slab(struct kmem_
goto redo;
if (lock)
@@ -157,7 +157,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
-@@ -2022,10 +2052,10 @@ static void unfreeze_partials(struct kme
+@@ -2030,10 +2060,10 @@ static void unfreeze_partials(struct kme
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
do {
-@@ -2054,7 +2084,7 @@ static void unfreeze_partials(struct kme
+@@ -2062,7 +2092,7 @@ static void unfreeze_partials(struct kme
}
if (n)
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (discard_page) {
page = discard_page;
-@@ -2093,14 +2123,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2101,14 +2131,21 @@ static void put_cpu_partial(struct kmem_
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
@@ -201,7 +201,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2172,7 +2209,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2180,7 +2217,22 @@ static bool has_cpu_slab(int cpu, void *
static void flush_all(struct kmem_cache *s)
{
@@ -224,7 +224,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2208,10 +2260,10 @@ static unsigned long count_partial(struc
+@@ -2216,10 +2268,10 @@ static unsigned long count_partial(struc
unsigned long x = 0;
struct page *page;
@@ -237,7 +237,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2349,8 +2401,10 @@ static inline void *get_freelist(struct
+@@ -2357,8 +2409,10 @@ static inline void *get_freelist(struct
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -249,7 +249,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *freelist;
struct page *page;
-@@ -2410,6 +2464,13 @@ static void *___slab_alloc(struct kmem_c
+@@ -2418,6 +2472,13 @@ static void *___slab_alloc(struct kmem_c
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
@@ -263,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return freelist;
new_slab:
-@@ -2441,7 +2502,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2449,7 +2510,7 @@ static void *___slab_alloc(struct kmem_c
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
@@ -272,7 +272,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2453,6 +2514,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2461,6 +2522,7 @@ static void *__slab_alloc(struct kmem_ca
{
void *p;
unsigned long flags;
@@ -280,7 +280,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
-@@ -2464,8 +2526,9 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2472,8 +2534,9 @@ static void *__slab_alloc(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
#endif
@@ -291,7 +291,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return p;
}
-@@ -2652,7 +2715,7 @@ static void __slab_free(struct kmem_cach
+@@ -2659,7 +2722,7 @@ static void __slab_free(struct kmem_cach
do {
if (unlikely(n)) {
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
n = NULL;
}
prior = page->freelist;
-@@ -2684,7 +2747,7 @@ static void __slab_free(struct kmem_cach
+@@ -2691,7 +2754,7 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
@@ -309,7 +309,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -2726,7 +2789,7 @@ static void __slab_free(struct kmem_cach
+@@ -2733,7 +2796,7 @@ static void __slab_free(struct kmem_cach
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -318,7 +318,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
slab_empty:
-@@ -2741,7 +2804,7 @@ static void __slab_free(struct kmem_cach
+@@ -2748,7 +2811,7 @@ static void __slab_free(struct kmem_cach
remove_full(s, n, page);
}
@@ -327,7 +327,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -2913,6 +2976,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -2935,6 +2998,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
void **p)
{
struct kmem_cache_cpu *c;
@@ -335,7 +335,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int i;
/* memcg and kmem_cache debug support */
-@@ -2936,7 +3000,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -2958,7 +3022,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* of re-populating per CPU c->freelist
*/
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -344,7 +344,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(!p[i]))
goto error;
-@@ -2948,6 +3012,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -2970,6 +3034,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -352,7 +352,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(flags & __GFP_ZERO)) {
-@@ -3095,7 +3160,7 @@ static void
+@@ -3117,7 +3182,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
@@ -361,7 +361,44 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3677,7 +3742,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3450,6 +3515,10 @@ static void list_slab_objects(struct kme
+ const char *text)
+ {
+ #ifdef CONFIG_SLUB_DEBUG
++#ifdef CONFIG_PREEMPT_RT_BASE
++ /* XXX move out of irq-off section */
++ slab_err(s, page, text, s->name);
++#else
+ void *addr = page_address(page);
+ void *p;
+ unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
+@@ -3470,6 +3539,7 @@ static void list_slab_objects(struct kme
+ slab_unlock(page);
+ kfree(map);
+ #endif
++#endif
+ }
+
+ /*
+@@ -3482,7 +3552,7 @@ static void free_partial(struct kmem_cac
+ struct page *page, *h;
+
+ BUG_ON(irqs_disabled());
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ list_for_each_entry_safe(page, h, &n->partial, lru) {
+ if (!page->inuse) {
+ remove_partial(n, page);
+@@ -3492,7 +3562,7 @@ static void free_partial(struct kmem_cac
+ "Objects remaining in %s on __kmem_cache_shutdown()");
+ }
+ }
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ }
+
+ /*
+@@ -3706,7 +3776,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -370,7 +407,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Build lists of slabs to discard or promote.
-@@ -3708,7 +3773,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3737,7 +3807,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -379,7 +416,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
-@@ -3884,6 +3949,12 @@ void __init kmem_cache_init(void)
+@@ -3913,6 +3983,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
@@ -392,7 +429,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (debug_guardpage_minorder())
slub_max_order = 0;
-@@ -4127,7 +4198,7 @@ static int validate_slab_node(struct kme
+@@ -4156,7 +4232,7 @@ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;
@@ -401,7 +438,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
-@@ -4149,7 +4220,7 @@ static int validate_slab_node(struct kme
+@@ -4178,7 +4254,7 @@ static int validate_slab_node(struct kme
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -410,7 +447,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return count;
}
-@@ -4337,12 +4408,12 @@ static int list_locations(struct kmem_ca
+@@ -4366,12 +4442,12 @@ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index ee1066f21fc557..d13c3a7eba8455 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1959,7 +1959,7 @@ static void drain_all_stock(struct mem_c
+@@ -1848,7 +1848,7 @@ static void drain_all_stock(struct mem_c
return;
/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -1976,7 +1976,7 @@ static void drain_all_stock(struct mem_c
+@@ -1865,7 +1865,7 @@ static void drain_all_stock(struct mem_c
schedule_work_on(cpu, &stock->work);
}
}
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 9d7f3cc6e693b6..3d1e94ce4d3c62 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -7,87 +7,57 @@ patch converts them local locks.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/swap.h | 1 +
- mm/compaction.c | 6 ++++--
- mm/memcontrol.c | 20 ++++++++++++++------
- mm/swap.c | 2 +-
- 4 files changed, 20 insertions(+), 9 deletions(-)
+ mm/memcontrol.c | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
---- a/include/linux/swap.h
-+++ b/include/linux/swap.h
-@@ -298,6 +298,7 @@ extern unsigned long nr_free_pagecache_p
-
-
- /* linux/mm/swap.c */
-+DECLARE_LOCAL_IRQ_LOCK(swapvec_lock);
- extern void lru_cache_add(struct page *);
- extern void lru_cache_add_anon(struct page *page);
- extern void lru_cache_add_file(struct page *page);
---- a/mm/compaction.c
-+++ b/mm/compaction.c
-@@ -1443,10 +1443,12 @@ static int compact_zone(struct zone *zon
- cc->migrate_pfn & ~((1UL << cc->order) - 1);
-
- if (cc->last_migrated_pfn < current_block_start) {
-- cpu = get_cpu();
-+ cpu = get_cpu_light();
-+ local_lock_irq(swapvec_lock);
- lru_add_drain_cpu(cpu);
-+ local_unlock_irq(swapvec_lock);
- drain_local_pages(zone);
-- put_cpu();
-+ put_cpu_light();
- /* No more flushing until we migrate again */
- cc->last_migrated_pfn = 0;
- }
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -67,6 +67,8 @@
+@@ -67,6 +67,7 @@
#include <net/sock.h>
#include <net/ip.h>
- #include <net/tcp_memcontrol.h>
-+#include <linux/locallock.h>
-+
#include "slab.h"
++#include <linux/locallock.h>
#include <asm/uaccess.h>
-@@ -87,6 +89,7 @@ int do_swap_account __read_mostly;
+
+@@ -92,6 +93,8 @@ int do_swap_account __read_mostly;
#define do_swap_account 0
#endif
+static DEFINE_LOCAL_IRQ_LOCK(event_lock);
- static const char * const mem_cgroup_stat_names[] = {
- "cache",
- "rss",
-@@ -4618,12 +4621,12 @@ static int mem_cgroup_move_account(struc
++
+ /* Whether legacy memory+swap accounting is active */
+ static bool do_memsw_account(void)
+ {
+@@ -4484,12 +4487,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
- local_irq_disable();
+ local_lock_irq(event_lock);
- mem_cgroup_charge_statistics(to, page, nr_pages);
+ mem_cgroup_charge_statistics(to, page, compound, nr_pages);
memcg_check_events(to, page);
- mem_cgroup_charge_statistics(from, page, -nr_pages);
+ mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
memcg_check_events(from, page);
- local_irq_enable();
+ local_unlock_irq(event_lock);
out_unlock:
unlock_page(page);
out:
-@@ -5411,10 +5414,10 @@ void mem_cgroup_commit_charge(struct pag
- VM_BUG_ON_PAGE(!PageTransHuge(page), page);
- }
+@@ -5339,10 +5342,10 @@ void mem_cgroup_commit_charge(struct pag
+
+ commit_charge(page, memcg, lrucare);
- local_irq_disable();
+ local_lock_irq(event_lock);
- mem_cgroup_charge_statistics(memcg, page, nr_pages);
+ mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
memcg_check_events(memcg, page);
- local_irq_enable();
+ local_unlock_irq(event_lock);
- if (do_swap_account && PageSwapCache(page)) {
+ if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5470,14 +5473,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5394,14 +5397,14 @@ static void uncharge_batch(struct mem_cg
memcg_oom_recover(memcg);
}
@@ -104,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_pages);
-@@ -5669,6 +5672,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5719,6 +5722,7 @@ void mem_cgroup_swapout(struct page *pag
{
struct mem_cgroup *memcg;
unsigned short oldid;
@@ -112,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5697,9 +5701,13 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5747,9 +5751,13 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for udpating the per-CPU variables.
*/
@@ -120,20 +90,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#ifndef CONFIG_PREEMPT_RT_BASE
VM_BUG_ON(!irqs_disabled());
+#endif
- mem_cgroup_charge_statistics(memcg, page, -1);
+ mem_cgroup_charge_statistics(memcg, page, false, -1);
memcg_check_events(memcg, page);
+ local_unlock_irqrestore(event_lock, flags);
}
- /**
---- a/mm/swap.c
-+++ b/mm/swap.c
-@@ -48,7 +48,7 @@ static DEFINE_PER_CPU(struct pagevec, lr
- static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
-
- static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
--static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
-+DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
-
/*
- * This path almost never happens for VM activity - pages are normally
diff --git a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
index 8465350896d4e4..f47973383d2eed 100644
--- a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
+++ b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -269,9 +269,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+@@ -280,9 +280,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
#ifdef CONFIG_PREEMPT_RT_BASE
# define cpu_lock_irqsave(cpu, flags) \
diff --git a/patches/mm-page_alloc-reduce-lock-sections-further.patch b/patches/mm-page_alloc-reduce-lock-sections-further.patch
index 1a47c5c9e09b73..3f9bffba1881e9 100644
--- a/patches/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/patches/mm-page_alloc-reduce-lock-sections-further.patch
@@ -8,12 +8,12 @@ call free_pages_bulk() outside of the percpu page allocator locks.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- mm/page_alloc.c | 89 +++++++++++++++++++++++++++++++++++++++-----------------
- 1 file changed, 63 insertions(+), 26 deletions(-)
+ mm/page_alloc.c | 87 +++++++++++++++++++++++++++++++++++++++-----------------
+ 1 file changed, 62 insertions(+), 25 deletions(-)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -797,7 +797,7 @@ static inline int free_pages_check(struc
+@@ -827,7 +827,7 @@ static inline int free_pages_check(struc
}
/*
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -808,18 +808,53 @@ static inline int free_pages_check(struc
+@@ -838,18 +838,53 @@ static inline int free_pages_check(struc
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (to_free) {
struct page *page;
struct list_head *list;
-@@ -835,7 +870,7 @@ static void free_pcppages_bulk(struct zo
+@@ -865,7 +900,7 @@ static void free_pcppages_bulk(struct zo
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
@@ -89,15 +89,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
-@@ -843,24 +878,12 @@ static void free_pcppages_bulk(struct zo
+@@ -873,24 +908,12 @@ static void free_pcppages_bulk(struct zo
batch_free = to_free;
do {
- int mt; /* migratetype of the to-be-freed page */
-
-- page = list_entry(list->prev, struct page, lru);
+ page = list_last_entry(list, struct page, lru);
- /* must delete as __free_one_page list manipulates */
-+ page = list_last_entry(list, struct page, lru);
list_del(&page->lru);
- mt = get_pcppage_migratetype(page);
@@ -116,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -869,7 +892,9 @@ static void free_one_page(struct zone *z
+@@ -899,7 +922,9 @@ static void free_one_page(struct zone *z
int migratetype)
{
unsigned long nr_scanned;
@@ -127,7 +126,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
if (nr_scanned)
__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
-@@ -879,7 +904,7 @@ static void free_one_page(struct zone *z
+@@ -909,7 +934,7 @@ static void free_one_page(struct zone *z
migratetype = get_pfnblock_migratetype(page, pfn);
}
__free_one_page(page, pfn, zone, order, migratetype);
@@ -136,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int free_tail_pages_check(struct page *head_page, struct page *page)
-@@ -1890,16 +1915,18 @@ static int rmqueue_bulk(struct zone *zon
+@@ -2028,16 +2053,18 @@ static int rmqueue_bulk(struct zone *zon
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
@@ -156,7 +155,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -1915,16 +1942,21 @@ static void drain_pages_zone(unsigned in
+@@ -2053,16 +2080,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -180,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2102,8 +2134,13 @@ void free_hot_cold_page(struct page *pag
+@@ -2240,8 +2272,13 @@ void free_hot_cold_page(struct page *pag
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 24b756261c504f..95e409cc62db8e 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -60,6 +60,7 @@
+@@ -61,6 +61,7 @@
#include <linux/page_ext.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/page_owner.h>
#include <linux/kthread.h>
-@@ -264,6 +265,18 @@ EXPORT_SYMBOL(nr_node_ids);
+@@ -275,6 +276,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1017,10 +1030,10 @@ static void __free_pages_ok(struct page
+@@ -1070,10 +1083,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void __init __free_pages_boot_core(struct page *page,
-@@ -1879,14 +1892,14 @@ void drain_zone_pages(struct zone *zone,
+@@ -2017,14 +2030,14 @@ void drain_zone_pages(struct zone *zone,
unsigned long flags;
int to_drain, batch;
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -1903,7 +1916,7 @@ static void drain_pages_zone(unsigned in
+@@ -2041,7 +2054,7 @@ static void drain_pages_zone(unsigned in
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -1911,7 +1924,7 @@ static void drain_pages_zone(unsigned in
+@@ -2049,7 +2062,7 @@ static void drain_pages_zone(unsigned in
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1997,8 +2010,17 @@ void drain_all_pages(struct zone *zone)
+@@ -2135,8 +2148,17 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#ifdef CONFIG_HIBERNATION
-@@ -2054,7 +2076,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2192,7 +2214,7 @@ void free_hot_cold_page(struct page *pag
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__count_vm_event(PGFREE);
/*
-@@ -2085,7 +2107,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2223,7 +2245,7 @@ void free_hot_cold_page(struct page *pag
}
out:
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2220,7 +2242,7 @@ struct page *buffered_rmqueue(struct zon
+@@ -2358,7 +2380,7 @@ struct page *buffered_rmqueue(struct zon
struct per_cpu_pages *pcp;
struct list_head *list;
@@ -137,19 +137,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
-@@ -2252,7 +2274,7 @@ struct page *buffered_rmqueue(struct zon
- */
- WARN_ON_ONCE(order > 1);
- }
+@@ -2382,7 +2404,7 @@ struct page *buffered_rmqueue(struct zon
+ * allocate greater than order-1 page units with __GFP_NOFAIL.
+ */
+ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
- spin_lock_irqsave(&zone->lock, flags);
+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
page = NULL;
if (alloc_flags & ALLOC_HARDER) {
-@@ -2262,11 +2284,13 @@ struct page *buffered_rmqueue(struct zon
+@@ -2392,11 +2414,13 @@ struct page *buffered_rmqueue(struct zon
}
if (!page)
- page = __rmqueue(zone, order, migratetype, gfp_flags);
+ page = __rmqueue(zone, order, migratetype);
- spin_unlock(&zone->lock);
- if (!page)
+ if (!page) {
@@ -162,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-@@ -2276,13 +2300,13 @@ struct page *buffered_rmqueue(struct zon
+@@ -2406,13 +2430,13 @@ struct page *buffered_rmqueue(struct zon
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
@@ -178,7 +178,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -5948,6 +5972,7 @@ static int page_alloc_cpu_notify(struct
+@@ -6239,6 +6263,7 @@ static int page_alloc_cpu_notify(struct
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -186,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -6842,7 +6867,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7163,7 +7188,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -6851,7 +6876,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7172,7 +7197,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/mm-protect-activate-switch-mm.patch b/patches/mm-protect-activate-switch-mm.patch
index 01118d85e11ddf..696c03a29a7174 100644
--- a/patches/mm-protect-activate-switch-mm.patch
+++ b/patches/mm-protect-activate-switch-mm.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -865,12 +865,14 @@ static int exec_mmap(struct mm_struct *m
+@@ -961,12 +961,14 @@ static int exec_mmap(struct mm_struct *m
}
}
task_lock(tsk);
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index 9dbd83c51dff75..412e5f5315948f 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -229,7 +229,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
#include <asm/page.h>
#include <asm/ptrace.h>
-@@ -1849,6 +1850,12 @@ struct task_struct {
+@@ -1883,6 +1884,12 @@ struct task_struct {
int softirq_nestcnt;
unsigned int softirqs_raised;
#endif
diff --git a/patches/mm-vmalloc-use-get-cpu-light.patch b/patches/mm-vmalloc-use-get-cpu-light.patch
index 1344dd02451379..2bb2ee53052f90 100644
--- a/patches/mm-vmalloc-use-get-cpu-light.patch
+++ b/patches/mm-vmalloc-use-get-cpu-light.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -821,7 +821,7 @@ static void *new_vmap_block(unsigned int
+@@ -819,7 +819,7 @@ static void *new_vmap_block(unsigned int
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *vaddr;
node = numa_node_id();
-@@ -864,11 +864,12 @@ static void *new_vmap_block(unsigned int
+@@ -862,11 +862,12 @@ static void *new_vmap_block(unsigned int
BUG_ON(err);
radix_tree_preload_end();
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return vaddr;
}
-@@ -937,6 +938,7 @@ static void *vb_alloc(unsigned long size
+@@ -935,6 +936,7 @@ static void *vb_alloc(unsigned long size
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -951,7 +953,8 @@ static void *vb_alloc(unsigned long size
+@@ -949,7 +951,8 @@ static void *vb_alloc(unsigned long size
order = get_order(size);
rcu_read_lock();
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -974,7 +977,7 @@ static void *vb_alloc(unsigned long size
+@@ -972,7 +975,7 @@ static void *vb_alloc(unsigned long size
break;
}
diff --git a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
index 61efb48c4d2fa9..cfe0da6f4e3584 100644
--- a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
+++ b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/mm/filemap.c
+++ b/mm/filemap.c
-@@ -168,7 +168,9 @@ static void page_cache_tree_delete(struc
+@@ -169,7 +169,9 @@ static void page_cache_tree_delete(struc
if (!workingset_node_pages(node) &&
list_empty(&node->private_list)) {
node->private_data = mapping;
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -597,9 +599,12 @@ static int page_cache_tree_insert(struct
+@@ -618,9 +620,12 @@ static int page_cache_tree_insert(struct
* node->private_list is protected by
* mapping->tree_lock.
*/
@@ -65,23 +65,24 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/mm/truncate.c
+++ b/mm/truncate.c
-@@ -56,8 +56,11 @@ static void clear_exceptional_entry(stru
- * protected by mapping->tree_lock.
- */
- if (!workingset_node_shadows(node) &&
-- !list_empty(&node->private_list))
-- list_lru_del(&workingset_shadow_nodes, &node->private_list);
-+ !list_empty(&node->private_list)) {
-+ local_lock(workingset_shadow_lock);
-+ list_lru_del(&__workingset_shadow_nodes, &node->private_list);
-+ local_unlock(workingset_shadow_lock);
-+ }
- __radix_tree_delete_node(&mapping->page_tree, node);
+@@ -63,9 +63,12 @@ static void clear_exceptional_entry(stru
+ * protected by mapping->tree_lock.
+ */
+ if (!workingset_node_shadows(node) &&
+- !list_empty(&node->private_list))
+- list_lru_del(&workingset_shadow_nodes,
++ !list_empty(&node->private_list)) {
++ local_lock(workingset_shadow_lock);
++ list_lru_del(&__workingset_shadow_nodes,
+ &node->private_list);
++ local_unlock(workingset_shadow_lock);
++ }
+ __radix_tree_delete_node(&mapping->page_tree, node);
+ }
unlock:
- spin_unlock_irq(&mapping->tree_lock);
--- a/mm/workingset.c
+++ b/mm/workingset.c
-@@ -264,7 +264,8 @@ void workingset_activation(struct page *
+@@ -335,7 +335,8 @@ void workingset_activation(struct page *
* point where they would still be useful.
*/
@@ -91,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static unsigned long count_shadow_nodes(struct shrinker *shrinker,
struct shrink_control *sc)
-@@ -274,9 +275,9 @@ static unsigned long count_shadow_nodes(
+@@ -345,9 +346,9 @@ static unsigned long count_shadow_nodes(
unsigned long pages;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
@@ -102,9 +103,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc);
+ local_unlock_irq(workingset_shadow_lock);
- pages = node_present_pages(sc->nid);
- /*
-@@ -363,9 +364,9 @@ static enum lru_status shadow_lru_isolat
+ if (memcg_kmem_enabled())
+ pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
+@@ -440,9 +441,9 @@ static enum lru_status shadow_lru_isolat
spin_unlock(&mapping->tree_lock);
ret = LRU_REMOVED_RETRY;
out:
@@ -116,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock(lru_lock);
return ret;
}
-@@ -376,10 +377,10 @@ static unsigned long scan_shadow_nodes(s
+@@ -453,10 +454,10 @@ static unsigned long scan_shadow_nodes(s
unsigned long ret;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
@@ -130,16 +131,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -400,7 +401,7 @@ static int __init workingset_init(void)
- {
- int ret;
+@@ -494,7 +495,7 @@ static int __init workingset_init(void)
+ printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+ timestamp_bits, max_order, bucket_order);
- ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
+ ret = list_lru_init_key(&__workingset_shadow_nodes, &shadow_nodes_key);
if (ret)
goto err;
ret = register_shrinker(&workingset_shadow_shrinker);
-@@ -408,7 +409,7 @@ static int __init workingset_init(void)
+@@ -502,7 +503,7 @@ static int __init workingset_init(void)
goto err_list_lru;
return 0;
err_list_lru:
diff --git a/patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch b/patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
index b827211e33ba6f..5db2c5898b05f6 100644
--- a/patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
+++ b/patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
-@@ -1289,7 +1289,7 @@ void *zs_map_object(struct zs_pool *pool
+@@ -1292,7 +1292,7 @@ void *zs_map_object(struct zs_pool *pool
class = pool->size_class[class_idx];
off = obj_idx_to_offset(page, obj_idx, class->size);
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
-@@ -1342,7 +1342,7 @@ void zs_unmap_object(struct zs_pool *poo
+@@ -1345,7 +1345,7 @@ void zs_unmap_object(struct zs_pool *poo
__zs_unmap_object(area, pages, off, class->size);
}
diff --git a/patches/move_sched_delayed_work_to_helper.patch b/patches/move_sched_delayed_work_to_helper.patch
index b1f7b3393881b3..af52d55c193cb2 100644
--- a/patches/move_sched_delayed_work_to_helper.patch
+++ b/patches/move_sched_delayed_work_to_helper.patch
@@ -33,7 +33,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
#include <linux/math64.h>
#include <linux/timex.h>
#include <linux/time.h>
-@@ -562,10 +563,52 @@ static void sync_cmos_clock(struct work_
+@@ -568,10 +569,52 @@ static void sync_cmos_clock(struct work_
&sync_cmos_work, timespec64_to_jiffies(&next));
}
diff --git a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
index d6f4cc4a37837d..30f9e3ec298dbf 100644
--- a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
+++ b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -19,15 +19,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <net/protocol.h>
#include <net/dst.h>
-@@ -351,6 +352,7 @@ EXPORT_SYMBOL(build_skb);
+@@ -359,6 +360,7 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
- static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache);
+ static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
+static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-@@ -358,10 +360,10 @@ static void *__netdev_alloc_frag(unsigne
+@@ -366,10 +368,10 @@ static void *__netdev_alloc_frag(unsigne
unsigned long flags;
void *data;
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return data;
}
-@@ -429,13 +431,13 @@ struct sk_buff *__netdev_alloc_skb(struc
+@@ -437,13 +439,13 @@ struct sk_buff *__netdev_alloc_skb(struc
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
diff --git a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index fa0d1c05441a8a..154198c01a8193 100644
--- a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -35,7 +35,7 @@ Cc: stable-rt@vger.kernel.org
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -7473,7 +7473,7 @@ static int dev_cpu_callback(struct notif
+@@ -7789,7 +7789,7 @@ static int dev_cpu_callback(struct notif
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch b/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
index 4446e34d3b6a79..857149f7a3c34d 100644
--- a/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
+++ b/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
@@ -12,65 +12,101 @@ This patch ensures that each user of napi_alloc_cache uses a local lock.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- net/core/skbuff.c | 18 ++++++++++++++----
- 1 file changed, 14 insertions(+), 4 deletions(-)
+ net/core/skbuff.c | 25 +++++++++++++++++++------
+ 1 file changed, 19 insertions(+), 6 deletions(-)
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -353,6 +353,7 @@ EXPORT_SYMBOL(build_skb);
+@@ -361,6 +361,7 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
- static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache);
+ static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-@@ -382,9 +383,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
+@@ -390,9 +391,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-- struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-+ struct page_frag_cache *nc;
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
+ void *data;
-- return __alloc_page_frag(nc, fragsz, gfp_mask);
+- return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
-+ data = __alloc_page_frag(nc, fragsz, gfp_mask);
++ data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ return data;
}
void *napi_alloc_frag(unsigned int fragsz)
-@@ -478,9 +483,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
+@@ -486,9 +491,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
gfp_t gfp_mask)
{
-- struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-+ struct page_frag_cache *nc;
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
struct sk_buff *skb;
void *data;
+ bool pfmemalloc;
len += NET_SKB_PAD + NET_IP_ALIGN;
-@@ -498,7 +504,11 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -506,7 +512,10 @@ struct sk_buff *__napi_alloc_skb(struct
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
- data = __alloc_page_frag(nc, len, gfp_mask);
-+ pfmemalloc = nc->pfmemalloc;
+ data = __alloc_page_frag(&nc->page, len, gfp_mask);
++ pfmemalloc = nc->page.pfmemalloc;
+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
-+
if (unlikely(!data))
return NULL;
-@@ -509,7 +519,7 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -517,7 +526,7 @@ struct sk_buff *__napi_alloc_skb(struct
}
/* use OR instead of assignment to avoid clearing of bits in mask */
-- if (nc->pfmemalloc)
+- if (nc->page.pfmemalloc)
+ if (pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;
+@@ -761,23 +770,26 @@ EXPORT_SYMBOL(consume_skb);
+
+ void __kfree_skb_flush(void)
+ {
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
+
++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ /* flush skb_cache if containing objects */
+ if (nc->skb_count) {
+ kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
+ nc->skb_cache);
+ nc->skb_count = 0;
+ }
++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ }
+
+ static inline void _kfree_skb_defer(struct sk_buff *skb)
+ {
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
+
+ /* drop skb->head and call any destructors for packet */
+ skb_release_all(skb);
+
++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ /* record skb to CPU local list */
+ nc->skb_cache[nc->skb_count++] = skb;
+
+@@ -792,6 +804,7 @@ static inline void _kfree_skb_defer(stru
+ nc->skb_cache);
+ nc->skb_count = 0;
+ }
++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ }
+ void __kfree_skb_defer(struct sk_buff *skb)
+ {
diff --git a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index b915dc9490ed7e..ac5a8aa422bf2f 100644
--- a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2885,7 +2885,11 @@ static inline int __dev_xmit_skb(struct
+@@ -3037,7 +3037,11 @@ static inline int __dev_xmit_skb(struct
* This permits __QDISC___STATE_RUNNING owner to get the lock more
* often and dequeue packets faster.
*/
diff --git a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index 35b702cbfd5487..6cdbb866a99690 100644
--- a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <uapi/linux/netfilter/x_tables.h>
/**
-@@ -282,6 +283,8 @@ void xt_free_table_info(struct xt_table_
+@@ -285,6 +286,8 @@ void xt_free_table_info(struct xt_table_
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* xt_tee_enabled - true if x_tables needs to handle reentrancy
*
* Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
-@@ -302,6 +305,9 @@ static inline unsigned int xt_write_recs
+@@ -305,6 +308,9 @@ static inline unsigned int xt_write_recs
{
unsigned int addend;
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
-@@ -332,6 +338,7 @@ static inline void xt_write_recseq_end(u
+@@ -335,6 +341,7 @@ static inline void xt_write_recseq_end(u
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
diff --git a/patches/net-make-devnet_rename_seq-a-mutex.patch b/patches/net-make-devnet_rename_seq-a-mutex.patch
index cb15f31b218ed7..85140e9f57c347 100644
--- a/patches/net-make-devnet_rename_seq-a-mutex.patch
+++ b/patches/net-make-devnet_rename_seq-a-mutex.patch
@@ -21,15 +21,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -186,6 +186,7 @@ static unsigned int napi_gen_id;
- static DEFINE_HASHTABLE(napi_hash, 8);
+@@ -188,6 +188,7 @@ static unsigned int napi_gen_id = NR_CPU
+ static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
+static DEFINE_MUTEX(devnet_rename_mutex);
static inline void dev_base_seq_inc(struct net *net)
{
-@@ -884,7 +885,8 @@ int netdev_get_name(struct net *net, cha
+@@ -886,7 +887,8 @@ int netdev_get_name(struct net *net, cha
strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto retry;
}
-@@ -1153,20 +1155,17 @@ int dev_change_name(struct net_device *d
+@@ -1155,20 +1157,17 @@ int dev_change_name(struct net_device *d
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (oldname[0] && !strchr(oldname, '%'))
netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1179,11 +1178,12 @@ int dev_change_name(struct net_device *d
+@@ -1181,11 +1180,12 @@ int dev_change_name(struct net_device *d
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
netdev_adjacent_rename_links(dev, oldname);
-@@ -1204,7 +1204,8 @@ int dev_change_name(struct net_device *d
+@@ -1206,7 +1206,8 @@ int dev_change_name(struct net_device *d
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
-@@ -1217,6 +1218,11 @@ int dev_change_name(struct net_device *d
+@@ -1219,6 +1220,11 @@ int dev_change_name(struct net_device *d
}
return err;
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 208c29c1fc9c20..ed090ff8342847 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2249,11 +2249,20 @@ void netdev_freemem(struct net_device *d
+@@ -2393,11 +2393,20 @@ void netdev_freemem(struct net_device *d
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1852,6 +1852,9 @@ struct task_struct {
+@@ -1886,6 +1886,9 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
@@ -53,11 +53,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ int xmit_recursion;
+#endif
int pagefault_disabled;
- /* CPU-specific state of this task */
- struct thread_struct thread;
+ #ifdef CONFIG_MMU
+ struct task_struct *oom_reaper_list;
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2945,9 +2945,44 @@ static void skb_update_prio(struct sk_bu
+@@ -3098,9 +3098,44 @@ static void skb_update_prio(struct sk_bu
#define skb_update_prio(skb)
#endif
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define RECURSION_LIMIT 10
/**
-@@ -3140,7 +3175,7 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3346,7 +3381,7 @@ static int __dev_queue_xmit(struct sk_bu
if (txq->xmit_lock_owner != cpu) {
@@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto recursion_alert;
skb = validate_xmit_skb(skb, dev);
-@@ -3150,9 +3185,9 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3356,9 +3391,9 @@ static int __dev_queue_xmit(struct sk_bu
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
diff --git a/patches/net-prevent-abba-deadlock.patch b/patches/net-prevent-abba-deadlock.patch
index 04181bc8387c14..f699a9d39299b2 100644
--- a/patches/net-prevent-abba-deadlock.patch
+++ b/patches/net-prevent-abba-deadlock.patch
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/sock.c
+++ b/net/core/sock.c
-@@ -2435,12 +2435,11 @@ void lock_sock_nested(struct sock *sk, i
+@@ -2421,12 +2421,11 @@ void lock_sock_nested(struct sock *sk, i
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index 319dd904109c7e..cdd5d1bd3cf9c3 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -465,6 +465,14 @@ extern void thread_do_softirq(void);
+@@ -476,6 +476,14 @@ extern void thread_do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void raise_softirq_irqoff(unsigned int nr)
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4927,7 +4927,7 @@ static void net_rx_action(struct softirq
+@@ -5211,7 +5211,7 @@ static void net_rx_action(struct softirq
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
diff --git a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index b90553dfa13624..92829e408434c6 100644
--- a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
-@@ -890,7 +890,7 @@ void dev_deactivate_many(struct list_hea
+@@ -894,7 +894,7 @@ void dev_deactivate_many(struct list_hea
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list)
while (some_qdisc_is_busy(dev))
diff --git a/patches/net-tx-action-avoid-livelock-on-rt.patch b/patches/net-tx-action-avoid-livelock-on-rt.patch
index d8a85721893f6d..d97286982d7abd 100644
--- a/patches/net-tx-action-avoid-livelock-on-rt.patch
+++ b/patches/net-tx-action-avoid-livelock-on-rt.patch
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3642,6 +3642,36 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3848,6 +3848,36 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -3683,7 +3713,7 @@ static void net_tx_action(struct softirq
+@@ -3895,7 +3925,7 @@ static void net_tx_action(struct softirq
head = head->next_sched;
root_lock = qdisc_lock(q);
diff --git a/patches/net-use-cpu-chill.patch b/patches/net-use-cpu-chill.patch
index f945d89790e427..e7f2794d9da0b2 100644
--- a/patches/net-use-cpu-chill.patch
+++ b/patches/net-use-cpu-chill.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-@@ -695,7 +696,7 @@ static void prb_retire_rx_blk_timer_expi
+@@ -694,7 +695,7 @@ static void prb_retire_rx_blk_timer_expi
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -957,7 +958,7 @@ static void prb_retire_current_block(str
+@@ -956,7 +957,7 @@ static void prb_retire_current_block(str
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -49,9 +49,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/llist.h>
+#include <linux/delay.h>
- #include "rds.h"
- #include "ib.h"
-@@ -313,7 +314,7 @@ static inline void wait_clean_list_grace
+ #include "ib_mr.h"
+
+@@ -209,7 +210,7 @@ static inline void wait_clean_list_grace
for_each_online_cpu(cpu) {
flag = &per_cpu(clean_list_grace, cpu);
while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
diff --git a/patches/net-wireless-warn-nort.patch b/patches/net-wireless-warn-nort.patch
index e3b49fc3a372a0..84feed94cbc192 100644
--- a/patches/net-wireless-warn-nort.patch
+++ b/patches/net-wireless-warn-nort.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
-@@ -3574,7 +3574,7 @@ void ieee80211_rx_napi(struct ieee80211_
+@@ -3679,7 +3679,7 @@ void ieee80211_rx_napi(struct ieee80211_
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
diff --git a/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch b/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
index d454b642c4ba33..62bc044100d5ad 100644
--- a/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
+++ b/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -7222,7 +7222,7 @@ EXPORT_SYMBOL(free_netdev);
+@@ -7538,7 +7538,7 @@ EXPORT_SYMBOL(free_netdev);
void synchronize_net(void)
{
might_sleep();
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 6367819f96b47a..7a1b0c76aa6544 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -239,6 +239,13 @@ static void exit_to_usermode_loop(struct
+@@ -221,6 +221,13 @@ static void exit_to_usermode_loop(struct
if (cached_flags & _TIF_NEED_RESCHED)
schedule();
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1579,6 +1579,10 @@ struct task_struct {
+@@ -1600,6 +1600,10 @@ struct task_struct {
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
diff --git a/patches/panic-change-nmi_panic-from-macro-to-function.patch b/patches/panic-change-nmi_panic-from-macro-to-function.patch
deleted file mode 100644
index 608acc78014abb..00000000000000
--- a/patches/panic-change-nmi_panic-from-macro-to-function.patch
+++ /dev/null
@@ -1,112 +0,0 @@
-From 8b60994ea4ef1dad39b29a0e61261bd0c2c2919f Mon Sep 17 00:00:00 2001
-From: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
-Date: Tue, 22 Mar 2016 14:27:17 -0700
-Subject: [PATCH] panic: change nmi_panic from macro to function
-
-Commit 1717f2096b54 ("panic, x86: Fix re-entrance problem due to panic
-on NMI") and commit 58c5661f2144 ("panic, x86: Allow CPUs to save
-registers even if looping in NMI context") introduced nmi_panic() which
-prevents concurrent/recursive execution of panic(). It also saves
-registers for the crash dump on x86.
-
-However, there are some cases where NMI handlers still use panic().
-This patch set partially replaces them with nmi_panic() in those cases.
-
-Even this patchset is applied, some NMI or similar handlers (e.g. MCE
-handler) continue to use panic(). This is because I can't test them
-well and actual problems won't happen. For example, the possibility
-that normal panic and panic on MCE happen simultaneously is very low.
-
-This patch (of 3):
-
-Convert nmi_panic() to a proper function and export it instead of
-exporting internal implementation details to modules, for obvious
-reasons.
-
-Signed-off-by: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
-Acked-by: Borislav Petkov <bp@suse.de>
-Acked-by: Michal Nazarewicz <mina86@mina86.com>
-Cc: Michal Hocko <mhocko@suse.com>
-Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
-Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
-Cc: Javi Merino <javi.merino@arm.com>
-Cc: Gobinda Charan Maji <gobinda.cemk07@gmail.com>
-Cc: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
-Cc: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
-Cc: Tejun Heo <tj@kernel.org>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/kernel.h | 21 +--------------------
- kernel/panic.c | 20 ++++++++++++++++++++
- 2 files changed, 21 insertions(+), 20 deletions(-)
-
---- a/include/linux/kernel.h
-+++ b/include/linux/kernel.h
-@@ -255,7 +255,7 @@ extern long (*panic_blink)(int state);
- __printf(1, 2)
- void panic(const char *fmt, ...)
- __noreturn __cold;
--void nmi_panic_self_stop(struct pt_regs *);
-+void nmi_panic(struct pt_regs *regs, const char *msg);
- extern void oops_enter(void);
- extern void oops_exit(void);
- void print_oops_end_marker(void);
-@@ -455,25 +455,6 @@ extern atomic_t panic_cpu;
- #define PANIC_CPU_INVALID -1
-
- /*
-- * A variant of panic() called from NMI context. We return if we've already
-- * panicked on this CPU. If another CPU already panicked, loop in
-- * nmi_panic_self_stop() which can provide architecture dependent code such
-- * as saving register state for crash dump.
-- */
--#define nmi_panic(regs, fmt, ...) \
--do { \
-- int old_cpu, cpu; \
-- \
-- cpu = raw_smp_processor_id(); \
-- old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); \
-- \
-- if (old_cpu == PANIC_CPU_INVALID) \
-- panic(fmt, ##__VA_ARGS__); \
-- else if (old_cpu != cpu) \
-- nmi_panic_self_stop(regs); \
--} while (0)
--
--/*
- * Only to be used by arch init code. If the user over-wrote the default
- * CONFIG_PANIC_TIMEOUT, honor it.
- */
---- a/kernel/panic.c
-+++ b/kernel/panic.c
-@@ -72,6 +72,26 @@ void __weak nmi_panic_self_stop(struct p
-
- atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
-
-+/*
-+ * A variant of panic() called from NMI context. We return if we've already
-+ * panicked on this CPU. If another CPU already panicked, loop in
-+ * nmi_panic_self_stop() which can provide architecture dependent code such
-+ * as saving register state for crash dump.
-+ */
-+void nmi_panic(struct pt_regs *regs, const char *msg)
-+{
-+ int old_cpu, cpu;
-+
-+ cpu = raw_smp_processor_id();
-+ old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);
-+
-+ if (old_cpu == PANIC_CPU_INVALID)
-+ panic("%s", msg);
-+ else if (old_cpu != cpu)
-+ nmi_panic_self_stop(regs);
-+}
-+EXPORT_SYMBOL(nmi_panic);
-+
- /**
- * panic - halt the system
- * @fmt: The text string to print
diff --git a/patches/panic-disable-random-on-rt.patch b/patches/panic-disable-random-on-rt.patch
index e9f300080a8c9f..046423fdce1f8c 100644
--- a/patches/panic-disable-random-on-rt.patch
+++ b/patches/panic-disable-random-on-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -439,9 +439,11 @@ static u64 oops_id;
+@@ -444,9 +444,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
diff --git a/patches/panic-x86-Allow-CPUs-to-save-registers-even-if-loopi.patch b/patches/panic-x86-Allow-CPUs-to-save-registers-even-if-loopi.patch
deleted file mode 100644
index 876d058c986d3b..00000000000000
--- a/patches/panic-x86-Allow-CPUs-to-save-registers-even-if-loopi.patch
+++ /dev/null
@@ -1,245 +0,0 @@
-From 957548a86594805bce67b7b5c8360e78a0c658e1 Mon Sep 17 00:00:00 2001
-From: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
-Date: Mon, 14 Dec 2015 11:19:10 +0100
-Subject: [PATCH] panic, x86: Allow CPUs to save registers even if looping
- in NMI context
-
-Currently, kdump_nmi_shootdown_cpus(), a subroutine of crash_kexec(),
-sends an NMI IPI to CPUs which haven't called panic() to stop them,
-save their register information and do some cleanups for crash dumping.
-However, if such a CPU is infinitely looping in NMI context, we fail to
-save its register information into the crash dump.
-
-For example, this can happen when unknown NMIs are broadcast to all
-CPUs as follows:
-
- CPU 0 CPU 1
- =========================== ==========================
- receive an unknown NMI
- unknown_nmi_error()
- panic() receive an unknown NMI
- spin_trylock(&panic_lock) unknown_nmi_error()
- crash_kexec() panic()
- spin_trylock(&panic_lock)
- panic_smp_self_stop()
- infinite loop
- kdump_nmi_shootdown_cpus()
- issue NMI IPI -----------> blocked until IRET
- infinite loop...
-
-Here, since CPU 1 is in NMI context, the second NMI from CPU 0 is
-blocked until CPU 1 executes IRET. However, CPU 1 never executes IRET,
-so the NMI is not handled and the callback function to save registers is
-never called.
-
-In practice, this can happen on some servers which broadcast NMIs to all
-CPUs when the NMI button is pushed.
-
-To save registers in this case, we need to:
-
- a) Return from NMI handler instead of looping infinitely
- or
- b) Call the callback function directly from the infinite loop
-
-Inherently, a) is risky because NMI is also used to prevent corrupted
-data from being propagated to devices. So, we chose b).
-
-This patch does the following:
-
-1. Move the infinite looping of CPUs which haven't called panic() in NMI
- context (actually done by panic_smp_self_stop()) outside of panic() to
- enable us to refer pt_regs. Please note that panic_smp_self_stop() is
- still used for normal context.
-
-2. Call a callback of kdump_nmi_shootdown_cpus() directly to save
- registers and do some cleanups after setting waiting_for_crash_ipi which
- is used for counting down the number of CPUs which handled the callback
-
-Signed-off-by: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
-Acked-by: Michal Hocko <mhocko@suse.com>
-Cc: Aaron Tomlin <atomlin@redhat.com>
-Cc: Andrew Morton <akpm@linux-foundation.org>
-Cc: Andy Lutomirski <luto@kernel.org>
-Cc: Baoquan He <bhe@redhat.com>
-Cc: Chris Metcalf <cmetcalf@ezchip.com>
-Cc: Dave Young <dyoung@redhat.com>
-Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Cc: Don Zickus <dzickus@redhat.com>
-Cc: Eric Biederman <ebiederm@xmission.com>
-Cc: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Gobinda Charan Maji <gobinda.cemk07@gmail.com>
-Cc: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
-Cc: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
-Cc: "H. Peter Anvin" <hpa@zytor.com>
-Cc: Ingo Molnar <mingo@kernel.org>
-Cc: Javi Merino <javi.merino@arm.com>
-Cc: Jiang Liu <jiang.liu@linux.intel.com>
-Cc: Jonathan Corbet <corbet@lwn.net>
-Cc: kexec@lists.infradead.org
-Cc: linux-doc@vger.kernel.org
-Cc: lkml <linux-kernel@vger.kernel.org>
-Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
-Cc: Michal Nazarewicz <mina86@mina86.com>
-Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
-Cc: Oleg Nesterov <oleg@redhat.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Prarit Bhargava <prarit@redhat.com>
-Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
-Cc: Seth Jennings <sjenning@redhat.com>
-Cc: Stefan Lippers-Hollmann <s.l-h@gmx.de>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Ulrich Obergfell <uobergfe@redhat.com>
-Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
-Cc: Vivek Goyal <vgoyal@redhat.com>
-Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
-Link: http://lkml.kernel.org/r/20151210014628.25437.75256.stgit@softrs
-[ Cleanup comments, fixup formatting. ]
-Signed-off-by: Borislav Petkov <bp@suse.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/nmi.c | 6 +++---
- arch/x86/kernel/reboot.c | 20 ++++++++++++++++++++
- include/linux/kernel.h | 16 ++++++++++++----
- kernel/panic.c | 9 +++++++++
- kernel/watchdog.c | 2 +-
- 5 files changed, 45 insertions(+), 8 deletions(-)
-
---- a/arch/x86/kernel/nmi.c
-+++ b/arch/x86/kernel/nmi.c
-@@ -231,7 +231,7 @@ pci_serr_error(unsigned char reason, str
- #endif
-
- if (panic_on_unrecovered_nmi)
-- nmi_panic("NMI: Not continuing");
-+ nmi_panic(regs, "NMI: Not continuing");
-
- pr_emerg("Dazed and confused, but trying to continue\n");
-
-@@ -256,7 +256,7 @@ io_check_error(unsigned char reason, str
- show_regs(regs);
-
- if (panic_on_io_nmi) {
-- nmi_panic("NMI IOCK error: Not continuing");
-+ nmi_panic(regs, "NMI IOCK error: Not continuing");
-
- /*
- * If we end up here, it means we have received an NMI while
-@@ -305,7 +305,7 @@ unknown_nmi_error(unsigned char reason,
-
- pr_emerg("Do you have a strange power saving mode enabled?\n");
- if (unknown_nmi_panic || panic_on_unrecovered_nmi)
-- nmi_panic("NMI: Not continuing");
-+ nmi_panic(regs, "NMI: Not continuing");
-
- pr_emerg("Dazed and confused, but trying to continue\n");
- }
---- a/arch/x86/kernel/reboot.c
-+++ b/arch/x86/kernel/reboot.c
-@@ -726,6 +726,7 @@ static int crashing_cpu;
- static nmi_shootdown_cb shootdown_callback;
-
- static atomic_t waiting_for_crash_ipi;
-+static int crash_ipi_issued;
-
- static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
- {
-@@ -788,6 +789,9 @@ void nmi_shootdown_cpus(nmi_shootdown_cb
-
- smp_send_nmi_allbutself();
-
-+ /* Kick CPUs looping in NMI context. */
-+ WRITE_ONCE(crash_ipi_issued, 1);
-+
- msecs = 1000; /* Wait at most a second for the other cpus to stop */
- while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
- mdelay(1);
-@@ -796,6 +800,22 @@ void nmi_shootdown_cpus(nmi_shootdown_cb
-
- /* Leave the nmi callback set */
- }
-+
-+/* Override the weak function in kernel/panic.c */
-+void nmi_panic_self_stop(struct pt_regs *regs)
-+{
-+ while (1) {
-+ /*
-+ * Wait for the crash dumping IPI to be issued, and then
-+ * call its callback directly.
-+ */
-+ if (READ_ONCE(crash_ipi_issued))
-+ crash_nmi_callback(0, regs); /* Don't return */
-+
-+ cpu_relax();
-+ }
-+}
-+
- #else /* !CONFIG_SMP */
- void nmi_shootdown_cpus(nmi_shootdown_cb callback)
- {
---- a/include/linux/kernel.h
-+++ b/include/linux/kernel.h
-@@ -255,6 +255,7 @@ extern long (*panic_blink)(int state);
- __printf(1, 2)
- void panic(const char *fmt, ...)
- __noreturn __cold;
-+void nmi_panic_self_stop(struct pt_regs *);
- extern void oops_enter(void);
- extern void oops_exit(void);
- void print_oops_end_marker(void);
-@@ -455,14 +456,21 @@ extern atomic_t panic_cpu;
-
- /*
- * A variant of panic() called from NMI context. We return if we've already
-- * panicked on this CPU.
-+ * panicked on this CPU. If another CPU already panicked, loop in
-+ * nmi_panic_self_stop() which can provide architecture dependent code such
-+ * as saving register state for crash dump.
- */
--#define nmi_panic(fmt, ...) \
-+#define nmi_panic(regs, fmt, ...) \
- do { \
-- int cpu = raw_smp_processor_id(); \
-+ int old_cpu, cpu; \
- \
-- if (atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu) != cpu) \
-+ cpu = raw_smp_processor_id(); \
-+ old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); \
-+ \
-+ if (old_cpu == PANIC_CPU_INVALID) \
- panic(fmt, ##__VA_ARGS__); \
-+ else if (old_cpu != cpu) \
-+ nmi_panic_self_stop(regs); \
- } while (0)
-
- /*
---- a/kernel/panic.c
-+++ b/kernel/panic.c
-@@ -61,6 +61,15 @@ void __weak panic_smp_self_stop(void)
- cpu_relax();
- }
-
-+/*
-+ * Stop ourselves in NMI context if another CPU has already panicked. Arch code
-+ * may override this to prepare for crash dumping, e.g. save regs info.
-+ */
-+void __weak nmi_panic_self_stop(struct pt_regs *regs)
-+{
-+ panic_smp_self_stop();
-+}
-+
- atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
-
- /**
---- a/kernel/watchdog.c
-+++ b/kernel/watchdog.c
-@@ -351,7 +351,7 @@ static void watchdog_overflow_callback(s
- trigger_allbutself_cpu_backtrace();
-
- if (hardlockup_panic)
-- nmi_panic("Hard LOCKUP");
-+ nmi_panic(regs, "Hard LOCKUP");
-
- __this_cpu_write(hard_watchdog_warn, true);
- return;
diff --git a/patches/panic-x86-Fix-re-entrance-problem-due-to-panic-on-NM.patch b/patches/panic-x86-Fix-re-entrance-problem-due-to-panic-on-NM.patch
deleted file mode 100644
index 75fe083009f0c3..00000000000000
--- a/patches/panic-x86-Fix-re-entrance-problem-due-to-panic-on-NM.patch
+++ /dev/null
@@ -1,188 +0,0 @@
-From: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
-Date: Mon, 14 Dec 2015 11:19:09 +0100
-Subject: [PATCH] panic, x86: Fix re-entrance problem due to panic on NMI
-
-If panic on NMI happens just after panic() on the same CPU, panic() is
-recursively called. Kernel stalls, as a result, after failing to acquire
-panic_lock.
-
-To avoid this problem, don't call panic() in NMI context if we've
-already entered panic().
-
-For that, introduce nmi_panic() macro to reduce code duplication. In
-the case of panic on NMI, don't return from NMI handlers if another CPU
-already panicked.
-
-Signed-off-by: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
-Acked-by: Michal Hocko <mhocko@suse.com>
-Cc: Aaron Tomlin <atomlin@redhat.com>
-Cc: Andrew Morton <akpm@linux-foundation.org>
-Cc: Andy Lutomirski <luto@kernel.org>
-Cc: Baoquan He <bhe@redhat.com>
-Cc: Chris Metcalf <cmetcalf@ezchip.com>
-Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Cc: Don Zickus <dzickus@redhat.com>
-Cc: "Eric W. Biederman" <ebiederm@xmission.com>
-Cc: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Gobinda Charan Maji <gobinda.cemk07@gmail.com>
-Cc: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
-Cc: "H. Peter Anvin" <hpa@zytor.com>
-Cc: Ingo Molnar <mingo@kernel.org>
-Cc: Javi Merino <javi.merino@arm.com>
-Cc: Jonathan Corbet <corbet@lwn.net>
-Cc: kexec@lists.infradead.org
-Cc: linux-doc@vger.kernel.org
-Cc: lkml <linux-kernel@vger.kernel.org>
-Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
-Cc: Michal Nazarewicz <mina86@mina86.com>
-Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Prarit Bhargava <prarit@redhat.com>
-Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
-Cc: Rusty Russell <rusty@rustcorp.com.au>
-Cc: Seth Jennings <sjenning@redhat.com>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Ulrich Obergfell <uobergfe@redhat.com>
-Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
-Cc: Vivek Goyal <vgoyal@redhat.com>
-Link: http://lkml.kernel.org/r/20151210014626.25437.13302.stgit@softrs
-[ Cleanup comments, fixup formatting. ]
-Signed-off-by: Borislav Petkov <bp@suse.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/nmi.c | 16 ++++++++++++----
- include/linux/kernel.h | 20 ++++++++++++++++++++
- kernel/panic.c | 16 +++++++++++++---
- kernel/watchdog.c | 2 +-
- 4 files changed, 46 insertions(+), 8 deletions(-)
-
---- a/arch/x86/kernel/nmi.c
-+++ b/arch/x86/kernel/nmi.c
-@@ -231,7 +231,7 @@ pci_serr_error(unsigned char reason, str
- #endif
-
- if (panic_on_unrecovered_nmi)
-- panic("NMI: Not continuing");
-+ nmi_panic("NMI: Not continuing");
-
- pr_emerg("Dazed and confused, but trying to continue\n");
-
-@@ -255,8 +255,16 @@ io_check_error(unsigned char reason, str
- reason, smp_processor_id());
- show_regs(regs);
-
-- if (panic_on_io_nmi)
-- panic("NMI IOCK error: Not continuing");
-+ if (panic_on_io_nmi) {
-+ nmi_panic("NMI IOCK error: Not continuing");
-+
-+ /*
-+ * If we end up here, it means we have received an NMI while
-+ * processing panic(). Simply return without delaying and
-+ * re-enabling NMIs.
-+ */
-+ return;
-+ }
-
- /* Re-enable the IOCK line, wait for a few seconds */
- reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
-@@ -297,7 +305,7 @@ unknown_nmi_error(unsigned char reason,
-
- pr_emerg("Do you have a strange power saving mode enabled?\n");
- if (unknown_nmi_panic || panic_on_unrecovered_nmi)
-- panic("NMI: Not continuing");
-+ nmi_panic("NMI: Not continuing");
-
- pr_emerg("Dazed and confused, but trying to continue\n");
- }
---- a/include/linux/kernel.h
-+++ b/include/linux/kernel.h
-@@ -446,6 +446,26 @@ extern int sysctl_panic_on_stackoverflow
- extern bool crash_kexec_post_notifiers;
-
- /*
-+ * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
-+ * holds a CPU number which is executing panic() currently. A value of
-+ * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
-+ */
-+extern atomic_t panic_cpu;
-+#define PANIC_CPU_INVALID -1
-+
-+/*
-+ * A variant of panic() called from NMI context. We return if we've already
-+ * panicked on this CPU.
-+ */
-+#define nmi_panic(fmt, ...) \
-+do { \
-+ int cpu = raw_smp_processor_id(); \
-+ \
-+ if (atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu) != cpu) \
-+ panic(fmt, ##__VA_ARGS__); \
-+} while (0)
-+
-+/*
- * Only to be used by arch init code. If the user over-wrote the default
- * CONFIG_PANIC_TIMEOUT, honor it.
- */
---- a/kernel/panic.c
-+++ b/kernel/panic.c
-@@ -61,6 +61,8 @@ void __weak panic_smp_self_stop(void)
- cpu_relax();
- }
-
-+atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
-+
- /**
- * panic - halt the system
- * @fmt: The text string to print
-@@ -71,17 +73,17 @@ void __weak panic_smp_self_stop(void)
- */
- void panic(const char *fmt, ...)
- {
-- static DEFINE_SPINLOCK(panic_lock);
- static char buf[1024];
- va_list args;
- long i, i_next = 0;
- int state = 0;
-+ int old_cpu, this_cpu;
-
- /*
- * Disable local interrupts. This will prevent panic_smp_self_stop
- * from deadlocking the first cpu that invokes the panic, since
- * there is nothing to prevent an interrupt handler (that runs
-- * after the panic_lock is acquired) from invoking panic again.
-+ * after setting panic_cpu) from invoking panic() again.
- */
- local_irq_disable();
-
-@@ -94,8 +96,16 @@ void panic(const char *fmt, ...)
- * multiple parallel invocations of panic, all other CPUs either
- * stop themself or will wait until they are stopped by the 1st CPU
- * with smp_send_stop().
-+ *
-+ * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
-+ * comes here, so go ahead.
-+ * `old_cpu == this_cpu' means we came from nmi_panic() which sets
-+ * panic_cpu to this CPU. In this case, this is also the 1st CPU.
- */
-- if (!spin_trylock(&panic_lock))
-+ this_cpu = raw_smp_processor_id();
-+ old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
-+
-+ if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
- panic_smp_self_stop();
-
- console_verbose();
---- a/kernel/watchdog.c
-+++ b/kernel/watchdog.c
-@@ -351,7 +351,7 @@ static void watchdog_overflow_callback(s
- trigger_allbutself_cpu_backtrace();
-
- if (hardlockup_panic)
-- panic("Hard LOCKUP");
-+ nmi_panic("Hard LOCKUP");
-
- __this_cpu_write(hard_watchdog_warn, true);
- return;
diff --git a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
index 97ce2141e9bcf5..43e2fbcc8611e4 100644
--- a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
+++ b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
@@ -30,9 +30,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -334,11 +334,7 @@ static inline int rcu_preempt_depth(void
+@@ -341,11 +341,7 @@ static inline int rcu_preempt_depth(void
+ /* Internal to kernel */
void rcu_init(void);
- void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline void rcu_bh_qs(void) { }
@@ -40,12 +40,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_bh_qs(void);
-#endif
void rcu_check_callbacks(int user);
- struct notifier_block;
- int rcu_cpu_notify(struct notifier_block *self,
+ void rcu_report_dead(unsigned int cpu);
+
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -266,7 +266,14 @@ void rcu_sched_qs(void)
- }
+@@ -254,7 +254,14 @@ void rcu_sched_qs(void)
+ this_cpu_ptr(&rcu_sched_data), true);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "../time/tick-internal.h"
#ifdef CONFIG_RCU_BOOST
-@@ -1346,7 +1347,7 @@ static void rcu_prepare_kthreads(int cpu
+@@ -1338,7 +1339,7 @@ static void rcu_prepare_kthreads(int cpu
#endif /* #else #ifdef CONFIG_RCU_BOOST */
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Check to see if any future RCU-related work will need to be done
-@@ -1363,7 +1364,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1355,7 +1356,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
? 0 : rcu_cpu_has_callbacks(NULL);
}
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
* after it.
-@@ -1459,6 +1462,8 @@ static bool __maybe_unused rcu_try_advan
+@@ -1451,6 +1454,8 @@ static bool __maybe_unused rcu_try_advan
return cbs_ready;
}
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
* to invoke. If the CPU has callbacks, try to advance them. Tell the
-@@ -1504,6 +1509,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1496,6 +1501,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
*nextevt = basemono + dj * TICK_NSEC;
return 0;
}
diff --git a/patches/pci-access-use-__wake_up_all_locked.patch b/patches/pci-access-use-__wake_up_all_locked.patch
index 3efbf833ce5d1b..15fa74557ae06b 100644
--- a/patches/pci-access-use-__wake_up_all_locked.patch
+++ b/patches/pci-access-use-__wake_up_all_locked.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
-@@ -561,7 +561,7 @@ void pci_cfg_access_unlock(struct pci_de
+@@ -672,7 +672,7 @@ void pci_cfg_access_unlock(struct pci_de
WARN_ON(!dev->block_cfg_access);
dev->block_cfg_access = 0;
diff --git a/patches/perf-make-swevent-hrtimer-irqsafe.patch b/patches/perf-make-swevent-hrtimer-irqsafe.patch
index 7963bd868397ac..43ee5b9a7a2066 100644
--- a/patches/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/patches/perf-make-swevent-hrtimer-irqsafe.patch
@@ -58,7 +58,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -7228,6 +7228,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -7261,6 +7261,7 @@ static void perf_swevent_init_hrtimer(st
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
diff --git a/patches/peter_zijlstra-frob-rcu.patch b/patches/peter_zijlstra-frob-rcu.patch
index 40bb2650f983c9..85b03377706916 100644
--- a/patches/peter_zijlstra-frob-rcu.patch
+++ b/patches/peter_zijlstra-frob-rcu.patch
@@ -155,7 +155,7 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -432,7 +432,7 @@ void rcu_read_unlock_special(struct task
+@@ -428,7 +428,7 @@ void rcu_read_unlock_special(struct task
}
/* Hardware IRQ handlers cannot block, complain if they get here. */
diff --git a/patches/peterz-srcu-crypto-chain.patch b/patches/peterz-srcu-crypto-chain.patch
index b612cb52ccb313..23d072fbd9382e 100644
--- a/patches/peterz-srcu-crypto-chain.patch
+++ b/patches/peterz-srcu-crypto-chain.patch
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
-@@ -719,13 +719,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
+@@ -718,13 +718,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
int crypto_register_notifier(struct notifier_block *nb)
{
@@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
-@@ -143,7 +143,7 @@ static inline int crypto_is_moribund(str
+@@ -146,7 +146,7 @@ static inline int crypto_is_moribund(str
static inline void crypto_notify(unsigned long val, void *v)
{
diff --git a/patches/ping-sysrq.patch b/patches/ping-sysrq.patch
index d4aa3f4b0b0ed7..854f85fa886847 100644
--- a/patches/ping-sysrq.patch
+++ b/patches/ping-sysrq.patch
@@ -105,7 +105,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
return true;
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
-@@ -818,6 +818,13 @@ static struct ctl_table ipv4_net_table[]
+@@ -681,6 +681,13 @@ static struct ctl_table ipv4_net_table[]
.proc_handler = proc_dointvec
},
{
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index dac394b24e3585..460ff2e2c41aa2 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1544,6 +1544,9 @@ struct task_struct {
+@@ -1565,6 +1565,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
const struct cred __rcu *real_cred; /* objective and real subjective task
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1218,6 +1218,9 @@ static void rt_mutex_init_task(struct ta
+@@ -1228,6 +1228,9 @@ static void rt_mutex_init_task(struct ta
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
-@@ -650,7 +651,7 @@ static int posix_cpu_timer_set(struct k_
+@@ -620,7 +621,7 @@ static int posix_cpu_timer_set(struct k_
/*
* Disarm any old timer after extracting its expiry time.
*/
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = 0;
old_incr = timer->it.cpu.incr;
-@@ -1091,7 +1092,7 @@ void posix_cpu_timer_schedule(struct k_i
+@@ -1063,7 +1064,7 @@ void posix_cpu_timer_schedule(struct k_i
/*
* Now re-arm for the new expiry time.
*/
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arm_timer(timer);
unlock_task_sighand(p, &flags);
-@@ -1182,13 +1183,13 @@ static inline int fastpath_timer_check(s
+@@ -1152,13 +1153,13 @@ static inline int fastpath_timer_check(s
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The fast path checks that there are no expired thread or thread
-@@ -1242,6 +1243,190 @@ void run_posix_cpu_timers(struct task_st
+@@ -1212,6 +1213,190 @@ void run_posix_cpu_timers(struct task_st
}
}
diff --git a/patches/power-use-generic-rwsem-on-rt.patch b/patches/power-use-generic-rwsem-on-rt.patch
index 7b49387491271b..2027a699651acf 100644
--- a/patches/power-use-generic-rwsem-on-rt.patch
+++ b/patches/power-use-generic-rwsem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -60,10 +60,11 @@ config LOCKDEP_SUPPORT
+@@ -57,10 +57,11 @@ config LOCKDEP_SUPPORT
config RWSEM_GENERIC_SPINLOCK
bool
diff --git a/patches/powerpc-preempt-lazy-support.patch b/patches/powerpc-preempt-lazy-support.patch
index b7effe28ad2358..08c9c8af88cd66 100644
--- a/patches/powerpc-preempt-lazy-support.patch
+++ b/patches/powerpc-preempt-lazy-support.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -142,6 +142,7 @@ config PPC
+@@ -139,6 +139,7 @@ config PPC
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
-@@ -160,6 +160,7 @@ int main(void)
+@@ -162,6 +162,7 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -133,16 +133,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq restore_user
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
-@@ -683,7 +683,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
- #else
- beq restore
+@@ -644,7 +644,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+ bl restore_math
+ b restore
#endif
-1: andi. r0,r4,_TIF_NEED_RESCHED
+1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
beq 2f
bl restore_interrupts
SCHEDULE_USER
-@@ -745,10 +745,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
+@@ -706,10 +706,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
@@ -162,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cmpwi cr1,r8,0
ld r0,SOFTE(r1)
cmpdi r0,0
-@@ -765,7 +773,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
+@@ -726,7 +734,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
diff --git a/patches/preempt-lazy-check-preempt_schedule.patch b/patches/preempt-lazy-check-preempt_schedule.patch
index 04036a9f4fdad0..7d88d204c49cd0 100644
--- a/patches/preempt-lazy-check-preempt_schedule.patch
+++ b/patches/preempt-lazy-check-preempt_schedule.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3450,6 +3450,30 @@ static void __sched notrace preempt_sche
+@@ -3500,6 +3500,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3464,6 +3488,8 @@ asmlinkage __visible void __sched notrac
+@@ -3514,6 +3538,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_schedule_common();
}
-@@ -3490,15 +3516,9 @@ asmlinkage __visible void __sched notrac
+@@ -3540,15 +3566,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 41137f487c862a..a2bf8cf0980df4 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -64,8 +64,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/sched/sched.h | 9 +++++++
kernel/trace/trace.c | 37 ++++++++++++++++++------------
kernel/trace/trace.h | 2 +
- kernel/trace/trace_output.c | 13 +++++++++-
- 13 files changed, 204 insertions(+), 29 deletions(-)
+ kernel/trace/trace_output.c | 14 +++++++++--
+ 13 files changed, 205 insertions(+), 29 deletions(-)
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -165,7 +165,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2967,6 +2967,43 @@ static inline int test_tsk_need_resched(
+@@ -3009,6 +3009,43 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -211,7 +211,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
set_tsk_thread_flag(current, TIF_SIGPENDING);
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
-@@ -102,7 +102,17 @@ static inline int test_ti_thread_flag(st
+@@ -103,7 +103,17 @@ static inline int test_ti_thread_flag(st
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
@@ -232,7 +232,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
-@@ -68,6 +68,7 @@ struct trace_entry {
+@@ -58,6 +58,7 @@ struct trace_entry {
int pid;
unsigned short migrate_disable;
unsigned short padding;
@@ -257,7 +257,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default PREEMPT_NONE
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -603,6 +603,38 @@ void resched_curr(struct rq *rq)
+@@ -475,6 +475,38 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -296,7 +296,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2344,6 +2376,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2392,6 +2424,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -306,7 +306,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3132,6 +3167,7 @@ void migrate_disable(void)
+@@ -3180,6 +3215,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -314,7 +314,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
p->migrate_disable = 1;
preempt_enable();
-@@ -3171,6 +3207,7 @@ void migrate_enable(void)
+@@ -3219,6 +3255,7 @@ void migrate_enable(void)
unpin_current_cpu();
preempt_enable();
@@ -322,7 +322,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(migrate_enable);
#endif
-@@ -3308,6 +3345,7 @@ static void __sched notrace __schedule(b
+@@ -3358,6 +3395,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev);
clear_tsk_need_resched(prev);
@@ -330,7 +330,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
rq->clock_skip_update = 0;
-@@ -3453,6 +3491,14 @@ asmlinkage __visible void __sched notrac
+@@ -3503,6 +3541,14 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -345,7 +345,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
preempt_disable_notrace();
/*
-@@ -5193,7 +5239,9 @@ void init_idle(struct task_struct *idle,
+@@ -5246,7 +5292,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -358,7 +358,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -3135,7 +3135,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3319,7 +3319,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -367,7 +367,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -3159,7 +3159,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3343,7 +3343,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
return;
if (delta > ideal_runtime)
@@ -376,7 +376,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -3299,7 +3299,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -3488,7 +3488,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -385,7 +385,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -3481,7 +3481,7 @@ static void __account_cfs_rq_runtime(str
+@@ -3670,7 +3670,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -394,7 +394,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -4093,7 +4093,7 @@ static void hrtick_start_fair(struct rq
+@@ -4282,7 +4282,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -403,7 +403,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -5177,7 +5177,7 @@ static void check_preempt_wakeup(struct
+@@ -5422,7 +5422,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -412,7 +412,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -7928,7 +7928,7 @@ static void task_fork_fair(struct task_s
+@@ -8173,7 +8173,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -421,7 +421,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -7953,7 +7953,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -8198,7 +8198,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -444,7 +444,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1300,6 +1300,15 @@ extern void init_sched_fair_class(void);
+@@ -1304,6 +1304,15 @@ extern void init_sched_fair_class(void);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -462,7 +462,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1652,6 +1652,7 @@ tracing_generic_entry_update(struct trac
+@@ -1657,6 +1657,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -470,8 +470,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -1661,7 +1662,8 @@ tracing_generic_entry_update(struct trac
- #endif
+@@ -1667,7 +1668,8 @@ tracing_generic_entry_update(struct trac
+ ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
@@ -480,7 +480,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -2557,15 +2559,17 @@ get_total_entries(struct trace_buffer *b
+@@ -2563,15 +2565,17 @@ get_total_entries(struct trace_buffer *b
static void print_lat_help_header(struct seq_file *m)
{
@@ -507,7 +507,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2591,11 +2595,14 @@ static void print_func_help_header_irq(s
+@@ -2597,11 +2601,14 @@ static void print_func_help_header_irq(s
print_event_info(buf, m);
seq_puts(m, "# _-----=> irqs-off\n"
"# / _----=> need-resched\n"
@@ -537,11 +537,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
-@@ -125,6 +126,7 @@ enum trace_flag_type {
- TRACE_FLAG_HARDIRQ = 0x08,
+@@ -126,6 +127,7 @@ enum trace_flag_type {
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
-+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x40,
+ TRACE_FLAG_NMI = 0x40,
++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
};
#define TRACE_BUF_SIZE 1024
@@ -555,18 +555,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
char irqs_off;
int hardirq;
int softirq;
-@@ -413,6 +414,8 @@ int trace_print_lat_fmt(struct trace_seq
- need_resched = '.';
+@@ -416,6 +417,9 @@ int trace_print_lat_fmt(struct trace_seq
break;
}
+
+ need_resched_lazy =
+ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
-
++
hardsoft_irq =
- (hardirq && softirq) ? 'H' :
-@@ -420,14 +423,20 @@ int trace_print_lat_fmt(struct trace_seq
- softirq ? 's' :
- '.';
+ (nmi && hardirq) ? 'Z' :
+ nmi ? 'z' :
+@@ -424,14 +428,20 @@ int trace_print_lat_fmt(struct trace_seq
+ softirq ? 's' :
+ '.' ;
- trace_seq_printf(s, "%c%c%c",
- irqs_off, need_resched, hardsoft_irq);
diff --git a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
index 432688fd87263c..c55445f09bfe86 100644
--- a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
+++ b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
@@ -15,7 +15,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -271,6 +271,13 @@ asmlinkage void early_printk(const char
+@@ -276,6 +276,13 @@ asmlinkage void early_printk(const char
*/
static bool __read_mostly printk_killswitch;
diff --git a/patches/printk-kill.patch b/patches/printk-kill.patch
index 444c4596dffad9..5bda7a70ed50b6 100644
--- a/patches/printk-kill.patch
+++ b/patches/printk-kill.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -117,9 +117,11 @@ int no_printk(const char *fmt, ...)
+@@ -117,9 +117,11 @@ do { \
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -241,6 +241,58 @@ struct printk_log {
+@@ -246,6 +246,58 @@ struct printk_log {
*/
static DEFINE_RAW_SPINLOCK(logbuf_lock);
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -1672,6 +1724,13 @@ asmlinkage int vprintk_emit(int facility
+@@ -1620,6 +1672,13 @@ asmlinkage int vprintk_emit(int facility
/* cpu currently holding logbuf_lock in this function */
static unsigned int logbuf_cpu = UINT_MAX;
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
in_sched = true;
-@@ -1961,26 +2020,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
+@@ -1901,26 +1960,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
#endif /* CONFIG_PRINTK */
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
-@@ -299,6 +299,8 @@ static int is_softlockup(unsigned long t
+@@ -315,6 +315,8 @@ static int is_softlockup(unsigned long t
#ifdef CONFIG_HARDLOCKUP_DETECTOR
@@ -139,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
-@@ -333,6 +335,13 @@ static void watchdog_overflow_callback(s
+@@ -349,6 +351,13 @@ static void watchdog_overflow_callback(s
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
@@ -153,7 +153,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
print_modules();
-@@ -350,6 +359,7 @@ static void watchdog_overflow_callback(s
+@@ -366,6 +375,7 @@ static void watchdog_overflow_callback(s
!test_and_set_bit(0, &hardlockup_allcpu_dumped))
trigger_allbutself_cpu_backtrace();
diff --git a/patches/printk-rt-aware.patch b/patches/printk-rt-aware.patch
index bce01c13ffa7c9..5e9b8bb1571851 100644
--- a/patches/printk-rt-aware.patch
+++ b/patches/printk-rt-aware.patch
@@ -7,8 +7,8 @@ interrupts while printing to a serial console.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/printk/printk.c | 27 +++++++++++++++++++++++----
- 1 file changed, 23 insertions(+), 4 deletions(-)
+ kernel/printk/printk.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -28,42 +28,32 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1577,6 +1579,15 @@ static inline int can_use_console(unsign
- static int console_trylock_for_printk(void)
- {
- unsigned int cpu = smp_processor_id();
+@@ -1821,13 +1823,23 @@ asmlinkage int vprintk_emit(int facility
+
+ /* If called from the scheduler, we can not call up(). */
+ if (!in_sched) {
++ int may_trylock = 1;
++
+ lockdep_off();
+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int lock = !early_boot_irqs_disabled && (preempt_count() == 0) &&
-+ !irqs_disabled();
-+#else
-+ int lock = 1;
++ /*
++ * we can't take a sleeping lock with IRQs or preemption disabled
++ * so we can't print in these contexts
++ */
++ if (!(preempt_count() == 0 && !irqs_disabled()))
++ may_trylock = 0;
+#endif
-+
-+ if (!lock)
-+ return 0;
-
- if (!console_trylock())
- return 0;
-@@ -1879,8 +1890,7 @@ asmlinkage int vprintk_emit(int facility
- * console_sem which would prevent anyone from printing to
- * console
- */
-- preempt_disable();
--
-+ migrate_disable();
/*
* Try to acquire and then immediately release the console
* semaphore. The release will print out buffers and wake up
-@@ -1888,7 +1898,7 @@ asmlinkage int vprintk_emit(int facility
+ * /dev/kmsg and syslog() users.
*/
- if (console_trylock_for_printk())
+- if (console_trylock())
++ if (may_trylock && console_trylock())
console_unlock();
-- preempt_enable();
-+ migrate_enable();
lockdep_on();
}
-
-@@ -2248,11 +2258,16 @@ static void console_cont_flush(char *tex
+@@ -2229,11 +2241,16 @@ static void console_cont_flush(char *tex
goto out;
len = cont_print_text(text, size);
@@ -80,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
out:
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-@@ -2362,13 +2377,17 @@ void console_unlock(void)
+@@ -2355,13 +2372,17 @@ void console_unlock(void)
console_idx = log_next(console_idx);
console_seq++;
console_prev = msg->flags;
diff --git a/patches/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch b/patches/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch
index 413a1077b48334..beaf1c852010ba 100644
--- a/patches/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch
+++ b/patches/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
-@@ -129,12 +129,14 @@ static bool ptrace_freeze_traced(struct
+@@ -128,12 +128,14 @@ static bool ptrace_freeze_traced(struct
spin_lock_irq(&task->sighand->siglock);
if (task_is_traced(task) && !__fatal_signal_pending(task)) {
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 5ff503d717e598..3cb6bb7003cf20 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -24,13 +24,13 @@ taken in case the caller is interrupted between looking into ->state and
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/sched.h | 48 +++++++++++++++++++++++++++++++++++++++++++++---
- kernel/ptrace.c | 7 ++++++-
+ kernel/ptrace.c | 9 ++++++++-
kernel/sched/core.c | 17 +++++++++++++++--
- 3 files changed, 66 insertions(+), 6 deletions(-)
+ 3 files changed, 68 insertions(+), 6 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -240,10 +240,7 @@ extern char ___assert_task_state[1 - 2*!
+@@ -241,10 +241,7 @@ extern char ___assert_task_state[1 - 2*!
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
-@@ -2984,6 +2981,51 @@ static inline int signal_pending_state(l
+@@ -3026,6 +3023,51 @@ static inline int signal_pending_state(l
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
@@ -95,23 +95,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* explicit rescheduling in places that are safe. The return
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
-@@ -129,7 +129,12 @@ static bool ptrace_freeze_traced(struct
+@@ -128,7 +128,14 @@ static bool ptrace_freeze_traced(struct
spin_lock_irq(&task->sighand->siglock);
if (task_is_traced(task) && !__fatal_signal_pending(task)) {
- task->state = __TASK_TRACED;
-+ raw_spin_lock_irq(&task->pi_lock);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&task->pi_lock, flags);
+ if (task->state & __TASK_TRACED)
+ task->state = __TASK_TRACED;
+ else
+ task->saved_state = __TASK_TRACED;
-+ raw_spin_unlock_irq(&task->pi_lock);
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
ret = true;
}
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1426,6 +1426,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1317,6 +1317,18 @@ int migrate_swap(struct task_struct *cur
return ret;
}
@@ -130,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1470,7 +1482,7 @@ unsigned long wait_task_inactive(struct
+@@ -1361,7 +1373,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -139,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1485,7 +1497,8 @@ unsigned long wait_task_inactive(struct
+@@ -1376,7 +1388,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/radix-tree-rt-aware.patch b/patches/radix-tree-rt-aware.patch
index e2b4d350dc1c22..ba964a24a90bfc 100644
--- a/patches/radix-tree-rt-aware.patch
+++ b/patches/radix-tree-rt-aware.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
-@@ -277,8 +277,13 @@ radix_tree_gang_lookup(struct radix_tree
+@@ -294,8 +294,13 @@ radix_tree_gang_lookup(struct radix_tree
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
void ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items);
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
-@@ -303,7 +308,7 @@ unsigned long radix_tree_locate_item(str
+@@ -320,7 +325,7 @@ unsigned long radix_tree_locate_item(str
static inline void radix_tree_preload_end(void)
{
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
-@@ -196,13 +196,14 @@ radix_tree_node_alloc(struct radix_tree_
+@@ -240,13 +240,14 @@ radix_tree_node_alloc(struct radix_tree_
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Update the allocation stack trace as this is more useful
* for debugging.
-@@ -242,6 +243,7 @@ radix_tree_node_free(struct radix_tree_n
+@@ -287,6 +288,7 @@ radix_tree_node_free(struct radix_tree_n
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
-@@ -310,6 +312,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
+@@ -355,6 +357,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index 7432a44be326fd..f23fa40dfd13ac 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
add_interrupt_bench(cycles);
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
-@@ -61,6 +61,7 @@ struct irq_desc {
+@@ -64,6 +64,7 @@ struct irq_desc {
unsigned int irqs_unhandled;
atomic_t threads_handled;
int threads_handled_last;
@@ -84,9 +84,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ u64 ip = regs ? instruction_pointer(regs) : 0;
irqreturn_t retval = IRQ_NONE;
unsigned int flags = 0, irq = desc->irq_data.irq;
- struct irqaction *action = desc->action;
-@@ -176,7 +178,11 @@ irqreturn_t handle_irq_event_percpu(stru
- action = action->next;
+ struct irqaction *action;
+@@ -174,7 +176,11 @@ irqreturn_t handle_irq_event_percpu(stru
+ retval |= res;
}
- add_interrupt_randomness(irq, flags);
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
note_interrupt(desc, retval);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -1045,6 +1045,12 @@ static int irq_thread(void *data)
+@@ -1043,6 +1043,12 @@ static int irq_thread(void *data)
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
diff --git a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
index eca80c4e010b26..af02638fd7b151 100644
--- a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "tree.h"
#include "rcu.h"
-@@ -2962,18 +2967,17 @@ static void
+@@ -2946,18 +2951,17 @@ static void
/*
* Do RCU core processing for the current CPU.
*/
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
-@@ -2985,18 +2989,105 @@ static void invoke_rcu_callbacks(struct
+@@ -2969,18 +2973,105 @@ static void invoke_rcu_callbacks(struct
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
@@ -168,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -4617,7 +4708,6 @@ void __init rcu_init(void)
+@@ -4648,7 +4739,6 @@ void __init rcu_init(void)
if (dump_tree)
rcu_dump_rcu_node_tree(&rcu_sched_state);
__rcu_init_preempt();
@@ -178,7 +178,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We don't need protection against CPU-hotplug here because
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
-@@ -565,12 +565,10 @@ extern struct rcu_state rcu_bh_state;
+@@ -580,12 +580,10 @@ extern struct rcu_state rcu_bh_state;
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
@@ -191,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef RCU_TREE_NONCORE
-@@ -590,10 +588,9 @@ void call_rcu(struct rcu_head *head, rcu
+@@ -605,10 +603,9 @@ void call_rcu(struct rcu_head *head, rcu
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -247,7 +247,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
-@@ -646,15 +638,6 @@ static void rcu_preempt_check_callbacks(
+@@ -635,15 +627,6 @@ static void rcu_preempt_check_callbacks(
t->rcu_read_unlock_special.b.need_qs = true;
}
@@ -263,7 +263,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Queue a preemptible-RCU callback for invocation after a grace period.
*/
-@@ -931,6 +914,19 @@ void exit_rcu(void)
+@@ -925,6 +908,19 @@ void exit_rcu(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -283,7 +283,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_RCU_BOOST
#include "../locking/rtmutex_common.h"
-@@ -962,16 +958,6 @@ static void rcu_initiate_boost_trace(str
+@@ -956,16 +952,6 @@ static void rcu_initiate_boost_trace(str
#endif /* #else #ifdef CONFIG_RCU_TRACE */
@@ -300,7 +300,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1116,23 +1102,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1109,23 +1095,6 @@ static void rcu_initiate_boost(struct rc
}
/*
@@ -324,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
*/
-@@ -1187,67 +1156,6 @@ static int rcu_spawn_one_boost_kthread(s
+@@ -1179,67 +1148,6 @@ static int rcu_spawn_one_boost_kthread(s
return 0;
}
@@ -392,7 +392,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
-@@ -1277,26 +1185,12 @@ static void rcu_boost_kthread_setaffinit
+@@ -1269,26 +1177,12 @@ static void rcu_boost_kthread_setaffinit
free_cpumask_var(cm);
}
@@ -419,8 +419,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_for_each_leaf_node(rcu_state_p, rnp)
(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}
-@@ -1319,11 +1213,6 @@ static void rcu_initiate_boost(struct rc
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+@@ -1311,11 +1205,6 @@ static void rcu_initiate_boost(struct rc
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
-static void invoke_rcu_callbacks_kthread(void)
diff --git a/patches/rcu-disable-more-spots-of-rcu_bh.patch b/patches/rcu-disable-more-spots-of-rcu_bh.patch
index e6ea86c222f154..328cd9b4a04045 100644
--- a/patches/rcu-disable-more-spots-of-rcu_bh.patch
+++ b/patches/rcu-disable-more-spots-of-rcu_bh.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -449,11 +449,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
+@@ -440,11 +440,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
/*
* Return the number of RCU BH batches started thus far for debug & stats.
*/
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Return the number of RCU batches completed thus far for debug & stats.
-@@ -558,9 +560,11 @@ void rcutorture_get_gp_data(enum rcutort
+@@ -549,9 +551,11 @@ void rcutorture_get_gp_data(enum rcutort
case RCU_FLAVOR:
rsp = rcu_state_p;
break;
@@ -39,19 +39,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
case RCU_SCHED_FLAVOR:
rsp = &rcu_sched_state;
break;
-@@ -4606,7 +4610,9 @@ void __init rcu_init(void)
+@@ -4637,7 +4641,9 @@ void __init rcu_init(void)
rcu_bootup_announce();
rcu_init_geometry();
+#ifndef CONFIG_PREEMPT_RT_FULL
- rcu_init_one(&rcu_bh_state, &rcu_bh_data);
+ rcu_init_one(&rcu_bh_state);
+#endif
- rcu_init_one(&rcu_sched_state, &rcu_sched_data);
+ rcu_init_one(&rcu_sched_state);
if (dump_tree)
rcu_dump_rcu_node_tree(&rcu_sched_state);
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
-@@ -557,7 +557,9 @@ extern struct list_head rcu_struct_flavo
+@@ -572,7 +572,9 @@ extern struct list_head rcu_struct_flavo
*/
extern struct rcu_state rcu_sched_state;
diff --git a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
index 4f5760e1dee128..ba26849ccf838a 100644
--- a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
+++ b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -614,7 +614,7 @@ config RCU_FANOUT_LEAF
+@@ -610,7 +610,7 @@ config RCU_FANOUT_LEAF
config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
diff --git a/patches/rcu-make-RCU_BOOST-default-on-RT.patch b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
index 4abde47d1286f6..fbb76d895bfc61 100644
--- a/patches/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -498,7 +498,7 @@ config TINY_RCU
+@@ -494,7 +494,7 @@ config TINY_RCU
config RCU_EXPERT
bool "Make expert-level adjustments to RCU configuration"
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
help
This option needs to be enabled if you wish to make
expert-level adjustments to RCU configuration. By default,
-@@ -641,7 +641,7 @@ config TREE_RCU_TRACE
+@@ -637,7 +637,7 @@ config TREE_RCU_TRACE
config RCU_BOOST
bool "Enable RCU priority boosting"
depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
diff --git a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index d398665373b1f1..7f2d9e88470d8f 100644
--- a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -169,6 +169,9 @@ void call_rcu(struct rcu_head *head,
+@@ -177,6 +177,9 @@ void call_rcu(struct rcu_head *head,
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
-@@ -192,6 +195,7 @@ void call_rcu(struct rcu_head *head,
+@@ -200,6 +203,7 @@ void call_rcu(struct rcu_head *head,
*/
void call_rcu_bh(struct rcu_head *head,
rcu_callback_t func);
@@ -50,9 +50,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
-@@ -330,7 +334,11 @@ static inline int rcu_preempt_depth(void
+@@ -337,7 +341,11 @@ static inline int rcu_preempt_depth(void
+ /* Internal to kernel */
void rcu_init(void);
- void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void rcu_bh_qs(void) { }
@@ -60,9 +60,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_bh_qs(void);
+#endif
void rcu_check_callbacks(int user);
- struct notifier_block;
- int rcu_cpu_notify(struct notifier_block *self,
-@@ -496,7 +504,14 @@ extern struct lockdep_map rcu_callback_m
+ void rcu_report_dead(unsigned int cpu);
+
+@@ -505,7 +513,14 @@ extern struct lockdep_map rcu_callback_m
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
-@@ -944,10 +959,14 @@ static inline void rcu_read_unlock(void)
+@@ -953,10 +968,14 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -957,10 +976,14 @@ static inline void rcu_read_lock_bh(void
+@@ -966,10 +985,14 @@ static inline void rcu_read_lock_bh(void
*/
static inline void rcu_read_unlock_bh(void)
{
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_sched_force_quiescent_state(void);
void rcu_idle_enter(void);
-@@ -105,6 +111,14 @@ extern int rcu_scheduler_active __read_m
+@@ -107,6 +113,14 @@ extern int rcu_scheduler_active __read_m
bool rcu_is_watching(void);
@@ -163,15 +163,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* __LINUX_RCUTREE_H */
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -266,6 +266,7 @@ void rcu_sched_qs(void)
- }
+@@ -254,6 +254,7 @@ void rcu_sched_qs(void)
+ this_cpu_ptr(&rcu_sched_data), true);
}
+#ifndef CONFIG_PREEMPT_RT_FULL
void rcu_bh_qs(void)
{
if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
-@@ -275,6 +276,7 @@ void rcu_bh_qs(void)
+@@ -263,6 +264,7 @@ void rcu_bh_qs(void)
__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
}
}
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
-@@ -459,6 +461,7 @@ unsigned long rcu_batches_completed_sche
+@@ -450,6 +452,7 @@ unsigned long rcu_batches_completed_sche
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
@@ -187,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Return the number of RCU BH batches completed thus far for debug & stats.
*/
-@@ -486,6 +489,13 @@ void rcu_bh_force_quiescent_state(void)
+@@ -477,6 +480,13 @@ void rcu_bh_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
@@ -201,7 +201,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Force a quiescent state for RCU-sched.
*/
-@@ -3116,6 +3126,7 @@ void call_rcu_sched(struct rcu_head *hea
+@@ -3099,6 +3109,7 @@ void call_rcu_sched(struct rcu_head *hea
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -209,7 +209,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
-@@ -3124,6 +3135,7 @@ void call_rcu_bh(struct rcu_head *head,
+@@ -3107,6 +3118,7 @@ void call_rcu_bh(struct rcu_head *head,
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -217,7 +217,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Queue an RCU callback for lazy invocation after a grace period.
-@@ -3215,6 +3227,7 @@ void synchronize_sched(void)
+@@ -3198,6 +3210,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -225,7 +225,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
-@@ -3241,6 +3254,7 @@ void synchronize_rcu_bh(void)
+@@ -3224,6 +3237,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -233,7 +233,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* get_state_synchronize_rcu - Snapshot current RCU state
-@@ -4103,6 +4117,7 @@ static void _rcu_barrier(struct rcu_stat
+@@ -4104,6 +4118,7 @@ static void _rcu_barrier(struct rcu_stat
mutex_unlock(&rsp->barrier_mutex);
}
@@ -241,7 +241,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
-@@ -4111,6 +4126,7 @@ void rcu_barrier_bh(void)
+@@ -4112,6 +4127,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -251,7 +251,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
-@@ -276,6 +276,7 @@ int rcu_read_lock_held(void)
+@@ -295,6 +295,7 @@ int rcu_read_lock_held(void)
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
@@ -259,7 +259,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
-@@ -302,6 +303,7 @@ int rcu_read_lock_bh_held(void)
+@@ -321,6 +322,7 @@ int rcu_read_lock_bh_held(void)
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
diff --git a/patches/rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch b/patches/rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch
index 9270dcbf6f30b8..0f95146d0820a6 100644
--- a/patches/rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch
+++ b/patches/rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
-@@ -390,6 +390,7 @@ static struct rcu_torture_ops rcu_ops =
+@@ -409,6 +409,7 @@ static struct rcu_torture_ops rcu_ops =
.name = "rcu"
};
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Definitions for rcu_bh torture testing.
*/
-@@ -429,6 +430,12 @@ static struct rcu_torture_ops rcu_bh_ops
+@@ -448,6 +449,12 @@ static struct rcu_torture_ops rcu_bh_ops
.name = "rcu_bh"
};
diff --git a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
index 017e4f31aa69d0..f444f9cce2fe99 100644
--- a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
+++ b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -271,7 +271,12 @@ static void rcu_preempt_qs(void);
+@@ -259,7 +259,12 @@ static void rcu_preempt_qs(void);
void rcu_bh_qs(void)
{
diff --git a/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
index 15422647c7ffb4..66ddf64c23e0f0 100644
--- a/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
+++ b/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -107,9 +107,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -184,9 +184,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
*/
void pin_current_cpu(void)
{
diff --git a/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch b/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
index beec55cc03094a..e1f9487d39ff5f 100644
--- a/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
+++ b/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
@@ -74,7 +74,7 @@ This issue was first reported in:
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
-@@ -230,8 +230,6 @@ int __cpu_disable(void)
+@@ -234,8 +234,6 @@ int __cpu_disable(void)
flush_cache_louis();
local_flush_tlb_all();
@@ -83,7 +83,7 @@ This issue was first reported in:
return 0;
}
-@@ -247,6 +245,9 @@ void __cpu_die(unsigned int cpu)
+@@ -251,6 +249,9 @@ void __cpu_die(unsigned int cpu)
pr_err("CPU%u: cpu didn't die\n", cpu);
return;
}
diff --git a/patches/rt-add-rt-locks.patch b/patches/rt-add-rt-locks.patch
index 94ae7bc4d54608..26135d8ed1a44e 100644
--- a/patches/rt-add-rt-locks.patch
+++ b/patches/rt-add-rt-locks.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/spinlock_api_smp.h | 4
include/linux/spinlock_rt.h | 173 +++++++++++++
include/linux/spinlock_types.h | 11
- include/linux/spinlock_types_rt.h | 51 ++++
+ include/linux/spinlock_types_rt.h | 48 +++
kernel/futex.c | 10
kernel/locking/Makefile | 9
kernel/locking/rt.c | 476 ++++++++++++++++++++++++++++++++++++++
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/locking/spinlock.c | 7
kernel/locking/spinlock_debug.c | 5
kernel/sched/core.c | 7
- 23 files changed, 1597 insertions(+), 56 deletions(-)
+ 23 files changed, 1594 insertions(+), 56 deletions(-)
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -405,7 +405,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ RW_DEP_MAP_INIT(name) }
+
+#define DEFINE_RWLOCK(name) \
-+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
++ rwlock_t name = __RW_LOCK_UNLOCKED(name)
+
+#endif
--- a/include/linux/rwsem.h
@@ -585,7 +585,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -309,6 +309,11 @@ extern char ___assert_task_state[1 - 2*!
+@@ -310,6 +310,11 @@ extern char ___assert_task_state[1 - 2*!
#endif
@@ -597,7 +597,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Task command name length */
#define TASK_COMM_LEN 16
-@@ -967,8 +972,18 @@ struct wake_q_head {
+@@ -981,8 +986,18 @@ struct wake_q_head {
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
extern void wake_q_add(struct wake_q_head *head,
@@ -860,7 +860,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* __LINUX_SPINLOCK_TYPES_H */
--- /dev/null
+++ b/include/linux/spinlock_types_rt.h
-@@ -0,0 +1,51 @@
+@@ -0,0 +1,48 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
+#define __LINUX_SPINLOCK_TYPES_RT_H
+
@@ -905,16 +905,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ { .lock = __RT_SPIN_INITIALIZER(name.lock), \
+ SPIN_DEP_MAP_INIT(name) }
+
-+#define __DEFINE_SPINLOCK(name) \
-+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
-+
+#define DEFINE_SPINLOCK(name) \
-+ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#endif
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1212,6 +1212,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1263,6 +1263,7 @@ static int wake_futex_pi(u32 __user *uad
struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval;
WAKE_Q(wake_q);
@@ -922,7 +919,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bool deboost;
int ret = 0;
-@@ -1278,7 +1279,8 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1329,7 +1330,8 @@ static int wake_futex_pi(u32 __user *uad
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
@@ -932,7 +929,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* First unlock HB so the waiter does not spin on it once he got woken
-@@ -1288,6 +1290,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1339,6 +1341,7 @@ static int wake_futex_pi(u32 __user *uad
*/
spin_unlock(&hb->lock);
wake_up_q(&wake_q);
@@ -940,7 +937,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (deboost)
rt_mutex_adjust_prio(current);
-@@ -2728,10 +2731,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2813,10 +2816,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -954,14 +951,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(ret != 0))
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
-@@ -1,5 +1,5 @@
+@@ -2,7 +2,7 @@
+ # and is generally not a function of system call inputs.
+ KCOV_INSTRUMENT := n
-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
+obj-y += semaphore.o percpu-rwsem.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
-@@ -8,7 +8,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS
+@@ -11,7 +11,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS
CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
endif
@@ -973,7 +972,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -22,7 +26,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+@@ -25,7 +29,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
@@ -2148,7 +2147,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -547,7 +547,7 @@ void wake_q_add(struct wake_q_head *head
+@@ -419,7 +419,7 @@ void wake_q_add(struct wake_q_head *head
head->lastp = &node->next;
}
@@ -2157,7 +2156,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
struct wake_q_node *node = head->first;
-@@ -564,7 +564,10 @@ void wake_up_q(struct wake_q_head *head)
+@@ -436,7 +436,10 @@ void wake_up_q(struct wake_q_head *head)
* wake_up_process() implies a wmb() to pair with the queueing
* in wake_q_add() so as not to miss wakeups.
*/
diff --git a/patches/rt-introduce-cpu-chill.patch b/patches/rt-introduce-cpu-chill.patch
index 2d100d36fb47f9..4fee07d7ab3787 100644
--- a/patches/rt-introduce-cpu-chill.patch
+++ b/patches/rt-introduce-cpu-chill.patch
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* defined(_LINUX_DELAY_H) */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1794,6 +1794,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
+@@ -1788,6 +1788,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
diff --git a/patches/rt-local-irq-lock.patch b/patches/rt-local-irq-lock.patch
index fdd9b7d2140c1f..b529ef37a3a437 100644
--- a/patches/rt-local-irq-lock.patch
+++ b/patches/rt-local-irq-lock.patch
@@ -12,13 +12,13 @@ is held and the owner is preempted.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/locallock.h | 264 ++++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/locallock.h | 266 ++++++++++++++++++++++++++++++++++++++++++++++
include/linux/percpu.h | 29 +++++
- 2 files changed, 293 insertions(+)
+ 2 files changed, 295 insertions(+)
--- /dev/null
+++ b/include/linux/locallock.h
-@@ -0,0 +1,264 @@
+@@ -0,0 +1,266 @@
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
+
@@ -262,7 +262,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#define local_lock(lvar) preempt_disable()
+#define local_unlock(lvar) preempt_enable()
+#define local_lock_irq(lvar) local_irq_disable()
++#define local_lock_irq_on(lvar, cpu) local_irq_disable()
+#define local_unlock_irq(lvar) local_irq_enable()
++#define local_unlock_irq_on(lvar, cpu) local_irq_enable()
+#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
+#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
+
@@ -285,24 +287,24 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
-@@ -24,6 +24,35 @@
- PERCPU_MODULE_RESERVE)
+@@ -18,6 +18,35 @@
+ #define PERCPU_MODULE_RESERVE 0
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+
-+#define get_local_var(var) (*({ \
-+ migrate_disable(); \
-+ this_cpu_ptr(&var); }))
++#define get_local_var(var) (*({ \
++ migrate_disable(); \
++ this_cpu_ptr(&var); }))
+
+#define put_local_var(var) do { \
+ (void)&(var); \
+ migrate_enable(); \
+} while (0)
+
-+# define get_local_ptr(var) ({ \
-+ migrate_disable(); \
-+ this_cpu_ptr(var); })
++# define get_local_ptr(var) ({ \
++ migrate_disable(); \
++ this_cpu_ptr(var); })
+
+# define put_local_ptr(var) do { \
+ (void)(var); \
diff --git a/patches/rtmutex-Make-wait_lock-irq-safe.patch b/patches/rtmutex-Make-wait_lock-irq-safe.patch
deleted file mode 100644
index d36807b15a4b7d..00000000000000
--- a/patches/rtmutex-Make-wait_lock-irq-safe.patch
+++ /dev/null
@@ -1,597 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Wed, 13 Jan 2016 11:25:38 +0100
-Subject: rtmutex: Make wait_lock irq safe
-
-Sasha reported a lockdep splat about a potential deadlock between RCU boosting
-rtmutex and the posix timer it_lock.
-
-CPU0 CPU1
-
-rtmutex_lock(&rcu->rt_mutex)
- spin_lock(&rcu->rt_mutex.wait_lock)
- local_irq_disable()
- spin_lock(&timer->it_lock)
- spin_lock(&rcu->mutex.wait_lock)
---> Interrupt
- spin_lock(&timer->it_lock)
-
-This is caused by the following code sequence on CPU1
-
- rcu_read_lock()
- x = lookup();
- if (x)
- spin_lock_irqsave(&x->it_lock);
- rcu_read_unlock();
- return x;
-
-We could fix that in the posix timer code by keeping rcu read locked across
-the spinlocked and irq disabled section, but the above sequence is common and
-there is no reason not to support it.
-
-Taking rt_mutex.wait_lock irq safe prevents the deadlock.
-
-Reported-by: Sasha Levin <sasha.levin@oracle.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/futex.c | 18 +++---
- kernel/locking/rtmutex.c | 135 +++++++++++++++++++++++++----------------------
- 2 files changed, 81 insertions(+), 72 deletions(-)
-
---- a/kernel/futex.c
-+++ b/kernel/futex.c
-@@ -1223,7 +1223,7 @@ static int wake_futex_pi(u32 __user *uad
- if (pi_state->owner != current)
- return -EINVAL;
-
-- raw_spin_lock(&pi_state->pi_mutex.wait_lock);
-+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
- new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-
- /*
-@@ -1259,22 +1259,22 @@ static int wake_futex_pi(u32 __user *uad
- ret = -EINVAL;
- }
- if (ret) {
-- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
-+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- return ret;
- }
-
-- raw_spin_lock_irq(&pi_state->owner->pi_lock);
-+ raw_spin_lock(&pi_state->owner->pi_lock);
- WARN_ON(list_empty(&pi_state->list));
- list_del_init(&pi_state->list);
-- raw_spin_unlock_irq(&pi_state->owner->pi_lock);
-+ raw_spin_unlock(&pi_state->owner->pi_lock);
-
-- raw_spin_lock_irq(&new_owner->pi_lock);
-+ raw_spin_lock(&new_owner->pi_lock);
- WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &new_owner->pi_state_list);
- pi_state->owner = new_owner;
-- raw_spin_unlock_irq(&new_owner->pi_lock);
-+ raw_spin_unlock(&new_owner->pi_lock);
-
-- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
-+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-
- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
-
-@@ -2139,11 +2139,11 @@ static int fixup_owner(u32 __user *uaddr
- * we returned due to timeout or signal without taking the
- * rt_mutex. Too late.
- */
-- raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
-+ raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
- owner = rt_mutex_owner(&q->pi_state->pi_mutex);
- if (!owner)
- owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
-- raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
-+ raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
- ret = fixup_pi_state_owner(uaddr, q, owner);
- goto out;
- }
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -99,13 +99,14 @@ static inline void mark_rt_mutex_waiters
- * 2) Drop lock->wait_lock
- * 3) Try to unlock the lock with cmpxchg
- */
--static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
-+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
-+ unsigned long flags)
- __releases(lock->wait_lock)
- {
- struct task_struct *owner = rt_mutex_owner(lock);
-
- clear_rt_mutex_waiters(lock);
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
- /*
- * If a new waiter comes in between the unlock and the cmpxchg
- * we have two situations:
-@@ -147,11 +148,12 @@ static inline void mark_rt_mutex_waiters
- /*
- * Simple slow path only version: lock->owner is protected by lock->wait_lock.
- */
--static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
-+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
-+ unsigned long flags)
- __releases(lock->wait_lock)
- {
- lock->owner = NULL;
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
- return true;
- }
- #endif
-@@ -433,7 +435,6 @@ static int rt_mutex_adjust_prio_chain(st
- int ret = 0, depth = 0;
- struct rt_mutex *lock;
- bool detect_deadlock;
-- unsigned long flags;
- bool requeue = true;
-
- detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
-@@ -476,7 +477,7 @@ static int rt_mutex_adjust_prio_chain(st
- /*
- * [1] Task cannot go away as we did a get_task() before !
- */
-- raw_spin_lock_irqsave(&task->pi_lock, flags);
-+ raw_spin_lock_irq(&task->pi_lock);
-
- /*
- * [2] Get the waiter on which @task is blocked on.
-@@ -560,7 +561,7 @@ static int rt_mutex_adjust_prio_chain(st
- * operations.
- */
- if (!raw_spin_trylock(&lock->wait_lock)) {
-- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-+ raw_spin_unlock_irq(&task->pi_lock);
- cpu_relax();
- goto retry;
- }
-@@ -591,7 +592,7 @@ static int rt_mutex_adjust_prio_chain(st
- /*
- * No requeue[7] here. Just release @task [8]
- */
-- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-+ raw_spin_unlock(&task->pi_lock);
- put_task_struct(task);
-
- /*
-@@ -599,14 +600,14 @@ static int rt_mutex_adjust_prio_chain(st
- * If there is no owner of the lock, end of chain.
- */
- if (!rt_mutex_owner(lock)) {
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irq(&lock->wait_lock);
- return 0;
- }
-
- /* [10] Grab the next task, i.e. owner of @lock */
- task = rt_mutex_owner(lock);
- get_task_struct(task);
-- raw_spin_lock_irqsave(&task->pi_lock, flags);
-+ raw_spin_lock(&task->pi_lock);
-
- /*
- * No requeue [11] here. We just do deadlock detection.
-@@ -621,8 +622,8 @@ static int rt_mutex_adjust_prio_chain(st
- top_waiter = rt_mutex_top_waiter(lock);
-
- /* [13] Drop locks */
-- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock(&task->pi_lock);
-+ raw_spin_unlock_irq(&lock->wait_lock);
-
- /* If owner is not blocked, end of chain. */
- if (!next_lock)
-@@ -643,7 +644,7 @@ static int rt_mutex_adjust_prio_chain(st
- rt_mutex_enqueue(lock, waiter);
-
- /* [8] Release the task */
-- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-+ raw_spin_unlock(&task->pi_lock);
- put_task_struct(task);
-
- /*
-@@ -661,14 +662,14 @@ static int rt_mutex_adjust_prio_chain(st
- */
- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
- wake_up_process(rt_mutex_top_waiter(lock)->task);
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irq(&lock->wait_lock);
- return 0;
- }
-
- /* [10] Grab the next task, i.e. the owner of @lock */
- task = rt_mutex_owner(lock);
- get_task_struct(task);
-- raw_spin_lock_irqsave(&task->pi_lock, flags);
-+ raw_spin_lock(&task->pi_lock);
-
- /* [11] requeue the pi waiters if necessary */
- if (waiter == rt_mutex_top_waiter(lock)) {
-@@ -722,8 +723,8 @@ static int rt_mutex_adjust_prio_chain(st
- top_waiter = rt_mutex_top_waiter(lock);
-
- /* [13] Drop the locks */
-- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock(&task->pi_lock);
-+ raw_spin_unlock_irq(&lock->wait_lock);
-
- /*
- * Make the actual exit decisions [12], based on the stored
-@@ -746,7 +747,7 @@ static int rt_mutex_adjust_prio_chain(st
- goto again;
-
- out_unlock_pi:
-- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-+ raw_spin_unlock_irq(&task->pi_lock);
- out_put_task:
- put_task_struct(task);
-
-@@ -756,7 +757,7 @@ static int rt_mutex_adjust_prio_chain(st
- /*
- * Try to take an rt-mutex
- *
-- * Must be called with lock->wait_lock held.
-+ * Must be called with lock->wait_lock held and interrupts disabled
- *
- * @lock: The lock to be acquired.
- * @task: The task which wants to acquire the lock
-@@ -766,8 +767,6 @@ static int rt_mutex_adjust_prio_chain(st
- static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- struct rt_mutex_waiter *waiter)
- {
-- unsigned long flags;
--
- /*
- * Before testing whether we can acquire @lock, we set the
- * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
-@@ -852,7 +851,7 @@ static int try_to_take_rt_mutex(struct r
- * case, but conditionals are more expensive than a redundant
- * store.
- */
-- raw_spin_lock_irqsave(&task->pi_lock, flags);
-+ raw_spin_lock(&task->pi_lock);
- task->pi_blocked_on = NULL;
- /*
- * Finish the lock acquisition. @task is the new owner. If
-@@ -861,7 +860,7 @@ static int try_to_take_rt_mutex(struct r
- */
- if (rt_mutex_has_waiters(lock))
- rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
-- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-+ raw_spin_unlock(&task->pi_lock);
-
- takeit:
- /* We got the lock. */
-@@ -883,7 +882,7 @@ static int try_to_take_rt_mutex(struct r
- *
- * Prepare waiter and propagate pi chain
- *
-- * This must be called with lock->wait_lock held.
-+ * This must be called with lock->wait_lock held and interrupts disabled
- */
- static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
- struct rt_mutex_waiter *waiter,
-@@ -894,7 +893,6 @@ static int task_blocks_on_rt_mutex(struc
- struct rt_mutex_waiter *top_waiter = waiter;
- struct rt_mutex *next_lock;
- int chain_walk = 0, res;
-- unsigned long flags;
-
- /*
- * Early deadlock detection. We really don't want the task to
-@@ -908,7 +906,7 @@ static int task_blocks_on_rt_mutex(struc
- if (owner == task)
- return -EDEADLK;
-
-- raw_spin_lock_irqsave(&task->pi_lock, flags);
-+ raw_spin_lock(&task->pi_lock);
- __rt_mutex_adjust_prio(task);
- waiter->task = task;
- waiter->lock = lock;
-@@ -921,12 +919,12 @@ static int task_blocks_on_rt_mutex(struc
-
- task->pi_blocked_on = waiter;
-
-- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-+ raw_spin_unlock(&task->pi_lock);
-
- if (!owner)
- return 0;
-
-- raw_spin_lock_irqsave(&owner->pi_lock, flags);
-+ raw_spin_lock(&owner->pi_lock);
- if (waiter == rt_mutex_top_waiter(lock)) {
- rt_mutex_dequeue_pi(owner, top_waiter);
- rt_mutex_enqueue_pi(owner, waiter);
-@@ -941,7 +939,7 @@ static int task_blocks_on_rt_mutex(struc
- /* Store the lock on which owner is blocked or NULL */
- next_lock = task_blocked_on_lock(owner);
-
-- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-+ raw_spin_unlock(&owner->pi_lock);
- /*
- * Even if full deadlock detection is on, if the owner is not
- * blocked itself, we can avoid finding this out in the chain
-@@ -957,12 +955,12 @@ static int task_blocks_on_rt_mutex(struc
- */
- get_task_struct(owner);
-
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irq(&lock->wait_lock);
-
- res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
- next_lock, waiter, task);
-
-- raw_spin_lock(&lock->wait_lock);
-+ raw_spin_lock_irq(&lock->wait_lock);
-
- return res;
- }
-@@ -971,15 +969,14 @@ static int task_blocks_on_rt_mutex(struc
- * Remove the top waiter from the current tasks pi waiter tree and
- * queue it up.
- *
-- * Called with lock->wait_lock held.
-+ * Called with lock->wait_lock held and interrupts disabled.
- */
- static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
- struct rt_mutex *lock)
- {
- struct rt_mutex_waiter *waiter;
-- unsigned long flags;
-
-- raw_spin_lock_irqsave(&current->pi_lock, flags);
-+ raw_spin_lock(&current->pi_lock);
-
- waiter = rt_mutex_top_waiter(lock);
-
-@@ -1001,7 +998,7 @@ static void mark_wakeup_next_waiter(stru
- */
- lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
-
-- raw_spin_unlock_irqrestore(&current->pi_lock, flags);
-+ raw_spin_unlock(&current->pi_lock);
-
- wake_q_add(wake_q, waiter->task);
- }
-@@ -1009,7 +1006,7 @@ static void mark_wakeup_next_waiter(stru
- /*
- * Remove a waiter from a lock and give up
- *
-- * Must be called with lock->wait_lock held and
-+ * Must be called with lock->wait_lock held and interrupts disabled. I must
- * have just failed to try_to_take_rt_mutex().
- */
- static void remove_waiter(struct rt_mutex *lock,
-@@ -1018,12 +1015,11 @@ static void remove_waiter(struct rt_mute
- bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
- struct task_struct *owner = rt_mutex_owner(lock);
- struct rt_mutex *next_lock;
-- unsigned long flags;
-
-- raw_spin_lock_irqsave(&current->pi_lock, flags);
-+ raw_spin_lock(&current->pi_lock);
- rt_mutex_dequeue(lock, waiter);
- current->pi_blocked_on = NULL;
-- raw_spin_unlock_irqrestore(&current->pi_lock, flags);
-+ raw_spin_unlock(&current->pi_lock);
-
- /*
- * Only update priority if the waiter was the highest priority
-@@ -1032,7 +1028,7 @@ static void remove_waiter(struct rt_mute
- if (!owner || !is_top_waiter)
- return;
-
-- raw_spin_lock_irqsave(&owner->pi_lock, flags);
-+ raw_spin_lock(&owner->pi_lock);
-
- rt_mutex_dequeue_pi(owner, waiter);
-
-@@ -1044,7 +1040,7 @@ static void remove_waiter(struct rt_mute
- /* Store the lock on which owner is blocked or NULL */
- next_lock = task_blocked_on_lock(owner);
-
-- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-+ raw_spin_unlock(&owner->pi_lock);
-
- /*
- * Don't walk the chain, if the owner task is not blocked
-@@ -1056,12 +1052,12 @@ static void remove_waiter(struct rt_mute
- /* gets dropped in rt_mutex_adjust_prio_chain()! */
- get_task_struct(owner);
-
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irq(&lock->wait_lock);
-
- rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
- next_lock, NULL, current);
-
-- raw_spin_lock(&lock->wait_lock);
-+ raw_spin_lock_irq(&lock->wait_lock);
- }
-
- /*
-@@ -1097,11 +1093,11 @@ void rt_mutex_adjust_pi(struct task_stru
- * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
- * @lock: the rt_mutex to take
- * @state: the state the task should block in (TASK_INTERRUPTIBLE
-- * or TASK_UNINTERRUPTIBLE)
-+ * or TASK_UNINTERRUPTIBLE)
- * @timeout: the pre-initialized and started timer, or NULL for none
- * @waiter: the pre-initialized rt_mutex_waiter
- *
-- * lock->wait_lock must be held by the caller.
-+ * Must be called with lock->wait_lock held and interrupts disabled
- */
- static int __sched
- __rt_mutex_slowlock(struct rt_mutex *lock, int state,
-@@ -1129,13 +1125,13 @@ static int __sched
- break;
- }
-
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irq(&lock->wait_lock);
-
- debug_rt_mutex_print_deadlock(waiter);
-
- schedule();
-
-- raw_spin_lock(&lock->wait_lock);
-+ raw_spin_lock_irq(&lock->wait_lock);
- set_current_state(state);
- }
-
-@@ -1172,17 +1168,26 @@ rt_mutex_slowlock(struct rt_mutex *lock,
- enum rtmutex_chainwalk chwalk)
- {
- struct rt_mutex_waiter waiter;
-+ unsigned long flags;
- int ret = 0;
-
- debug_rt_mutex_init_waiter(&waiter);
- RB_CLEAR_NODE(&waiter.pi_tree_entry);
- RB_CLEAR_NODE(&waiter.tree_entry);
-
-- raw_spin_lock(&lock->wait_lock);
-+ /*
-+ * Technically we could use raw_spin_[un]lock_irq() here, but this can
-+ * be called in early boot if the cmpxchg() fast path is disabled
-+ * (debug, no architecture support). In this case we will acquire the
-+ * rtmutex with lock->wait_lock held. But we cannot unconditionally
-+ * enable interrupts in that early boot case. So we need to use the
-+ * irqsave/restore variants.
-+ */
-+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
- /* Try to acquire the lock again: */
- if (try_to_take_rt_mutex(lock, current, NULL)) {
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
- return 0;
- }
-
-@@ -1211,7 +1216,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
- */
- fixup_rt_mutex_waiters(lock);
-
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
- /* Remove pending timer: */
- if (unlikely(timeout))
-@@ -1227,6 +1232,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
- */
- static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
- {
-+ unsigned long flags;
- int ret;
-
- /*
-@@ -1238,10 +1244,10 @@ static inline int rt_mutex_slowtrylock(s
- return 0;
-
- /*
-- * The mutex has currently no owner. Lock the wait lock and
-- * try to acquire the lock.
-+ * The mutex has currently no owner. Lock the wait lock and try to
-+ * acquire the lock. We use irqsave here to support early boot calls.
- */
-- raw_spin_lock(&lock->wait_lock);
-+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
- ret = try_to_take_rt_mutex(lock, current, NULL);
-
-@@ -1251,7 +1257,7 @@ static inline int rt_mutex_slowtrylock(s
- */
- fixup_rt_mutex_waiters(lock);
-
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
- return ret;
- }
-@@ -1263,7 +1269,10 @@ static inline int rt_mutex_slowtrylock(s
- static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
- struct wake_q_head *wake_q)
- {
-- raw_spin_lock(&lock->wait_lock);
-+ unsigned long flags;
-+
-+ /* irqsave required to support early boot calls */
-+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
- debug_rt_mutex_unlock(lock);
-
-@@ -1302,10 +1311,10 @@ static bool __sched rt_mutex_slowunlock(
- */
- while (!rt_mutex_has_waiters(lock)) {
- /* Drops lock->wait_lock ! */
-- if (unlock_rt_mutex_safe(lock) == true)
-+ if (unlock_rt_mutex_safe(lock, flags) == true)
- return false;
- /* Relock the rtmutex and try again */
-- raw_spin_lock(&lock->wait_lock);
-+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
- }
-
- /*
-@@ -1316,7 +1325,7 @@ static bool __sched rt_mutex_slowunlock(
- */
- mark_wakeup_next_waiter(wake_q, lock);
-
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
- /* check PI boosting */
- return true;
-@@ -1596,10 +1605,10 @@ int rt_mutex_start_proxy_lock(struct rt_
- {
- int ret;
-
-- raw_spin_lock(&lock->wait_lock);
-+ raw_spin_lock_irq(&lock->wait_lock);
-
- if (try_to_take_rt_mutex(lock, task, NULL)) {
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irq(&lock->wait_lock);
- return 1;
- }
-
-@@ -1620,7 +1629,7 @@ int rt_mutex_start_proxy_lock(struct rt_
- if (unlikely(ret))
- remove_waiter(lock, waiter);
-
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irq(&lock->wait_lock);
-
- debug_rt_mutex_print_deadlock(waiter);
-
-@@ -1668,7 +1677,7 @@ int rt_mutex_finish_proxy_lock(struct rt
- {
- int ret;
-
-- raw_spin_lock(&lock->wait_lock);
-+ raw_spin_lock_irq(&lock->wait_lock);
-
- set_current_state(TASK_INTERRUPTIBLE);
-
-@@ -1684,7 +1693,7 @@ int rt_mutex_finish_proxy_lock(struct rt
- */
- fixup_rt_mutex_waiters(lock);
-
-- raw_spin_unlock(&lock->wait_lock);
-+ raw_spin_unlock_irq(&lock->wait_lock);
-
- return ret;
- }
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index 80f6adab1059d0..bbfc25f1ba43cc 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1822,6 +1822,16 @@ static int futex_requeue(u32 __user *uad
+@@ -1886,6 +1886,16 @@ static int futex_requeue(u32 __user *uad
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
@@ -27,12 +27,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * tried to enqueue it on the rtmutex.
+ */
+ this->pi_state = NULL;
-+ free_pi_state(pi_state);
++ put_pi_state(pi_state);
+ continue;
} else if (ret) {
- /* -EDEADLK */
- this->pi_state = NULL;
-@@ -2691,7 +2701,7 @@ static int futex_wait_requeue_pi(u32 __u
+ /*
+ * rt_mutex_start_proxy_lock() detected a
+@@ -2776,7 +2786,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -2750,20 +2760,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2835,20 +2845,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -2772,14 +2817,15 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2857,14 +2902,15 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -120,13 +120,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
*/
- free_pi_state(q.pi_state);
+ put_pi_state(q.pi_state);
- spin_unlock(q.lock_ptr);
+ spin_unlock(&hb2->lock);
}
} else {
/*
-@@ -2792,7 +2838,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2877,7 +2923,8 @@ static int futex_wait_requeue_pi(u32 __u
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
debug_rt_mutex_free_waiter(&rt_waiter);
diff --git a/patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch b/patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch
index 9bb1840d48d269..7a4689046cef86 100644
--- a/patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch
+++ b/patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch
@@ -126,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -127,8 +127,8 @@ struct hotplug_pcp {
+@@ -204,8 +204,8 @@ struct hotplug_pcp {
};
#ifdef CONFIG_PREEMPT_RT_FULL
diff --git a/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch b/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
index b87293766678d3..2e9046666606ad 100644
--- a/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
+++ b/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
-@@ -1230,7 +1230,7 @@ static int sc16is7xx_probe(struct device
+@@ -1251,7 +1251,7 @@ static int sc16is7xx_probe(struct device
/* Setup interrupt */
ret = devm_request_irq(dev, irq, sc16is7xx_irq,
diff --git a/patches/sched-cputime-Clarify-vtime-symbols-and-document-the.patch b/patches/sched-cputime-Clarify-vtime-symbols-and-document-the.patch
deleted file mode 100644
index c312211c70c96c..00000000000000
--- a/patches/sched-cputime-Clarify-vtime-symbols-and-document-the.patch
+++ /dev/null
@@ -1,89 +0,0 @@
-From: Frederic Weisbecker <fweisbec@gmail.com>
-Date: Thu, 19 Nov 2015 16:47:30 +0100
-Subject: [PATCH] sched/cputime: Clarify vtime symbols and document them
-
-VTIME_SLEEPING state happens either when:
-
-1) The task is sleeping and no tickless delta is to be added on the task
- cputime stats.
-2) The CPU isn't running vtime at all, so the same properties of 1) applies.
-
-Lets rename the vtime symbol to reflect both states.
-
-Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Cc: Chris Metcalf <cmetcalf@ezchip.com>
-Cc: Christoph Lameter <cl@linux.com>
-Cc: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Luiz Capitulino <lcapitulino@redhat.com>
-Cc: Mike Galbraith <efault@gmx.de>
-Cc: Paul E . McKenney <paulmck@linux.vnet.ibm.com>
-Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Link: http://lkml.kernel.org/r/1447948054-28668-4-git-send-email-fweisbec@gmail.com
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/sched.h | 5 ++++-
- kernel/fork.c | 2 +-
- kernel/sched/cputime.c | 6 +++---
- 3 files changed, 8 insertions(+), 5 deletions(-)
-
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1524,8 +1524,11 @@ struct task_struct {
- seqlock_t vtime_seqlock;
- unsigned long long vtime_snap;
- enum {
-- VTIME_SLEEPING = 0,
-+ /* Task is sleeping or running in a CPU with VTIME inactive */
-+ VTIME_INACTIVE = 0,
-+ /* Task runs in userspace in a CPU with VTIME active */
- VTIME_USER,
-+ /* Task runs in kernelspace in a CPU with VTIME active */
- VTIME_SYS,
- } vtime_snap_whence;
- #endif
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -1351,7 +1351,7 @@ static struct task_struct *copy_process(
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- seqlock_init(&p->vtime_seqlock);
- p->vtime_snap = 0;
-- p->vtime_snap_whence = VTIME_SLEEPING;
-+ p->vtime_snap_whence = VTIME_INACTIVE;
- #endif
-
- #if defined(SPLIT_RSS_COUNTING)
---- a/kernel/sched/cputime.c
-+++ b/kernel/sched/cputime.c
-@@ -680,7 +680,7 @@ static cputime_t get_vtime_delta(struct
- {
- unsigned long long delta = vtime_delta(tsk);
-
-- WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
-+ WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
- tsk->vtime_snap += delta;
-
- /* CHECKME: always safe to convert nsecs to cputime? */
-@@ -764,7 +764,7 @@ void vtime_account_idle(struct task_stru
- void arch_vtime_task_switch(struct task_struct *prev)
- {
- write_seqlock(&prev->vtime_seqlock);
-- prev->vtime_snap_whence = VTIME_SLEEPING;
-+ prev->vtime_snap_whence = VTIME_INACTIVE;
- write_sequnlock(&prev->vtime_seqlock);
-
- write_seqlock(&current->vtime_seqlock);
-@@ -829,7 +829,7 @@ fetch_task_cputime(struct task_struct *t
- *s_dst = *s_src;
-
- /* Task is sleeping, nothing to add */
-- if (t->vtime_snap_whence == VTIME_SLEEPING ||
-+ if (t->vtime_snap_whence == VTIME_INACTIVE ||
- is_idle_task(t))
- continue;
-
diff --git a/patches/sched-cputime-Convert-vtime_seqlock-to-seqcount.patch b/patches/sched-cputime-Convert-vtime_seqlock-to-seqcount.patch
deleted file mode 100644
index 148082500f4c3e..00000000000000
--- a/patches/sched-cputime-Convert-vtime_seqlock-to-seqcount.patch
+++ /dev/null
@@ -1,206 +0,0 @@
-From: Frederic Weisbecker <fweisbec@gmail.com>
-Date: Thu, 19 Nov 2015 16:47:34 +0100
-Subject: [PATCH] sched/cputime: Convert vtime_seqlock to seqcount
-
-The cputime can only be updated by the current task itself, even in
-vtime case. So we can safely use seqcount instead of seqlock as there
-is no writer concurrency involved.
-
-[ bigeasy: safe since 6a61671bb2f3 ("cputime: Safely read cputime of
-full dynticks CPUs") ]
-
-Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Cc: Chris Metcalf <cmetcalf@ezchip.com>
-Cc: Christoph Lameter <cl@linux.com>
-Cc: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Luiz Capitulino <lcapitulino@redhat.com>
-Cc: Mike Galbraith <efault@gmx.de>
-Cc: Paul E . McKenney <paulmck@linux.vnet.ibm.com>
-Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Link: http://lkml.kernel.org/r/1447948054-28668-8-git-send-email-fweisbec@gmail.com
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/init_task.h | 2 +-
- include/linux/sched.h | 2 +-
- kernel/fork.c | 2 +-
- kernel/sched/cputime.c | 46 ++++++++++++++++++++++++----------------------
- 4 files changed, 27 insertions(+), 25 deletions(-)
-
---- a/include/linux/init_task.h
-+++ b/include/linux/init_task.h
-@@ -150,7 +150,7 @@ extern struct task_group root_task_group
-
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- # define INIT_VTIME(tsk) \
-- .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
-+ .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \
- .vtime_snap = 0, \
- .vtime_snap_whence = VTIME_SYS,
- #else
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1521,7 +1521,7 @@ struct task_struct {
- cputime_t gtime;
- struct prev_cputime prev_cputime;
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-- seqlock_t vtime_seqlock;
-+ seqcount_t vtime_seqcount;
- unsigned long long vtime_snap;
- enum {
- /* Task is sleeping or running in a CPU with VTIME inactive */
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -1349,7 +1349,7 @@ static struct task_struct *copy_process(
- prev_cputime_init(&p->prev_cputime);
-
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-- seqlock_init(&p->vtime_seqlock);
-+ seqcount_init(&p->vtime_seqcount);
- p->vtime_snap = 0;
- p->vtime_snap_whence = VTIME_INACTIVE;
- #endif
---- a/kernel/sched/cputime.c
-+++ b/kernel/sched/cputime.c
-@@ -696,37 +696,37 @@ static void __vtime_account_system(struc
-
- void vtime_account_system(struct task_struct *tsk)
- {
-- write_seqlock(&tsk->vtime_seqlock);
-+ write_seqcount_begin(&tsk->vtime_seqcount);
- __vtime_account_system(tsk);
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seqcount);
- }
-
- void vtime_gen_account_irq_exit(struct task_struct *tsk)
- {
-- write_seqlock(&tsk->vtime_seqlock);
-+ write_seqcount_begin(&tsk->vtime_seqcount);
- __vtime_account_system(tsk);
- if (context_tracking_in_user())
- tsk->vtime_snap_whence = VTIME_USER;
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seqcount);
- }
-
- void vtime_account_user(struct task_struct *tsk)
- {
- cputime_t delta_cpu;
-
-- write_seqlock(&tsk->vtime_seqlock);
-+ write_seqcount_begin(&tsk->vtime_seqcount);
- delta_cpu = get_vtime_delta(tsk);
- tsk->vtime_snap_whence = VTIME_SYS;
- account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seqcount);
- }
-
- void vtime_user_enter(struct task_struct *tsk)
- {
-- write_seqlock(&tsk->vtime_seqlock);
-+ write_seqcount_begin(&tsk->vtime_seqcount);
- __vtime_account_system(tsk);
- tsk->vtime_snap_whence = VTIME_USER;
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seqcount);
- }
-
- void vtime_guest_enter(struct task_struct *tsk)
-@@ -738,19 +738,19 @@ void vtime_guest_enter(struct task_struc
- * synchronization against the reader (task_gtime())
- * that can thus safely catch up with a tickless delta.
- */
-- write_seqlock(&tsk->vtime_seqlock);
-+ write_seqcount_begin(&tsk->vtime_seqcount);
- __vtime_account_system(tsk);
- current->flags |= PF_VCPU;
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seqcount);
- }
- EXPORT_SYMBOL_GPL(vtime_guest_enter);
-
- void vtime_guest_exit(struct task_struct *tsk)
- {
-- write_seqlock(&tsk->vtime_seqlock);
-+ write_seqcount_begin(&tsk->vtime_seqcount);
- __vtime_account_system(tsk);
- current->flags &= ~PF_VCPU;
-- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seqcount);
- }
- EXPORT_SYMBOL_GPL(vtime_guest_exit);
-
-@@ -763,24 +763,26 @@ void vtime_account_idle(struct task_stru
-
- void arch_vtime_task_switch(struct task_struct *prev)
- {
-- write_seqlock(&prev->vtime_seqlock);
-+ write_seqcount_begin(&prev->vtime_seqcount);
- prev->vtime_snap_whence = VTIME_INACTIVE;
-- write_sequnlock(&prev->vtime_seqlock);
-+ write_seqcount_end(&prev->vtime_seqcount);
-
-- write_seqlock(&current->vtime_seqlock);
-+ write_seqcount_begin(&current->vtime_seqcount);
- current->vtime_snap_whence = VTIME_SYS;
- current->vtime_snap = sched_clock_cpu(smp_processor_id());
-- write_sequnlock(&current->vtime_seqlock);
-+ write_seqcount_end(&current->vtime_seqcount);
- }
-
- void vtime_init_idle(struct task_struct *t, int cpu)
- {
- unsigned long flags;
-
-- write_seqlock_irqsave(&t->vtime_seqlock, flags);
-+ local_irq_save(flags);
-+ write_seqcount_begin(&t->vtime_seqcount);
- t->vtime_snap_whence = VTIME_SYS;
- t->vtime_snap = sched_clock_cpu(cpu);
-- write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
-+ write_seqcount_end(&t->vtime_seqcount);
-+ local_irq_restore(flags);
- }
-
- cputime_t task_gtime(struct task_struct *t)
-@@ -792,13 +794,13 @@ cputime_t task_gtime(struct task_struct
- return t->gtime;
-
- do {
-- seq = read_seqbegin(&t->vtime_seqlock);
-+ seq = read_seqcount_begin(&t->vtime_seqcount);
-
- gtime = t->gtime;
- if (t->flags & PF_VCPU)
- gtime += vtime_delta(t);
-
-- } while (read_seqretry(&t->vtime_seqlock, seq));
-+ } while (read_seqcount_retry(&t->vtime_seqcount, seq));
-
- return gtime;
- }
-@@ -821,7 +823,7 @@ fetch_task_cputime(struct task_struct *t
- *udelta = 0;
- *sdelta = 0;
-
-- seq = read_seqbegin(&t->vtime_seqlock);
-+ seq = read_seqcount_begin(&t->vtime_seqcount);
-
- if (u_dst)
- *u_dst = *u_src;
-@@ -845,7 +847,7 @@ fetch_task_cputime(struct task_struct *t
- if (t->vtime_snap_whence == VTIME_SYS)
- *sdelta = delta;
- }
-- } while (read_seqretry(&t->vtime_seqlock, seq));
-+ } while (read_seqcount_retry(&t->vtime_seqcount, seq));
- }
-
-
diff --git a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch b/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
index 7620c76e13531a..75a27727b0a612 100644
--- a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
+++ b/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -697,6 +697,7 @@ void init_dl_task_timer(struct sched_dl_
+@@ -694,6 +694,7 @@ void init_dl_task_timer(struct sched_dl_
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer->function = dl_task_timer;
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index c175ef65b72106..eb42239a7335cc 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1831,6 +1831,9 @@ struct task_struct {
+@@ -1865,6 +1865,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
-@@ -2040,6 +2043,15 @@ extern struct pid *cad_pid;
+@@ -2077,6 +2080,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -2047,6 +2059,7 @@ static inline void put_task_struct(struc
+@@ -2084,6 +2096,7 @@ static inline void put_task_struct(struc
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void task_cputime(struct task_struct *t,
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -244,7 +244,9 @@ static inline void put_signal_struct(str
+@@ -253,7 +253,9 @@ static inline void put_signal_struct(str
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
-@@ -261,7 +263,18 @@ void __put_task_struct(struct task_struc
+@@ -270,7 +272,18 @@ void __put_task_struct(struct task_struc
if (!profile_handoff_task(tsk))
free_task(tsk);
}
diff --git a/patches/sched-disable-rt-group-sched-on-rt.patch b/patches/sched-disable-rt-group-sched-on-rt.patch
index fe17c667fbba97..8283585ac888f0 100644
--- a/patches/sched-disable-rt-group-sched-on-rt.patch
+++ b/patches/sched-disable-rt-group-sched-on-rt.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1106,6 +1106,7 @@ config CFS_BANDWIDTH
+@@ -1029,6 +1029,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
diff --git a/patches/sched-limit-nr-migrate.patch b/patches/sched-limit-nr-migrate.patch
index ac934db3442c8d..d1a24b83a8232e 100644
--- a/patches/sched-limit-nr-migrate.patch
+++ b/patches/sched-limit-nr-migrate.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -260,7 +260,11 @@ late_initcall(sched_init_debug);
+@@ -128,7 +128,11 @@ const_debug unsigned int sysctl_sched_fe
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index 78ab45f37db76f..e61553c194a7aa 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -292,6 +292,11 @@ void synchronize_rcu(void);
+@@ -300,6 +300,11 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* #ifdef CONFIG_PREEMPT_RCU */
-@@ -317,6 +322,8 @@ static inline int rcu_preempt_depth(void
+@@ -325,6 +330,8 @@ static inline int rcu_preempt_depth(void
return 0;
}
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7658,7 +7658,7 @@ void __init sched_init(void)
+@@ -7534,7 +7534,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 8ffacbd2993f63..c3561909065565 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
#include <asm/mmu.h>
-@@ -504,6 +505,9 @@ struct mm_struct {
+@@ -502,6 +503,9 @@ struct mm_struct {
bool tlb_flush_pending;
#endif
struct uprobes_state uprobes_state;
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __user *bd_addr;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2606,12 +2606,24 @@ extern struct mm_struct * mm_alloc(void)
+@@ -2640,12 +2640,24 @@ extern struct mm_struct * mm_alloc(void)
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
@@ -62,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Grab a reference to a task's mm, if it is not already going away */
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -702,6 +702,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -712,6 +712,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2593,8 +2593,12 @@ static struct rq *finish_task_switch(str
+@@ -2642,8 +2642,12 @@ static struct rq *finish_task_switch(str
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -5245,6 +5249,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5299,6 +5303,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -107,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
-@@ -5259,7 +5265,11 @@ void idle_task_exit(void)
+@@ -5313,7 +5319,11 @@ void idle_task_exit(void)
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5632,6 +5642,10 @@ migration_call(struct notifier_block *nf
+@@ -5509,6 +5519,10 @@ migration_call(struct notifier_block *nf
case CPU_DEAD:
calc_load_migrate(rq);
diff --git a/patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch b/patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch
index 358c84627ccff8..5d6a0fc046ce91 100644
--- a/patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch
+++ b/patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1836,6 +1836,11 @@ extern int arch_task_struct_size __read_
+@@ -1871,6 +1871,11 @@ extern int arch_task_struct_size __read_
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define TNF_SHARED 0x04
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1624,7 +1624,7 @@ int select_task_rq(struct task_struct *p
+@@ -1515,7 +1515,7 @@ int select_task_rq(struct task_struct *p
{
lockdep_assert_held(&p->pi_lock);
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
dl_rq->dl_nr_migratory--;
update_dl_migration(dl_rq);
-@@ -989,7 +989,7 @@ static void enqueue_task_dl(struct rq *r
+@@ -966,7 +966,7 @@ static void enqueue_task_dl(struct rq *r
enqueue_dl_entity(&p->dl, pi_se, flags);
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
enqueue_pushable_dl_task(rq, p);
}
-@@ -1067,9 +1067,9 @@ select_task_rq_dl(struct task_struct *p,
+@@ -1040,9 +1040,9 @@ select_task_rq_dl(struct task_struct *p,
* try to make it stay here, it might be important.
*/
if (unlikely(dl_task(curr)) &&
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int target = find_later_rq(p);
if (target != -1 &&
-@@ -1090,7 +1090,7 @@ static void check_preempt_equal_dl(struc
+@@ -1063,7 +1063,7 @@ static void check_preempt_equal_dl(struc
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
return;
-@@ -1098,7 +1098,7 @@ static void check_preempt_equal_dl(struc
+@@ -1071,7 +1071,7 @@ static void check_preempt_equal_dl(struc
* p is migratable, so let's not schedule it and
* see if it is pushed or pulled somewhere else.
*/
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
return;
-@@ -1212,7 +1212,7 @@ static void put_prev_task_dl(struct rq *
+@@ -1185,7 +1185,7 @@ static void put_prev_task_dl(struct rq *
{
update_curr_dl(rq);
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
enqueue_pushable_dl_task(rq, p);
}
-@@ -1335,7 +1335,7 @@ static int find_later_rq(struct task_str
+@@ -1286,7 +1286,7 @@ static int find_later_rq(struct task_str
if (unlikely(!later_mask))
return -1;
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -1;
/*
-@@ -1480,7 +1480,7 @@ static struct task_struct *pick_next_pus
+@@ -1431,7 +1431,7 @@ static struct task_struct *pick_next_pus
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
@@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
BUG_ON(!task_on_rq_queued(p));
BUG_ON(!dl_task(p));
-@@ -1519,7 +1519,7 @@ static int push_dl_task(struct rq *rq)
+@@ -1470,7 +1470,7 @@ static int push_dl_task(struct rq *rq)
*/
if (dl_task(rq->curr) &&
dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
resched_curr(rq);
return 0;
}
-@@ -1666,9 +1666,9 @@ static void task_woken_dl(struct rq *rq,
+@@ -1617,9 +1617,9 @@ static void task_woken_dl(struct rq *rq,
{
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
@@ -146,8 +146,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
!dl_entity_preempt(&p->dl, &rq->curr->dl))) {
push_dl_tasks(rq);
}
-@@ -1769,7 +1769,7 @@ static void switched_to_dl(struct rq *rq
- {
+@@ -1723,7 +1723,7 @@ static void switched_to_dl(struct rq *rq
+
if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
- if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (dl_task(rq->curr))
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -326,7 +326,7 @@ static void inc_rt_migration(struct sche
+@@ -334,7 +334,7 @@ static void inc_rt_migration(struct sche
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total++;
@@ -166,7 +166,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rt_rq->rt_nr_migratory++;
update_rt_migration(rt_rq);
-@@ -343,7 +343,7 @@ static void dec_rt_migration(struct sche
+@@ -351,7 +351,7 @@ static void dec_rt_migration(struct sche
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total--;
@@ -175,16 +175,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rt_rq->rt_nr_migratory--;
update_rt_migration(rt_rq);
-@@ -1262,7 +1262,7 @@ enqueue_task_rt(struct rq *rq, struct ta
+@@ -1324,7 +1324,7 @@ enqueue_task_rt(struct rq *rq, struct ta
- enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
+ enqueue_rt_entity(rt_se, flags);
- if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+ if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
enqueue_pushable_task(rq, p);
}
-@@ -1351,7 +1351,7 @@ select_task_rq_rt(struct task_struct *p,
+@@ -1413,7 +1413,7 @@ select_task_rq_rt(struct task_struct *p,
* will have to sort it out.
*/
if (curr && unlikely(rt_task(curr)) &&
@@ -193,7 +193,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
curr->prio <= p->prio)) {
int target = find_lowest_rq(p);
-@@ -1375,7 +1375,7 @@ static void check_preempt_equal_prio(str
+@@ -1437,7 +1437,7 @@ static void check_preempt_equal_prio(str
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
@@ -202,7 +202,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
return;
-@@ -1383,7 +1383,7 @@ static void check_preempt_equal_prio(str
+@@ -1445,7 +1445,7 @@ static void check_preempt_equal_prio(str
* p is migratable, so let's not schedule it and
* see if it is pushed or pulled somewhere else.
*/
@@ -211,7 +211,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
&& cpupri_find(&rq->rd->cpupri, p, NULL))
return;
-@@ -1517,7 +1517,7 @@ static void put_prev_task_rt(struct rq *
+@@ -1579,7 +1579,7 @@ static void put_prev_task_rt(struct rq *
* The previous task needs to be made eligible for pushing
* if it is still active
*/
@@ -220,7 +220,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
enqueue_pushable_task(rq, p);
}
-@@ -1567,7 +1567,7 @@ static int find_lowest_rq(struct task_st
+@@ -1629,7 +1629,7 @@ static int find_lowest_rq(struct task_st
if (unlikely(!lowest_mask))
return -1;
@@ -229,7 +229,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -1; /* No other targets possible */
if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
-@@ -1699,7 +1699,7 @@ static struct task_struct *pick_next_pus
+@@ -1761,7 +1761,7 @@ static struct task_struct *pick_next_pus
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
@@ -238,7 +238,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
BUG_ON(!task_on_rq_queued(p));
BUG_ON(!rt_task(p));
-@@ -2059,9 +2059,9 @@ static void task_woken_rt(struct rq *rq,
+@@ -2121,9 +2121,9 @@ static void task_woken_rt(struct rq *rq,
{
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
@@ -250,7 +250,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rq->curr->prio <= p->prio))
push_rt_tasks(rq);
}
-@@ -2134,7 +2134,7 @@ static void switched_to_rt(struct rq *rq
+@@ -2196,7 +2196,7 @@ static void switched_to_rt(struct rq *rq
*/
if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index fd59d1672b516a..9b895ebd399a74 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1376,6 +1376,7 @@ struct tlbflush_unmap_batch {
+@@ -1393,6 +1393,7 @@ struct tlbflush_unmap_batch {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
-@@ -2483,6 +2484,7 @@ extern void xtime_update(unsigned long t
+@@ -2517,6 +2518,7 @@ extern void xtime_update(unsigned long t
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void kick_process(struct task_struct *tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1949,8 +1949,25 @@ try_to_wake_up(struct task_struct *p, un
+@@ -1931,8 +1931,25 @@ try_to_wake_up(struct task_struct *p, un
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -62,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -2083,6 +2100,18 @@ int wake_up_process(struct task_struct *
+@@ -2061,6 +2078,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return try_to_wake_up(p, state, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1100,6 +1100,7 @@ static inline void finish_lock_switch(st
+@@ -1128,6 +1128,7 @@ static inline void finish_lock_switch(st
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
diff --git a/patches/sched-ttwu-ensure-success-return-is-correct.patch b/patches/sched-ttwu-ensure-success-return-is-correct.patch
index 06d58b4c333738..0516ff49638839 100644
--- a/patches/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/patches/sched-ttwu-ensure-success-return-is-correct.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1956,8 +1956,10 @@ try_to_wake_up(struct task_struct *p, un
+@@ -1938,8 +1938,10 @@ try_to_wake_up(struct task_struct *p, un
* if the wakeup condition is true.
*/
if (!(wake_flags & WF_LOCK_SLEEPER)) {
diff --git a/patches/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch b/patches/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch
index fa5eb8ef713f30..38b80d6b8d0624 100644
--- a/patches/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch
+++ b/patches/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We have to ensure that we have at least one bit
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -1441,7 +1441,7 @@ static struct rq *find_lock_later_rq(str
+@@ -1392,7 +1392,7 @@ static struct rq *find_lock_later_rq(str
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(later_rq->cpu,
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index fbf094715990eb..a673c48f44ee16 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3254,8 +3254,10 @@ static void __sched notrace __schedule(b
+@@ -3305,8 +3305,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
@@ -34,4 +34,4 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (prev->flags & PF_WQ_WORKER && !prev->saved_state) {
struct task_struct *to_wakeup;
- to_wakeup = wq_worker_sleeping(prev, cpu);
+ to_wakeup = wq_worker_sleeping(prev);
diff --git a/patches/seqlock-prevent-rt-starvation.patch b/patches/seqlock-prevent-rt-starvation.patch
index f5b231e4b0705d..e84613c86f24df 100644
--- a/patches/seqlock-prevent-rt-starvation.patch
+++ b/patches/seqlock-prevent-rt-starvation.patch
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/net/dst.h
+++ b/include/net/dst.h
-@@ -437,7 +437,7 @@ static inline void dst_confirm(struct ds
+@@ -449,7 +449,7 @@ static inline void dst_confirm(struct ds
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
struct sk_buff *skb)
{
diff --git a/patches/series b/patches/series
index d754ddd9c918e6..c019ba64861e01 100644
--- a/patches/series
+++ b/patches/series
@@ -5,35 +5,7 @@
############################################################
# UPSTREAM changes queued
############################################################
-rtmutex-Make-wait_lock-irq-safe.patch
-tracing-writeback-Replace-cgroup-path-to-cgroup-ino.patch
-kvm-rt-change-async-pagefault-code-locking-for-PREEM.patch
-panic-x86-Fix-re-entrance-problem-due-to-panic-on-NM.patch
-panic-x86-Allow-CPUs-to-save-registers-even-if-loopi.patch
-panic-change-nmi_panic-from-macro-to-function.patch
-sched-cputime-Clarify-vtime-symbols-and-document-the.patch
-sched-cputime-Convert-vtime_seqlock-to-seqcount.patch
-
-# AT91 queue in ARM-SOC
-0001-clk-at91-make-use-of-syscon-to-share-PMC-registers-i.patch
-0002-clk-at91-make-use-of-syscon-regmap-internally.patch
-0003-clk-at91-remove-IRQ-handling-and-use-polling.patch
-0004-clk-at91-pmc-merge-at91_pmc_init-in-atmel_pmc_probe.patch
-0005-clk-at91-pmc-move-pmc-structures-to-C-file.patch
-0006-ARM-at91-pm-simply-call-at91_pm_init.patch
-0007-ARM-at91-pm-find-and-remap-the-pmc.patch
-0008-ARM-at91-pm-move-idle-functions-to-pm.c.patch
-0009-ARM-at91-remove-useless-includes-and-function-protot.patch
-0010-usb-gadget-atmel-access-the-PMC-using-regmap.patch
-0011-clk-at91-pmc-drop-at91_pmc_base.patch
-0012-clk-at91-pmc-remove-useless-capacities-handling.patch
-0013-clk-at91-remove-useless-includes.patch
-# SWAIT queue in TIP
-0001-wait.-ch-Introduce-the-simple-waitqueue-swait-implem.patch
-0002-kbuild-Add-option-to-turn-incompatible-pointer-check.patch
-0003-KVM-Use-simple-waitqueue-for-vcpu-wq.patch
-0004-rcu-Do-not-call-rcu_nocb_gp_cleanup-while-holding-rn.patch
-0005-rcu-Use-simple-wait-queues-where-possible-in-rcutree.patch
+ARM-imx-always-use-TWD-on-IMX6Q.patch
############################################################
# UPSTREAM FIXES, patches pending
@@ -44,11 +16,9 @@ sched-cputime-Convert-vtime_seqlock-to-seqcount.patch
############################################################
sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch
sched-provide-a-tsk_nr_cpus_allowed-helper.patch
-drivers-cpuidle-coupled-fix-warning-cpuidle_coupled_.patch
-drivers-media-vsp1_video-fix-compile-error.patch
sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
-f2fs_Mutex_cant_be_used_by_down_write_nest_lock().patch
-ARM-imx-always-use-TWD-on-IMX6Q.patch
+crypto-ccp-remove-rwlocks_types.h.patch
+infiniband-ulp-ipoib-remove-pkey_mutex.patch
# Those two should vanish soon (not use PIT during bootup)
at91_dont_enable_disable_clock.patch
@@ -63,15 +33,12 @@ rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patc
# Stuff broken upstream, need to be sent
############################################################
rtmutex--Handle-non-enqueued-waiters-gracefully.patch
-kernel-sched-fix-preempt_disable_ip-recodring-for-pr.patch
# Wants a different fix for upstream
-iommu-amd--Use-WARN_ON_NORT.patch
############################################################
# Submitted on LKML
############################################################
-genirq-Add-default-affinity-mask-command-line-option.patch
# SPARC part of erly printk consolidation
sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -202,6 +169,7 @@ kconfig-preempt-rt-full.patch
# WARN/BUG_ON_RT
bug-rt-dependend-variants.patch
+iommu-amd--Use-WARN_ON_NORT.patch
# LOCAL_IRQ_RT/NON_RT
local-irq-rt-depending-variants.patch
@@ -348,6 +316,8 @@ rtmutex-trylock-is-okay-on-RT.patch
# RAID5
md-raid5-percpu-handling-rt-aware.patch
+#
+i915_compile_fix.patch
# FUTEX/RTMUTEX
rtmutex-futex-prepare-rt.patch
@@ -364,21 +334,15 @@ rtmutex_dont_include_rcu.patch
rt-add-rt-locks.patch
rtmutex-Use-chainwalking-control-enum.patch
rtmutex-add-a-first-shot-of-ww_mutex.patch
-
ptrace-fix-ptrace-vs-tasklist_lock-race.patch
-# RTMUTEX Fallout
-tasklist-lock-fix-section-conflict.patch
-#fold
-ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch
-
# RCU
peter_zijlstra-frob-rcu.patch
rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
-rcu-disable-more-spots-of-rcu_bh.patch
rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch
+rcu-disable-more-spots-of-rcu_bh.patch
# LGLOCKS - lovely
lglocks-rt.patch
@@ -387,7 +351,6 @@ lockinglglocks_Use_preempt_enabledisable_nort()_in_lg_double_locklg_double_unloc
# STOP machine (depend on lglock & rtmutex)
stomp-machine-create-lg_global_trylock_relax-primiti.patch
stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
-kernel-stop_machine-partly-revert-stop_machine-Use-r.patch
# DRIVERS SERIAL
drivers-tty-fix-omap-lock-crap.patch
@@ -431,8 +394,6 @@ cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
# block
blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
block-blk-mq-use-swait.patch
-# XXX melt
-block-mq-drop-per-ctx-cpu_lock.patch
# BLOCK LIVELOCK PREVENTION
block-use-cpu-chill.patch
@@ -602,7 +563,6 @@ cpufreq-drop-K8-s-driver-from-beeing-selected.patch
drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
# I915
-i915_compile_fix.patch
drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index bf6bc953dd4259..ee5ed64adafd89 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1568,6 +1568,7 @@ struct task_struct {
+@@ -1589,6 +1589,7 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int valid_signal(unsigned long sig)
--- a/kernel/exit.c
+++ b/kernel/exit.c
-@@ -144,7 +144,7 @@ static void __exit_signal(struct task_st
+@@ -143,7 +143,7 @@ static void __exit_signal(struct task_st
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1343,6 +1343,7 @@ static struct task_struct *copy_process(
+@@ -1352,6 +1352,7 @@ static struct task_struct *copy_process(
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index fc2f5dcc3fece9..806824a6d22ade 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2564,6 +2564,7 @@ struct softnet_data {
+@@ -2773,6 +2773,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -203,6 +203,7 @@ struct sk_buff_head {
+@@ -283,6 +283,7 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
struct sk_buff;
-@@ -1464,6 +1465,12 @@ static inline void skb_queue_head_init(s
+@@ -1537,6 +1538,12 @@ static inline void skb_queue_head_init(s
__skb_queue_head_init(list);
}
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -207,14 +207,14 @@ static inline struct hlist_head *dev_ind
+@@ -209,14 +209,14 @@ static inline struct hlist_head *dev_ind
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -4050,7 +4050,7 @@ static void flush_backlog(void *arg)
+@@ -4265,7 +4265,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -4059,10 +4059,13 @@ static void flush_backlog(void *arg)
+@@ -4274,10 +4274,13 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int napi_gro_complete(struct sk_buff *skb)
-@@ -7474,6 +7477,9 @@ static int dev_cpu_callback(struct notif
+@@ -7790,6 +7793,9 @@ static int dev_cpu_callback(struct notif
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NOTIFY_OK;
}
-@@ -7775,8 +7781,9 @@ static int __init net_dev_init(void)
+@@ -8091,8 +8097,9 @@ static int __init net_dev_init(void)
for_each_possible_cpu(i) {
struct softnet_data *sd = &per_cpu(softnet_data, i);
diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
index 810e48b8a1ef2d..976fbfb5f1707a 100644
--- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1748,7 +1748,7 @@ endchoice
+@@ -1746,7 +1746,7 @@ endchoice
config SLUB_CPU_PARTIAL
default y
diff --git a/patches/slub-enable-irqs-for-no-wait.patch b/patches/slub-enable-irqs-for-no-wait.patch
index ef8211a5f57418..48956de951256b 100644
--- a/patches/slub-enable-irqs-for-no-wait.patch
+++ b/patches/slub-enable-irqs-for-no-wait.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1405,14 +1405,17 @@ static struct page *allocate_slab(struct
+@@ -1418,14 +1418,17 @@ static struct page *allocate_slab(struct
gfp_t alloc_gfp;
void *start, *p;
int idx, order;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
flags |= s->allocflags;
-@@ -1483,11 +1486,7 @@ static struct page *allocate_slab(struct
+@@ -1496,11 +1499,7 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
diff --git a/patches/softirq-disable-softirq-stacks-for-rt.patch b/patches/softirq-disable-softirq-stacks-for-rt.patch
index b4eeadf2c26825..d6b1d5e8234efe 100644
--- a/patches/softirq-disable-softirq-stacks-for-rt.patch
+++ b/patches/softirq-disable-softirq-stacks-for-rt.patch
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void fixup_irqs(void)
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -867,6 +867,7 @@ END(native_load_gs_index)
+@@ -799,6 +799,7 @@ END(native_load_gs_index)
jmp 2b
.previous
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
pushq %rbp
-@@ -879,6 +880,7 @@ ENTRY(do_softirq_own_stack)
+@@ -811,6 +812,7 @@ ENTRY(do_softirq_own_stack)
decl PER_CPU_VAR(irq_count)
ret
END(do_softirq_own_stack)
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -447,7 +447,7 @@ struct softirq_action
+@@ -458,7 +458,7 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 56cd596b5567b9..fe2e349d0ec789 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -14,38 +14,12 @@ Reported-by: Carsten Emde <cbe@osadl.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- block/blk-iopoll.c | 3 +++
block/blk-softirq.c | 3 +++
include/linux/preempt.h | 3 +++
+ lib/irq_poll.c | 5 +++++
net/core/dev.c | 7 +++++++
- 4 files changed, 16 insertions(+)
+ 4 files changed, 18 insertions(+)
---- a/block/blk-iopoll.c
-+++ b/block/blk-iopoll.c
-@@ -35,6 +35,7 @@ void blk_iopoll_sched(struct blk_iopoll
- list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
- __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
- local_irq_restore(flags);
-+ preempt_check_resched_rt();
- }
- EXPORT_SYMBOL(blk_iopoll_sched);
-
-@@ -132,6 +133,7 @@ static void blk_iopoll_softirq(struct so
- __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
-
- local_irq_enable();
-+ preempt_check_resched_rt();
- }
-
- /**
-@@ -201,6 +203,7 @@ static int blk_iopoll_cpu_notify(struct
- this_cpu_ptr(&blk_cpu_iopoll));
- __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
- local_irq_enable();
-+ preempt_check_resched_rt();
- }
-
- return NOTIFY_OK;
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
@@ -93,9 +67,51 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define preemptible() 0
#endif /* CONFIG_PREEMPT_COUNT */
+--- a/lib/irq_poll.c
++++ b/lib/irq_poll.c
+@@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop
+ list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(irq_poll_sched);
+
+@@ -71,6 +72,7 @@ void irq_poll_complete(struct irq_poll *
+ local_irq_save(flags);
+ __irq_poll_complete(iop);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(irq_poll_complete);
+
+@@ -95,6 +97,7 @@ static void irq_poll_softirq(struct soft
+ }
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ /* Even though interrupts have been re-enabled, this
+ * access is safe because interrupts can only add new
+@@ -132,6 +135,7 @@ static void irq_poll_softirq(struct soft
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ /**
+@@ -199,6 +203,7 @@ static int irq_poll_cpu_notify(struct no
+ this_cpu_ptr(&blk_cpu_iopoll));
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ return NOTIFY_OK;
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2246,6 +2246,7 @@ static inline void __netif_reschedule(st
+@@ -2264,6 +2264,7 @@ static inline void __netif_reschedule(st
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -103,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __netif_schedule(struct Qdisc *q)
-@@ -2327,6 +2328,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2345,6 +2346,7 @@ void __dev_kfree_skb_irq(struct sk_buff
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -111,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -3524,6 +3526,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -3730,6 +3732,7 @@ static int enqueue_to_backlog(struct sk_
rps_unlock(sd);
local_irq_restore(flags);
@@ -119,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -4515,6 +4518,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4735,6 +4738,7 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -127,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Send pending IPI's to kick RPS processing on remote cpus. */
while (remsd) {
-@@ -4528,6 +4532,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4748,6 +4752,7 @@ static void net_rps_action_and_irq_enabl
} else
#endif
local_irq_enable();
@@ -135,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4609,6 +4614,7 @@ void __napi_schedule(struct napi_struct
+@@ -4829,6 +4834,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -143,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -7459,6 +7465,7 @@ static int dev_cpu_callback(struct notif
+@@ -7775,6 +7781,7 @@ static int dev_cpu_callback(struct notif
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index e76ddde39ead9a..5bc8e60f001aee 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* _LINUX_BH_H */
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -444,10 +444,11 @@ struct softirq_action
+@@ -455,10 +455,11 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
-@@ -455,6 +456,9 @@ static inline void do_softirq_own_stack(
+@@ -466,6 +467,9 @@ static inline void do_softirq_own_stack(
__do_softirq();
}
#endif
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-@@ -462,6 +466,7 @@ extern void __raise_softirq_irqoff(unsig
+@@ -473,6 +477,7 @@ extern void __raise_softirq_irqoff(unsig
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-@@ -619,6 +624,12 @@ void tasklet_hrtimer_cancel(struct taskl
+@@ -630,6 +635,12 @@ void tasklet_hrtimer_cancel(struct taskl
tasklet_kill(&ttimer->tasklet);
}
@@ -172,7 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Are we in NMI context?
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1834,6 +1834,8 @@ struct task_struct {
+@@ -1868,6 +1868,8 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
@@ -181,7 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
-@@ -2099,6 +2101,7 @@ extern void thread_group_cputime_adjuste
+@@ -2136,6 +2138,7 @@ extern void thread_group_cputime_adjuste
/*
* Per process flags
*/
@@ -191,14 +191,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
--- a/init/main.c
+++ b/init/main.c
-@@ -530,6 +530,7 @@ asmlinkage __visible void __init start_k
+@@ -507,6 +507,7 @@ asmlinkage __visible void __init start_k
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
+ softirq_early_init();
+ boot_cpu_state_init();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
- build_all_zonelists(NULL, NULL);
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -26,6 +26,7 @@
@@ -377,7 +377,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* preempt_count and SOFTIRQ_OFFSET usage:
* - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
-@@ -233,10 +388,8 @@ asmlinkage __visible void __do_softirq(v
+@@ -233,10 +388,8 @@ asmlinkage __visible void __softirq_entr
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
int max_restart = MAX_SOFTIRQ_RESTART;
@@ -388,7 +388,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Mask out PF_MEMALLOC s current task context is borrowed for the
-@@ -255,36 +408,7 @@ asmlinkage __visible void __do_softirq(v
+@@ -255,36 +408,7 @@ asmlinkage __visible void __softirq_entr
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
@@ -785,7 +785,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
.thread_comm = "ksoftirqd/%u",
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -758,14 +758,7 @@ static bool can_stop_idle_tick(int cpu,
+@@ -866,14 +866,7 @@ static bool can_stop_idle_tick(int cpu,
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
@@ -803,7 +803,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3595,11 +3595,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3801,11 +3801,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
diff --git a/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch b/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
index 2625c5f76536a5..5914cdb70068ad 100644
--- a/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
+++ b/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -10,7 +10,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
-@@ -189,12 +189,10 @@ config NR_CPUS
+@@ -184,12 +184,10 @@ config NR_CPUS
source kernel/Kconfig.hz
config RWSEM_GENERIC_SPINLOCK
diff --git a/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch b/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
index 90536050c2ee58..e9b32347d90cdc 100644
--- a/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
+++ b/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
@@ -12,43 +12,21 @@ now do that trylock()/relax() across an entire herd of locks. Joy.
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/stop_machine.c | 25 +++++++++++++++----------
- 1 file changed, 15 insertions(+), 10 deletions(-)
+ kernel/stop_machine.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
-@@ -276,7 +276,7 @@ int stop_two_cpus(unsigned int cpu1, uns
- struct cpu_stop_work work1, work2;
- struct multi_stop_data msdata;
+@@ -313,18 +313,21 @@ static DEFINE_MUTEX(stop_cpus_mutex);
-- preempt_disable();
-+ preempt_disable_nort();
- msdata = (struct multi_stop_data){
- .fn = fn,
- .data = arg,
-@@ -296,11 +296,11 @@ int stop_two_cpus(unsigned int cpu1, uns
- if (cpu1 > cpu2)
- swap(cpu1, cpu2);
- if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
-- preempt_enable();
-+ preempt_enable_nort();
- return -ENOENT;
- }
-
-- preempt_enable();
-+ preempt_enable_nort();
-
- wait_for_stop_done(&done);
-
-@@ -333,17 +333,20 @@ static DEFINE_MUTEX(stop_cpus_mutex);
-
- static void queue_stop_cpus_work(const struct cpumask *cpumask,
+ static bool queue_stop_cpus_work(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg,
- struct cpu_stop_done *done)
+ struct cpu_stop_done *done, bool inactive)
{
struct cpu_stop_work *work;
unsigned int cpu;
+ bool queued = false;
/*
- * Disable preemption while queueing to avoid getting
@@ -66,16 +44,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_cpu(cpu, cpumask) {
work = &per_cpu(cpu_stopper.stop_work, cpu);
work->fn = fn;
-@@ -360,7 +363,7 @@ static int __stop_cpus(const struct cpum
+@@ -344,7 +347,7 @@ static int __stop_cpus(const struct cpum
struct cpu_stop_done done;
cpu_stop_init_done(&done, cpumask_weight(cpumask));
-- queue_stop_cpus_work(cpumask, fn, arg, &done);
-+ queue_stop_cpus_work(cpumask, fn, arg, &done, false);
- wait_for_stop_done(&done);
- return done.executed ? done.ret : -ENOENT;
- }
-@@ -558,6 +561,8 @@ static int __init cpu_stop_init(void)
+- if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
++ if (!queue_stop_cpus_work(cpumask, fn, arg, &done, false))
+ return -ENOENT;
+ wait_for_completion(&done.completion);
+ return done.ret;
+@@ -532,6 +535,8 @@ static int __init cpu_stop_init(void)
INIT_LIST_HEAD(&stopper->works);
}
@@ -84,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
stop_machine_unpark(raw_smp_processor_id());
stop_machine_initialized = true;
-@@ -654,7 +659,7 @@ int stop_machine_from_inactive_cpu(cpu_s
+@@ -626,7 +631,7 @@ int stop_machine_from_inactive_cpu(cpu_s
set_state(&msdata, MULTI_STOP_PREPARE);
cpu_stop_init_done(&done, num_active_cpus());
queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
diff --git a/patches/stop-machine-raw-lock.patch b/patches/stop-machine-raw-lock.patch
index 6155df97d0e418..2879faa2e0f673 100644
--- a/patches/stop-machine-raw-lock.patch
+++ b/patches/stop-machine-raw-lock.patch
@@ -6,20 +6,12 @@ Use raw-locks in stomp_machine() to allow locking in irq-off regions.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/stop_machine.c | 64 ++++++++++++++++++++++++++++++++++----------------
- 1 file changed, 44 insertions(+), 20 deletions(-)
+ kernel/stop_machine.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
-@@ -30,14 +30,14 @@ struct cpu_stop_done {
- atomic_t nr_todo; /* nr left to execute */
- bool executed; /* actually executed? */
- int ret; /* collected return value */
-- struct completion completion; /* fired if nr_todo reaches 0 */
-+ struct task_struct *waiter; /* woken when nr_todo reaches 0 */
- };
-
- /* the actual stopper, one per every possible cpu, enabled on online cpus */
+@@ -36,7 +36,7 @@ struct cpu_stop_done {
struct cpu_stopper {
struct task_struct *thread;
@@ -28,69 +20,24 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bool enabled; /* is this stopper enabled? */
struct list_head works; /* list of pending works */
-@@ -59,7 +59,7 @@ static void cpu_stop_init_done(struct cp
- {
- memset(done, 0, sizeof(*done));
- atomic_set(&done->nr_todo, nr_todo);
-- init_completion(&done->completion);
-+ done->waiter = current;
- }
-
- /* signal completion unless @done is NULL */
-@@ -68,8 +68,10 @@ static void cpu_stop_signal_done(struct
- if (done) {
- if (executed)
- done->executed = true;
-- if (atomic_dec_and_test(&done->nr_todo))
-- complete(&done->completion);
-+ if (atomic_dec_and_test(&done->nr_todo)) {
-+ wake_up_process(done->waiter);
-+ done->waiter = NULL;
-+ }
- }
- }
-
-@@ -86,12 +88,28 @@ static void cpu_stop_queue_work(unsigned
- struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+@@ -82,14 +82,14 @@ static bool cpu_stop_queue_work(unsigned
unsigned long flags;
+ bool enabled;
- spin_lock_irqsave(&stopper->lock, flags);
+ raw_spin_lock_irqsave(&stopper->lock, flags);
- if (stopper->enabled)
+ enabled = stopper->enabled;
+ if (enabled)
__cpu_stop_queue_work(stopper, work);
- else
- cpu_stop_signal_done(work->done, false);
+ else if (work->done)
+ cpu_stop_signal_done(work->done);
- spin_unlock_irqrestore(&stopper->lock, flags);
-+ raw_spin_unlock_irqrestore(&stopper->lock, flags);
-+}
-+
-+static void wait_for_stop_done(struct cpu_stop_done *done)
-+{
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ while (atomic_read(&done->nr_todo)) {
-+ schedule();
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ }
-+ /*
-+ * We need to wait until cpu_stop_signal_done() has cleared
-+ * done->waiter.
-+ */
-+ while (done->waiter)
-+ cpu_relax();
-+ set_current_state(TASK_RUNNING);
- }
-
- /**
-@@ -125,7 +143,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s
- cpu_stop_init_done(&done, 1);
- cpu_stop_queue_work(cpu, &work);
-- wait_for_completion(&done.completion);
-+ wait_for_stop_done(&done);
- return done.executed ? done.ret : -ENOENT;
++ raw_spin_unlock_irqrestore(&stopper->lock, flags);
+ return enabled;
}
-@@ -224,8 +242,8 @@ static int cpu_stop_queue_two_works(int
+@@ -224,8 +224,8 @@ static int cpu_stop_queue_two_works(int
int err;
lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
@@ -101,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
err = -ENOENT;
if (!stopper1->enabled || !stopper2->enabled)
-@@ -235,8 +253,8 @@ static int cpu_stop_queue_two_works(int
+@@ -235,8 +235,8 @@ static int cpu_stop_queue_two_works(int
__cpu_stop_queue_work(stopper1, work1);
__cpu_stop_queue_work(stopper2, work2);
unlock:
@@ -112,25 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
return err;
-@@ -284,7 +302,7 @@ int stop_two_cpus(unsigned int cpu1, uns
-
- preempt_enable();
-
-- wait_for_completion(&done.completion);
-+ wait_for_stop_done(&done);
-
- return done.executed ? done.ret : -ENOENT;
- }
-@@ -343,7 +361,7 @@ static int __stop_cpus(const struct cpum
-
- cpu_stop_init_done(&done, cpumask_weight(cpumask));
- queue_stop_cpus_work(cpumask, fn, arg, &done);
-- wait_for_completion(&done.completion);
-+ wait_for_stop_done(&done);
- return done.executed ? done.ret : -ENOENT;
- }
-
-@@ -422,9 +440,9 @@ static int cpu_stop_should_run(unsigned
+@@ -425,9 +425,9 @@ static int cpu_stop_should_run(unsigned
unsigned long flags;
int run;
@@ -142,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return run;
}
-@@ -436,13 +454,13 @@ static void cpu_stopper_thread(unsigned
+@@ -438,13 +438,13 @@ static void cpu_stopper_thread(unsigned
repeat:
work = NULL;
@@ -158,21 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (work) {
cpu_stop_fn_t fn = work->fn;
-@@ -474,7 +492,13 @@ static void cpu_stopper_thread(unsigned
- kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
- ksym_buf), arg);
-
-+ /*
-+ * Make sure that the wakeup and setting done->waiter
-+ * to NULL is atomic.
-+ */
-+ local_irq_disable();
- cpu_stop_signal_done(done, true);
-+ local_irq_enable();
- goto repeat;
- }
- }
-@@ -530,7 +554,7 @@ static int __init cpu_stop_init(void)
+@@ -528,7 +528,7 @@ static int __init cpu_stop_init(void)
for_each_possible_cpu(cpu) {
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
@@ -181,12 +96,3 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
INIT_LIST_HEAD(&stopper->works);
}
-@@ -634,7 +658,7 @@ int stop_machine_from_inactive_cpu(cpu_s
- ret = multi_cpu_stop(&msdata);
-
- /* Busy wait for completion. */
-- while (!completion_done(&done.completion))
-+ while (atomic_read(&done.nr_todo))
- cpu_relax();
-
- mutex_unlock(&stop_cpus_mutex);
diff --git a/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
index cf212bd2ff1c67..a4bb4178fd6f1d 100644
--- a/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
+++ b/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
@@ -15,9 +15,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
-@@ -450,6 +450,16 @@ static void cpu_stopper_thread(unsigned
+@@ -452,6 +452,16 @@ static void cpu_stopper_thread(unsigned
struct cpu_stop_done *done = work->done;
- char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
+ int ret;
+ /*
+ * Wait until the stopper finished scheduling on all
@@ -29,6 +29,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ */
+ lg_global_unlock(&stop_cpus_lock);
+
- /* cpu stop callbacks are not allowed to sleep */
- preempt_disable();
-
+ /* cpu stop callbacks must not sleep, make in_atomic() == T */
+ preempt_count_inc();
+ ret = fn(arg);
diff --git a/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
index 275b36799b187f..4c851dd5736a6c 100644
--- a/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
+++ b/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
-@@ -340,7 +340,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -342,7 +342,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
goto out;
}
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
atomic_long_inc(&pool->sp_stats.packets);
-@@ -376,7 +376,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -378,7 +378,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
atomic_long_inc(&pool->sp_stats.threads_woken);
wake_up_process(rqstp->rq_task);
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
}
rcu_read_unlock();
-@@ -397,7 +397,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -399,7 +399,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
goto redo_search;
}
rqstp = NULL;
diff --git a/patches/suspend-prevernt-might-sleep-splats.patch b/patches/suspend-prevernt-might-sleep-splats.patch
index fb132e3aa3d42b..e0b981a02873ca 100644
--- a/patches/suspend-prevernt-might-sleep-splats.patch
+++ b/patches/suspend-prevernt-might-sleep-splats.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -482,6 +482,7 @@ extern enum system_states {
+@@ -484,6 +484,7 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
diff --git a/patches/sysfs-realtime-entry.patch b/patches/sysfs-realtime-entry.patch
index ec75898877cf9f..e06abca2f27d8d 100644
--- a/patches/sysfs-realtime-entry.patch
+++ b/patches/sysfs-realtime-entry.patch
@@ -35,10 +35,10 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
-@@ -203,6 +212,9 @@ static struct attribute * kernel_attrs[]
- &vmcoreinfo_attr.attr,
- #endif
+@@ -225,6 +234,9 @@ static struct attribute * kernel_attrs[]
&rcu_expedited_attr.attr,
+ &rcu_normal_attr.attr,
+ #endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ &realtime_attr.attr,
+#endif
diff --git a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
index 4ab5c45e78b6c1..1f66e18dc0af80 100644
--- a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -483,8 +483,9 @@ static inline struct task_struct *this_c
+@@ -494,8 +494,9 @@ static inline struct task_struct *this_c
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
-@@ -509,27 +510,36 @@ struct tasklet_struct name = { NULL, 0,
+@@ -520,27 +521,36 @@ struct tasklet_struct name = { NULL, 0,
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
-@@ -578,12 +588,7 @@ static inline void tasklet_disable(struc
+@@ -589,12 +599,7 @@ static inline void tasklet_disable(struc
smp_mb();
}
diff --git a/patches/tasklist-lock-fix-section-conflict.patch b/patches/tasklist-lock-fix-section-conflict.patch
deleted file mode 100644
index 0c0768f3fd7080..00000000000000
--- a/patches/tasklist-lock-fix-section-conflict.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-Subject: rwlocks: Fix section mismatch
-From: John Kacur <jkacur@redhat.com>
-Date: Mon, 19 Sep 2011 11:09:27 +0200 (CEST)
-
-This fixes the following build error for the preempt-rt kernel.
-
-make kernel/fork.o
- CC kernel/fork.o
-kernel/fork.c:90: error: section of tasklist_lock conflicts with previous declaration
-make[2]: *** [kernel/fork.o] Error 1
-make[1]: *** [kernel/fork.o] Error 2
-
-The rt kernel cache aligns the RWLOCK in DEFINE_RWLOCK by default.
-The non-rt kernels explicitly cache align only the tasklist_lock in
-kernel/fork.c
-That can create a build conflict. This fixes the build problem by making the
-non-rt kernels cache align RWLOCKs by default. The side effect is that
-the other RWLOCKs are also cache aligned for non-rt.
-
-This is a short term solution for rt only.
-The longer term solution would be to push the cache aligned DEFINE_RWLOCK
-to mainline. If there are objections, then we could create a
-DEFINE_RWLOCK_CACHE_ALIGNED or something of that nature.
-
-Signed-off-by: John Kacur <jkacur@redhat.com>
-Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
-Link: http://lkml.kernel.org/r/alpine.LFD.2.00.1109191104010.23118@localhost6.localdomain6
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/linux/rwlock_types.h | 3 ++-
- kernel/fork.c | 2 +-
- 2 files changed, 3 insertions(+), 2 deletions(-)
-
---- a/include/linux/rwlock_types.h
-+++ b/include/linux/rwlock_types.h
-@@ -47,6 +47,7 @@ typedef struct {
- RW_DEP_MAP_INIT(lockname) }
- #endif
-
--#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
-+#define DEFINE_RWLOCK(name) \
-+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
-
- #endif /* __LINUX_RWLOCK_TYPES_H */
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -108,7 +108,7 @@ int max_threads; /* tunable limit on nr
-
- DEFINE_PER_CPU(unsigned long, process_counts) = 0;
-
--__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
-+DEFINE_RWLOCK(tasklist_lock); /* outer */
-
- #ifdef CONFIG_PROVE_RCU
- int lockdep_tasklist_lock_is_held(void)
diff --git a/patches/timekeeping-split-jiffies-lock.patch b/patches/timekeeping-split-jiffies-lock.patch
index 279d97e0e10187..a47006b93b3101 100644
--- a/patches/timekeeping-split-jiffies-lock.patch
+++ b/patches/timekeeping-split-jiffies-lock.patch
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return period;
}
-@@ -578,10 +583,10 @@ static ktime_t tick_nohz_stop_sched_tick
+@@ -670,10 +675,10 @@ static ktime_t tick_nohz_stop_sched_tick
/* Read jiffies and the time when jiffies were updated last */
do {
@@ -129,7 +129,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (rcu_needs_cpu(basemono, &next_rcu) ||
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
-@@ -2047,8 +2047,10 @@ EXPORT_SYMBOL(hardpps);
+@@ -2319,8 +2319,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
diff --git a/patches/timers-preempt-rt-support.patch b/patches/timers-preempt-rt-support.patch
index 264c375d35617d..d87595a0106f41 100644
--- a/patches/timers-preempt-rt-support.patch
+++ b/patches/timers-preempt-rt-support.patch
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_lock(&base->lock);
if (base->active_timers) {
if (time_before_eq(base->next_timer, base->timer_jiffies))
-@@ -1621,7 +1629,7 @@ static void migrate_timers(int cpu)
+@@ -1632,7 +1640,7 @@ static void migrate_timers(int cpu)
BUG_ON(cpu_online(cpu));
old_base = per_cpu_ptr(&tvec_bases, cpu);
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
-@@ -1645,7 +1653,7 @@ static void migrate_timers(int cpu)
+@@ -1656,7 +1664,7 @@ static void migrate_timers(int cpu)
spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
diff --git a/patches/timers-prepare-for-full-preemption.patch b/patches/timers-prepare-for-full-preemption.patch
index 866be4023ae1f1..2ffc37e1e32400 100644
--- a/patches/timers-prepare-for-full-preemption.patch
+++ b/patches/timers-prepare-for-full-preemption.patch
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define del_timer_sync(t) del_timer(t)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -618,11 +618,14 @@ void resched_cpu(int cpu)
+@@ -490,11 +490,14 @@ void resched_cpu(int cpu)
*/
int get_nohz_timer_target(void)
{
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
for_each_domain(cpu, sd) {
-@@ -638,6 +641,8 @@ int get_nohz_timer_target(void)
+@@ -510,6 +513,8 @@ int get_nohz_timer_target(void)
cpu = housekeeping_any_cpu();
unlock:
rcu_read_unlock();
@@ -137,7 +137,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_unlock_irq(&base->lock);
}
-@@ -1645,6 +1677,9 @@ static void __init init_timer_cpu(int cp
+@@ -1656,6 +1688,9 @@ static void __init init_timer_cpu(int cp
base->cpu = cpu;
spin_lock_init(&base->lock);
diff --git a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
index 6ab92fbe31f143..88cef85cdc117f 100644
--- a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3296,7 +3296,16 @@ asmlinkage __visible void __sched notrac
+@@ -3347,7 +3347,16 @@ asmlinkage __visible void __sched notrac
* an infinite recursion.
*/
prev_ctx = exception_enter();
diff --git a/patches/tracing-writeback-Replace-cgroup-path-to-cgroup-ino.patch b/patches/tracing-writeback-Replace-cgroup-path-to-cgroup-ino.patch
deleted file mode 100644
index 54c5bb9b7016bf..00000000000000
--- a/patches/tracing-writeback-Replace-cgroup-path-to-cgroup-ino.patch
+++ /dev/null
@@ -1,392 +0,0 @@
-From: Yang Shi <yang.shi@linaro.org>
-Date: Thu, 3 Mar 2016 01:08:57 -0800
-Subject: [PATCH] tracing, writeback: Replace cgroup path to cgroup ino
-
-commit 5634cc2aa9aebc77bc862992e7805469dcf83dac ("writeback: update writeback
-tracepoints to report cgroup") made writeback tracepoints print out cgroup
-path when CGROUP_WRITEBACK is enabled, but it may trigger the below bug on -rt
-kernel since kernfs_path and kernfs_path_len are called by tracepoints, which
-acquire spin lock that is sleepable on -rt kernel.
-
-BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:930
-in_atomic(): 1, irqs_disabled(): 0, pid: 625, name: kworker/u16:3
-INFO: lockdep is turned off.
-Preemption disabled at:[<ffffffc000374a5c>] wb_writeback+0xec/0x830
-
-CPU: 7 PID: 625 Comm: kworker/u16:3 Not tainted 4.4.1-rt5 #20
-Hardware name: Freescale Layerscape 2085a RDB Board (DT)
-Workqueue: writeback wb_workfn (flush-7:0)
-Call trace:
-[<ffffffc00008d708>] dump_backtrace+0x0/0x200
-[<ffffffc00008d92c>] show_stack+0x24/0x30
-[<ffffffc0007b0f40>] dump_stack+0x88/0xa8
-[<ffffffc000127d74>] ___might_sleep+0x2ec/0x300
-[<ffffffc000d5d550>] rt_spin_lock+0x38/0xb8
-[<ffffffc0003e0548>] kernfs_path_len+0x30/0x90
-[<ffffffc00036b360>] trace_event_raw_event_writeback_work_class+0xe8/0x2e8
-[<ffffffc000374f90>] wb_writeback+0x620/0x830
-[<ffffffc000376224>] wb_workfn+0x61c/0x950
-[<ffffffc000110adc>] process_one_work+0x3ac/0xb30
-[<ffffffc0001112fc>] worker_thread+0x9c/0x7a8
-[<ffffffc00011a9e8>] kthread+0x190/0x1b0
-[<ffffffc000086ca0>] ret_from_fork+0x10/0x30
-
-With unlocked kernfs_* functions, synchronize_sched() has to be called in
-kernfs_rename which could be called in syscall path, but it is problematic.
-So, print out cgroup ino instead of path name, which could be converted to
-path name by userland.
-
-Withouth CGROUP_WRITEBACK enabled, it just prints out root dir. But, root
-dir ino vary from different filesystems, so printing out -1U to indicate
-an invalid cgroup ino.
-
-Link: http://lkml.kernel.org/r/1456996137-8354-1-git-send-email-yang.shi@linaro.org
-
-Acked-by: Tejun Heo <tj@kernel.org>
-Signed-off-by: Yang Shi <yang.shi@linaro.org>
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
----
- include/trace/events/writeback.h | 121 ++++++++++++++-------------------------
- 1 file changed, 45 insertions(+), 76 deletions(-)
-
---- a/include/trace/events/writeback.h
-+++ b/include/trace/events/writeback.h
-@@ -134,58 +134,28 @@ DEFINE_EVENT(writeback_dirty_inode_templ
- #ifdef CREATE_TRACE_POINTS
- #ifdef CONFIG_CGROUP_WRITEBACK
-
--static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
-+static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
- {
-- return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1;
-+ return wb->memcg_css->cgroup->kn->ino;
- }
-
--static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
--{
-- struct cgroup *cgrp = wb->memcg_css->cgroup;
-- char *path;
--
-- path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
-- WARN_ON_ONCE(path != buf);
--}
--
--static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
--{
-- if (wbc->wb)
-- return __trace_wb_cgroup_size(wbc->wb);
-- else
-- return 2;
--}
--
--static inline void __trace_wbc_assign_cgroup(char *buf,
-- struct writeback_control *wbc)
-+static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
- {
- if (wbc->wb)
-- __trace_wb_assign_cgroup(buf, wbc->wb);
-+ return __trace_wb_assign_cgroup(wbc->wb);
- else
-- strcpy(buf, "/");
-+ return -1U;
- }
--
- #else /* CONFIG_CGROUP_WRITEBACK */
-
--static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
--{
-- return 2;
--}
--
--static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
--{
-- strcpy(buf, "/");
--}
--
--static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
-+static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
- {
-- return 2;
-+ return -1U;
- }
-
--static inline void __trace_wbc_assign_cgroup(char *buf,
-- struct writeback_control *wbc)
-+static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
- {
-- strcpy(buf, "/");
-+ return -1U;
- }
-
- #endif /* CONFIG_CGROUP_WRITEBACK */
-@@ -201,7 +171,7 @@ DECLARE_EVENT_CLASS(writeback_write_inod
- __array(char, name, 32)
- __field(unsigned long, ino)
- __field(int, sync_mode)
-- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
-+ __field(unsigned int, cgroup_ino)
- ),
-
- TP_fast_assign(
-@@ -209,14 +179,14 @@ DECLARE_EVENT_CLASS(writeback_write_inod
- dev_name(inode_to_bdi(inode)->dev), 32);
- __entry->ino = inode->i_ino;
- __entry->sync_mode = wbc->sync_mode;
-- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
-+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
- ),
-
-- TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s",
-+ TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
- __entry->name,
- __entry->ino,
- __entry->sync_mode,
-- __get_str(cgroup)
-+ __entry->cgroup_ino
- )
- );
-
-@@ -246,7 +216,7 @@ DECLARE_EVENT_CLASS(writeback_work_class
- __field(int, range_cyclic)
- __field(int, for_background)
- __field(int, reason)
-- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
-+ __field(unsigned int, cgroup_ino)
- ),
- TP_fast_assign(
- strncpy(__entry->name,
-@@ -258,10 +228,10 @@ DECLARE_EVENT_CLASS(writeback_work_class
- __entry->range_cyclic = work->range_cyclic;
- __entry->for_background = work->for_background;
- __entry->reason = work->reason;
-- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
-+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
- ),
- TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
-- "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s",
-+ "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
- __entry->name,
- MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
- __entry->nr_pages,
-@@ -270,7 +240,7 @@ DECLARE_EVENT_CLASS(writeback_work_class
- __entry->range_cyclic,
- __entry->for_background,
- __print_symbolic(__entry->reason, WB_WORK_REASON),
-- __get_str(cgroup)
-+ __entry->cgroup_ino
- )
- );
- #define DEFINE_WRITEBACK_WORK_EVENT(name) \
-@@ -300,15 +270,15 @@ DECLARE_EVENT_CLASS(writeback_class,
- TP_ARGS(wb),
- TP_STRUCT__entry(
- __array(char, name, 32)
-- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
-+ __field(unsigned int, cgroup_ino)
- ),
- TP_fast_assign(
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
-- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
-+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
- ),
-- TP_printk("bdi %s: cgroup=%s",
-+ TP_printk("bdi %s: cgroup_ino=%u",
- __entry->name,
-- __get_str(cgroup)
-+ __entry->cgroup_ino
- )
- );
- #define DEFINE_WRITEBACK_EVENT(name) \
-@@ -347,7 +317,7 @@ DECLARE_EVENT_CLASS(wbc_class,
- __field(int, range_cyclic)
- __field(long, range_start)
- __field(long, range_end)
-- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
-+ __field(unsigned int, cgroup_ino)
- ),
-
- TP_fast_assign(
-@@ -361,12 +331,12 @@ DECLARE_EVENT_CLASS(wbc_class,
- __entry->range_cyclic = wbc->range_cyclic;
- __entry->range_start = (long)wbc->range_start;
- __entry->range_end = (long)wbc->range_end;
-- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
-+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
- ),
-
- TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
- "bgrd=%d reclm=%d cyclic=%d "
-- "start=0x%lx end=0x%lx cgroup=%s",
-+ "start=0x%lx end=0x%lx cgroup_ino=%u",
- __entry->name,
- __entry->nr_to_write,
- __entry->pages_skipped,
-@@ -377,7 +347,7 @@ DECLARE_EVENT_CLASS(wbc_class,
- __entry->range_cyclic,
- __entry->range_start,
- __entry->range_end,
-- __get_str(cgroup)
-+ __entry->cgroup_ino
- )
- )
-
-@@ -398,7 +368,7 @@ TRACE_EVENT(writeback_queue_io,
- __field(long, age)
- __field(int, moved)
- __field(int, reason)
-- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
-+ __field(unsigned int, cgroup_ino)
- ),
- TP_fast_assign(
- unsigned long *older_than_this = work->older_than_this;
-@@ -408,15 +378,15 @@ TRACE_EVENT(writeback_queue_io,
- (jiffies - *older_than_this) * 1000 / HZ : -1;
- __entry->moved = moved;
- __entry->reason = work->reason;
-- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
-+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
- ),
-- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s",
-+ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
- __entry->name,
- __entry->older, /* older_than_this in jiffies */
- __entry->age, /* older_than_this in relative milliseconds */
- __entry->moved,
- __print_symbolic(__entry->reason, WB_WORK_REASON),
-- __get_str(cgroup)
-+ __entry->cgroup_ino
- )
- );
-
-@@ -484,7 +454,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
- __field(unsigned long, dirty_ratelimit)
- __field(unsigned long, task_ratelimit)
- __field(unsigned long, balanced_dirty_ratelimit)
-- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
-+ __field(unsigned int, cgroup_ino)
- ),
-
- TP_fast_assign(
-@@ -496,13 +466,13 @@ TRACE_EVENT(bdi_dirty_ratelimit,
- __entry->task_ratelimit = KBps(task_ratelimit);
- __entry->balanced_dirty_ratelimit =
- KBps(wb->balanced_dirty_ratelimit);
-- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
-+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
- ),
-
- TP_printk("bdi %s: "
- "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
- "dirty_ratelimit=%lu task_ratelimit=%lu "
-- "balanced_dirty_ratelimit=%lu cgroup=%s",
-+ "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
- __entry->bdi,
- __entry->write_bw, /* write bandwidth */
- __entry->avg_write_bw, /* avg write bandwidth */
-@@ -510,7 +480,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
- __entry->dirty_ratelimit, /* base ratelimit */
- __entry->task_ratelimit, /* ratelimit with position control */
- __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
-- __get_str(cgroup)
-+ __entry->cgroup_ino
- )
- );
-
-@@ -548,7 +518,7 @@ TRACE_EVENT(balance_dirty_pages,
- __field( long, pause)
- __field(unsigned long, period)
- __field( long, think)
-- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
-+ __field(unsigned int, cgroup_ino)
- ),
-
- TP_fast_assign(
-@@ -571,7 +541,7 @@ TRACE_EVENT(balance_dirty_pages,
- __entry->period = period * 1000 / HZ;
- __entry->pause = pause * 1000 / HZ;
- __entry->paused = (jiffies - start_time) * 1000 / HZ;
-- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
-+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
- ),
-
-
-@@ -580,7 +550,7 @@ TRACE_EVENT(balance_dirty_pages,
- "bdi_setpoint=%lu bdi_dirty=%lu "
- "dirty_ratelimit=%lu task_ratelimit=%lu "
- "dirtied=%u dirtied_pause=%u "
-- "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s",
-+ "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
- __entry->bdi,
- __entry->limit,
- __entry->setpoint,
-@@ -595,7 +565,7 @@ TRACE_EVENT(balance_dirty_pages,
- __entry->pause, /* ms */
- __entry->period, /* ms */
- __entry->think, /* ms */
-- __get_str(cgroup)
-+ __entry->cgroup_ino
- )
- );
-
-@@ -609,8 +579,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
- __field(unsigned long, ino)
- __field(unsigned long, state)
- __field(unsigned long, dirtied_when)
-- __dynamic_array(char, cgroup,
-- __trace_wb_cgroup_size(inode_to_wb(inode)))
-+ __field(unsigned int, cgroup_ino)
- ),
-
- TP_fast_assign(
-@@ -619,16 +588,16 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
- __entry->ino = inode->i_ino;
- __entry->state = inode->i_state;
- __entry->dirtied_when = inode->dirtied_when;
-- __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode));
-+ __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
- ),
-
-- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s",
-+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
- __entry->name,
- __entry->ino,
- show_inode_state(__entry->state),
- __entry->dirtied_when,
- (jiffies - __entry->dirtied_when) / HZ,
-- __get_str(cgroup)
-+ __entry->cgroup_ino
- )
- );
-
-@@ -684,7 +653,7 @@ DECLARE_EVENT_CLASS(writeback_single_ino
- __field(unsigned long, writeback_index)
- __field(long, nr_to_write)
- __field(unsigned long, wrote)
-- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
-+ __field(unsigned int, cgroup_ino)
- ),
-
- TP_fast_assign(
-@@ -696,11 +665,11 @@ DECLARE_EVENT_CLASS(writeback_single_ino
- __entry->writeback_index = inode->i_mapping->writeback_index;
- __entry->nr_to_write = nr_to_write;
- __entry->wrote = nr_to_write - wbc->nr_to_write;
-- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
-+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
- ),
-
- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
-- "index=%lu to_write=%ld wrote=%lu cgroup=%s",
-+ "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
- __entry->name,
- __entry->ino,
- show_inode_state(__entry->state),
-@@ -709,7 +678,7 @@ DECLARE_EVENT_CLASS(writeback_single_ino
- __entry->writeback_index,
- __entry->nr_to_write,
- __entry->wrote,
-- __get_str(cgroup)
-+ __entry->cgroup_ino
- )
- );
-
diff --git a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
index 492612dfdd69bc..42b3efebfb4b78 100644
--- a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
+++ b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -2843,10 +2843,8 @@ void serial8250_console_write(struct uar
+@@ -3092,10 +3092,8 @@ void serial8250_console_write(struct uar
serial8250_rpm_get(up);
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 5ab91835904059..9bb3dbc133ddde 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3584,7 +3584,7 @@ static int netif_rx_internal(struct sk_b
+@@ -3790,7 +3790,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3594,13 +3594,13 @@ static int netif_rx_internal(struct sk_b
+@@ -3800,13 +3800,13 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
diff --git a/patches/usb-use-_nort-in-giveback.patch b/patches/usb-use-_nort-in-giveback.patch
index df2b37a2e1bfb8..b8bb3df0d20e31 100644
--- a/patches/usb-use-_nort-in-giveback.patch
+++ b/patches/usb-use-_nort-in-giveback.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
-@@ -1735,9 +1735,9 @@ static void __usb_hcd_giveback_urb(struc
+@@ -1759,9 +1759,9 @@ static void __usb_hcd_giveback_urb(struc
* and no one may trigger the above deadlock situation when
* running complete() in tasklet.
*/
diff --git a/patches/work-queue-work-around-irqsafe-timer-optimization.patch b/patches/work-queue-work-around-irqsafe-timer-optimization.patch
index 9d123ada69abce..5eef1d053465cb 100644
--- a/patches/work-queue-work-around-irqsafe-timer-optimization.patch
+++ b/patches/work-queue-work-around-irqsafe-timer-optimization.patch
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "workqueue_internal.h"
-@@ -1285,7 +1286,7 @@ static int try_to_grab_pending(struct wo
+@@ -1303,7 +1304,7 @@ static int try_to_grab_pending(struct wo
local_unlock_irqrestore(pendingb_lock, *flags);
if (work_is_canceling(work))
return -ENOENT;
diff --git a/patches/work-simple-Simple-work-queue-implemenation.patch b/patches/work-simple-Simple-work-queue-implemenation.patch
index 04f9a3a81fee19..3904befdc41f1f 100644
--- a/patches/work-simple-Simple-work-queue-implemenation.patch
+++ b/patches/work-simple-Simple-work-queue-implemenation.patch
@@ -46,7 +46,7 @@ Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif /* _LINUX_SWORK_H */
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
-@@ -13,7 +13,7 @@ endif
+@@ -17,7 +17,7 @@ endif
obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index 30d1fd1d1b2152..80c6388ba2f565 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -24,14 +24,14 @@ Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/sched/core.c | 80 ++++++++------------------------------------
- kernel/workqueue.c | 55 ++++++++++++------------------
+ kernel/sched/core.c | 81 ++++++++------------------------------------
+ kernel/workqueue.c | 52 ++++++++++++----------------
kernel/workqueue_internal.h | 5 +-
- 3 files changed, 41 insertions(+), 99 deletions(-)
+ 3 files changed, 41 insertions(+), 97 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1735,10 +1735,6 @@ static inline void ttwu_activate(struct
+@@ -1626,10 +1626,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2055,52 +2051,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2032,53 +2028,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -86,7 +86,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-
- ttwu_do_wakeup(rq, p, 0);
-- ttwu_stat(p, smp_processor_id(), 0);
+- if (schedstat_enabled())
+- ttwu_stat(p, smp_processor_id(), 0);
-out:
- raw_spin_unlock(&p->pi_lock);
-}
@@ -95,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3271,21 +3221,6 @@ static void __sched notrace __schedule(b
+@@ -3322,21 +3271,6 @@ static void __sched notrace __schedule(b
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
@@ -110,14 +111,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- if (prev->flags & PF_WQ_WORKER && !prev->saved_state) {
- struct task_struct *to_wakeup;
-
-- to_wakeup = wq_worker_sleeping(prev, cpu);
+- to_wakeup = wq_worker_sleeping(prev);
- if (to_wakeup)
- try_to_wake_up_local(to_wakeup);
- }
}
switch_count = &prev->nvcsw;
}
-@@ -3318,6 +3253,14 @@ static inline void sched_submit_work(str
+@@ -3369,6 +3303,14 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -132,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3326,6 +3269,12 @@ static inline void sched_submit_work(str
+@@ -3377,6 +3319,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -145,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3336,6 +3285,7 @@ asmlinkage __visible void __sched schedu
+@@ -3387,6 +3335,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
@@ -155,15 +156,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -850,44 +850,31 @@ static void wake_up_worker(struct worker
+@@ -867,43 +867,32 @@ static void wake_up_worker(struct worker
}
/**
- * wq_worker_waking_up - a worker is waking up
- * @task: task waking up
-- * @cpu: CPU @task is waking up to
+ * wq_worker_running - a worker is running again
-+ * @task: task returning from sleep
+ * @cpu: CPU @task is waking up to
*
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
@@ -190,8 +190,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* wq_worker_sleeping - a worker is going to sleep
* @task: task going to sleep
-- * @cpu: CPU in question, must be the current CPU number
-- *
+ *
- * This function is called during schedule() when a busy worker is
- * going to sleep. Worker on the same cpu can be woken up by
- * returning pointer to its task.
@@ -204,7 +203,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
*/
--struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
+-struct task_struct *wq_worker_sleeping(struct task_struct *task)
+void wq_worker_sleeping(struct task_struct *task)
{
- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
@@ -212,7 +211,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct worker_pool *pool;
/*
-@@ -896,14 +883,15 @@ struct task_struct *wq_worker_sleeping(s
+@@ -912,13 +901,15 @@ struct task_struct *wq_worker_sleeping(s
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
@@ -222,17 +221,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pool = worker->pool;
- /* this can only happen on the local cpu */
-- if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
+- if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
- return NULL;
+ if (WARN_ON_ONCE(worker->sleeping))
+ return;
-
++
+ worker->sleeping = 1;
+ spin_lock_irq(&pool->lock);
+
/*
* The counterpart of the following dec_and_test, implied mb,
- * worklist not empty test sequence is in insert_work().
-@@ -916,9 +904,12 @@ struct task_struct *wq_worker_sleeping(s
+@@ -932,9 +923,12 @@ struct task_struct *wq_worker_sleeping(s
* lock is safe.
*/
if (atomic_dec_and_test(&pool->nr_running) &&
@@ -263,7 +262,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* sched/core.c and workqueue.c.
*/
-void wq_worker_waking_up(struct task_struct *task, int cpu);
--struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
+-struct task_struct *wq_worker_sleeping(struct task_struct *task);
+void wq_worker_running(struct task_struct *task);
+void wq_worker_sleeping(struct task_struct *task);
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index 765da8c9d53023..59e5d64ab78704 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -37,13 +37,13 @@ Cc: Richard Weinberger <richard.weinberger@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
---
- kernel/sched/core.c | 7 ++++-
- kernel/workqueue.c | 61 ++++++++++++++++++++++++++++++++++++++++------------
- 2 files changed, 53 insertions(+), 15 deletions(-)
+ kernel/sched/core.c | 7 ++++--
+ kernel/workqueue.c | 60 ++++++++++++++++++++++++++++++++++++++++------------
+ 2 files changed, 52 insertions(+), 15 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3367,9 +3367,8 @@ static void __sched notrace __schedule(b
+@@ -3417,9 +3417,8 @@ STACK_FRAME_NON_STANDARD(__schedule); /*
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3377,6 +3376,10 @@ static inline void sched_submit_work(str
+@@ -3427,6 +3426,10 @@ static inline void sched_submit_work(str
if (tsk->flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk);
@@ -79,7 +79,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
* A: pool->attach_mutex protected.
*
* PL: wq_pool_mutex protected.
-@@ -411,6 +416,31 @@ static void workqueue_sysfs_unregister(s
+@@ -428,6 +433,31 @@ static void workqueue_sysfs_unregister(s
if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
else
@@ -111,7 +111,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
-@@ -843,10 +873,16 @@ static struct worker *first_idle_worker(
+@@ -860,10 +890,16 @@ static struct worker *first_idle_worker(
*/
static void wake_up_worker(struct worker_pool *pool)
{
@@ -129,7 +129,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
/**
-@@ -874,7 +910,7 @@ void wq_worker_running(struct task_struc
+@@ -892,7 +928,7 @@ void wq_worker_running(struct task_struc
*/
void wq_worker_sleeping(struct task_struct *task)
{
@@ -138,12 +138,12 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
struct worker_pool *pool;
/*
-@@ -891,25 +927,18 @@ void wq_worker_sleeping(struct task_stru
+@@ -909,26 +945,18 @@ void wq_worker_sleeping(struct task_stru
return;
worker->sleeping = 1;
- spin_lock_irq(&pool->lock);
-+
+
/*
* The counterpart of the following dec_and_test, implied mb,
* worklist not empty test sequence is in insert_work().
@@ -168,7 +168,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
/**
-@@ -1600,7 +1629,9 @@ static void worker_enter_idle(struct wor
+@@ -1655,7 +1683,9 @@ static void worker_enter_idle(struct wor
worker->last_active = jiffies;
/* idle_list is LIFO */
@@ -178,7 +178,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1633,7 +1664,9 @@ static void worker_leave_idle(struct wor
+@@ -1688,7 +1718,9 @@ static void worker_leave_idle(struct wor
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
@@ -188,7 +188,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
static struct worker *alloc_worker(int node)
-@@ -1799,7 +1832,9 @@ static void destroy_worker(struct worker
+@@ -1854,7 +1886,9 @@ static void destroy_worker(struct worker
pool->nr_workers--;
pool->nr_idle--;
diff --git a/patches/workqueue-use-locallock.patch b/patches/workqueue-use-locallock.patch
index 34761ce1f34141..b44d6da0bc1780 100644
--- a/patches/workqueue-use-locallock.patch
+++ b/patches/workqueue-use-locallock.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "workqueue_internal.h"
-@@ -331,6 +332,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
+@@ -348,6 +349,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
-@@ -1111,9 +1114,9 @@ static void put_pwq_unlocked(struct pool
+@@ -1127,9 +1130,9 @@ static void put_pwq_unlocked(struct pool
* As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -1215,7 +1218,7 @@ static int try_to_grab_pending(struct wo
+@@ -1233,7 +1236,7 @@ static int try_to_grab_pending(struct wo
struct worker_pool *pool;
struct pool_workqueue *pwq;
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* try to steal the timer if it exists */
if (is_dwork) {
-@@ -1279,7 +1282,7 @@ static int try_to_grab_pending(struct wo
+@@ -1297,7 +1300,7 @@ static int try_to_grab_pending(struct wo
spin_unlock(&pool->lock);
fail:
rcu_read_unlock();
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (work_is_canceling(work))
return -ENOENT;
cpu_relax();
-@@ -1351,7 +1354,7 @@ static void __queue_work(int cpu, struct
+@@ -1402,7 +1405,7 @@ static void __queue_work(int cpu, struct
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
debug_work_activate(work);
-@@ -1456,14 +1459,14 @@ bool queue_work_on(int cpu, struct workq
+@@ -1508,14 +1511,14 @@ bool queue_work_on(int cpu, struct workq
bool ret = false;
unsigned long flags;
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(queue_work_on);
-@@ -1530,14 +1533,14 @@ bool queue_delayed_work_on(int cpu, stru
+@@ -1582,14 +1585,14 @@ bool queue_delayed_work_on(int cpu, stru
unsigned long flags;
/* read the comment in __queue_work() */
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1572,7 +1575,7 @@ bool mod_delayed_work_on(int cpu, struct
+@@ -1624,7 +1627,7 @@ bool mod_delayed_work_on(int cpu, struct
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -2846,7 +2849,7 @@ static bool __cancel_work_timer(struct w
+@@ -2942,7 +2945,7 @@ static bool __cancel_work_timer(struct w
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
flush_work(work);
clear_work_data(work);
-@@ -2901,10 +2904,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -2997,10 +3000,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -2939,7 +2942,7 @@ bool cancel_delayed_work(struct delayed_
+@@ -3035,7 +3038,7 @@ bool cancel_delayed_work(struct delayed_
set_work_pool_and_clear_pending(&dwork->work,
get_work_pool_id(&dwork->work));
diff --git a/patches/workqueue-use-rcu.patch b/patches/workqueue-use-rcu.patch
index 8a70b6cb026ce8..aa6acc43e1064a 100644
--- a/patches/workqueue-use-rcu.patch
+++ b/patches/workqueue-use-rcu.patch
@@ -9,8 +9,8 @@ protected by preempt or irq disabled regions.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/workqueue.c | 96 +++++++++++++++++++++++++++++------------------------
- 1 file changed, 53 insertions(+), 43 deletions(-)
+ kernel/workqueue.c | 95 +++++++++++++++++++++++++++++------------------------
+ 1 file changed, 52 insertions(+), 43 deletions(-)
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* MD: wq_mayday_lock protected.
*/
-@@ -183,7 +183,7 @@ struct worker_pool {
+@@ -185,7 +185,7 @@ struct worker_pool {
atomic_t nr_running ____cacheline_aligned_in_smp;
/*
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* from get_work_pool().
*/
struct rcu_head rcu;
-@@ -212,7 +212,7 @@ struct pool_workqueue {
+@@ -214,7 +214,7 @@ struct pool_workqueue {
/*
* Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
-@@ -338,20 +338,20 @@ static void workqueue_sysfs_unregister(s
+@@ -355,20 +355,20 @@ static void workqueue_sysfs_unregister(s
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
-@@ -363,7 +363,7 @@ static void workqueue_sysfs_unregister(s
+@@ -380,7 +380,7 @@ static void workqueue_sysfs_unregister(s
* @pool: iteration cursor
* @pi: integer used for iteration
*
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
-@@ -395,7 +395,7 @@ static void workqueue_sysfs_unregister(s
+@@ -412,7 +412,7 @@ static void workqueue_sysfs_unregister(s
* @pwq: iteration cursor
* @wq: the target workqueue
*
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
-@@ -557,7 +557,7 @@ static int worker_pool_assign_id(struct
+@@ -574,7 +574,7 @@ static int worker_pool_assign_id(struct
* @wq: the target workqueue
* @node: the node ID
*
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
-@@ -701,8 +701,8 @@ static struct pool_workqueue *get_work_p
+@@ -718,8 +718,8 @@ static struct pool_workqueue *get_work_p
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
-@@ -1108,7 +1108,7 @@ static void put_pwq_unlocked(struct pool
+@@ -1124,7 +1124,7 @@ static void put_pwq_unlocked(struct pool
{
if (pwq) {
/*
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* following lock operations are safe.
*/
spin_lock_irq(&pwq->pool->lock);
-@@ -1234,6 +1234,7 @@ static int try_to_grab_pending(struct wo
+@@ -1252,6 +1252,7 @@ static int try_to_grab_pending(struct wo
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
@@ -132,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1272,10 +1273,12 @@ static int try_to_grab_pending(struct wo
+@@ -1290,10 +1291,12 @@ static int try_to_grab_pending(struct wo
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
@@ -145,16 +145,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_restore(*flags);
if (work_is_canceling(work))
return -ENOENT;
-@@ -1356,6 +1359,8 @@ static void __queue_work(int cpu, struct
+@@ -1407,6 +1410,7 @@ static void __queue_work(int cpu, struct
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
-+
+ rcu_read_lock();
retry:
if (req_cpu == WORK_CPU_UNBOUND)
- cpu = raw_smp_processor_id();
-@@ -1412,10 +1417,8 @@ static void __queue_work(int cpu, struct
+ cpu = wq_select_unbound_cpu(raw_smp_processor_id());
+@@ -1463,10 +1467,8 @@ static void __queue_work(int cpu, struct
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
@@ -167,7 +166,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
-@@ -1431,7 +1434,9 @@ static void __queue_work(int cpu, struct
+@@ -1484,7 +1486,9 @@ static void __queue_work(int cpu, struct
insert_work(pwq, work, worklist, work_flags);
@@ -177,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -2716,14 +2721,14 @@ static bool start_flush_work(struct work
+@@ -2811,14 +2815,14 @@ static bool start_flush_work(struct work
might_sleep();
@@ -195,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -2750,10 +2755,11 @@ static bool start_flush_work(struct work
+@@ -2847,10 +2851,11 @@ static bool start_flush_work(struct work
else
lock_map_acquire_read(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
@@ -208,7 +207,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return false;
}
-@@ -3161,7 +3167,7 @@ static void rcu_free_pool(struct rcu_hea
+@@ -3259,7 +3264,7 @@ static void rcu_free_pool(struct rcu_hea
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
@@ -217,7 +216,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
-@@ -3215,8 +3221,8 @@ static void put_unbound_pool(struct work
+@@ -3313,8 +3318,8 @@ static void put_unbound_pool(struct work
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
@@ -228,7 +227,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -3323,14 +3329,14 @@ static void pwq_unbound_release_workfn(s
+@@ -3421,14 +3426,14 @@ static void pwq_unbound_release_workfn(s
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
@@ -245,7 +244,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -3983,7 +3989,7 @@ void destroy_workqueue(struct workqueue_
+@@ -4078,7 +4083,7 @@ void destroy_workqueue(struct workqueue_
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
@@ -254,7 +253,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
/*
* We're the sole accessor of @wq at this point. Directly
-@@ -4076,7 +4082,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4171,7 +4176,8 @@ bool workqueue_congested(int cpu, struct
struct pool_workqueue *pwq;
bool ret;
@@ -264,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
-@@ -4087,7 +4094,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4182,7 +4188,8 @@ bool workqueue_congested(int cpu, struct
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
@@ -274,7 +273,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4113,15 +4121,15 @@ unsigned int work_busy(struct work_struc
+@@ -4208,15 +4215,15 @@ unsigned int work_busy(struct work_struc
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
@@ -294,7 +293,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4310,7 +4318,7 @@ void show_workqueue_state(void)
+@@ -4405,7 +4412,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;
@@ -303,7 +302,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_info("Showing busy workqueues and worker pools:\n");
-@@ -4361,7 +4369,7 @@ void show_workqueue_state(void)
+@@ -4458,7 +4465,7 @@ void show_workqueue_state(void)
spin_unlock_irqrestore(&pool->lock, flags);
}
@@ -312,7 +311,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -4711,16 +4719,16 @@ bool freeze_workqueues_busy(void)
+@@ -4808,16 +4815,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
@@ -332,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
-@@ -4910,7 +4918,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5007,7 +5014,8 @@ static ssize_t wq_pool_ids_show(struct d
const char *delim = "";
int node, written = 0;
@@ -342,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
-@@ -4918,7 +4927,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5015,7 +5023,8 @@ static ssize_t wq_pool_ids_show(struct d
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
diff --git a/patches/x86-UV-raw_spinlock-conversion.patch b/patches/x86-UV-raw_spinlock-conversion.patch
index 0dcf84e3bdf534..11869e7c3aaf38 100644
--- a/patches/x86-UV-raw_spinlock-conversion.patch
+++ b/patches/x86-UV-raw_spinlock-conversion.patch
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern struct uv_blade_info *uv_blade_info;
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
-@@ -947,7 +947,7 @@ void __init uv_system_init(void)
+@@ -950,7 +950,7 @@ void __init uv_system_init(void)
uv_blade_info[blade].pnode = pnode;
uv_blade_info[blade].nr_possible_cpus = 0;
uv_blade_info[blade].nr_online_cpus = 0;
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 5fa84b0893b128..71a343418f83ea 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -5788,6 +5788,13 @@ int kvm_arch_init(void *opaque)
+@@ -5850,6 +5850,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/patches/x86-mce-timer-hrtimer.patch b/patches/x86-mce-timer-hrtimer.patch
index 812ed9c329dc15..2215344ce34f3e 100644
--- a/patches/x86-mce-timer-hrtimer.patch
+++ b/patches/x86-mce-timer-hrtimer.patch
@@ -34,7 +34,7 @@ fold in:
#include <asm/processor.h>
#include <asm/traps.h>
-@@ -1236,7 +1237,7 @@ void mce_log_therm_throt_event(__u64 sta
+@@ -1240,7 +1241,7 @@ void mce_log_therm_throt_event(__u64 sta
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -43,7 +43,7 @@ fold in:
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
-@@ -1245,32 +1246,18 @@ static unsigned long mce_adjust_timer_de
+@@ -1249,32 +1250,18 @@ static unsigned long mce_adjust_timer_de
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
@@ -82,7 +82,7 @@ fold in:
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1293,7 +1280,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1297,7 +1284,7 @@ static void mce_timer_fn(unsigned long d
done:
__this_cpu_write(mce_next_interval, iv);
@@ -91,7 +91,7 @@ fold in:
}
/*
-@@ -1301,7 +1288,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1305,7 +1292,7 @@ static void mce_timer_fn(unsigned long d
*/
void mce_timer_kick(unsigned long interval)
{
@@ -100,7 +100,7 @@ fold in:
unsigned long iv = __this_cpu_read(mce_next_interval);
__restart_timer(t, interval);
-@@ -1316,7 +1303,7 @@ static void mce_timer_delete_all(void)
+@@ -1320,7 +1307,7 @@ static void mce_timer_delete_all(void)
int cpu;
for_each_online_cpu(cpu)
@@ -109,7 +109,7 @@ fold in:
}
static void mce_do_trigger(struct work_struct *work)
-@@ -1639,7 +1626,7 @@ static void __mcheck_cpu_clear_vendor(st
+@@ -1654,7 +1641,7 @@ static void __mcheck_cpu_clear_vendor(st
}
}
@@ -118,7 +118,7 @@ fold in:
{
unsigned long iv = check_interval * HZ;
-@@ -1648,16 +1635,17 @@ static void mce_start_timer(unsigned int
+@@ -1663,16 +1650,17 @@ static void mce_start_timer(unsigned int
per_cpu(mce_next_interval, cpu) = iv;
@@ -140,7 +140,7 @@ fold in:
mce_start_timer(cpu, t);
}
-@@ -2376,6 +2364,8 @@ static void mce_disable_cpu(void *h)
+@@ -2393,6 +2381,8 @@ static void mce_disable_cpu(void *h)
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
@@ -149,7 +149,7 @@ fold in:
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
-@@ -2398,6 +2388,7 @@ static void mce_reenable_cpu(void *h)
+@@ -2415,6 +2405,7 @@ static void mce_reenable_cpu(void *h)
if (b->init)
wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
}
@@ -157,7 +157,7 @@ fold in:
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2405,7 +2396,6 @@ static int
+@@ -2422,7 +2413,6 @@ static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -165,7 +165,7 @@ fold in:
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
-@@ -2425,11 +2415,9 @@ mce_cpu_callback(struct notifier_block *
+@@ -2442,11 +2432,9 @@ mce_cpu_callback(struct notifier_block *
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
diff --git a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
index 7793973e85851e..a8fcca8fc4bc7a 100644
--- a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
+++ b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
@@ -68,7 +68,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
#include <asm/processor.h>
#include <asm/traps.h>
-@@ -1313,6 +1314,56 @@ static void mce_do_trigger(struct work_s
+@@ -1317,6 +1318,56 @@ static void mce_do_trigger(struct work_s
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
@@ -125,7 +125,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
/*
* Notify the user(s) about new machine check events.
* Can be called from interrupt context, but not from machine check/NMI
-@@ -1320,19 +1371,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+@@ -1324,19 +1375,8 @@ static DECLARE_WORK(mce_trigger_work, mc
*/
int mce_notify_irq(void)
{
@@ -146,7 +146,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
return 1;
}
return 0;
-@@ -2456,6 +2496,10 @@ static __init int mcheck_init_device(voi
+@@ -2473,6 +2513,10 @@ static __init int mcheck_init_device(voi
goto err_out;
}
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index 6f860841f25635..e84821e11c9530 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ANON_INODES
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -220,7 +220,7 @@ long syscall_trace_enter(struct pt_regs
+@@ -202,7 +202,7 @@ long syscall_trace_enter(struct pt_regs
#define EXIT_TO_USERMODE_LOOP_FLAGS \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
-@@ -236,7 +236,7 @@ static void exit_to_usermode_loop(struct
+@@ -218,7 +218,7 @@ static void exit_to_usermode_loop(struct
/* We have work to do. */
local_irq_enable();
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
call preempt_schedule_irq
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -579,7 +579,23 @@ GLOBAL(retint_user)
+@@ -511,7 +511,23 @@ GLOBAL(retint_user)
bt $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_NOTSC (1 << TIF_NOTSC)
-@@ -152,6 +156,8 @@ struct thread_info {
+@@ -155,6 +159,8 @@ struct thread_info {
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
BLANK();
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-@@ -89,4 +90,5 @@ void common(void) {
+@@ -85,4 +86,5 @@ void common(void) {
BLANK();
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
diff --git a/patches/x86-use-gen-rwsem-spinlocks-rt.patch b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
index 7643217743fadd..291ac5200fc213 100644
--- a/patches/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -212,8 +212,11 @@ config ARCH_MAY_HAVE_PC_FDC
+@@ -230,8 +230,11 @@ config ARCH_MAY_HAVE_PC_FDC
def_bool y
depends on ISA_DMA_API