author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2016-10-06 10:19:21 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2016-10-06 10:19:21 +0200
commit     592d49034169263d89f13bffb9e8837967af40aa (patch)
tree       2a908158fdc483358421fabaefd7a52290fa2e66
parent     8596973fc83d29b923628e287cf65685b94bfbec (diff)
download   4.9-rt-patches-592d49034169263d89f13bffb9e8837967af40aa.tar.gz
[ANNOUNCE] 4.8-rt1
Dear RT folks!

I'm pleased to announce the v4.8-rt1 patch set.

Changes since v4.6.7-rt14:
  - rebased to v4.8

Known issues
  - CPU hotplug got a little better but can deadlock.

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.8-rt1

The RT patch against 4.8 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patch-4.8-rt1.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
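For readers unfamiliar with the workflow, a minimal sketch of applying this release on top of a vanilla v4.8 tree follows (assuming xz-utils and quilt are installed and the files above were downloaded next to the source tree; the exact paths are illustrative, not part of the announcement):

    # Option 1: the all-in-one patch
    cd linux-4.8
    xzcat ../patch-4.8-rt1.patch.xz | patch -p1

    # Option 2: the split quilt queue
    cd linux-4.8
    tar xf ../patches-4.8-rt1.tar.xz    # unpacks the patches/ directory incl. the series file
    quilt push -a                       # apply the whole queue in series order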
-rw-r--r--patches/ARM-imx-always-use-TWD-on-IMX6Q.patch30
-rw-r--r--patches/HACK-printk-drop-the-logbuf_lock-more-often.patch10
-rw-r--r--patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch6
-rw-r--r--patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch2
-rw-r--r--patches/Using-BUG_ON-as-an-assert-is-_never_-acceptable.patch48
-rw-r--r--patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch10
-rw-r--r--patches/arch-arm64-Add-lazy-preempt-support.patch30
-rw-r--r--patches/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch76
-rw-r--r--patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch50
-rw-r--r--patches/arm-convert-boot-lock-to-raw.patch20
-rw-r--r--patches/arm-lazy-preempt-correct-resched-condition.patch31
-rw-r--r--patches/arm-preempt-lazy-support.patch68
-rw-r--r--patches/arm64-xen--Make-XEN-depend-on-non-rt.patch2
-rw-r--r--patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch2
-rw-r--r--patches/block-blk-mq-use-swait.patch8
-rw-r--r--patches/block-mq-don-t-complete-requests-via-IPI.patch14
-rw-r--r--patches/block-mq-drop-preempt-disable.patch6
-rw-r--r--patches/block-shorten-interrupt-disabled-regions.patch10
-rw-r--r--patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch72
-rw-r--r--patches/cgroups-use-simple-wait-in-css_release.patch6
-rw-r--r--patches/completion-use-simple-wait-queues.patch22
-rw-r--r--patches/cond-resched-softirq-rt.patch6
-rw-r--r--patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch20
-rw-r--r--patches/cpu-rt-rework-cpu-down.patch46
-rw-r--r--patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch12
-rw-r--r--patches/cpu_down_move_migrate_enable_back.patch4
-rw-r--r--patches/cpufreq-drop-K8-s-driver-from-beeing-selected.patch2
-rw-r--r--patches/cpumask-disable-offstack-on-rt.patch4
-rw-r--r--patches/crypto-ccp-remove-rwlocks_types.h.patch22
-rw-r--r--patches/debugobjects-rt.patch2
-rw-r--r--patches/dm-make-rt-aware.patch8
-rw-r--r--patches/driver-net-ethernet-tile-Initialize-timer-as-pinned.patch40
-rw-r--r--patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch26
-rw-r--r--patches/drivers-net-8139-disable-irq-nosync.patch2
-rw-r--r--patches/drivers-net-fix-livelock-issues.patch126
-rw-r--r--patches/drivers-random-reduce-preempt-disabled-region.patch4
-rw-r--r--patches/drivers-tty-metag_da-Initialize-timer-as-pinned.patch37
-rw-r--r--patches/drivers-tty-mips_ejtag-Initialize-timer-as-pinned.patch40
-rw-r--r--patches/drivers-tty-pl011-irq-disable-madness.patch4
-rw-r--r--patches/drm-i915-Use-consistent-forcewake-auto-release-timeo.patch151
-rw-r--r--patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch4
-rw-r--r--patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch14
-rw-r--r--patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch8
-rw-r--r--patches/dump-stack-don-t-disable-preemption-during-trace.patch14
-rw-r--r--patches/fs-aio-simple-simple-work.patch10
-rw-r--r--patches/fs-dcache-include-wait.h.patch23
-rw-r--r--patches/fs-dcache-init-in_lookup_hashtable.patch27
-rw-r--r--patches/fs-dcache-resched-chill-only-if-we-make-no-progress.patch64
-rw-r--r--patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch37
-rw-r--r--patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch214
-rw-r--r--patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch138
-rw-r--r--patches/fs-replace-bh_uptodate_lock-for-rt.patch12
-rw-r--r--patches/ftrace-migrate-disable-tracing.patch6
-rw-r--r--patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch (renamed from patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch)2
-rw-r--r--patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch158
-rw-r--r--patches/genirq-force-threading.patch2
-rw-r--r--patches/genirq-update-irq_set_irqchip_state-documentation.patch2
-rw-r--r--patches/gpu_don_t_check_for_the_lock_owner.patch32
-rw-r--r--patches/hlist-Add-hlist_is_singular_node-helper.patch38
-rw-r--r--patches/hotplug-light-get-online-cpus.patch12
-rw-r--r--patches/hotplug-use-migrate-disable.patch4
-rw-r--r--patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch62
-rw-r--r--patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch53
-rw-r--r--patches/hrtimers-prepare-full-preemption.patch10
-rw-r--r--patches/hwlatdetect.patch2
-rw-r--r--patches/i2c-omap-drop-the-lock-hard-irq-context.patch33
-rw-r--r--patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch4
-rw-r--r--patches/i915_compile_fix.patch23
-rw-r--r--patches/infiniband-mellanox-ib-use-nort-irq.patch4
-rw-r--r--patches/infiniband-ulp-ipoib-remove-pkey_mutex.patch25
-rw-r--r--patches/introduce_migrate_disable_cpu_light.patch22
-rw-r--r--patches/iommu-amd--Use-WARN_ON_NORT.patch4
-rw-r--r--patches/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch81
-rw-r--r--patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch58
-rw-r--r--patches/ipc-sem-rework-semaphore-wakeups.patch8
-rw-r--r--patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch4
-rw-r--r--patches/irqwork-Move-irq-safe-work-to-irq-context.patch4
-rw-r--r--patches/irqwork-push_most_work_into_softirq_context.patch4
-rw-r--r--patches/jbd2-Fix-lockdep-annotation-in-add_transaction_credi.patch64
-rw-r--r--patches/jump-label-rt.patch4
-rw-r--r--patches/kconfig-disable-a-few-options-rt.patch4
-rw-r--r--patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch6
-rw-r--r--patches/kernel-futex-don-t-deboost-too-early.patch14
-rw-r--r--patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch8
-rw-r--r--patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch4
-rw-r--r--patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch2
-rw-r--r--patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch4
-rw-r--r--patches/kernel-rtmutex-only-warn-once-on-a-try-lock-from-bad.patch26
-rw-r--r--patches/kgb-serial-hackaround.patch2
-rw-r--r--patches/latency-hist.patch10
-rw-r--r--patches/leds-trigger-disable-CPU-trigger-on-RT.patch2
-rw-r--r--patches/lglocks-rt.patch2
-rw-r--r--patches/localversion.patch2
-rw-r--r--patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch8
-rw-r--r--patches/lockdep-no-softirq-accounting-on-rt.patch4
-rw-r--r--patches/lockinglglocks_Use_preempt_enabledisable_nort.patch (renamed from patches/lockinglglocks_Use_preempt_enabledisable_nort()_in_lg_double_locklg_double_unlock.patch)0
-rw-r--r--patches/md-raid5-percpu-handling-rt-aware.patch8
-rw-r--r--patches/mips-disable-highmem-on-rt.patch2
-rw-r--r--patches/mm-convert-swap-to-percpu-locked.patch41
-rw-r--r--patches/mm-disable-sloub-rt.patch6
-rw-r--r--patches/mm-enable-slub.patch86
-rw-r--r--patches/mm-filemap-don-t-plant-shadow-entries-without-radix-.patch185
-rw-r--r--patches/mm-filemap-fix-mapping-nrpages-double-accounting-in-.patch40
-rw-r--r--patches/mm-make-vmstat-rt-aware.patch64
-rw-r--r--patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch4
-rw-r--r--patches/mm-memcontrol-do_not_disable_irq.patch10
-rw-r--r--patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch2
-rw-r--r--patches/mm-page-alloc-use-local-lock-on-target-cpu.patch2
-rw-r--r--patches/mm-page_alloc-reduce-lock-sections-further.patch62
-rw-r--r--patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch53
-rw-r--r--patches/mm-perform-lru_add_drain_all-remotely.patch43
-rw-r--r--patches/mm-protect-activate-switch-mm.patch2
-rw-r--r--patches/mm-rt-kmap-atomic-scheduling.patch2
-rw-r--r--patches/mm-vmalloc-use-get-cpu-light.patch10
-rw-r--r--patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch74
-rw-r--r--patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch145
-rw-r--r--patches/mmci-remove-bogus-irq-save.patch4
-rw-r--r--patches/move_sched_delayed_work_to_helper.patch54
-rw-r--r--patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch273
-rw-r--r--patches/net-add-back-the-missing-serialization-in-ip_send_un.patch26
-rw-r--r--patches/net-another-local-irq-disable-alloc-atomic-headache.patch8
-rw-r--r--patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch2
-rw-r--r--patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch14
-rw-r--r--patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch4
-rw-r--r--patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch8
-rw-r--r--patches/net-ipv4-inet-Initialize-timers-as-pinned.patch65
-rw-r--r--patches/net-make-devnet_rename_seq-a-mutex.patch12
-rw-r--r--patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch117
-rw-r--r--patches/net-prevent-abba-deadlock.patch2
-rw-r--r--patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch4
-rw-r--r--patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch2
-rw-r--r--patches/net-tx-action-avoid-livelock-on-rt.patch92
-rw-r--r--patches/net-use-cpu-chill.patch8
-rw-r--r--patches/net-wireless-warn-nort.patch4
-rw-r--r--patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch2
-rw-r--r--patches/oleg-signal-rt-fix.patch4
-rw-r--r--patches/panic-disable-random-on-rt.patch2
-rw-r--r--patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch12
-rw-r--r--patches/perf-make-swevent-hrtimer-irqsafe.patch2
-rw-r--r--patches/peter_zijlstra-frob-rcu.patch2
-rw-r--r--patches/posix-timers-thread-posix-cpu-timers-on-rt.patch4
-rw-r--r--patches/power-disable-highmem-on-rt.patch2
-rw-r--r--patches/powerpc-preempt-lazy-support.patch32
-rw-r--r--patches/preempt-lazy-check-preempt_schedule.patch73
-rw-r--r--patches/preempt-lazy-support.patch168
-rw-r--r--patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch2
-rw-r--r--patches/printk-kill.patch12
-rw-r--r--patches/printk-rt-aware.patch10
-rw-r--r--patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch12
-rw-r--r--patches/radix-tree-rt-aware.patch34
-rw-r--r--patches/random-make-it-work-on-rt.patch47
-rw-r--r--patches/rbtree-include-rcu.h-because-we-use-it.patch24
-rw-r--r--patches/rcu-Eliminate-softirq-processing-from-rcutree.patch24
-rw-r--r--patches/rcu-disable-more-spots-of-rcu_bh.patch63
-rw-r--r--patches/rcu-disable-rcu-fast-no-hz-on-rt.patch2
-rw-r--r--patches/rcu-make-RCU_BOOST-default-on-RT.patch4
-rw-r--r--patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch142
-rw-r--r--patches/rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch36
-rw-r--r--patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch2
-rw-r--r--patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch2
-rw-r--r--patches/relay-fix-timer-madness.patch2
-rw-r--r--patches/rt-add-rt-locks.patch186
-rw-r--r--patches/rt-introduce-cpu-chill.patch2
-rw-r--r--patches/rtmutex-Use-chainwalking-control-enum.patch27
-rw-r--r--patches/rtmutex-add-a-first-shot-of-ww_mutex.patch28
-rw-r--r--patches/rtmutex-futex-prepare-rt.patch10
-rw-r--r--patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch271
-rw-r--r--patches/rtmutex-trylock-is-okay-on-RT.patch4
-rw-r--r--patches/rtmutex_dont_include_rcu.patch170
-rw-r--r--patches/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch2
-rw-r--r--patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch2
-rw-r--r--patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch2
-rw-r--r--patches/sched-delay-put-task.patch14
-rw-r--r--patches/sched-disable-rt-group-sched-on-rt.patch2
-rw-r--r--patches/sched-lazy_preempt-avoid-a-warning-in-the-RT-case.patch20
-rw-r--r--patches/sched-limit-nr-migrate.patch2
-rw-r--r--patches/sched-might-sleep-do-not-account-rcu-depth.patch6
-rw-r--r--patches/sched-mmdrop-delayed.patch60
-rw-r--r--patches/sched-preempt-Fix-preempt_count-manipulations.patch51
-rw-r--r--patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch261
-rw-r--r--patches/sched-rt-mutex-wakeup.patch10
-rw-r--r--patches/sched-ttwu-ensure-success-return-is-correct.patch2
-rw-r--r--patches/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch57
-rw-r--r--patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch2
-rw-r--r--patches/scsi-fcoe-Fix-get_cpu-put_cpu_light-imbalance-in-fco.patch27
-rw-r--r--patches/scsi-fcoe-rt-aware.patch41
-rw-r--r--patches/seqlock-prevent-rt-starvation.patch10
-rw-r--r--patches/series88
-rw-r--r--patches/signal-Use-hrtimer-for-sigtimedwait.patch77
-rw-r--r--patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch4
-rw-r--r--patches/skbufhead-raw-lock.patch16
-rw-r--r--patches/slub-disable-SLUB_CPU_PARTIAL.patch2
-rw-r--r--patches/slub-enable-irqs-for-no-wait.patch6
-rw-r--r--patches/softirq-disable-softirq-stacks-for-rt.patch16
-rw-r--r--patches/softirq-preempt-fix-3-re.patch14
-rw-r--r--patches/softirq-split-locks.patch16
-rw-r--r--patches/sparc64-use-generic-rwsem-spinlocks-rt.patch2
-rw-r--r--patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch14
-rw-r--r--patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch8
-rw-r--r--patches/stop-machine-raw-lock.patch14
-rw-r--r--patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch2
-rw-r--r--patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch6
-rw-r--r--patches/suspend-prevernt-might-sleep-splats.patch18
-rw-r--r--patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch6
-rw-r--r--patches/tick-broadcast--Make-hrtimer-irqsafe.patch2
-rw-r--r--patches/tick-sched-Remove-pointless-empty-function.patch70
-rw-r--r--patches/timekeeping-split-jiffies-lock.patch4
-rw-r--r--patches/timer-Forward-wheel-clock-whenever-possible.patch240
-rw-r--r--patches/timer-Give-a-few-structs-and-members-proper-names.patch421
-rw-r--r--patches/timer-Make-pinned-a-timer-property.patch144
-rw-r--r--patches/timer-Move-__run_timers-function.patch91
-rw-r--r--patches/timer-Only-wake-softirq-if-necessary.patch34
-rw-r--r--patches/timer-Optimization-for-same-expiry-time-in-mod_timer.patch130
-rw-r--r--patches/timer-Optimize-collect-timers-for-NOHZ.patch128
-rw-r--r--patches/timer-Reduce-the-CPU-index-space-to-256k.patch34
-rw-r--r--patches/timer-Remove-mod_timer_pinned.patch116
-rw-r--r--patches/timer-Remove-slack-leftovers.patch161
-rw-r--r--patches/timer-Split-out-index-calculation.patch105
-rw-r--r--patches/timer-Switch-to-a-non-cascading-wheel.patch1169
-rw-r--r--patches/timer-add-setup_deferrable_timer-macro.patch26
-rw-r--r--patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch2
-rw-r--r--patches/timer-fd-avoid-live-lock.patch2
-rw-r--r--patches/timer-make-the-base-lock-raw.patch30
-rw-r--r--patches/timers-prepare-for-full-preemption.patch16
-rw-r--r--patches/tracing-Show-the-preempt-count-of-when-the-event-was.patch41
-rw-r--r--patches/tracing-account-for-preempt-off-in-preempt_schedule.patch4
-rw-r--r--patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch2
-rw-r--r--patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch4
-rw-r--r--patches/usb-use-_nort-in-giveback.patch2
-rw-r--r--patches/work-queue-work-around-irqsafe-timer-optimization.patch2
-rw-r--r--patches/workqueue-distangle-from-rq-lock.patch28
-rw-r--r--patches/workqueue-prevent-deadlock-stall.patch16
-rw-r--r--patches/workqueue-use-locallock.patch20
-rw-r--r--patches/workqueue-use-rcu.patch44
-rw-r--r--patches/x86-UV-raw_spinlock-conversion.patch48
-rw-r--r--patches/x86-apic-uv-Initialize-timer-as-pinned.patch40
-rw-r--r--patches/x86-crypto-reduce-preempt-disabled-regions.patch10
-rw-r--r--patches/x86-io-apic-migra-no-unmask.patch2
-rw-r--r--patches/x86-kvm-require-const-tsc-for-rt.patch2
-rw-r--r--patches/x86-mce-Initialize-timer-as-pinned.patch40
-rw-r--r--patches/x86-mce-timer-hrtimer.patch24
-rw-r--r--patches/x86-mce-use-swait-queue-for-mce-wakeups.patch6
-rw-r--r--patches/x86-mm-disable-preemption-during-CR3-read-write.patch67
-rw-r--r--patches/x86-preempt-lazy-fixup-should_resched.patch49
-rw-r--r--patches/x86-preempt-lazy.patch98
-rw-r--r--patches/x86-use-gen-rwsem-spinlocks-rt.patch2
246 files changed, 3259 insertions, 6523 deletions
diff --git a/patches/ARM-imx-always-use-TWD-on-IMX6Q.patch b/patches/ARM-imx-always-use-TWD-on-IMX6Q.patch
deleted file mode 100644
index 32a5205eb2951b..00000000000000
--- a/patches/ARM-imx-always-use-TWD-on-IMX6Q.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 6 Apr 2016 17:30:28 +0200
-Subject: [PATCH] ARM: imx: always use TWD on IMX6Q
-
-There is no reason to limit the TWD to be used on SMP kernels only if the
-hardware has it available.
-On Wandboard i.MX6SOLO, running PREEMPT-RT and cyclictest I see as max
-immediately after start in idle:
-UP : ~90us
-SMP: ~50us
-UP + TWD: ~20us.
-Based on this numbers I prefer the TWD over the slightly slower MXC
-timer.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/mach-imx/Kconfig | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/arch/arm/mach-imx/Kconfig
-+++ b/arch/arm/mach-imx/Kconfig
-@@ -526,7 +526,7 @@ config SOC_IMX6Q
- bool "i.MX6 Quad/DualLite support"
- select ARM_ERRATA_764369 if SMP
- select HAVE_ARM_SCU if SMP
-- select HAVE_ARM_TWD if SMP
-+ select HAVE_ARM_TWD
- select PCI_DOMAINS if PCI
- select PINCTRL_IMX6Q
- select SOC_IMX6
diff --git a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
index 8780a1b733a84c..0b9be8c6d7c39c 100644
--- a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1268,6 +1268,7 @@ static int syslog_print_all(char __user
+@@ -1399,6 +1399,7 @@ static int syslog_print_all(char __user
{
char *text;
int len = 0;
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
-@@ -1279,6 +1280,14 @@ static int syslog_print_all(char __user
+@@ -1410,6 +1411,14 @@ static int syslog_print_all(char __user
u64 seq;
u32 idx;
enum log_flags prev;
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Find first record that fits, including all following records,
-@@ -1294,6 +1303,14 @@ static int syslog_print_all(char __user
+@@ -1425,6 +1434,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* move first record forward until length fits into the buffer */
-@@ -1307,6 +1324,14 @@ static int syslog_print_all(char __user
+@@ -1438,6 +1455,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* last message fitting into this dump */
-@@ -1347,6 +1372,7 @@ static int syslog_print_all(char __user
+@@ -1478,6 +1503,7 @@ static int syslog_print_all(char __user
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
diff --git a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index f978063030b058..202bebac6b9aec 100644
--- a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
-@@ -582,7 +582,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -584,7 +584,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
kvm_timer_flush_hwstate(vcpu);
kvm_vgic_flush_hwstate(vcpu);
-@@ -603,7 +603,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -605,7 +605,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_pmu_sync_hwstate(vcpu);
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
}
-@@ -659,7 +659,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -661,7 +661,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_vgic_sync_hwstate(vcpu);
diff --git a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch b/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
index f2452555eb90de..26032cc3f29c34 100644
--- a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
+++ b/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
-@@ -1870,6 +1870,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+@@ -1938,6 +1938,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS_PINNED);
apic->lapic_timer.timer.function = apic_timer_fn;
diff --git a/patches/Using-BUG_ON-as-an-assert-is-_never_-acceptable.patch b/patches/Using-BUG_ON-as-an-assert-is-_never_-acceptable.patch
new file mode 100644
index 00000000000000..0f0bd690a77be5
--- /dev/null
+++ b/patches/Using-BUG_ON-as-an-assert-is-_never_-acceptable.patch
@@ -0,0 +1,48 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 3 Oct 2016 21:03:48 -0700
+Subject: [PATCH] Using BUG_ON() as an assert() is _never_ acceptable
+
+Upstream commit 21f54ddae449f4bdd9f1498124901d67202243d9
+
+That just generally kills the machine, and makes debugging only much
+harder, since the traces may long be gone.
+
+Debugging by assert() is a disease. Don't do it. If you can continue,
+you're much better off doing so with a live machine where you have a
+much higher chance that the report actually makes it to the system logs,
+rather than result in a machine that is just completely dead.
+
+The only valid situation for BUG_ON() is when continuing is not an
+option, because there is massive corruption. But if you are just
+verifying that something is true, you warn about your broken assumptions
+(preferably just once), and limp on.
+
+Fixes: 22f2ac51b6d6 ("mm: workingset: fix crash in shadow node shrinker caused by replace_page_cache_page()")
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Miklos Szeredi <miklos@szeredi.hu>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ include/linux/swap.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -257,7 +257,7 @@ static inline void workingset_node_pages
+
+ static inline void workingset_node_pages_dec(struct radix_tree_node *node)
+ {
+- VM_BUG_ON(!workingset_node_pages(node));
++ VM_WARN_ON_ONCE(!workingset_node_pages(node));
+ node->count--;
+ }
+
+@@ -273,7 +273,7 @@ static inline void workingset_node_shado
+
+ static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
+ {
+- VM_BUG_ON(!workingset_node_shadows(node));
++ VM_WARN_ON_ONCE(!workingset_node_shadows(node));
+ node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
+ }
+
diff --git a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
index aab925b93c201f..f374772183e46a 100644
--- a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+++ b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Mutex for _OSI support */
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
-@@ -269,14 +269,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
+@@ -363,14 +363,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto exit;
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
-@@ -374,7 +374,7 @@ acpi_status acpi_write_bit_register(u32
+@@ -373,7 +373,7 @@ acpi_status acpi_write_bit_register(u32
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* At this point, we know that the parent register is one of the
-@@ -435,7 +435,7 @@ acpi_status acpi_write_bit_register(u32
+@@ -434,7 +434,7 @@ acpi_status acpi_write_bit_register(u32
unlock_and_exit:
@@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Delete the reader/writer lock */
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
-@@ -127,6 +127,7 @@
+@@ -131,6 +131,7 @@
#define acpi_cache_t struct kmem_cache
#define acpi_spinlock spinlock_t *
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define acpi_cpu_flags unsigned long
/* Use native linux version of acpi_os_allocate_zeroed */
-@@ -145,6 +146,20 @@
+@@ -149,6 +150,20 @@
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index e6a68d6994e846..34de1d18e5778d 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -12,21 +12,21 @@ indicate that support for full RT preemption is now available.
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
---
arch/arm64/Kconfig | 1 +
- arch/arm64/include/asm/thread_info.h | 3 +++
+ arch/arm64/include/asm/thread_info.h | 6 +++++-
arch/arm64/kernel/asm-offsets.c | 1 +
arch/arm64/kernel/entry.S | 13 ++++++++++---
- 4 files changed, 15 insertions(+), 3 deletions(-)
+ 4 files changed, 17 insertions(+), 4 deletions(-)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -81,6 +81,7 @@ config ARM64
+@@ -90,6 +90,7 @@ config ARM64
+ select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
- select HAVE_RCU_TABLE_FREE
+ select HAVE_PREEMPT_LAZY
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RCU_TABLE_FREE
select HAVE_SYSCALL_TRACEPOINTS
- select IOMMU_DMA if IOMMU_SUPPORT
- select IRQ_DOMAIN
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -49,6 +49,7 @@ struct thread_info {
@@ -53,9 +53,19 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#define _TIF_NOHZ (1 << TIF_NOHZ)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+@@ -132,7 +135,8 @@ static inline struct thread_info *curren
+ #define _TIF_32BIT (1 << TIF_32BIT)
+
+ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
++ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
++ _TIF_NEED_RESCHED_LAZY)
+
+ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
-@@ -36,6 +36,7 @@ int main(void)
+@@ -37,6 +37,7 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -65,7 +75,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
-@@ -426,11 +426,16 @@ ENDPROC(el1_sync)
+@@ -434,11 +434,16 @@ ENDPROC(el1_sync)
#ifdef CONFIG_PREEMPT
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
@@ -85,7 +95,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
-@@ -444,6 +449,7 @@ ENDPROC(el1_irq)
+@@ -452,6 +457,7 @@ ENDPROC(el1_irq)
1: bl preempt_schedule_irq // irq en/disable is done inside
ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
@@ -93,7 +103,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
ret x24
#endif
-@@ -690,6 +696,7 @@ ENDPROC(cpu_switch_to)
+@@ -708,6 +714,7 @@ ENDPROC(cpu_switch_to)
*/
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
diff --git a/patches/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch b/patches/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch
deleted file mode 100644
index cc3e344a5ed206..00000000000000
--- a/patches/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 22 Jan 2016 21:33:39 +0100
-Subject: arm+arm64: lazy-preempt: add TIF_NEED_RESCHED_LAZY to _TIF_WORK_MASK
-
-_TIF_WORK_MASK is used to check for TIF_NEED_RESCHED so we need to check
-for TIF_NEED_RESCHED_LAZY here, too.
-
-Reported-by: Grygorii Strashko <grygorii.strashko@ti.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/include/asm/thread_info.h | 7 ++++---
- arch/arm/kernel/entry-common.S | 9 +++++++--
- arch/arm64/include/asm/thread_info.h | 3 ++-
- 3 files changed, 13 insertions(+), 6 deletions(-)
-
---- a/arch/arm/include/asm/thread_info.h
-+++ b/arch/arm/include/asm/thread_info.h
-@@ -143,8 +143,8 @@ extern int vfp_restore_user_hwstate(stru
- #define TIF_SYSCALL_TRACE 4 /* syscall trace active */
- #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
- #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
--#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
--#define TIF_NEED_RESCHED_LAZY 8
-+#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
-+#define TIF_NEED_RESCHED_LAZY 7
-
- #define TIF_NOHZ 12 /* in adaptive nohz mode */
- #define TIF_USING_IWMMXT 17
-@@ -170,7 +170,8 @@ extern int vfp_restore_user_hwstate(stru
- * Change these and you break ASM code in entry-common.S
- */
- #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
-+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-+ _TIF_NEED_RESCHED_LAZY)
-
- #endif /* __KERNEL__ */
- #endif /* __ASM_ARM_THREAD_INFO_H */
---- a/arch/arm/kernel/entry-common.S
-+++ b/arch/arm/kernel/entry-common.S
-@@ -36,7 +36,9 @@
- UNWIND(.cantunwind )
- disable_irq_notrace @ disable interrupts
- ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
-- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
-+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
-+ bne fast_work_pending
-+ tst r1, #_TIF_SECCOMP
- bne fast_work_pending
-
- /* perform architecture specific actions before user return */
-@@ -62,8 +64,11 @@ ENDPROC(ret_fast_syscall)
- str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
- disable_irq_notrace @ disable interrupts
- ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
-- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
-+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
-+ bne do_slower_path
-+ tst r1, #_TIF_SECCOMP
- beq no_work_pending
-+do_slower_path:
- UNWIND(.fnend )
- ENDPROC(ret_fast_syscall)
-
---- a/arch/arm64/include/asm/thread_info.h
-+++ b/arch/arm64/include/asm/thread_info.h
-@@ -135,7 +135,8 @@ static inline struct thread_info *curren
- #define _TIF_32BIT (1 << TIF_32BIT)
-
- #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
-+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
-+ _TIF_NEED_RESCHED_LAZY)
-
- #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
index f6201495041f06..86067558f73e87 100644
--- a/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
+++ b/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
@@ -13,9 +13,9 @@ commit 8fe82a55 ("ARM: at91: sparse irq support") which is included since v3.6.
Patch based on what Sami Pietikäinen <Sami.Pietikainen@wapice.com> suggested].
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/clocksource/timer-atmel-pit.c | 17 +++++++++--------
- drivers/clocksource/timer-atmel-st.c | 32 ++++++++++++++++++++++----------
- 2 files changed, 31 insertions(+), 18 deletions(-)
+ drivers/clocksource/timer-atmel-pit.c | 18 +++++++++---------
+ drivers/clocksource/timer-atmel-st.c | 34 ++++++++++++++++++++++------------
+ 2 files changed, 31 insertions(+), 21 deletions(-)
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -44,24 +44,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* update clocksource counter */
data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
-@@ -181,7 +190,6 @@ static void __init at91sam926x_pit_commo
- {
- unsigned long pit_rate;
- unsigned bits;
-- int ret;
-
- /*
- * Use our actual MCK to figure out how many MCK/16 ticks per
-@@ -206,13 +214,6 @@ static void __init at91sam926x_pit_commo
- data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
- clocksource_register_hz(&data->clksrc, pit_rate);
+@@ -211,15 +220,6 @@ static int __init at91sam926x_pit_common
+ return ret;
+ }
- /* Set up irq handler */
- ret = request_irq(data->irq, at91sam926x_pit_interrupt,
- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
- "at91_tick", data);
-- if (ret)
-- panic(pr_fmt("Unable to setup IRQ\n"));
+- if (ret) {
+- pr_err("Unable to setup IRQ\n");
+- return ret;
+- }
-
/* Set up and register clockevents */
data->clkevt.name = "pit";
@@ -115,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* PIT for periodic irqs; fixed rate of 1/HZ */
irqmask = AT91_ST_PITS;
regmap_write(regmap_st, AT91_ST_PIMR, timer_latch);
-@@ -198,7 +217,7 @@ static void __init atmel_st_timer_init(s
+@@ -198,7 +217,7 @@ static int __init atmel_st_timer_init(st
{
struct clk *sclk;
unsigned int sclk_rate, val;
@@ -123,24 +117,28 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ int ret;
regmap_st = syscon_node_to_regmap(node);
- if (IS_ERR(regmap_st))
-@@ -210,17 +229,10 @@ static void __init atmel_st_timer_init(s
+ if (IS_ERR(regmap_st)) {
+@@ -212,21 +231,12 @@ static int __init atmel_st_timer_init(st
regmap_read(regmap_st, AT91_ST_SR, &val);
/* Get the interrupts property */
- irq = irq_of_parse_and_map(node, 0);
-- if (!irq)
+- if (!irq) {
+ atmel_st_irq = irq_of_parse_and_map(node, 0);
-+ if (!atmel_st_irq)
- panic(pr_fmt("Unable to get IRQ from DT\n"));
++ if (!atmel_st_irq) {
+ pr_err("Unable to get IRQ from DT\n");
+ return -EINVAL;
+ }
- /* Make IRQs happen for the system timer */
- ret = request_irq(irq, at91rm9200_timer_interrupt,
- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
- "at91_tick", regmap_st);
-- if (ret)
-- panic(pr_fmt("Unable to setup IRQ\n"));
+- if (ret) {
+- pr_err("Unable to setup IRQ\n");
+- return ret;
+- }
-
sclk = of_clk_get(node, 0);
- if (IS_ERR(sclk))
- panic(pr_fmt("Unable to get slow clock\n"));
+ if (IS_ERR(sclk)) {
+ pr_err("Unable to get slow clock\n");
diff --git a/patches/arm-convert-boot-lock-to-raw.patch b/patches/arm-convert-boot-lock-to-raw.patch
index 5ca037db214b8c..5a45693ab81c36 100644
--- a/patches/arm-convert-boot-lock-to-raw.patch
+++ b/patches/arm-convert-boot-lock-to-raw.patch
@@ -167,16 +167,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
-@@ -43,7 +43,7 @@
- /* SCU base address */
- static void __iomem *scu_base;
+@@ -64,7 +64,7 @@ static const struct omap_smp_config omap
+ .startup_addr = omap5_secondary_startup,
+ };
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
void __iomem *omap4_get_scu_base(void)
{
-@@ -74,8 +74,8 @@ static void omap4_secondary_init(unsigne
+@@ -131,8 +131,8 @@ static void omap4_secondary_init(unsigne
/*
* Synchronise with the boot thread.
*/
@@ -187,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -89,7 +89,7 @@ static int omap4_boot_secondary(unsigned
+@@ -146,7 +146,7 @@ static int omap4_boot_secondary(unsigned
* Set synchronisation state between this boot processor
* and the secondary one
*/
@@ -196,7 +196,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Update the AuxCoreBoot0 with boot state for secondary core.
-@@ -166,7 +166,7 @@ static int omap4_boot_secondary(unsigned
+@@ -223,7 +223,7 @@ static int omap4_boot_secondary(unsigned
* Now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@@ -367,7 +367,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
-@@ -30,7 +30,7 @@ static void write_pen_release(int val)
+@@ -32,7 +32,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
@@ -376,7 +376,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void versatile_secondary_init(unsigned int cpu)
{
-@@ -43,8 +43,8 @@ void versatile_secondary_init(unsigned i
+@@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned i
/*
* Synchronise with the boot thread.
*/
@@ -387,7 +387,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -55,7 +55,7 @@ int versatile_boot_secondary(unsigned in
+@@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned in
* Set synchronisation state between this boot processor
* and the secondary one
*/
@@ -396,7 +396,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This is really belt and braces; we hold unintended secondary
-@@ -85,7 +85,7 @@ int versatile_boot_secondary(unsigned in
+@@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned in
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
diff --git a/patches/arm-lazy-preempt-correct-resched-condition.patch b/patches/arm-lazy-preempt-correct-resched-condition.patch
deleted file mode 100644
index b7f4f1a19cca31..00000000000000
--- a/patches/arm-lazy-preempt-correct-resched-condition.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 24 May 2016 12:56:38 +0200
-Subject: [PATCH] arm: lazy preempt: correct resched condition
-
-If we get out of preempt_schedule_irq() then we check for NEED_RESCHED
-and call the former function again if set because the preemption counter
-has be zero at this point.
-However the counter for lazy-preempt might not be zero therefore we have
-to check the counter before looking at the need_resched_lazy flag.
-
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/kernel/entry-armv.S | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
---- a/arch/arm/kernel/entry-armv.S
-+++ b/arch/arm/kernel/entry-armv.S
-@@ -244,7 +244,11 @@ ENDPROC(__irq_svc)
- bne 1b
- tst r0, #_TIF_NEED_RESCHED_LAZY
- reteq r8 @ go again
-- b 1b
-+ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
-+ teq r0, #0 @ if preempt lazy count != 0
-+ beq 1b
-+ ret r8 @ go again
-+
- #endif
-
- __und_fault:
diff --git a/patches/arm-preempt-lazy-support.patch b/patches/arm-preempt-lazy-support.patch
index f26f3c4963ccd1..471b51beeff4ac 100644
--- a/patches/arm-preempt-lazy-support.patch
+++ b/patches/arm-preempt-lazy-support.patch
@@ -7,15 +7,16 @@ Implement the arm pieces for lazy preempt.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/Kconfig | 1 +
- arch/arm/include/asm/thread_info.h | 3 +++
+ arch/arm/include/asm/thread_info.h | 8 ++++++--
arch/arm/kernel/asm-offsets.c | 1 +
- arch/arm/kernel/entry-armv.S | 13 +++++++++++--
+ arch/arm/kernel/entry-armv.S | 19 ++++++++++++++++---
+ arch/arm/kernel/entry-common.S | 9 +++++++--
arch/arm/kernel/signal.c | 3 ++-
- 5 files changed, 18 insertions(+), 3 deletions(-)
+ 6 files changed, 33 insertions(+), 8 deletions(-)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -71,6 +71,7 @@ config ARM
+@@ -75,6 +75,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -33,11 +34,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
-@@ -143,6 +144,7 @@ extern int vfp_restore_user_hwstate(stru
+@@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(stru
+ #define TIF_SYSCALL_TRACE 4 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
- #define TIF_SECCOMP 7 /* seccomp syscall filtering active */
-+#define TIF_NEED_RESCHED_LAZY 8
+-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
++#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
++#define TIF_NEED_RESCHED_LAZY 7
#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
@@ -49,6 +52,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+@@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(stru
+ * Change these and you break ASM code in entry-common.S
+ */
+ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
++ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
++ _TIF_NEED_RESCHED_LAZY)
+
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_ARM_THREAD_INFO_H */
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -65,6 +65,7 @@ int main(void)
@@ -61,9 +74,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
-@@ -215,11 +215,18 @@ ENDPROC(__dabt_svc)
+@@ -220,11 +220,18 @@ ENDPROC(__dabt_svc)
+
#ifdef CONFIG_PREEMPT
- get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
- ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
@@ -82,15 +95,48 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
svc_exit r5, irq = 1 @ return from exception
-@@ -234,6 +241,8 @@ ENDPROC(__irq_svc)
+@@ -239,8 +246,14 @@ ENDPROC(__irq_svc)
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
+ bne 1b
+ tst r0, #_TIF_NEED_RESCHED_LAZY
reteq r8 @ go again
- b 1b
+- b 1b
++ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r0, #0 @ if preempt lazy count != 0
++ beq 1b
++ ret r8 @ go again
++
#endif
+
+ __und_fault:
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -36,7 +36,9 @@
+ UNWIND(.cantunwind )
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++ bne fast_work_pending
++ tst r1, #_TIF_SECCOMP
+ bne fast_work_pending
+
+ /* perform architecture specific actions before user return */
+@@ -62,8 +64,11 @@ ENDPROC(ret_fast_syscall)
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++ bne do_slower_path
++ tst r1, #_TIF_SECCOMP
+ beq no_work_pending
++do_slower_path:
+ UNWIND(.fnend )
+ ENDPROC(ret_fast_syscall)
+
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, un
diff --git a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
index cbc0c40a1b953f..976186027a2102 100644
--- a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
+++ b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -624,7 +624,7 @@ config XEN_DOM0
+@@ -689,7 +689,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64"
diff --git a/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch b/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
index fa08979005bcdd..55bbfbd12ac320 100644
--- a/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
+++ b/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -1641,7 +1641,7 @@ static int blk_mq_hctx_notify(void *data
+@@ -1687,7 +1687,7 @@ static int blk_mq_hctx_notify(void *data
{
struct blk_mq_hw_ctx *hctx = data;
diff --git a/patches/block-blk-mq-use-swait.patch b/patches/block-blk-mq-use-swait.patch
index b699a8b0bc2418..a12466db7f6cc0 100644
--- a/patches/block-blk-mq-use-swait.patch
+++ b/patches/block-blk-mq-use-swait.patch
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -660,7 +660,7 @@ int blk_queue_enter(struct request_queue
+@@ -662,7 +662,7 @@ int blk_queue_enter(struct request_queue
if (nowait)
return -EBUSY;
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
!atomic_read(&q->mq_freeze_depth) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
-@@ -680,7 +680,7 @@ static void blk_queue_usage_counter_rele
+@@ -682,7 +682,7 @@ static void blk_queue_usage_counter_rele
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void blk_rq_timed_out_timer(unsigned long data)
-@@ -749,7 +749,7 @@ struct request_queue *blk_alloc_queue_no
+@@ -751,7 +751,7 @@ struct request_queue *blk_alloc_queue_no
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -458,7 +458,7 @@ struct request_queue {
+@@ -468,7 +468,7 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
diff --git a/patches/block-mq-don-t-complete-requests-via-IPI.patch b/patches/block-mq-don-t-complete-requests-via-IPI.patch
index 8412a0941fb61f..ef9764e6c647fe 100644
--- a/patches/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/patches/block-mq-don-t-complete-requests-via-IPI.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rq->__sector = (sector_t) -1;
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -196,6 +196,9 @@ static void blk_mq_rq_ctx_init(struct re
+@@ -197,6 +197,9 @@ static void blk_mq_rq_ctx_init(struct re
rq->resid_len = 0;
rq->sense = NULL;
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
-@@ -323,6 +326,17 @@ void blk_mq_end_request(struct request *
+@@ -379,6 +382,17 @@ void blk_mq_end_request(struct request *
}
EXPORT_SYMBOL(blk_mq_end_request);
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
-@@ -330,6 +344,8 @@ static void __blk_mq_complete_request_re
+@@ -386,6 +400,8 @@ static void __blk_mq_complete_request_re
rq->q->softirq_done_fn(rq);
}
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void blk_mq_ipi_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
-@@ -346,10 +362,14 @@ static void blk_mq_ipi_complete_request(
+@@ -402,10 +418,14 @@ static void blk_mq_ipi_complete_request(
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
-@@ -218,6 +218,7 @@ static inline u16 blk_mq_unique_tag_to_t
+@@ -222,6 +222,7 @@ static inline u16 blk_mq_unique_tag_to_t
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
@@ -91,11 +91,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void blk_mq_start_request(struct request *rq);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -90,6 +90,7 @@ struct request {
+@@ -89,6 +89,7 @@ struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
+ struct work_struct work;
- unsigned long fifo_time;
+ u64 fifo_time;
};
diff --git a/patches/block-mq-drop-preempt-disable.patch b/patches/block-mq-drop-preempt-disable.patch
index 8a250ea7892d2a..19f888d387e4e4 100644
--- a/patches/block-mq-drop-preempt-disable.patch
+++ b/patches/block-mq-drop-preempt-disable.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -341,7 +341,7 @@ static void blk_mq_ipi_complete_request(
+@@ -397,7 +397,7 @@ static void blk_mq_ipi_complete_request(
return;
}
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
-@@ -353,7 +353,7 @@ static void blk_mq_ipi_complete_request(
+@@ -409,7 +409,7 @@ static void blk_mq_ipi_complete_request(
} else {
rq->q->softirq_done_fn(rq);
}
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void __blk_mq_complete_request(struct request *rq)
-@@ -868,14 +868,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
+@@ -938,14 +938,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
return;
if (!async) {
diff --git a/patches/block-shorten-interrupt-disabled-regions.patch b/patches/block-shorten-interrupt-disabled-regions.patch
index 325baa85b80df4..b4c8af16d845d1 100644
--- a/patches/block-shorten-interrupt-disabled-regions.patch
+++ b/patches/block-shorten-interrupt-disabled-regions.patch
@@ -47,7 +47,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -3209,7 +3209,7 @@ static void queue_unplugged(struct reque
+@@ -3171,7 +3171,7 @@ static void queue_unplugged(struct reque
blk_run_queue_async(q);
else
__blk_run_queue(q);
@@ -56,7 +56,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3257,7 +3257,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3219,7 +3219,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@@ -64,7 +64,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
-@@ -3277,11 +3276,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3239,11 +3238,6 @@ void blk_flush_plug_list(struct blk_plug
q = NULL;
depth = 0;
@@ -76,7 +76,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
-@@ -3294,7 +3288,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3256,7 +3250,7 @@ void blk_flush_plug_list(struct blk_plug
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
@@ -85,7 +85,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
/*
-@@ -3321,8 +3315,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3283,8 +3277,6 @@ void blk_flush_plug_list(struct blk_plug
*/
if (q)
queue_unplugged(q, depth, from_schedule);
diff --git a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index a61c501b4e50b7..c9521885a0a9f3 100644
--- a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -32,33 +32,73 @@ What happens:
Fix it by replacing get/put_cpu_var() with get/put_cpu_light().
-
Reported-by: Nikita Yushchenko <nyushchenko@dev.rtsoft.ru>
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+[bigeasy: use memcg_stock_ll as a locallock since it is now IRQ-off region]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- mm/memcontrol.c | 7 +++++--
- 1 file changed, 5 insertions(+), 2 deletions(-)
+ mm/memcontrol.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1828,14 +1828,17 @@ static void drain_local_stock(struct wor
- */
- static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
- {
-- struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
-+ struct memcg_stock_pcp *stock;
-+ int cpu = get_cpu_light();
-+
-+ stock = &per_cpu(memcg_stock, cpu);
+@@ -1727,6 +1727,7 @@ struct memcg_stock_pcp {
+ #define FLUSHING_CACHED_CHARGE 0
+ };
+ static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
++static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll);
+ static DEFINE_MUTEX(percpu_charge_mutex);
+
+ /**
+@@ -1749,7 +1750,7 @@ static bool consume_stock(struct mem_cgr
+ if (nr_pages > CHARGE_BATCH)
+ return ret;
+
+- local_irq_save(flags);
++ local_lock_irqsave(memcg_stock_ll, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
+@@ -1757,7 +1758,7 @@ static bool consume_stock(struct mem_cgr
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(memcg_stock_ll, flags);
+
+ return ret;
+ }
+@@ -1784,13 +1785,13 @@ static void drain_local_stock(struct wor
+ struct memcg_stock_pcp *stock;
+ unsigned long flags;
+- local_irq_save(flags);
++ local_lock_irqsave(memcg_stock_ll, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ drain_stock(stock);
+ clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(memcg_stock_ll, flags);
+ }
+
+ /*
+@@ -1802,7 +1803,7 @@ static void refill_stock(struct mem_cgro
+ struct memcg_stock_pcp *stock;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(memcg_stock_ll, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
if (stock->cached != memcg) { /* reset if necessary */
- drain_stock(stock);
- stock->cached = memcg;
+@@ -1811,7 +1812,7 @@ static void refill_stock(struct mem_cgro
}
stock->nr_pages += nr_pages;
-- put_cpu_var(memcg_stock);
-+ put_cpu_light();
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(memcg_stock_ll, flags);
}
/*
diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch
index f0ddcbdfa4aed0..aee9727ae2cb2e 100644
--- a/patches/cgroups-use-simple-wait-in-css_release.patch
+++ b/patches/cgroups-use-simple-wait-in-css_release.patch
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
-@@ -5011,10 +5011,10 @@ static void css_free_rcu_fn(struct rcu_h
+@@ -5027,10 +5027,10 @@ static void css_free_rcu_fn(struct rcu_h
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -5055,8 +5055,8 @@ static void css_release(struct percpu_re
+@@ -5071,8 +5071,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5698,6 +5698,7 @@ static int __init cgroup_wq_init(void)
+@@ -5716,6 +5716,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index 10c03f93f7ef8a..553b844a8b30ce 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
break;
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1393,7 +1393,7 @@ static void ffs_data_put(struct ffs_data
+@@ -1509,7 +1509,7 @@ static void ffs_data_put(struct ffs_data
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
-@@ -194,6 +194,12 @@ struct platform_freeze_ops {
+@@ -193,6 +193,12 @@ struct platform_freeze_ops {
void (*end)(void);
};
@@ -136,8 +136,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct;
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
-@@ -649,6 +649,10 @@ static void power_down(void)
- cpu_relax();
+@@ -681,6 +681,10 @@ static int load_image_and_restore(void)
+ return error;
}
+#ifndef CONFIG_SUSPEND
@@ -147,7 +147,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* hibernate - Carry out system hibernation, including saving the image.
*/
-@@ -661,6 +665,8 @@ int hibernate(void)
+@@ -694,6 +698,8 @@ int hibernate(void)
return -EPERM;
}
@@ -156,7 +156,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
-@@ -726,6 +732,7 @@ int hibernate(void)
+@@ -771,6 +777,7 @@ int hibernate(void)
atomic_inc(&snapshot_device_available);
Unlock:
unlock_system_sleep();
@@ -166,7 +166,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
-@@ -521,6 +521,8 @@ static int enter_state(suspend_state_t s
+@@ -523,6 +523,8 @@ static int enter_state(suspend_state_t s
return error;
}
@@ -175,7 +175,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* pm_suspend - Externally visible function for suspending the system.
* @state: System sleep state to enter.
-@@ -535,6 +537,8 @@ int pm_suspend(suspend_state_t state)
+@@ -537,6 +539,8 @@ int pm_suspend(suspend_state_t state)
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
@@ -184,7 +184,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
error = enter_state(state);
if (error) {
suspend_stats.fail++;
-@@ -542,6 +546,7 @@ int pm_suspend(suspend_state_t state)
+@@ -544,6 +548,7 @@ int pm_suspend(suspend_state_t state)
} else {
suspend_stats.success++;
}
@@ -286,7 +286,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3142,7 +3142,10 @@ void migrate_disable(void)
+@@ -3317,7 +3317,10 @@ void migrate_disable(void)
}
#ifdef CONFIG_SCHED_DEBUG
@@ -298,7 +298,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -3169,7 +3172,10 @@ void migrate_enable(void)
+@@ -3344,7 +3347,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index aa2581b244d3a9..d1563b850ab29d 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -3029,12 +3029,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -3258,12 +3258,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4813,6 +4813,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -5021,6 +5021,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -4826,6 +4827,7 @@ int __sched __cond_resched_softirq(void)
+@@ -5034,6 +5035,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
index 2bf54bfe456795..ee85da7f3d159c 100644
--- a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+++ b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
@@ -19,8 +19,8 @@ Cc: Clark Williams <clark.williams@gmail.com>
Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/cpu.c | 34 +++++++++++++++++++++++++++-------
- 1 file changed, 27 insertions(+), 7 deletions(-)
+ kernel/cpu.c | 32 +++++++++++++++++++++++++-------
+ 1 file changed, 25 insertions(+), 7 deletions(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
-@@ -153,12 +159,26 @@ static struct {
+@@ -153,12 +159,24 @@ static struct {
} cpu_hotplug = {
.active_writer = NULL,
.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
@@ -56,19 +56,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
-+# define hotplug_trylock() rt_spin_trylock(&cpu_hotplug.lock)
-+# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
++# define hotplug_lock() rt_spin_lock__no_mg(&cpu_hotplug.lock)
++# define hotplug_unlock() rt_spin_unlock__no_mg(&cpu_hotplug.lock)
+#else
+# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
-+# define hotplug_trylock() mutex_trylock(&cpu_hotplug.lock)
+# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
+#endif
+
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
-@@ -195,8 +215,8 @@ void pin_current_cpu(void)
+@@ -195,8 +213,8 @@ void pin_current_cpu(void)
return;
}
preempt_enable();
@@ -79,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_disable();
goto retry;
}
-@@ -269,9 +289,9 @@ void get_online_cpus(void)
+@@ -269,9 +287,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
@@ -91,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(get_online_cpus);
-@@ -324,11 +344,11 @@ void cpu_hotplug_begin(void)
+@@ -324,11 +342,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
@@ -105,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
-@@ -337,7 +357,7 @@ void cpu_hotplug_begin(void)
+@@ -337,7 +355,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index a928a4574d10ba..fa4b8495d2dc2a 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -50,13 +50,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 7 +
- kernel/cpu.c | 240 ++++++++++++++++++++++++++++++++++++++++----------
+ kernel/cpu.c | 238 +++++++++++++++++++++++++++++++++++++++++---------
kernel/sched/core.c | 78 ++++++++++++++++
- 3 files changed, 281 insertions(+), 44 deletions(-)
+ 3 files changed, 281 insertions(+), 42 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2325,6 +2325,10 @@ extern void do_set_cpus_allowed(struct t
+@@ -2429,6 +2429,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
-@@ -2337,6 +2341,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -2441,6 +2445,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
-@@ -158,27 +152,13 @@ static struct {
+@@ -158,25 +152,13 @@ static struct {
#endif
} cpu_hotplug = {
.active_writer = NULL,
@@ -113,19 +113,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
-#ifdef CONFIG_PREEMPT_RT_FULL
--# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
--# define hotplug_trylock() rt_spin_trylock(&cpu_hotplug.lock)
--# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
+-# define hotplug_lock() rt_spin_lock__no_mg(&cpu_hotplug.lock)
+-# define hotplug_unlock() rt_spin_unlock__no_mg(&cpu_hotplug.lock)
-#else
-# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
--# define hotplug_trylock() mutex_trylock(&cpu_hotplug.lock)
-# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
-#endif
-
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
-@@ -186,12 +166,42 @@ static struct {
+@@ -184,12 +166,42 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -158,8 +156,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
-+# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
++# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock)
++# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock)
+#else
+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
@@ -168,7 +166,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
-@@ -205,18 +215,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -203,18 +215,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
void pin_current_cpu(void)
{
struct hotplug_pcp *hp;
@@ -212,7 +210,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_disable();
goto retry;
}
-@@ -237,26 +268,84 @@ void unpin_current_cpu(void)
+@@ -235,26 +268,84 @@ void unpin_current_cpu(void)
wake_up_process(hp->unplug);
}
@@ -304,7 +302,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
-@@ -264,23 +353,83 @@ static int sync_unplug_thread(void *data
+@@ -262,23 +353,83 @@ static int sync_unplug_thread(void *data
static int cpu_unplug_begin(unsigned int cpu)
{
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
@@ -395,7 +393,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void get_online_cpus(void)
-@@ -289,9 +438,9 @@ void get_online_cpus(void)
+@@ -287,9 +438,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
@@ -407,7 +405,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(get_online_cpus);
-@@ -344,11 +493,11 @@ void cpu_hotplug_begin(void)
+@@ -342,11 +493,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
@@ -421,7 +419,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
-@@ -357,7 +506,7 @@ void cpu_hotplug_begin(void)
+@@ -355,7 +506,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
@@ -430,7 +428,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpuhp_lock_release();
}
-@@ -838,6 +987,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -828,6 +979,9 @@ static int takedown_cpu(unsigned int cpu
kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
smpboot_park_threads(cpu);
@@ -442,7 +440,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* interrupt affinities.
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1091,6 +1091,84 @@ void do_set_cpus_allowed(struct task_str
+@@ -1129,6 +1129,84 @@ void do_set_cpus_allowed(struct task_str
enqueue_task(rq, p, ENQUEUE_RESTORE);
}
@@ -484,8 +482,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct migration_arg arg;
+ struct cpumask *cpumask;
+ struct cpumask *mask;
-+ unsigned long flags;
+ unsigned int dest_cpu;
++ struct rq_flags rf;
+ struct rq *rq;
+
+ /*
@@ -496,7 +494,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ return 0;
+
+ mutex_lock(&sched_down_mutex);
-+ rq = task_rq_lock(p, &flags);
++ rq = task_rq_lock(p, &rf);
+
+ cpumask = this_cpu_ptr(&sched_cpumasks);
+ mask = &p->cpus_allowed;
@@ -505,7 +503,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+ if (!cpumask_weight(cpumask)) {
+ /* It's only on this CPU? */
-+ task_rq_unlock(rq, p, &flags);
++ task_rq_unlock(rq, p, &rf);
+ mutex_unlock(&sched_down_mutex);
+ return 0;
+ }
@@ -515,7 +513,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ arg.task = p;
+ arg.dest_cpu = dest_cpu;
+
-+ task_rq_unlock(rq, p, &flags);
++ task_rq_unlock(rq, p, &rf);
+
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+ tlb_migrate_finish(p->mm);
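
A rebase detail visible in the migrate_me() hunk above: in 4.8 the scheduler's task_rq_lock() takes a struct rq_flags cookie rather than a bare unsigned long, so the refreshed patch declares rf and hands it to both lock and unlock. Condensed to just the locking shape (kernel/sched-internal API, sketch only):

static int inspect_task_cpus(struct task_struct *p)
{
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);      /* replaces the old unsigned-long-flags form */
        /* ... examine p->cpus_allowed and rq state under the lock ... */
        task_rq_unlock(rq, p, &rf);

        return 0;
}
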
diff --git a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
index 936b653fd3ac9f..574893c165a563 100644
--- a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
+++ b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1669,12 +1669,13 @@ void hrtimer_init_sleeper(struct hrtimer
+@@ -1649,12 +1649,13 @@ void hrtimer_init_sleeper(struct hrtimer
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hrtimer_start_expires(&t->timer, mode);
if (likely(t->task))
-@@ -1716,7 +1717,8 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1696,7 +1697,8 @@ long __sched hrtimer_nanosleep_restart(s
HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
rmtp = restart->nanosleep.rmtp;
-@@ -1733,8 +1735,10 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1713,8 +1715,10 @@ long __sched hrtimer_nanosleep_restart(s
return ret;
}
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct restart_block *restart;
struct hrtimer_sleeper t;
-@@ -1747,7 +1751,7 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1727,7 +1731,7 @@ long hrtimer_nanosleep(struct timespec *
hrtimer_init_on_stack(&t.timer, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
/* Absolute timers do not update the rmtp value and restart: */
-@@ -1774,6 +1778,12 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1754,6 +1758,12 @@ long hrtimer_nanosleep(struct timespec *
return ret;
}
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
-@@ -1800,7 +1810,8 @@ void cpu_chill(void)
+@@ -1780,7 +1790,8 @@ void cpu_chill(void)
unsigned int freeze_flag = current->flags & PF_NOFREEZE;
current->flags |= PF_NOFREEZE;
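
Only the hunk header changed above; the body of cpu_chill() itself is untouched by the refresh. The point of the whole patch is that __hrtimer_nanosleep() now takes the task state as an argument, so cpu_chill() can sleep in TASK_UNINTERRUPTIBLE and neither signals nor the freezer cut the one-millisecond chill short. A sketch of the resulting cpu_chill() (assumption: condensed from the patched kernel/time/hrtimer.c, which is not fully shown here):

void cpu_chill(void)
{
        struct timespec tu = {
                .tv_nsec = NSEC_PER_MSEC,
        };
        unsigned int freeze_flag = current->flags & PF_NOFREEZE;

        current->flags |= PF_NOFREEZE;
        /* uninterruptible one-millisecond sleep via the new helper */
        __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
                            TASK_UNINTERRUPTIBLE);
        if (!freeze_flag)
                current->flags &= ~PF_NOFREEZE;
}
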
diff --git a/patches/cpu_down_move_migrate_enable_back.patch b/patches/cpu_down_move_migrate_enable_back.patch
index 17aa462e194560..8d6f0338a41430 100644
--- a/patches/cpu_down_move_migrate_enable_back.patch
+++ b/patches/cpu_down_move_migrate_enable_back.patch
@@ -34,7 +34,7 @@ Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -1125,6 +1125,7 @@ static int __ref _cpu_down(unsigned int
+@@ -1117,6 +1117,7 @@ static int __ref _cpu_down(unsigned int
goto restore_cpus;
}
@@ -42,7 +42,7 @@ Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
cpu_hotplug_begin();
ret = cpu_unplug_begin(cpu);
if (ret) {
-@@ -1172,7 +1173,6 @@ static int __ref _cpu_down(unsigned int
+@@ -1164,7 +1165,6 @@ static int __ref _cpu_down(unsigned int
cpu_unplug_done(cpu);
out_cancel:
cpu_hotplug_done();
diff --git a/patches/cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/patches/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
index e3f833c1032e95..7ab6f98f1b5cc9 100644
--- a/patches/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
+++ b/patches/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
-@@ -123,7 +123,7 @@ config X86_POWERNOW_K7_ACPI
+@@ -124,7 +124,7 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
diff --git a/patches/cpumask-disable-offstack-on-rt.patch b/patches/cpumask-disable-offstack-on-rt.patch
index 834b6217b924af..3aec33b0e22499 100644
--- a/patches/cpumask-disable-offstack-on-rt.patch
+++ b/patches/cpumask-disable-offstack-on-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -892,7 +892,7 @@ config IOMMU_HELPER
+@@ -888,7 +888,7 @@ config IOMMU_HELPER
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
If unsure, say N.
--- a/lib/Kconfig
+++ b/lib/Kconfig
-@@ -397,6 +397,7 @@ config CHECK_SIGNATURE
+@@ -400,6 +400,7 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
diff --git a/patches/crypto-ccp-remove-rwlocks_types.h.patch b/patches/crypto-ccp-remove-rwlocks_types.h.patch
deleted file mode 100644
index e8ebfe3addffc3..00000000000000
--- a/patches/crypto-ccp-remove-rwlocks_types.h.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 11 May 2016 11:56:18 +0200
-Subject: crypto/ccp: remove rwlocks_types.h
-
-Users of rwlocks should include spinlock.h instead including this
-header file. The current users of rwlocks_types.h are internal.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/crypto/ccp/ccp-dev.c | 1 -
- 1 file changed, 1 deletion(-)
-
---- a/drivers/crypto/ccp/ccp-dev.c
-+++ b/drivers/crypto/ccp/ccp-dev.c
-@@ -16,7 +16,6 @@
- #include <linux/sched.h>
- #include <linux/interrupt.h>
- #include <linux/spinlock.h>
--#include <linux/rwlock_types.h>
- #include <linux/types.h>
- #include <linux/mutex.h>
- #include <linux/delay.h>
diff --git a/patches/debugobjects-rt.patch b/patches/debugobjects-rt.patch
index 5a35a8eb17f096..571317237f9ff7 100644
--- a/patches/debugobjects-rt.patch
+++ b/patches/debugobjects-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
-@@ -309,7 +309,10 @@ static void
+@@ -308,7 +308,10 @@ static void
struct debug_obj *obj;
unsigned long flags;
diff --git a/patches/dm-make-rt-aware.patch b/patches/dm-make-rt-aware.patch
index 2bbb6804e0ca14..875046aa1077e8 100644
--- a/patches/dm-make-rt-aware.patch
+++ b/patches/dm-make-rt-aware.patch
@@ -10,12 +10,12 @@ Reported-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- drivers/md/dm.c | 2 +-
+ drivers/md/dm-rq.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
---- a/drivers/md/dm.c
-+++ b/drivers/md/dm.c
-@@ -2187,7 +2187,7 @@ static void dm_request_fn(struct request
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -802,7 +802,7 @@ static void dm_old_request_fn(struct req
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
queue_kthread_work(&md->kworker, &tio->work);
diff --git a/patches/driver-net-ethernet-tile-Initialize-timer-as-pinned.patch b/patches/driver-net-ethernet-tile-Initialize-timer-as-pinned.patch
deleted file mode 100644
index 975115f8219909..00000000000000
--- a/patches/driver-net-ethernet-tile-Initialize-timer-as-pinned.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:19 +0000
-Subject: [PATCH 05/22] driver/net/ethernet/tile: Initialize timer as pinned
-
-Pinned timers must carry that attribute in the timer itself. No functional
-change.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/net/ethernet/tile/tilepro.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/tile/tilepro.c
-+++ b/drivers/net/ethernet/tile/tilepro.c
-@@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(str
- static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
- {
- if (!info->egress_timer_scheduled) {
-- mod_timer_pinned(&info->egress_timer, jiffies + 1);
-+ mod_timer(&info->egress_timer, jiffies + 1);
- info->egress_timer_scheduled = true;
- }
- }
-@@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_
- BUG();
-
- /* Initialize the egress timer. */
-- init_timer(&info->egress_timer);
-+ init_timer_pinned(&info->egress_timer);
- info->egress_timer.data = (long)info;
- info->egress_timer.function = tile_net_handle_egress_timer;
-
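
The tile conversion above and the metag_da/mips_ejtag ones below were presumably dropped because the timer series carrying them went upstream in 4.8. They all share one shape: the pinned attribute is set once at timer setup, so the re-arm path can use plain mod_timer(). Stand-alone sketch with placeholder names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list poll_timer;

static void poll_timer_fn(unsigned long data)
{
        /* ... periodic work ... */

        /* re-arming keeps the pinned attribute, no mod_timer_pinned() needed */
        mod_timer(&poll_timer, jiffies + 1);
}

static void poll_timer_start(void)
{
        /* the pinned attribute is carried by the timer itself from setup on */
        setup_pinned_timer(&poll_timer, poll_timer_fn, 0);
        poll_timer.expires = jiffies + 1;
        add_timer(&poll_timer);
}
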
diff --git a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index 95d530b64b4563..a431d20a789e6d 100644
--- a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -520,6 +520,8 @@ static struct zram_meta *zram_meta_alloc
+@@ -519,6 +519,8 @@ static struct zram_meta *zram_meta_alloc
goto out_error;
}
@@ -24,9 +24,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return meta;
out_error:
-@@ -568,12 +570,12 @@ static int zram_decompress_page(struct z
+@@ -567,12 +569,12 @@ static int zram_decompress_page(struct z
unsigned long handle;
- size_t size;
+ unsigned int size;
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
@@ -39,16 +39,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
clear_page(mem);
return 0;
}
-@@ -584,7 +586,7 @@ static int zram_decompress_page(struct z
- else
- ret = zcomp_decompress(zram->comp, cmem, size, mem);
+@@ -587,7 +589,7 @@ static int zram_decompress_page(struct z
+ zcomp_stream_put(zram->comp);
+ }
zs_unmap_object(meta->mem_pool, handle);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret)) {
-@@ -604,14 +606,14 @@ static int zram_bvec_read(struct zram *z
+@@ -607,14 +609,14 @@ static int zram_bvec_read(struct zram *z
struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
-@@ -689,10 +691,10 @@ static int zram_bvec_write(struct zram *
+@@ -691,10 +693,10 @@ static int zram_bvec_write(struct zram *
if (user_mem)
kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
atomic64_inc(&zram->stats.zero_pages);
ret = 0;
-@@ -752,12 +754,12 @@ static int zram_bvec_write(struct zram *
+@@ -785,12 +787,12 @@ static int zram_bvec_write(struct zram *
* Free memory associated with this sector
* before overwriting unused sectors.
*/
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Update stats */
atomic64_add(clen, &zram->stats.compr_data_size);
-@@ -800,9 +802,9 @@ static void zram_bio_discard(struct zram
+@@ -833,9 +835,9 @@ static void zram_bio_discard(struct zram
}
while (n >= PAGE_SIZE) {
@@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
atomic64_inc(&zram->stats.notify_free);
index++;
n -= PAGE_SIZE;
-@@ -928,9 +930,9 @@ static void zram_slot_free_notify(struct
+@@ -964,9 +966,9 @@ static void zram_slot_free_notify(struct
zram = bdev->bd_disk->private_data;
meta = zram->meta;
@@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
-@@ -72,6 +72,9 @@ enum zram_pageflags {
+@@ -73,6 +73,9 @@ enum zram_pageflags {
struct zram_table_entry {
unsigned long handle;
unsigned long value;
@@ -130,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
struct zram_stats {
-@@ -119,4 +122,42 @@ struct zram {
+@@ -120,4 +123,42 @@ struct zram {
*/
bool claim; /* Protected by bdev->bd_mutex */
};
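
The zram_lock_table()/zram_unlock_table() wrappers themselves live in the 42 added lines of zram_drv.h that the context above does not show; roughly, they keep the bit spinlock on !RT and switch to the spinlock_t added to struct zram_table_entry on RT. A sketch under that assumption (not the literal patch text):

#ifndef CONFIG_PREEMPT_RT_BASE
static inline void zram_lock_table(struct zram_table_entry *table)
{
        bit_spin_lock(ZRAM_ACCESS, &table->value);
}

static inline void zram_unlock_table(struct zram_table_entry *table)
{
        bit_spin_unlock(ZRAM_ACCESS, &table->value);
}
#else   /* CONFIG_PREEMPT_RT_BASE: rtmutex-backed spinlock instead of a bit spinlock */
static inline void zram_lock_table(struct zram_table_entry *table)
{
        spin_lock(&table->lock);
}

static inline void zram_unlock_table(struct zram_table_entry *table)
{
        spin_unlock(&table->lock);
}
#endif
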
diff --git a/patches/drivers-net-8139-disable-irq-nosync.patch b/patches/drivers-net-8139-disable-irq-nosync.patch
index fd12422bc784eb..842d45fcd1dbd0 100644
--- a/patches/drivers-net-8139-disable-irq-nosync.patch
+++ b/patches/drivers-net-8139-disable-irq-nosync.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
-@@ -2229,7 +2229,7 @@ static void rtl8139_poll_controller(stru
+@@ -2233,7 +2233,7 @@ static void rtl8139_poll_controller(stru
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
diff --git a/patches/drivers-net-fix-livelock-issues.patch b/patches/drivers-net-fix-livelock-issues.patch
deleted file mode 100644
index 80d97450f9ab88..00000000000000
--- a/patches/drivers-net-fix-livelock-issues.patch
+++ /dev/null
@@ -1,126 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sat, 20 Jun 2009 11:36:54 +0200
-Subject: drivers/net: fix livelock issues
-
-Preempt-RT runs into a live lock issue with the NETDEV_TX_LOCKED micro
-optimization. The reason is that the softirq thread is rescheduling
-itself on that return value. Depending on priorities it starts to
-monoplize the CPU and livelock on UP systems.
-
-Remove it.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6 +-----
- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3 +--
- drivers/net/ethernet/chelsio/cxgb/sge.c | 3 +--
- drivers/net/ethernet/neterion/s2io.c | 7 +------
- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6 ++----
- drivers/net/ethernet/tehuti/tehuti.c | 9 ++-------
- drivers/net/rionet.c | 6 +-----
- 7 files changed, 9 insertions(+), 31 deletions(-)
-
---- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
-+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
-@@ -2217,11 +2217,7 @@ static netdev_tx_t atl1c_xmit_frame(stru
- }
-
- tpd_req = atl1c_cal_tpd_req(skb);
-- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
-- if (netif_msg_pktdata(adapter))
-- dev_info(&adapter->pdev->dev, "tx locked\n");
-- return NETDEV_TX_LOCKED;
-- }
-+ spin_lock_irqsave(&adapter->tx_lock, flags);
-
- if (atl1c_tpd_avail(adapter, type) < tpd_req) {
- /* no enough descriptor, just stop queue */
---- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
-+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
-@@ -1880,8 +1880,7 @@ static netdev_tx_t atl1e_xmit_frame(stru
- return NETDEV_TX_OK;
- }
- tpd_req = atl1e_cal_tdp_req(skb);
-- if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
-- return NETDEV_TX_LOCKED;
-+ spin_lock_irqsave(&adapter->tx_lock, flags);
-
- if (atl1e_tpd_avail(adapter) < tpd_req) {
- /* no enough descriptor, just stop queue */
---- a/drivers/net/ethernet/chelsio/cxgb/sge.c
-+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
-@@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb
- struct cmdQ *q = &sge->cmdQ[qid];
- unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
-
-- if (!spin_trylock(&q->lock))
-- return NETDEV_TX_LOCKED;
-+ spin_lock(&q->lock);
-
- reclaim_completed_tx(sge, q);
-
---- a/drivers/net/ethernet/neterion/s2io.c
-+++ b/drivers/net/ethernet/neterion/s2io.c
-@@ -4084,12 +4084,7 @@ static netdev_tx_t s2io_xmit(struct sk_b
- [skb->priority & (MAX_TX_FIFOS - 1)];
- fifo = &mac_control->fifos[queue];
-
-- if (do_spin_lock)
-- spin_lock_irqsave(&fifo->tx_lock, flags);
-- else {
-- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
-- return NETDEV_TX_LOCKED;
-- }
-+ spin_lock_irqsave(&fifo->tx_lock, flags);
-
- if (sp->config.multiq) {
- if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
---- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
-+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
-@@ -2137,10 +2137,8 @@ static int pch_gbe_xmit_frame(struct sk_
- struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
- unsigned long flags;
-
-- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
-- /* Collision - tell upper layer to requeue */
-- return NETDEV_TX_LOCKED;
-- }
-+ spin_lock_irqsave(&tx_ring->tx_lock, flags);
-+
- if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
---- a/drivers/net/ethernet/tehuti/tehuti.c
-+++ b/drivers/net/ethernet/tehuti/tehuti.c
-@@ -1629,13 +1629,8 @@ static netdev_tx_t bdx_tx_transmit(struc
- unsigned long flags;
-
- ENTER;
-- local_irq_save(flags);
-- if (!spin_trylock(&priv->tx_lock)) {
-- local_irq_restore(flags);
-- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
-- BDX_DRV_NAME, ndev->name);
-- return NETDEV_TX_LOCKED;
-- }
-+
-+ spin_lock_irqsave(&priv->tx_lock, flags);
-
- /* build tx descriptor */
- BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
---- a/drivers/net/rionet.c
-+++ b/drivers/net/rionet.c
-@@ -179,11 +179,7 @@ static int rionet_start_xmit(struct sk_b
- unsigned long flags;
- int add_num = 1;
-
-- local_irq_save(flags);
-- if (!spin_trylock(&rnet->tx_lock)) {
-- local_irq_restore(flags);
-- return NETDEV_TX_LOCKED;
-- }
-+ spin_lock_irqsave(&rnet->tx_lock, flags);
-
- if (is_multicast_ether_addr(eth->h_dest))
- add_num = nets[rnet->mport->id].nact;
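
What the dropped patch used to do in every affected driver was replace the trylock-and-bail pattern, whose NETDEV_TX_LOCKED return lets a rescheduling softirq thread monopolize the CPU on RT, with an unconditional spin_lock_irqsave(). The shape of that change as a stand-alone sketch (placeholder driver names):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_priv {                      /* stand-in for the driver's private data */
        spinlock_t tx_lock;
};

static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct demo_priv *priv = netdev_priv(dev);
        unsigned long flags;

        /*
         * Before: if (!spin_trylock_irqsave(&priv->tx_lock, flags))
         *                 return NETDEV_TX_LOCKED;
         */
        spin_lock_irqsave(&priv->tx_lock, flags);

        /* ... place skb on the hardware TX ring ... */

        spin_unlock_irqrestore(&priv->tx_lock, flags);
        return NETDEV_TX_OK;
}
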
diff --git a/patches/drivers-random-reduce-preempt-disabled-region.patch b/patches/drivers-random-reduce-preempt-disabled-region.patch
index 9181e5eed2255a..c41b980a45eccb 100644
--- a/patches/drivers-random-reduce-preempt-disabled-region.patch
+++ b/patches/drivers-random-reduce-preempt-disabled-region.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -799,8 +799,6 @@ static void add_timer_randomness(struct
+@@ -1028,8 +1028,6 @@ static void add_timer_randomness(struct
} sample;
long delta, delta2, delta3;
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
-@@ -841,7 +839,6 @@ static void add_timer_randomness(struct
+@@ -1070,7 +1068,6 @@ static void add_timer_randomness(struct
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
diff --git a/patches/drivers-tty-metag_da-Initialize-timer-as-pinned.patch b/patches/drivers-tty-metag_da-Initialize-timer-as-pinned.patch
deleted file mode 100644
index 465ef3c020498b..00000000000000
--- a/patches/drivers-tty-metag_da-Initialize-timer-as-pinned.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:21 +0000
-Subject: [PATCH 06/22] drivers/tty/metag_da: Initialize timer as pinned
-
-Pinned timers must carry that attribute in the timer itself. No functional
-change.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/metag_da.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/tty/metag_da.c
-+++ b/drivers/tty/metag_da.c
-@@ -323,12 +323,12 @@ static void dashtty_timer(unsigned long
- if (channel >= 0)
- fetch_data(channel);
-
-- mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL);
-+ mod_timer(&poll_timer, jiffies + DA_TTY_POLL);
- }
-
- static void add_poll_timer(struct timer_list *poll_timer)
- {
-- setup_timer(poll_timer, dashtty_timer, 0);
-+ setup_pinned_timer(poll_timer, dashtty_timer, 0);
- poll_timer->expires = jiffies + DA_TTY_POLL;
-
- /*
diff --git a/patches/drivers-tty-mips_ejtag-Initialize-timer-as-pinned.patch b/patches/drivers-tty-mips_ejtag-Initialize-timer-as-pinned.patch
deleted file mode 100644
index 010ddaecf79a61..00000000000000
--- a/patches/drivers-tty-mips_ejtag-Initialize-timer-as-pinned.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:22 +0000
-Subject: [PATCH 07/22] drivers/tty/mips_ejtag: Initialize timer as pinned
-
-Pinned timers must carry that attribute in the timer itself. No functional
-change.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/mips_ejtag_fdc.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/tty/mips_ejtag_fdc.c
-+++ b/drivers/tty/mips_ejtag_fdc.c
-@@ -689,7 +689,7 @@ static void mips_ejtag_fdc_tty_timer(uns
-
- mips_ejtag_fdc_handle(priv);
- if (!priv->removing)
-- mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL);
-+ mod_timer(&priv->poll_timer, jiffies + FDC_TTY_POLL);
- }
-
- /* TTY Port operations */
-@@ -1002,7 +1002,7 @@ static int mips_ejtag_fdc_tty_probe(stru
- raw_spin_unlock_irq(&priv->lock);
- } else {
- /* If we didn't get an usable IRQ, poll instead */
-- setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
-+ setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
- (unsigned long)priv);
- priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
- /*
diff --git a/patches/drivers-tty-pl011-irq-disable-madness.patch b/patches/drivers-tty-pl011-irq-disable-madness.patch
index 72116fbff9b6c4..dd90606a710de6 100644
--- a/patches/drivers-tty-pl011-irq-disable-madness.patch
+++ b/patches/drivers-tty-pl011-irq-disable-madness.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
-@@ -2166,13 +2166,19 @@ pl011_console_write(struct console *co,
+@@ -2167,13 +2167,19 @@ pl011_console_write(struct console *co,
clk_enable(uap->clk);
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* First save the CR then disable the interrupts
-@@ -2196,8 +2202,7 @@ pl011_console_write(struct console *co,
+@@ -2197,8 +2203,7 @@ pl011_console_write(struct console *co,
pl011_write(old_cr, uap, REG_CR);
if (locked)
diff --git a/patches/drm-i915-Use-consistent-forcewake-auto-release-timeo.patch b/patches/drm-i915-Use-consistent-forcewake-auto-release-timeo.patch
deleted file mode 100644
index 0f2cdf98c7ca31..00000000000000
--- a/patches/drm-i915-Use-consistent-forcewake-auto-release-timeo.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
-Date: Thu, 7 Apr 2016 17:04:32 +0100
-Subject: [PATCH] drm/i915: Use consistent forcewake auto-release timeout
- across kernel configs
-
-Upstream commit fde61b596b994195b9dd83feb325df95d99702ce
-
-Because it is based on jiffies, current implementation releases the
-forcewake at any time between straight away and between 1ms and 10ms,
-depending on the kernel configuration (CONFIG_HZ).
-
-This is probably not what has been desired, since the dynamics of keeping
-parts of the GPU awake should not be correlated with this kernel
-configuration parameter.
-
-Change the auto-release mechanism to use hrtimers and set the timeout to
-1ms with a 1ms of slack. This should make the GPU power consistent
-across kernel configs, and timer slack should enable some timer coalescing
-where multiple force-wake domains exist, or with unrelated timers.
-
-For GlBench/T-Rex this decreases the number of forcewake releases from
-~480 to ~300 per second, and for a heavy combined OGL/OCL test from
-~670 to ~360 (HZ=1000 kernel).
-
-Even though this reduction can be attributed to the average release period
-extending from 0-1ms to 1-2ms, as discussed above, it will make the
-forcewake timeout consistent for different CONFIG_HZ values.
-
-Real life measurements with the above workload has shown that, with this
-patch, both manage to auto-release the forcewake between 2-4 times per
-10ms, even though the number of forcewake gets is dramatically different.
-
-T-Rex requests between 5-10 explicit gets and 5-10 implict gets in each
-10ms period, while the OGL/OCL test requests 250 and 380 times in the same
-period.
-
-The two data points together suggest that the nature of the forwake
-accesses is bursty and that further changes and potential timeout
-extensions, or moving the start of timeout from the first to the last
-automatic forcewake grab, should be carefully measured for power and
-performance effects.
-
-v2:
- * Commit spelling. (Dave Gordon)
- * More discussion on numbers in the commit. (Chris Wilson)
-
-Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
-Reviewed-by: Dave Gordon <david.s.gordon@intel.com>
-Cc: Chris Wilson <chris@chris-wilson.co.uk>
-Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/gpu/drm/i915/i915_drv.h | 2 +-
- drivers/gpu/drm/i915/intel_uncore.c | 25 ++++++++++++++++---------
- 2 files changed, 17 insertions(+), 10 deletions(-)
-
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -714,7 +714,7 @@ struct intel_uncore {
- struct drm_i915_private *i915;
- enum forcewake_domain_id id;
- unsigned wake_count;
-- struct timer_list timer;
-+ struct hrtimer timer;
- i915_reg_t reg_set;
- u32 val_set;
- u32 val_clear;
---- a/drivers/gpu/drm/i915/intel_uncore.c
-+++ b/drivers/gpu/drm/i915/intel_uncore.c
-@@ -60,7 +60,11 @@ fw_domain_reset(const struct intel_uncor
- static inline void
- fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
- {
-- mod_timer_pinned(&d->timer, jiffies + 1);
-+ d->wake_count++;
-+ hrtimer_start_range_ns(&d->timer,
-+ ktime_set(0, NSEC_PER_MSEC),
-+ NSEC_PER_MSEC,
-+ HRTIMER_MODE_REL);
- }
-
- static inline void
-@@ -224,9 +228,11 @@ static int __gen6_gt_wait_for_fifo(struc
- return ret;
- }
-
--static void intel_uncore_fw_release_timer(unsigned long arg)
-+static enum hrtimer_restart
-+intel_uncore_fw_release_timer(struct hrtimer *timer)
- {
-- struct intel_uncore_forcewake_domain *domain = (void *)arg;
-+ struct intel_uncore_forcewake_domain *domain =
-+ container_of(timer, struct intel_uncore_forcewake_domain, timer);
- unsigned long irqflags;
-
- assert_rpm_device_not_suspended(domain->i915);
-@@ -240,6 +246,8 @@ static void intel_uncore_fw_release_time
- 1 << domain->id);
-
- spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
-+
-+ return HRTIMER_NORESTART;
- }
-
- void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
-@@ -259,16 +267,16 @@ void intel_uncore_forcewake_reset(struct
- active_domains = 0;
-
- for_each_fw_domain(domain, dev_priv, id) {
-- if (del_timer_sync(&domain->timer) == 0)
-+ if (hrtimer_cancel(&domain->timer) == 0)
- continue;
-
-- intel_uncore_fw_release_timer((unsigned long)domain);
-+ intel_uncore_fw_release_timer(&domain->timer);
- }
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- for_each_fw_domain(domain, dev_priv, id) {
-- if (timer_pending(&domain->timer))
-+ if (hrtimer_active(&domain->timer))
- active_domains |= (1 << id);
- }
-
-@@ -491,7 +499,6 @@ static void __intel_uncore_forcewake_put
- if (--domain->wake_count)
- continue;
-
-- domain->wake_count++;
- fw_domain_arm_timer(domain);
- }
- }
-@@ -732,7 +739,6 @@ static inline void __force_wake_get(stru
- continue;
- }
-
-- domain->wake_count++;
- fw_domain_arm_timer(domain);
- }
-
-@@ -1150,7 +1156,8 @@ static void fw_domain_init(struct drm_i9
- d->i915 = dev_priv;
- d->id = domain_id;
-
-- setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
-+ hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ d->timer.function = intel_uncore_fw_release_timer;
-
- dev_priv->uncore.fw_domains |= (1 << domain_id);
-
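
The auto-release idiom described by the dropped (now upstream) forcewake patch, an hrtimer armed one millisecond out with one millisecond of slack so releases can coalesce regardless of CONFIG_HZ, restated as a minimal sketch:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer release_timer;

static enum hrtimer_restart release_fn(struct hrtimer *timer)
{
        /* ... drop the reference that was being kept alive ... */
        return HRTIMER_NORESTART;
}

static void arm_release_timer(void)
{
        /* 1 ms timeout with 1 ms of slack, independent of CONFIG_HZ */
        hrtimer_start_range_ns(&release_timer, ktime_set(0, NSEC_PER_MSEC),
                               NSEC_PER_MSEC, HRTIMER_MODE_REL);
}

static void release_timer_init(void)
{
        hrtimer_init(&release_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        release_timer.function = release_fn;
}
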
diff --git a/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
index 50bfea3ba1fddc..219bbe19ecfa2f 100644
--- a/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
+++ b/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-@@ -1314,7 +1314,9 @@ i915_gem_ringbuffer_submission(struct i9
+@@ -1302,7 +1302,9 @@ i915_gem_ringbuffer_submission(struct i9
if (ret)
return ret;
@@ -55,4 +55,4 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
i915_gem_execbuffer_move_to_active(vmas, params->request);
- i915_gem_execbuffer_retire_commands(params);
+
diff --git a/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch b/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
index a277bf8ce52451..9e89971002b33e 100644
--- a/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
+++ b/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static bool
format_is_yuv(uint32_t format)
-@@ -64,6 +65,8 @@ static int usecs_to_scanlines(const stru
+@@ -64,6 +65,8 @@ int intel_usecs_to_scanlines(const struc
1000 * adjusted_mode->crtc_htotal);
}
@@ -78,8 +78,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* intel_pipe_update_start() - start update of a set of display registers
* @crtc: the crtc of which the registers are going to be updated
-@@ -96,7 +99,7 @@ void intel_pipe_update_start(struct inte
- min = vblank_start - usecs_to_scanlines(adjusted_mode, 100);
+@@ -94,7 +97,7 @@ void intel_pipe_update_start(struct inte
+ min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
max = vblank_start - 1;
- local_irq_disable();
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (min <= 0 || max <= 0)
return;
-@@ -126,11 +129,11 @@ void intel_pipe_update_start(struct inte
+@@ -124,11 +127,11 @@ void intel_pipe_update_start(struct inte
break;
}
@@ -101,9 +101,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
finish_wait(wq, &wait);
-@@ -164,7 +167,7 @@ void intel_pipe_update_end(struct intel_
-
- trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
+@@ -180,7 +183,7 @@ void intel_pipe_update_end(struct intel_
+ crtc->base.state->event = NULL;
+ }
- local_irq_enable();
+ local_unlock_irq(pipe_update_lock);
diff --git a/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
index 371961b14ab507..02986216e785bd 100644
--- a/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
+++ b/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -830,6 +830,7 @@ static int i915_get_crtc_scanoutpos(stru
+@@ -812,6 +812,7 @@ static int i915_get_crtc_scanoutpos(stru
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
-@@ -881,6 +882,7 @@ static int i915_get_crtc_scanoutpos(stru
+@@ -863,6 +864,7 @@ static int i915_get_crtc_scanoutpos(stru
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
-@@ -1863,6 +1863,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1869,6 +1869,7 @@ int radeon_get_crtc_scanoutpos(struct dr
struct radeon_device *rdev = dev->dev_private;
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
-@@ -1955,6 +1956,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1961,6 +1962,7 @@ int radeon_get_crtc_scanoutpos(struct dr
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
diff --git a/patches/dump-stack-don-t-disable-preemption-during-trace.patch b/patches/dump-stack-don-t-disable-preemption-during-trace.patch
index 62da320930ba9a..94597b5d8a5daa 100644
--- a/patches/dump-stack-don-t-disable-preemption-during-trace.patch
+++ b/patches/dump-stack-don-t-disable-preemption-during-trace.patch
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int graph = 0;
u32 *prev_esp;
-@@ -86,7 +86,7 @@ void dump_trace(struct task_struct *task
+@@ -84,7 +84,7 @@ void dump_trace(struct task_struct *task
break;
touch_nmi_watchdog();
}
@@ -45,19 +45,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
- const unsigned cpu = get_cpu();
+ const unsigned cpu = get_cpu_light();
- struct thread_info *tinfo;
unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned long dummy;
-@@ -241,7 +241,7 @@ void dump_trace(struct task_struct *task
+ unsigned used = 0;
+@@ -239,7 +239,7 @@ void dump_trace(struct task_struct *task
* This handles the process stack:
*/
- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+ bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
- put_cpu();
+ put_cpu_light();
}
EXPORT_SYMBOL(dump_trace);
-@@ -255,7 +255,7 @@ show_stack_log_lvl(struct task_struct *t
+@@ -253,7 +253,7 @@ show_stack_log_lvl(struct task_struct *t
int cpu;
int i;
@@ -66,8 +66,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpu = smp_processor_id();
irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
-@@ -291,7 +291,7 @@ show_stack_log_lvl(struct task_struct *t
- pr_cont(" %016lx", *stack++);
+@@ -299,7 +299,7 @@ show_stack_log_lvl(struct task_struct *t
+ stack++;
touch_nmi_watchdog();
}
- preempt_enable();
diff --git a/patches/fs-aio-simple-simple-work.patch b/patches/fs-aio-simple-simple-work.patch
index caeb72bb005bd0..fddfd6821f2484 100644
--- a/patches/fs-aio-simple-simple-work.patch
+++ b/patches/fs-aio-simple-simple-work.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* signals when all in-flight requests are done
-@@ -253,6 +254,7 @@ static int __init aio_setup(void)
+@@ -258,6 +259,7 @@ static int __init aio_setup(void)
.mount = aio_mount,
.kill_sb = kill_anon_super,
};
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
-@@ -568,9 +570,9 @@ static int kiocb_cancel(struct aio_kiocb
+@@ -578,9 +580,9 @@ static int kiocb_cancel(struct aio_kiocb
return cancel(&kiocb->common);
}
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_debug("freeing %p\n", ctx);
-@@ -589,8 +591,8 @@ static void free_ioctx_reqs(struct percp
+@@ -599,8 +601,8 @@ static void free_ioctx_reqs(struct percp
if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
complete(&ctx->rq_wait->comp);
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -598,9 +600,9 @@ static void free_ioctx_reqs(struct percp
+@@ -608,9 +610,9 @@ static void free_ioctx_reqs(struct percp
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
-@@ -619,6 +621,14 @@ static void free_ioctx_users(struct perc
+@@ -629,6 +631,14 @@ static void free_ioctx_users(struct perc
percpu_ref_put(&ctx->reqs);
}
diff --git a/patches/fs-dcache-include-wait.h.patch b/patches/fs-dcache-include-wait.h.patch
new file mode 100644
index 00000000000000..ed35f8edddba71
--- /dev/null
+++ b/patches/fs-dcache-include-wait.h.patch
@@ -0,0 +1,23 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Sep 2016 11:55:23 +0200
+Subject: fs/dcache: include wait.h
+
+Since commit d9171b934526 ("parallel lookups machinery, part 4 (and
+last)") dcache.h is using but does not include wait.h. It works as long
+as it is included somehow earlier and fails otherwise.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/dcache.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -11,6 +11,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/lockref.h>
+ #include <linux/stringhash.h>
++#include <linux/wait.h>
+
+ struct path;
+ struct vfsmount;
diff --git a/patches/fs-dcache-init-in_lookup_hashtable.patch b/patches/fs-dcache-init-in_lookup_hashtable.patch
new file mode 100644
index 00000000000000..61f4f1f6303a10
--- /dev/null
+++ b/patches/fs-dcache-init-in_lookup_hashtable.patch
@@ -0,0 +1,27 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Sep 2016 17:57:03 +0200
+Subject: [PATCH] fs/dcache: init in_lookup_hashtable
+
+in_lookup_hashtable was introduced in commit 94bdd655caba ("parallel
+lookups machinery, part 3") and never initialized but since it is in
+the data it is all zeros. But we need this for -RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/dcache.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -3601,6 +3601,11 @@ EXPORT_SYMBOL(d_genocide);
+
+ void __init vfs_caches_init_early(void)
+ {
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
++ INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
++
+ dcache_init_early();
+ inode_init_early();
+ }
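
The commit message's point that a zero-filled .data object is not good enough "for -RT" comes from the RT list_bl change: with it, every hlist_bl_head carries a real lock that wants an explicit init. A sketch of what INIT_HLIST_BL_HEAD amounts to under that assumption (the actual definition lives in the separate list_bl patch, not shown here):

struct hlist_bl_head {
        struct hlist_bl_node *first;
#ifdef CONFIG_PREEMPT_RT_BASE
        raw_spinlock_t lock;            /* stands in for the bit spinlock on RT */
#endif
};

#ifdef CONFIG_PREEMPT_RT_BASE
#define INIT_HLIST_BL_HEAD(h)                           \
do {                                                    \
        (h)->first = NULL;                              \
        raw_spin_lock_init(&(h)->lock);                 \
} while (0)
#else
#define INIT_HLIST_BL_HEAD(h)                           \
        ((h)->first = NULL)
#endif
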
diff --git a/patches/fs-dcache-resched-chill-only-if-we-make-no-progress.patch b/patches/fs-dcache-resched-chill-only-if-we-make-no-progress.patch
deleted file mode 100644
index 185bf2e8cd886c..00000000000000
--- a/patches/fs-dcache-resched-chill-only-if-we-make-no-progress.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 8 Sep 2016 18:33:52 +0200
-Subject: [PATCH] fs/dcache: resched/chill only if we make no progress
-
-Upstream commit 47be61845c77 ("fs/dcache.c: avoid soft-lockup in
-dput()") changed the condition _when_ cpu_relax() / cond_resched() was
-invoked. This change was adapted in -RT into mostly the same thing
-except that if cond_resched() did nothing we had to do cpu_chill() to
-force the task off CPU for a tiny little bit in case the task had RT
-priority and did not want to leave the CPU.
-This change resulted in a performance regression (in my testcase the
-build time on /dev/shm increased from 19min to 24min). The reason is
-that with this change cpu_chill() was invoked even dput() made progress
-(dentry_kill() returned a different dentry) instead only if we were
-trying this operation on the same dentry over and over again.
-
-This patch brings back to the old behavior back to cond_resched() &
-chill if we make no progress. A little improvement is to invoke
-cpu_chill() only if we are a RT task (and avoid the sleep otherwise).
-Otherwise the scheduler should remove us from the CPU if we make no
-progress.
-
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- fs/dcache.c | 18 ++++++++++++------
- 1 file changed, 12 insertions(+), 6 deletions(-)
-
---- a/fs/dcache.c
-+++ b/fs/dcache.c
-@@ -748,6 +748,8 @@ static inline bool fast_dput(struct dent
- */
- void dput(struct dentry *dentry)
- {
-+ struct dentry *parent;
-+
- if (unlikely(!dentry))
- return;
-
-@@ -784,14 +786,18 @@ void dput(struct dentry *dentry)
- return;
-
- kill_it:
-- dentry = dentry_kill(dentry);
-- if (dentry) {
-+ parent = dentry_kill(dentry);
-+ if (parent) {
- int r;
-
-- /* the task with the highest priority won't schedule */
-- r = cond_resched();
-- if (!r)
-- cpu_chill();
-+ if (parent == dentry) {
-+ /* the task with the highest priority won't schedule */
-+ r = cond_resched();
-+ if (!r)
-+ cpu_chill();
-+ } else {
-+ dentry = parent;
-+ }
- goto repeat;
- }
- }
diff --git a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index 94e96c6e9bb896..ede7118777a7e0 100644
--- a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -11,9 +11,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/autofs4/autofs_i.h | 1 +
fs/autofs4/expire.c | 2 +-
- fs/dcache.c | 10 ++++++++--
+ fs/dcache.c | 20 ++++++++++++++++----
fs/namespace.c | 3 ++-
- 4 files changed, 12 insertions(+), 4 deletions(-)
+ 4 files changed, 20 insertions(+), 6 deletions(-)
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -46,21 +46,38 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
-@@ -785,7 +786,12 @@ void dput(struct dentry *dentry)
+@@ -750,6 +751,8 @@ static inline bool fast_dput(struct dent
+ */
+ void dput(struct dentry *dentry)
+ {
++ struct dentry *parent;
++
+ if (unlikely(!dentry))
+ return;
+
+@@ -788,9 +791,18 @@ void dput(struct dentry *dentry)
+ return;
+
kill_it:
- dentry = dentry_kill(dentry);
- if (dentry) {
+- dentry = dentry_kill(dentry);
+- if (dentry) {
- cond_resched();
++ parent = dentry_kill(dentry);
++ if (parent) {
+ int r;
+
-+ /* the task with the highest priority won't schedule */
-+ r = cond_resched();
-+ if (!r)
-+ cpu_chill();
++ if (parent == dentry) {
++ /* the task with the highest priority won't schedule */
++ r = cond_resched();
++ if (!r)
++ cpu_chill();
++ } else {
++ dentry = parent;
++ }
goto repeat;
}
}
-@@ -2319,7 +2325,7 @@ void d_delete(struct dentry * dentry)
+@@ -2321,7 +2333,7 @@ void d_delete(struct dentry * dentry)
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
new file mode 100644
index 00000000000000..d1acdfdda3c107
--- /dev/null
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -0,0 +1,214 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Sep 2016 14:35:49 +0200
+Subject: [PATCH] fs/dcache: use swait_queue instead of waitqueue
+
+__d_lookup_done() invokes wake_up_all() while holding hlist_bl_lock(),
+which disables preemption. As a workaround, convert it to swait.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/cifs/readdir.c | 2 +-
+ fs/dcache.c | 27 +++++++++++++++------------
+ fs/fuse/dir.c | 2 +-
+ fs/namei.c | 4 ++--
+ fs/nfs/dir.c | 4 ++--
+ fs/nfs/unlink.c | 4 ++--
+ fs/proc/base.c | 2 +-
+ fs/proc/proc_sysctl.c | 2 +-
+ include/linux/dcache.h | 4 ++--
+ include/linux/nfs_xdr.h | 2 +-
+ kernel/sched/swait.c | 1 +
+ 11 files changed, 29 insertions(+), 25 deletions(-)
+
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent,
+ struct inode *inode;
+ struct super_block *sb = parent->d_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
+
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2393,21 +2393,24 @@ static inline void end_dir_add(struct in
+
+ static void d_wait_lookup(struct dentry *dentry)
+ {
+- if (d_in_lookup(dentry)) {
+- DECLARE_WAITQUEUE(wait, current);
+- add_wait_queue(dentry->d_wait, &wait);
+- do {
+- set_current_state(TASK_UNINTERRUPTIBLE);
+- spin_unlock(&dentry->d_lock);
+- schedule();
+- spin_lock(&dentry->d_lock);
+- } while (d_in_lookup(dentry));
+- }
++ struct swait_queue __wait;
++
++ if (!d_in_lookup(dentry))
++ return;
++
++ INIT_LIST_HEAD(&__wait.task_list);
++ do {
++ prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
++ spin_unlock(&dentry->d_lock);
++ schedule();
++ spin_lock(&dentry->d_lock);
++ } while (d_in_lookup(dentry));
++ finish_swait(dentry->d_wait, &__wait);
+ }
+
+ struct dentry *d_alloc_parallel(struct dentry *parent,
+ const struct qstr *name,
+- wait_queue_head_t *wq)
++ struct swait_queue_head *wq)
+ {
+ unsigned int hash = name->hash;
+ struct hlist_bl_head *b = in_lookup_hash(parent, hash);
+@@ -2516,7 +2519,7 @@ void __d_lookup_done(struct dentry *dent
+ hlist_bl_lock(b);
+ dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
+ __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
+- wake_up_all(dentry->d_wait);
++ swake_up_all(dentry->d_wait);
+ dentry->d_wait = NULL;
+ hlist_bl_unlock(b);
+ INIT_HLIST_NODE(&dentry->d_u.d_alias);
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1174,7 +1174,7 @@ static int fuse_direntplus_link(struct f
+ struct inode *dir = d_inode(parent);
+ struct fuse_conn *fc;
+ struct inode *inode;
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ if (!o->nodeid) {
+ /*
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1629,7 +1629,7 @@ static struct dentry *lookup_slow(const
+ {
+ struct dentry *dentry = ERR_PTR(-ENOENT), *old;
+ struct inode *inode = dir->d_inode;
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ inode_lock_shared(inode);
+ /* Don't go there if it's already dead */
+@@ -3086,7 +3086,7 @@ static int lookup_open(struct nameidata
+ struct dentry *dentry;
+ int error, create_error = 0;
+ umode_t mode = op->mode;
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ if (unlikely(IS_DEADDIR(dir_inode)))
+ return -ENOENT;
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -485,7 +485,7 @@ static
+ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
+ {
+ struct qstr filename = QSTR_INIT(entry->name, entry->len);
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ struct dentry *dentry;
+ struct dentry *alias;
+ struct inode *dir = d_inode(parent);
+@@ -1484,7 +1484,7 @@ int nfs_atomic_open(struct inode *dir, s
+ struct file *file, unsigned open_flags,
+ umode_t mode, int *opened)
+ {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ struct nfs_open_context *ctx;
+ struct dentry *res;
+ struct iattr attr = { .ia_valid = ATTR_OPEN };
+--- a/fs/nfs/unlink.c
++++ b/fs/nfs/unlink.c
+@@ -12,7 +12,7 @@
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/nfs_fs.h>
+ #include <linux/sched.h>
+-#include <linux/wait.h>
++#include <linux/swait.h>
+ #include <linux/namei.h>
+ #include <linux/fsnotify.h>
+
+@@ -205,7 +205,7 @@ nfs_async_unlink(struct dentry *dentry,
+ goto out_free_name;
+ }
+ data->res.dir_attr = &data->dir_attr;
+- init_waitqueue_head(&data->wq);
++ init_swait_queue_head(&data->wq);
+
+ status = -EBUSY;
+ spin_lock(&dentry->d_lock);
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1819,7 +1819,7 @@ bool proc_fill_cache(struct file *file,
+
+ child = d_hash_and_lookup(dir, &qname);
+ if (!child) {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ child = d_alloc_parallel(dir, &qname, &wq);
+ if (IS_ERR(child))
+ goto end_instantiate;
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -627,7 +627,7 @@ static bool proc_sys_fill_cache(struct f
+
+ child = d_lookup(dir, &qname);
+ if (!child) {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ child = d_alloc_parallel(dir, &qname, &wq);
+ if (IS_ERR(child))
+ return false;
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -101,7 +101,7 @@ struct dentry {
+
+ union {
+ struct list_head d_lru; /* LRU list */
+- wait_queue_head_t *d_wait; /* in-lookup ones only */
++ struct swait_queue_head *d_wait; /* in-lookup ones only */
+ };
+ struct list_head d_child; /* child of parent list */
+ struct list_head d_subdirs; /* our children */
+@@ -231,7 +231,7 @@ extern void d_set_d_op(struct dentry *de
+ extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
+ extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
+ extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
+- wait_queue_head_t *);
++ struct swait_queue_head *);
+ extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
+ extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+ extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1484,7 +1484,7 @@ struct nfs_unlinkdata {
+ struct nfs_removeargs args;
+ struct nfs_removeres res;
+ struct dentry *dentry;
+- wait_queue_head_t wq;
++ struct swait_queue_head wq;
+ struct rpc_cred *cred;
+ struct nfs_fattr dir_attr;
+ long timeout;
+--- a/kernel/sched/swait.c
++++ b/kernel/sched/swait.c
+@@ -74,6 +74,7 @@ void swake_up_all(struct swait_queue_hea
+ if (!swait_active(q))
+ return;
+
++ WARN_ON(irqs_disabled());
+ raw_spin_lock_irq(&q->lock);
+ list_splice_init(&q->task_list, &tmp);
+ while (!list_empty(&tmp)) {
diff --git a/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
new file mode 100644
index 00000000000000..3590c36ff2ea69
--- /dev/null
+++ b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
@@ -0,0 +1,138 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 15 Sep 2016 10:51:27 +0200
+Subject: [PATCH] fs/nfs: turn rmdir_sem into a semaphore
+
+The RW semaphore had a reader side which used the _non_owner version
+because it most likely took the reader lock in one thread and released
+it in another, which would cause lockdep to complain if the "regular"
+version was used.
+On -RT we need the owner because the rw lock is turned into an rtmutex.
+The semaphores, on the other hand, are plain and simple and should work
+as expected. We can't have multiple readers, but on -RT we don't allow
+multiple readers anyway, so that is not a loss.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/nfs/dir.c | 8 ++++++++
+ fs/nfs/inode.c | 4 ++++
+ fs/nfs/unlink.c | 31 +++++++++++++++++++++++++++----
+ include/linux/nfs_fs.h | 4 ++++
+ 4 files changed, 43 insertions(+), 4 deletions(-)
+
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1799,7 +1799,11 @@ int nfs_rmdir(struct inode *dir, struct
+
+ trace_nfs_rmdir_enter(dir, dentry);
+ if (d_really_is_positive(dentry)) {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ down(&NFS_I(d_inode(dentry))->rmdir_sem);
++#else
+ down_write(&NFS_I(d_inode(dentry))->rmdir_sem);
++#endif
+ error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
+ /* Ensure the VFS deletes this inode */
+ switch (error) {
+@@ -1809,7 +1813,11 @@ int nfs_rmdir(struct inode *dir, struct
+ case -ENOENT:
+ nfs_dentry_handle_enoent(dentry);
+ }
++#ifdef CONFIG_PREEMPT_RT_BASE
++ up(&NFS_I(d_inode(dentry))->rmdir_sem);
++#else
+ up_write(&NFS_I(d_inode(dentry))->rmdir_sem);
++#endif
+ } else
+ error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
+ trace_nfs_rmdir_exit(dir, dentry, error);
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1957,7 +1957,11 @@ static void init_once(void *foo)
+ nfsi->nrequests = 0;
+ nfsi->commit_info.ncommit = 0;
+ atomic_set(&nfsi->commit_info.rpcs_out, 0);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ sema_init(&nfsi->rmdir_sem, 1);
++#else
+ init_rwsem(&nfsi->rmdir_sem);
++#endif
+ nfs4_init_once(nfsi);
+ }
+
+--- a/fs/nfs/unlink.c
++++ b/fs/nfs/unlink.c
+@@ -51,6 +51,29 @@ static void nfs_async_unlink_done(struct
+ rpc_restart_call_prepare(task);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void nfs_down_anon(struct semaphore *sema)
++{
++ down(sema);
++}
++
++static void nfs_up_anon(struct semaphore *sema)
++{
++ up(sema);
++}
++
++#else
++static void nfs_down_anon(struct rw_semaphore *rwsem)
++{
++ down_read_non_owner(rwsem);
++}
++
++static void nfs_up_anon(struct rw_semaphore *rwsem)
++{
++ up_read_non_owner(rwsem);
++}
++#endif
++
+ /**
+ * nfs_async_unlink_release - Release the sillydelete data.
+ * @task: rpc_task of the sillydelete
+@@ -64,7 +87,7 @@ static void nfs_async_unlink_release(voi
+ struct dentry *dentry = data->dentry;
+ struct super_block *sb = dentry->d_sb;
+
+- up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
++ nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
+ d_lookup_done(dentry);
+ nfs_free_unlinkdata(data);
+ dput(dentry);
+@@ -117,10 +140,10 @@ static int nfs_call_unlink(struct dentry
+ struct inode *dir = d_inode(dentry->d_parent);
+ struct dentry *alias;
+
+- down_read_non_owner(&NFS_I(dir)->rmdir_sem);
++ nfs_down_anon(&NFS_I(dir)->rmdir_sem);
+ alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq);
+ if (IS_ERR(alias)) {
+- up_read_non_owner(&NFS_I(dir)->rmdir_sem);
++ nfs_up_anon(&NFS_I(dir)->rmdir_sem);
+ return 0;
+ }
+ if (!d_in_lookup(alias)) {
+@@ -142,7 +165,7 @@ static int nfs_call_unlink(struct dentry
+ ret = 0;
+ spin_unlock(&alias->d_lock);
+ dput(alias);
+- up_read_non_owner(&NFS_I(dir)->rmdir_sem);
++ nfs_up_anon(&NFS_I(dir)->rmdir_sem);
+ /*
+ * If we'd displaced old cached devname, free it. At that
+ * point dentry is definitely not a root, so we won't need
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -165,7 +165,11 @@ struct nfs_inode {
+
+ /* Readers: in-flight sillydelete RPC calls */
+ /* Writers: rmdir */
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct semaphore rmdir_sem;
++#else
+ struct rw_semaphore rmdir_sem;
++#endif
+
+ #if IS_ENABLED(CONFIG_NFS_V4)
+ struct nfs4_cached_acl *nfs4_acl;
diff --git a/patches/fs-replace-bh_uptodate_lock-for-rt.patch b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
index 153a6283046e76..f1ad50c6910480 100644
--- a/patches/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -300,8 +300,7 @@ static void end_buffer_async_read(struct
+@@ -301,8 +301,7 @@ static void end_buffer_async_read(struct
* decide that the page is now completely done.
*/
first = page_buffers(page);
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
-@@ -314,8 +313,7 @@ static void end_buffer_async_read(struct
+@@ -315,8 +314,7 @@ static void end_buffer_async_read(struct
}
tmp = tmp->b_this_page;
} while (tmp != bh);
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If none of the buffers had errors and they are all
-@@ -327,9 +325,7 @@ static void end_buffer_async_read(struct
+@@ -328,9 +326,7 @@ static void end_buffer_async_read(struct
return;
still_busy:
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -357,8 +353,7 @@ void end_buffer_async_write(struct buffe
+@@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffe
}
first = page_buffers(page);
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_write(bh);
unlock_buffer(bh);
-@@ -370,15 +365,12 @@ void end_buffer_async_write(struct buffe
+@@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffe
}
tmp = tmp->b_this_page;
}
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(end_buffer_async_write);
-@@ -3314,6 +3306,7 @@ struct buffer_head *alloc_buffer_head(gf
+@@ -3384,6 +3376,7 @@ struct buffer_head *alloc_buffer_head(gf
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index aef8584cc7785f..d5f6d263ef6efd 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1669,6 +1669,8 @@ tracing_generic_entry_update(struct trac
+@@ -1909,6 +1909,8 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -2566,9 +2568,10 @@ static void print_lat_help_header(struct
+@@ -2897,9 +2899,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
-@@ -188,6 +188,8 @@ static int trace_define_common_fields(vo
+@@ -187,6 +187,8 @@ static int trace_define_common_fields(vo
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
diff --git a/patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
index 032f845b13b7ce..edc01e4b0a261b 100644
--- a/patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+++ b/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -874,7 +874,9 @@ void exit_pi_state_list(struct task_stru
+@@ -895,7 +895,9 @@ void exit_pi_state_list(struct task_stru
* task still owns the PI-state:
*/
if (head->next != next) {
diff --git a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
index 471619ef97fb24..09e5a4325c286e 100644
--- a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+++ b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
@@ -5,117 +5,67 @@ Subject: genirq: Do not invoke the affinity callback via a workqueue on RT
 Joe Korty reported that __irq_set_affinity_locked() schedules a
workqueue while holding a rawlock which results in a might_sleep()
warning.
-This patch moves the invokation into a process context so that we only
-wakeup() a process while holding the lock.
+This patch uses swork_queue() instead.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/interrupt.h | 2 +
- kernel/irq/manage.c | 79 ++++++++++++++++++++++++++++++++++++++++++++--
- 2 files changed, 78 insertions(+), 3 deletions(-)
+ drivers/scsi/qla2xxx/qla_isr.c | 4 +++
+ include/linux/interrupt.h | 5 ++++
+ kernel/irq/manage.c | 43 ++++++++++++++++++++++++++++++++++++++---
+ 3 files changed, 49 insertions(+), 3 deletions(-)
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3125,7 +3125,11 @@ qla24xx_enable_msix(struct qla_hw_data *
+ * kref_put().
+ */
+ kref_get(&qentry->irq_notify.kref);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ swork_queue(&qentry->irq_notify.swork);
++#else
+ schedule_work(&qentry->irq_notify.work);
++#endif
+ }
+
+ /*
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -217,6 +217,7 @@ extern void resume_device_irqs(void);
- * @irq: Interrupt to which notification applies
- * @kref: Reference count, for internal use
- * @work: Work item, for internal use
-+ * @list: List item for deferred callbacks
- * @notify: Function to be called on change. This will be
- * called in process context.
- * @release: Function to be called on release. This will be
-@@ -228,6 +229,7 @@ struct irq_affinity_notify {
+@@ -14,6 +14,7 @@
+ #include <linux/hrtimer.h>
+ #include <linux/kref.h>
+ #include <linux/workqueue.h>
++#include <linux/swork.h>
+
+ #include <linux/atomic.h>
+ #include <asm/ptrace.h>
+@@ -229,7 +230,11 @@ extern void resume_device_irqs(void);
+ struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct swork_event swork;
++#else
struct work_struct work;
-+ struct list_head list;
++#endif
void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
void (*release)(struct kref *ref);
};
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -181,6 +181,62 @@ static inline void
- irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
- #endif
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void _irq_affinity_notify(struct irq_affinity_notify *notify);
-+static struct task_struct *set_affinity_helper;
-+static LIST_HEAD(affinity_list);
-+static DEFINE_RAW_SPINLOCK(affinity_list_lock);
-+
-+static int set_affinity_thread(void *unused)
-+{
-+ while (1) {
-+ struct irq_affinity_notify *notify;
-+ int empty;
-+
-+ set_current_state(TASK_INTERRUPTIBLE);
-+
-+ raw_spin_lock_irq(&affinity_list_lock);
-+ empty = list_empty(&affinity_list);
-+ raw_spin_unlock_irq(&affinity_list_lock);
-+
-+ if (empty)
-+ schedule();
-+ if (kthread_should_stop())
-+ break;
-+ set_current_state(TASK_RUNNING);
-+try_next:
-+ notify = NULL;
-+
-+ raw_spin_lock_irq(&affinity_list_lock);
-+ if (!list_empty(&affinity_list)) {
-+ notify = list_first_entry(&affinity_list,
-+ struct irq_affinity_notify, list);
-+ list_del_init(&notify->list);
-+ }
-+ raw_spin_unlock_irq(&affinity_list_lock);
-+
-+ if (!notify)
-+ continue;
-+ _irq_affinity_notify(notify);
-+ goto try_next;
-+ }
-+ return 0;
-+}
-+
-+static void init_helper_thread(void)
-+{
-+ if (set_affinity_helper)
-+ return;
-+ set_affinity_helper = kthread_run(set_affinity_thread, NULL,
-+ "affinity-cb");
-+ WARN_ON(IS_ERR(set_affinity_helper));
-+}
-+#else
-+
-+static inline void init_helper_thread(void) { }
-+
-+#endif
-+
- int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
- {
-@@ -220,7 +276,17 @@ int irq_set_affinity_locked(struct irq_d
+@@ -235,7 +235,12 @@ int irq_set_affinity_locked(struct irq_d
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ raw_spin_lock(&affinity_list_lock);
-+ if (list_empty(&desc->affinity_notify->list))
-+ list_add_tail(&affinity_list,
-+ &desc->affinity_notify->list);
-+ raw_spin_unlock(&affinity_list_lock);
-+ wake_up_process(set_affinity_helper);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ swork_queue(&desc->affinity_notify->swork);
+#else
schedule_work(&desc->affinity_notify->work);
+#endif
}
irqd_set(data, IRQD_AFFINITY_SET);
-@@ -258,10 +324,8 @@ int irq_set_affinity_hint(unsigned int i
+@@ -273,10 +278,8 @@ int irq_set_affinity_hint(unsigned int i
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
@@ -127,26 +77,52 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
-@@ -283,6 +347,13 @@ static void irq_affinity_notify(struct w
+@@ -298,6 +301,35 @@ static void irq_affinity_notify(struct w
kref_put(&notify->kref, notify->release);
}
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void init_helper_thread(void)
++{
++ static int init_sworker_once;
++
++ if (init_sworker_once)
++ return;
++ if (WARN_ON(swork_get()))
++ return;
++ init_sworker_once = 1;
++}
++
++static void irq_affinity_notify(struct swork_event *swork)
++{
++ struct irq_affinity_notify *notify =
++ container_of(swork, struct irq_affinity_notify, swork);
++ _irq_affinity_notify(notify);
++}
++
++#else
++
+static void irq_affinity_notify(struct work_struct *work)
+{
+ struct irq_affinity_notify *notify =
+ container_of(work, struct irq_affinity_notify, work);
+ _irq_affinity_notify(notify);
+}
++#endif
+
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
-@@ -312,6 +383,8 @@ irq_set_affinity_notifier(unsigned int i
+@@ -326,7 +358,12 @@ irq_set_affinity_notifier(unsigned int i
+ if (notify) {
notify->irq = irq;
kref_init(&notify->kref);
- INIT_WORK(&notify->work, irq_affinity_notify);
-+ INIT_LIST_HEAD(&notify->list);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ INIT_SWORK(&notify->swork, irq_affinity_notify);
+ init_helper_thread();
++#else
+ INIT_WORK(&notify->work, irq_affinity_notify);
++#endif
}
raw_spin_lock_irqsave(&desc->lock, flags);
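For readers not familiar with swork: the patch above (and the hrtimer patch
further down) queues deferred work through the RT tree's simple-work
infrastructure instead of schedule_work(), so the wakeup can be issued from
(raw-)atomic context while the callback runs in process context. Below is a
rough usage sketch based only on the calls visible in this queue
(INIT_SWORK(), swork_get(), swork_queue()); the names my_event, my_handler
and my_kick are made up for illustration:

	#include <linux/swork.h>

	static struct swork_event my_event;

	/* Runs in the swork kthread, i.e. preemptible process context. */
	static void my_handler(struct swork_event *sev)
	{
		/* do the work that must not run in hard-irq context */
	}

	static int __init my_setup(void)
	{
		WARN_ON(swork_get());		/* bring up the worker thread */
		INIT_SWORK(&my_event, my_handler);
		return 0;
	}

	/* Called from atomic context, e.g. under a raw spinlock: */
	static void my_kick(void)
	{
		swork_queue(&my_event);
	}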
diff --git a/patches/genirq-force-threading.patch b/patches/genirq-force-threading.patch
index 497fa443068271..a20c2ee121b300 100644
--- a/patches/genirq-force-threading.patch
+++ b/patches/genirq-force-threading.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -390,9 +390,13 @@ extern int irq_set_irqchip_state(unsigne
+@@ -398,9 +398,13 @@ extern int irq_set_irqchip_state(unsigne
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
diff --git a/patches/genirq-update-irq_set_irqchip_state-documentation.patch b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
index 8c6e6f3a57e0c1..91cace952f611a 100644
--- a/patches/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -2084,7 +2084,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2111,7 +2111,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
diff --git a/patches/gpu_don_t_check_for_the_lock_owner.patch b/patches/gpu_don_t_check_for_the_lock_owner.patch
new file mode 100644
index 00000000000000..123656cbd3671c
--- /dev/null
+++ b/patches/gpu_don_t_check_for_the_lock_owner.patch
@@ -0,0 +1,32 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 14 Jul 2015 14:26:34 +0200
+Subject: gpu: don't check for the lock owner.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_gem_shrinker.c | 2 +-
+ drivers/gpu/drm/msm/msm_gem_shrinker.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
++++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+@@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mu
+ if (!mutex_is_locked(mutex))
+ return false;
+
+-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
++#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE)
+ return mutex->owner == task;
+ #else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
++++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+@@ -23,7 +23,7 @@ static bool mutex_is_locked_by(struct mu
+ if (!mutex_is_locked(mutex))
+ return false;
+
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
++#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
+ return mutex->owner == task;
+ #else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/patches/hlist-Add-hlist_is_singular_node-helper.patch b/patches/hlist-Add-hlist_is_singular_node-helper.patch
deleted file mode 100644
index f81ebe5c39ff81..00000000000000
--- a/patches/hlist-Add-hlist_is_singular_node-helper.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:27 +0000
-Subject: [PATCH 11/22] hlist: Add hlist_is_singular_node() helper
-
-Required to figure out whether the entry is the only one in the hlist.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/list.h | 10 ++++++++++
- 1 file changed, 10 insertions(+)
-
---- a/include/linux/list.h
-+++ b/include/linux/list.h
-@@ -679,6 +679,16 @@ static inline bool hlist_fake(struct hli
- }
-
- /*
-+ * Check whether the node is the only node of the head without
-+ * accessing head.
-+ */
-+static inline bool hlist_is_singular_node(struct hlist_node *n,
-+ struct hlist_head *h)
-+{
-+ return !n->next && n->pprev == &h->first;
-+}
-+
-+/*
- * Move a list from one list head to another. Fixup the pprev
- * reference of the first entry if it exists.
- */
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 2053bd9b9f074e..44ee873498420a 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -221,9 +221,6 @@ static inline void cpu_notifier_register
+@@ -192,9 +192,6 @@ static inline void cpu_notifier_register
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
-@@ -233,6 +230,8 @@ extern void get_online_cpus(void);
+@@ -204,6 +201,8 @@ extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
-@@ -250,6 +249,8 @@ static inline void cpu_hotplug_done(void
+@@ -221,6 +220,8 @@ static inline void cpu_hotplug_done(void
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
@@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void get_online_cpus(void)
{
-@@ -807,6 +901,8 @@ static int __ref _cpu_down(unsigned int
+@@ -799,6 +893,8 @@ static int __ref _cpu_down(unsigned int
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int prev_state, ret = 0;
bool hasdied = false;
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -814,7 +910,27 @@ static int __ref _cpu_down(unsigned int
+@@ -806,7 +902,27 @@ static int __ref _cpu_down(unsigned int
if (!cpu_present(cpu))
return -EINVAL;
@@ -186,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpuhp_tasks_frozen = tasks_frozen;
-@@ -853,6 +969,8 @@ static int __ref _cpu_down(unsigned int
+@@ -845,6 +961,8 @@ static int __ref _cpu_down(unsigned int
hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
diff --git a/patches/hotplug-use-migrate-disable.patch b/patches/hotplug-use-migrate-disable.patch
index 5182f0db5489c7..2c0fa02fcbac5a 100644
--- a/patches/hotplug-use-migrate-disable.patch
+++ b/patches/hotplug-use-migrate-disable.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -918,14 +918,13 @@ static int __ref _cpu_down(unsigned int
+@@ -910,14 +910,13 @@ static int __ref _cpu_down(unsigned int
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpu_hotplug_begin();
ret = cpu_unplug_begin(cpu);
-@@ -974,6 +973,7 @@ static int __ref _cpu_down(unsigned int
+@@ -966,6 +965,7 @@ static int __ref _cpu_down(unsigned int
cpu_unplug_done(cpu);
out_cancel:
cpu_hotplug_done();
diff --git a/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
index 85434fd685cd1b..df0ebb63cb1725 100644
--- a/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
+++ b/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
@@ -42,72 +42,46 @@ Reference upstream commit b68d61c705ef02384c0538b8d9374545097899ca
from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git, which
makes a similar change.
-add a helper thread which does the call to schedule_work and wake up that
-thread instead of calling schedule_work directly.
-
-
Signed-off-by: Yang Shi <yang.shi@windriver.com>
+[bigeasy: use swork_queue() instead of a helper thread]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/time/hrtimer.c | 40 ++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 40 insertions(+)
+ kernel/time/hrtimer.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -48,6 +48,7 @@
- #include <linux/sched/rt.h>
- #include <linux/sched/deadline.h>
- #include <linux/timer.h>
-+#include <linux/kthread.h>
- #include <linux/freezer.h>
-
- #include <asm/uaccess.h>
-@@ -707,6 +708,44 @@ static void clock_was_set_work(struct wo
-
- static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+@@ -696,6 +696,29 @@ static void hrtimer_switch_to_hres(void)
+ retrigger_next_event(NULL);
+ }
+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * RT can not call schedule_work from real interrupt context.
-+ * Need to make a thread to do the real work.
-+ */
-+static struct task_struct *clock_set_delay_thread;
-+static bool do_clock_set_delay;
+
-+static int run_clock_set_delay(void *ignore)
++static struct swork_event clock_set_delay_work;
++
++static void run_clock_set_delay(struct swork_event *event)
+{
-+ while (!kthread_should_stop()) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (do_clock_set_delay) {
-+ do_clock_set_delay = false;
-+ schedule_work(&hrtimer_work);
-+ }
-+ schedule();
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
++ clock_was_set();
+}
+
+void clock_was_set_delayed(void)
+{
-+ do_clock_set_delay = true;
-+ /* Make visible before waking up process */
-+ smp_wmb();
-+ wake_up_process(clock_set_delay_thread);
++ swork_queue(&clock_set_delay_work);
+}
+
+static __init int create_clock_set_delay_thread(void)
+{
-+ clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd");
-+ BUG_ON(!clock_set_delay_thread);
++ WARN_ON(swork_get());
++ INIT_SWORK(&clock_set_delay_work, run_clock_set_delay);
+ return 0;
+}
+early_initcall(create_clock_set_delay_thread);
+#else /* PREEMPT_RT_FULL */
- /*
- * Called from timekeeping and resume code to reprogramm the hrtimer
- * interrupt device on all cpus.
-@@ -715,6 +754,7 @@ void clock_was_set_delayed(void)
++
+ static void clock_was_set_work(struct work_struct *work)
+ {
+ clock_was_set();
+@@ -711,6 +734,7 @@ void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
diff --git a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
index 5b432c637667c0..98c116e71b88ec 100644
--- a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -15,10 +15,10 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/hrtimer.h | 7 ++
kernel/sched/core.c | 1
kernel/sched/rt.c | 1
- kernel/time/hrtimer.c | 137 +++++++++++++++++++++++++++++++++++++++++++----
+ kernel/time/hrtimer.c | 144 ++++++++++++++++++++++++++++++++++++++++++++---
kernel/time/tick-sched.c | 1
kernel/watchdog.c | 1
- 6 files changed, 139 insertions(+), 9 deletions(-)
+ 6 files changed, 146 insertions(+), 9 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -66,7 +66,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
unsigned int clock_was_set_seq;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -306,6 +306,7 @@ static void init_rq_hrtick(struct rq *rq
+@@ -345,6 +345,7 @@ static void init_rq_hrtick(struct rq *rq
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
@@ -86,7 +86,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -724,11 +724,8 @@ static inline int hrtimer_is_hres_enable
+@@ -720,11 +720,8 @@ static inline int hrtimer_is_hres_enable
static inline void hrtimer_switch_to_hres(void) { }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
@@ -100,7 +100,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
-@@ -877,7 +874,7 @@ void hrtimer_wait_for_timer(const struct
+@@ -873,7 +870,7 @@ void hrtimer_wait_for_timer(const struct
{
struct hrtimer_clock_base *base = timer->base;
@@ -109,7 +109,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
wait_event(base->cpu_base->wait,
!(hrtimer_callback_running(timer)));
}
-@@ -927,6 +924,11 @@ static void __remove_hrtimer(struct hrti
+@@ -923,6 +920,11 @@ static void __remove_hrtimer(struct hrti
if (!(state & HRTIMER_STATE_ENQUEUED))
return;
@@ -121,7 +121,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
-@@ -1167,6 +1169,7 @@ static void __hrtimer_init(struct hrtime
+@@ -1163,6 +1165,7 @@ static void __hrtimer_init(struct hrtime
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
@@ -129,7 +129,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
-@@ -1207,6 +1210,7 @@ bool hrtimer_active(const struct hrtimer
+@@ -1203,6 +1206,7 @@ bool hrtimer_active(const struct hrtimer
seq = raw_read_seqcount_begin(&cpu_base->seq);
if (timer->state != HRTIMER_STATE_INACTIVE ||
@@ -137,7 +137,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
cpu_base->running == timer)
return true;
-@@ -1305,12 +1309,112 @@ static void __run_hrtimer(struct hrtimer
+@@ -1301,12 +1305,112 @@ static void __run_hrtimer(struct hrtimer
cpu_base->running = NULL;
}
@@ -250,7 +250,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
for (; active; base++, active >>= 1) {
struct timerqueue_node *node;
-@@ -1350,9 +1454,14 @@ static void __hrtimer_run_queues(struct
+@@ -1346,9 +1450,14 @@ static void __hrtimer_run_queues(struct
if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
break;
@@ -266,7 +266,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
#ifdef CONFIG_HIGH_RES_TIMERS
-@@ -1494,8 +1603,6 @@ void hrtimer_run_queues(void)
+@@ -1490,8 +1599,6 @@ void hrtimer_run_queues(void)
now = hrtimer_update_base(cpu_base);
__hrtimer_run_queues(cpu_base, now);
raw_spin_unlock(&cpu_base->lock);
@@ -275,7 +275,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
/*
-@@ -1517,6 +1624,7 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1513,6 +1620,7 @@ static enum hrtimer_restart hrtimer_wake
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
@@ -283,7 +283,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1651,6 +1759,7 @@ static void init_hrtimers_cpu(int cpu)
+@@ -1647,6 +1755,7 @@ int hrtimers_prepare_cpu(unsigned int cp
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -291,38 +291,43 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
cpu_base->cpu = cpu;
-@@ -1755,11 +1864,21 @@ static struct notifier_block hrtimers_nb
- .notifier_call = hrtimer_cpu_notify,
- };
+@@ -1723,9 +1832,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
+
+ #endif /* CONFIG_HOTPLUG_CPU */
+#ifdef CONFIG_PREEMPT_RT_BASE
++
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+ hrtimer_rt_run_pending();
+}
++
++static void hrtimers_open_softirq(void)
++{
++ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
++}
++
++#else
++static void hrtimers_open_softirq(void) { }
+#endif
+
void __init hrtimers_init(void)
{
- hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
- (void *)(long)smp_processor_id());
- register_cpu_notifier(&hrtimers_nb);
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-+#endif
+ hrtimers_prepare_cpu(smp_processor_id());
++ hrtimers_open_softirq();
}
/**
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -1194,6 +1194,7 @@ void tick_setup_sched_timer(void)
+@@ -1195,6 +1195,7 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ts->sched_timer.irqsafe = 1;
ts->sched_timer.function = tick_sched_timer;
- /* Get the next period (per cpu) */
+ /* Get the next period (per-CPU) */
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -523,6 +523,7 @@ static void watchdog_enable(unsigned int
diff --git a/patches/hrtimers-prepare-full-preemption.patch b/patches/hrtimers-prepare-full-preemption.patch
index 4fd28e70b5a4fa..67e796f4c06071 100644
--- a/patches/hrtimers-prepare-full-preemption.patch
+++ b/patches/hrtimers-prepare-full-preemption.patch
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -860,6 +860,32 @@ u64 hrtimer_forward(struct hrtimer *time
+@@ -856,6 +856,32 @@ u64 hrtimer_forward(struct hrtimer *time
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
-@@ -1077,7 +1103,7 @@ int hrtimer_cancel(struct hrtimer *timer
+@@ -1073,7 +1099,7 @@ int hrtimer_cancel(struct hrtimer *timer
if (ret >= 0)
return ret;
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1468,6 +1494,8 @@ void hrtimer_run_queues(void)
+@@ -1464,6 +1490,8 @@ void hrtimer_run_queues(void)
now = hrtimer_update_base(cpu_base);
__hrtimer_run_queues(cpu_base, now);
raw_spin_unlock(&cpu_base->lock);
@@ -103,16 +103,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1627,6 +1655,9 @@ static void init_hrtimers_cpu(int cpu)
+@@ -1623,6 +1651,9 @@ int hrtimers_prepare_cpu(unsigned int cp
cpu_base->cpu = cpu;
hrtimer_init_hres(cpu_base);
+#ifdef CONFIG_PREEMPT_RT_BASE
+ init_waitqueue_head(&cpu_base->wait);
+#endif
+ return 0;
}
- #ifdef CONFIG_HOTPLUG_CPU
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime
diff --git a/patches/hwlatdetect.patch b/patches/hwlatdetect.patch
index 5c1859eefeb32e..93df5c5368a47b 100644
--- a/patches/hwlatdetect.patch
+++ b/patches/hwlatdetect.patch
@@ -122,7 +122,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
depends on PCI
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
-@@ -39,6 +39,7 @@ obj-$(CONFIG_C2PORT) += c2port/
+@@ -38,6 +38,7 @@ obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/
diff --git a/patches/i2c-omap-drop-the-lock-hard-irq-context.patch b/patches/i2c-omap-drop-the-lock-hard-irq-context.patch
deleted file mode 100644
index cf711e0e9237e0..00000000000000
--- a/patches/i2c-omap-drop-the-lock-hard-irq-context.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 21 Mar 2013 11:35:49 +0100
-Subject: i2c/omap: drop the lock hard irq context
-
-The lock is taken while reading two registers. On RT the lock is first
-taken in hard irq context, where it might sleep, and then in the threaded irq.
-The threaded irq runs in oneshot mode, so the hard irq does not run again
-until the thread completes; there is therefore no reason to grab the lock.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/i2c/busses/i2c-omap.c | 5 +----
- 1 file changed, 1 insertion(+), 4 deletions(-)
-
---- a/drivers/i2c/busses/i2c-omap.c
-+++ b/drivers/i2c/busses/i2c-omap.c
-@@ -995,15 +995,12 @@ omap_i2c_isr(int irq, void *dev_id)
- u16 mask;
- u16 stat;
-
-- spin_lock(&omap->lock);
-- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
- stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
-+ mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
-
- if (stat & mask)
- ret = IRQ_WAKE_THREAD;
-
-- spin_unlock(&omap->lock);
--
- return ret;
- }
-
diff --git a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index 8582799ec45877..f7fe1ce4fc43a6 100644
--- a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -18,9 +18,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -11496,7 +11496,7 @@ void intel_check_page_flip(struct drm_de
+@@ -11613,7 +11613,7 @@ void intel_check_page_flip(struct drm_i9
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
- WARN_ON(!in_interrupt());
+ WARN_ON_NONRT(!in_interrupt());
diff --git a/patches/i915_compile_fix.patch b/patches/i915_compile_fix.patch
deleted file mode 100644
index 05f39cdeae6b6d..00000000000000
--- a/patches/i915_compile_fix.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 14 Jul 2015 14:26:34 +0200
-Subject: gpu/i915: don't open code these things
-
-The opencode part is gone in 1f83fee0 ("drm/i915: clear up wedged transitions")
-the owner check is still there.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/gpu/drm/i915/i915_gem_shrinker.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
-+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
-@@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mu
- if (!mutex_is_locked(mutex))
- return false;
-
--#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
-+#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE)
- return mutex->owner == task;
- #else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/patches/infiniband-mellanox-ib-use-nort-irq.patch b/patches/infiniband-mellanox-ib-use-nort-irq.patch
index 06a8b945da1914..327f8669e68d6f 100644
--- a/patches/infiniband-mellanox-ib-use-nort-irq.patch
+++ b/patches/infiniband-mellanox-ib-use-nort-irq.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-@@ -883,7 +883,7 @@ void ipoib_mcast_restart_task(struct wor
+@@ -897,7 +897,7 @@ void ipoib_mcast_restart_task(struct wor
ipoib_dbg_mcast(priv, "restarting multicast task\n");
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
netif_addr_lock(dev);
spin_lock(&priv->lock);
-@@ -965,7 +965,7 @@ void ipoib_mcast_restart_task(struct wor
+@@ -979,7 +979,7 @@ void ipoib_mcast_restart_task(struct wor
spin_unlock(&priv->lock);
netif_addr_unlock(dev);
diff --git a/patches/infiniband-ulp-ipoib-remove-pkey_mutex.patch b/patches/infiniband-ulp-ipoib-remove-pkey_mutex.patch
deleted file mode 100644
index 5e135a65aef8c9..00000000000000
--- a/patches/infiniband-ulp-ipoib-remove-pkey_mutex.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 11 May 2016 11:52:23 +0200
-Subject: infiniband/ulp/ipoib: remove pkey_mutex
-
-The last user of pkey_mutex was removed in db84f8803759 ("IB/ipoib: Use
-P_Key change event instead of P_Key polling mechanism") but the lock
-remained.
-This patch removes it.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 2 --
- 1 file changed, 2 deletions(-)
-
---- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
-@@ -51,8 +51,6 @@ MODULE_PARM_DESC(data_debug_level,
- "Enable data path debug tracing if > 0");
- #endif
-
--static DEFINE_MUTEX(pkey_mutex);
--
- struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
- struct ib_pd *pd, struct ib_ah_attr *attr)
- {
diff --git a/patches/introduce_migrate_disable_cpu_light.patch b/patches/introduce_migrate_disable_cpu_light.patch
index 880f244e7f3375..cdefe6d736d4ed 100644
--- a/patches/introduce_migrate_disable_cpu_light.patch
+++ b/patches/introduce_migrate_disable_cpu_light.patch
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -221,6 +221,9 @@ static inline void cpu_notifier_register
+@@ -192,6 +192,9 @@ static inline void cpu_notifier_register
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT_NOTIFIERS
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1429,6 +1429,12 @@ struct task_struct {
+@@ -1495,6 +1495,12 @@ struct task_struct {
#endif
unsigned int policy;
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -1875,14 +1881,6 @@ extern int arch_task_struct_size __read_
+@@ -1946,14 +1952,6 @@ extern int arch_task_struct_size __read_
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
-@@ -3164,6 +3162,31 @@ static inline void set_task_cpu(struct t
+@@ -3394,6 +3392,31 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* boot command line:
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1051,6 +1051,11 @@ void do_set_cpus_allowed(struct task_str
+@@ -1089,6 +1089,11 @@ void do_set_cpus_allowed(struct task_str
lockdep_assert_held(&p->pi_lock);
@@ -162,16 +162,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
queued = task_on_rq_queued(p);
running = task_current(rq, p);
-@@ -1112,7 +1117,7 @@ static int __set_cpus_allowed_ptr(struct
- do_set_cpus_allowed(p, new_mask);
+@@ -1168,7 +1173,7 @@ static int __set_cpus_allowed_ptr(struct
+ }
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask))
+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
- dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -3062,6 +3067,69 @@ static inline void schedule_debug(struct
+ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+@@ -3237,6 +3242,69 @@ static inline void schedule_debug(struct
schedstat_inc(this_rq(), sched_count);
}
@@ -243,7 +243,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -559,6 +559,9 @@ void print_rt_rq(struct seq_file *m, int
+@@ -552,6 +552,9 @@ void print_rt_rq(struct seq_file *m, int
P(rt_throttled);
PN(rt_time);
PN(rt_runtime);
@@ -253,7 +253,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#undef PN
#undef P
-@@ -954,6 +957,10 @@ void proc_sched_show_task(struct task_st
+@@ -947,6 +950,10 @@ void proc_sched_show_task(struct task_st
#endif
P(policy);
P(prio);
diff --git a/patches/iommu-amd--Use-WARN_ON_NORT.patch b/patches/iommu-amd--Use-WARN_ON_NORT.patch
index aeed6219085eea..c813995349cdc5 100644
--- a/patches/iommu-amd--Use-WARN_ON_NORT.patch
+++ b/patches/iommu-amd--Use-WARN_ON_NORT.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
-@@ -2165,10 +2165,10 @@ static int __attach_device(struct iommu_
+@@ -1832,10 +1832,10 @@ static int __attach_device(struct iommu_
int ret;
/*
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* lock domain */
spin_lock(&domain->lock);
-@@ -2331,10 +2331,10 @@ static void __detach_device(struct iommu
+@@ -2003,10 +2003,10 @@ static void __detach_device(struct iommu
struct protection_domain *domain;
/*
diff --git a/patches/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch b/patches/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
new file mode 100644
index 00000000000000..8b459e50a97a38
--- /dev/null
+++ b/patches/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
@@ -0,0 +1,81 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 15 Sep 2016 16:58:19 +0200
+Subject: [PATCH] iommu/iova: don't disable preempt around this_cpu_ptr()
+
+Commit 583248e6620a ("iommu/iova: Disable preemption around use of
+this_cpu_ptr()") disables preemption while accessing a per-CPU variable.
+This does keep lockdep quiet. However, I don't see why it is bad if we
+get migrated to another CPU after the access.
+__iova_rcache_insert() and __iova_rcache_get() immediately lock the
+variable after obtaining it - before accessing its members.
+_If_ we get migrated away after retrieving the address of cpu_rcache
+but before taking the lock, then the *other* task on the same CPU will
+retrieve the same address of cpu_rcache and will spin on the lock.
+
+alloc_iova_fast() disables preemption while invoking
+free_cpu_cached_iovas() on each CPU. The function itself uses
+per_cpu_ptr(), which does not trigger a warning (unlike this_cpu_ptr())
+because it assumes the caller knows what they are doing: the caller
+might access the data structure from a different CPU and therefore
+needs protection against concurrent access.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/iommu/iova.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -22,6 +22,7 @@
+ #include <linux/slab.h>
+ #include <linux/smp.h>
+ #include <linux/bitops.h>
++#include <linux/cpu.h>
+
+ static bool iova_rcache_insert(struct iova_domain *iovad,
+ unsigned long pfn,
+@@ -420,10 +421,8 @@ alloc_iova_fast(struct iova_domain *iova
+
+ /* Try replenishing IOVAs by flushing rcache. */
+ flushed_rcache = true;
+- preempt_disable();
+ for_each_online_cpu(cpu)
+ free_cpu_cached_iovas(cpu, iovad);
+- preempt_enable();
+ goto retry;
+ }
+
+@@ -751,7 +750,7 @@ static bool __iova_rcache_insert(struct
+ bool can_insert = false;
+ unsigned long flags;
+
+- cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
++ cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ if (!iova_magazine_full(cpu_rcache->loaded)) {
+@@ -781,7 +780,6 @@ static bool __iova_rcache_insert(struct
+ iova_magazine_push(cpu_rcache->loaded, iova_pfn);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+- put_cpu_ptr(rcache->cpu_rcaches);
+
+ if (mag_to_free) {
+ iova_magazine_free_pfns(mag_to_free, iovad);
+@@ -815,7 +813,7 @@ static unsigned long __iova_rcache_get(s
+ bool has_pfn = false;
+ unsigned long flags;
+
+- cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
++ cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ if (!iova_magazine_empty(cpu_rcache->loaded)) {
+@@ -837,7 +835,6 @@ static unsigned long __iova_rcache_get(s
+ iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+- put_cpu_ptr(rcache->cpu_rcaches);
+
+ return iova_pfn;
+ }
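To make the reasoning in the description above concrete, here is a minimal
before/after sketch of the pattern, not the driver's actual code; the names
my_pcpu_cache, my_cache and my_insert_* are hypothetical, and the spinlock is
assumed to be initialised elsewhere:

	#include <linux/percpu.h>
	#include <linux/spinlock.h>

	struct my_pcpu_cache {
		spinlock_t lock;
		/* ... cached entries ... */
	};
	static DEFINE_PER_CPU(struct my_pcpu_cache, my_cache);

	/* Before: preemption stays disabled for the whole critical section. */
	static void my_insert_pinned(void)
	{
		struct my_pcpu_cache *c = get_cpu_ptr(&my_cache);
		unsigned long flags;

		spin_lock_irqsave(&c->lock, flags);
		/* touch c->... */
		spin_unlock_irqrestore(&c->lock, flags);
		put_cpu_ptr(&my_cache);
	}

	/* After: stay preemptible; the per-CPU spinlock alone serialises the
	 * access, even if we migrate between raw_cpu_ptr() and the lock. */
	static void my_insert_preemptible(void)
	{
		struct my_pcpu_cache *c = raw_cpu_ptr(&my_cache);
		unsigned long flags;

		spin_lock_irqsave(&c->lock, flags);
		/* touch c->... */
		spin_unlock_irqrestore(&c->lock, flags);
	}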
diff --git a/patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch b/patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
new file mode 100644
index 00000000000000..899d0a382eec73
--- /dev/null
+++ b/patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
@@ -0,0 +1,58 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 15 Sep 2016 17:16:44 +0200
+Subject: [PATCH] iommu/vt-d: don't disable preemption while accessing
+ deferred_flush()
+
+get_cpu() disables preemption and returns the current CPU number. The
+CPU number is later used only once, while retrieving the address of the
+local CPU's deferred_flush pointer.
+We can instead use raw_cpu_ptr() while we remain preemptible. The worst
+thing that can happen is that flush_unmaps_timeout() is invoked multiple
+times: once by taskA after seeing HIGH_WATER_MARK, which is then
+preempted to another CPU, and then by taskB which saw HIGH_WATER_MARK on
+the same CPU as taskA. It is also likely that ->size went from
+HIGH_WATER_MARK to 0 right after it was read, because another CPU
+invoked flush_unmaps_timeout() for this CPU.
+The access to flush_data is protected by a spinlock, so even if we get
+migrated to another CPU or preempted, the data structure is protected.
+
+While at it, I marked deferred_flush static since I can't find a
+reference to it outside of this file.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/iommu/intel-iommu.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -479,7 +479,7 @@ struct deferred_flush_data {
+ struct deferred_flush_table *tables;
+ };
+
+-DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
++static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
+
+ /* bitmap for indexing intel_iommus */
+ static int g_num_of_iommus;
+@@ -3626,10 +3626,8 @@ static void add_unmap(struct dmar_domain
+ struct intel_iommu *iommu;
+ struct deferred_flush_entry *entry;
+ struct deferred_flush_data *flush_data;
+- unsigned int cpuid;
+
+- cpuid = get_cpu();
+- flush_data = per_cpu_ptr(&deferred_flush, cpuid);
++ flush_data = raw_cpu_ptr(&deferred_flush);
+
+ /* Flush all CPUs' entries to avoid deferring too much. If
+ * this becomes a bottleneck, can just flush us, and rely on
+@@ -3662,8 +3660,6 @@ static void add_unmap(struct dmar_domain
+ }
+ flush_data->size++;
+ spin_unlock_irqrestore(&flush_data->lock, flags);
+-
+- put_cpu();
+ }
+
+ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
diff --git a/patches/ipc-sem-rework-semaphore-wakeups.patch b/patches/ipc-sem-rework-semaphore-wakeups.patch
index 9dcc32866a81b5..7d9258650e5bf6 100644
--- a/patches/ipc-sem-rework-semaphore-wakeups.patch
+++ b/patches/ipc-sem-rework-semaphore-wakeups.patch
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/ipc/sem.c
+++ b/ipc/sem.c
-@@ -697,6 +697,13 @@ static int perform_atomic_semop(struct s
+@@ -686,6 +686,13 @@ static int perform_atomic_semop(struct s
static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
{
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (list_empty(pt)) {
/*
* Hold preempt off so that we don't get preempted and have the
-@@ -708,6 +715,7 @@ static void wake_up_sem_queue_prepare(st
+@@ -697,6 +704,7 @@ static void wake_up_sem_queue_prepare(st
q->pid = error;
list_add_tail(&q->list, pt);
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -721,6 +729,7 @@ static void wake_up_sem_queue_prepare(st
+@@ -710,6 +718,7 @@ static void wake_up_sem_queue_prepare(st
*/
static void wake_up_sem_queue_do(struct list_head *pt)
{
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct sem_queue *q, *t;
int did_something;
-@@ -733,6 +742,7 @@ static void wake_up_sem_queue_do(struct
+@@ -722,6 +731,7 @@ static void wake_up_sem_queue_do(struct
}
if (did_something)
preempt_enable();
diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index a3228348855fe3..142eafe83ada65 100644
--- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -938,7 +938,15 @@ irq_forced_thread_fn(struct irq_desc *de
+@@ -881,7 +881,15 @@ irq_forced_thread_fn(struct irq_desc *de
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -1388,6 +1396,9 @@ static int
+@@ -1338,6 +1346,9 @@ static int
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
diff --git a/patches/irqwork-Move-irq-safe-work-to-irq-context.patch b/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
index 9254680a86df3e..931ee1d4d35173 100644
--- a/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
+++ b/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
@@ -55,7 +55,7 @@ Cc: stable-rt@vger.kernel.org
* Synchronize against the irq_work @entry, ensures the entry is not
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1642,7 +1642,7 @@ void update_process_times(int user_tick)
+@@ -1630,7 +1630,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
@@ -64,7 +64,7 @@ Cc: stable-rt@vger.kernel.org
if (in_irq())
irq_work_tick();
#endif
-@@ -1682,9 +1682,7 @@ static void run_timer_softirq(struct sof
+@@ -1670,9 +1670,7 @@ static void run_timer_softirq(struct sof
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index 7bb75c907e78bb..b49f364d2c5b0a 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -163,7 +163,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1642,7 +1642,7 @@ void update_process_times(int user_tick)
+@@ -1630,7 +1630,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
@@ -172,7 +172,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (in_irq())
irq_work_tick();
#endif
-@@ -1682,6 +1682,10 @@ static void run_timer_softirq(struct sof
+@@ -1670,6 +1670,10 @@ static void run_timer_softirq(struct sof
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/patches/jbd2-Fix-lockdep-annotation-in-add_transaction_credi.patch b/patches/jbd2-Fix-lockdep-annotation-in-add_transaction_credi.patch
new file mode 100644
index 00000000000000..77e9be35adc56a
--- /dev/null
+++ b/patches/jbd2-Fix-lockdep-annotation-in-add_transaction_credi.patch
@@ -0,0 +1,64 @@
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 19 Sep 2016 14:30:43 +0200
+Subject: [PATCH] jbd2: Fix lockdep annotation in add_transaction_credits()
+
+Thomas has reported a lockdep splat hitting in
+add_transaction_credits(). The problem is that that function calls
+jbd2_might_wait_for_commit() while holding j_state_lock which is wrong
+(we do not really wait for transaction commit while holding that lock).
+
+Fix the problem by moving jbd2_might_wait_for_commit() into places where
+we are ready to wait for transaction commit and thus j_state_lock is
+unlocked.
+
+Fixes: 1eaa566d368b214d99cbb973647c1b0b8102a9ae
+Reported-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/jbd2/transaction.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -159,6 +159,7 @@ static void wait_transaction_locked(jour
+ read_unlock(&journal->j_state_lock);
+ if (need_to_start)
+ jbd2_log_start_commit(journal, tid);
++ jbd2_might_wait_for_commit(journal);
+ schedule();
+ finish_wait(&journal->j_wait_transaction_locked, &wait);
+ }
+@@ -182,8 +183,6 @@ static int add_transaction_credits(journ
+ int needed;
+ int total = blocks + rsv_blocks;
+
+- jbd2_might_wait_for_commit(journal);
+-
+ /*
+ * If the current transaction is locked down for commit, wait
+ * for the lock to be released.
+@@ -214,6 +213,7 @@ static int add_transaction_credits(journ
+ if (atomic_read(&journal->j_reserved_credits) + total >
+ journal->j_max_transaction_buffers) {
+ read_unlock(&journal->j_state_lock);
++ jbd2_might_wait_for_commit(journal);
+ wait_event(journal->j_wait_reserved,
+ atomic_read(&journal->j_reserved_credits) + total <=
+ journal->j_max_transaction_buffers);
+@@ -238,6 +238,7 @@ static int add_transaction_credits(journ
+ if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
+ atomic_sub(total, &t->t_outstanding_credits);
+ read_unlock(&journal->j_state_lock);
++ jbd2_might_wait_for_commit(journal);
+ write_lock(&journal->j_state_lock);
+ if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
+ __jbd2_log_wait_for_space(journal);
+@@ -255,6 +256,7 @@ static int add_transaction_credits(journ
+ sub_reserved_credits(journal, rsv_blocks);
+ atomic_sub(total, &t->t_outstanding_credits);
+ read_unlock(&journal->j_state_lock);
++ jbd2_might_wait_for_commit(journal);
+ wait_event(journal->j_wait_reserved,
+ atomic_read(&journal->j_reserved_credits) + rsv_blocks
+ <= journal->j_max_transaction_buffers / 2);
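The rule the fix follows is to issue the jbd2_might_wait_for_commit() annotation only once j_state_lock has been dropped, i.e. only at points where the task can genuinely block on a commit. A generic sketch of that ordering, with made-up names and might_sleep() standing in for the jbd2-specific lockdep annotation, could look like this:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_RWLOCK(state_lock);
static DECLARE_WAIT_QUEUE_HEAD(x_waitq);
static bool x_done;

static void demo_wait_for_x(void)
{
        bool must_wait;

        read_lock(&state_lock);
        must_wait = !x_done;            /* inspect shared state under the lock */
        read_unlock(&state_lock);

        if (must_wait) {
                /*
                 * The might-sleep style annotation goes here, after the
                 * unlock: whoever completes X may itself need state_lock,
                 * so declaring "we might wait for X" while still holding
                 * the lock would (rightly) trigger a lockdep report.
                 */
                might_sleep();
                wait_event(x_waitq, x_done);
        }
}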
diff --git a/patches/jump-label-rt.patch b/patches/jump-label-rt.patch
index 4643147ee9bc4d..aa6a73639ac4a3 100644
--- a/patches/jump-label-rt.patch
+++ b/patches/jump-label-rt.patch
@@ -24,10 +24,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -35,7 +35,7 @@ config ARM
- select HARDIRQS_SW_RESEND
+@@ -36,7 +36,7 @@ config ARM
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+ select HAVE_ARCH_HARDENED_USERCOPY
- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
diff --git a/patches/kconfig-disable-a-few-options-rt.patch b/patches/kconfig-disable-a-few-options-rt.patch
index bf0c195d4ba80f..26325d6f706a3f 100644
--- a/patches/kconfig-disable-a-few-options-rt.patch
+++ b/patches/kconfig-disable-a-few-options-rt.patch
@@ -22,12 +22,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
help
--- a/mm/Kconfig
+++ b/mm/Kconfig
-@@ -391,7 +391,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+@@ -410,7 +410,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
config TRANSPARENT_HUGEPAGE
bool "Transparent Hugepage Support"
- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
select COMPACTION
+ select RADIX_TREE_MULTIORDER
help
- Transparent Hugepages allows the kernel to use huge pages and
diff --git a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
index 7197bd82884bb7..fcdaec6a1c3db5 100644
--- a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+++ b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
@@ -75,9 +75,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -991,6 +1001,7 @@ static int takedown_cpu(unsigned int cpu
- else
- synchronize_rcu();
+@@ -983,6 +993,7 @@ static int takedown_cpu(unsigned int cpu
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ int err;
+ __cpu_unplug_wait(cpu);
/* Park the smpboot threads */
diff --git a/patches/kernel-futex-don-t-deboost-too-early.patch b/patches/kernel-futex-don-t-deboost-too-early.patch
index 7be17e7b606ac1..845cacf21bafe4 100644
--- a/patches/kernel-futex-don-t-deboost-too-early.patch
+++ b/patches/kernel-futex-don-t-deboost-too-early.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
-@@ -112,6 +113,7 @@ static inline unsigned long spin_lock_tr
+@@ -111,6 +112,7 @@ static inline unsigned long spin_lock_tr
#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
#define spin_unlock(lock) rt_spin_unlock(lock)
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
do { \
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1347,7 +1347,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1368,7 +1368,7 @@ static int wake_futex_pi(u32 __user *uad
* deboost first (and lose our higher priority), then the task might get
* scheduled away before the wake up can take place.
*/
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (deboost)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -934,13 +934,14 @@ static inline void rt_spin_lock_fastlock
+@@ -933,13 +933,14 @@ static inline void rt_spin_lock_fastlock
slowfn(lock);
}
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#ifdef CONFIG_SMP
/*
-@@ -1075,7 +1076,7 @@ static void mark_wakeup_next_waiter(stru
+@@ -1074,7 +1075,7 @@ static void mark_wakeup_next_waiter(stru
/*
* Slow path to release a rt_mutex spin_lock style
*/
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned long flags;
WAKE_Q(wake_q);
-@@ -1090,7 +1091,7 @@ static void noinline __sched rt_spin_lo
+@@ -1089,7 +1090,7 @@ static void noinline __sched rt_spin_lo
if (!rt_mutex_has_waiters(lock)) {
lock->owner = NULL;
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
-@@ -1101,6 +1102,33 @@ static void noinline __sched rt_spin_lo
+@@ -1100,6 +1101,33 @@ static void noinline __sched rt_spin_lo
/* Undo pi boosting.when necessary */
rt_mutex_adjust_prio(current);
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
-@@ -1158,6 +1186,17 @@ void __lockfunc rt_spin_unlock(spinlock_
+@@ -1157,6 +1185,17 @@ void __lockfunc rt_spin_unlock(spinlock_
}
EXPORT_SYMBOL(rt_spin_unlock);
diff --git a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
index e43e3b1cbba217..4ad41d5b52e046 100644
--- a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
+++ b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -1096,6 +1096,7 @@ static int __ref _cpu_down(unsigned int
+@@ -1088,6 +1088,7 @@ static int __ref _cpu_down(unsigned int
bool hasdied = false;
int mycpu;
cpumask_var_t cpumask;
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -1106,6 +1107,12 @@ static int __ref _cpu_down(unsigned int
+@@ -1098,6 +1099,12 @@ static int __ref _cpu_down(unsigned int
/* Move the downtaker off the unplug cpu */
if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
return -ENOMEM;
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
-@@ -1114,7 +1121,8 @@ static int __ref _cpu_down(unsigned int
+@@ -1106,7 +1113,8 @@ static int __ref _cpu_down(unsigned int
if (mycpu == cpu) {
printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
migrate_enable();
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
cpu_hotplug_begin();
-@@ -1168,6 +1176,9 @@ static int __ref _cpu_down(unsigned int
+@@ -1160,6 +1168,9 @@ static int __ref _cpu_down(unsigned int
/* This post dead nonsense must die */
if (!ret && hasdied)
cpu_notify_nofail(CPU_POST_DEAD, cpu);
diff --git a/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch b/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
index 49b05de6ca2ade..cc4a5a2744c2ba 100644
--- a/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
+++ b/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3118,7 +3118,7 @@ void migrate_disable(void)
+@@ -3293,7 +3293,7 @@ void migrate_disable(void)
{
struct task_struct *p = current;
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic++;
#endif
-@@ -3145,7 +3145,7 @@ void migrate_enable(void)
+@@ -3320,7 +3320,7 @@ void migrate_enable(void)
{
struct task_struct *p = current;
diff --git a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
index f19b2a6acf1a03..f7495d82b0c6fa 100644
--- a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
+++ b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -963,6 +963,7 @@ static void __perf_mux_hrtimer_init(stru
+@@ -1042,6 +1042,7 @@ static void __perf_mux_hrtimer_init(stru
raw_spin_lock_init(&cpuctx->hrtimer_lock);
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
timer->function = perf_mux_hrtimer_handler;
diff --git a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
index 589c691cc46ee0..651f7a56a88a63 100644
--- a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
+++ b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1502,6 +1502,11 @@ static void call_console_drivers(int lev
+@@ -1631,6 +1631,11 @@ static void call_console_drivers(int lev
if (!console_drivers)
return;
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
migrate_disable();
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
-@@ -2434,6 +2439,11 @@ void console_unblank(void)
+@@ -2565,6 +2570,11 @@ void console_unblank(void)
{
struct console *c;
diff --git a/patches/kernel-rtmutex-only-warn-once-on-a-try-lock-from-bad.patch b/patches/kernel-rtmutex-only-warn-once-on-a-try-lock-from-bad.patch
deleted file mode 100644
index f279849d57722d..00000000000000
--- a/patches/kernel-rtmutex-only-warn-once-on-a-try-lock-from-bad.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 19 May 2016 17:12:34 +0200
-Subject: [PATCH] kernel/rtmutex: only warn once on a try lock from bad
- context
-
-One warning should be enough to get one motivated to fix this. It is
-possible that this happens more than once and so starts flooding the
-output. Later the prints will be suppressed so we only get half of it.
-Depending on the console system used it might not be helpfull.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/locking/rtmutex.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1479,7 +1479,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
- int __sched rt_mutex_trylock(struct rt_mutex *lock)
- {
- #ifdef CONFIG_PREEMPT_RT_FULL
-- if (WARN_ON(in_irq() || in_nmi()))
-+ if (WARN_ON_ONCE(in_irq() || in_nmi()))
- #else
- if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
- #endif
diff --git a/patches/kgb-serial-hackaround.patch b/patches/kgb-serial-hackaround.patch
index 3b0c43ea71e407..a7244ccb9e89f3 100644
--- a/patches/kgb-serial-hackaround.patch
+++ b/patches/kgb-serial-hackaround.patch
@@ -33,7 +33,7 @@ Jason.
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
-@@ -3094,6 +3095,8 @@ void serial8250_console_write(struct uar
+@@ -3112,6 +3113,8 @@ void serial8250_console_write(struct uar
if (port->sysrq || oops_in_progress)
locked = 0;
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index 8b0db63d4f3efd..d9bd5abd622a08 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -236,7 +236,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int start_pid;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1821,6 +1821,12 @@ struct task_struct {
+@@ -1892,6 +1892,12 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
@@ -367,7 +367,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "tick-internal.h"
-@@ -995,7 +996,16 @@ void hrtimer_start_range_ns(struct hrtim
+@@ -991,7 +992,16 @@ void hrtimer_start_range_ns(struct hrtim
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
timer_stats_hrtimer_set_start_info(timer);
@@ -384,7 +384,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
leftmost = enqueue_hrtimer(timer, new_base);
if (!leftmost)
goto unlock;
-@@ -1269,6 +1279,8 @@ static void __run_hrtimer(struct hrtimer
+@@ -1265,6 +1275,8 @@ static void __run_hrtimer(struct hrtimer
cpu_base->running = NULL;
}
@@ -393,7 +393,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
struct hrtimer_clock_base *base = cpu_base->clock_base;
-@@ -1288,6 +1300,15 @@ static void __hrtimer_run_queues(struct
+@@ -1284,6 +1296,15 @@ static void __hrtimer_run_queues(struct
timer = container_of(node, struct hrtimer, node);
@@ -538,7 +538,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
depends on !GENERIC_TRACER
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
-@@ -36,6 +36,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f
+@@ -41,6 +41,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
diff --git a/patches/leds-trigger-disable-CPU-trigger-on-RT.patch b/patches/leds-trigger-disable-CPU-trigger-on-RT.patch
index b275ed2a2c1296..e59b962998aed0 100644
--- a/patches/leds-trigger-disable-CPU-trigger-on-RT.patch
+++ b/patches/leds-trigger-disable-CPU-trigger-on-RT.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
-@@ -61,7 +61,7 @@ config LEDS_TRIGGER_BACKLIGHT
+@@ -69,7 +69,7 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_CPU
bool "LED CPU Trigger"
diff --git a/patches/lglocks-rt.patch b/patches/lglocks-rt.patch
index b9f2e05f920f22..e024ff3b5a6594 100644
--- a/patches/lglocks-rt.patch
+++ b/patches/lglocks-rt.patch
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+# define lg_do_unlock(l) arch_spin_unlock(l)
+#else
+# define lg_lock_ptr struct rt_mutex
-+# define lg_do_lock(l) __rt_spin_lock(l)
++# define lg_do_lock(l) __rt_spin_lock__no_mg(l)
+# define lg_do_unlock(l) __rt_spin_unlock(l)
+#endif
/*
diff --git a/patches/localversion.patch b/patches/localversion.patch
index e1f3b8d87864f2..a02382e6df7098 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt14
++-rt1
diff --git a/patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch b/patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
index bce6d3d9d0b223..62db27d2f1bd6e 100644
--- a/patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
+++ b/patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
@@ -38,7 +38,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
-@@ -713,6 +713,7 @@ static inline void __ftrace_enabled_rest
+@@ -714,6 +714,7 @@ static inline void __ftrace_enabled_rest
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
@@ -46,7 +46,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
static inline unsigned long get_lock_parent_ip(void)
{
unsigned long addr = CALLER_ADDR0;
-@@ -724,6 +725,7 @@ static inline unsigned long get_lock_par
+@@ -725,6 +726,7 @@ static inline unsigned long get_lock_par
return addr;
return CALLER_ADDR2;
}
@@ -82,7 +82,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
sections, with microsecond accuracy.
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
-@@ -962,6 +962,7 @@ config TIMER_STATS
+@@ -977,6 +977,7 @@ config TIMER_STATS
config DEBUG_PREEMPT
bool "Debug preemptible kernel"
depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
@@ -90,7 +90,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
default y
help
If you say Y here then the kernel will use a debug variant of the
-@@ -1144,8 +1145,17 @@ config LOCK_TORTURE_TEST
+@@ -1159,8 +1160,17 @@ config LOCK_TORTURE_TEST
endmenu # lock debugging
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index dbf16445867799..8a1d3cdb0bb7a0 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined(CONFIG_IRQSOFF_TRACER) || \
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -3648,6 +3648,7 @@ static void check_flags(unsigned long fl
+@@ -3686,6 +3686,7 @@ static void check_flags(unsigned long fl
}
}
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -3662,6 +3663,7 @@ static void check_flags(unsigned long fl
+@@ -3700,6 +3701,7 @@ static void check_flags(unsigned long fl
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
diff --git a/patches/lockinglglocks_Use_preempt_enabledisable_nort()_in_lg_double_locklg_double_unlock.patch b/patches/lockinglglocks_Use_preempt_enabledisable_nort.patch
index 8c13841b08f44a..8c13841b08f44a 100644
--- a/patches/lockinglglocks_Use_preempt_enabledisable_nort()_in_lg_double_locklg_double_unlock.patch
+++ b/patches/lockinglglocks_Use_preempt_enabledisable_nort.patch
diff --git a/patches/md-raid5-percpu-handling-rt-aware.patch b/patches/md-raid5-percpu-handling-rt-aware.patch
index d23329601c0cd8..35b9d2b73e6526 100644
--- a/patches/md-raid5-percpu-handling-rt-aware.patch
+++ b/patches/md-raid5-percpu-handling-rt-aware.patch
@@ -20,7 +20,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -1918,8 +1918,9 @@ static void raid_run_ops(struct stripe_h
+@@ -1928,8 +1928,9 @@ static void raid_run_ops(struct stripe_h
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -31,7 +31,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -1975,7 +1976,8 @@ static void raid_run_ops(struct stripe_h
+@@ -1985,7 +1986,8 @@ static void raid_run_ops(struct stripe_h
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -40,8 +40,8 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
+ put_cpu_light();
}
- static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
-@@ -6415,6 +6417,7 @@ static int raid5_alloc_percpu(struct r5c
+ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
+@@ -6438,6 +6440,7 @@ static int raid5_alloc_percpu(struct r5c
__func__, cpu);
break;
}
diff --git a/patches/mips-disable-highmem-on-rt.patch b/patches/mips-disable-highmem-on-rt.patch
index 88fba7dd83f597..8424c9fd88c022 100644
--- a/patches/mips-disable-highmem-on-rt.patch
+++ b/patches/mips-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2416,7 +2416,7 @@ config CPU_R4400_WORKAROUNDS
+@@ -2480,7 +2480,7 @@ config MIPS_ASID_BITS_VARIABLE
#
config HIGHMEM
bool "High Memory Support"
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index 70afad7a5d7b0d..50e81be9f092e5 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -12,12 +12,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/swap.h | 1 +
mm/compaction.c | 6 ++++--
mm/page_alloc.c | 2 ++
- mm/swap.c | 39 +++++++++++++++++++++++----------------
- 4 files changed, 30 insertions(+), 18 deletions(-)
+ mm/swap.c | 38 ++++++++++++++++++++++----------------
+ 4 files changed, 29 insertions(+), 18 deletions(-)
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
-@@ -297,6 +297,7 @@ extern unsigned long nr_free_pagecache_p
+@@ -290,6 +290,7 @@ extern unsigned long nr_free_pagecache_p
/* linux/mm/swap.c */
@@ -27,8 +27,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void lru_cache_add_file(struct page *page);
--- a/mm/compaction.c
+++ b/mm/compaction.c
-@@ -1409,10 +1409,12 @@ static int compact_zone(struct zone *zon
- cc->migrate_pfn & ~((1UL << cc->order) - 1);
+@@ -1585,10 +1585,12 @@ static enum compact_result compact_zone(
+ block_start_pfn(cc->migrate_pfn, cc->order);
if (cc->last_migrated_pfn < current_block_start) {
- cpu = get_cpu();
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -6276,7 +6276,9 @@ static int page_alloc_cpu_notify(struct
+@@ -6590,7 +6590,9 @@ static int page_alloc_cpu_notify(struct
int cpu = (unsigned long)hcpu;
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
@@ -64,17 +64,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
-@@ -48,6 +49,9 @@ static DEFINE_PER_CPU(struct pagevec, lr
- static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
- static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
-
+@@ -50,6 +51,8 @@ static DEFINE_PER_CPU(struct pagevec, lr
+ #ifdef CONFIG_SMP
+ static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+ #endif
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
-+
+
/*
* This path almost never happens for VM activity - pages are normally
- * freed via pagevecs. But it gets used by networking.
-@@ -237,11 +241,11 @@ void rotate_reclaimable_page(struct page
+@@ -240,11 +243,11 @@ void rotate_reclaimable_page(struct page
unsigned long flags;
get_page(page);
@@ -88,9 +87,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -292,12 +296,13 @@ static bool need_activate_page_drain(int
- void activate_page(struct page *page)
+@@ -294,12 +297,13 @@ void activate_page(struct page *page)
{
+ page = compound_head(page);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+ struct pagevec *pvec = &get_locked_var(swapvec_lock,
@@ -104,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -323,7 +328,7 @@ void activate_page(struct page *page)
+@@ -326,7 +330,7 @@ void activate_page(struct page *page)
static void __lru_cache_activate_page(struct page *page)
{
@@ -113,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int i;
/*
-@@ -345,7 +350,7 @@ static void __lru_cache_activate_page(st
+@@ -348,7 +352,7 @@ static void __lru_cache_activate_page(st
}
}
@@ -122,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -387,12 +392,12 @@ EXPORT_SYMBOL(mark_page_accessed);
+@@ -390,12 +394,12 @@ EXPORT_SYMBOL(mark_page_accessed);
static void __lru_cache_add(struct page *page)
{
@@ -137,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -590,9 +595,9 @@ void lru_add_drain_cpu(int cpu)
+@@ -593,9 +597,9 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
@@ -149,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -624,11 +629,12 @@ void deactivate_file_page(struct page *p
+@@ -627,11 +631,12 @@ void deactivate_file_page(struct page *p
return;
if (likely(get_page_unless_zero(page))) {
@@ -164,7 +163,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -643,19 +649,20 @@ void deactivate_file_page(struct page *p
+@@ -646,19 +651,20 @@ void deactivate_file_page(struct page *p
void deactivate_page(struct page *page)
{
if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch
index 9f68cc881fc0b8..23a5671fdf7124 100644
--- a/patches/mm-disable-sloub-rt.patch
+++ b/patches/mm-disable-sloub-rt.patch
@@ -13,15 +13,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1718,6 +1718,7 @@ choice
+@@ -1761,6 +1761,7 @@ choice
config SLAB
bool "SLAB"
+ depends on !PREEMPT_RT_FULL
+ select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
- well in all environments. It organizes cache hot objects in
-@@ -1736,6 +1737,7 @@ config SLUB
+@@ -1781,6 +1782,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/patches/mm-enable-slub.patch b/patches/mm-enable-slub.patch
index 7413036b5898a8..e6940b53b9b62a 100644
--- a/patches/mm-enable-slub.patch
+++ b/patches/mm-enable-slub.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/slab.h
+++ b/mm/slab.h
-@@ -415,7 +415,11 @@ static inline void slab_post_alloc_hook(
+@@ -426,7 +426,11 @@ static inline void slab_post_alloc_hook(
* The slab lists for all objects.
*/
struct kmem_cache_node {
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct list_head slabs_partial; /* partial list first, better asm code */
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1143,7 +1143,7 @@ static noinline int free_debug_processin
+@@ -1145,7 +1145,7 @@ static noinline int free_debug_processin
unsigned long uninitialized_var(flags);
int ret = 0;
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
slab_lock(page);
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1178,7 +1178,7 @@ static noinline int free_debug_processin
+@@ -1180,7 +1180,7 @@ static noinline int free_debug_processin
bulk_cnt, cnt);
slab_unlock(page);
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
return ret;
-@@ -1306,6 +1306,12 @@ static inline void dec_slabs_node(struct
+@@ -1308,6 +1308,12 @@ static inline void dec_slabs_node(struct
#endif /* CONFIG_SLUB_DEBUG */
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
-@@ -1415,7 +1421,11 @@ static struct page *allocate_slab(struct
+@@ -1530,7 +1536,11 @@ static struct page *allocate_slab(struct
flags &= gfp_allowed_mask;
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
flags |= s->allocflags;
-@@ -1486,7 +1496,11 @@ static struct page *allocate_slab(struct
+@@ -1605,7 +1615,11 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_disable();
if (!page)
return NULL;
-@@ -1543,6 +1557,16 @@ static void __free_slab(struct kmem_cach
+@@ -1664,6 +1678,16 @@ static void __free_slab(struct kmem_cach
__free_pages(page, order);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-@@ -1574,6 +1598,12 @@ static void free_slab(struct kmem_cache
+@@ -1695,6 +1719,12 @@ static void free_slab(struct kmem_cache
}
call_rcu(head, rcu_free_slab);
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else
__free_slab(s, page);
}
-@@ -1681,7 +1711,7 @@ static void *get_partial_node(struct kme
+@@ -1802,7 +1832,7 @@ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
-@@ -1706,7 +1736,7 @@ static void *get_partial_node(struct kme
+@@ -1827,7 +1857,7 @@ static void *get_partial_node(struct kme
break;
}
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return object;
}
-@@ -1952,7 +1982,7 @@ static void deactivate_slab(struct kmem_
+@@ -2073,7 +2103,7 @@ static void deactivate_slab(struct kmem_
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -139,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
m = M_FULL;
-@@ -1963,7 +1993,7 @@ static void deactivate_slab(struct kmem_
+@@ -2084,7 +2114,7 @@ static void deactivate_slab(struct kmem_
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -1998,7 +2028,7 @@ static void deactivate_slab(struct kmem_
+@@ -2119,7 +2149,7 @@ static void deactivate_slab(struct kmem_
goto redo;
if (lock)
@@ -157,7 +157,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
-@@ -2030,10 +2060,10 @@ static void unfreeze_partials(struct kme
+@@ -2151,10 +2181,10 @@ static void unfreeze_partials(struct kme
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
do {
-@@ -2062,7 +2092,7 @@ static void unfreeze_partials(struct kme
+@@ -2183,7 +2213,7 @@ static void unfreeze_partials(struct kme
}
if (n)
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (discard_page) {
page = discard_page;
-@@ -2101,14 +2131,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2222,14 +2252,21 @@ static void put_cpu_partial(struct kmem_
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
@@ -201,7 +201,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2180,7 +2217,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2301,7 +2338,22 @@ static bool has_cpu_slab(int cpu, void *
static void flush_all(struct kmem_cache *s)
{
@@ -224,7 +224,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2216,10 +2268,10 @@ static unsigned long count_partial(struc
+@@ -2337,10 +2389,10 @@ static unsigned long count_partial(struc
unsigned long x = 0;
struct page *page;
@@ -237,7 +237,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2357,8 +2409,10 @@ static inline void *get_freelist(struct
+@@ -2478,8 +2530,10 @@ static inline void *get_freelist(struct
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -249,7 +249,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *freelist;
struct page *page;
-@@ -2418,6 +2472,13 @@ static void *___slab_alloc(struct kmem_c
+@@ -2539,6 +2593,13 @@ static void *___slab_alloc(struct kmem_c
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
@@ -263,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return freelist;
new_slab:
-@@ -2449,7 +2510,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2570,7 +2631,7 @@ static void *___slab_alloc(struct kmem_c
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
@@ -272,7 +272,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2461,6 +2522,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2582,6 +2643,7 @@ static void *__slab_alloc(struct kmem_ca
{
void *p;
unsigned long flags;
@@ -280,7 +280,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
-@@ -2472,8 +2534,9 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2593,8 +2655,9 @@ static void *__slab_alloc(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
#endif
@@ -291,7 +291,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return p;
}
-@@ -2659,7 +2722,7 @@ static void __slab_free(struct kmem_cach
+@@ -2780,7 +2843,7 @@ static void __slab_free(struct kmem_cach
do {
if (unlikely(n)) {
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
n = NULL;
}
prior = page->freelist;
-@@ -2691,7 +2754,7 @@ static void __slab_free(struct kmem_cach
+@@ -2812,7 +2875,7 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
@@ -309,7 +309,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -2733,7 +2796,7 @@ static void __slab_free(struct kmem_cach
+@@ -2854,7 +2917,7 @@ static void __slab_free(struct kmem_cach
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -318,7 +318,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
slab_empty:
-@@ -2748,7 +2811,7 @@ static void __slab_free(struct kmem_cach
+@@ -2869,7 +2932,7 @@ static void __slab_free(struct kmem_cach
remove_full(s, n, page);
}
@@ -327,7 +327,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -2935,6 +2998,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3074,6 +3137,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
void **p)
{
struct kmem_cache_cpu *c;
@@ -335,7 +335,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int i;
/* memcg and kmem_cache debug support */
-@@ -2958,7 +3022,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3097,7 +3161,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* of re-populating per CPU c->freelist
*/
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -344,7 +344,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(!p[i]))
goto error;
-@@ -2970,6 +3034,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3109,6 +3173,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -352,7 +352,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(flags & __GFP_ZERO)) {
-@@ -3117,7 +3182,7 @@ static void
+@@ -3256,7 +3321,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
@@ -361,7 +361,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3450,6 +3515,10 @@ static void list_slab_objects(struct kme
+@@ -3600,6 +3665,10 @@ static void list_slab_objects(struct kme
const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
@@ -372,7 +372,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *addr = page_address(page);
void *p;
unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-@@ -3470,6 +3539,7 @@ static void list_slab_objects(struct kme
+@@ -3620,6 +3689,7 @@ static void list_slab_objects(struct kme
slab_unlock(page);
kfree(map);
#endif
@@ -380,7 +380,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -3482,7 +3552,7 @@ static void free_partial(struct kmem_cac
+@@ -3633,7 +3703,7 @@ static void free_partial(struct kmem_cac
struct page *page, *h;
BUG_ON(irqs_disabled());
@@ -389,16 +389,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
remove_partial(n, page);
-@@ -3492,7 +3562,7 @@ static void free_partial(struct kmem_cac
+@@ -3643,7 +3713,7 @@ static void free_partial(struct kmem_cac
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
- }
- /*
-@@ -3706,7 +3776,7 @@ int __kmem_cache_shrink(struct kmem_cach
+ list_for_each_entry_safe(page, h, &discard, lru)
+ discard_slab(s, page);
+@@ -3901,7 +3971,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -407,7 +407,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Build lists of slabs to discard or promote.
-@@ -3737,7 +3807,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3932,7 +4002,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -416,7 +416,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
-@@ -3913,6 +3983,12 @@ void __init kmem_cache_init(void)
+@@ -4108,6 +4178,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
@@ -429,7 +429,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (debug_guardpage_minorder())
slub_max_order = 0;
-@@ -4156,7 +4232,7 @@ static int validate_slab_node(struct kme
+@@ -4354,7 +4430,7 @@ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;
@@ -438,7 +438,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
-@@ -4178,7 +4254,7 @@ static int validate_slab_node(struct kme
+@@ -4376,7 +4452,7 @@ static int validate_slab_node(struct kme
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -447,7 +447,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return count;
}
-@@ -4366,12 +4442,12 @@ static int list_locations(struct kmem_ca
+@@ -4564,12 +4640,12 @@ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;
diff --git a/patches/mm-filemap-don-t-plant-shadow-entries-without-radix-.patch b/patches/mm-filemap-don-t-plant-shadow-entries-without-radix-.patch
new file mode 100644
index 00000000000000..afa386b49af2b4
--- /dev/null
+++ b/patches/mm-filemap-don-t-plant-shadow-entries-without-radix-.patch
@@ -0,0 +1,185 @@
+From: Johannes Weiner <hannes@cmpxchg.org>
+Date: Tue, 4 Oct 2016 22:02:08 +0200
+Subject: [PATCH] mm: filemap: don't plant shadow entries without radix tree
+ node
+
+Upstream commit d3798ae8c6f3767c726403c2ca6ecc317752c9dd
+
+When the underflow checks were added to workingset_node_shadow_dec(),
+they triggered immediately:
+
+ kernel BUG at ./include/linux/swap.h:276!
+ invalid opcode: 0000 [#1] SMP
+ Modules linked in: isofs usb_storage fuse xt_CHECKSUM ipt_MASQUERADE nf_nat_masquerade_ipv4 tun nf_conntrack_netbios_ns nf_conntrack_broadcast ip6t_REJECT nf_reject_ipv6
+ soundcore wmi acpi_als pinctrl_sunrisepoint kfifo_buf tpm_tis industrialio acpi_pad pinctrl_intel tpm_tis_core tpm nfsd auth_rpcgss nfs_acl lockd grace sunrpc dm_crypt
+ CPU: 0 PID: 20929 Comm: blkid Not tainted 4.8.0-rc8-00087-gbe67d60ba944 #1
+ Hardware name: System manufacturer System Product Name/Z170-K, BIOS 1803 05/06/2016
+ task: ffff8faa93ecd940 task.stack: ffff8faa7f478000
+ RIP: page_cache_tree_insert+0xf1/0x100
+ Call Trace:
+ __add_to_page_cache_locked+0x12e/0x270
+ add_to_page_cache_lru+0x4e/0xe0
+ mpage_readpages+0x112/0x1d0
+ blkdev_readpages+0x1d/0x20
+ __do_page_cache_readahead+0x1ad/0x290
+ force_page_cache_readahead+0xaa/0x100
+ page_cache_sync_readahead+0x3f/0x50
+ generic_file_read_iter+0x5af/0x740
+ blkdev_read_iter+0x35/0x40
+ __vfs_read+0xe1/0x130
+ vfs_read+0x96/0x130
+ SyS_read+0x55/0xc0
+ entry_SYSCALL_64_fastpath+0x13/0x8f
+ Code: 03 00 48 8b 5d d8 65 48 33 1c 25 28 00 00 00 44 89 e8 75 19 48 83 c4 18 5b 41 5c 41 5d 41 5e 5d c3 0f 0b 41 bd ef ff ff ff eb d7 <0f> 0b e8 88 68 ef ff 0f 1f 84 00
+ RIP page_cache_tree_insert+0xf1/0x100
+
+This is a long-standing bug in the way shadow entries are accounted in
+the radix tree nodes. The shrinker needs to know when radix tree nodes
+contain only shadow entries, no pages, so node->count is split in half
+to count shadows in the upper bits and pages in the lower bits.
+
+Unfortunately, the radix tree implementation doesn't know of this and
+assumes all entries are in node->count. When there is a shadow entry
+directly in root->rnode and the tree is later extended, the radix tree
+implementation will copy that entry into the new node and bump its
+node->count, i.e. increase the page count bits. Once the shadow gets
+removed and we subtract from the upper counter, node->count underflows
+and triggers the warning. Afterwards, without node->count reaching 0
+again, the radix tree node is leaked.
+
+Limit shadow entries to when we have actual radix tree nodes and can
+count them properly. That means we lose the ability to detect refaults
+from files that had only the first page faulted in at eviction time.
+
+Fixes: 449dd6984d0e ("mm: keep page cache radix tree nodes in check")
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Reported-and-tested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ include/linux/radix-tree.h | 6 ++---
+ lib/radix-tree.c | 14 ++-----------
+ mm/filemap.c | 46 +++++++++++++++++++++++++++++----------------
+ 3 files changed, 36 insertions(+), 30 deletions(-)
+
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -280,9 +280,9 @@ bool __radix_tree_delete_node(struct rad
+ struct radix_tree_node *node);
+ void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
+ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
+-struct radix_tree_node *radix_tree_replace_clear_tags(
+- struct radix_tree_root *root,
+- unsigned long index, void *entry);
++void radix_tree_clear_tags(struct radix_tree_root *root,
++ struct radix_tree_node *node,
++ void **slot);
+ unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
+ void **results, unsigned long first_index,
+ unsigned int max_items);
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -1583,15 +1583,10 @@ void *radix_tree_delete(struct radix_tre
+ }
+ EXPORT_SYMBOL(radix_tree_delete);
+
+-struct radix_tree_node *radix_tree_replace_clear_tags(
+- struct radix_tree_root *root,
+- unsigned long index, void *entry)
++void radix_tree_clear_tags(struct radix_tree_root *root,
++ struct radix_tree_node *node,
++ void **slot)
+ {
+- struct radix_tree_node *node;
+- void **slot;
+-
+- __radix_tree_lookup(root, index, &node, &slot);
+-
+ if (node) {
+ unsigned int tag, offset = get_slot_offset(node, slot);
+ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+@@ -1600,9 +1595,6 @@ struct radix_tree_node *radix_tree_repla
+ /* Clear root node tags */
+ root->gfp_mask &= __GFP_BITS_MASK;
+ }
+-
+- radix_tree_replace_slot(slot, entry);
+- return node;
+ }
+
+ /**
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -169,33 +169,35 @@ static int page_cache_tree_insert(struct
+ static void page_cache_tree_delete(struct address_space *mapping,
+ struct page *page, void *shadow)
+ {
+- struct radix_tree_node *node;
+ int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
+
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ VM_BUG_ON_PAGE(nr != 1 && shadow, page);
+
+- if (shadow) {
+- mapping->nrexceptional += nr;
+- /*
+- * Make sure the nrexceptional update is committed before
+- * the nrpages update so that final truncate racing
+- * with reclaim does not see both counters 0 at the
+- * same time and miss a shadow entry.
+- */
+- smp_wmb();
+- }
+- mapping->nrpages -= nr;
+-
+ for (i = 0; i < nr; i++) {
+- node = radix_tree_replace_clear_tags(&mapping->page_tree,
+- page->index + i, shadow);
++ struct radix_tree_node *node;
++ void **slot;
++
++ __radix_tree_lookup(&mapping->page_tree, page->index + i,
++ &node, &slot);
++
++ radix_tree_clear_tags(&mapping->page_tree, node, slot);
++
+ if (!node) {
+ VM_BUG_ON_PAGE(nr != 1, page);
+- return;
++ /*
++ * We need a node to properly account shadow
++ * entries. Don't plant any without. XXX
++ */
++ shadow = NULL;
+ }
+
++ radix_tree_replace_slot(slot, shadow);
++
++ if (!node)
++ break;
++
+ workingset_node_pages_dec(node);
+ if (shadow)
+ workingset_node_shadows_inc(node);
+@@ -219,6 +221,18 @@ static void page_cache_tree_delete(struc
+ &node->private_list);
+ }
+ }
++
++ if (shadow) {
++ mapping->nrexceptional += nr;
++ /*
++ * Make sure the nrexceptional update is committed before
++ * the nrpages update so that final truncate racing
++ * with reclaim does not see both counters 0 at the
++ * same time and miss a shadow entry.
++ */
++ smp_wmb();
++ }
++ mapping->nrpages -= nr;
+ }
+
+ /*
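The split counter the changelog describes, a single node->count whose lower bits count real pages and whose upper bits count shadow entries, can be pictured with the sketch below. The bit split and helper names are illustrative (the real accessors are the workingset_node_*() helpers in include/linux/swap.h), but they show why a plain increment for a shadow entry lets the shadow half underflow later:

/* Illustrative split of one counter into a pages half and a shadows half. */
#define DEMO_PAGES_MASK         0xffffU         /* low 16 bits: pages   */
#define DEMO_SHADOWS_SHIFT      16              /* high bits: shadows   */

static inline unsigned int demo_node_pages(unsigned int count)
{
        return count & DEMO_PAGES_MASK;
}

static inline unsigned int demo_node_shadows(unsigned int count)
{
        return count >> DEMO_SHADOWS_SHIFT;
}

/*
 * The bug: generic radix tree code did a plain count++ (a pages-half
 * increment) for an entry that was really a shadow. A later shadows-half
 * decrement (count -= 1 << DEMO_SHADOWS_SHIFT) then underflowed, which
 * is the BUG quoted in the changelog, and because count never reached
 * zero again the node was leaked.
 */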
diff --git a/patches/mm-filemap-fix-mapping-nrpages-double-accounting-in-.patch b/patches/mm-filemap-fix-mapping-nrpages-double-accounting-in-.patch
new file mode 100644
index 00000000000000..feebd3f366cab7
--- /dev/null
+++ b/patches/mm-filemap-fix-mapping-nrpages-double-accounting-in-.patch
@@ -0,0 +1,40 @@
+From: Johannes Weiner <hannes@cmpxchg.org>
+Date: Tue, 4 Oct 2016 16:58:06 +0200
+Subject: [PATCH] mm: filemap: fix mapping->nrpages double accounting in fuse
+
+Upstream commit 3ddf40e8c31964b744ff10abb48c8e36a83ec6e7
+
+Commit 22f2ac51b6d6 ("mm: workingset: fix crash in shadow node shrinker
+caused by replace_page_cache_page()") switched replace_page_cache() from
+raw radix tree operations to page_cache_tree_insert() but didn't take
+into account that the latter function, unlike the raw radix tree op,
+handles mapping->nrpages. As a result, that counter is bumped for each
+page replacement rather than balanced out even.
+
+The mapping->nrpages counter is used to skip needless radix tree walks
+when invalidating, truncating, syncing inodes without pages, as well as
+statistics for userspace. Since the error is positive, we'll do more
+page cache tree walks than necessary; we won't miss a necessary one.
+And we'll report more buffer pages to userspace than there are. The
+error is limited to fuse inodes.
+
+Fixes: 22f2ac51b6d6 ("mm: workingset: fix crash in shadow node shrinker caused by replace_page_cache_page()")
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Miklos Szeredi <miklos@szeredi.hu>
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ mm/filemap.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -633,7 +633,6 @@ int replace_page_cache_page(struct page
+ __delete_from_page_cache(old, NULL);
+ error = page_cache_tree_insert(mapping, new, NULL);
+ BUG_ON(error);
+- mapping->nrpages++;
+
+ /*
+ * hugetlb pages do not participate in page cache accounting.
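The double accounting is easiest to see next to the hunk above: page_cache_tree_insert() bumps mapping->nrpages internally, so the removed explicit increment counted every replaced page twice. A shortened, annotated restatement (the comments are editorial, not from the source):

        __delete_from_page_cache(old, NULL);                 /* nrpages -= 1        */
        error = page_cache_tree_insert(mapping, new, NULL);  /* nrpages += 1 inside */
        BUG_ON(error);
        /* no further mapping->nrpages++ here: it would count 'new' a second time */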
diff --git a/patches/mm-make-vmstat-rt-aware.patch b/patches/mm-make-vmstat-rt-aware.patch
index 34b5f818f923fa..a87d95ca0cd9a9 100644
--- a/patches/mm-make-vmstat-rt-aware.patch
+++ b/patches/mm-make-vmstat-rt-aware.patch
@@ -11,8 +11,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/vmstat.h | 4 ++++
- mm/vmstat.c | 6 ++++++
- 2 files changed, 10 insertions(+)
+ mm/vmstat.c | 12 ++++++++++++
+ 2 files changed, 16 insertions(+)
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline void count_vm_events(enum vm_event_item item, long delta)
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
-@@ -226,6 +226,7 @@ void __mod_zone_page_state(struct zone *
+@@ -245,6 +245,7 @@ void __mod_zone_page_state(struct zone *
long x;
long t;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
-@@ -235,6 +236,7 @@ void __mod_zone_page_state(struct zone *
+@@ -254,6 +255,7 @@ void __mod_zone_page_state(struct zone *
x = 0;
}
__this_cpu_write(*p, x);
@@ -54,7 +54,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__mod_zone_page_state);
-@@ -267,6 +269,7 @@ void __inc_zone_state(struct zone *zone,
+@@ -265,6 +267,7 @@ void __mod_node_page_state(struct pglist
+ long x;
+ long t;
+
++ preempt_disable_rt();
+ x = delta + __this_cpu_read(*p);
+
+ t = __this_cpu_read(pcp->stat_threshold);
+@@ -274,6 +277,7 @@ void __mod_node_page_state(struct pglist
+ x = 0;
+ }
+ __this_cpu_write(*p, x);
++ preempt_enable_rt();
+ }
+ EXPORT_SYMBOL(__mod_node_page_state);
+
+@@ -306,6 +310,7 @@ void __inc_zone_state(struct zone *zone,
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
@@ -62,15 +78,31 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
-@@ -275,6 +278,7 @@ void __inc_zone_state(struct zone *zone,
+@@ -314,6 +319,7 @@ void __inc_zone_state(struct zone *zone,
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
}
+ preempt_enable_rt();
}
+ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -322,6 +328,7 @@ void __inc_node_state(struct pglist_data
+ s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_inc_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v > t)) {
+@@ -330,6 +337,7 @@ void __inc_node_state(struct pglist_data
+ node_page_state_add(v + overstep, pgdat, item);
+ __this_cpu_write(*p, -overstep);
+ }
++ preempt_enable_rt();
+ }
+
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-@@ -289,6 +293,7 @@ void __dec_zone_state(struct zone *zone,
+@@ -350,6 +358,7 @@ void __dec_zone_state(struct zone *zone,
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
@@ -78,11 +110,27 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
-@@ -297,6 +302,7 @@ void __dec_zone_state(struct zone *zone,
+@@ -358,6 +367,7 @@ void __dec_zone_state(struct zone *zone,
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
}
+ preempt_enable_rt();
}
+ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -366,6 +376,7 @@ void __dec_node_state(struct pglist_data
+ s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_dec_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v < - t)) {
+@@ -374,6 +385,7 @@ void __dec_node_state(struct pglist_data
+ node_page_state_add(v - overstep, pgdat, item);
+ __this_cpu_write(*p, overstep);
+ }
++ preempt_enable_rt();
+ }
+
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index d13c3a7eba8455..7f9493205a53b5 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1848,7 +1848,7 @@ static void drain_all_stock(struct mem_c
+@@ -1824,7 +1824,7 @@ static void drain_all_stock(struct mem_c
return;
/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -1865,7 +1865,7 @@ static void drain_all_stock(struct mem_c
+@@ -1841,7 +1841,7 @@ static void drain_all_stock(struct mem_c
schedule_work_on(cpu, &stock->work);
}
}
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index f5fff39dc6ea3c..72808f8ad82b36 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -4579,12 +4582,12 @@ static int mem_cgroup_move_account(struc
+@@ -4566,12 +4569,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5436,10 +5439,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5444,10 +5447,10 @@ void mem_cgroup_commit_charge(struct pag
commit_charge(page, memcg, lrucare);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5491,14 +5494,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5503,14 +5506,14 @@ static void uncharge_batch(struct mem_cg
memcg_oom_recover(memcg);
}
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_pages);
-@@ -5817,6 +5820,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5845,6 +5848,7 @@ void mem_cgroup_swapout(struct page *pag
{
struct mem_cgroup *memcg, *swap_memcg;
unsigned short oldid;
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5857,12 +5861,16 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5885,12 +5889,16 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for udpating the per-CPU variables.
*/
diff --git a/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch b/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
index c730252d654edf..a0dbef2aee64ec 100644
--- a/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
+++ b/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -5652,10 +5652,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -5668,10 +5668,10 @@ void mem_cgroup_migrate(struct page *old
commit_charge(newpage, memcg, false);
diff --git a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
index f47973383d2eed..a208a1650cc82a 100644
--- a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
+++ b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -280,9 +280,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+@@ -281,9 +281,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
#ifdef CONFIG_PREEMPT_RT_BASE
# define cpu_lock_irqsave(cpu, flags) \
diff --git a/patches/mm-page_alloc-reduce-lock-sections-further.patch b/patches/mm-page_alloc-reduce-lock-sections-further.patch
index 419cccfceb4d23..30c1024c88e923 100644
--- a/patches/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/patches/mm-page_alloc-reduce-lock-sections-further.patch
@@ -8,13 +8,13 @@ call free_pages_bulk() outside of the percpu page allocator locks.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- mm/page_alloc.c | 87 +++++++++++++++++++++++++++++++++++++++-----------------
- 1 file changed, 62 insertions(+), 25 deletions(-)
+ mm/page_alloc.c | 94 +++++++++++++++++++++++++++++++++++++++-----------------
+ 1 file changed, 66 insertions(+), 28 deletions(-)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -829,7 +829,7 @@ static inline int free_pages_check(struc
- }
+@@ -1069,7 +1069,7 @@ static bool bulkfree_pcp_prepare(struct
+ #endif /* CONFIG_DEBUG_VM */
/*
- * Frees a number of pages from the PCP lists
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -840,18 +840,53 @@ static inline int free_pages_check(struc
+@@ -1080,19 +1080,58 @@ static bool bulkfree_pcp_prepare(struct
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -31,21 +31,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
- int migratetype = 0;
- int batch_free = 0;
- int to_free = count;
unsigned long nr_scanned;
+ bool isolated_pageblocks;
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->lock, flags);
- spin_lock(&zone->lock);
- nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+ isolated_pageblocks = has_isolate_pageblock(zone);
+ nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
if (nr_scanned)
- __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
+ __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
+ while (!list_empty(list)) {
-+ struct page *page = list_first_entry(list, struct page, lru);
++ struct page *page;
+ int mt; /* migratetype of the to-be-freed page */
+
++ page = list_first_entry(list, struct page, lru);
+ /* must delete as __free_one_page list manipulates */
+ list_del(&page->lru);
+
@@ -53,14 +55,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ /* MIGRATE_ISOLATE page should not go to pcplists */
+ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+ /* Pageblock could have been isolated meanwhile */
-+ if (unlikely(has_isolate_pageblock(zone)))
++ if (unlikely(isolated_pageblocks))
+ mt = get_pageblock_migratetype(page);
+
++ if (bulkfree_pcp_prepare(page))
++ continue;
++
+ __free_one_page(page, page_to_pfn(page), zone, 0, mt);
+ trace_mm_page_pcpu_drain(page, 0, mt);
-+ to_free--;
++ count--;
+ }
-+ WARN_ON(to_free != 0);
++ WARN_ON(count != 0);
+ spin_unlock_irqrestore(&zone->lock, flags);
+}
+
@@ -71,16 +76,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ */
-+static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
++static void isolate_pcp_pages(int count, struct per_cpu_pages *src,
+ struct list_head *dst)
+{
+ int migratetype = 0;
+ int batch_free = 0;
+
- while (to_free) {
+ while (count) {
struct page *page;
struct list_head *list;
-@@ -867,7 +902,7 @@ static void free_pcppages_bulk(struct zo
+@@ -1108,7 +1147,7 @@ static void free_pcppages_bulk(struct zo
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
@@ -89,8 +94,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
-@@ -875,24 +910,12 @@ static void free_pcppages_bulk(struct zo
- batch_free = to_free;
+@@ -1116,27 +1155,12 @@ static void free_pcppages_bulk(struct zo
+ batch_free = count;
do {
- int mt; /* migratetype of the to-be-freed page */
@@ -103,19 +108,22 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- /* MIGRATE_ISOLATE page should not go to pcplists */
- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
- /* Pageblock could have been isolated meanwhile */
-- if (unlikely(has_isolate_pageblock(zone)))
+- if (unlikely(isolated_pageblocks))
- mt = get_pageblock_migratetype(page);
-
+- if (bulkfree_pcp_prepare(page))
+- continue;
+-
- __free_one_page(page, page_to_pfn(page), zone, 0, mt);
- trace_mm_page_pcpu_drain(page, 0, mt);
+ list_add(&page->lru, dst);
- } while (--to_free && --batch_free && !list_empty(list));
+ } while (--count && --batch_free && !list_empty(list));
}
- spin_unlock(&zone->lock);
}
static void free_one_page(struct zone *zone,
-@@ -901,7 +924,9 @@ static void free_one_page(struct zone *z
+@@ -1145,7 +1169,9 @@ static void free_one_page(struct zone *z
int migratetype)
{
unsigned long nr_scanned;
@@ -123,10 +131,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->lock, flags);
- nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+ nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
if (nr_scanned)
- __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
-@@ -911,7 +936,7 @@ static void free_one_page(struct zone *z
+ __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
+@@ -1155,7 +1181,7 @@ static void free_one_page(struct zone *z
migratetype = get_pfnblock_migratetype(page, pfn);
}
__free_one_page(page, pfn, zone, order, migratetype);
@@ -134,8 +142,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ spin_unlock_irqrestore(&zone->lock, flags);
}
- static int free_tail_pages_check(struct page *head_page, struct page *page)
-@@ -2030,16 +2055,18 @@ static int rmqueue_bulk(struct zone *zon
+ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
+@@ -2232,16 +2258,18 @@ static int rmqueue_bulk(struct zone *zon
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
@@ -155,7 +163,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -2055,16 +2082,21 @@ static void drain_pages_zone(unsigned in
+@@ -2257,16 +2285,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -179,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2242,8 +2274,13 @@ void free_hot_cold_page(struct page *pag
+@@ -2448,8 +2481,13 @@ void free_hot_cold_page(struct page *pag
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index b1625d089b855f..d28885b0a03074 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -24,8 +24,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#include <linux/locallock.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
-
-@@ -275,6 +276,18 @@ EXPORT_SYMBOL(nr_node_ids);
+ #include <linux/memcontrol.h>
+@@ -276,6 +277,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1072,10 +1085,10 @@ static void __free_pages_ok(struct page
+@@ -1228,10 +1241,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -56,8 +56,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ local_unlock_irqrestore(pa_lock, flags);
}
- static void __init __free_pages_boot_core(struct page *page,
-@@ -2019,14 +2032,14 @@ void drain_zone_pages(struct zone *zone,
+ static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+@@ -2221,14 +2234,14 @@ void drain_zone_pages(struct zone *zone,
unsigned long flags;
int to_drain, batch;
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -2043,7 +2056,7 @@ static void drain_pages_zone(unsigned in
+@@ -2245,7 +2258,7 @@ static void drain_pages_zone(unsigned in
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2051,7 +2064,7 @@ static void drain_pages_zone(unsigned in
+@@ -2253,7 +2266,7 @@ static void drain_pages_zone(unsigned in
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2137,8 +2150,17 @@ void drain_all_pages(struct zone *zone)
+@@ -2339,8 +2352,17 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#ifdef CONFIG_HIBERNATION
-@@ -2194,7 +2216,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2400,7 +2422,7 @@ void free_hot_cold_page(struct page *pag
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__count_vm_event(PGFREE);
/*
-@@ -2225,7 +2247,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2431,7 +2453,7 @@ void free_hot_cold_page(struct page *pag
}
out:
@@ -128,28 +128,28 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2360,7 +2382,7 @@ struct page *buffered_rmqueue(struct zon
+@@ -2568,7 +2590,7 @@ struct page *buffered_rmqueue(struct zon
struct per_cpu_pages *pcp;
struct list_head *list;
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
- pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- if (list_empty(list)) {
-@@ -2384,7 +2406,7 @@ struct page *buffered_rmqueue(struct zon
+ do {
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
+@@ -2595,7 +2617,7 @@ struct page *buffered_rmqueue(struct zon
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
- spin_lock_irqsave(&zone->lock, flags);
+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
- page = NULL;
- if (alloc_flags & ALLOC_HARDER) {
-@@ -2394,11 +2416,13 @@ struct page *buffered_rmqueue(struct zon
- }
- if (!page)
- page = __rmqueue(zone, order, migratetype);
+ do {
+ page = NULL;
+@@ -2607,22 +2629,24 @@ struct page *buffered_rmqueue(struct zon
+ if (!page)
+ page = __rmqueue(zone, order, migratetype);
+ } while (page && check_new_pages(page, order));
- spin_unlock(&zone->lock);
- if (!page)
+ if (!page) {
@@ -161,10 +161,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ spin_unlock(&zone->lock);
}
- __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-@@ -2408,13 +2432,13 @@ struct page *buffered_rmqueue(struct zon
-
- __count_zone_vm_events(PGALLOC, zone, 1 << order);
+ __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
@@ -178,7 +175,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -6241,6 +6265,7 @@ static int page_alloc_cpu_notify(struct
+@@ -6554,6 +6578,7 @@ static int page_alloc_cpu_notify(struct
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -186,7 +183,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -7165,7 +7190,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7370,7 +7395,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -195,7 +192,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7174,7 +7199,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7379,7 +7404,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/mm-perform-lru_add_drain_all-remotely.patch b/patches/mm-perform-lru_add_drain_all-remotely.patch
index c7c3995a941771..63193f823abf69 100644
--- a/patches/mm-perform-lru_add_drain_all-remotely.patch
+++ b/patches/mm-perform-lru_add_drain_all-remotely.patch
@@ -19,12 +19,12 @@ Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- mm/swap.c | 37 ++++++++++++++++++++++++++++++-------
- 1 file changed, 30 insertions(+), 7 deletions(-)
+ mm/swap.c | 42 ++++++++++++++++++++++++++++++++----------
+ 1 file changed, 32 insertions(+), 10 deletions(-)
--- a/mm/swap.c
+++ b/mm/swap.c
-@@ -595,9 +595,15 @@ void lru_add_drain_cpu(int cpu)
+@@ -597,9 +597,15 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
@@ -40,40 +40,49 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -665,12 +671,32 @@ void lru_add_drain(void)
+@@ -667,12 +673,15 @@ void lru_add_drain(void)
local_unlock_cpu(swapvec_lock);
}
-+
+-static void lru_add_drain_per_cpu(struct work_struct *dummy)
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
-+{
+ {
+- lru_add_drain();
+ local_lock_on(swapvec_lock, cpu);
+ lru_add_drain_cpu(cpu);
+ local_unlock_on(swapvec_lock, cpu);
-+}
-+
+ }
+
+-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+#else
-+
- static void lru_add_drain_per_cpu(struct work_struct *dummy)
- {
- lru_add_drain();
+
+ /*
+ * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
+@@ -692,6 +701,22 @@ static int __init lru_init(void)
}
+ early_initcall(lru_init);
- static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
++static void lru_add_drain_per_cpu(struct work_struct *dummy)
++{
++ lru_add_drain();
++}
++
++static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+ struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+ INIT_WORK(work, lru_add_drain_per_cpu);
-+ schedule_work_on(cpu, work);
++ queue_work_on(cpu, lru_add_drain_wq, work);
+ cpumask_set_cpu(cpu, has_work);
+}
+#endif
-
++
void lru_add_drain_all(void)
{
-@@ -683,21 +709,18 @@ void lru_add_drain_all(void)
+ static DEFINE_MUTEX(lock);
+@@ -703,21 +728,18 @@ void lru_add_drain_all(void)
cpumask_clear(&has_work);
for_each_online_cpu(cpu) {
@@ -85,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
- need_activate_page_drain(cpu)) {
- INIT_WORK(work, lru_add_drain_per_cpu);
-- schedule_work_on(cpu, work);
+- queue_work_on(cpu, lru_add_drain_wq, work);
- cpumask_set_cpu(cpu, &has_work);
- }
+ need_activate_page_drain(cpu))
diff --git a/patches/mm-protect-activate-switch-mm.patch b/patches/mm-protect-activate-switch-mm.patch
index 696c03a29a7174..e0e005ce5a41db 100644
--- a/patches/mm-protect-activate-switch-mm.patch
+++ b/patches/mm-protect-activate-switch-mm.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -961,12 +961,14 @@ static int exec_mmap(struct mm_struct *m
+@@ -1012,12 +1012,14 @@ static int exec_mmap(struct mm_struct *m
}
}
task_lock(tsk);
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index 412e5f5315948f..8d776a51673b15 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -229,7 +229,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
#include <asm/page.h>
#include <asm/ptrace.h>
-@@ -1883,6 +1884,12 @@ struct task_struct {
+@@ -1954,6 +1955,12 @@ struct task_struct {
int softirq_nestcnt;
unsigned int softirqs_raised;
#endif
diff --git a/patches/mm-vmalloc-use-get-cpu-light.patch b/patches/mm-vmalloc-use-get-cpu-light.patch
index 2bb2ee53052f90..d43b7bda94811f 100644
--- a/patches/mm-vmalloc-use-get-cpu-light.patch
+++ b/patches/mm-vmalloc-use-get-cpu-light.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -819,7 +819,7 @@ static void *new_vmap_block(unsigned int
+@@ -845,7 +845,7 @@ static void *new_vmap_block(unsigned int
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *vaddr;
node = numa_node_id();
-@@ -862,11 +862,12 @@ static void *new_vmap_block(unsigned int
+@@ -888,11 +888,12 @@ static void *new_vmap_block(unsigned int
BUG_ON(err);
radix_tree_preload_end();
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return vaddr;
}
-@@ -935,6 +936,7 @@ static void *vb_alloc(unsigned long size
+@@ -961,6 +962,7 @@ static void *vb_alloc(unsigned long size
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -949,7 +951,8 @@ static void *vb_alloc(unsigned long size
+@@ -975,7 +977,8 @@ static void *vb_alloc(unsigned long size
order = get_order(size);
rcu_read_lock();
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -972,7 +975,7 @@ static void *vb_alloc(unsigned long size
+@@ -998,7 +1001,7 @@ static void *vb_alloc(unsigned long size
break;
}
diff --git a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
index cfe0da6f4e3584..755fcf546c8db8 100644
--- a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
+++ b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
@@ -10,10 +10,10 @@ so I catch users of it which will be introduced later.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/swap.h | 4 +++-
- mm/filemap.c | 11 ++++++++---
+ mm/filemap.c | 13 +++++++++----
mm/truncate.c | 7 +++++--
mm/workingset.c | 23 ++++++++++++-----------
- 4 files changed, 28 insertions(+), 17 deletions(-)
+ 4 files changed, 29 insertions(+), 18 deletions(-)
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/page.h>
struct notifier_block;
-@@ -252,7 +253,8 @@ struct swap_info_struct {
+@@ -243,7 +244,8 @@ struct swap_info_struct {
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
@@ -37,18 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/mm/filemap.c
+++ b/mm/filemap.c
-@@ -169,7 +169,9 @@ static void page_cache_tree_delete(struc
- if (!workingset_node_pages(node) &&
- list_empty(&node->private_list)) {
- node->private_data = mapping;
-- list_lru_add(&workingset_shadow_nodes, &node->private_list);
-+ local_lock(workingset_shadow_lock);
-+ list_lru_add(&__workingset_shadow_nodes, &node->private_list);
-+ local_unlock(workingset_shadow_lock);
- }
- }
-
-@@ -618,9 +620,12 @@ static int page_cache_tree_insert(struct
+@@ -159,9 +159,12 @@ static int page_cache_tree_insert(struct
* node->private_list is protected by
* mapping->tree_lock.
*/
@@ -63,26 +52,39 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
return 0;
}
---- a/mm/truncate.c
-+++ b/mm/truncate.c
-@@ -63,9 +63,12 @@ static void clear_exceptional_entry(stru
- * protected by mapping->tree_lock.
- */
- if (!workingset_node_shadows(node) &&
-- !list_empty(&node->private_list))
-- list_lru_del(&workingset_shadow_nodes,
-+ !list_empty(&node->private_list)) {
+@@ -217,8 +220,10 @@ static void page_cache_tree_delete(struc
+ if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
+ list_empty(&node->private_list)) {
+ node->private_data = mapping;
+- list_lru_add(&workingset_shadow_nodes,
+- &node->private_list);
+ local_lock(workingset_shadow_lock);
-+ list_lru_del(&__workingset_shadow_nodes,
- &node->private_list);
++ list_lru_add(&__workingset_shadow_nodes,
++ &node->private_list);
+ local_unlock(workingset_shadow_lock);
-+ }
- __radix_tree_delete_node(&mapping->page_tree, node);
+ }
}
+
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -62,9 +62,12 @@ static void clear_exceptional_entry(stru
+ * protected by mapping->tree_lock.
+ */
+ if (!workingset_node_shadows(node) &&
+- !list_empty(&node->private_list))
+- list_lru_del(&workingset_shadow_nodes,
++ !list_empty(&node->private_list)) {
++ local_lock(workingset_shadow_lock);
++ list_lru_del(&__workingset_shadow_nodes,
+ &node->private_list);
++ local_unlock(workingset_shadow_lock);
++ }
+ __radix_tree_delete_node(&mapping->page_tree, node);
unlock:
+ spin_unlock_irq(&mapping->tree_lock);
--- a/mm/workingset.c
+++ b/mm/workingset.c
-@@ -335,7 +335,8 @@ void workingset_activation(struct page *
+@@ -334,7 +334,8 @@ void workingset_activation(struct page *
* point where they would still be useful.
*/
@@ -92,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static unsigned long count_shadow_nodes(struct shrinker *shrinker,
struct shrink_control *sc)
-@@ -345,9 +346,9 @@ static unsigned long count_shadow_nodes(
+@@ -344,9 +345,9 @@ static unsigned long count_shadow_nodes(
unsigned long pages;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
@@ -103,9 +105,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc);
+ local_unlock_irq(workingset_shadow_lock);
- if (memcg_kmem_enabled())
+ if (memcg_kmem_enabled()) {
pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
-@@ -440,9 +441,9 @@ static enum lru_status shadow_lru_isolat
+@@ -438,9 +439,9 @@ static enum lru_status shadow_lru_isolat
spin_unlock(&mapping->tree_lock);
ret = LRU_REMOVED_RETRY;
out:
@@ -117,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock(lru_lock);
return ret;
}
-@@ -453,10 +454,10 @@ static unsigned long scan_shadow_nodes(s
+@@ -451,10 +452,10 @@ static unsigned long scan_shadow_nodes(s
unsigned long ret;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
@@ -131,8 +133,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -494,7 +495,7 @@ static int __init workingset_init(void)
- printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+@@ -492,7 +493,7 @@ static int __init workingset_init(void)
+ pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
timestamp_bits, max_order, bucket_order);
- ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
@@ -140,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ret)
goto err;
ret = register_shrinker(&workingset_shadow_shrinker);
-@@ -502,7 +503,7 @@ static int __init workingset_init(void)
+@@ -500,7 +501,7 @@ static int __init workingset_init(void)
goto err_list_lru;
return 0;
err_list_lru:
diff --git a/patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch b/patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
index d3b55e4c8a67a6..0c032dc8f1e813 100644
--- a/patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
+++ b/patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
@@ -5,29 +5,160 @@ Subject: [PATCH] mm/zsmalloc: Use get/put_cpu_light in
Otherwise, we get a ___might_sleep() splat.
+
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+[bigeasy: replace the bit_spin_lock() with a mutex]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- mm/zsmalloc.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
+ mm/zsmalloc.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 69 insertions(+), 4 deletions(-)
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
-@@ -1292,7 +1292,7 @@ void *zs_map_object(struct zs_pool *pool
+@@ -71,7 +71,19 @@
+ #define ZS_MAX_ZSPAGE_ORDER 2
+ #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++
++struct zsmalloc_handle {
++ unsigned long addr;
++ struct mutex lock;
++};
++
++#define ZS_HANDLE_SIZE (sizeof(struct zsmalloc_handle))
++
++#else
++
+ #define ZS_HANDLE_SIZE (sizeof(unsigned long))
++#endif
+
+ /*
+ * Object location (<PFN>, <obj_idx>) is encoded as
+@@ -351,9 +363,26 @@ static void destroy_cache(struct zs_pool
+
+ static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
+ {
+- return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
+- gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
++ void *p;
++
++ p = kmem_cache_alloc(pool->handle_cachep,
++ gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
++#ifdef CONFIG_PREEMPT_RT_BASE
++ if (p) {
++ struct zsmalloc_handle *zh = p;
++
++ mutex_init(&zh->lock);
++ }
++#endif
++ return (unsigned long)p;
++}
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
++{
++ return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1));
+ }
++#endif
+
+ static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
+ {
+@@ -373,12 +402,18 @@ static void cache_free_zspage(struct zs_
+
+ static void record_obj(unsigned long handle, unsigned long obj)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ WRITE_ONCE(zh->addr, obj);
++#else
+ /*
+ * lsb of @obj represents handle lock while other bits
+ * represent object value the handle is pointing so
+ * updating shouldn't do store tearing.
+ */
+ WRITE_ONCE(*(unsigned long *)handle, obj);
++#endif
+ }
+
+ /* zpool driver */
+@@ -902,7 +937,13 @@ static unsigned long location_to_obj(str
+
+ static unsigned long handle_to_obj(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return zh->addr;
++#else
+ return *(unsigned long *)handle;
++#endif
+ }
+
+ static unsigned long obj_to_head(struct page *page, void *obj)
+@@ -916,22 +957,46 @@ static unsigned long obj_to_head(struct
+
+ static inline int testpin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_is_locked(&zh->lock);
++#else
+ return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static inline int trypin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_trylock(&zh->lock);
++#else
+ return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static void pin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_lock(&zh->lock);
++#else
+ bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static void unpin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_unlock(&zh->lock);
++#else
+ bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static void reset_page(struct page *page)
+@@ -1423,7 +1488,7 @@ void *zs_map_object(struct zs_pool *pool
class = pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
+ off = (class->size * obj_idx) & ~PAGE_MASK;
- area = &get_cpu_var(zs_map_area);
+ area = per_cpu_ptr(&zs_map_area, get_cpu_light());
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
-@@ -1345,7 +1345,7 @@ void zs_unmap_object(struct zs_pool *poo
+@@ -1477,7 +1542,7 @@ void zs_unmap_object(struct zs_pool *poo
__zs_unmap_object(area, pages, off, class->size);
}
- put_cpu_var(zs_map_area);
+ put_cpu_light();
+
+ migrate_read_unlock(zspage);
unpin_tag(handle);
- }
- EXPORT_SYMBOL_GPL(zs_unmap_object);
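For reference, the RT side of the zsmalloc change above boils down to turning the handle from a bare unsigned long into a small struct whose mutex replaces the HANDLE_PIN_BIT bit spinlock. A condensed, illustrative sketch (struct layout and helper names taken from the patch, everything else elided):

    #ifdef CONFIG_PREEMPT_RT_BASE
    /* -RT: the pin is a mutex embedded in the handle, so pinning may sleep */
    struct zsmalloc_handle {
            unsigned long addr;     /* encoded object location */
            struct mutex lock;      /* replaces the HANDLE_PIN_BIT bit spinlock */
    };

    static void pin_tag(unsigned long handle)
    {
            struct zsmalloc_handle *zh = zs_get_pure_handle(handle);

            mutex_lock(&zh->lock);
    }
    #else
    /* mainline: the pin is a bit spinlock in the handle word and must not sleep */
    static void pin_tag(unsigned long handle)
    {
            bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
    }
    #endif
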
diff --git a/patches/mmci-remove-bogus-irq-save.patch b/patches/mmci-remove-bogus-irq-save.patch
index 67523a158ec56e..058493ddb58373 100644
--- a/patches/mmci-remove-bogus-irq-save.patch
+++ b/patches/mmci-remove-bogus-irq-save.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
-@@ -1155,15 +1155,12 @@ static irqreturn_t mmci_pio_irq(int irq,
+@@ -1147,15 +1147,12 @@ static irqreturn_t mmci_pio_irq(int irq,
struct sg_mapping_iter *sg_miter = &host->sg_miter;
struct variant_data *variant = host->variant;
void __iomem *base = host->base;
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
unsigned int remain, len;
char *buffer;
-@@ -1203,8 +1200,6 @@ static irqreturn_t mmci_pio_irq(int irq,
+@@ -1195,8 +1192,6 @@ static irqreturn_t mmci_pio_irq(int irq,
sg_miter_stop(sg_miter);
diff --git a/patches/move_sched_delayed_work_to_helper.patch b/patches/move_sched_delayed_work_to_helper.patch
index af52d55c193cb2..4d8783c3884e59 100644
--- a/patches/move_sched_delayed_work_to_helper.patch
+++ b/patches/move_sched_delayed_work_to_helper.patch
@@ -18,60 +18,44 @@ a notifier on boot up for your check and wake up the thread when
needed. This will be a todo.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-
+[bigeasy: use swork_queue() instead of a helper thread]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/time/ntp.c | 43 +++++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 43 insertions(+)
+ kernel/time/ntp.c | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
-@@ -10,6 +10,7 @@
- #include <linux/workqueue.h>
- #include <linux/hrtimer.h>
- #include <linux/jiffies.h>
-+#include <linux/kthread.h>
+@@ -17,6 +17,7 @@
+ #include <linux/module.h>
+ #include <linux/rtc.h>
#include <linux/math64.h>
- #include <linux/timex.h>
- #include <linux/time.h>
-@@ -568,10 +569,52 @@ static void sync_cmos_clock(struct work_
++#include <linux/swork.h>
+
+ #include "ntp_internal.h"
+ #include "timekeeping_internal.h"
+@@ -568,10 +569,35 @@ static void sync_cmos_clock(struct work_
&sync_cmos_work, timespec64_to_jiffies(&next));
}
+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * RT can not call schedule_delayed_work from real interrupt context.
-+ * Need to make a thread to do the real work.
-+ */
-+static struct task_struct *cmos_delay_thread;
-+static bool do_cmos_delay;
+
-+static int run_cmos_delay(void *ignore)
++static void run_clock_set_delay(struct swork_event *event)
+{
-+ while (!kthread_should_stop()) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (do_cmos_delay) {
-+ do_cmos_delay = false;
-+ queue_delayed_work(system_power_efficient_wq,
-+ &sync_cmos_work, 0);
-+ }
-+ schedule();
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
++ queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
+}
+
++static struct swork_event ntp_cmos_swork;
++
+void ntp_notify_cmos_timer(void)
+{
-+ do_cmos_delay = true;
-+ /* Make visible before waking up process */
-+ smp_wmb();
-+ wake_up_process(cmos_delay_thread);
++ swork_queue(&ntp_cmos_swork);
+}
+
+static __init int create_cmos_delay_thread(void)
+{
-+ cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd");
-+ BUG_ON(!cmos_delay_thread);
++ WARN_ON(swork_get());
++ INIT_SWORK(&ntp_cmos_swork, run_clock_set_delay);
+ return 0;
+}
+early_initcall(create_cmos_delay_thread);
diff --git a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
new file mode 100644
index 00000000000000..b27c745ce50779
--- /dev/null
+++ b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -0,0 +1,273 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Sep 2016 17:36:35 +0200
+Subject: [PATCH] net/Qdisc: use a seqlock instead seqcount
+
+The seqcount disables preemption on -RT while it is held, which we cannot
+remove. Also we don't want the reader to spin for ages if the writer is
+scheduled out. The seqlock on the other hand will serialize / sleep on
+the lock while the writer is active.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/seqlock.h | 9 +++++++++
+ include/net/gen_stats.h | 9 +++++----
+ include/net/net_seq_lock.h | 15 +++++++++++++++
+ include/net/sch_generic.h | 21 ++++++++++++++++++---
+ net/core/gen_estimator.c | 6 +++---
+ net/core/gen_stats.c | 8 ++++----
+ net/sched/sch_api.c | 2 +-
+ net/sched/sch_generic.c | 12 ++++++++++++
+ 8 files changed, 67 insertions(+), 15 deletions(-)
+ create mode 100644 include/net/net_seq_lock.h
+
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -481,6 +481,15 @@ static inline void write_seqlock(seqlock
+ __raw_write_seqcount_begin(&sl->seqcount);
+ }
+
++static inline int try_write_seqlock(seqlock_t *sl)
++{
++ if (spin_trylock(&sl->lock)) {
++ __raw_write_seqcount_begin(&sl->seqcount);
++ return 1;
++ }
++ return 0;
++}
++
+ static inline void write_sequnlock(seqlock_t *sl)
+ {
+ __raw_write_seqcount_end(&sl->seqcount);
+--- a/include/net/gen_stats.h
++++ b/include/net/gen_stats.h
+@@ -5,6 +5,7 @@
+ #include <linux/socket.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/pkt_sched.h>
++#include <net/net_seq_lock.h>
+
+ struct gnet_stats_basic_cpu {
+ struct gnet_stats_basic_packed bstats;
+@@ -33,11 +34,11 @@ int gnet_stats_start_copy_compat(struct
+ spinlock_t *lock, struct gnet_dump *d,
+ int padattr);
+
+-int gnet_stats_copy_basic(const seqcount_t *running,
++int gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
+-void __gnet_stats_copy_basic(const seqcount_t *running,
++void __gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
+@@ -55,14 +56,14 @@ int gen_new_estimator(struct gnet_stats_
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock,
+- seqcount_t *running, struct nlattr *opt);
++ net_seqlock_t *running, struct nlattr *opt);
+ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_rate_est64 *rate_est);
+ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock,
+- seqcount_t *running, struct nlattr *opt);
++ net_seqlock_t *running, struct nlattr *opt);
+ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
+ const struct gnet_stats_rate_est64 *rate_est);
+ #endif
+--- /dev/null
++++ b/include/net/net_seq_lock.h
+@@ -0,0 +1,15 @@
++#ifndef __NET_NET_SEQ_LOCK_H__
++#define __NET_NET_SEQ_LOCK_H__
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define net_seqlock_t seqlock_t
++# define net_seq_begin(__r) read_seqbegin(__r)
++# define net_seq_retry(__r, __s) read_seqretry(__r, __s)
++
++#else
++# define net_seqlock_t seqcount_t
++# define net_seq_begin(__r) read_seqcount_begin(__r)
++# define net_seq_retry(__r, __s) read_seqcount_retry(__r, __s)
++#endif
++
++#endif
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -10,6 +10,7 @@
+ #include <linux/dynamic_queue_limits.h>
+ #include <net/gen_stats.h>
+ #include <net/rtnetlink.h>
++#include <net/net_seq_lock.h>
+
+ struct Qdisc_ops;
+ struct qdisc_walker;
+@@ -78,7 +79,7 @@ struct Qdisc {
+ struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
+ struct sk_buff_head q;
+ struct gnet_stats_basic_packed bstats;
+- seqcount_t running;
++ net_seqlock_t running;
+ struct gnet_stats_queue qstats;
+ unsigned long state;
+ struct Qdisc *next_sched;
+@@ -90,13 +91,22 @@ struct Qdisc {
+ spinlock_t busylock ____cacheline_aligned_in_smp;
+ };
+
+-static inline bool qdisc_is_running(const struct Qdisc *qdisc)
++static inline bool qdisc_is_running(struct Qdisc *qdisc)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ return spin_is_locked(&qdisc->running.lock) ? true : false;
++#else
+ return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
++#endif
+ }
+
+ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ if (try_write_seqlock(&qdisc->running))
++ return true;
++ return false;
++#else
+ if (qdisc_is_running(qdisc))
+ return false;
+ /* Variant of write_seqcount_begin() telling lockdep a trylock
+@@ -105,11 +115,16 @@ static inline bool qdisc_run_begin(struc
+ raw_write_seqcount_begin(&qdisc->running);
+ seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
+ return true;
++#endif
+ }
+
+ static inline void qdisc_run_end(struct Qdisc *qdisc)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ write_sequnlock(&qdisc->running);
++#else
+ write_seqcount_end(&qdisc->running);
++#endif
+ }
+
+ static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
+@@ -300,7 +315,7 @@ static inline spinlock_t *qdisc_root_sle
+ return qdisc_lock(root);
+ }
+
+-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
++static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+ {
+ struct Qdisc *root = qdisc_root_sleeping(qdisc);
+
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
+@@ -84,7 +84,7 @@ struct gen_estimator
+ struct gnet_stats_basic_packed *bstats;
+ struct gnet_stats_rate_est64 *rate_est;
+ spinlock_t *stats_lock;
+- seqcount_t *running;
++ net_seqlock_t *running;
+ int ewma_log;
+ u32 last_packets;
+ unsigned long avpps;
+@@ -213,7 +213,7 @@ int gen_new_estimator(struct gnet_stats_
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock,
+- seqcount_t *running,
++ net_seqlock_t *running,
+ struct nlattr *opt)
+ {
+ struct gen_estimator *est;
+@@ -309,7 +309,7 @@ int gen_replace_estimator(struct gnet_st
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock,
+- seqcount_t *running, struct nlattr *opt)
++ net_seqlock_t *running, struct nlattr *opt)
+ {
+ gen_kill_estimator(bstats, rate_est);
+ return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -130,7 +130,7 @@ static void
+ }
+
+ void
+-__gnet_stats_copy_basic(const seqcount_t *running,
++__gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b)
+@@ -143,10 +143,10 @@ void
+ }
+ do {
+ if (running)
+- seq = read_seqcount_begin(running);
++ seq = net_seq_begin(running);
+ bstats->bytes = b->bytes;
+ bstats->packets = b->packets;
+- } while (running && read_seqcount_retry(running, seq));
++ } while (running && net_seq_retry(running, seq));
+ }
+ EXPORT_SYMBOL(__gnet_stats_copy_basic);
+
+@@ -164,7 +164,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
+ * if the room in the socket buffer was not sufficient.
+ */
+ int
+-gnet_stats_copy_basic(const seqcount_t *running,
++gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b)
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -975,7 +975,7 @@ qdisc_create(struct net_device *dev, str
+ rcu_assign_pointer(sch->stab, stab);
+ }
+ if (tca[TCA_RATE]) {
+- seqcount_t *running;
++ net_seqlock_t *running;
+
+ err = -EOPNOTSUPP;
+ if (sch->flags & TCQ_F_MQROOT)
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -426,7 +426,11 @@ struct Qdisc noop_qdisc = {
+ .list = LIST_HEAD_INIT(noop_qdisc.list),
+ .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
+ .dev_queue = &noop_netdev_queue,
++#ifdef CONFIG_PREEMPT_RT_BASE
++ .running = __SEQLOCK_UNLOCKED(noop_qdisc.running),
++#else
+ .running = SEQCNT_ZERO(noop_qdisc.running),
++#endif
+ .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
+ };
+ EXPORT_SYMBOL(noop_qdisc);
+@@ -620,9 +624,17 @@ struct Qdisc *qdisc_alloc(struct netdev_
+ lockdep_set_class(&sch->busylock,
+ dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++ seqlock_init(&sch->running);
++ lockdep_set_class(&sch->running.seqcount,
++ dev->qdisc_running_key ?: &qdisc_running_key);
++ lockdep_set_class(&sch->running.lock,
++ dev->qdisc_running_key ?: &qdisc_running_key);
++#else
+ seqcount_init(&sch->running);
+ lockdep_set_class(&sch->running,
+ dev->qdisc_running_key ?: &qdisc_running_key);
++#endif
+
+ sch->ops = ops;
+ sch->enqueue = ops->enqueue;
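The net_seq_lock.h shim keeps the stats readers source-compatible with both flavours of Qdisc::running; roughly, a reader retry loop (condensed from the __gnet_stats_copy_basic hunk above, illustrative only) looks like:

    unsigned int seq;

    do {
            if (running)
                    seq = net_seq_begin(running);   /* read_seqbegin() on -RT,
                                                     * read_seqcount_begin() otherwise */
            bstats->bytes   = b->bytes;
            bstats->packets = b->packets;
    } while (running && net_seq_retry(running, seq));

On the writer side qdisc_run_begin() becomes try_write_seqlock() under PREEMPT_RT_BASE, so a contending writer simply backs off instead of a reader spinning against a preempted writer.
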
diff --git a/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch b/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
index 2c83f004fc3b28..5653133835ddcb 100644
--- a/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
+++ b/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
@@ -58,30 +58,36 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* This routine will send an RST to the other tcp.
*
-@@ -689,10 +691,13 @@ static void tcp_v4_send_reset(const stru
+@@ -692,6 +694,8 @@ static void tcp_v4_send_reset(const stru
offsetof(struct inet_timewait_sock, tw_bound_dev_if));
arg.tos = ip_hdr(skb)->tos;
+
+ local_lock(tcp_sk_lock);
+ local_bh_disable();
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
- ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
- &arg, arg.iov[0].iov_len);
+@@ -701,6 +705,7 @@ static void tcp_v4_send_reset(const stru
+ __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+ local_bh_enable();
+ local_unlock(tcp_sk_lock);
- TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
- TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
-@@ -774,10 +779,12 @@ static void tcp_v4_send_ack(struct net *
+ #ifdef CONFIG_TCP_MD5SIG
+ out:
+@@ -776,6 +781,7 @@ static void tcp_v4_send_ack(struct net *
if (oif)
arg.bound_dev_if = oif;
arg.tos = tos;
+ local_lock(tcp_sk_lock);
+ local_bh_disable();
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
- ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
- &arg, arg.iov[0].iov_len);
-+ local_unlock(tcp_sk_lock);
+@@ -784,6 +790,7 @@ static void tcp_v4_send_ack(struct net *
- TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+ __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ local_bh_enable();
++ local_unlock(tcp_sk_lock);
}
+
+ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
diff --git a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
index 30f9e3ec298dbf..a8609207de3d21 100644
--- a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
+++ b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -63,6 +63,7 @@
+@@ -64,6 +64,7 @@
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <net/protocol.h>
#include <net/dst.h>
-@@ -359,6 +360,7 @@ struct napi_alloc_cache {
+@@ -360,6 +361,7 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-@@ -366,10 +368,10 @@ static void *__netdev_alloc_frag(unsigne
+@@ -367,10 +369,10 @@ static void *__netdev_alloc_frag(unsigne
unsigned long flags;
void *data;
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return data;
}
-@@ -437,13 +439,13 @@ struct sk_buff *__netdev_alloc_skb(struc
+@@ -438,13 +440,13 @@ struct sk_buff *__netdev_alloc_skb(struc
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
diff --git a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index 154198c01a8193..36aad344c8b952 100644
--- a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -35,7 +35,7 @@ Cc: stable-rt@vger.kernel.org
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -7789,7 +7789,7 @@ static int dev_cpu_callback(struct notif
+@@ -7991,7 +7991,7 @@ static int dev_cpu_callback(struct notif
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch b/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
index 857149f7a3c34d..bf948beeb63e60 100644
--- a/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
+++ b/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -361,6 +361,7 @@ struct napi_alloc_cache {
+@@ -362,6 +362,7 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-@@ -390,9 +391,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
+@@ -391,9 +392,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
void *napi_alloc_frag(unsigned int fragsz)
-@@ -486,9 +491,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
+@@ -487,9 +492,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
gfp_t gfp_mask)
{
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
len += NET_SKB_PAD + NET_IP_ALIGN;
-@@ -506,7 +512,10 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -507,7 +513,10 @@ struct sk_buff *__napi_alloc_skb(struct
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(!data))
return NULL;
-@@ -517,7 +526,7 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -518,7 +527,7 @@ struct sk_buff *__napi_alloc_skb(struct
}
/* use OR instead of assignment to avoid clearing of bits in mask */
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
skb->pfmemalloc = 1;
skb->head_frag = 1;
-@@ -761,23 +770,26 @@ EXPORT_SYMBOL(consume_skb);
+@@ -762,23 +771,26 @@ EXPORT_SYMBOL(consume_skb);
void __kfree_skb_flush(void)
{
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* record skb to CPU local list */
nc->skb_cache[nc->skb_count++] = skb;
-@@ -792,6 +804,7 @@ static inline void _kfree_skb_defer(stru
+@@ -793,6 +805,7 @@ static inline void _kfree_skb_defer(stru
nc->skb_cache);
nc->skb_count = 0;
}
diff --git a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index ac5a8aa422bf2f..6658efd1395b3f 100644
--- a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -20,8 +20,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3037,7 +3037,11 @@ static inline int __dev_xmit_skb(struct
- * This permits __QDISC___STATE_RUNNING owner to get the lock more
+@@ -3084,7 +3084,11 @@ static inline int __dev_xmit_skb(struct
+ * This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
diff --git a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index 2738349ca2c2bf..13ec1f2add9975 100644
--- a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -23,8 +23,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#include <linux/locallock.h>
#include <uapi/linux/netfilter/x_tables.h>
- /**
-@@ -292,6 +293,8 @@ void xt_free_table_info(struct xt_table_
+ /* Test a struct->invflags and a boolean for inequality */
+@@ -300,6 +301,8 @@ void xt_free_table_info(struct xt_table_
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* xt_tee_enabled - true if x_tables needs to handle reentrancy
*
* Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
-@@ -312,6 +315,9 @@ static inline unsigned int xt_write_recs
+@@ -320,6 +323,9 @@ static inline unsigned int xt_write_recs
{
unsigned int addend;
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
-@@ -342,6 +348,7 @@ static inline void xt_write_recseq_end(u
+@@ -350,6 +356,7 @@ static inline void xt_write_recseq_end(u
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
diff --git a/patches/net-ipv4-inet-Initialize-timers-as-pinned.patch b/patches/net-ipv4-inet-Initialize-timers-as-pinned.patch
deleted file mode 100644
index be993ee580c273..00000000000000
--- a/patches/net-ipv4-inet-Initialize-timers-as-pinned.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:23 +0000
-Subject: [PATCH 08/22] net/ipv4/inet: Initialize timers as pinned
-
-Pinned timers must carry that attribute in the timer itself. No functional
-change.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- net/ipv4/inet_connection_sock.c | 7 ++++---
- net/ipv4/inet_timewait_sock.c | 5 +++--
- 2 files changed, 7 insertions(+), 5 deletions(-)
-
---- a/net/ipv4/inet_connection_sock.c
-+++ b/net/ipv4/inet_connection_sock.c
-@@ -603,7 +603,7 @@ static void reqsk_timer_handler(unsigned
- if (req->num_timeout++ == 0)
- atomic_dec(&queue->young);
- timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
-- mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
-+ mod_timer(&req->rsk_timer, jiffies + timeo);
- return;
- }
- drop:
-@@ -617,8 +617,9 @@ static void reqsk_queue_hash_req(struct
- req->num_timeout = 0;
- req->sk = NULL;
-
-- setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
-- mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
-+ setup_pinned_timer(&req->rsk_timer, reqsk_timer_handler,
-+ (unsigned long)req);
-+ mod_timer(&req->rsk_timer, jiffies + timeout);
-
- inet_ehash_insert(req_to_sk(req), NULL);
- /* before letting lookups find us, make sure all req fields
---- a/net/ipv4/inet_timewait_sock.c
-+++ b/net/ipv4/inet_timewait_sock.c
-@@ -188,7 +188,8 @@ struct inet_timewait_sock *inet_twsk_all
- tw->tw_prot = sk->sk_prot_creator;
- atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
- twsk_net_set(tw, sock_net(sk));
-- setup_timer(&tw->tw_timer, tw_timer_handler, (unsigned long)tw);
-+ setup_pinned_timer(&tw->tw_timer, tw_timer_handler,
-+ (unsigned long)tw);
- /*
- * Because we use RCU lookups, we should not set tw_refcnt
- * to a non null value before everything is setup for this
-@@ -248,7 +249,7 @@ void __inet_twsk_schedule(struct inet_ti
-
- tw->tw_kill = timeo <= 4*HZ;
- if (!rearm) {
-- BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
-+ BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
- atomic_inc(&tw->tw_dr->tw_count);
- } else {
- mod_timer_pending(&tw->tw_timer, jiffies + timeo);
diff --git a/patches/net-make-devnet_rename_seq-a-mutex.patch b/patches/net-make-devnet_rename_seq-a-mutex.patch
index 85140e9f57c347..c731a8f7a69555 100644
--- a/patches/net-make-devnet_rename_seq-a-mutex.patch
+++ b/patches/net-make-devnet_rename_seq-a-mutex.patch
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -188,6 +188,7 @@ static unsigned int napi_gen_id = NR_CPU
+@@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPU
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline void dev_base_seq_inc(struct net *net)
{
-@@ -886,7 +887,8 @@ int netdev_get_name(struct net *net, cha
+@@ -888,7 +889,8 @@ int netdev_get_name(struct net *net, cha
strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto retry;
}
-@@ -1155,20 +1157,17 @@ int dev_change_name(struct net_device *d
+@@ -1157,20 +1159,17 @@ int dev_change_name(struct net_device *d
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (oldname[0] && !strchr(oldname, '%'))
netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1181,11 +1180,12 @@ int dev_change_name(struct net_device *d
+@@ -1183,11 +1182,12 @@ int dev_change_name(struct net_device *d
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
netdev_adjacent_rename_links(dev, oldname);
-@@ -1206,7 +1206,8 @@ int dev_change_name(struct net_device *d
+@@ -1208,7 +1208,8 @@ int dev_change_name(struct net_device *d
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
-@@ -1219,6 +1220,11 @@ int dev_change_name(struct net_device *d
+@@ -1221,6 +1222,11 @@ int dev_change_name(struct net_device *d
}
return err;
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 19618c020fc83c..f81b8159b1909e 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -15,73 +15,49 @@ the recursion properly on -RT.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/netdevice.h | 9 +++++++++
+ include/linux/netdevice.h | 41 ++++++++++++++++++++++++++++++++++++++++-
include/linux/sched.h | 3 +++
- net/core/dev.c | 41 ++++++++++++++++++++++++++++++++++++++---
- 3 files changed, 50 insertions(+), 3 deletions(-)
+ net/core/dev.c | 9 +++++----
+ net/core/filter.c | 6 +++---
+ 4 files changed, 51 insertions(+), 8 deletions(-)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2396,11 +2396,20 @@ void netdev_freemem(struct net_device *d
+@@ -2409,14 +2409,53 @@ void netdev_freemem(struct net_device *d
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
+-DECLARE_PER_CPU(int, xmit_recursion);
+ #define XMIT_RECURSION_LIMIT 10
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int dev_recursion_level(void)
+{
+ return current->xmit_recursion;
+}
+
-+#else
-+
- DECLARE_PER_CPU(int, xmit_recursion);
- static inline int dev_recursion_level(void)
- {
- return this_cpu_read(xmit_recursion);
- }
-+#endif
-
- struct net_device *dev_get_by_index(struct net *net, int ifindex);
- struct net_device *__dev_get_by_index(struct net *net, int ifindex);
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1886,6 +1886,9 @@ struct task_struct {
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int xmit_recursion;
-+#endif
- int pagefault_disabled;
- #ifdef CONFIG_MMU
- struct task_struct *oom_reaper_list;
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -3098,9 +3098,44 @@ static void skb_update_prio(struct sk_bu
- #define skb_update_prio(skb)
- #endif
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+
+static inline int xmit_rec_read(void)
+{
-+ return current->xmit_recursion;
++ return current->xmit_recursion;
+}
+
+static inline void xmit_rec_inc(void)
+{
-+ current->xmit_recursion++;
++ current->xmit_recursion++;
+}
+
+static inline void xmit_rec_dec(void)
+{
-+ current->xmit_recursion--;
++ current->xmit_recursion--;
+}
+
+#else
+
- DEFINE_PER_CPU(int, xmit_recursion);
- EXPORT_SYMBOL(xmit_recursion);
++DECLARE_PER_CPU(int, xmit_recursion);
+
+ static inline int dev_recursion_level(void)
+ {
+ return this_cpu_read(xmit_recursion);
+ }
+static inline int xmit_rec_read(void)
+{
@@ -99,19 +75,45 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+}
+#endif
+
- #define RECURSION_LIMIT 10
+ struct net_device *dev_get_by_index(struct net *net, int ifindex);
+ struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1957,6 +1957,9 @@ struct task_struct {
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ unsigned long task_state_change;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int xmit_recursion;
++#endif
+ int pagefault_disabled;
+ #ifdef CONFIG_MMU
+ struct task_struct *oom_reaper_list;
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3147,8 +3147,10 @@ static void skb_update_prio(struct sk_bu
+ #define skb_update_prio(skb)
+ #endif
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ DEFINE_PER_CPU(int, xmit_recursion);
+ EXPORT_SYMBOL(xmit_recursion);
++#endif
/**
-@@ -3346,7 +3381,7 @@ static int __dev_queue_xmit(struct sk_bu
+ * dev_loopback_xmit - loop back @skb
+@@ -3392,8 +3394,7 @@ static int __dev_queue_xmit(struct sk_bu
+ int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
-
-- if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
-+ if (xmit_rec_read() > RECURSION_LIMIT)
+- if (unlikely(__this_cpu_read(xmit_recursion) >
+- XMIT_RECURSION_LIMIT))
++ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
goto recursion_alert;
skb = validate_xmit_skb(skb, dev);
-@@ -3356,9 +3391,9 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3403,9 +3404,9 @@ static int __dev_queue_xmit(struct sk_bu
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
@@ -123,3 +125,26 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1592,7 +1592,7 @@ static inline int __bpf_tx_skb(struct ne
+ {
+ int ret;
+
+- if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
++ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) {
+ net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
+ kfree_skb(skb);
+ return -ENETDOWN;
+@@ -1600,9 +1600,9 @@ static inline int __bpf_tx_skb(struct ne
+
+ skb->dev = dev;
+
+- __this_cpu_inc(xmit_recursion);
++ xmit_rec_inc();
+ ret = dev_queue_xmit(skb);
+- __this_cpu_dec(xmit_recursion);
++ xmit_rec_dec();
+
+ return ret;
+ }
diff --git a/patches/net-prevent-abba-deadlock.patch b/patches/net-prevent-abba-deadlock.patch
index f699a9d39299b2..c15ce7815a141d 100644
--- a/patches/net-prevent-abba-deadlock.patch
+++ b/patches/net-prevent-abba-deadlock.patch
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/sock.c
+++ b/net/core/sock.c
-@@ -2421,12 +2421,11 @@ void lock_sock_nested(struct sock *sk, i
+@@ -2508,12 +2508,11 @@ void lock_sock_nested(struct sock *sk, i
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index cdd5d1bd3cf9c3..28053a98c27ed9 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -476,6 +476,14 @@ extern void thread_do_softirq(void);
+@@ -487,6 +487,14 @@ extern void thread_do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void raise_softirq_irqoff(unsigned int nr)
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5211,7 +5211,7 @@ static void net_rx_action(struct softirq
+@@ -5237,7 +5237,7 @@ static void net_rx_action(struct softirq
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
diff --git a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index 92829e408434c6..60ef1ab680cfed 100644
--- a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
-@@ -894,7 +894,7 @@ void dev_deactivate_many(struct list_hea
+@@ -917,7 +917,7 @@ void dev_deactivate_many(struct list_hea
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list)
while (some_qdisc_is_busy(dev))
diff --git a/patches/net-tx-action-avoid-livelock-on-rt.patch b/patches/net-tx-action-avoid-livelock-on-rt.patch
deleted file mode 100644
index d97286982d7abd..00000000000000
--- a/patches/net-tx-action-avoid-livelock-on-rt.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-Subject: net: Avoid livelock in net_tx_action() on RT
-From: Steven Rostedt <srostedt@redhat.com>
-Date: Thu, 06 Oct 2011 10:48:39 -0400
-
-qdisc_lock is taken w/o disabling interrupts or bottom halves. So code
-holding a qdisc_lock() can be interrupted and softirqs can run on the
-return of interrupt in !RT.
-
-The spin_trylock() in net_tx_action() makes sure that the softirq
-does not deadlock. When the lock can't be acquired, q is requeued and
-the NET_TX softirq is raised. That causes the softirq to run over and
-over.
-
-That works in mainline as do_softirq() has a retry loop limit and
-leaves the softirq processing in the interrupt return path and
-schedules ksoftirqd. The task which holds qdisc_lock cannot be
-preempted, so the lock is released and either ksoftirqd or the next
-softirq in the return from interrupt path can proceed. Though it's a
-bit strange to actually run MAX_SOFTIRQ_RESTART (10) loops before it
-decides to bail out even if it's clear in the first iteration :)
-
-On RT all softirq processing is done in a FIFO thread and we don't
-have a loop limit, so ksoftirqd preempts the lock holder forever and
-unqueues and requeues until the reset button is hit.
-
-Due to the forced threading of ksoftirqd on RT we actually cannot
-deadlock on qdisc_lock because it's a "sleeping lock". So it's safe to
-replace the spin_trylock() with a spin_lock(). When contended,
-ksoftirqd is scheduled out and the lock holder can proceed.
-
-[ tglx: Massaged changelog and code comments ]
-
-Solved-by: Thomas Gleixner <tglx@linuxtronix.de>
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-Tested-by: Carsten Emde <cbe@osadl.org>
-Cc: Clark Williams <williams@redhat.com>
-Cc: John Kacur <jkacur@redhat.com>
-Cc: Luis Claudio R. Goncalves <lclaudio@redhat.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- net/core/dev.c | 32 +++++++++++++++++++++++++++++++-
- 1 file changed, 31 insertions(+), 1 deletion(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -3848,6 +3848,36 @@ int netif_rx_ni(struct sk_buff *skb)
- }
- EXPORT_SYMBOL(netif_rx_ni);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * RT runs ksoftirqd as a real time thread and the root_lock is a
-+ * "sleeping spinlock". If the trylock fails then we can go into an
-+ * infinite loop when ksoftirqd preempted the task which actually
-+ * holds the lock, because we requeue q and raise NET_TX softirq
-+ * causing ksoftirqd to loop forever.
-+ *
-+ * It's safe to use spin_lock on RT here as softirqs run in thread
-+ * context and cannot deadlock against the thread which is holding
-+ * root_lock.
-+ *
-+ * On !RT the trylock might fail, but there we bail out from the
-+ * softirq loop after 10 attempts which we can't do on RT. And the
-+ * task holding root_lock cannot be preempted, so the only downside of
-+ * that trylock is that we need 10 loops to decide that we should have
-+ * given up in the first one :)
-+ */
-+static inline int take_root_lock(spinlock_t *lock)
-+{
-+ spin_lock(lock);
-+ return 1;
-+}
-+#else
-+static inline int take_root_lock(spinlock_t *lock)
-+{
-+ return spin_trylock(lock);
-+}
-+#endif
-+
- static void net_tx_action(struct softirq_action *h)
- {
- struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -3895,7 +3925,7 @@ static void net_tx_action(struct softirq
- head = head->next_sched;
-
- root_lock = qdisc_lock(q);
-- if (spin_trylock(root_lock)) {
-+ if (take_root_lock(root_lock)) {
- smp_mb__before_atomic();
- clear_bit(__QDISC_STATE_SCHED,
- &q->state);
diff --git a/patches/net-use-cpu-chill.patch b/patches/net-use-cpu-chill.patch
index e7f2794d9da0b2..83fbc13cbee234 100644
--- a/patches/net-use-cpu-chill.patch
+++ b/patches/net-use-cpu-chill.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-@@ -694,7 +695,7 @@ static void prb_retire_rx_blk_timer_expi
+@@ -695,7 +696,7 @@ static void prb_retire_rx_blk_timer_expi
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -956,7 +957,7 @@ static void prb_retire_current_block(str
+@@ -957,7 +958,7 @@ static void prb_retire_current_block(str
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -49,9 +49,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/llist.h>
+#include <linux/delay.h>
+ #include "rds_single_path.h"
#include "ib_mr.h"
-
-@@ -209,7 +210,7 @@ static inline void wait_clean_list_grace
+@@ -210,7 +211,7 @@ static inline void wait_clean_list_grace
for_each_online_cpu(cpu) {
flag = &per_cpu(clean_list_grace, cpu);
while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
diff --git a/patches/net-wireless-warn-nort.patch b/patches/net-wireless-warn-nort.patch
index 84feed94cbc192..17026bbe8e4198 100644
--- a/patches/net-wireless-warn-nort.patch
+++ b/patches/net-wireless-warn-nort.patch
@@ -12,12 +12,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
-@@ -3679,7 +3679,7 @@ void ieee80211_rx_napi(struct ieee80211_
+@@ -4064,7 +4064,7 @@ void ieee80211_rx_napi(struct ieee80211_
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- WARN_ON_ONCE(softirq_count() == 0);
+ WARN_ON_ONCE_NONRT(softirq_count() == 0);
- if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
+ if (WARN_ON(status->band >= NUM_NL80211_BANDS))
goto drop;
diff --git a/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch b/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
index 62bc044100d5ad..520b9724d1f7df 100644
--- a/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
+++ b/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -7538,7 +7538,7 @@ EXPORT_SYMBOL(free_netdev);
+@@ -7740,7 +7740,7 @@ EXPORT_SYMBOL(free_netdev);
void synchronize_net(void)
{
might_sleep();
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 7a1b0c76aa6544..192679b97356a5 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -221,6 +221,13 @@ static void exit_to_usermode_loop(struct
+@@ -155,6 +155,13 @@ static void exit_to_usermode_loop(struct
if (cached_flags & _TIF_NEED_RESCHED)
schedule();
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1600,6 +1600,10 @@ struct task_struct {
+@@ -1670,6 +1670,10 @@ struct task_struct {
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
diff --git a/patches/panic-disable-random-on-rt.patch b/patches/panic-disable-random-on-rt.patch
index 046423fdce1f8c..25e01410fdc092 100644
--- a/patches/panic-disable-random-on-rt.patch
+++ b/patches/panic-disable-random-on-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -444,9 +444,11 @@ static u64 oops_id;
+@@ -449,9 +449,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
diff --git a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
index 43e2fbcc8611e4..7d85b3eb3365f5 100644
--- a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
+++ b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -341,11 +341,7 @@ static inline int rcu_preempt_depth(void
+@@ -343,11 +343,7 @@ static inline int rcu_preempt_depth(void
/* Internal to kernel */
void rcu_init(void);
void rcu_sched_qs(void);
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -254,7 +254,14 @@ void rcu_sched_qs(void)
+@@ -259,7 +259,14 @@ void rcu_sched_qs(void)
this_cpu_ptr(&rcu_sched_data), true);
}
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "../time/tick-internal.h"
#ifdef CONFIG_RCU_BOOST
-@@ -1338,7 +1339,7 @@ static void rcu_prepare_kthreads(int cpu
+@@ -1244,7 +1245,7 @@ static void rcu_prepare_kthreads(int cpu
#endif /* #else #ifdef CONFIG_RCU_BOOST */
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Check to see if any future RCU-related work will need to be done
-@@ -1355,7 +1356,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1261,7 +1262,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
? 0 : rcu_cpu_has_callbacks(NULL);
}
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
* after it.
-@@ -1451,6 +1454,8 @@ static bool __maybe_unused rcu_try_advan
+@@ -1357,6 +1360,8 @@ static bool __maybe_unused rcu_try_advan
return cbs_ready;
}
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
* to invoke. If the CPU has callbacks, try to advance them. Tell the
-@@ -1496,6 +1501,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1402,6 +1407,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
*nextevt = basemono + dj * TICK_NSEC;
return 0;
}
diff --git a/patches/perf-make-swevent-hrtimer-irqsafe.patch b/patches/perf-make-swevent-hrtimer-irqsafe.patch
index 43ee5b9a7a2066..b48c5ca1283200 100644
--- a/patches/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/patches/perf-make-swevent-hrtimer-irqsafe.patch
@@ -58,7 +58,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -7261,6 +7261,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -8215,6 +8215,7 @@ static void perf_swevent_init_hrtimer(st
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
diff --git a/patches/peter_zijlstra-frob-rcu.patch b/patches/peter_zijlstra-frob-rcu.patch
index 85b03377706916..bf2e31c9c21929 100644
--- a/patches/peter_zijlstra-frob-rcu.patch
+++ b/patches/peter_zijlstra-frob-rcu.patch
@@ -155,7 +155,7 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -428,7 +428,7 @@ void rcu_read_unlock_special(struct task
+@@ -426,7 +426,7 @@ void rcu_read_unlock_special(struct task
}
/* Hardware IRQ handlers cannot block, complain if they get here. */
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 45800d7f64f05f..74db6d2839d581 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1565,6 +1565,9 @@ struct task_struct {
+@@ -1635,6 +1635,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
const struct cred __rcu *real_cred; /* objective and real subjective task
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1228,6 +1228,9 @@ static void rt_mutex_init_task(struct ta
+@@ -1274,6 +1274,9 @@ static void rt_mutex_init_task(struct ta
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
diff --git a/patches/power-disable-highmem-on-rt.patch b/patches/power-disable-highmem-on-rt.patch
index 33863c99cc0d1b..01feea4f2ef3d7 100644
--- a/patches/power-disable-highmem-on-rt.patch
+++ b/patches/power-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -320,7 +320,7 @@ menu "Kernel options"
+@@ -327,7 +327,7 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
diff --git a/patches/powerpc-preempt-lazy-support.patch b/patches/powerpc-preempt-lazy-support.patch
index 08c9c8af88cd66..d0ab2260baa754 100644
--- a/patches/powerpc-preempt-lazy-support.patch
+++ b/patches/powerpc-preempt-lazy-support.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -139,6 +139,7 @@ config PPC
+@@ -141,6 +141,7 @@ config PPC
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
@@ -25,16 +25,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select CLONE_BACKWARDS
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
-@@ -42,6 +42,8 @@ struct thread_info {
+@@ -43,6 +43,8 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
-+ int preempt_lazy_count; /* 0 => preemptable,
++ int preempt_lazy_count; /* 0 => preemptable,
+ <0 => BUG */
unsigned long local_flags; /* private flags for thread */
-
- /* low level flags - has atomic operations done on it */
-@@ -82,8 +84,7 @@ static inline struct thread_info *curren
+ #ifdef CONFIG_LIVEPATCH
+ unsigned long *livepatch_sp;
+@@ -88,8 +90,7 @@ static inline struct thread_info *curren
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
-@@ -101,6 +102,8 @@ static inline struct thread_info *curren
+@@ -107,6 +108,8 @@ static inline struct thread_info *curren
#if defined(CONFIG_PPC64)
#define TIF_ELF2ABI 18 /* function descriptors must die! */
#endif
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-@@ -119,14 +122,16 @@ static inline struct thread_info *curren
+@@ -125,14 +128,16 @@ static inline struct thread_info *curren
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
-@@ -162,6 +162,7 @@ int main(void)
+@@ -156,6 +156,7 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
-@@ -818,7 +818,14 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -835,7 +835,14 @@ user_exc_return: /* r10 contains MSR_KE
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
andi. r8,r8,_TIF_NEED_RESCHED
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
-@@ -829,11 +836,11 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -846,11 +853,11 @@ user_exc_return: /* r10 contains MSR_KE
*/
bl trace_hardirqs_off
#endif
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
-@@ -1154,7 +1161,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -1171,7 +1178,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1175,7 +1182,7 @@ do_resched: /* r10 contains MSR_KERNEL
+@@ -1192,7 +1199,7 @@ do_resched: /* r10 contains MSR_KERNEL
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq restore_user
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
-@@ -644,7 +644,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -657,7 +657,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
bl restore_math
b restore
#endif
@@ -142,7 +142,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq 2f
bl restore_interrupts
SCHEDULE_USER
-@@ -706,10 +706,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -719,10 +719,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
@@ -162,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cmpwi cr1,r8,0
ld r0,SOFTE(r1)
cmpdi r0,0
-@@ -726,7 +734,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -739,7 +747,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
diff --git a/patches/preempt-lazy-check-preempt_schedule.patch b/patches/preempt-lazy-check-preempt_schedule.patch
deleted file mode 100644
index b1fe601e93bcd8..00000000000000
--- a/patches/preempt-lazy-check-preempt_schedule.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 20 Jan 2016 15:13:30 +0100
-Subject: preempt-lazy: Add the lazy-preemption check to preempt_schedule()
-
-Probably in the rebase onto v4.1 this check got moved into the less commonly used
-preempt_schedule_notrace(). This patch ensures that both functions use it.
-
-Reported-by: Mike Galbraith <umgwanakikbuti@gmail.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/sched/core.c | 36 ++++++++++++++++++++++++++++--------
- 1 file changed, 28 insertions(+), 8 deletions(-)
-
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -3501,6 +3501,30 @@ static void __sched notrace preempt_sche
- } while (need_resched());
- }
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+/*
-+ * If TIF_NEED_RESCHED is then we allow to be scheduled away since this is
-+ * set by a RT task. Oterwise we try to avoid beeing scheduled out as long as
-+ * preempt_lazy_count counter >0.
-+ */
-+static __always_inline int preemptible_lazy(void)
-+{
-+ if (test_thread_flag(TIF_NEED_RESCHED))
-+ return 1;
-+ if (current_thread_info()->preempt_lazy_count)
-+ return 0;
-+ return 1;
-+}
-+
-+#else
-+
-+static int preemptible_lazy(void)
-+{
-+ return 1;
-+}
-+
-+#endif
-+
- #ifdef CONFIG_PREEMPT
- /*
- * this is the entry point to schedule() from in-kernel preemption
-@@ -3515,6 +3539,8 @@ asmlinkage __visible void __sched notrac
- */
- if (likely(!preemptible()))
- return;
-+ if (!preemptible_lazy())
-+ return;
-
- preempt_schedule_common();
- }
-@@ -3541,15 +3567,9 @@ asmlinkage __visible void __sched notrac
-
- if (likely(!preemptible()))
- return;
--
--#ifdef CONFIG_PREEMPT_LAZY
-- /*
-- * Check for lazy preemption
-- */
-- if (current_thread_info()->preempt_lazy_count &&
-- !test_thread_flag(TIF_NEED_RESCHED))
-+ if (!preemptible_lazy())
- return;
--#endif
-+
- do {
- preempt_disable_notrace();
- /*
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 020e2379c4da1c..c6eecfaba082cd 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -52,58 +52,20 @@ performance.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- arch/x86/include/asm/preempt.h | 18 +++++++++++++-
- include/linux/preempt.h | 29 ++++++++++++++++++++++-
- include/linux/sched.h | 37 ++++++++++++++++++++++++++++++
- include/linux/thread_info.h | 12 +++++++++
- include/linux/trace_events.h | 1
- kernel/Kconfig.preempt | 6 ++++
- kernel/sched/core.c | 50 ++++++++++++++++++++++++++++++++++++++++-
- kernel/sched/fair.c | 16 ++++++-------
- kernel/sched/features.h | 3 ++
- kernel/sched/sched.h | 9 +++++++
- kernel/trace/trace.c | 37 ++++++++++++++++++------------
- kernel/trace/trace.h | 2 +
- kernel/trace/trace_output.c | 14 +++++++++--
- 13 files changed, 205 insertions(+), 29 deletions(-)
+ include/linux/preempt.h | 29 ++++++++++++++++-
+ include/linux/sched.h | 37 ++++++++++++++++++++++
+ include/linux/thread_info.h | 12 ++++++-
+ include/linux/trace_events.h | 1
+ kernel/Kconfig.preempt | 6 +++
+ kernel/sched/core.c | 72 +++++++++++++++++++++++++++++++++++++++++--
+ kernel/sched/fair.c | 16 ++++-----
+ kernel/sched/features.h | 3 +
+ kernel/sched/sched.h | 9 +++++
+ kernel/trace/trace.c | 37 +++++++++++++---------
+ kernel/trace/trace.h | 2 +
+ kernel/trace/trace_output.c | 14 +++++++-
+ 12 files changed, 209 insertions(+), 29 deletions(-)
---- a/arch/x86/include/asm/preempt.h
-+++ b/arch/x86/include/asm/preempt.h
-@@ -79,17 +79,33 @@ static __always_inline void __preempt_co
- * a decrement which hits zero means we have no preempt_count and should
- * reschedule.
- */
--static __always_inline bool __preempt_count_dec_and_test(void)
-+static __always_inline bool ____preempt_count_dec_and_test(void)
- {
- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
- }
-
-+static __always_inline bool __preempt_count_dec_and_test(void)
-+{
-+ if (____preempt_count_dec_and_test())
-+ return true;
-+#ifdef CONFIG_PREEMPT_LAZY
-+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+#else
-+ return false;
-+#endif
-+}
-+
- /*
- * Returns true when we need to resched and can (barring IRQ state).
- */
- static __always_inline bool should_resched(int preempt_offset)
- {
-+#ifdef CONFIG_PREEMPT_LAZY
-+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset ||
-+ test_thread_flag(TIF_NEED_RESCHED_LAZY));
-+#else
- return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
-+#endif
- }
-
- #ifdef CONFIG_PREEMPT
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -153,6 +153,20 @@ extern void preempt_count_sub(int val);
@@ -165,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -3009,6 +3009,43 @@ static inline int test_tsk_need_resched(
+@@ -3238,6 +3238,43 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -228,8 +190,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#define tif_need_resched_lazy() 0
+#endif
- #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
- /*
+ #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+ static inline int arch_within_stack_frames(const void * const stack,
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -58,6 +58,7 @@ struct trace_entry {
@@ -257,7 +219,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default PREEMPT_NONE
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -475,6 +475,38 @@ void resched_curr(struct rq *rq)
+@@ -510,6 +510,38 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -296,7 +258,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2392,6 +2424,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2522,6 +2554,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -306,7 +268,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3181,6 +3216,7 @@ void migrate_disable(void)
+@@ -3356,6 +3391,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -314,7 +276,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
p->migrate_disable = 1;
preempt_enable();
-@@ -3220,6 +3256,7 @@ void migrate_enable(void)
+@@ -3395,6 +3431,7 @@ void migrate_enable(void)
unpin_current_cpu();
preempt_enable();
@@ -322,30 +284,66 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(migrate_enable);
#endif
-@@ -3359,6 +3396,7 @@ static void __sched notrace __schedule(b
+@@ -3535,6 +3572,7 @@ static void __sched notrace __schedule(b
- next = pick_next_task(rq, prev);
+ next = pick_next_task(rq, prev, cookie);
clear_tsk_need_resched(prev);
+ clear_tsk_need_resched_lazy(prev);
clear_preempt_need_resched();
rq->clock_skip_update = 0;
-@@ -3504,6 +3542,14 @@ asmlinkage __visible void __sched notrac
+@@ -3654,6 +3692,30 @@ static void __sched notrace preempt_sche
+ } while (need_resched());
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++/*
++ * If TIF_NEED_RESCHED is set then we allow being scheduled away since it is
++ * set by an RT task. Otherwise we try to avoid being scheduled out as long as
++ * the preempt_lazy_count counter is >0.
++ */
++static __always_inline int preemptible_lazy(void)
++{
++ if (test_thread_flag(TIF_NEED_RESCHED))
++ return 1;
++ if (current_thread_info()->preempt_lazy_count)
++ return 0;
++ return 1;
++}
++
++#else
++
++static inline int preemptible_lazy(void)
++{
++ return 1;
++}
++
++#endif
++
+ #ifdef CONFIG_PREEMPT
+ /*
+ * this is the entry point to schedule() from in-kernel preemption
+@@ -3668,7 +3730,8 @@ asmlinkage __visible void __sched notrac
+ */
+ if (likely(!preemptible()))
+ return;
+-
++ if (!preemptible_lazy())
++ return;
+ preempt_schedule_common();
+ }
+ NOKPROBE_SYMBOL(preempt_schedule);
+@@ -3695,6 +3758,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
-+#ifdef CONFIG_PREEMPT_LAZY
-+ /*
-+ * Check for lazy preemption
-+ */
-+ if (current_thread_info()->preempt_lazy_count &&
-+ !test_thread_flag(TIF_NEED_RESCHED))
++ if (!preemptible_lazy())
+ return;
-+#endif
++
do {
- preempt_disable_notrace();
/*
-@@ -5249,7 +5295,9 @@ void init_idle(struct task_struct *idle,
+ * Because the function tracer can trace preempt_count_sub()
+@@ -5458,7 +5524,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -358,7 +356,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -3333,7 +3333,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3486,7 +3486,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -367,7 +365,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -3357,7 +3357,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3510,7 +3510,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
return;
if (delta > ideal_runtime)
@@ -376,7 +374,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -3502,7 +3502,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -3655,7 +3655,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -385,7 +383,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -3684,7 +3684,7 @@ static void __account_cfs_rq_runtime(str
+@@ -3837,7 +3837,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -394,7 +392,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -4296,7 +4296,7 @@ static void hrtick_start_fair(struct rq
+@@ -4465,7 +4465,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -403,7 +401,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -5441,7 +5441,7 @@ static void check_preempt_wakeup(struct
+@@ -5654,7 +5654,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -412,7 +410,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -8192,7 +8192,7 @@ static void task_fork_fair(struct task_s
+@@ -8380,7 +8380,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -421,7 +419,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -8217,7 +8217,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -8404,7 +8404,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -444,7 +442,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1304,6 +1304,15 @@ extern void init_sched_fair_class(void);
+@@ -1317,6 +1317,15 @@ extern void init_sched_fair_class(void);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -462,7 +460,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1657,6 +1657,7 @@ tracing_generic_entry_update(struct trac
+@@ -1897,6 +1897,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -470,7 +468,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -1667,7 +1668,8 @@ tracing_generic_entry_update(struct trac
+@@ -1907,7 +1908,8 @@ tracing_generic_entry_update(struct trac
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -480,7 +478,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -2563,15 +2565,17 @@ get_total_entries(struct trace_buffer *b
+@@ -2894,15 +2896,17 @@ get_total_entries(struct trace_buffer *b
static void print_lat_help_header(struct seq_file *m)
{
@@ -507,7 +505,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2597,11 +2601,14 @@ static void print_func_help_header_irq(s
+@@ -2928,11 +2932,14 @@ static void print_func_help_header_irq(s
print_event_info(buf, m);
seq_puts(m, "# _-----=> irqs-off\n"
"# / _----=> need-resched\n"
@@ -529,7 +527,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -117,6 +117,7 @@ struct kretprobe_trace_entry_head {
+@@ -123,6 +123,7 @@ struct kretprobe_trace_entry_head {
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
* SOFTIRQ - inside a softirq handler
@@ -537,7 +535,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
-@@ -126,6 +127,7 @@ enum trace_flag_type {
+@@ -132,6 +133,7 @@ enum trace_flag_type {
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
TRACE_FLAG_NMI = 0x40,
diff --git a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
index c55445f09bfe86..7158abb2a6a2ae 100644
--- a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
+++ b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
@@ -15,7 +15,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -276,6 +276,13 @@ asmlinkage void early_printk(const char
+@@ -381,6 +381,13 @@ asmlinkage void early_printk(const char
*/
static bool __read_mostly printk_killswitch;
diff --git a/patches/printk-kill.patch b/patches/printk-kill.patch
index 5bda7a70ed50b6..177b4836aa0e1c 100644
--- a/patches/printk-kill.patch
+++ b/patches/printk-kill.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -117,9 +117,11 @@ do { \
+@@ -125,9 +125,11 @@ struct va_format {
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
@@ -25,12 +25,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+static inline void printk_kill(void) { }
#endif
- typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
+ #ifdef CONFIG_PRINTK_NMI
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -246,6 +246,58 @@ struct printk_log {
+@@ -351,6 +351,58 @@ struct printk_log {
*/
- static DEFINE_RAW_SPINLOCK(logbuf_lock);
+ DEFINE_RAW_SPINLOCK(logbuf_lock);
+#ifdef CONFIG_EARLY_PRINTK
+struct console *early_console;
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -1620,6 +1672,13 @@ asmlinkage int vprintk_emit(int facility
+@@ -1750,6 +1802,13 @@ asmlinkage int vprintk_emit(int facility
/* cpu currently holding logbuf_lock in this function */
static unsigned int logbuf_cpu = UINT_MAX;
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
in_sched = true;
-@@ -1901,26 +1960,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
+@@ -2023,26 +2082,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
#endif /* CONFIG_PRINTK */
diff --git a/patches/printk-rt-aware.patch b/patches/printk-rt-aware.patch
index 5e9b8bb1571851..e376f296a667a9 100644
--- a/patches/printk-rt-aware.patch
+++ b/patches/printk-rt-aware.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1502,6 +1502,7 @@ static void call_console_drivers(int lev
+@@ -1631,6 +1631,7 @@ static void call_console_drivers(int lev
if (!console_drivers)
return;
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
-@@ -1517,6 +1518,7 @@ static void call_console_drivers(int lev
+@@ -1646,6 +1647,7 @@ static void call_console_drivers(int lev
else
con->write(con, text, len);
}
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1821,13 +1823,23 @@ asmlinkage int vprintk_emit(int facility
+@@ -1960,13 +1962,23 @@ asmlinkage int vprintk_emit(int facility
/* If called from the scheduler, we can not call up(). */
if (!in_sched) {
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
console_unlock();
lockdep_on();
}
-@@ -2229,11 +2241,16 @@ static void console_cont_flush(char *tex
+@@ -2358,11 +2370,16 @@ static void console_cont_flush(char *tex
goto out;
len = cont_print_text(text, size);
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
out:
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-@@ -2355,13 +2372,17 @@ void console_unlock(void)
+@@ -2486,13 +2503,17 @@ void console_unlock(void)
console_idx = log_next(console_idx);
console_seq++;
console_prev = msg->flags;
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 3cb6bb7003cf20..9fff059c370cc8 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -241,10 +241,7 @@ extern char ___assert_task_state[1 - 2*!
+@@ -243,10 +243,7 @@ extern char ___assert_task_state[1 - 2*!
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
-@@ -3026,6 +3023,51 @@ static inline int signal_pending_state(l
+@@ -3255,6 +3252,51 @@ static inline int signal_pending_state(l
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
@@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1317,6 +1317,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1373,6 +1373,18 @@ int migrate_swap(struct task_struct *cur
return ret;
}
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1361,7 +1373,7 @@ unsigned long wait_task_inactive(struct
+@@ -1417,7 +1429,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1376,7 +1388,8 @@ unsigned long wait_task_inactive(struct
+@@ -1432,7 +1444,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
@@ -149,5 +149,5 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (!match_state || p->state == match_state ||
+ p->saved_state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
- task_rq_unlock(rq, p, &flags);
+ task_rq_unlock(rq, p, &rf);
diff --git a/patches/radix-tree-rt-aware.patch b/patches/radix-tree-rt-aware.patch
index a91a5af3f3d493..a9de68711c0a03 100644
--- a/patches/radix-tree-rt-aware.patch
+++ b/patches/radix-tree-rt-aware.patch
@@ -8,27 +8,33 @@ user tries to grab any locks after invoking it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/radix-tree.h | 7 ++++++-
+ include/linux/radix-tree.h | 12 +++++++++++-
lib/radix-tree.c | 5 ++++-
- 2 files changed, 10 insertions(+), 2 deletions(-)
+ 2 files changed, 15 insertions(+), 2 deletions(-)
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
-@@ -294,8 +294,13 @@ radix_tree_gang_lookup(struct radix_tree
+@@ -289,9 +289,19 @@ unsigned int radix_tree_gang_lookup(stru
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
void ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items);
-+#ifndef CONFIG_PREEMPT_RT_FULL
- int radix_tree_preload(gfp_t gfp_mask);
- int radix_tree_maybe_preload(gfp_t gfp_mask);
-+#else
++#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int radix_tree_preload(gfp_t gm) { return 0; }
+static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; }
++static inline int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
++{
++ return 0;
++};
++
++#else
+ int radix_tree_preload(gfp_t gfp_mask);
+ int radix_tree_maybe_preload(gfp_t gfp_mask);
+ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
+#endif
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
-@@ -320,7 +325,7 @@ unsigned long radix_tree_locate_item(str
+@@ -316,7 +326,7 @@ unsigned long radix_tree_locate_item(str
static inline void radix_tree_preload_end(void)
{
@@ -39,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
-@@ -241,13 +241,14 @@ radix_tree_node_alloc(struct radix_tree_
+@@ -290,13 +290,14 @@ radix_tree_node_alloc(struct radix_tree_
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
@@ -55,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Update the allocation stack trace as this is more useful
* for debugging.
-@@ -287,6 +288,7 @@ radix_tree_node_free(struct radix_tree_n
+@@ -336,6 +337,7 @@ radix_tree_node_free(struct radix_tree_n
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
@@ -63,11 +69,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
-@@ -361,6 +363,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
- return 0;
+@@ -455,6 +457,7 @@ int radix_tree_maybe_preload_order(gfp_t
+
+ return __radix_tree_preload(gfp_mask, nr_nodes);
}
- EXPORT_SYMBOL(radix_tree_maybe_preload);
+#endif
/*
- * Return the maximum key which can be store into a
+ * The maximum index which can be stored in a radix tree
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index e114aa02abfe6f..868344527f364c 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -11,15 +11,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/char/random.c | 11 +++++------
+ drivers/hv/vmbus_drv.c | 4 +++-
include/linux/irqdesc.h | 1 +
include/linux/random.h | 2 +-
kernel/irq/handle.c | 8 +++++++-
kernel/irq/manage.c | 6 ++++++
- 5 files changed, 20 insertions(+), 8 deletions(-)
+ 6 files changed, 23 insertions(+), 9 deletions(-)
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -891,28 +891,27 @@ static __u32 get_reg(struct fast_pool *f
+@@ -1120,28 +1120,27 @@ static __u32 get_reg(struct fast_pool *f
return *(ptr + f->reg_idx++);
}
@@ -53,6 +54,26 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
fast_mix(fast_pool);
add_interrupt_bench(cycles);
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -761,6 +761,8 @@ static void vmbus_isr(void)
+ void *page_addr;
+ struct hv_message *msg;
+ union hv_synic_event_flags *event;
++ struct pt_regs *regs = get_irq_regs();
++ u64 ip = regs ? instruction_pointer(regs) : 0;
+ bool handled = false;
+
+ page_addr = hv_context.synic_event_page[cpu];
+@@ -808,7 +810,7 @@ static void vmbus_isr(void)
+ tasklet_schedule(hv_context.msg_dpc[cpu]);
+ }
+
+- add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
++ add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip);
+ }
+
+
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -64,6 +64,7 @@ struct irq_desc {
@@ -62,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ u64 random_ip;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
- #ifdef CONFIG_SMP
+ const struct cpumask *percpu_affinity;
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -20,7 +20,7 @@ struct random_ready_callback {
@@ -76,31 +97,27 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern int add_random_ready_callback(struct random_ready_callback *rdy);
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
-@@ -134,6 +134,8 @@ void __irq_wake_thread(struct irq_desc *
-
- irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+@@ -181,10 +181,16 @@ irqreturn_t handle_irq_event_percpu(stru
{
+ irqreturn_t retval;
+ unsigned int flags = 0;
+ struct pt_regs *regs = get_irq_regs();
+ u64 ip = regs ? instruction_pointer(regs) : 0;
- irqreturn_t retval = IRQ_NONE;
- unsigned int flags = 0, irq = desc->irq_data.irq;
- struct irqaction *action;
-@@ -174,7 +176,11 @@ irqreturn_t handle_irq_event_percpu(stru
- retval |= res;
- }
-- add_interrupt_randomness(irq, flags);
+ retval = __handle_irq_event_percpu(desc, &flags);
+
+- add_interrupt_randomness(desc->irq_data.irq, flags);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ desc->random_ip = ip;
+#else
-+ add_interrupt_randomness(irq, flags, ip);
++ add_interrupt_randomness(desc->irq_data.irq, flags, ip);
+#endif
if (!noirqdebug)
note_interrupt(desc, retval);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -1043,6 +1043,12 @@ static int irq_thread(void *data)
+@@ -1023,6 +1023,12 @@ static int irq_thread(void *data)
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
diff --git a/patches/rbtree-include-rcu.h-because-we-use-it.patch b/patches/rbtree-include-rcu.h-because-we-use-it.patch
new file mode 100644
index 00000000000000..d1eca93c043710
--- /dev/null
+++ b/patches/rbtree-include-rcu.h-because-we-use-it.patch
@@ -0,0 +1,24 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Sep 2016 11:52:17 +0200
+Subject: rbtree: include rcu.h because we use it
+
+Since commit c1adf20052d8 ("Introduce rb_replace_node_rcu()")
+rbtree_augmented.h uses RCU related data structures but does not include
+them. It works as long as it gets somehow included before that and fails
+otherwise.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rbtree_augmented.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/linux/rbtree_augmented.h
++++ b/include/linux/rbtree_augmented.h
+@@ -26,6 +26,7 @@
+
+ #include <linux/compiler.h>
+ #include <linux/rbtree.h>
++#include <linux/rcupdate.h>
+
+ /*
+ * Please note - only struct rb_augment_callbacks and the prototypes for
diff --git a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
index af02638fd7b151..6b3df0d9c1bffb 100644
--- a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "tree.h"
#include "rcu.h"
-@@ -2946,18 +2951,17 @@ static void
+@@ -3041,18 +3046,17 @@ static void
/*
* Do RCU core processing for the current CPU.
*/
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
-@@ -2969,18 +2973,105 @@ static void invoke_rcu_callbacks(struct
+@@ -3064,18 +3068,105 @@ static void invoke_rcu_callbacks(struct
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
@@ -168,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -4648,7 +4739,6 @@ void __init rcu_init(void)
+@@ -4237,7 +4328,6 @@ void __init rcu_init(void)
if (dump_tree)
rcu_dump_rcu_node_tree(&rcu_sched_state);
__rcu_init_preempt();
@@ -178,7 +178,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We don't need protection against CPU-hotplug here because
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
-@@ -580,12 +580,10 @@ extern struct rcu_state rcu_bh_state;
+@@ -595,12 +595,10 @@ extern struct rcu_state rcu_bh_state;
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
@@ -191,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef RCU_TREE_NONCORE
-@@ -605,10 +603,9 @@ void call_rcu(struct rcu_head *head, rcu
+@@ -620,10 +618,9 @@ void call_rcu(struct rcu_head *head, rcu
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -247,7 +247,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
-@@ -635,15 +627,6 @@ static void rcu_preempt_check_callbacks(
+@@ -633,15 +625,6 @@ static void rcu_preempt_check_callbacks(
t->rcu_read_unlock_special.b.need_qs = true;
}
@@ -263,7 +263,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Queue a preemptible-RCU callback for invocation after a grace period.
*/
-@@ -925,6 +908,19 @@ void exit_rcu(void)
+@@ -830,6 +813,19 @@ void exit_rcu(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -283,7 +283,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_RCU_BOOST
#include "../locking/rtmutex_common.h"
-@@ -956,16 +952,6 @@ static void rcu_initiate_boost_trace(str
+@@ -861,16 +857,6 @@ static void rcu_initiate_boost_trace(str
#endif /* #else #ifdef CONFIG_RCU_TRACE */
@@ -300,7 +300,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1109,23 +1095,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1014,23 +1000,6 @@ static void rcu_initiate_boost(struct rc
}
/*
@@ -324,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
*/
-@@ -1179,67 +1148,6 @@ static int rcu_spawn_one_boost_kthread(s
+@@ -1084,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(s
return 0;
}
@@ -392,7 +392,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
-@@ -1269,26 +1177,12 @@ static void rcu_boost_kthread_setaffinit
+@@ -1175,26 +1083,12 @@ static void rcu_boost_kthread_setaffinit
free_cpumask_var(cm);
}
@@ -419,7 +419,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_for_each_leaf_node(rcu_state_p, rnp)
(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}
-@@ -1311,11 +1205,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1217,11 +1111,6 @@ static void rcu_initiate_boost(struct rc
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
diff --git a/patches/rcu-disable-more-spots-of-rcu_bh.patch b/patches/rcu-disable-more-spots-of-rcu_bh.patch
deleted file mode 100644
index 328cd9b4a04045..00000000000000
--- a/patches/rcu-disable-more-spots-of-rcu_bh.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 10 Feb 2016 18:30:56 +0100
-Subject: rcu: disable more spots of rcu_bh
-
-We don't use ru_bh on -RT but we still fork a thread for it and keep it
-as a flavour. No more.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/rcu/tree.c | 6 ++++++
- kernel/rcu/tree.h | 2 ++
- 2 files changed, 8 insertions(+)
-
---- a/kernel/rcu/tree.c
-+++ b/kernel/rcu/tree.c
-@@ -440,11 +440,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
- /*
- * Return the number of RCU BH batches started thus far for debug & stats.
- */
-+#ifndef CONFIG_PREEMPT_RT_FULL
- unsigned long rcu_batches_started_bh(void)
- {
- return rcu_bh_state.gpnum;
- }
- EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
-+#endif
-
- /*
- * Return the number of RCU batches completed thus far for debug & stats.
-@@ -549,9 +551,11 @@ void rcutorture_get_gp_data(enum rcutort
- case RCU_FLAVOR:
- rsp = rcu_state_p;
- break;
-+#ifndef CONFIG_PREEMPT_RT_FULL
- case RCU_BH_FLAVOR:
- rsp = &rcu_bh_state;
- break;
-+#endif
- case RCU_SCHED_FLAVOR:
- rsp = &rcu_sched_state;
- break;
-@@ -4637,7 +4641,9 @@ void __init rcu_init(void)
-
- rcu_bootup_announce();
- rcu_init_geometry();
-+#ifndef CONFIG_PREEMPT_RT_FULL
- rcu_init_one(&rcu_bh_state);
-+#endif
- rcu_init_one(&rcu_sched_state);
- if (dump_tree)
- rcu_dump_rcu_node_tree(&rcu_sched_state);
---- a/kernel/rcu/tree.h
-+++ b/kernel/rcu/tree.h
-@@ -572,7 +572,9 @@ extern struct list_head rcu_struct_flavo
- */
- extern struct rcu_state rcu_sched_state;
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- extern struct rcu_state rcu_bh_state;
-+#endif
-
- #ifdef CONFIG_PREEMPT_RCU
- extern struct rcu_state rcu_preempt_state;
diff --git a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
index ba26849ccf838a..768a8d4b9f7385 100644
--- a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
+++ b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -610,7 +610,7 @@ config RCU_FANOUT_LEAF
+@@ -613,7 +613,7 @@ config RCU_FANOUT_LEAF
config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
diff --git a/patches/rcu-make-RCU_BOOST-default-on-RT.patch b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
index fbb76d895bfc61..d7e3e6d48005d8 100644
--- a/patches/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -494,7 +494,7 @@ config TINY_RCU
+@@ -496,7 +496,7 @@ config TINY_RCU
config RCU_EXPERT
bool "Make expert-level adjustments to RCU configuration"
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
help
This option needs to be enabled if you wish to make
expert-level adjustments to RCU configuration. By default,
-@@ -637,7 +637,7 @@ config TREE_RCU_TRACE
+@@ -640,7 +640,7 @@ config TREE_RCU_TRACE
config RCU_BOOST
bool "Enable RCU priority boosting"
depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
diff --git a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index 7f2d9e88470d8f..b930f2e450919f 100644
--- a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -25,14 +25,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/rcupdate.h | 23 +++++++++++++++++++++++
- include/linux/rcutree.h | 18 ++++++++++++++++--
- kernel/rcu/tree.c | 16 ++++++++++++++++
+ include/linux/rcutree.h | 21 ++++++++++++++++++---
+ kernel/rcu/rcutorture.c | 7 +++++++
+ kernel/rcu/tree.c | 24 ++++++++++++++++++++++++
+ kernel/rcu/tree.h | 2 ++
kernel/rcu/update.c | 2 ++
- 4 files changed, 57 insertions(+), 2 deletions(-)
+ 6 files changed, 76 insertions(+), 3 deletions(-)
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -177,6 +177,9 @@ void call_rcu(struct rcu_head *head,
+@@ -179,6 +179,9 @@ void call_rcu(struct rcu_head *head,
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -42,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
-@@ -200,6 +203,7 @@ void call_rcu(struct rcu_head *head,
+@@ -202,6 +205,7 @@ void call_rcu(struct rcu_head *head,
*/
void call_rcu_bh(struct rcu_head *head,
rcu_callback_t func);
@@ -50,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
-@@ -337,7 +341,11 @@ static inline int rcu_preempt_depth(void
+@@ -339,7 +343,11 @@ static inline int rcu_preempt_depth(void
/* Internal to kernel */
void rcu_init(void);
void rcu_sched_qs(void);
@@ -62,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
-@@ -505,7 +513,14 @@ extern struct lockdep_map rcu_callback_m
+@@ -508,7 +516,14 @@ extern struct lockdep_map rcu_callback_m
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
@@ -77,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
-@@ -953,10 +968,14 @@ static inline void rcu_read_unlock(void)
+@@ -906,10 +921,14 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
@@ -92,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -966,10 +985,14 @@ static inline void rcu_read_lock_bh(void
+@@ -919,10 +938,14 @@ static inline void rcu_read_lock_bh(void
*/
static inline void rcu_read_unlock_bh(void)
{
@@ -133,12 +135,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_barrier_sched(void);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
-@@ -85,12 +93,10 @@ unsigned long rcu_batches_started(void);
- unsigned long rcu_batches_started_bh(void);
+@@ -82,17 +90,14 @@ void cond_synchronize_sched(unsigned lon
+ extern unsigned long rcutorture_testseq;
+ extern unsigned long rcutorture_vernum;
+ unsigned long rcu_batches_started(void);
+-unsigned long rcu_batches_started_bh(void);
unsigned long rcu_batches_started_sched(void);
unsigned long rcu_batches_completed(void);
-unsigned long rcu_batches_completed_bh(void);
unsigned long rcu_batches_completed_sched(void);
+ unsigned long rcu_exp_batches_completed(void);
+ unsigned long rcu_exp_batches_completed_sched(void);
void show_rcu_gp_kthreads(void);
void rcu_force_quiescent_state(void);
@@ -146,24 +153,49 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_sched_force_quiescent_state(void);
void rcu_idle_enter(void);
-@@ -107,6 +113,14 @@ extern int rcu_scheduler_active __read_m
+@@ -109,6 +114,16 @@ extern int rcu_scheduler_active __read_m
bool rcu_is_watching(void);
+#ifndef CONFIG_PREEMPT_RT_FULL
+void rcu_bh_force_quiescent_state(void);
++unsigned long rcu_batches_started_bh(void);
+unsigned long rcu_batches_completed_bh(void);
+#else
+# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
+# define rcu_batches_completed_bh rcu_batches_completed
++# define rcu_batches_started_bh rcu_batches_completed
+#endif
+
void rcu_all_qs(void);
- #endif /* __LINUX_RCUTREE_H */
+ /* RCUtree hotplug events */
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -404,6 +404,7 @@ static struct rcu_torture_ops rcu_ops =
+ .name = "rcu"
+ };
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Definitions for rcu_bh torture testing.
+ */
+@@ -443,6 +444,12 @@ static struct rcu_torture_ops rcu_bh_ops
+ .name = "rcu_bh"
+ };
+
++#else
++static struct rcu_torture_ops rcu_bh_ops = {
++ .ttype = INVALID_RCU_FLAVOR,
++};
++#endif
++
+ /*
+ * Don't even think about trying any of these in real life!!!
+ * The names includes "busted", and they really means it!
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -254,6 +254,7 @@ void rcu_sched_qs(void)
+@@ -259,6 +259,7 @@ void rcu_sched_qs(void)
this_cpu_ptr(&rcu_sched_data), true);
}
@@ -171,7 +203,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_bh_qs(void)
{
if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
-@@ -263,6 +264,7 @@ void rcu_bh_qs(void)
+@@ -268,6 +269,7 @@ void rcu_bh_qs(void)
__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
}
}
@@ -179,7 +211,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
-@@ -450,6 +452,7 @@ unsigned long rcu_batches_completed_sche
+@@ -448,11 +450,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
+ /*
+ * Return the number of RCU BH batches started thus far for debug & stats.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ unsigned long rcu_batches_started_bh(void)
+ {
+ return rcu_bh_state.gpnum;
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
++#endif
+
+ /*
+ * Return the number of RCU batches completed thus far for debug & stats.
+@@ -472,6 +476,7 @@ unsigned long rcu_batches_completed_sche
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
@@ -187,7 +233,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Return the number of RCU BH batches completed thus far for debug & stats.
*/
-@@ -477,6 +480,13 @@ void rcu_bh_force_quiescent_state(void)
+@@ -480,6 +485,7 @@ unsigned long rcu_batches_completed_bh(v
+ return rcu_bh_state.completed;
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
++#endif
+
+ /*
+ * Return the number of RCU expedited batches completed thus far for
+@@ -503,6 +509,7 @@ unsigned long rcu_exp_batches_completed_
+ }
+ EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Force a quiescent state.
+ */
+@@ -521,6 +528,13 @@ void rcu_bh_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
@@ -201,7 +263,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Force a quiescent state for RCU-sched.
*/
-@@ -3099,6 +3109,7 @@ void call_rcu_sched(struct rcu_head *hea
+@@ -571,9 +585,11 @@ void rcutorture_get_gp_data(enum rcutort
+ case RCU_FLAVOR:
+ rsp = rcu_state_p;
+ break;
++#ifndef CONFIG_PREEMPT_RT_FULL
+ case RCU_BH_FLAVOR:
+ rsp = &rcu_bh_state;
+ break;
++#endif
+ case RCU_SCHED_FLAVOR:
+ rsp = &rcu_sched_state;
+ break;
+@@ -3192,6 +3208,7 @@ void call_rcu_sched(struct rcu_head *hea
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -209,7 +283,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
-@@ -3107,6 +3118,7 @@ void call_rcu_bh(struct rcu_head *head,
+@@ -3200,6 +3217,7 @@ void call_rcu_bh(struct rcu_head *head,
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -217,7 +291,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Queue an RCU callback for lazy invocation after a grace period.
-@@ -3198,6 +3210,7 @@ void synchronize_sched(void)
+@@ -3291,6 +3309,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -225,7 +299,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
-@@ -3224,6 +3237,7 @@ void synchronize_rcu_bh(void)
+@@ -3317,6 +3336,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -233,7 +307,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* get_state_synchronize_rcu - Snapshot current RCU state
-@@ -4104,6 +4118,7 @@ static void _rcu_barrier(struct rcu_stat
+@@ -3695,6 +3715,7 @@ static void _rcu_barrier(struct rcu_stat
mutex_unlock(&rsp->barrier_mutex);
}
@@ -241,7 +315,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
-@@ -4112,6 +4127,7 @@ void rcu_barrier_bh(void)
+@@ -3703,6 +3724,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -249,6 +323,28 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+@@ -4196,7 +4218,9 @@ void __init rcu_init(void)
+
+ rcu_bootup_announce();
+ rcu_init_geometry();
++#ifndef CONFIG_PREEMPT_RT_FULL
+ rcu_init_one(&rcu_bh_state);
++#endif
+ rcu_init_one(&rcu_sched_state);
+ if (dump_tree)
+ rcu_dump_rcu_node_tree(&rcu_sched_state);
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -587,7 +587,9 @@ extern struct list_head rcu_struct_flavo
+ */
+ extern struct rcu_state rcu_sched_state;
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern struct rcu_state rcu_bh_state;
++#endif
+
+ #ifdef CONFIG_PREEMPT_RCU
+ extern struct rcu_state rcu_preempt_state;
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -295,6 +295,7 @@ int rcu_read_lock_held(void)
diff --git a/patches/rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch b/patches/rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch
deleted file mode 100644
index 0f95146d0820a6..00000000000000
--- a/patches/rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From: Clark Williams <williams@redhat.com>
-Date: Fri, 26 Feb 2016 13:19:20 -0600
-Subject: rcu/torture: Comment out rcu_bh ops on PREEMPT_RT_FULL
-
-RT has dropped support of rcu_bh, comment out in rcutorture.
-
-Signed-off-by: Clark Williams <williams@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/rcu/rcutorture.c | 7 +++++++
- 1 file changed, 7 insertions(+)
-
---- a/kernel/rcu/rcutorture.c
-+++ b/kernel/rcu/rcutorture.c
-@@ -409,6 +409,7 @@ static struct rcu_torture_ops rcu_ops =
- .name = "rcu"
- };
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * Definitions for rcu_bh torture testing.
- */
-@@ -448,6 +449,12 @@ static struct rcu_torture_ops rcu_bh_ops
- .name = "rcu_bh"
- };
-
-+#else
-+static struct rcu_torture_ops rcu_bh_ops = {
-+ .ttype = INVALID_RCU_FLAVOR,
-+};
-+#endif
-+
- /*
- * Don't even think about trying any of these in real life!!!
- * The names includes "busted", and they really means it!
diff --git a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
index f444f9cce2fe99..58dc9da1aee186 100644
--- a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
+++ b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -259,7 +259,12 @@ static void rcu_preempt_qs(void);
+@@ -264,7 +264,12 @@ static void rcu_preempt_qs(void);
void rcu_bh_qs(void)
{
diff --git a/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
index 206dfeb9786b6f..250dc3c70a4870 100644
--- a/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
+++ b/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
-@@ -319,6 +319,30 @@ unsigned long arch_randomize_brk(struct
+@@ -323,6 +323,30 @@ unsigned long arch_randomize_brk(struct
}
#ifdef CONFIG_MMU
diff --git a/patches/relay-fix-timer-madness.patch b/patches/relay-fix-timer-madness.patch
index c5d30f96212041..4ac41bfce74842 100644
--- a/patches/relay-fix-timer-madness.patch
+++ b/patches/relay-fix-timer-madness.patch
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else
del_timer_sync(&buf->timer);
-@@ -736,15 +741,6 @@ size_t relay_switch_subbuf(struct rchan_
+@@ -767,15 +772,6 @@ size_t relay_switch_subbuf(struct rchan_
else
buf->early_bytes += buf->chan->subbuf_size -
buf->padding[old_subbuf];
diff --git a/patches/rt-add-rt-locks.patch b/patches/rt-add-rt-locks.patch
index 31c7532ccd0565..0b6a6c231fef31 100644
--- a/patches/rt-add-rt-locks.patch
+++ b/patches/rt-add-rt-locks.patch
@@ -11,32 +11,32 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/kernel.h | 4
include/linux/locallock.h | 6
- include/linux/mutex.h | 20 +
+ include/linux/mutex.h | 20 -
include/linux/mutex_rt.h | 84 ++++++
include/linux/rtmutex.h | 29 +-
include/linux/rwlock_rt.h | 99 +++++++
include/linux/rwlock_types_rt.h | 33 ++
include/linux/rwsem.h | 6
- include/linux/rwsem_rt.h | 152 ++++++++++++
+ include/linux/rwsem_rt.h | 167 ++++++++++++
include/linux/sched.h | 19 +
include/linux/spinlock.h | 12
include/linux/spinlock_api_smp.h | 4
- include/linux/spinlock_rt.h | 173 +++++++++++++
+ include/linux/spinlock_rt.h | 162 ++++++++++++
include/linux/spinlock_types.h | 11
include/linux/spinlock_types_rt.h | 48 +++
kernel/futex.c | 10
kernel/locking/Makefile | 9
- kernel/locking/rt.c | 476 ++++++++++++++++++++++++++++++++++++++
- kernel/locking/rtmutex.c | 422 +++++++++++++++++++++++++++++++--
- kernel/locking/rtmutex_common.h | 14 +
+ kernel/locking/rt.c | 498 ++++++++++++++++++++++++++++++++++++++
+ kernel/locking/rtmutex.c | 460 +++++++++++++++++++++++++++++++++--
+ kernel/locking/rtmutex_common.h | 14 -
kernel/locking/spinlock.c | 7
kernel/locking/spinlock_debug.c | 5
kernel/sched/core.c | 7
- 23 files changed, 1594 insertions(+), 56 deletions(-)
+ 23 files changed, 1658 insertions(+), 56 deletions(-)
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -188,6 +188,9 @@ extern int _cond_resched(void);
+@@ -194,6 +194,9 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
-@@ -195,6 +198,7 @@ extern int _cond_resched(void);
+@@ -201,6 +204,7 @@ extern int _cond_resched(void);
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
@@ -61,9 +61,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define spin_lock_local(lock) rt_spin_lock(lock)
-+# define spin_trylock_local(lock) rt_spin_trylock(lock)
-+# define spin_unlock_local(lock) rt_spin_unlock(lock)
++# define spin_lock_local(lock) rt_spin_lock__no_mg(lock)
++# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock)
++# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock)
+#else
# define spin_lock_local(lock) spin_lock(lock)
# define spin_trylock_local(lock) spin_trylock(lock)
@@ -410,7 +410,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
-@@ -18,6 +18,10 @@
+@@ -19,6 +19,10 @@
#include <linux/osq_lock.h>
#endif
@@ -421,7 +421,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-@@ -177,4 +181,6 @@ extern void up_read_non_owner(struct rw_
+@@ -184,4 +188,6 @@ extern void up_read_non_owner(struct rw_
# define up_read_non_owner(sem) up_read(sem)
#endif
@@ -430,7 +430,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* _LINUX_RWSEM_H */
--- /dev/null
+++ b/include/linux/rwsem_rt.h
-@@ -0,0 +1,152 @@
+@@ -0,0 +1,167 @@
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
+
@@ -485,8 +485,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+} while (0)
+
+extern void rt_down_write(struct rw_semaphore *rwsem);
++extern int rt_down_write_killable(struct rw_semaphore *rwsem);
+extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
+extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
++extern int rt_down_write_killable_nested(struct rw_semaphore *rwsem,
++ int subclass);
+extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
+ struct lockdep_map *nest);
+extern void rt__down_read(struct rw_semaphore *rwsem);
@@ -533,6 +536,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ rt_down_write(sem);
+}
+
++static inline int down_write_killable(struct rw_semaphore *sem)
++{
++ return rt_down_write_killable(sem);
++}
++
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+ return rt_down_write_trylock(sem);
@@ -567,6 +575,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+{
+ rt_down_write_nested(sem, subclass);
+}
++
++static inline int down_write_killable_nested(struct rw_semaphore *sem,
++ int subclass)
++{
++ return rt_down_write_killable_nested(sem, subclass);
++}
++
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void down_write_nest_lock(struct rw_semaphore *sem,
+ struct rw_semaphore *nest_lock)
@@ -585,7 +600,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -310,6 +310,11 @@ extern char ___assert_task_state[1 - 2*!
+@@ -312,6 +312,11 @@ extern char ___assert_task_state[1 - 2*!
#endif
@@ -597,7 +612,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Task command name length */
#define TASK_COMM_LEN 16
-@@ -981,8 +986,18 @@ struct wake_q_head {
+@@ -1009,8 +1014,18 @@ struct wake_q_head {
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
extern void wake_q_add(struct wake_q_head *head,
@@ -665,7 +680,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* __LINUX_SPINLOCK_API_SMP_H */
--- /dev/null
+++ b/include/linux/spinlock_rt.h
-@@ -0,0 +1,173 @@
+@@ -0,0 +1,162 @@
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
@@ -686,6 +701,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ __rt_spin_lock_init(slock, #slock, &__key); \
+} while (0)
+
++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);
++
+extern void __lockfunc rt_spin_lock(spinlock_t *lock);
+extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
@@ -700,19 +719,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * lockdep-less calls, for derived types like rwlock:
+ * (for trylock they can use rt_mutex_trylock() directly.
+ */
++extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+
-+#define spin_lock(lock) \
-+ do { \
-+ migrate_disable(); \
-+ rt_spin_lock(lock); \
-+ } while (0)
++#define spin_lock(lock) rt_spin_lock(lock)
+
+#define spin_lock_bh(lock) \
+ do { \
+ local_bh_disable(); \
-+ migrate_disable(); \
+ rt_spin_lock(lock); \
+ } while (0)
+
@@ -723,24 +738,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#define spin_trylock(lock) \
+({ \
+ int __locked; \
-+ migrate_disable(); \
+ __locked = spin_do_trylock(lock); \
-+ if (!__locked) \
-+ migrate_enable(); \
+ __locked; \
+})
+
+#ifdef CONFIG_LOCKDEP
+# define spin_lock_nested(lock, subclass) \
+ do { \
-+ migrate_disable(); \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+
+#define spin_lock_bh_nested(lock, subclass) \
+ do { \
+ local_bh_disable(); \
-+ migrate_disable(); \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+
@@ -748,7 +758,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
-+ migrate_disable(); \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+#else
@@ -784,16 +793,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+/* FIXME: we need rt_spin_lock_nest_lock */
+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
+
-+#define spin_unlock(lock) \
-+ do { \
-+ rt_spin_unlock(lock); \
-+ migrate_enable(); \
-+ } while (0)
++#define spin_unlock(lock) rt_spin_unlock(lock)
+
+#define spin_unlock_bh(lock) \
+ do { \
+ rt_spin_unlock(lock); \
-+ migrate_enable(); \
+ local_bh_enable(); \
+ } while (0)
+
@@ -911,7 +915,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1271,6 +1271,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1292,6 +1292,7 @@ static int wake_futex_pi(u32 __user *uad
struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval;
WAKE_Q(wake_q);
@@ -919,7 +923,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bool deboost;
int ret = 0;
-@@ -1337,7 +1338,8 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1358,7 +1359,8 @@ static int wake_futex_pi(u32 __user *uad
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
@@ -929,7 +933,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* First unlock HB so the waiter does not spin on it once he got woken
-@@ -1347,6 +1349,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1368,6 +1370,7 @@ static int wake_futex_pi(u32 __user *uad
*/
spin_unlock(&hb->lock);
wake_up_q(&wake_q);
@@ -937,7 +941,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (deboost)
rt_mutex_adjust_prio(current);
-@@ -2821,10 +2824,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2842,10 +2845,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -985,7 +989,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
--- /dev/null
+++ b/kernel/locking/rt.c
-@@ -0,0 +1,476 @@
+@@ -0,0 +1,498 @@
+/*
+ * kernel/rt.c
+ *
@@ -1223,7 +1227,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+void __lockfunc rt_write_lock(rwlock_t *rwlock)
+{
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
-+ migrate_disable();
+ __rt_spin_lock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_write_lock);
@@ -1237,7 +1240,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * recursive read locks succeed when current owns the lock
+ */
+ if (rt_mutex_owner(lock) != current) {
-+ migrate_disable();
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __rt_spin_lock(lock);
+ }
@@ -1348,6 +1350,30 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+EXPORT_SYMBOL(rt_down_write);
+
++int rt_down_write_killable(struct rw_semaphore *rwsem)
++{
++ int ret;
++
++ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&rwsem->lock);
++ if (ret)
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_write_killable);
++
++int rt_down_write_killable_nested(struct rw_semaphore *rwsem, int subclass)
++{
++ int ret;
++
++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&rwsem->lock);
++ if (ret)
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_write_killable_nested);
++
+void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
+{
+ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
@@ -1578,7 +1604,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
-@@ -884,6 +918,314 @@ static int try_to_take_rt_mutex(struct r
+@@ -884,6 +918,352 @@ static int try_to_take_rt_mutex(struct r
return 1;
}
@@ -1681,7 +1707,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
+ raw_spin_unlock(&self->pi_lock);
+
-+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
++ ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
+ BUG_ON(ret);
+
+ for (;;) {
@@ -1766,8 +1792,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ rt_mutex_adjust_prio(current);
+}
+
++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
++{
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock__no_mg);
++
+void __lockfunc rt_spin_lock(spinlock_t *lock)
+{
++ migrate_disable();
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+}
@@ -1775,24 +1809,41 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+{
++ migrate_disable();
+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+}
+EXPORT_SYMBOL(__rt_spin_lock);
+
++void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
++}
++EXPORT_SYMBOL(__rt_spin_lock__no_mg);
++
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+{
++ migrate_disable();
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock_nested);
+#endif
+
++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ spin_release(&lock->dep_map, 1, _RET_IP_);
++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(rt_spin_unlock__no_mg);
++
+void __lockfunc rt_spin_unlock(spinlock_t *lock)
+{
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++ migrate_enable();
+}
+EXPORT_SYMBOL(rt_spin_unlock);
+
@@ -1814,12 +1865,27 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+EXPORT_SYMBOL(rt_spin_unlock_wait);
+
++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
++{
++ int ret;
++
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret)
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock__no_mg);
++
+int __lockfunc rt_spin_trylock(spinlock_t *lock)
+{
-+ int ret = rt_mutex_trylock(&lock->lock);
++ int ret;
+
++ migrate_disable();
++ ret = rt_mutex_trylock(&lock->lock);
+ if (ret)
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock);
@@ -1858,12 +1924,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
-+ migrate_disable();
+ rt_spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ rt_spin_unlock(lock);
-+ migrate_enable();
+ return 0;
+}
+EXPORT_SYMBOL(atomic_dec_and_spin_lock);
@@ -1893,7 +1957,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Task blocks on lock.
*
-@@ -996,6 +1338,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -996,6 +1376,7 @@ static int task_blocks_on_rt_mutex(struc
* Called with lock->wait_lock held and interrupts disabled.
*/
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
@@ -1901,7 +1965,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct rt_mutex *lock)
{
struct rt_mutex_waiter *waiter;
-@@ -1024,7 +1367,10 @@ static void mark_wakeup_next_waiter(stru
+@@ -1024,7 +1405,10 @@ static void mark_wakeup_next_waiter(stru
raw_spin_unlock(&current->pi_lock);
@@ -1913,7 +1977,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1105,11 +1451,11 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1105,11 +1489,11 @@ void rt_mutex_adjust_pi(struct task_stru
return;
}
next_lock = waiter->lock;
@@ -1926,7 +1990,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
next_lock, NULL, task);
}
-@@ -1196,9 +1542,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1196,9 +1580,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
unsigned long flags;
int ret = 0;
@@ -1937,7 +2001,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Technically we could use raw_spin_[un]lock_irq() here, but this can
-@@ -1292,7 +1636,8 @@ static inline int rt_mutex_slowtrylock(s
+@@ -1292,7 +1674,8 @@ static inline int rt_mutex_slowtrylock(s
* Return whether the current task needs to undo a potential priority boosting.
*/
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
@@ -1947,7 +2011,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned long flags;
-@@ -1348,7 +1693,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1348,7 +1731,7 @@ static bool __sched rt_mutex_slowunlock(
*
* Queue the next waiter for wakeup once we release the wait_lock.
*/
@@ -1956,7 +2020,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-@@ -1405,17 +1750,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1405,17 +1788,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
@@ -1979,7 +2043,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Undo pi boosting if necessary: */
if (deboost)
-@@ -1552,13 +1900,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+@@ -1552,13 +1938,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
* required or not.
*/
bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
@@ -1996,7 +2060,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -1591,13 +1940,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+@@ -1591,13 +1978,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
@@ -2011,7 +2075,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1612,7 +1960,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+@@ -1612,7 +1998,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
@@ -2020,7 +2084,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
rt_mutex_deadlock_account_lock(lock, proxy_owner);
-@@ -1774,3 +2122,25 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -1774,3 +2160,25 @@ int rt_mutex_finish_proxy_lock(struct rt
return ret;
}
@@ -2147,7 +2211,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -419,7 +419,7 @@ void wake_q_add(struct wake_q_head *head
+@@ -454,7 +454,7 @@ void wake_q_add(struct wake_q_head *head
head->lastp = &node->next;
}
@@ -2156,7 +2220,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
struct wake_q_node *node = head->first;
-@@ -436,7 +436,10 @@ void wake_up_q(struct wake_q_head *head)
+@@ -471,7 +471,10 @@ void wake_up_q(struct wake_q_head *head)
* wake_up_process() implies a wmb() to pair with the queueing
* in wake_q_add() so as not to miss wakeups.
*/
diff --git a/patches/rt-introduce-cpu-chill.patch b/patches/rt-introduce-cpu-chill.patch
index 4fee07d7ab3787..917f783b13182b 100644
--- a/patches/rt-introduce-cpu-chill.patch
+++ b/patches/rt-introduce-cpu-chill.patch
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* defined(_LINUX_DELAY_H) */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1788,6 +1788,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
+@@ -1768,6 +1768,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
diff --git a/patches/rtmutex-Use-chainwalking-control-enum.patch b/patches/rtmutex-Use-chainwalking-control-enum.patch
deleted file mode 100644
index 9d45aa2dd1b07d..00000000000000
--- a/patches/rtmutex-Use-chainwalking-control-enum.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From: "bmouring@ni.com" <bmouring@ni.com>
-Date: Tue, 15 Dec 2015 17:07:30 -0600
-Subject: rtmutex: Use chainwalking control enum
-
-In 8930ed80 (rtmutex: Cleanup deadlock detector debug logic),
-chainwalking control enums were introduced to limit the deadlock
-detection logic. One of the calls to task_blocks_on_rt_mutex was
-missed when converting to use the enums.
-
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Brad Mouring <brad.mouring@ni.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/locking/rtmutex.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1017,7 +1017,7 @@ static void noinline __sched rt_spin_lo
- __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
- raw_spin_unlock(&self->pi_lock);
-
-- ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
-+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
- BUG_ON(ret);
-
- for (;;) {
diff --git a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
index 70852937256a6b..ce71dfb5aa6026 100644
--- a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
+++ b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
#include "rtmutex_common.h"
-@@ -1219,6 +1220,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1296,6 +1297,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
#endif /* PREEMPT_RT_FULL */
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
static inline int
try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
-@@ -1473,7 +1508,8 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1550,7 +1585,8 @@ void rt_mutex_adjust_pi(struct task_stru
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
int ret = 0;
-@@ -1496,6 +1532,12 @@ static int __sched
+@@ -1573,6 +1609,12 @@ static int __sched
break;
}
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
raw_spin_unlock_irq(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
-@@ -1530,13 +1572,90 @@ static void rt_mutex_handle_deadlock(int
+@@ -1607,13 +1649,90 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -191,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
struct rt_mutex_waiter waiter;
unsigned long flags;
-@@ -1556,6 +1675,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1633,6 +1752,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return 0;
}
-@@ -1570,13 +1691,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1647,13 +1768,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (likely(!ret))
/* sleep on the mutex */
@@ -226,7 +226,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
/*
-@@ -1709,31 +1840,36 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1786,31 +1917,36 @@ static bool __sched rt_mutex_slowunlock(
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -267,7 +267,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
static inline int
-@@ -1780,7 +1916,7 @@ void __sched rt_mutex_lock(struct rt_mut
+@@ -1857,7 +1993,7 @@ void __sched rt_mutex_lock(struct rt_mut
{
might_sleep();
@@ -276,7 +276,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
-@@ -1797,7 +1933,7 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1874,7 +2010,7 @@ int __sched rt_mutex_lock_interruptible(
{
might_sleep();
@@ -285,7 +285,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1810,7 +1946,7 @@ int rt_mutex_timed_futex_lock(struct rt_
+@@ -1887,7 +2023,7 @@ int rt_mutex_timed_futex_lock(struct rt_
might_sleep();
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
@@ -294,7 +294,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
rt_mutex_slowlock);
}
-@@ -1829,7 +1965,7 @@ int __sched rt_mutex_lock_killable(struc
+@@ -1906,7 +2042,7 @@ int __sched rt_mutex_lock_killable(struc
{
might_sleep();
@@ -303,7 +303,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-@@ -1853,6 +1989,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1930,6 +2066,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
@@ -311,7 +311,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -2107,7 +2244,7 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -2184,7 +2321,7 @@ int rt_mutex_finish_proxy_lock(struct rt
set_current_state(TASK_INTERRUPTIBLE);
/* sleep on the mutex */
@@ -320,7 +320,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
if (unlikely(ret))
remove_waiter(lock, waiter);
-@@ -2123,24 +2260,88 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -2200,24 +2337,88 @@ int rt_mutex_finish_proxy_lock(struct rt
return ret;
}
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index b8bdacad98276a..8ddbc2f20da47a 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1894,6 +1894,16 @@ static int futex_requeue(u32 __user *uad
+@@ -1915,6 +1915,16 @@ static int futex_requeue(u32 __user *uad
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -2784,7 +2794,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2805,7 +2815,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -2843,20 +2853,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2864,20 +2874,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -2865,14 +2910,15 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2886,14 +2931,15 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -126,7 +126,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
/*
-@@ -2885,7 +2931,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2906,7 +2952,8 @@ static int futex_wait_requeue_pi(u32 __u
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
debug_rt_mutex_free_waiter(&rt_waiter);
diff --git a/patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch b/patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch
deleted file mode 100644
index 7a4689046cef86..00000000000000
--- a/patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch
+++ /dev/null
@@ -1,271 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 5 Feb 2016 18:26:11 +0100
-Subject: rtmutex: push down migrate_disable() into rt_spin_lock()
-
-No point in having the migrate disable/enable invocations in all the
-macro/inlines. That's just more code for no win as we do a function
-call anyway. Move it to the core code and save quite some text size.
-
- text data bss dec filename
-11034127 3676912 14901248 29612287 vmlinux.before
-10990437 3676848 14901248 29568533 vmlinux.after
-
-~-40KiB
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/locallock.h | 6 +++---
- include/linux/spinlock_rt.h | 25 +++++++------------------
- kernel/cpu.c | 4 ++--
- kernel/locking/lglock.c | 2 +-
- kernel/locking/rt.c | 2 --
- kernel/locking/rtmutex.c | 44 +++++++++++++++++++++++++++++++++++++++++---
- 6 files changed, 54 insertions(+), 29 deletions(-)
-
---- a/include/linux/locallock.h
-+++ b/include/linux/locallock.h
-@@ -43,9 +43,9 @@ struct local_irq_lock {
- * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
- */
- #ifdef CONFIG_PREEMPT_RT_FULL
--# define spin_lock_local(lock) rt_spin_lock(lock)
--# define spin_trylock_local(lock) rt_spin_trylock(lock)
--# define spin_unlock_local(lock) rt_spin_unlock(lock)
-+# define spin_lock_local(lock) rt_spin_lock__no_mg(lock)
-+# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock)
-+# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock)
- #else
- # define spin_lock_local(lock) spin_lock(lock)
- # define spin_trylock_local(lock) spin_trylock(lock)
---- a/include/linux/spinlock_rt.h
-+++ b/include/linux/spinlock_rt.h
-@@ -18,6 +18,10 @@ do { \
- __rt_spin_lock_init(slock, #slock, &__key); \
- } while (0)
-
-+void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
-+void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
-+int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);
-+
- extern void __lockfunc rt_spin_lock(spinlock_t *lock);
- extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
- extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
-@@ -32,20 +36,16 @@ extern int atomic_dec_and_spin_lock(atom
- * lockdep-less calls, for derived types like rwlock:
- * (for trylock they can use rt_mutex_trylock() directly.
- */
-+extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
- extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
- extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
- extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
-
--#define spin_lock(lock) \
-- do { \
-- migrate_disable(); \
-- rt_spin_lock(lock); \
-- } while (0)
-+#define spin_lock(lock) rt_spin_lock(lock)
-
- #define spin_lock_bh(lock) \
- do { \
- local_bh_disable(); \
-- migrate_disable(); \
- rt_spin_lock(lock); \
- } while (0)
-
-@@ -56,24 +56,19 @@ extern int __lockfunc __rt_spin_trylock(
- #define spin_trylock(lock) \
- ({ \
- int __locked; \
-- migrate_disable(); \
- __locked = spin_do_trylock(lock); \
-- if (!__locked) \
-- migrate_enable(); \
- __locked; \
- })
-
- #ifdef CONFIG_LOCKDEP
- # define spin_lock_nested(lock, subclass) \
- do { \
-- migrate_disable(); \
- rt_spin_lock_nested(lock, subclass); \
- } while (0)
-
- #define spin_lock_bh_nested(lock, subclass) \
- do { \
- local_bh_disable(); \
-- migrate_disable(); \
- rt_spin_lock_nested(lock, subclass); \
- } while (0)
-
-@@ -81,7 +76,6 @@ extern int __lockfunc __rt_spin_trylock(
- do { \
- typecheck(unsigned long, flags); \
- flags = 0; \
-- migrate_disable(); \
- rt_spin_lock_nested(lock, subclass); \
- } while (0)
- #else
-@@ -117,16 +111,11 @@ static inline unsigned long spin_lock_tr
- /* FIXME: we need rt_spin_lock_nest_lock */
- #define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
-
--#define spin_unlock(lock) \
-- do { \
-- rt_spin_unlock(lock); \
-- migrate_enable(); \
-- } while (0)
-+#define spin_unlock(lock) rt_spin_unlock(lock)
-
- #define spin_unlock_bh(lock) \
- do { \
- rt_spin_unlock(lock); \
-- migrate_enable(); \
- local_bh_enable(); \
- } while (0)
-
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -204,8 +204,8 @@ struct hotplug_pcp {
- };
-
- #ifdef CONFIG_PREEMPT_RT_FULL
--# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
--# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
-+# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock)
-+# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock)
- #else
- # define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
- # define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
---- a/kernel/locking/lglock.c
-+++ b/kernel/locking/lglock.c
-@@ -10,7 +10,7 @@
- # define lg_do_unlock(l) arch_spin_unlock(l)
- #else
- # define lg_lock_ptr struct rt_mutex
--# define lg_do_lock(l) __rt_spin_lock(l)
-+# define lg_do_lock(l) __rt_spin_lock__no_mg(l)
- # define lg_do_unlock(l) __rt_spin_unlock(l)
- #endif
- /*
---- a/kernel/locking/rt.c
-+++ b/kernel/locking/rt.c
-@@ -235,7 +235,6 @@ EXPORT_SYMBOL(rt_read_trylock);
- void __lockfunc rt_write_lock(rwlock_t *rwlock)
- {
- rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
-- migrate_disable();
- __rt_spin_lock(&rwlock->lock);
- }
- EXPORT_SYMBOL(rt_write_lock);
-@@ -249,7 +248,6 @@ void __lockfunc rt_read_lock(rwlock_t *r
- * recursive read locks succeed when current owns the lock
- */
- if (rt_mutex_owner(lock) != current) {
-- migrate_disable();
- rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
- __rt_spin_lock(lock);
- }
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1103,8 +1103,16 @@ static void noinline __sched rt_spin_lo
- rt_mutex_adjust_prio(current);
- }
-
-+void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
-+{
-+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-+}
-+EXPORT_SYMBOL(rt_spin_lock__no_mg);
-+
- void __lockfunc rt_spin_lock(spinlock_t *lock)
- {
-+ migrate_disable();
- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- }
-@@ -1112,24 +1120,41 @@ EXPORT_SYMBOL(rt_spin_lock);
-
- void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
- {
-+ migrate_disable();
- rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
- }
- EXPORT_SYMBOL(__rt_spin_lock);
-
-+void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
-+{
-+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
-+}
-+EXPORT_SYMBOL(__rt_spin_lock__no_mg);
-+
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
- {
-+ migrate_disable();
- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
- spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- }
- EXPORT_SYMBOL(rt_spin_lock_nested);
- #endif
-
-+void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
-+{
-+ /* NOTE: we always pass in '1' for nested, for simplicity */
-+ spin_release(&lock->dep_map, 1, _RET_IP_);
-+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
-+}
-+EXPORT_SYMBOL(rt_spin_unlock__no_mg);
-+
- void __lockfunc rt_spin_unlock(spinlock_t *lock)
- {
- /* NOTE: we always pass in '1' for nested, for simplicity */
- spin_release(&lock->dep_map, 1, _RET_IP_);
- rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
-+ migrate_enable();
- }
- EXPORT_SYMBOL(rt_spin_unlock);
-
-@@ -1156,12 +1181,27 @@ int __lockfunc __rt_spin_trylock(struct
- return rt_mutex_trylock(lock);
- }
-
-+int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
-+{
-+ int ret;
-+
-+ ret = rt_mutex_trylock(&lock->lock);
-+ if (ret)
-+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-+ return ret;
-+}
-+EXPORT_SYMBOL(rt_spin_trylock__no_mg);
-+
- int __lockfunc rt_spin_trylock(spinlock_t *lock)
- {
-- int ret = rt_mutex_trylock(&lock->lock);
-+ int ret;
-
-+ migrate_disable();
-+ ret = rt_mutex_trylock(&lock->lock);
- if (ret)
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-+ else
-+ migrate_enable();
- return ret;
- }
- EXPORT_SYMBOL(rt_spin_trylock);
-@@ -1200,12 +1240,10 @@ int atomic_dec_and_spin_lock(atomic_t *a
- /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
- if (atomic_add_unless(atomic, -1, 1))
- return 0;
-- migrate_disable();
- rt_spin_lock(lock);
- if (atomic_dec_and_test(atomic))
- return 1;
- rt_spin_unlock(lock);
-- migrate_enable();
- return 0;
- }
- EXPORT_SYMBOL(atomic_dec_and_spin_lock);
diff --git a/patches/rtmutex-trylock-is-okay-on-RT.patch b/patches/rtmutex-trylock-is-okay-on-RT.patch
index 7f5cce53774406..e7dc522b1140dc 100644
--- a/patches/rtmutex-trylock-is-okay-on-RT.patch
+++ b/patches/rtmutex-trylock-is-okay-on-RT.patch
@@ -18,9 +18,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (WARN_ON(in_irq() || in_nmi()))
++ if (WARN_ON_ONCE(in_irq() || in_nmi()))
+#else
- if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
+ if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
+#endif
return 0;
diff --git a/patches/rtmutex_dont_include_rcu.patch b/patches/rtmutex_dont_include_rcu.patch
index d3774cde4636a6..2578a1998fac18 100644
--- a/patches/rtmutex_dont_include_rcu.patch
+++ b/patches/rtmutex_dont_include_rcu.patch
@@ -13,63 +13,145 @@ The RCU header pulls in spinlock.h and fails due not yet defined types:
| extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
| ^
-This patch moves the only RCU user from the header file into c file so the
-inclusion can be avoided.
+This patch moves the required RCU function from the rcupdate.h header file into
+a new header file which can be included by both users.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/rbtree.h | 11 ++---------
- lib/rbtree.c | 11 +++++++++++
- 2 files changed, 13 insertions(+), 9 deletions(-)
+ include/linux/rbtree.h | 2 -
+ include/linux/rcu_assign_pointer.h | 53 +++++++++++++++++++++++++++++++++++++
+ include/linux/rcupdate.h | 49 ----------------------------------
+ 3 files changed, 55 insertions(+), 49 deletions(-)
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
-@@ -31,7 +31,6 @@
+@@ -31,7 +31,7 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
-#include <linux/rcupdate.h>
++#include <linux/rcu_assign_pointer.h>
struct rb_node {
unsigned long __rb_parent_color;
-@@ -86,14 +85,8 @@ static inline void rb_link_node(struct r
- *rb_link = node;
- }
-
--static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
-- struct rb_node **rb_link)
--{
-- node->__rb_parent_color = (unsigned long)parent;
-- node->rb_left = node->rb_right = NULL;
--
-- rcu_assign_pointer(*rb_link, node);
--}
-+void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
-+ struct rb_node **rb_link);
+--- /dev/null
++++ b/include/linux/rcu_assign_pointer.h
+@@ -0,0 +1,53 @@
++#ifndef __LINUX_RCU_ASSIGN_POINTER_H__
++#define __LINUX_RCU_ASSIGN_POINTER_H__
++#include <linux/compiler.h>
++
++/**
++ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
++ * @v: The value to statically initialize with.
++ */
++#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
++
++/**
++ * rcu_assign_pointer() - assign to RCU-protected pointer
++ * @p: pointer to assign to
++ * @v: value to assign (publish)
++ *
++ * Assigns the specified value to the specified RCU-protected
++ * pointer, ensuring that any concurrent RCU readers will see
++ * any prior initialization.
++ *
++ * Inserts memory barriers on architectures that require them
++ * (which is most of them), and also prevents the compiler from
++ * reordering the code that initializes the structure after the pointer
++ * assignment. More importantly, this call documents which pointers
++ * will be dereferenced by RCU read-side code.
++ *
++ * In some special cases, you may use RCU_INIT_POINTER() instead
++ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
++ * to the fact that it does not constrain either the CPU or the compiler.
++ * That said, using RCU_INIT_POINTER() when you should have used
++ * rcu_assign_pointer() is a very bad thing that results in
++ * impossible-to-diagnose memory corruption. So please be careful.
++ * See the RCU_INIT_POINTER() comment header for details.
++ *
++ * Note that rcu_assign_pointer() evaluates each of its arguments only
++ * once, appearances notwithstanding. One of the "extra" evaluations
++ * is in typeof() and the other visible only to sparse (__CHECKER__),
++ * neither of which actually execute the argument. As with most cpp
++ * macros, this execute-arguments-only-once property is important, so
++ * please be careful when making changes to rcu_assign_pointer() and the
++ * other macros that it invokes.
++ */
++#define rcu_assign_pointer(p, v) \
++({ \
++ uintptr_t _r_a_p__v = (uintptr_t)(v); \
++ \
++ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
++ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
++ else \
++ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
++ _r_a_p__v; \
++})
++
++#endif
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -46,6 +46,7 @@
+ #include <linux/compiler.h>
+ #include <linux/ktime.h>
+ #include <linux/irqflags.h>
++#include <linux/rcu_assign_pointer.h>
- #define rb_entry_safe(ptr, type, member) \
- ({ typeof(ptr) ____ptr = (ptr); \
---- a/lib/rbtree.c
-+++ b/lib/rbtree.c
-@@ -23,6 +23,7 @@
+ #include <asm/barrier.h>
- #include <linux/rbtree_augmented.h>
- #include <linux/export.h>
-+#include <linux/rcupdate.h>
+@@ -628,54 +629,6 @@ static inline void rcu_preempt_sleep_che
+ })
- /*
- * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
-@@ -590,3 +591,13 @@ struct rb_node *rb_first_postorder(const
- return rb_left_deepest_node(root->rb_node);
- }
- EXPORT_SYMBOL(rb_first_postorder);
-+
-+void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
-+ struct rb_node **rb_link)
-+{
-+ node->__rb_parent_color = (unsigned long)parent;
-+ node->rb_left = node->rb_right = NULL;
-+
-+ rcu_assign_pointer(*rb_link, node);
-+}
-+EXPORT_SYMBOL(rb_link_node_rcu);
+ /**
+- * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
+- * @v: The value to statically initialize with.
+- */
+-#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
+-
+-/**
+- * rcu_assign_pointer() - assign to RCU-protected pointer
+- * @p: pointer to assign to
+- * @v: value to assign (publish)
+- *
+- * Assigns the specified value to the specified RCU-protected
+- * pointer, ensuring that any concurrent RCU readers will see
+- * any prior initialization.
+- *
+- * Inserts memory barriers on architectures that require them
+- * (which is most of them), and also prevents the compiler from
+- * reordering the code that initializes the structure after the pointer
+- * assignment. More importantly, this call documents which pointers
+- * will be dereferenced by RCU read-side code.
+- *
+- * In some special cases, you may use RCU_INIT_POINTER() instead
+- * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
+- * to the fact that it does not constrain either the CPU or the compiler.
+- * That said, using RCU_INIT_POINTER() when you should have used
+- * rcu_assign_pointer() is a very bad thing that results in
+- * impossible-to-diagnose memory corruption. So please be careful.
+- * See the RCU_INIT_POINTER() comment header for details.
+- *
+- * Note that rcu_assign_pointer() evaluates each of its arguments only
+- * once, appearances notwithstanding. One of the "extra" evaluations
+- * is in typeof() and the other visible only to sparse (__CHECKER__),
+- * neither of which actually execute the argument. As with most cpp
+- * macros, this execute-arguments-only-once property is important, so
+- * please be careful when making changes to rcu_assign_pointer() and the
+- * other macros that it invokes.
+- */
+-#define rcu_assign_pointer(p, v) \
+-({ \
+- uintptr_t _r_a_p__v = (uintptr_t)(v); \
+- \
+- if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
+- WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
+- else \
+- smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
+- _r_a_p__v; \
+-})
+-
+-/**
+ * rcu_access_pointer() - fetch RCU pointer with no dereferencing
+ * @p: The pointer to read
+ *
diff --git a/patches/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch b/patches/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
index 8ec5a8e74817a5..1774c4cef06ea8 100644
--- a/patches/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
+++ b/patches/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock(ap->lock);
/* If the device fell off, no sense in issuing commands */
-@@ -255,7 +255,7 @@ static unsigned int sas_ata_qc_issue(str
+@@ -252,7 +252,7 @@ static unsigned int sas_ata_qc_issue(str
out:
spin_lock(ap->lock);
diff --git a/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch b/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
index 2e9046666606ad..aeb61bfe25a74d 100644
--- a/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
+++ b/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
-@@ -1251,7 +1251,7 @@ static int sc16is7xx_probe(struct device
+@@ -1240,7 +1240,7 @@ static int sc16is7xx_probe(struct device
/* Setup interrupt */
ret = devm_request_irq(dev, irq, sc16is7xx_irq,
diff --git a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch b/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
index 75a27727b0a612..7620c76e13531a 100644
--- a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
+++ b/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -694,6 +694,7 @@ void init_dl_task_timer(struct sched_dl_
+@@ -697,6 +697,7 @@ void init_dl_task_timer(struct sched_dl_
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer->function = dl_task_timer;
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index eb42239a7335cc..2c4f1b610f85d1 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1865,6 +1865,9 @@ struct task_struct {
+@@ -1936,6 +1936,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
-@@ -2077,6 +2080,15 @@ extern struct pid *cad_pid;
+@@ -2174,6 +2177,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -39,17 +39,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -2084,6 +2096,7 @@ static inline void put_task_struct(struc
+@@ -2181,6 +2193,7 @@ static inline void put_task_struct(struc
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
+#endif
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- extern void task_cputime(struct task_struct *t,
+ struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+ struct task_struct *try_get_task_struct(struct task_struct **ptask);
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -253,7 +253,9 @@ static inline void put_signal_struct(str
+@@ -251,7 +251,9 @@ static inline void put_signal_struct(str
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
-@@ -270,7 +272,18 @@ void __put_task_struct(struct task_struc
+@@ -268,7 +270,18 @@ void __put_task_struct(struct task_struc
if (!profile_handoff_task(tsk))
free_task(tsk);
}
diff --git a/patches/sched-disable-rt-group-sched-on-rt.patch b/patches/sched-disable-rt-group-sched-on-rt.patch
index 8283585ac888f0..e50bb883204052 100644
--- a/patches/sched-disable-rt-group-sched-on-rt.patch
+++ b/patches/sched-disable-rt-group-sched-on-rt.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1029,6 +1029,7 @@ config CFS_BANDWIDTH
+@@ -1054,6 +1054,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
diff --git a/patches/sched-lazy_preempt-avoid-a-warning-in-the-RT-case.patch b/patches/sched-lazy_preempt-avoid-a-warning-in-the-RT-case.patch
deleted file mode 100644
index 0223da8789643d..00000000000000
--- a/patches/sched-lazy_preempt-avoid-a-warning-in-the-RT-case.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 14 Jul 2016 14:57:07 +0200
-Subject: [PATCH] sched: lazy_preempt: avoid a warning in the !RT case
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/sched/core.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -3518,7 +3518,7 @@ static __always_inline int preemptible_l
-
- #else
-
--static int preemptible_lazy(void)
-+static inline int preemptible_lazy(void)
- {
- return 1;
- }
diff --git a/patches/sched-limit-nr-migrate.patch b/patches/sched-limit-nr-migrate.patch
index d1a24b83a8232e..81270b7802fb59 100644
--- a/patches/sched-limit-nr-migrate.patch
+++ b/patches/sched-limit-nr-migrate.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -128,7 +128,11 @@ const_debug unsigned int sysctl_sched_fe
+@@ -129,7 +129,11 @@ const_debug unsigned int sysctl_sched_fe
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index aff8567e5a745b..0ed31683f73b1f 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -300,6 +300,11 @@ void synchronize_rcu(void);
+@@ -301,6 +301,11 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* #ifdef CONFIG_PREEMPT_RCU */
-@@ -325,6 +330,8 @@ static inline int rcu_preempt_depth(void
+@@ -326,6 +331,8 @@ static inline int rcu_preempt_depth(void
return 0;
}
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7537,7 +7537,7 @@ void __init sched_init(void)
+@@ -7697,7 +7697,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 87d7c5027e73bc..03fb860a319abc 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -8,9 +8,9 @@ we want to do in task switch and other atomic contexts.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/mm_types.h | 4 ++++
- include/linux/sched.h | 12 ++++++++++++
+ include/linux/sched.h | 11 +++++++++++
kernel/fork.c | 13 +++++++++++++
- kernel/sched/core.c | 18 ++++++++++++++++--
+ kernel/sched/core.c | 19 +++++++++++++++++--
4 files changed, 45 insertions(+), 2 deletions(-)
--- a/include/linux/mm_types.h
@@ -21,9 +21,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/uprobes.h>
+#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
+ #include <linux/workqueue.h>
#include <asm/page.h>
- #include <asm/mmu.h>
-@@ -502,6 +503,9 @@ struct mm_struct {
+@@ -508,6 +509,9 @@ struct mm_struct {
bool tlb_flush_pending;
#endif
struct uprobes_state uprobes_state;
@@ -35,14 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __user *bd_addr;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2640,12 +2640,24 @@ extern struct mm_struct * mm_alloc(void)
-
- /* mmdrop drops the mm and the page tables */
- extern void __mmdrop(struct mm_struct *);
-+
- static inline void mmdrop(struct mm_struct * mm)
- {
- if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+@@ -2857,6 +2857,17 @@ static inline void mmdrop(struct mm_stru
__mmdrop(mm);
}
@@ -57,12 +50,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+# define mmdrop_delayed(mm) mmdrop(mm)
+#endif
+
- /* mmput gets rid of the mappings and all user-space */
- extern void mmput(struct mm_struct *);
- /* Grab a reference to a task's mm, if it is not already going away */
+ static inline bool mmget_not_zero(struct mm_struct *mm)
+ {
+ return atomic_inc_not_zero(&mm->mm_users);
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -712,6 +712,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -715,6 +715,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -79,12 +72,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+#endif
+
- /*
- * Decrement the use count and release all resources for an mm.
- */
+ static inline void __mmput(struct mm_struct *mm)
+ {
+ VM_BUG_ON(atomic_read(&mm->mm_users));
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2642,8 +2642,12 @@ static struct rq *finish_task_switch(str
+@@ -2776,8 +2776,12 @@ static struct rq *finish_task_switch(str
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
@@ -98,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -5302,6 +5306,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5513,6 +5517,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -107,8 +100,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
-@@ -5316,7 +5322,11 @@ void idle_task_exit(void)
- switch_mm(mm, &init_mm, current);
+@@ -5527,7 +5533,12 @@ void idle_task_exit(void)
+ switch_mm_irqs_off(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
- mmdrop(mm);
@@ -117,17 +110,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * call mmdrop() nor mmdrop_delayed() from here.
+ */
+ per_cpu(idle_last_mm, smp_processor_id()) = mm;
++
}
/*
-@@ -5512,6 +5522,10 @@ migration_call(struct notifier_block *nf
-
- case CPU_DEAD:
- calc_load_migrate(rq);
-+ if (per_cpu(idle_last_mm, cpu)) {
-+ mmdrop(per_cpu(idle_last_mm, cpu));
-+ per_cpu(idle_last_mm, cpu) = NULL;
-+ }
- break;
+@@ -7402,6 +7413,10 @@ int sched_cpu_dying(unsigned int cpu)
+ update_max_interval();
+ nohz_balance_exit_idle(cpu);
+ hrtick_clear(rq);
++ if (per_cpu(idle_last_mm, cpu)) {
++ mmdrop(per_cpu(idle_last_mm, cpu));
++ per_cpu(idle_last_mm, cpu) = NULL;
++ }
+ return 0;
+ }
#endif
- }
diff --git a/patches/sched-preempt-Fix-preempt_count-manipulations.patch b/patches/sched-preempt-Fix-preempt_count-manipulations.patch
deleted file mode 100644
index c46c0980d8f34e..00000000000000
--- a/patches/sched-preempt-Fix-preempt_count-manipulations.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 16 May 2016 15:01:11 +0200
-Subject: [PATCH] sched,preempt: Fix preempt_count manipulations
-
-Vikram reported that his ARM64 compiler managed to 'optimize' away the
-preempt_count manipulations in code like:
-
- preempt_enable_no_resched();
- put_user();
- preempt_disable();
-
-Irrespective of that fact that that is horrible code that should be
-fixed for many reasons, it does highlight a deficiency in the generic
-preempt_count manipulators. As it is never right to combine/elide
-preempt_count manipulations like this.
-
-Therefore sprinkle some volatile in the two generic accessors to
-ensure the compiler is aware of the fact that the preempt_count is
-observed outside of the regular program-order view and thus cannot be
-optimized away like this.
-
-x86; the only arch not using the generic code is not affected as we
-do all this in asm in order to use the segment base per-cpu stuff.
-
-Cc: stable@vger.kernel.org
-Cc: stable-rt@vger.kernel.org
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Fixes: a787870924db ("sched, arch: Create asm/preempt.h")
-Reported-by: Vikram Mulukutla <markivx@codeaurora.org>
-Tested-by: Vikram Mulukutla <markivx@codeaurora.org>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/asm-generic/preempt.h | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/include/asm-generic/preempt.h
-+++ b/include/asm-generic/preempt.h
-@@ -7,10 +7,10 @@
-
- static __always_inline int preempt_count(void)
- {
-- return current_thread_info()->preempt_count;
-+ return READ_ONCE(current_thread_info()->preempt_count);
- }
-
--static __always_inline int *preempt_count_ptr(void)
-+static __always_inline volatile int *preempt_count_ptr(void)
- {
- return &current_thread_info()->preempt_count;
- }
diff --git a/patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch b/patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch
deleted file mode 100644
index 18c6174d114926..00000000000000
--- a/patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch
+++ /dev/null
@@ -1,261 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 18 Jan 2016 17:21:59 +0100
-Subject: sched: provide a tsk_nr_cpus_allowed() helper
-
-tsk_nr_cpus_allowed() is an accessor for task->nr_cpus_allowed which allows
-us to change the representation of ->nr_cpus_allowed if required.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/sched.h | 5 +++++
- kernel/sched/core.c | 2 +-
- kernel/sched/deadline.c | 28 ++++++++++++++--------------
- kernel/sched/rt.c | 24 ++++++++++++------------
- 4 files changed, 32 insertions(+), 27 deletions(-)
-
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1871,6 +1871,11 @@ extern int arch_task_struct_size __read_
- /* Future-safe accessor for struct task_struct's cpus_allowed. */
- #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-
-+static inline int tsk_nr_cpus_allowed(struct task_struct *p)
-+{
-+ return p->nr_cpus_allowed;
-+}
-+
- #define TNF_MIGRATED 0x01
- #define TNF_NO_GROUP 0x02
- #define TNF_SHARED 0x04
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -1515,7 +1515,7 @@ int select_task_rq(struct task_struct *p
- {
- lockdep_assert_held(&p->pi_lock);
-
-- if (p->nr_cpus_allowed > 1)
-+ if (tsk_nr_cpus_allowed(p) > 1)
- cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
-
- /*
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -134,7 +134,7 @@ static void inc_dl_migration(struct sche
- {
- struct task_struct *p = dl_task_of(dl_se);
-
-- if (p->nr_cpus_allowed > 1)
-+ if (tsk_nr_cpus_allowed(p) > 1)
- dl_rq->dl_nr_migratory++;
-
- update_dl_migration(dl_rq);
-@@ -144,7 +144,7 @@ static void dec_dl_migration(struct sche
- {
- struct task_struct *p = dl_task_of(dl_se);
-
-- if (p->nr_cpus_allowed > 1)
-+ if (tsk_nr_cpus_allowed(p) > 1)
- dl_rq->dl_nr_migratory--;
-
- update_dl_migration(dl_rq);
-@@ -966,7 +966,7 @@ static void enqueue_task_dl(struct rq *r
-
- enqueue_dl_entity(&p->dl, pi_se, flags);
-
-- if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
-+ if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
- enqueue_pushable_dl_task(rq, p);
- }
-
-@@ -1040,9 +1040,9 @@ select_task_rq_dl(struct task_struct *p,
- * try to make it stay here, it might be important.
- */
- if (unlikely(dl_task(curr)) &&
-- (curr->nr_cpus_allowed < 2 ||
-+ (tsk_nr_cpus_allowed(curr) < 2 ||
- !dl_entity_preempt(&p->dl, &curr->dl)) &&
-- (p->nr_cpus_allowed > 1)) {
-+ (tsk_nr_cpus_allowed(p) > 1)) {
- int target = find_later_rq(p);
-
- if (target != -1 &&
-@@ -1063,7 +1063,7 @@ static void check_preempt_equal_dl(struc
- * Current can't be migrated, useless to reschedule,
- * let's hope p can move out.
- */
-- if (rq->curr->nr_cpus_allowed == 1 ||
-+ if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
- cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
- return;
-
-@@ -1071,7 +1071,7 @@ static void check_preempt_equal_dl(struc
- * p is migratable, so let's not schedule it and
- * see if it is pushed or pulled somewhere else.
- */
-- if (p->nr_cpus_allowed != 1 &&
-+ if (tsk_nr_cpus_allowed(p) != 1 &&
- cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
- return;
-
-@@ -1185,7 +1185,7 @@ static void put_prev_task_dl(struct rq *
- {
- update_curr_dl(rq);
-
-- if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
-+ if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
- enqueue_pushable_dl_task(rq, p);
- }
-
-@@ -1286,7 +1286,7 @@ static int find_later_rq(struct task_str
- if (unlikely(!later_mask))
- return -1;
-
-- if (task->nr_cpus_allowed == 1)
-+ if (tsk_nr_cpus_allowed(task) == 1)
- return -1;
-
- /*
-@@ -1432,7 +1432,7 @@ static struct task_struct *pick_next_pus
-
- BUG_ON(rq->cpu != task_cpu(p));
- BUG_ON(task_current(rq, p));
-- BUG_ON(p->nr_cpus_allowed <= 1);
-+ BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
-
- BUG_ON(!task_on_rq_queued(p));
- BUG_ON(!dl_task(p));
-@@ -1471,7 +1471,7 @@ static int push_dl_task(struct rq *rq)
- */
- if (dl_task(rq->curr) &&
- dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
-- rq->curr->nr_cpus_allowed > 1) {
-+ tsk_nr_cpus_allowed(rq->curr) > 1) {
- resched_curr(rq);
- return 0;
- }
-@@ -1618,9 +1618,9 @@ static void task_woken_dl(struct rq *rq,
- {
- if (!task_running(rq, p) &&
- !test_tsk_need_resched(rq->curr) &&
-- p->nr_cpus_allowed > 1 &&
-+ tsk_nr_cpus_allowed(p) > 1 &&
- dl_task(rq->curr) &&
-- (rq->curr->nr_cpus_allowed < 2 ||
-+ (tsk_nr_cpus_allowed(rq->curr) < 2 ||
- !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
- push_dl_tasks(rq);
- }
-@@ -1724,7 +1724,7 @@ static void switched_to_dl(struct rq *rq
-
- if (task_on_rq_queued(p) && rq->curr != p) {
- #ifdef CONFIG_SMP
-- if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
-+ if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
- queue_push_tasks(rq);
- #else
- if (dl_task(rq->curr))
---- a/kernel/sched/rt.c
-+++ b/kernel/sched/rt.c
-@@ -334,7 +334,7 @@ static void inc_rt_migration(struct sche
- rt_rq = &rq_of_rt_rq(rt_rq)->rt;
-
- rt_rq->rt_nr_total++;
-- if (p->nr_cpus_allowed > 1)
-+ if (tsk_nr_cpus_allowed(p) > 1)
- rt_rq->rt_nr_migratory++;
-
- update_rt_migration(rt_rq);
-@@ -351,7 +351,7 @@ static void dec_rt_migration(struct sche
- rt_rq = &rq_of_rt_rq(rt_rq)->rt;
-
- rt_rq->rt_nr_total--;
-- if (p->nr_cpus_allowed > 1)
-+ if (tsk_nr_cpus_allowed(p) > 1)
- rt_rq->rt_nr_migratory--;
-
- update_rt_migration(rt_rq);
-@@ -1324,7 +1324,7 @@ enqueue_task_rt(struct rq *rq, struct ta
-
- enqueue_rt_entity(rt_se, flags);
-
-- if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
-+ if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
- enqueue_pushable_task(rq, p);
- }
-
-@@ -1413,7 +1413,7 @@ select_task_rq_rt(struct task_struct *p,
- * will have to sort it out.
- */
- if (curr && unlikely(rt_task(curr)) &&
-- (curr->nr_cpus_allowed < 2 ||
-+ (tsk_nr_cpus_allowed(curr) < 2 ||
- curr->prio <= p->prio)) {
- int target = find_lowest_rq(p);
-
-@@ -1437,7 +1437,7 @@ static void check_preempt_equal_prio(str
- * Current can't be migrated, useless to reschedule,
- * let's hope p can move out.
- */
-- if (rq->curr->nr_cpus_allowed == 1 ||
-+ if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
- !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
- return;
-
-@@ -1445,7 +1445,7 @@ static void check_preempt_equal_prio(str
- * p is migratable, so let's not schedule it and
- * see if it is pushed or pulled somewhere else.
- */
-- if (p->nr_cpus_allowed != 1
-+ if (tsk_nr_cpus_allowed(p) != 1
- && cpupri_find(&rq->rd->cpupri, p, NULL))
- return;
-
-@@ -1579,7 +1579,7 @@ static void put_prev_task_rt(struct rq *
- * The previous task needs to be made eligible for pushing
- * if it is still active
- */
-- if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
-+ if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
- enqueue_pushable_task(rq, p);
- }
-
-@@ -1629,7 +1629,7 @@ static int find_lowest_rq(struct task_st
- if (unlikely(!lowest_mask))
- return -1;
-
-- if (task->nr_cpus_allowed == 1)
-+ if (tsk_nr_cpus_allowed(task) == 1)
- return -1; /* No other targets possible */
-
- if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
-@@ -1762,7 +1762,7 @@ static struct task_struct *pick_next_pus
-
- BUG_ON(rq->cpu != task_cpu(p));
- BUG_ON(task_current(rq, p));
-- BUG_ON(p->nr_cpus_allowed <= 1);
-+ BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
-
- BUG_ON(!task_on_rq_queued(p));
- BUG_ON(!rt_task(p));
-@@ -2122,9 +2122,9 @@ static void task_woken_rt(struct rq *rq,
- {
- if (!task_running(rq, p) &&
- !test_tsk_need_resched(rq->curr) &&
-- p->nr_cpus_allowed > 1 &&
-+ tsk_nr_cpus_allowed(p) > 1 &&
- (dl_task(rq->curr) || rt_task(rq->curr)) &&
-- (rq->curr->nr_cpus_allowed < 2 ||
-+ (tsk_nr_cpus_allowed(rq->curr) < 2 ||
- rq->curr->prio <= p->prio))
- push_rt_tasks(rq);
- }
-@@ -2197,7 +2197,7 @@ static void switched_to_rt(struct rq *rq
- */
- if (task_on_rq_queued(p) && rq->curr != p) {
- #ifdef CONFIG_SMP
-- if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
-+ if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
- queue_push_tasks(rq);
- #else
- if (p->prio < rq->curr->prio)
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index 9b895ebd399a74..7ac693fb818063 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1393,6 +1393,7 @@ struct tlbflush_unmap_batch {
+@@ -1459,6 +1459,7 @@ struct tlbflush_unmap_batch {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
-@@ -2517,6 +2518,7 @@ extern void xtime_update(unsigned long t
+@@ -2649,6 +2650,7 @@ extern void xtime_update(unsigned long t
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void kick_process(struct task_struct *tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1931,8 +1931,25 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2023,8 +2023,25 @@ try_to_wake_up(struct task_struct *p, un
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -62,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -2061,6 +2078,18 @@ int wake_up_process(struct task_struct *
+@@ -2172,6 +2189,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return try_to_wake_up(p, state, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1128,6 +1128,7 @@ static inline void finish_lock_switch(st
+@@ -1138,6 +1138,7 @@ static inline void finish_lock_switch(st
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
diff --git a/patches/sched-ttwu-ensure-success-return-is-correct.patch b/patches/sched-ttwu-ensure-success-return-is-correct.patch
index 0516ff49638839..ef541e1c4a209d 100644
--- a/patches/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/patches/sched-ttwu-ensure-success-return-is-correct.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1938,8 +1938,10 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2030,8 +2030,10 @@ try_to_wake_up(struct task_struct *p, un
* if the wakeup condition is true.
*/
if (!(wake_flags & WF_LOCK_SLEEPER)) {
diff --git a/patches/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch b/patches/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch
deleted file mode 100644
index 89f28df83efb46..00000000000000
--- a/patches/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 18 Jan 2016 17:10:39 +0100
-Subject: sched: use tsk_cpus_allowed() instead of accessing
- ->cpus_allowed
-
-Use the future-safe accessor for struct task_struct's.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/sched/cpudeadline.c | 4 ++--
- kernel/sched/cpupri.c | 4 ++--
- kernel/sched/deadline.c | 2 +-
- 3 files changed, 5 insertions(+), 5 deletions(-)
-
---- a/kernel/sched/cpudeadline.c
-+++ b/kernel/sched/cpudeadline.c
-@@ -103,10 +103,10 @@ int cpudl_find(struct cpudl *cp, struct
- const struct sched_dl_entity *dl_se = &p->dl;
-
- if (later_mask &&
-- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
-+ cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) {
- best_cpu = cpumask_any(later_mask);
- goto out;
-- } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
-+ } else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) &&
- dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
- best_cpu = cpudl_maximum(cp);
- if (later_mask)
---- a/kernel/sched/cpupri.c
-+++ b/kernel/sched/cpupri.c
-@@ -103,11 +103,11 @@ int cpupri_find(struct cpupri *cp, struc
- if (skip)
- continue;
-
-- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
-+ if (cpumask_any_and(tsk_cpus_allowed(p), vec->mask) >= nr_cpu_ids)
- continue;
-
- if (lowest_mask) {
-- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
-+ cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask);
-
- /*
- * We have to ensure that we have at least one bit
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -1392,7 +1392,7 @@ static struct rq *find_lock_later_rq(str
- if (double_lock_balance(rq, later_rq)) {
- if (unlikely(task_rq(task) != rq ||
- !cpumask_test_cpu(later_rq->cpu,
-- &task->cpus_allowed) ||
-+ tsk_cpus_allowed(task)) ||
- task_running(rq, task) ||
- !dl_task(task) ||
- !task_on_rq_queued(task))) {
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index c921f4a01a90ff..6a31d23f1baea8 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3306,8 +3306,10 @@ static void __sched notrace __schedule(b
+@@ -3482,8 +3482,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
diff --git a/patches/scsi-fcoe-Fix-get_cpu-put_cpu_light-imbalance-in-fco.patch b/patches/scsi-fcoe-Fix-get_cpu-put_cpu_light-imbalance-in-fco.patch
deleted file mode 100644
index e2a663d98256d9..00000000000000
--- a/patches/scsi-fcoe-Fix-get_cpu-put_cpu_light-imbalance-in-fco.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From: Mike Galbraith <umgwanakikbuti@gmail.com>
-Date: Thu, 28 Jul 2016 06:04:49 +0200
-Subject: [PATCH] scsi/fcoe: Fix get_cpu()/put_cpu_light() imbalance in
- fcoe_recv_frame()
-
-During master->rt merge, I stumbled across the buglet below.
-
-Fix get_cpu()/put_cpu_light() imbalance.
-
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Mike Gabraith <umgwanakikbuti@gmail.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/scsi/fcoe/fcoe.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/scsi/fcoe/fcoe.c
-+++ b/drivers/scsi/fcoe/fcoe.c
-@@ -1814,7 +1814,7 @@ static void fcoe_recv_frame(struct sk_bu
- */
- hp = (struct fcoe_hdr *) skb_network_header(skb);
-
-- stats = per_cpu_ptr(lport->stats, get_cpu());
-+ stats = per_cpu_ptr(lport->stats, get_cpu_light());
- if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
- if (stats->ErrorFrames < 5)
- printk(KERN_WARNING "fcoe: FCoE version "
diff --git a/patches/scsi-fcoe-rt-aware.patch b/patches/scsi-fcoe-rt-aware.patch
index 731be3a4db0116..a39b3f4b109344 100644
--- a/patches/scsi-fcoe-rt-aware.patch
+++ b/patches/scsi-fcoe-rt-aware.patch
@@ -7,32 +7,14 @@ for migrate_disable() only.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- drivers/scsi/fcoe/fcoe.c | 18 +++++++++---------
+ drivers/scsi/fcoe/fcoe.c | 16 ++++++++--------
drivers/scsi/fcoe/fcoe_ctlr.c | 4 ++--
drivers/scsi/libfc/fc_exch.c | 4 ++--
- 3 files changed, 13 insertions(+), 13 deletions(-)
+ 3 files changed, 12 insertions(+), 12 deletions(-)
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
-@@ -1286,7 +1286,7 @@ static void fcoe_percpu_thread_destroy(u
- struct sk_buff *skb;
- #ifdef CONFIG_SMP
- struct fcoe_percpu_s *p0;
-- unsigned targ_cpu = get_cpu();
-+ unsigned targ_cpu = get_cpu_light();
- #endif /* CONFIG_SMP */
-
- FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
-@@ -1342,7 +1342,7 @@ static void fcoe_percpu_thread_destroy(u
- kfree_skb(skb);
- spin_unlock_bh(&p->fcoe_rx_list.lock);
- }
-- put_cpu();
-+ put_cpu_light();
- #else
- /*
- * This a non-SMP scenario where the singular Rx thread is
-@@ -1566,11 +1566,11 @@ static int fcoe_rcv(struct sk_buff *skb,
+@@ -1455,11 +1455,11 @@ static int fcoe_rcv(struct sk_buff *skb,
static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
@@ -47,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return rc;
}
-@@ -1766,11 +1766,11 @@ static inline int fcoe_filter_frames(str
+@@ -1646,11 +1646,11 @@ static inline int fcoe_filter_frames(str
return 0;
}
@@ -61,7 +43,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return -EINVAL;
}
-@@ -1846,13 +1846,13 @@ static void fcoe_recv_frame(struct sk_bu
+@@ -1693,7 +1693,7 @@ static void fcoe_recv_frame(struct sk_bu
+ */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+
+- stats = per_cpu_ptr(lport->stats, get_cpu());
++ stats = per_cpu_ptr(lport->stats, get_cpu_light());
+ if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+ if (stats->ErrorFrames < 5)
+ printk(KERN_WARNING "fcoe: FCoE version "
+@@ -1725,13 +1725,13 @@ static void fcoe_recv_frame(struct sk_bu
goto drop;
if (!fcoe_filter_frames(lport, fp)) {
@@ -79,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
-@@ -831,7 +831,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -834,7 +834,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
INIT_LIST_HEAD(&del_list);
@@ -88,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
-@@ -867,7 +867,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -870,7 +870,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
sel_time = fcf->time;
}
}
diff --git a/patches/seqlock-prevent-rt-starvation.patch b/patches/seqlock-prevent-rt-starvation.patch
index e84613c86f24df..9b97b8277e66c3 100644
--- a/patches/seqlock-prevent-rt-starvation.patch
+++ b/patches/seqlock-prevent-rt-starvation.patch
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* raw_write_seqcount_barrier - do a seq write barrier
* @s: pointer to seqcount_t
-@@ -425,10 +435,32 @@ typedef struct {
+@@ -428,10 +438,32 @@ typedef struct {
/*
* Read side functions for starting and finalizing a read side section.
*/
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
-@@ -443,36 +475,36 @@ static inline unsigned read_seqretry(con
+@@ -446,36 +478,36 @@ static inline unsigned read_seqretry(con
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
@@ -139,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_unlock_irq(&sl->lock);
}
-@@ -481,7 +513,7 @@ static inline unsigned long __write_seql
+@@ -484,7 +516,7 @@ static inline unsigned long __write_seql
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return flags;
}
-@@ -491,7 +523,7 @@ static inline unsigned long __write_seql
+@@ -494,7 +526,7 @@ static inline unsigned long __write_seql
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/net/dst.h
+++ b/include/net/dst.h
-@@ -449,7 +449,7 @@ static inline void dst_confirm(struct ds
+@@ -446,7 +446,7 @@ static inline void dst_confirm(struct ds
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
struct sk_buff *skb)
{
diff --git a/patches/series b/patches/series
index 8f9d915fc264e3..6e769c6a538de9 100644
--- a/patches/series
+++ b/patches/series
@@ -1,55 +1,24 @@
###########################################################
# DELTA against a known Linus release
###########################################################
+Using-BUG_ON-as-an-assert-is-_never_-acceptable.patch
+mm-filemap-don-t-plant-shadow-entries-without-radix-.patch
+mm-filemap-fix-mapping-nrpages-double-accounting-in-.patch
############################################################
# UPSTREAM changes queued
############################################################
-ARM-imx-always-use-TWD-on-IMX6Q.patch
-tracing-Show-the-preempt-count-of-when-the-event-was.patch
-
-# timer: "Refactor the timer wheel v4' + 2 prerequisites
-drm-i915-Use-consistent-forcewake-auto-release-timeo.patch
-timer-add-setup_deferrable_timer-macro.patch
-
-timer-Make-pinned-a-timer-property.patch
-x86-apic-uv-Initialize-timer-as-pinned.patch
-x86-mce-Initialize-timer-as-pinned.patch
-driver-net-ethernet-tile-Initialize-timer-as-pinned.patch
-drivers-tty-metag_da-Initialize-timer-as-pinned.patch
-drivers-tty-mips_ejtag-Initialize-timer-as-pinned.patch
-net-ipv4-inet-Initialize-timers-as-pinned.patch
-timer-Remove-mod_timer_pinned.patch
-signal-Use-hrtimer-for-sigtimedwait.patch
-hlist-Add-hlist_is_singular_node-helper.patch
-timer-Give-a-few-structs-and-members-proper-names.patch
-timer-Reduce-the-CPU-index-space-to-256k.patch
-timer-Switch-to-a-non-cascading-wheel.patch
-timer-Remove-slack-leftovers.patch
-timer-Move-__run_timers-function.patch
-timer-Optimize-collect-timers-for-NOHZ.patch
-tick-sched-Remove-pointless-empty-function.patch
-timer-Forward-wheel-clock-whenever-possible.patch
-timer-Only-wake-softirq-if-necessary.patch
-timer-Split-out-index-calculation.patch
-timer-Optimization-for-same-expiry-time-in-mod_timer.patch
############################################################
# UPSTREAM FIXES, patches pending
############################################################
timer-make-the-base-lock-raw.patch
+jbd2-Fix-lockdep-annotation-in-add_transaction_credi.patch
############################################################
# Stuff broken upstream, patches submitted
############################################################
-sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch
-sched-provide-a-tsk_nr_cpus_allowed-helper.patch
sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
-crypto-ccp-remove-rwlocks_types.h.patch
-infiniband-ulp-ipoib-remove-pkey_mutex.patch
-sched-preempt-Fix-preempt_count-manipulations.patch
-x86-mm-disable-preemption-during-CR3-read-write.patch
-lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
# Those two should vanish soon (not use PIT during bootup)
at91_dont_enable_disable_clock.patch
@@ -64,6 +33,12 @@ rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patc
# Stuff broken upstream, need to be sent
############################################################
rtmutex--Handle-non-enqueued-waiters-gracefully.patch
+fs-dcache-include-wait.h.patch
+rbtree-include-rcu.h-because-we-use-it.patch
+fs-dcache-init-in_lookup_hashtable.patch
+iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
+iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
+lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
# Wants a different fix for upstream
@@ -240,10 +215,8 @@ list_bl.h-make-list-head-locking-RT-safe.patch
list_bl-fixup-bogus-lockdep-warning.patch
genirq-disable-irqpoll-on-rt.patch
genirq-force-threading.patch
-genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
# DRIVERS NET
-drivers-net-fix-livelock-issues.patch
drivers-net-vortex-fix-locking-issues.patch
# MM PAGE_ALLOC
@@ -299,7 +272,6 @@ hrtimer-enfore-64byte-alignment.patch
hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
timer-fd-avoid-live-lock.patch
-hrtimer-Move-schedule_work-call-to-helper-thread.patch
tick-broadcast--Make-hrtimer-irqsafe.patch
# POSIX-CPU-TIMERS
@@ -345,17 +317,15 @@ kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
softirq-split-timer-softirqs-out-of-ksoftirqd.patch
rtmutex-trylock-is-okay-on-RT.patch
-kernel-rtmutex-only-warn-once-on-a-try-lock-from-bad.patch
-# RAID5
-md-raid5-percpu-handling-rt-aware.patch
-#
-i915_compile_fix.patch
+# compile fix due to rtmutex locks
+gpu_don_t_check_for_the_lock_owner.patch
+fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
# FUTEX/RTMUTEX
rtmutex-futex-prepare-rt.patch
futex-requeue-pi-fix.patch
-0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
# RTMUTEX
pid.h-include-atomic.h.patch
@@ -365,7 +335,7 @@ spinlock-types-separate-raw.patch
rtmutex-avoid-include-hell.patch
rtmutex_dont_include_rcu.patch
rt-add-rt-locks.patch
-rtmutex-Use-chainwalking-control-enum.patch
+kernel-futex-don-t-deboost-too-early.patch
rtmutex-add-a-first-shot-of-ww_mutex.patch
ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -374,12 +344,10 @@ peter_zijlstra-frob-rcu.patch
rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
-rcutorture-comment-out-rcu_bh-ops-on-PREEMPT_RT_FULL.patch
-rcu-disable-more-spots-of-rcu_bh.patch
# LGLOCKS - lovely
lglocks-rt.patch
-lockinglglocks_Use_preempt_enabledisable_nort()_in_lg_double_locklg_double_unlock.patch
+lockinglglocks_Use_preempt_enabledisable_nort.patch
# STOP machine (depend on lglock & rtmutex)
stomp-machine-create-lg_global_trylock_relax-primiti.patch
@@ -396,6 +364,8 @@ wait.h-include-atomic.h.patch
work-simple-Simple-work-queue-implemenation.patch
completion-use-simple-wait-queues.patch
fs-aio-simple-simple-work.patch
+genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+hrtimer-Move-schedule_work-call-to-helper-thread.patch
# FS
fs-namespace-preemption-fix.patch
@@ -419,6 +389,7 @@ block-mq-use-cpu_light.patch
block-mq-drop-preempt-disable.patch
block-mq-don-t-complete-requests-via-IPI.patch
dump-stack-don-t-disable-preemption-during-trace.patch
+md-raid5-percpu-handling-rt-aware.patch
# CPU CHILL
rt-introduce-cpu-chill.patch
@@ -433,8 +404,8 @@ block-use-cpu-chill.patch
# FS LIVELOCK PREVENTION
fs-dcache-use-cpu-chill-in-trylock-loops.patch
-fs-dcache-resched-chill-only-if-we-make-no-progress.patch
net-use-cpu-chill.patch
+fs-dcache-use-swait_queue-instead-of-waitqueue.patch
# WORKQUEUE more fixes
workqueue-use-rcu.patch
@@ -452,6 +423,9 @@ debugobjects-rt.patch
# JUMPLABEL
jump-label-rt.patch
+# SEQLOCKS
+seqlock-prevent-rt-starvation.patch
+
# NETWORKING
sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
@@ -460,12 +434,10 @@ net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
net-move-xmit_recursion-to-per-task-variable-on-RT.patch
net-provide-a-way-to-delegate-processing-a-softirq-t.patch
net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+net-Qdisc-use-a-seqlock-instead-seqcount.patch
net-add-back-the-missing-serialization-in-ip_send_un.patch
net-add-a-lock-around-icmp_sk.patch
-# NETWORK livelock fix
-net-tx-action-avoid-livelock-on-rt.patch
-
# NETWORK DEBUGGING AID
ping-sysrq.patch
@@ -522,7 +494,6 @@ KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
# SCSI/FCOE
scsi-fcoe-rt-aware.patch
-scsi-fcoe-Fix-get_cpu-put_cpu_light-imbalance-in-fco.patch
sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
# X86 crypto
@@ -541,9 +512,6 @@ cpumask-disable-offstack-on-rt.patch
# RANDOM
random-make-it-work-on-rt.patch
-# SEQLOCKS
-seqlock-prevent-rt-starvation.patch
-
# HOTPLUG
cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
cpu-rt-rework-cpu-down.patch
@@ -553,8 +521,6 @@ kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
cpu_down_move_migrate_enable_back.patch
hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
-rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch
-kernel-futex-don-t-deboost-too-early.patch
rt-locking-Reenable-migration-accross-schedule.patch
# SCSCI QLA2xxx
@@ -585,21 +551,15 @@ rcu-make-RCU_BOOST-default-on-RT.patch
# PREEMPT LAZY
preempt-lazy-support.patch
-preempt-lazy-check-preempt_schedule.patch
-sched-lazy_preempt-avoid-a-warning-in-the-RT-case.patch
x86-preempt-lazy.patch
-x86-preempt-lazy-fixup-should_resched.patch
arm-preempt-lazy-support.patch
-arm-lazy-preempt-correct-resched-condition.patch
powerpc-preempt-lazy-support.patch
arch-arm64-Add-lazy-preempt-support.patch
-arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch
# LEDS
leds-trigger-disable-CPU-trigger-on-RT.patch
# DRIVERS
-i2c-omap-drop-the-lock-hard-irq-context.patch
mmci-remove-bogus-irq-save.patch
cpufreq-drop-K8-s-driver-from-beeing-selected.patch
drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
diff --git a/patches/signal-Use-hrtimer-for-sigtimedwait.patch b/patches/signal-Use-hrtimer-for-sigtimedwait.patch
deleted file mode 100644
index 8d2f263aa91a75..00000000000000
--- a/patches/signal-Use-hrtimer-for-sigtimedwait.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:25 +0000
-Subject: [PATCH 10/22] signal: Use hrtimer for sigtimedwait
-
-We've converted most timeout related syscalls to hrtimers. sigtimedwait() did
-not get this treatment. Convert it so we get a reasonable accuracy and remove
-the user space exposure to the timer wheel properties.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Cyril Hrubis <chrubis@suse.cz>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/signal.c | 24 ++++++++++--------------
- 1 file changed, 10 insertions(+), 14 deletions(-)
-
---- a/kernel/signal.c
-+++ b/kernel/signal.c
-@@ -2751,23 +2751,18 @@ int copy_siginfo_to_user(siginfo_t __use
- * @ts: upper bound on process time suspension
- */
- int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
-- const struct timespec *ts)
-+ const struct timespec *ts)
- {
-+ ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
- struct task_struct *tsk = current;
-- long timeout = MAX_SCHEDULE_TIMEOUT;
- sigset_t mask = *which;
-- int sig;
-+ int sig, ret = 0;
-
- if (ts) {
- if (!timespec_valid(ts))
- return -EINVAL;
-- timeout = timespec_to_jiffies(ts);
-- /*
-- * We can be close to the next tick, add another one
-- * to ensure we will wait at least the time asked for.
-- */
-- if (ts->tv_sec || ts->tv_nsec)
-- timeout++;
-+ timeout = timespec_to_ktime(*ts);
-+ to = &timeout;
- }
-
- /*
-@@ -2778,7 +2773,7 @@ int do_sigtimedwait(const sigset_t *whic
-
- spin_lock_irq(&tsk->sighand->siglock);
- sig = dequeue_signal(tsk, &mask, info);
-- if (!sig && timeout) {
-+ if (!sig && timeout.tv64) {
- /*
- * None ready, temporarily unblock those we're interested
- * while we are sleeping in so that we'll be awakened when
-@@ -2790,8 +2785,9 @@ int do_sigtimedwait(const sigset_t *whic
- recalc_sigpending();
- spin_unlock_irq(&tsk->sighand->siglock);
-
-- timeout = freezable_schedule_timeout_interruptible(timeout);
--
-+ __set_current_state(TASK_INTERRUPTIBLE);
-+ ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
-+ HRTIMER_MODE_REL);
- spin_lock_irq(&tsk->sighand->siglock);
- __set_task_blocked(tsk, &tsk->real_blocked);
- sigemptyset(&tsk->real_blocked);
-@@ -2801,7 +2797,7 @@ int do_sigtimedwait(const sigset_t *whic
-
- if (sig)
- return sig;
-- return timeout ? -EINTR : -EAGAIN;
-+ return ret ? -EINTR : -EAGAIN;
- }
-
- /**
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 5b3d87fcb8c161..5a9024255c4338 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1589,6 +1589,7 @@ struct task_struct {
+@@ -1659,6 +1659,7 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1352,6 +1352,7 @@ static struct task_struct *copy_process(
+@@ -1399,6 +1399,7 @@ static struct task_struct *copy_process(
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index a5cdaf1527879b..4163d9e2445f3a 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2776,6 +2776,7 @@ struct softnet_data {
+@@ -2794,6 +2794,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -283,6 +283,7 @@ struct sk_buff_head {
+@@ -284,6 +284,7 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
struct sk_buff;
-@@ -1538,6 +1539,12 @@ static inline void skb_queue_head_init(s
+@@ -1565,6 +1566,12 @@ static inline void skb_queue_head_init(s
__skb_queue_head_init(list);
}
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -209,14 +209,14 @@ static inline struct hlist_head *dev_ind
+@@ -211,14 +211,14 @@ static inline struct hlist_head *dev_ind
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -4265,7 +4265,7 @@ static void flush_backlog(void *arg)
+@@ -4322,7 +4322,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -4274,10 +4274,13 @@ static void flush_backlog(void *arg)
+@@ -4331,10 +4331,13 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int napi_gro_complete(struct sk_buff *skb)
-@@ -7790,6 +7793,9 @@ static int dev_cpu_callback(struct notif
+@@ -7992,6 +7995,9 @@ static int dev_cpu_callback(struct notif
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NOTIFY_OK;
}
-@@ -8091,8 +8097,9 @@ static int __init net_dev_init(void)
+@@ -8293,8 +8299,9 @@ static int __init net_dev_init(void)
for_each_possible_cpu(i) {
struct softnet_data *sd = &per_cpu(softnet_data, i);
diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
index 9cae4816650c6a..d0679b2f9aa26b 100644
--- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1747,7 +1747,7 @@ endchoice
+@@ -1801,7 +1801,7 @@ config SLAB_FREELIST_RANDOM
config SLUB_CPU_PARTIAL
default y
diff --git a/patches/slub-enable-irqs-for-no-wait.patch b/patches/slub-enable-irqs-for-no-wait.patch
index 48956de951256b..b4af19555631ed 100644
--- a/patches/slub-enable-irqs-for-no-wait.patch
+++ b/patches/slub-enable-irqs-for-no-wait.patch
@@ -12,10 +12,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1418,14 +1418,17 @@ static struct page *allocate_slab(struct
- gfp_t alloc_gfp;
+@@ -1533,14 +1533,17 @@ static struct page *allocate_slab(struct
void *start, *p;
int idx, order;
+ bool shuffle;
+ bool enableirqs = false;
flags &= gfp_allowed_mask;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
flags |= s->allocflags;
-@@ -1496,11 +1499,7 @@ static struct page *allocate_slab(struct
+@@ -1615,11 +1618,7 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
diff --git a/patches/softirq-disable-softirq-stacks-for-rt.patch b/patches/softirq-disable-softirq-stacks-for-rt.patch
index d6b1d5e8234efe..49ad29a8debc5d 100644
--- a/patches/softirq-disable-softirq-stacks-for-rt.patch
+++ b/patches/softirq-disable-softirq-stacks-for-rt.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
-@@ -614,6 +614,7 @@ void irq_ctx_init(void)
+@@ -633,6 +633,7 @@ void irq_ctx_init(void)
}
}
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void)
{
struct thread_info *curtp, *irqtp;
-@@ -631,6 +632,7 @@ void do_softirq_own_stack(void)
+@@ -650,6 +651,7 @@ void do_softirq_own_stack(void)
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
}
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void fixup_irqs(void)
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -799,6 +799,7 @@ END(native_load_gs_index)
+@@ -817,6 +817,7 @@ END(native_load_gs_index)
jmp 2b
.previous
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
pushq %rbp
-@@ -811,6 +812,7 @@ ENTRY(do_softirq_own_stack)
+@@ -829,6 +830,7 @@ ENTRY(do_softirq_own_stack)
decl PER_CPU_VAR(irq_count)
ret
END(do_softirq_own_stack)
@@ -127,15 +127,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
-@@ -128,6 +128,7 @@ void irq_ctx_init(int cpu)
+@@ -127,6 +127,7 @@ void irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}
+#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq_own_stack(void)
{
- struct thread_info *curstk;
-@@ -146,6 +147,7 @@ void do_softirq_own_stack(void)
+ struct irq_stack *irqstk;
+@@ -143,6 +144,7 @@ void do_softirq_own_stack(void)
call_on_stack(__do_softirq, isp);
}
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -458,7 +458,7 @@ struct softirq_action
+@@ -464,7 +464,7 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index fe2e349d0ec789..888f3d79e9a237 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NOTIFY_OK;
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2264,6 +2264,7 @@ static inline void __netif_reschedule(st
+@@ -2268,6 +2268,7 @@ static void __netif_reschedule(struct Qd
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __netif_schedule(struct Qdisc *q)
-@@ -2345,6 +2346,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2349,6 +2350,7 @@ void __dev_kfree_skb_irq(struct sk_buff
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -3730,6 +3732,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -3777,6 +3779,7 @@ static int enqueue_to_backlog(struct sk_
rps_unlock(sd);
local_irq_restore(flags);
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -4735,6 +4738,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4795,6 +4798,7 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Send pending IPI's to kick RPS processing on remote cpus. */
while (remsd) {
-@@ -4748,6 +4752,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4808,6 +4812,7 @@ static void net_rps_action_and_irq_enabl
} else
#endif
local_irq_enable();
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4829,6 +4834,7 @@ void __napi_schedule(struct napi_struct
+@@ -4889,6 +4894,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -7775,6 +7781,7 @@ static int dev_cpu_callback(struct notif
+@@ -7977,6 +7983,7 @@ static int dev_cpu_callback(struct notif
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 2d06ad6e540632..1fd26eea5e189f 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* _LINUX_BH_H */
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -455,10 +455,11 @@ struct softirq_action
+@@ -461,10 +461,11 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
-@@ -466,6 +467,9 @@ static inline void do_softirq_own_stack(
+@@ -472,6 +473,9 @@ static inline void do_softirq_own_stack(
__do_softirq();
}
#endif
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-@@ -473,6 +477,7 @@ extern void __raise_softirq_irqoff(unsig
+@@ -479,6 +483,7 @@ extern void __raise_softirq_irqoff(unsig
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-@@ -630,6 +635,12 @@ void tasklet_hrtimer_cancel(struct taskl
+@@ -636,6 +641,12 @@ void tasklet_hrtimer_cancel(struct taskl
tasklet_kill(&ttimer->tasklet);
}
@@ -172,7 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Are we in NMI context?
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1868,6 +1868,8 @@ struct task_struct {
+@@ -1939,6 +1939,8 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
@@ -181,7 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
-@@ -2136,6 +2138,7 @@ extern void thread_group_cputime_adjuste
+@@ -2236,6 +2238,7 @@ extern void thread_group_cputime_adjuste
/*
* Per process flags
*/
@@ -785,7 +785,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
.thread_comm = "ksoftirqd/%u",
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -878,14 +878,7 @@ static bool can_stop_idle_tick(int cpu,
+@@ -879,14 +879,7 @@ static bool can_stop_idle_tick(int cpu,
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
@@ -803,7 +803,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3801,11 +3801,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3848,11 +3848,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
diff --git a/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch b/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
index 5914cdb70068ad..66c1f9b861cc0e 100644
--- a/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
+++ b/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -10,7 +10,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
-@@ -184,12 +184,10 @@ config NR_CPUS
+@@ -187,12 +187,10 @@ config NR_CPUS
source kernel/Kconfig.hz
config RWSEM_GENERIC_SPINLOCK
diff --git a/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch b/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
index e6f243fc0a2958..6d207a44009025 100644
--- a/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
+++ b/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
@@ -31,14 +31,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define lglock spinlock
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
-@@ -34,6 +34,7 @@ extern int atomic_dec_and_spin_lock(atom
- */
+@@ -40,6 +40,7 @@ extern int atomic_dec_and_spin_lock(atom
+ extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
- #define spin_lock(lock) \
- do { \
+ #define spin_lock(lock) rt_spin_lock(lock)
+
--- a/kernel/locking/lglock.c
+++ b/kernel/locking/lglock.c
@@ -127,3 +127,28 @@ void lg_global_unlock(struct lglock *lg)
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1151,6 +1151,11 @@ void __lockfunc rt_spin_unlock_wait(spin
+@@ -1215,6 +1215,11 @@ void __lockfunc rt_spin_unlock_wait(spin
}
EXPORT_SYMBOL(rt_spin_unlock_wait);
@@ -81,6 +81,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ return rt_mutex_trylock(lock);
+}
+
- int __lockfunc rt_spin_trylock(spinlock_t *lock)
+ int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
{
- int ret = rt_mutex_trylock(&lock->lock);
+ int ret;
diff --git a/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch b/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
index e9b32347d90cdc..dc0ba8d4cd4ff5 100644
--- a/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
+++ b/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
-@@ -313,18 +313,21 @@ static DEFINE_MUTEX(stop_cpus_mutex);
+@@ -321,18 +321,21 @@ static DEFINE_MUTEX(stop_cpus_mutex);
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg,
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_cpu(cpu, cpumask) {
work = &per_cpu(cpu_stopper.stop_work, cpu);
work->fn = fn;
-@@ -344,7 +347,7 @@ static int __stop_cpus(const struct cpum
+@@ -352,7 +355,7 @@ static int __stop_cpus(const struct cpum
struct cpu_stop_done done;
cpu_stop_init_done(&done, cpumask_weight(cpumask));
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -ENOENT;
wait_for_completion(&done.completion);
return done.ret;
-@@ -532,6 +535,8 @@ static int __init cpu_stop_init(void)
+@@ -540,6 +543,8 @@ static int __init cpu_stop_init(void)
INIT_LIST_HEAD(&stopper->works);
}
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
stop_machine_unpark(raw_smp_processor_id());
stop_machine_initialized = true;
-@@ -626,7 +631,7 @@ int stop_machine_from_inactive_cpu(cpu_s
+@@ -634,7 +639,7 @@ int stop_machine_from_inactive_cpu(cpu_s
set_state(&msdata, MULTI_STOP_PREPARE);
cpu_stop_init_done(&done, num_active_cpus());
queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
diff --git a/patches/stop-machine-raw-lock.patch b/patches/stop-machine-raw-lock.patch
index 2879faa2e0f673..f52693b1a464d7 100644
--- a/patches/stop-machine-raw-lock.patch
+++ b/patches/stop-machine-raw-lock.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
-@@ -36,7 +36,7 @@ struct cpu_stop_done {
+@@ -37,7 +37,7 @@ struct cpu_stop_done {
struct cpu_stopper {
struct task_struct *thread;
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bool enabled; /* is this stopper enabled? */
struct list_head works; /* list of pending works */
-@@ -82,14 +82,14 @@ static bool cpu_stop_queue_work(unsigned
+@@ -83,14 +83,14 @@ static bool cpu_stop_queue_work(unsigned
unsigned long flags;
bool enabled;
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return enabled;
}
-@@ -224,8 +224,8 @@ static int cpu_stop_queue_two_works(int
+@@ -232,8 +232,8 @@ static int cpu_stop_queue_two_works(int
int err;
lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
err = -ENOENT;
if (!stopper1->enabled || !stopper2->enabled)
-@@ -235,8 +235,8 @@ static int cpu_stop_queue_two_works(int
+@@ -243,8 +243,8 @@ static int cpu_stop_queue_two_works(int
__cpu_stop_queue_work(stopper1, work1);
__cpu_stop_queue_work(stopper2, work2);
unlock:
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
return err;
-@@ -425,9 +425,9 @@ static int cpu_stop_should_run(unsigned
+@@ -433,9 +433,9 @@ static int cpu_stop_should_run(unsigned
unsigned long flags;
int run;
@@ -71,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return run;
}
-@@ -438,13 +438,13 @@ static void cpu_stopper_thread(unsigned
+@@ -446,13 +446,13 @@ static void cpu_stopper_thread(unsigned
repeat:
work = NULL;
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (work) {
cpu_stop_fn_t fn = work->fn;
-@@ -528,7 +528,7 @@ static int __init cpu_stop_init(void)
+@@ -536,7 +536,7 @@ static int __init cpu_stop_init(void)
for_each_possible_cpu(cpu) {
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
diff --git a/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
index a4bb4178fd6f1d..83e60c5ddba861 100644
--- a/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
+++ b/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
-@@ -452,6 +452,16 @@ static void cpu_stopper_thread(unsigned
+@@ -460,6 +460,16 @@ static void cpu_stopper_thread(unsigned
struct cpu_stop_done *done = work->done;
int ret;
diff --git a/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
index 5a1e1eb8430b18..c248ca0aa50940 100644
--- a/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
+++ b/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
-@@ -344,7 +344,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -396,7 +396,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
goto out;
}
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
atomic_long_inc(&pool->sp_stats.packets);
-@@ -380,7 +380,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -432,7 +432,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
atomic_long_inc(&pool->sp_stats.threads_woken);
wake_up_process(rqstp->rq_task);
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
}
rcu_read_unlock();
-@@ -401,7 +401,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -453,7 +453,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
goto redo_search;
}
rqstp = NULL;
diff --git a/patches/suspend-prevernt-might-sleep-splats.patch b/patches/suspend-prevernt-might-sleep-splats.patch
index e0b981a02873ca..512b07aadf73d4 100644
--- a/patches/suspend-prevernt-might-sleep-splats.patch
+++ b/patches/suspend-prevernt-might-sleep-splats.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -484,6 +484,7 @@ extern enum system_states {
+@@ -491,6 +491,7 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TAINT_PROPRIETARY_MODULE 0
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
-@@ -285,6 +285,8 @@ static int create_image(int platform_mod
+@@ -286,6 +286,8 @@ static int create_image(int platform_mod
local_irq_disable();
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
error = syscore_suspend();
if (error) {
printk(KERN_ERR "PM: Some system devices failed to power down, "
-@@ -314,6 +316,7 @@ static int create_image(int platform_mod
+@@ -315,6 +317,7 @@ static int create_image(int platform_mod
syscore_resume();
Enable_irqs:
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
Enable_cpus:
-@@ -438,6 +441,7 @@ static int resume_target_kernel(bool pla
+@@ -444,6 +447,7 @@ static int resume_target_kernel(bool pla
goto Enable_cpus;
local_irq_disable();
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
error = syscore_suspend();
if (error)
-@@ -471,6 +475,7 @@ static int resume_target_kernel(bool pla
+@@ -477,6 +481,7 @@ static int resume_target_kernel(bool pla
syscore_resume();
Enable_irqs:
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
Enable_cpus:
-@@ -556,6 +561,7 @@ int hibernation_platform_enter(void)
+@@ -562,6 +567,7 @@ int hibernation_platform_enter(void)
goto Enable_cpus;
local_irq_disable();
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
syscore_suspend();
if (pm_wakeup_pending()) {
error = -EAGAIN;
-@@ -568,6 +574,7 @@ int hibernation_platform_enter(void)
+@@ -574,6 +580,7 @@ int hibernation_platform_enter(void)
Power_up:
syscore_resume();
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Enable_cpus:
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
-@@ -359,6 +359,8 @@ static int suspend_enter(suspend_state_t
+@@ -361,6 +361,8 @@ static int suspend_enter(suspend_state_t
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
-@@ -375,6 +377,8 @@ static int suspend_enter(suspend_state_t
+@@ -377,6 +379,8 @@ static int suspend_enter(suspend_state_t
syscore_resume();
}
diff --git a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
index 1f66e18dc0af80..803a0bd02b1b4c 100644
--- a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -494,8 +494,9 @@ static inline struct task_struct *this_c
+@@ -500,8 +500,9 @@ static inline struct task_struct *this_c
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
-@@ -520,27 +521,36 @@ struct tasklet_struct name = { NULL, 0,
+@@ -526,27 +527,36 @@ struct tasklet_struct name = { NULL, 0,
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
-@@ -589,12 +599,7 @@ static inline void tasklet_disable(struc
+@@ -595,12 +605,7 @@ static inline void tasklet_disable(struc
smp_mb();
}
diff --git a/patches/tick-broadcast--Make-hrtimer-irqsafe.patch b/patches/tick-broadcast--Make-hrtimer-irqsafe.patch
index 1123a4de5e43df..ae524bc7ed7f56 100644
--- a/patches/tick-broadcast--Make-hrtimer-irqsafe.patch
+++ b/patches/tick-broadcast--Make-hrtimer-irqsafe.patch
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
-@@ -106,5 +106,6 @@ void tick_setup_hrtimer_broadcast(void)
+@@ -107,5 +107,6 @@ void tick_setup_hrtimer_broadcast(void)
{
hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
bctimer.function = bc_handler;
diff --git a/patches/tick-sched-Remove-pointless-empty-function.patch b/patches/tick-sched-Remove-pointless-empty-function.patch
deleted file mode 100644
index 8dcbae1957996e..00000000000000
--- a/patches/tick-sched-Remove-pointless-empty-function.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:35 +0000
-Subject: [PATCH 18/22] tick/sched: Remove pointless empty function
-
-This was a failed attempt to optimize the timer expiry in idle, which was
-disabled and never revisited. Remove the cruft.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/tick-sched.c | 33 +--------------------------------
- 1 file changed, 1 insertion(+), 32 deletions(-)
-
---- a/kernel/time/tick-sched.c
-+++ b/kernel/time/tick-sched.c
-@@ -1091,35 +1091,6 @@ static void tick_nohz_switch_to_nohz(voi
- tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
- }
-
--/*
-- * When NOHZ is enabled and the tick is stopped, we need to kick the
-- * tick timer from irq_enter() so that the jiffies update is kept
-- * alive during long running softirqs. That's ugly as hell, but
-- * correctness is key even if we need to fix the offending softirq in
-- * the first place.
-- *
-- * Note, this is different to tick_nohz_restart. We just kick the
-- * timer and do not touch the other magic bits which need to be done
-- * when idle is left.
-- */
--static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
--{
--#if 0
-- /* Switch back to 2.6.27 behaviour */
-- ktime_t delta;
--
-- /*
-- * Do not touch the tick device, when the next expiry is either
-- * already reached or less/equal than the tick period.
-- */
-- delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
-- if (delta.tv64 <= tick_period.tv64)
-- return;
--
-- tick_nohz_restart(ts, now);
--#endif
--}
--
- static inline void tick_nohz_irq_enter(void)
- {
- struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
-@@ -1130,10 +1101,8 @@ static inline void tick_nohz_irq_enter(v
- now = ktime_get();
- if (ts->idle_active)
- tick_nohz_stop_idle(ts, now);
-- if (ts->tick_stopped) {
-+ if (ts->tick_stopped)
- tick_nohz_update_jiffies(now);
-- tick_nohz_kick_tick(ts, now);
-- }
- }
-
- #else
diff --git a/patches/timekeeping-split-jiffies-lock.patch b/patches/timekeeping-split-jiffies-lock.patch
index a47006b93b3101..ec12aae229a5bb 100644
--- a/patches/timekeeping-split-jiffies-lock.patch
+++ b/patches/timekeeping-split-jiffies-lock.patch
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(kti
return;
- /* Reevalute with jiffies_lock held */
+ /* Reevaluate with jiffies_lock held */
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
@@ -129,7 +129,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (rcu_needs_cpu(basemono, &next_rcu) ||
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
-@@ -2319,8 +2319,10 @@ EXPORT_SYMBOL(hardpps);
+@@ -2325,8 +2325,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
diff --git a/patches/timer-Forward-wheel-clock-whenever-possible.patch b/patches/timer-Forward-wheel-clock-whenever-possible.patch
deleted file mode 100644
index f5b1dfe5d8c7dd..00000000000000
--- a/patches/timer-Forward-wheel-clock-whenever-possible.patch
+++ /dev/null
@@ -1,240 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:36 +0000
-Subject: [PATCH 19/22] timer: Forward wheel clock whenever possible
-
-The wheel clock is stale when a cpu goes into a long idle sleep. This has the
-side effect that timers which are queued end up in the outer wheel
-levels. That results in coarser granularity.
-
-To solve this, we keep track of the idle state and forward the wheel clock
-whenever it's possible.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/tick-internal.h | 1
- kernel/time/tick-sched.c | 12 ++++
- kernel/time/timer.c | 128 ++++++++++++++++++++++++++++++++++++--------
- 3 files changed, 120 insertions(+), 21 deletions(-)
-
---- a/kernel/time/tick-internal.h
-+++ b/kernel/time/tick-internal.h
-@@ -164,3 +164,4 @@ static inline void timers_update_migrati
- DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
-
- extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
-+void timer_clear_idle(void);
---- a/kernel/time/tick-sched.c
-+++ b/kernel/time/tick-sched.c
-@@ -700,6 +700,12 @@ static ktime_t tick_nohz_stop_sched_tick
- delta = next_tick - basemono;
- if (delta <= (u64)TICK_NSEC) {
- tick.tv64 = 0;
-+
-+ /*
-+ * Tell the timer code that the base is not idle, i.e. undo
-+ * the effect of get_next_timer_interrupt().
-+ */
-+ timer_clear_idle();
- /*
- * We've not stopped the tick yet, and there's a timer in the
- * next period, so no point in stopping it either, bail.
-@@ -808,6 +814,12 @@ static void tick_nohz_restart_sched_tick
- tick_do_update_jiffies64(now);
- update_cpu_load_nohz(active);
-
-+ /*
-+ * Clear the timer idle flag, so we avoid IPIs on remote queueing and
-+ * the clock forward checks in the enqueue path.
-+ */
-+ timer_clear_idle();
-+
- calc_load_exit_idle();
- touch_softlockup_watchdog_sched();
- /*
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -196,9 +196,11 @@ struct timer_base {
- spinlock_t lock;
- struct timer_list *running_timer;
- unsigned long clk;
-+ unsigned long next_expiry;
- unsigned int cpu;
- bool migration_enabled;
- bool nohz_active;
-+ bool is_idle;
- DECLARE_BITMAP(pending_map, WHEEL_SIZE);
- struct hlist_head vectors[WHEEL_SIZE];
- } ____cacheline_aligned;
-@@ -519,24 +521,37 @@ static void internal_add_timer(struct ti
- {
- __internal_add_timer(base, timer);
-
-+ if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
-+ return;
-+
- /*
-- * Check whether the other CPU is in dynticks mode and needs
-- * to be triggered to reevaluate the timer wheel. We are
-- * protected against the other CPU fiddling with the timer by
-- * holding the timer base lock. This also makes sure that a
-- * CPU on the way to stop its tick can not evaluate the timer
-- * wheel.
-- *
-- * Spare the IPI for deferrable timers on idle targets though.
-- * The next busy ticks will take care of it. Except full dynticks
-- * require special care against races with idle_cpu(), lets deal
-- * with that later.
-- */
-- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) {
-- if (!(timer->flags & TIMER_DEFERRABLE) ||
-- tick_nohz_full_cpu(base->cpu))
-+ * This wants some optimizing similar to the below, but we do that
-+ * when we switch from push to pull for deferrable timers.
-+ */
-+ if (timer->flags & TIMER_DEFERRABLE) {
-+ if (tick_nohz_full_cpu(base->cpu))
- wake_up_nohz_cpu(base->cpu);
-+ return;
- }
-+
-+ /*
-+ * We might have to IPI the remote CPU if the base is idle and the
-+ * timer is not deferrable. If the other cpu is on the way to idle
-+ * then it can't set base->is_idle as we hold base lock.
-+ */
-+ if (!base->is_idle)
-+ return;
-+
-+ /* Check whether this is the new first expiring timer */
-+ if (time_after_eq(timer->expires, base->next_expiry))
-+ return;
-+
-+ /*
-+ * Set the next expiry time and kick the cpu so it can reevaluate the
-+ * wheel
-+ */
-+ base->next_expiry = timer->expires;
-+ wake_up_nohz_cpu(base->cpu);
- }
-
- #ifdef CONFIG_TIMER_STATS
-@@ -859,10 +874,11 @@ static inline struct timer_base *get_tim
- return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
- }
-
--static inline struct timer_base *get_target_base(struct timer_base *base,
-- unsigned tflags)
-+#ifdef CONFIG_NO_HZ_COMMON
-+static inline struct timer_base *__get_target_base(struct timer_base *base,
-+ unsigned tflags)
- {
--#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
-+#ifdef CONFIG_SMP
- if ((tflags & TIMER_PINNED) || !base->migration_enabled)
- return get_timer_this_cpu_base(tflags);
- return get_timer_cpu_base(tflags, get_nohz_timer_target());
-@@ -871,6 +887,43 @@ static inline struct timer_base *get_tar
- #endif
- }
-
-+static inline void forward_timer_base(struct timer_base *base)
-+{
-+ /*
-+ * We only forward the base when it's idle and we have a delta between
-+ * base clock and jiffies.
-+ */
-+ if (!base->is_idle || (long) (jiffies - base->clk) < 2)
-+ return;
-+
-+ /*
-+ * If the next expiry value is > jiffies, then we fast forward to
-+ * jiffies otherwise we forward to the next expiry value.
-+ */
-+ if (time_after(base->next_expiry, jiffies))
-+ base->clk = jiffies;
-+ else
-+ base->clk = base->next_expiry;
-+}
-+#else
-+static inline struct timer_base *__get_target_base(struct timer_base *base,
-+ unsigned tflags)
-+{
-+ return get_timer_this_cpu_base(tflags);
-+}
-+
-+static inline void forward_timer_base(struct timer_base *base) { }
-+#endif
-+
-+static inline struct timer_base *get_target_base(struct timer_base *base,
-+ unsigned tflags)
-+{
-+ struct timer_base *target = __get_target_base(base, tflags);
-+
-+ forward_timer_base(target);
-+ return target;
-+}
-+
- /*
- * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
- * that all timers which are tied to this base are locked, and the base itself
-@@ -1432,16 +1485,49 @@ u64 get_next_timer_interrupt(unsigned lo
-
- spin_lock(&base->lock);
- nextevt = __next_timer_interrupt(base);
-- spin_unlock(&base->lock);
-+ base->next_expiry = nextevt;
-+ /*
-+ * We have a fresh next event. Check whether we can forward the base.
-+ */
-+ if (time_after(nextevt, jiffies))
-+ base->clk = jiffies;
-+ else if (time_after(nextevt, base->clk))
-+ base->clk = nextevt;
-
-- if (time_before_eq(nextevt, basej))
-+ if (time_before_eq(nextevt, basej)) {
- expires = basem;
-- else
-+ base->is_idle = false;
-+ } else {
- expires = basem + (nextevt - basej) * TICK_NSEC;
-+ /*
-+ * If we expect to sleep more than a tick, mark the base idle.
-+ */
-+ if ((expires - basem) > TICK_NSEC)
-+ base->is_idle = true;
-+ }
-+ spin_unlock(&base->lock);
-
- return cmp_next_hrtimer_event(basem, expires);
- }
-
-+/**
-+ * timer_clear_idle - Clear the idle state of the timer base
-+ *
-+ * Called with interrupts disabled
-+ */
-+void timer_clear_idle(void)
-+{
-+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
-+
-+ /*
-+ * We do this unlocked. The worst outcome is a remote enqueue sending
-+ * a pointless IPI, but taking the lock would just make the window for
-+ * sending the IPI a few instructions smaller for the cost of taking
-+ * the lock in the exit from idle path.
-+ */
-+ base->is_idle = false;
-+}
-+
- static int collect_expired_timers(struct timer_base *base,
- struct hlist_head *heads)
- {
diff --git a/patches/timer-Give-a-few-structs-and-members-proper-names.patch b/patches/timer-Give-a-few-structs-and-members-proper-names.patch
deleted file mode 100644
index 23e201d24177fe..00000000000000
--- a/patches/timer-Give-a-few-structs-and-members-proper-names.patch
+++ /dev/null
@@ -1,421 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:28 +0000
-Subject: [PATCH 12/22] timer: Give a few structs and members proper names
-
-Some of the names are no longer correct and others are simply too long to
-type. Clean it up before we switch the wheel implementation over to the new
-scheme.
-
-No functional change.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/timer.c | 118 ++++++++++++++++++++++++++--------------------------
- 1 file changed, 59 insertions(+), 59 deletions(-)
-
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -77,10 +77,10 @@ struct tvec_root {
- struct hlist_head vec[TVR_SIZE];
- };
-
--struct tvec_base {
-+struct timer_base {
- spinlock_t lock;
- struct timer_list *running_timer;
-- unsigned long timer_jiffies;
-+ unsigned long clk;
- unsigned long next_timer;
- unsigned long active_timers;
- unsigned long all_timers;
-@@ -95,7 +95,7 @@ struct tvec_base {
- } ____cacheline_aligned;
-
-
--static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
-+static DEFINE_PER_CPU(struct timer_base, timer_bases);
-
- #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
- unsigned int sysctl_timer_migration = 1;
-@@ -106,15 +106,15 @@ void timers_update_migration(bool update
- unsigned int cpu;
-
- /* Avoid the loop, if nothing to update */
-- if (this_cpu_read(tvec_bases.migration_enabled) == on)
-+ if (this_cpu_read(timer_bases.migration_enabled) == on)
- return;
-
- for_each_possible_cpu(cpu) {
-- per_cpu(tvec_bases.migration_enabled, cpu) = on;
-+ per_cpu(timer_bases.migration_enabled, cpu) = on;
- per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
- if (!update_nohz)
- continue;
-- per_cpu(tvec_bases.nohz_active, cpu) = true;
-+ per_cpu(timer_bases.nohz_active, cpu) = true;
- per_cpu(hrtimer_bases.nohz_active, cpu) = true;
- }
- }
-@@ -134,18 +134,18 @@ int timer_migration_handler(struct ctl_t
- return ret;
- }
-
--static inline struct tvec_base *get_target_base(struct tvec_base *base,
-+static inline struct timer_base *get_target_base(struct timer_base *base,
- int pinned)
- {
- if (pinned || !base->migration_enabled)
-- return this_cpu_ptr(&tvec_bases);
-- return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
-+ return this_cpu_ptr(&timer_bases);
-+ return per_cpu_ptr(&timer_bases, get_nohz_timer_target());
- }
- #else
--static inline struct tvec_base *get_target_base(struct tvec_base *base,
-+static inline struct timer_base *get_target_base(struct timer_base *base,
- int pinned)
- {
-- return this_cpu_ptr(&tvec_bases);
-+ return this_cpu_ptr(&timer_bases);
- }
- #endif
-
-@@ -371,10 +371,10 @@ void set_timer_slack(struct timer_list *
- EXPORT_SYMBOL_GPL(set_timer_slack);
-
- static void
--__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
-+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
- {
- unsigned long expires = timer->expires;
-- unsigned long idx = expires - base->timer_jiffies;
-+ unsigned long idx = expires - base->clk;
- struct hlist_head *vec;
-
- if (idx < TVR_SIZE) {
-@@ -394,7 +394,7 @@ static void
- * Can happen if you add a timer with expires == jiffies,
- * or you set a timer to go off in the past
- */
-- vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
-+ vec = base->tv1.vec + (base->clk & TVR_MASK);
- } else {
- int i;
- /* If the timeout is larger than MAX_TVAL (on 64-bit
-@@ -403,7 +403,7 @@ static void
- */
- if (idx > MAX_TVAL) {
- idx = MAX_TVAL;
-- expires = idx + base->timer_jiffies;
-+ expires = idx + base->clk;
- }
- i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
- vec = base->tv5.vec + i;
-@@ -412,11 +412,11 @@ static void
- hlist_add_head(&timer->entry, vec);
- }
-
--static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
-+static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
- {
- /* Advance base->jiffies, if the base is empty */
- if (!base->all_timers++)
-- base->timer_jiffies = jiffies;
-+ base->clk = jiffies;
-
- __internal_add_timer(base, timer);
- /*
-@@ -722,7 +722,7 @@ static inline void detach_timer(struct t
- }
-
- static inline void
--detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
-+detach_expired_timer(struct timer_list *timer, struct timer_base *base)
- {
- detach_timer(timer, true);
- if (!(timer->flags & TIMER_DEFERRABLE))
-@@ -730,7 +730,7 @@ detach_expired_timer(struct timer_list *
- base->all_timers--;
- }
-
--static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
-+static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
- bool clear_pending)
- {
- if (!timer_pending(timer))
-@@ -740,16 +740,16 @@ static int detach_if_pending(struct time
- if (!(timer->flags & TIMER_DEFERRABLE)) {
- base->active_timers--;
- if (timer->expires == base->next_timer)
-- base->next_timer = base->timer_jiffies;
-+ base->next_timer = base->clk;
- }
- /* If this was the last timer, advance base->jiffies */
- if (!--base->all_timers)
-- base->timer_jiffies = jiffies;
-+ base->clk = jiffies;
- return 1;
- }
-
- /*
-- * We are using hashed locking: holding per_cpu(tvec_bases).lock
-+ * We are using hashed locking: holding per_cpu(timer_bases).lock
- * means that all timers which are tied to this base via timer->base are
- * locked, and the base itself is locked too.
- *
-@@ -759,16 +759,16 @@ static int detach_if_pending(struct time
- * When the timer's base is locked and removed from the list, the
- * TIMER_MIGRATING flag is set, FIXME
- */
--static struct tvec_base *lock_timer_base(struct timer_list *timer,
-+static struct timer_base *lock_timer_base(struct timer_list *timer,
- unsigned long *flags)
- __acquires(timer->base->lock)
- {
- for (;;) {
- u32 tf = timer->flags;
-- struct tvec_base *base;
-+ struct timer_base *base;
-
- if (!(tf & TIMER_MIGRATING)) {
-- base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
-+ base = per_cpu_ptr(&timer_bases, tf & TIMER_CPUMASK);
- spin_lock_irqsave(&base->lock, *flags);
- if (timer->flags == tf)
- return base;
-@@ -781,7 +781,7 @@ static struct tvec_base *lock_timer_base
- static inline int
- __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
- {
-- struct tvec_base *base, *new_base;
-+ struct timer_base *base, *new_base;
- unsigned long flags;
- int ret = 0;
-
-@@ -948,8 +948,8 @@ EXPORT_SYMBOL(add_timer);
- */
- void add_timer_on(struct timer_list *timer, int cpu)
- {
-- struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
-- struct tvec_base *base;
-+ struct timer_base *new_base = per_cpu_ptr(&timer_bases, cpu);
-+ struct timer_base *base;
- unsigned long flags;
-
- timer_stats_timer_set_start_info(timer);
-@@ -990,7 +990,7 @@ EXPORT_SYMBOL_GPL(add_timer_on);
- */
- int del_timer(struct timer_list *timer)
- {
-- struct tvec_base *base;
-+ struct timer_base *base;
- unsigned long flags;
- int ret = 0;
-
-@@ -1016,7 +1016,7 @@ EXPORT_SYMBOL(del_timer);
- */
- int try_to_del_timer_sync(struct timer_list *timer)
- {
-- struct tvec_base *base;
-+ struct timer_base *base;
- unsigned long flags;
- int ret = -1;
-
-@@ -1100,7 +1100,7 @@ int del_timer_sync(struct timer_list *ti
- EXPORT_SYMBOL(del_timer_sync);
- #endif
-
--static int cascade(struct tvec_base *base, struct tvec *tv, int index)
-+static int cascade(struct timer_base *base, struct tvec *tv, int index)
- {
- /* cascade all the timers from tv up one level */
- struct timer_list *timer;
-@@ -1164,7 +1164,7 @@ static void call_timer_fn(struct timer_l
- }
- }
-
--#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
-+#define INDEX(N) ((base->clk >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
-
- /**
- * __run_timers - run all expired timers (if any) on this CPU.
-@@ -1173,23 +1173,23 @@ static void call_timer_fn(struct timer_l
- * This function cascades all vectors and executes all expired timer
- * vectors.
- */
--static inline void __run_timers(struct tvec_base *base)
-+static inline void __run_timers(struct timer_base *base)
- {
- struct timer_list *timer;
-
- spin_lock_irq(&base->lock);
-
-- while (time_after_eq(jiffies, base->timer_jiffies)) {
-+ while (time_after_eq(jiffies, base->clk)) {
- struct hlist_head work_list;
- struct hlist_head *head = &work_list;
- int index;
-
- if (!base->all_timers) {
-- base->timer_jiffies = jiffies;
-+ base->clk = jiffies;
- break;
- }
-
-- index = base->timer_jiffies & TVR_MASK;
-+ index = base->clk & TVR_MASK;
-
- /*
- * Cascade timers:
-@@ -1199,7 +1199,7 @@ static inline void __run_timers(struct t
- (!cascade(base, &base->tv3, INDEX(1))) &&
- !cascade(base, &base->tv4, INDEX(2)))
- cascade(base, &base->tv5, INDEX(3));
-- ++base->timer_jiffies;
-+ ++base->clk;
- hlist_move_list(base->tv1.vec + index, head);
- while (!hlist_empty(head)) {
- void (*fn)(unsigned long);
-@@ -1237,16 +1237,16 @@ static inline void __run_timers(struct t
- * is used on S/390 to stop all activity when a CPU is idle.
- * This function needs to be called with interrupts disabled.
- */
--static unsigned long __next_timer_interrupt(struct tvec_base *base)
-+static unsigned long __next_timer_interrupt(struct timer_base *base)
- {
-- unsigned long timer_jiffies = base->timer_jiffies;
-- unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
-+ unsigned long clk = base->clk;
-+ unsigned long expires = clk + NEXT_TIMER_MAX_DELTA;
- int index, slot, array, found = 0;
- struct timer_list *nte;
- struct tvec *varray[4];
-
- /* Look for timer events in tv1. */
-- index = slot = timer_jiffies & TVR_MASK;
-+ index = slot = clk & TVR_MASK;
- do {
- hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
- if (nte->flags & TIMER_DEFERRABLE)
-@@ -1265,8 +1265,8 @@ static unsigned long __next_timer_interr
- cascade:
- /* Calculate the next cascade event */
- if (index)
-- timer_jiffies += TVR_SIZE - index;
-- timer_jiffies >>= TVR_BITS;
-+ clk += TVR_SIZE - index;
-+ clk >>= TVR_BITS;
-
- /* Check tv2-tv5. */
- varray[0] = &base->tv2;
-@@ -1277,7 +1277,7 @@ static unsigned long __next_timer_interr
- for (array = 0; array < 4; array++) {
- struct tvec *varp = varray[array];
-
-- index = slot = timer_jiffies & TVN_MASK;
-+ index = slot = clk & TVN_MASK;
- do {
- hlist_for_each_entry(nte, varp->vec + slot, entry) {
- if (nte->flags & TIMER_DEFERRABLE)
-@@ -1301,8 +1301,8 @@ static unsigned long __next_timer_interr
- } while (slot != index);
-
- if (index)
-- timer_jiffies += TVN_SIZE - index;
-- timer_jiffies >>= TVN_BITS;
-+ clk += TVN_SIZE - index;
-+ clk >>= TVN_BITS;
- }
- return expires;
- }
-@@ -1350,7 +1350,7 @@ static u64 cmp_next_hrtimer_event(u64 ba
- */
- u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
- {
-- struct tvec_base *base = this_cpu_ptr(&tvec_bases);
-+ struct timer_base *base = this_cpu_ptr(&timer_bases);
- u64 expires = KTIME_MAX;
- unsigned long nextevt;
-
-@@ -1363,7 +1363,7 @@ u64 get_next_timer_interrupt(unsigned lo
-
- spin_lock(&base->lock);
- if (base->active_timers) {
-- if (time_before_eq(base->next_timer, base->timer_jiffies))
-+ if (time_before_eq(base->next_timer, base->clk))
- base->next_timer = __next_timer_interrupt(base);
- nextevt = base->next_timer;
- if (time_before_eq(nextevt, basej))
-@@ -1402,9 +1402,9 @@ void update_process_times(int user_tick)
- */
- static void run_timer_softirq(struct softirq_action *h)
- {
-- struct tvec_base *base = this_cpu_ptr(&tvec_bases);
-+ struct timer_base *base = this_cpu_ptr(&timer_bases);
-
-- if (time_after_eq(jiffies, base->timer_jiffies))
-+ if (time_after_eq(jiffies, base->clk))
- __run_timers(base);
- }
-
-@@ -1549,7 +1549,7 @@ signed long __sched schedule_timeout_idl
- EXPORT_SYMBOL(schedule_timeout_idle);
-
- #ifdef CONFIG_HOTPLUG_CPU
--static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
-+static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
- {
- struct timer_list *timer;
- int cpu = new_base->cpu;
-@@ -1565,13 +1565,13 @@ static void migrate_timer_list(struct tv
-
- static void migrate_timers(int cpu)
- {
-- struct tvec_base *old_base;
-- struct tvec_base *new_base;
-+ struct timer_base *old_base;
-+ struct timer_base *new_base;
- int i;
-
- BUG_ON(cpu_online(cpu));
-- old_base = per_cpu_ptr(&tvec_bases, cpu);
-- new_base = get_cpu_ptr(&tvec_bases);
-+ old_base = per_cpu_ptr(&timer_bases, cpu);
-+ new_base = get_cpu_ptr(&timer_bases);
- /*
- * The caller is globally serialized and nobody else
- * takes two locks at once, deadlock is not possible.
-@@ -1595,7 +1595,7 @@ static void migrate_timers(int cpu)
-
- spin_unlock(&old_base->lock);
- spin_unlock_irq(&new_base->lock);
-- put_cpu_ptr(&tvec_bases);
-+ put_cpu_ptr(&timer_bases);
- }
-
- static int timer_cpu_notify(struct notifier_block *self,
-@@ -1623,13 +1623,13 @@ static inline void timer_register_cpu_no
-
- static void __init init_timer_cpu(int cpu)
- {
-- struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
-+ struct timer_base *base = per_cpu_ptr(&timer_bases, cpu);
-
- base->cpu = cpu;
- spin_lock_init(&base->lock);
-
-- base->timer_jiffies = jiffies;
-- base->next_timer = base->timer_jiffies;
-+ base->clk = jiffies;
-+ base->next_timer = base->clk;
- }
-
- static void __init init_timer_cpus(void)
diff --git a/patches/timer-Make-pinned-a-timer-property.patch b/patches/timer-Make-pinned-a-timer-property.patch
deleted file mode 100644
index 53e82de75aa237..00000000000000
--- a/patches/timer-Make-pinned-a-timer-property.patch
+++ /dev/null
@@ -1,144 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:15 +0000
-Subject: [PATCH 01/22] timer: Make pinned a timer property
-
-We want to move the timer migration from a push to a pull model. This requires
-storing the pinned attribute of a timer in the timer itself. This must happen
-at initialization time.
-
-Add the helper macros for this.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/timer.h | 25 ++++++++++++++++++++++---
- kernel/time/timer.c | 10 +++++-----
- 2 files changed, 27 insertions(+), 8 deletions(-)
-
---- a/include/linux/timer.h
-+++ b/include/linux/timer.h
-@@ -62,7 +62,8 @@ struct timer_list {
- #define TIMER_MIGRATING 0x00080000
- #define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
- #define TIMER_DEFERRABLE 0x00100000
--#define TIMER_IRQSAFE 0x00200000
-+#define TIMER_PINNED 0x00200000
-+#define TIMER_IRQSAFE 0x00400000
-
- #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
- .entry = { .next = TIMER_ENTRY_STATIC }, \
-@@ -78,9 +79,15 @@ struct timer_list {
- #define TIMER_INITIALIZER(_function, _expires, _data) \
- __TIMER_INITIALIZER((_function), (_expires), (_data), 0)
-
-+#define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \
-+ __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED)
-+
- #define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \
- __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE)
-
-+#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \
-+ __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED)
-+
- #define DEFINE_TIMER(_name, _function, _expires, _data) \
- struct timer_list _name = \
- TIMER_INITIALIZER(_function, _expires, _data)
-@@ -124,8 +131,12 @@ static inline void init_timer_on_stack_k
-
- #define init_timer(timer) \
- __init_timer((timer), 0)
-+#define init_timer_pinned(timer) \
-+ __init_timer((timer), TIMER_PINNED)
- #define init_timer_deferrable(timer) \
- __init_timer((timer), TIMER_DEFERRABLE)
-+#define init_timer_pinned_deferrable(timer) \
-+ __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED)
- #define init_timer_on_stack(timer) \
- __init_timer_on_stack((timer), 0)
-
-@@ -145,12 +156,20 @@ static inline void init_timer_on_stack_k
-
- #define setup_timer(timer, fn, data) \
- __setup_timer((timer), (fn), (data), 0)
-+#define setup_pinned_timer(timer, fn, data) \
-+ __setup_timer((timer), (fn), (data), TIMER_PINNED)
- #define setup_deferrable_timer(timer, fn, data) \
- __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
-+#define setup_pinned_deferrable_timer(timer, fn, data) \
-+ __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
- #define setup_timer_on_stack(timer, fn, data) \
- __setup_timer_on_stack((timer), (fn), (data), 0)
-+#define setup_pinned_timer_on_stack(timer, fn, data) \
-+ __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
- #define setup_deferrable_timer_on_stack(timer, fn, data) \
- __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
-+#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \
-+ __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
-
- /**
- * timer_pending - is a timer pending?
-@@ -175,8 +194,8 @@ extern int mod_timer_pinned(struct timer
-
- extern void set_timer_slack(struct timer_list *time, int slack_hz);
-
--#define TIMER_NOT_PINNED 0
--#define TIMER_PINNED 1
-+#define MOD_TIMER_NOT_PINNED 0
-+#define MOD_TIMER_PINNED 1
- /*
- * The jiffies value which is added to now, when there is no timer
- * in the timer wheel:
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -797,7 +797,7 @@ static inline int
-
- debug_activate(timer, expires);
-
-- new_base = get_target_base(base, pinned);
-+ new_base = get_target_base(base, pinned || timer->flags & TIMER_PINNED);
-
- if (base != new_base) {
- /*
-@@ -840,7 +840,7 @@ static inline int
- */
- int mod_timer_pending(struct timer_list *timer, unsigned long expires)
- {
-- return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
-+ return __mod_timer(timer, expires, true, MOD_TIMER_NOT_PINNED);
- }
- EXPORT_SYMBOL(mod_timer_pending);
-
-@@ -915,7 +915,7 @@ int mod_timer(struct timer_list *timer,
- if (timer_pending(timer) && timer->expires == expires)
- return 1;
-
-- return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
-+ return __mod_timer(timer, expires, false, MOD_TIMER_NOT_PINNED);
- }
- EXPORT_SYMBOL(mod_timer);
-
-@@ -943,7 +943,7 @@ int mod_timer_pinned(struct timer_list *
- if (timer->expires == expires && timer_pending(timer))
- return 1;
-
-- return __mod_timer(timer, expires, false, TIMER_PINNED);
-+ return __mod_timer(timer, expires, false, MOD_TIMER_PINNED);
- }
- EXPORT_SYMBOL(mod_timer_pinned);
-
-@@ -1527,7 +1527,7 @@ signed long __sched schedule_timeout(sig
- expire = timeout + jiffies;
-
- setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-- __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
-+ __mod_timer(&timer, expire, false, MOD_TIMER_NOT_PINNED);
- schedule();
- del_singleshot_timer_sync(&timer);
-
diff --git a/patches/timer-Move-__run_timers-function.patch b/patches/timer-Move-__run_timers-function.patch
deleted file mode 100644
index 2ceee9302bb7e0..00000000000000
--- a/patches/timer-Move-__run_timers-function.patch
+++ /dev/null
@@ -1,91 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:33 +0000
-Subject: [PATCH 16/22] timer: Move __run_timers() function
-
-Move __run_timers() below __next_timer_interrupt() and next_pending_bucket()
-in preparation for the __run_timers() NOHZ optimization.
-
-No functional change.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/timer.c | 52 ++++++++++++++++++++++++++--------------------------
- 1 file changed, 26 insertions(+), 26 deletions(-)
-
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -1292,32 +1292,6 @@ static int collect_expired_timers(struct
- return levels;
- }
-
--/**
-- * __run_timers - run all expired timers (if any) on this CPU.
-- * @base: the timer vector to be processed.
-- */
--static inline void __run_timers(struct timer_base *base)
--{
-- struct hlist_head heads[LVL_DEPTH];
-- int levels;
--
-- if (!time_after_eq(jiffies, base->clk))
-- return;
--
-- spin_lock_irq(&base->lock);
--
-- while (time_after_eq(jiffies, base->clk)) {
--
-- levels = collect_expired_timers(base, heads);
-- base->clk++;
--
-- while (levels--)
-- expire_timers(base, heads + levels);
-- }
-- base->running_timer = NULL;
-- spin_unlock_irq(&base->lock);
--}
--
- #ifdef CONFIG_NO_HZ_COMMON
- /*
- * Find the next pending bucket of a level. Search from @offset + @clk upwards
-@@ -1487,6 +1461,32 @@ void update_process_times(int user_tick)
- run_posix_cpu_timers(p);
- }
-
-+/**
-+ * __run_timers - run all expired timers (if any) on this CPU.
-+ * @base: the timer vector to be processed.
-+ */
-+static inline void __run_timers(struct timer_base *base)
-+{
-+ struct hlist_head heads[LVL_DEPTH];
-+ int levels;
-+
-+ if (!time_after_eq(jiffies, base->clk))
-+ return;
-+
-+ spin_lock_irq(&base->lock);
-+
-+ while (time_after_eq(jiffies, base->clk)) {
-+
-+ levels = collect_expired_timers(base, heads);
-+ base->clk++;
-+
-+ while (levels--)
-+ expire_timers(base, heads + levels);
-+ }
-+ base->running_timer = NULL;
-+ spin_unlock_irq(&base->lock);
-+}
-+
- /*
- * This function runs timers and the timer-tq in bottom half context.
- */
diff --git a/patches/timer-Only-wake-softirq-if-necessary.patch b/patches/timer-Only-wake-softirq-if-necessary.patch
deleted file mode 100644
index 22f297d82d68d3..00000000000000
--- a/patches/timer-Only-wake-softirq-if-necessary.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:37 +0000
-Subject: [PATCH 20/22] timer: Only wake softirq if necessary
-
-With the wheel forwarding in place and with the HZ=1000 4ms folding we can
-avoid running the softirq at all.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/timer.c | 11 +++++++++++
- 1 file changed, 11 insertions(+)
-
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -1623,7 +1623,18 @@ static void run_timer_softirq(struct sof
- */
- void run_local_timers(void)
- {
-+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
-+
- hrtimer_run_queues();
-+ /* Raise the softirq only if required. */
-+ if (time_before(jiffies, base->clk)) {
-+ if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
-+ return;
-+ /* CPU is awake, so check the deferrable base. */
-+ base++;
-+ if (time_before(jiffies, base->clk))
-+ return;
-+ }
- raise_softirq(TIMER_SOFTIRQ);
- }
-
diff --git a/patches/timer-Optimization-for-same-expiry-time-in-mod_timer.patch b/patches/timer-Optimization-for-same-expiry-time-in-mod_timer.patch
deleted file mode 100644
index da3390e3b336a0..00000000000000
--- a/patches/timer-Optimization-for-same-expiry-time-in-mod_timer.patch
+++ /dev/null
@@ -1,130 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:40 +0000
-Subject: [PATCH 22/22] timer: Optimization for same expiry time in mod_timer()
-
-The existing optimization for same expiry time in mod_timer() checks whether
-the timer expiry time is the same as the new requested expiry time. In the old
-timer wheel implementation this does not take the slack batching into account,
-nor does the new implementation evaluate whether the new expiry time will
-requeue the timer to the same bucket.
-
-To optimize that, we can calculate the resulting bucket and check if the new
-expiry time is different from the current expiry time. This calculation
-happens outside the base lock held region. If the resulting bucket is the same
-we can avoid taking the base lock and requeueing the timer.
-
-If the timer needs to be requeued then we have to check under the base lock
-whether the base time has changed between the lockless calculation and taking
-the lock. If it has changed we need to recalculate under the lock.
-
-This optimization takes effect for timers which are enqueued into the less
-granular wheel levels (1 and above). With a simple test case the functionality
-has been verified:
-
- Before After
-Match: 5.5% 86.6%
-Requeue: 94.5% 13.4%
-Recalc: <0.01%
-
-In the non-optimized case the timer is requeued in 94.5% of the cases. With
-the index optimization in place the requeue rate drops to 13.4%. The case
-where the lockless index calculation has to be redone is less than 0.01%.
-
-With a real world test case (networking) we observed the following changes:
-
- Before After
-Match: 97.8% 99.7%
-Requeue: 2.2% 0.3%
-Recalc: <0.001%
-
-That means two percent fewer lock/requeue/unlock operations in one of the hot
-path use cases of timers.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/timer.c | 51 +++++++++++++++++++++++++++++++++++----------------
- 1 file changed, 35 insertions(+), 16 deletions(-)
-
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -975,28 +975,36 @@ static inline int
- __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
- {
- struct timer_base *base, *new_base;
-- unsigned long flags;
-+ unsigned int idx = UINT_MAX;
-+ unsigned long clk = 0, flags;
- int ret = 0;
-
- /*
-- * TODO: Calculate the array bucket of the timer right here w/o
-- * holding the base lock. This allows to check not only
-- * timer->expires == expires below, but also whether the timer
-- * ends up in the same bucket. If we really need to requeue
-- * the timer then we check whether base->clk have
-- * advanced between here and locking the timer base. If
-- * jiffies advanced we have to recalc the array bucket with the
-- * lock held.
-- */
--
-- /*
-- * This is a common optimization triggered by the
-- * networking code - if the timer is re-modified
-- * to be the same thing then just return:
-+ * This is a common optimization triggered by the networking code - if
-+ * the timer is re-modified to be the same thing or ends up in the
-+ * same array bucket then just return:
- */
- if (timer_pending(timer)) {
- if (timer->expires == expires)
- return 1;
-+ /*
-+ * Take the current timer_jiffies of base, but without holding
-+ * the lock!
-+ */
-+ base = get_timer_base(timer->flags);
-+ clk = base->clk;
-+
-+ idx = calc_wheel_index(expires, clk);
-+
-+ /*
-+ * Retrieve and compare the array index of the pending
-+ * timer. If it matches set the expiry to the new value so a
-+ * subsequent call will exit in the expires check above.
-+ */
-+ if (idx == timer_get_idx(timer)) {
-+ timer->expires = expires;
-+ return 1;
-+ }
- }
-
- timer_stats_timer_set_start_info(timer);
-@@ -1033,7 +1041,18 @@ static inline int
- }
-
- timer->expires = expires;
-- internal_add_timer(base, timer);
-+ /*
-+ * If idx was calculated above and the base time did not advance
-+ * between calculating idx and taking the lock, only enqueue_timer()
-+	 * and trigger_dyntick_cpu() are required. Otherwise we need to
-+ * (re)calculate the wheel index via internal_add_timer().
-+ */
-+ if (idx != UINT_MAX && clk == base->clk) {
-+ enqueue_timer(base, timer, idx);
-+ trigger_dyntick_cpu(base, timer);
-+ } else {
-+ internal_add_timer(base, timer);
-+ }
-
- out_unlock:
- spin_unlock_irqrestore(&base->lock, flags);
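
The core of the optimization above is computing the wheel bucket for the new expiry without the base lock and comparing it with the bucket the pending timer already sits in. A compressed userspace sketch (only two levels; the LVL_* constants mirror the series, everything else is assumed):

#include <stdio.h>

#define LVL_CLK_SHIFT	3
#define LVL_BITS	6
#define LVL_MASK	((1u << LVL_BITS) - 1)
#define LVL_OFFS(n)	((n) * (1u << LVL_BITS))
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1ul << LVL_SHIFT(n))
#define LVL_START(n)	(((1ul << LVL_BITS) - 1) << (((n) - 1) * LVL_CLK_SHIFT))

static unsigned calc_index(unsigned long expires, unsigned lvl)
{
	expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

/* Two-level toy version of calc_wheel_index(). */
static unsigned calc_wheel_index(unsigned long expires, unsigned long clk)
{
	unsigned long delta = expires - clk;

	return delta < LVL_START(1) ? calc_index(expires, 0)
				    : calc_index(expires, 1);
}

int main(void)
{
	unsigned long clk = 1000;
	unsigned old_idx = calc_wheel_index(1200, clk);	/* pending timer */
	unsigned new_idx = calc_wheel_index(1205, clk);	/* new expiry */

	if (new_idx == old_idx)
		printf("same bucket (%u): update expires, skip the requeue\n", new_idx);
	else
		printf("bucket changed %u -> %u: requeue needed\n", old_idx, new_idx);
	return 0;
}

If base->clk moved on between this unlocked calculation and taking the lock, the real code falls back to internal_add_timer() and recomputes the index under the lock, as the hunk above spells out.
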
diff --git a/patches/timer-Optimize-collect-timers-for-NOHZ.patch b/patches/timer-Optimize-collect-timers-for-NOHZ.patch
deleted file mode 100644
index 9f1cdd55c6b5de..00000000000000
--- a/patches/timer-Optimize-collect-timers-for-NOHZ.patch
+++ /dev/null
@@ -1,128 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:34 +0000
-Subject: [PATCH 17/22] timer: Optimize collect timers for NOHZ
-
-After a NOHZ idle sleep the wheel must be forwarded to current jiffies. There
-might be expired timers so the current code loops and checks the expired
-buckets for timers. This can take quite some time for long NOHZ idle periods.
-
-The pending bitmask in the timer base allows us to do a quick search for the
-next expiring timer and therefore a fast forward of the base time which
-prevents pointless long-lasting loops.
-
-For a 3 second idle sleep this reduces the catchup time from ~1ms to 5us.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/timer.c | 49 +++++++++++++++++++++++++++++++++++++++++--------
- 1 file changed, 41 insertions(+), 8 deletions(-)
-
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -1267,8 +1267,8 @@ static void expire_timers(struct timer_b
- }
- }
-
--static int collect_expired_timers(struct timer_base *base,
-- struct hlist_head *heads)
-+static int __collect_expired_timers(struct timer_base *base,
-+ struct hlist_head *heads)
- {
- unsigned long clk = base->clk;
- struct hlist_head *vec;
-@@ -1294,9 +1294,9 @@ static int collect_expired_timers(struct
-
- #ifdef CONFIG_NO_HZ_COMMON
- /*
-- * Find the next pending bucket of a level. Search from @offset + @clk upwards
-- * and if nothing there, search from start of the level (@offset) up to
-- * @offset + clk.
-+ * Find the next pending bucket of a level. Search from level start (@offset)
-+ * + @clk upwards and if nothing there, search from start of the level
-+ * (@offset) up to @offset + clk.
- */
- static int next_pending_bucket(struct timer_base *base, unsigned offset,
- unsigned clk)
-@@ -1313,14 +1313,14 @@ static int next_pending_bucket(struct ti
- }
-
- /*
-- * Search the first expiring timer in the various clock levels.
-+ * Search the first expiring timer in the various clock levels. Caller must
-+ * hold base->lock.
- */
- static unsigned long __next_timer_interrupt(struct timer_base *base)
- {
- unsigned long clk, next, adj;
- unsigned lvl, offset = 0;
-
-- spin_lock(&base->lock);
- next = base->clk + NEXT_TIMER_MAX_DELTA;
- clk = base->clk;
- for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
-@@ -1373,7 +1373,6 @@ static unsigned long __next_timer_interr
- clk >>= LVL_CLK_SHIFT;
- clk += adj;
- }
-- spin_unlock(&base->lock);
- return next;
- }
-
-@@ -1431,7 +1430,10 @@ u64 get_next_timer_interrupt(unsigned lo
- if (cpu_is_offline(smp_processor_id()))
- return expires;
-
-+ spin_lock(&base->lock);
- nextevt = __next_timer_interrupt(base);
-+ spin_unlock(&base->lock);
-+
- if (time_before_eq(nextevt, basej))
- expires = basem;
- else
-@@ -1439,6 +1441,37 @@ u64 get_next_timer_interrupt(unsigned lo
-
- return cmp_next_hrtimer_event(basem, expires);
- }
-+
-+static int collect_expired_timers(struct timer_base *base,
-+ struct hlist_head *heads)
-+{
-+ /*
-+ * NOHZ optimization. After a long idle sleep we need to forward the
-+ * base to current jiffies. Avoid a loop by searching the bitfield for
-+ * the next expiring timer.
-+ */
-+ if ((long)(jiffies - base->clk) > 2) {
-+ unsigned long next = __next_timer_interrupt(base);
-+
-+ /*
-+ * If the next timer is ahead of time forward to current
-+ * jiffies, otherwise forward to the next expiry time.
-+ */
-+ if (time_after(next, jiffies)) {
-+ /* The call site will increment clock! */
-+ base->clk = jiffies - 1;
-+ return 0;
-+ }
-+ base->clk = next;
-+ }
-+ return __collect_expired_timers(base, heads);
-+}
-+#else
-+static inline int collect_expired_timers(struct timer_base *base,
-+ struct hlist_head *heads)
-+{
-+ return __collect_expired_timers(base, heads);
-+}
- #endif
-
- /*
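
The essence of the NOHZ optimization above: after a long idle sleep, scan the pending bitmap for the next armed bucket instead of stepping base->clk one jiffy at a time. A toy, single-level userspace illustration (names, types and the "bucket index equals absolute jiffy" simplification are all assumed):

#include <stdio.h>

#define WHEEL_SIZE 64

/* One pending bit per bucket; a timer is armed at "jiffy" 40. */
static unsigned long long pending_map = 1ULL << 40;

static long next_pending_bucket(void)
{
	for (int i = 0; i < WHEEL_SIZE; i++)
		if (pending_map & (1ULL << i))
			return i;
	return -1;
}

int main(void)
{
	unsigned long clk = 3, jiffies = 30, old = clk;

	if (jiffies - clk > 2) {		/* long idle period */
		long next = next_pending_bucket();

		if (next < 0 || (unsigned long)next > jiffies)
			clk = jiffies - 1;	/* nothing due; caller increments clk */
		else
			clk = next;		/* jump straight to the next expiry */
	}
	printf("forwarded clk from %lu to %lu in one step\n", old, clk);
	return 0;
}
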
diff --git a/patches/timer-Reduce-the-CPU-index-space-to-256k.patch b/patches/timer-Reduce-the-CPU-index-space-to-256k.patch
deleted file mode 100644
index e153377d4b95c5..00000000000000
--- a/patches/timer-Reduce-the-CPU-index-space-to-256k.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:29 +0000
-Subject: [PATCH 13/22] timer: Reduce the CPU index space to 256k
-
-We want to store the array index in the flags space. 256k CPUs should be
-enough for a while.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/timer.h | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
---- a/include/linux/timer.h
-+++ b/include/linux/timer.h
-@@ -58,12 +58,12 @@ struct timer_list {
- * workqueue locking issues. It's not meant for executing random crap
- * with interrupts disabled. Abuse is monitored!
- */
--#define TIMER_CPUMASK 0x0007FFFF
--#define TIMER_MIGRATING 0x00080000
-+#define TIMER_CPUMASK 0x0003FFFF
-+#define TIMER_MIGRATING 0x00040000
- #define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
--#define TIMER_DEFERRABLE 0x00100000
--#define TIMER_PINNED 0x00200000
--#define TIMER_IRQSAFE 0x00400000
-+#define TIMER_DEFERRABLE 0x00080000
-+#define TIMER_PINNED 0x00100000
-+#define TIMER_IRQSAFE 0x00200000
-
- #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
- .entry = { .next = TIMER_ENTRY_STATIC }, \
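
With the masks shrunk as above, timer_list::flags still carries the CPU number and the status bits, while the freed top bits take the wheel array index via TIMER_ARRAYSHIFT/TIMER_ARRAYMASK introduced later in this series. A throwaway userspace demo of the packing (constants copied from the hunks, the rest assumed):

#include <stdint.h>
#include <stdio.h>

#define TIMER_CPUMASK		0x0003FFFFu	/* 256k CPUs */
#define TIMER_MIGRATING		0x00040000u
#define TIMER_DEFERRABLE	0x00080000u
#define TIMER_PINNED		0x00100000u
#define TIMER_IRQSAFE		0x00200000u
#define TIMER_ARRAYSHIFT	22
#define TIMER_ARRAYMASK		0xFFC00000u

int main(void)
{
	uint32_t flags = 0;

	flags |= 123;					/* timer lives on CPU 123 */
	flags |= TIMER_PINNED;
	flags = (flags & ~TIMER_ARRAYMASK) | (87u << TIMER_ARRAYSHIFT);

	printf("cpu=%u pinned=%d idx=%u\n",
	       flags & TIMER_CPUMASK,
	       !!(flags & TIMER_PINNED),
	       (flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT);
	return 0;
}
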
diff --git a/patches/timer-Remove-mod_timer_pinned.patch b/patches/timer-Remove-mod_timer_pinned.patch
deleted file mode 100644
index d61b24e8c027c7..00000000000000
--- a/patches/timer-Remove-mod_timer_pinned.patch
+++ /dev/null
@@ -1,116 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:24 +0000
-Subject: [PATCH 09/22] timer: Remove mod_timer_pinned
-
-We switched all users to initialize the timers as pinned and call
-mod_timer(). Remove the now unused function.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/timer.h | 3 ---
- kernel/time/timer.c | 39 +++++----------------------------------
- 2 files changed, 5 insertions(+), 37 deletions(-)
-
---- a/include/linux/timer.h
-+++ b/include/linux/timer.h
-@@ -190,12 +190,9 @@ extern void add_timer_on(struct timer_li
- extern int del_timer(struct timer_list * timer);
- extern int mod_timer(struct timer_list *timer, unsigned long expires);
- extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
--extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
-
- extern void set_timer_slack(struct timer_list *time, int slack_hz);
-
--#define MOD_TIMER_NOT_PINNED 0
--#define MOD_TIMER_PINNED 1
- /*
- * The jiffies value which is added to now, when there is no timer
- * in the timer wheel:
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -779,8 +779,7 @@ static struct tvec_base *lock_timer_base
- }
-
- static inline int
--__mod_timer(struct timer_list *timer, unsigned long expires,
-- bool pending_only, int pinned)
-+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
- {
- struct tvec_base *base, *new_base;
- unsigned long flags;
-@@ -797,7 +796,7 @@ static inline int
-
- debug_activate(timer, expires);
-
-- new_base = get_target_base(base, pinned || timer->flags & TIMER_PINNED);
-+ new_base = get_target_base(base, timer->flags & TIMER_PINNED);
-
- if (base != new_base) {
- /*
-@@ -840,7 +839,7 @@ static inline int
- */
- int mod_timer_pending(struct timer_list *timer, unsigned long expires)
- {
-- return __mod_timer(timer, expires, true, MOD_TIMER_NOT_PINNED);
-+ return __mod_timer(timer, expires, true);
- }
- EXPORT_SYMBOL(mod_timer_pending);
-
-@@ -915,39 +914,11 @@ int mod_timer(struct timer_list *timer,
- if (timer_pending(timer) && timer->expires == expires)
- return 1;
-
-- return __mod_timer(timer, expires, false, MOD_TIMER_NOT_PINNED);
-+ return __mod_timer(timer, expires, false);
- }
- EXPORT_SYMBOL(mod_timer);
-
- /**
-- * mod_timer_pinned - modify a timer's timeout
-- * @timer: the timer to be modified
-- * @expires: new timeout in jiffies
-- *
-- * mod_timer_pinned() is a way to update the expire field of an
-- * active timer (if the timer is inactive it will be activated)
-- * and to ensure that the timer is scheduled on the current CPU.
-- *
-- * Note that this does not prevent the timer from being migrated
-- * when the current CPU goes offline. If this is a problem for
-- * you, use CPU-hotplug notifiers to handle it correctly, for
-- * example, cancelling the timer when the corresponding CPU goes
-- * offline.
-- *
-- * mod_timer_pinned(timer, expires) is equivalent to:
-- *
-- * del_timer(timer); timer->expires = expires; add_timer(timer);
-- */
--int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
--{
-- if (timer->expires == expires && timer_pending(timer))
-- return 1;
--
-- return __mod_timer(timer, expires, false, MOD_TIMER_PINNED);
--}
--EXPORT_SYMBOL(mod_timer_pinned);
--
--/**
- * add_timer - start a timer
- * @timer: the timer to be added
- *
-@@ -1527,7 +1498,7 @@ signed long __sched schedule_timeout(sig
- expire = timeout + jiffies;
-
- setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-- __mod_timer(&timer, expire, false, MOD_TIMER_NOT_PINNED);
-+ __mod_timer(&timer, expire, false);
- schedule();
- del_singleshot_timer_sync(&timer);
-
diff --git a/patches/timer-Remove-slack-leftovers.patch b/patches/timer-Remove-slack-leftovers.patch
deleted file mode 100644
index 24787332b7a2a3..00000000000000
--- a/patches/timer-Remove-slack-leftovers.patch
+++ /dev/null
@@ -1,161 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:31 +0000
-Subject: [PATCH 15/22] timer: Remove slack leftovers
-
-We now have implicit batching in the timer wheel. The slack is no longer
-used. Remove it.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- block/genhd.c | 5 -----
- drivers/mmc/host/jz4740_mmc.c | 2 --
- drivers/power/bq27xxx_battery.c | 5 +----
- drivers/usb/host/ohci-hcd.c | 1 -
- drivers/usb/host/xhci.c | 2 --
- include/linux/timer.h | 4 ----
- kernel/time/timer.c | 19 -------------------
- lib/random32.c | 1 -
- 8 files changed, 1 insertion(+), 38 deletions(-)
-
---- a/block/genhd.c
-+++ b/block/genhd.c
-@@ -1524,12 +1524,7 @@ static void __disk_unblock_events(struct
- if (--ev->block)
- goto out_unlock;
-
-- /*
-- * Not exactly a latency critical operation, set poll timer
-- * slack to 25% and kick event check.
-- */
- intv = disk_events_poll_jiffies(disk);
-- set_timer_slack(&ev->dwork.timer, intv / 4);
- if (check_now)
- queue_delayed_work(system_freezable_power_efficient_wq,
- &ev->dwork, 0);
---- a/drivers/mmc/host/jz4740_mmc.c
-+++ b/drivers/mmc/host/jz4740_mmc.c
-@@ -1068,8 +1068,6 @@ static int jz4740_mmc_probe(struct platf
- jz4740_mmc_clock_disable(host);
- setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
- (unsigned long)host);
-- /* It is not important when it times out, it just needs to timeout. */
-- set_timer_slack(&host->timeout_timer, HZ);
-
- host->use_dma = true;
- if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
---- a/drivers/power/bq27xxx_battery.c
-+++ b/drivers/power/bq27xxx_battery.c
-@@ -735,11 +735,8 @@ static void bq27xxx_battery_poll(struct
-
- bq27xxx_battery_update(di);
-
-- if (poll_interval > 0) {
-- /* The timer does not have to be accurate. */
-- set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
-+ if (poll_interval > 0)
- schedule_delayed_work(&di->work, poll_interval * HZ);
-- }
- }
-
- /*
---- a/drivers/usb/host/ohci-hcd.c
-+++ b/drivers/usb/host/ohci-hcd.c
-@@ -500,7 +500,6 @@ static int ohci_init (struct ohci_hcd *o
-
- setup_timer(&ohci->io_watchdog, io_watchdog_func,
- (unsigned long) ohci);
-- set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20));
-
- ohci->hcca = dma_alloc_coherent (hcd->self.controller,
- sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
---- a/drivers/usb/host/xhci.c
-+++ b/drivers/usb/host/xhci.c
-@@ -490,8 +490,6 @@ static void compliance_mode_recovery_tim
- xhci->comp_mode_recovery_timer.expires = jiffies +
- msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
-
-- set_timer_slack(&xhci->comp_mode_recovery_timer,
-- msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
- add_timer(&xhci->comp_mode_recovery_timer);
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "Compliance mode recovery timer initialized");
---- a/include/linux/timer.h
-+++ b/include/linux/timer.h
-@@ -19,7 +19,6 @@ struct timer_list {
- void (*function)(unsigned long);
- unsigned long data;
- u32 flags;
-- int slack;
-
- #ifdef CONFIG_TIMER_STATS
- int start_pid;
-@@ -73,7 +72,6 @@ struct timer_list {
- .expires = (_expires), \
- .data = (_data), \
- .flags = (_flags), \
-- .slack = -1, \
- __TIMER_LOCKDEP_MAP_INITIALIZER( \
- __FILE__ ":" __stringify(__LINE__)) \
- }
-@@ -193,8 +191,6 @@ extern int del_timer(struct timer_list *
- extern int mod_timer(struct timer_list *timer, unsigned long expires);
- extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
-
--extern void set_timer_slack(struct timer_list *time, int slack_hz);
--
- /*
- * The jiffies value which is added to now, when there is no timer
- * in the timer wheel:
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -447,24 +447,6 @@ unsigned long round_jiffies_up_relative(
- }
- EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
-
--/**
-- * set_timer_slack - set the allowed slack for a timer
-- * @timer: the timer to be modified
-- * @slack_hz: the amount of time (in jiffies) allowed for rounding
-- *
-- * Set the amount of time, in jiffies, that a certain timer has
-- * in terms of slack. By setting this value, the timer subsystem
-- * will schedule the actual timer somewhere between
-- * the time mod_timer() asks for, and that time plus the slack.
-- *
-- * By setting the slack to -1, a percentage of the delay is used
-- * instead.
-- */
--void set_timer_slack(struct timer_list *timer, int slack_hz)
--{
-- timer->slack = slack_hz;
--}
--EXPORT_SYMBOL_GPL(set_timer_slack);
-
- static inline unsigned int timer_get_idx(struct timer_list *timer)
- {
-@@ -790,7 +772,6 @@ static void do_init_timer(struct timer_l
- {
- timer->entry.pprev = NULL;
- timer->flags = flags | raw_smp_processor_id();
-- timer->slack = -1;
- #ifdef CONFIG_TIMER_STATS
- timer->start_site = NULL;
- timer->start_pid = -1;
---- a/lib/random32.c
-+++ b/lib/random32.c
-@@ -233,7 +233,6 @@ static void __prandom_timer(unsigned lon
-
- static void __init __prandom_start_seed_timer(void)
- {
-- set_timer_slack(&seed_timer, HZ);
- seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
- add_timer(&seed_timer);
- }
diff --git a/patches/timer-Split-out-index-calculation.patch b/patches/timer-Split-out-index-calculation.patch
deleted file mode 100644
index 6bf4ebe1982124..00000000000000
--- a/patches/timer-Split-out-index-calculation.patch
+++ /dev/null
@@ -1,105 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:39 +0000
-Subject: [PATCH 21/22] timer: Split out index calculation
-
-For further optimizations we need to separate index calculation and
-queueing. No functional change.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/timer.c | 47 ++++++++++++++++++++++++++++++++---------------
- 1 file changed, 32 insertions(+), 15 deletions(-)
-
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -471,12 +471,9 @@ static inline unsigned calc_index(unsign
- return LVL_OFFS(lvl) + (expires & LVL_MASK);
- }
-
--static void
--__internal_add_timer(struct timer_base *base, struct timer_list *timer)
-+static int calc_wheel_index(unsigned long expires, unsigned long clk)
- {
-- unsigned long expires = timer->expires;
-- unsigned long delta = expires - base->clk;
-- struct hlist_head *vec;
-+ unsigned long delta = expires - clk;
- unsigned int idx;
-
- if (delta < LVL_START(1)) {
-@@ -496,7 +493,7 @@ static void
- } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
- idx = calc_index(expires, 7);
- } else if ((long) delta < 0) {
-- idx = base->clk & LVL_MASK;
-+ idx = clk & LVL_MASK;
- } else {
- /*
- * Force expire obscene large timeouts to expire at the
-@@ -507,20 +504,33 @@ static void
-
- idx = calc_index(expires, LVL_DEPTH - 1);
- }
-- /*
-- * Enqueue the timer into the array bucket, mark it pending in
-- * the bitmap and store the index in the timer flags.
-- */
-- vec = base->vectors + idx;
-- hlist_add_head(&timer->entry, vec);
-+ return idx;
-+}
-+
-+/*
-+ * Enqueue the timer into the hash bucket, mark it pending in
-+ * the bitmap and store the index in the timer flags.
-+ */
-+static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
-+ unsigned int idx)
-+{
-+ hlist_add_head(&timer->entry, base->vectors + idx);
- __set_bit(idx, base->pending_map);
- timer_set_idx(timer, idx);
- }
-
--static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
-+static void
-+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
- {
-- __internal_add_timer(base, timer);
-+ unsigned int idx;
-+
-+ idx = calc_wheel_index(timer->expires, base->clk);
-+ enqueue_timer(base, timer, idx);
-+}
-
-+static void
-+trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
-+{
- if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
- return;
-
-@@ -551,7 +561,14 @@ static void internal_add_timer(struct ti
- * wheel
- */
- base->next_expiry = timer->expires;
-- wake_up_nohz_cpu(base->cpu);
-+ wake_up_nohz_cpu(base->cpu);
-+}
-+
-+static void
-+internal_add_timer(struct timer_base *base, struct timer_list *timer)
-+{
-+ __internal_add_timer(base, timer);
-+ trigger_dyntick_cpu(base, timer);
- }
-
- #ifdef CONFIG_TIMER_STATS
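
After the split above, arming a timer boils down to three small steps: calc_wheel_index() picks the bucket, enqueue_timer() hooks the timer into the bucket list, marks the pending bit and stashes the index in the flags word, and trigger_dyntick_cpu() handles the NOHZ kick. A toy userspace version of the enqueue step alone (all types and names here are assumptions):

#include <stdint.h>
#include <stdio.h>

#define WHEEL_SIZE	576		/* 9 levels x 64 buckets */
#define ARRAYSHIFT	22

struct toy_timer {
	struct toy_timer *next;		/* stand-in for the hlist entry */
	unsigned long expires;
	uint32_t flags;
};

static struct toy_timer *vectors[WHEEL_SIZE];
static unsigned char pending_map[WHEEL_SIZE];	/* one byte per bucket, toy bitmap */

static void enqueue_timer(struct toy_timer *t, unsigned idx)
{
	t->next = vectors[idx];			/* add to the bucket list */
	vectors[idx] = t;
	pending_map[idx] = 1;			/* mark the bucket pending */
	t->flags = (t->flags & ((1u << ARRAYSHIFT) - 1)) | (idx << ARRAYSHIFT);
}

int main(void)
{
	struct toy_timer t = { .expires = 1234 };
	unsigned idx = 42;			/* pretend calc_wheel_index() said so */

	enqueue_timer(&t, idx);
	printf("bucket %u: pending=%d, idx from flags=%u\n",
	       idx, pending_map[idx], t.flags >> ARRAYSHIFT);
	return 0;
}
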
diff --git a/patches/timer-Switch-to-a-non-cascading-wheel.patch b/patches/timer-Switch-to-a-non-cascading-wheel.patch
deleted file mode 100644
index c43ad1b47ceb83..00000000000000
--- a/patches/timer-Switch-to-a-non-cascading-wheel.patch
+++ /dev/null
@@ -1,1169 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:30 +0000
-Subject: [PATCH 14/22] timer: Switch to a non cascading wheel
-
-The current timer wheel has some drawbacks:
-
-1) Cascading
-
- Cascading can be an unbound operation and is completely pointless in most
- cases because the vast majority of the timer wheel timers are canceled or
- rearmed before expiration.
-
-2) No fast lookup of the next expiring timer
-
- In NOHZ scenarios the first timer soft interrupt after a long NOHZ period
- must fast forward the base time to current jiffies. As we have no way to
- find the next expiring timer fast, the code loops and increments the base
- time by one and checks for expired timers in each step.
-
-After a thorough analysis of real world data gathered on laptops,
-workstations, webservers and other machines (thanks Chris!) I came to the
-conclusion that the current 'classic' timer wheel implementation can be
-modified to address the above issues.
-
-The vast majority of timer wheel timers is canceled or rearmed before
-expiry. Most of them are timeouts for networking and other I/O tasks. The
-nature of timeouts is to catch the exception from normal operation (TCP ack
-timed out, disk does not respond, etc.). For these kind of timeouts the
-accuracy is not really a concern. In case the timeout fires, performance is
-down the drain already.
-
-The few timers which actually expire can be split into two categories:
-
- 1) Short expiry times which expect halfway accurate expiry
-
- 2) Long term expiry times are inaccurate today already due to the batching
- which is done for NOHZ.
-
-So for long term expiry timers we can avoid the cascading property and just
-leave them in the less granular outer wheels until expiry or
-cancelation. Timers which are armed with a timeout larger than the wheel
-capacity are not longer cascaded. We expire them with the longest possible
-timeout (6+ days). We have not observed such timeouts in our data collection,
-but at least we handle them with the least surprising effect.
-
-To avoid extending the wheel levels for HZ=1000 so we can accommodate the
-longest observed timeouts (5 days in the network conntrack code) we reduce the
-first level granularity on HZ=1000 to 4ms, which effectively is the same as
-the HZ=250 behaviour. From our data analysis there is nothing which relies on
-that 1ms granularity and as a side effect we get better batching and timer
-locality for the networking code as well.
-
-Contrary to the classic wheel the granularity of the next wheel is not the
-capacity of the first wheel. The granularities of the wheels are in the
-currently chosen setting 8 times the granularity of the previous wheel. So for
-HZ=250 we end up with the following granularity levels:
-
-Level Offset Granularity Range
- 0 0 4 ms 0 ms - 252 ms
- 1 64 32 ms 256 ms - 2044 ms (256ms - ~2s)
- 2 128 256 ms 2048 ms - 16380 ms (~2s - ~16s)
- 3 192 2048 ms (~2s) 16384 ms - 131068 ms (~16s - ~2m)
- 4 256 16384 ms (~16s) 131072 ms - 1048572 ms (~2m - ~17m)
- 5 320 131072 ms (~2m) 1048576 ms - 8388604 ms (~17m - ~2h)
- 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
- 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
-
-That's a worst case inaccuracy of 12.5% for the timers which are queued at the
-beginning of a level.
-
-So the new wheel concept addresses the old issues:
-
-1) Cascading is avoided (except for extreme long time timers)
-
-2) By keeping the timers in the bucket until expiry/cancelation we can track
- the buckets which have timers enqueued in a bucket bitmap and therefor can
- lookup the next expiring timer fast and time bound.
-
-A further benefit of the concept is that the slack calculation which is done
-on every timer start is no longer necessary because the granularity levels
-provide natural batching already.
-
-Our extensive testing with various loads did not show any performance
-degradation vs. the current wheel implementation.
-
-This patch does not address the 'fast lookup' issue as we wanted to make sure
-that there is no regression introduced by the wheel redesign. The
-optimizations are in follow up patches.
-
-[ Contains fixes from Anna-Maria Gleixner and Richard Cochran ]
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/timer.h | 2
- kernel/time/timer.c | 825 ++++++++++++++++++++++++++++----------------------
- 2 files changed, 467 insertions(+), 360 deletions(-)
-
---- a/include/linux/timer.h
-+++ b/include/linux/timer.h
-@@ -64,6 +64,8 @@ struct timer_list {
- #define TIMER_DEFERRABLE 0x00080000
- #define TIMER_PINNED 0x00100000
- #define TIMER_IRQSAFE 0x00200000
-+#define TIMER_ARRAYSHIFT 22
-+#define TIMER_ARRAYMASK 0xFFC00000
-
- #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
- .entry = { .next = TIMER_ENTRY_STATIC }, \
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -59,43 +59,151 @@
- EXPORT_SYMBOL(jiffies_64);
-
- /*
-- * per-CPU timer vector definitions:
-+ * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
-+ * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
-+ * level has a different granularity.
-+ *
-+ * The level granularity is: LVL_CLK_DIV ^ lvl
-+ * The level clock frequency is: HZ / (LVL_CLK_DIV ^ level)
-+ *
-+ * The array level of a newly armed timer depends on the relative expiry
-+ * time. The farther the expiry time is away the higher the array level and
-+ * therefore the granularity becomes.
-+ *
-+ * Contrary to the original timer wheel implementation, which aims for 'exact'
-+ * expiry of the timers, this implementation removes the need for recascading
-+ * the timers into the lower array levels. The previous 'classic' timer wheel
-+ * implementation of the kernel already violated the 'exact' expiry by adding
-+ * slack to the expiry time to provide batched expiration. The granularity
-+ * levels provide implicit batching.
-+ *
-+ * This is an optimization of the original timer wheel implementation for the
-+ * majority of the timer wheel use cases: timeouts. The vast majority of
-+ * timeout timers (networking, disk I/O ...) are canceled before expiry. If
-+ * the timeout expires it indicates that normal operation is disturbed, so it
-+ * does not matter much whether the timeout comes with a slight delay.
-+ *
-+ * The only exception to this is networking timers with a small expiry
-+ * time. They rely on the granularity. Those fit into the first wheel level,
-+ * which has HZ granularity.
-+ *
-+ * We don't have cascading anymore. Timers with an expiry time above the
-+ * capacity of the last wheel level are force expired at the maximum timeout
-+ * value of the last wheel level. From data sampling we know that the maximum
-+ * value observed is 5 days (network connection tracking), so this should not
-+ * be an issue.
-+ *
-+ * The currently chosen array constant values are a good compromise between
-+ * array size and granularity.
-+ *
-+ * This results in the following granularity and range levels:
-+ *
-+ * HZ 1000 steps
-+ * Level Offset Granularity Range
-+ * 0 0 1 ms 0 ms - 63 ms
-+ * 1 64 8 ms 64 ms - 511 ms
-+ * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
-+ * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
-+ * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
-+ * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
-+ * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
-+ * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
-+ * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
-+ *
-+ * HZ 300
-+ * Level Offset Granularity Range
-+ * 0 0 3 ms 0 ms - 210 ms
-+ * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
-+ * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
-+ * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
-+ * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
-+ * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
-+ * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
-+ * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
-+ * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
-+ *
-+ * HZ 250
-+ * Level Offset Granularity Range
-+ * 0 0 4 ms 0 ms - 255 ms
-+ * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
-+ * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
-+ * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
-+ * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
-+ * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
-+ * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
-+ * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
-+ * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
-+ *
-+ * HZ 100
-+ * Level Offset Granularity Range
-+ * 0 0 10 ms 0 ms - 630 ms
-+ * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
-+ * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
-+ * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
-+ * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
-+ * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
-+ * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
-+ * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
-+ */
-+
-+/* Clock divisor for the next level */
-+#define LVL_CLK_SHIFT 3
-+#define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT)
-+#define LVL_CLK_MASK (LVL_CLK_DIV - 1)
-+#define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT)
-+#define LVL_GRAN(n) (1UL << LVL_SHIFT(n))
-+
-+/*
-+ * The time start value for each level to select the bucket at enqueue
-+ * time.
- */
--#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
--#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
--#define TVN_SIZE (1 << TVN_BITS)
--#define TVR_SIZE (1 << TVR_BITS)
--#define TVN_MASK (TVN_SIZE - 1)
--#define TVR_MASK (TVR_SIZE - 1)
--#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
-+#define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
-
--struct tvec {
-- struct hlist_head vec[TVN_SIZE];
--};
-+/* Size of each clock level */
-+#define LVL_BITS 6
-+#define LVL_SIZE (1UL << LVL_BITS)
-+#define LVL_MASK (LVL_SIZE - 1)
-+#define LVL_OFFS(n) ((n) * LVL_SIZE)
-+
-+/* Level depth */
-+#if HZ > 100
-+# define LVL_DEPTH 9
-+# else
-+# define LVL_DEPTH 8
-+#endif
-
--struct tvec_root {
-- struct hlist_head vec[TVR_SIZE];
--};
-+/* The cutoff (max. capacity of the wheel) */
-+#define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH))
-+#define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
-+
-+/*
-+ * The resulting wheel size. If NOHZ is configured we allocate two
-+ * wheels so we have a separate storage for the deferrable timers.
-+ */
-+#define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH)
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+# define NR_BASES 2
-+# define BASE_STD 0
-+# define BASE_DEF 1
-+#else
-+# define NR_BASES 1
-+# define BASE_STD 0
-+# define BASE_DEF 0
-+#endif
-
- struct timer_base {
-- spinlock_t lock;
-- struct timer_list *running_timer;
-- unsigned long clk;
-- unsigned long next_timer;
-- unsigned long active_timers;
-- unsigned long all_timers;
-- int cpu;
-- bool migration_enabled;
-- bool nohz_active;
-- struct tvec_root tv1;
-- struct tvec tv2;
-- struct tvec tv3;
-- struct tvec tv4;
-- struct tvec tv5;
-+ spinlock_t lock;
-+ struct timer_list *running_timer;
-+ unsigned long clk;
-+ unsigned int cpu;
-+ bool migration_enabled;
-+ bool nohz_active;
-+ DECLARE_BITMAP(pending_map, WHEEL_SIZE);
-+ struct hlist_head vectors[WHEEL_SIZE];
- } ____cacheline_aligned;
-
--
--static DEFINE_PER_CPU(struct timer_base, timer_bases);
-+static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
-
- #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
- unsigned int sysctl_timer_migration = 1;
-@@ -106,15 +214,17 @@ void timers_update_migration(bool update
- unsigned int cpu;
-
- /* Avoid the loop, if nothing to update */
-- if (this_cpu_read(timer_bases.migration_enabled) == on)
-+ if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
- return;
-
- for_each_possible_cpu(cpu) {
-- per_cpu(timer_bases.migration_enabled, cpu) = on;
-+ per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
-+ per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
- per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
- if (!update_nohz)
- continue;
-- per_cpu(timer_bases.nohz_active, cpu) = true;
-+ per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
-+ per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
- per_cpu(hrtimer_bases.nohz_active, cpu) = true;
- }
- }
-@@ -133,20 +243,6 @@ int timer_migration_handler(struct ctl_t
- mutex_unlock(&mutex);
- return ret;
- }
--
--static inline struct timer_base *get_target_base(struct timer_base *base,
-- int pinned)
--{
-- if (pinned || !base->migration_enabled)
-- return this_cpu_ptr(&timer_bases);
-- return per_cpu_ptr(&timer_bases, get_nohz_timer_target());
--}
--#else
--static inline struct timer_base *get_target_base(struct timer_base *base,
-- int pinned)
--{
-- return this_cpu_ptr(&timer_bases);
--}
- #endif
-
- static unsigned long round_jiffies_common(unsigned long j, int cpu,
-@@ -370,78 +466,91 @@ void set_timer_slack(struct timer_list *
- }
- EXPORT_SYMBOL_GPL(set_timer_slack);
-
-+static inline unsigned int timer_get_idx(struct timer_list *timer)
-+{
-+ return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
-+}
-+
-+static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
-+{
-+ timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
-+ idx << TIMER_ARRAYSHIFT;
-+}
-+
-+/*
-+ * Helper function to calculate the array index for a given expiry
-+ * time.
-+ */
-+static inline unsigned calc_index(unsigned expires, unsigned lvl)
-+{
-+ expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
-+ return LVL_OFFS(lvl) + (expires & LVL_MASK);
-+}
-+
- static void
- __internal_add_timer(struct timer_base *base, struct timer_list *timer)
- {
- unsigned long expires = timer->expires;
-- unsigned long idx = expires - base->clk;
-+ unsigned long delta = expires - base->clk;
- struct hlist_head *vec;
-+ unsigned int idx;
-
-- if (idx < TVR_SIZE) {
-- int i = expires & TVR_MASK;
-- vec = base->tv1.vec + i;
-- } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
-- int i = (expires >> TVR_BITS) & TVN_MASK;
-- vec = base->tv2.vec + i;
-- } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
-- int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
-- vec = base->tv3.vec + i;
-- } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
-- int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
-- vec = base->tv4.vec + i;
-- } else if ((signed long) idx < 0) {
-- /*
-- * Can happen if you add a timer with expires == jiffies,
-- * or you set a timer to go off in the past
-- */
-- vec = base->tv1.vec + (base->clk & TVR_MASK);
-+ if (delta < LVL_START(1)) {
-+ idx = calc_index(expires, 0);
-+ } else if (delta < LVL_START(2)) {
-+ idx = calc_index(expires, 1);
-+ } else if (delta < LVL_START(3)) {
-+ idx = calc_index(expires, 2);
-+ } else if (delta < LVL_START(4)) {
-+ idx = calc_index(expires, 3);
-+ } else if (delta < LVL_START(5)) {
-+ idx = calc_index(expires, 4);
-+ } else if (delta < LVL_START(6)) {
-+ idx = calc_index(expires, 5);
-+ } else if (delta < LVL_START(7)) {
-+ idx = calc_index(expires, 6);
-+ } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
-+ idx = calc_index(expires, 7);
-+ } else if ((long) delta < 0) {
-+ idx = base->clk & LVL_MASK;
- } else {
-- int i;
-- /* If the timeout is larger than MAX_TVAL (on 64-bit
-- * architectures or with CONFIG_BASE_SMALL=1) then we
-- * use the maximum timeout.
-+ /*
-+ * Force expire obscene large timeouts to expire at the
-+ * capacity limit of the wheel.
- */
-- if (idx > MAX_TVAL) {
-- idx = MAX_TVAL;
-- expires = idx + base->clk;
-- }
-- i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
-- vec = base->tv5.vec + i;
-- }
-+ if (expires >= WHEEL_TIMEOUT_CUTOFF)
-+ expires = WHEEL_TIMEOUT_MAX;
-
-+ idx = calc_index(expires, LVL_DEPTH - 1);
-+ }
-+ /*
-+ * Enqueue the timer into the array bucket, mark it pending in
-+ * the bitmap and store the index in the timer flags.
-+ */
-+ vec = base->vectors + idx;
- hlist_add_head(&timer->entry, vec);
-+ __set_bit(idx, base->pending_map);
-+ timer_set_idx(timer, idx);
- }
-
- static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
- {
-- /* Advance base->jiffies, if the base is empty */
-- if (!base->all_timers++)
-- base->clk = jiffies;
--
- __internal_add_timer(base, timer);
-- /*
-- * Update base->active_timers and base->next_timer
-- */
-- if (!(timer->flags & TIMER_DEFERRABLE)) {
-- if (!base->active_timers++ ||
-- time_before(timer->expires, base->next_timer))
-- base->next_timer = timer->expires;
-- }
-
- /*
- * Check whether the other CPU is in dynticks mode and needs
-- * to be triggered to reevaluate the timer wheel.
-- * We are protected against the other CPU fiddling
-- * with the timer by holding the timer base lock. This also
-- * makes sure that a CPU on the way to stop its tick can not
-- * evaluate the timer wheel.
-+ * to be triggered to reevaluate the timer wheel. We are
-+ * protected against the other CPU fiddling with the timer by
-+ * holding the timer base lock. This also makes sure that a
-+ * CPU on the way to stop its tick can not evaluate the timer
-+ * wheel.
- *
- * Spare the IPI for deferrable timers on idle targets though.
- * The next busy ticks will take care of it. Except full dynticks
- * require special care against races with idle_cpu(), lets deal
- * with that later.
- */
-- if (base->nohz_active) {
-+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) {
- if (!(timer->flags & TIMER_DEFERRABLE) ||
- tick_nohz_full_cpu(base->cpu))
- wake_up_nohz_cpu(base->cpu);
-@@ -721,54 +830,87 @@ static inline void detach_timer(struct t
- entry->next = LIST_POISON2;
- }
-
--static inline void
--detach_expired_timer(struct timer_list *timer, struct timer_base *base)
--{
-- detach_timer(timer, true);
-- if (!(timer->flags & TIMER_DEFERRABLE))
-- base->active_timers--;
-- base->all_timers--;
--}
--
- static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
- bool clear_pending)
- {
-+ unsigned idx = timer_get_idx(timer);
-+
- if (!timer_pending(timer))
- return 0;
-
-+ if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
-+ __clear_bit(idx, base->pending_map);
-+
- detach_timer(timer, clear_pending);
-- if (!(timer->flags & TIMER_DEFERRABLE)) {
-- base->active_timers--;
-- if (timer->expires == base->next_timer)
-- base->next_timer = base->clk;
-- }
-- /* If this was the last timer, advance base->jiffies */
-- if (!--base->all_timers)
-- base->clk = jiffies;
- return 1;
- }
-
-+static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
-+{
-+ struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
-+
-+ /*
-+ * If the timer is deferrable and nohz is active then we need to use
-+ * the deferrable base.
-+ */
-+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
-+ (tflags & TIMER_DEFERRABLE))
-+ base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
-+ return base;
-+}
-+
-+static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
-+{
-+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
-+
-+ /*
-+ * If the timer is deferrable and nohz is active then we need to use
-+ * the deferrable base.
-+ */
-+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
-+ (tflags & TIMER_DEFERRABLE))
-+ base = this_cpu_ptr(&timer_bases[BASE_DEF]);
-+ return base;
-+}
-+
-+static inline struct timer_base *get_timer_base(u32 tflags)
-+{
-+ return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
-+}
-+
-+static inline struct timer_base *get_target_base(struct timer_base *base,
-+ unsigned tflags)
-+{
-+#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
-+ if ((tflags & TIMER_PINNED) || !base->migration_enabled)
-+ return get_timer_this_cpu_base(tflags);
-+ return get_timer_cpu_base(tflags, get_nohz_timer_target());
-+#else
-+ return get_timer_this_cpu_base(tflags);
-+#endif
-+}
-+
- /*
-- * We are using hashed locking: holding per_cpu(timer_bases).lock
-- * means that all timers which are tied to this base via timer->base are
-- * locked, and the base itself is locked too.
-+ * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
-+ * that all timers which are tied to this base are locked, and the base itself
-+ * is locked too.
- *
- * So __run_timers/migrate_timers can safely modify all timers which could
-- * be found on ->tvX lists.
-+ * be found in the base->vectors array.
- *
-- * When the timer's base is locked and removed from the list, the
-- * TIMER_MIGRATING flag is set, FIXME
-+ * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
-+ * to wait until the migration is done.
- */
- static struct timer_base *lock_timer_base(struct timer_list *timer,
-- unsigned long *flags)
-+ unsigned long *flags)
- __acquires(timer->base->lock)
- {
- for (;;) {
-- u32 tf = timer->flags;
- struct timer_base *base;
-+ u32 tf = timer->flags;
-
- if (!(tf & TIMER_MIGRATING)) {
-- base = per_cpu_ptr(&timer_bases, tf & TIMER_CPUMASK);
-+ base = get_timer_base(tf);
- spin_lock_irqsave(&base->lock, *flags);
- if (timer->flags == tf)
- return base;
-@@ -785,6 +927,27 @@ static inline int
- unsigned long flags;
- int ret = 0;
-
-+ /*
-+ * TODO: Calculate the array bucket of the timer right here w/o
-+ * holding the base lock. This allows to check not only
-+ * timer->expires == expires below, but also whether the timer
-+ * ends up in the same bucket. If we really need to requeue
-+ * the timer then we check whether base->clk have
-+ * advanced between here and locking the timer base. If
-+ * jiffies advanced we have to recalc the array bucket with the
-+ * lock held.
-+ */
-+
-+ /*
-+ * This is a common optimization triggered by the
-+ * networking code - if the timer is re-modified
-+ * to be the same thing then just return:
-+ */
-+ if (timer_pending(timer)) {
-+ if (timer->expires == expires)
-+ return 1;
-+ }
-+
- timer_stats_timer_set_start_info(timer);
- BUG_ON(!timer->function);
-
-@@ -796,15 +959,15 @@ static inline int
-
- debug_activate(timer, expires);
-
-- new_base = get_target_base(base, timer->flags & TIMER_PINNED);
-+ new_base = get_target_base(base, timer->flags);
-
- if (base != new_base) {
- /*
-- * We are trying to schedule the timer on the local CPU.
-+ * We are trying to schedule the timer on the new base.
- * However we can't change timer's base while it is running,
- * otherwise del_timer_sync() can't detect that the timer's
-- * handler yet has not finished. This also guarantees that
-- * the timer is serialized wrt itself.
-+ * handler yet has not finished. This also guarantees that the
-+ * timer is serialized wrt itself.
- */
- if (likely(base->running_timer != timer)) {
- /* See the comment in lock_timer_base() */
-@@ -843,45 +1006,6 @@ int mod_timer_pending(struct timer_list
- }
- EXPORT_SYMBOL(mod_timer_pending);
-
--/*
-- * Decide where to put the timer while taking the slack into account
-- *
-- * Algorithm:
-- * 1) calculate the maximum (absolute) time
-- * 2) calculate the highest bit where the expires and new max are different
-- * 3) use this bit to make a mask
-- * 4) use the bitmask to round down the maximum time, so that all last
-- * bits are zeros
-- */
--static inline
--unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
--{
-- unsigned long expires_limit, mask;
-- int bit;
--
-- if (timer->slack >= 0) {
-- expires_limit = expires + timer->slack;
-- } else {
-- long delta = expires - jiffies;
--
-- if (delta < 256)
-- return expires;
--
-- expires_limit = expires + delta / 256;
-- }
-- mask = expires ^ expires_limit;
-- if (mask == 0)
-- return expires;
--
-- bit = __fls(mask);
--
-- mask = (1UL << bit) - 1;
--
-- expires_limit = expires_limit & ~(mask);
--
-- return expires_limit;
--}
--
- /**
- * mod_timer - modify a timer's timeout
- * @timer: the timer to be modified
-@@ -904,16 +1028,6 @@ unsigned long apply_slack(struct timer_l
- */
- int mod_timer(struct timer_list *timer, unsigned long expires)
- {
-- expires = apply_slack(timer, expires);
--
-- /*
-- * This is a common optimization triggered by the
-- * networking code - if the timer is re-modified
-- * to be the same thing then just return:
-- */
-- if (timer_pending(timer) && timer->expires == expires)
-- return 1;
--
- return __mod_timer(timer, expires, false);
- }
- EXPORT_SYMBOL(mod_timer);
-@@ -948,13 +1062,14 @@ EXPORT_SYMBOL(add_timer);
- */
- void add_timer_on(struct timer_list *timer, int cpu)
- {
-- struct timer_base *new_base = per_cpu_ptr(&timer_bases, cpu);
-- struct timer_base *base;
-+ struct timer_base *new_base, *base;
- unsigned long flags;
-
- timer_stats_timer_set_start_info(timer);
- BUG_ON(timer_pending(timer) || !timer->function);
-
-+ new_base = get_timer_cpu_base(timer->flags, cpu);
-+
- /*
- * If @timer was on a different CPU, it should be migrated with the
- * old base locked to prevent other operations proceeding with the
-@@ -1100,27 +1215,6 @@ int del_timer_sync(struct timer_list *ti
- EXPORT_SYMBOL(del_timer_sync);
- #endif
-
--static int cascade(struct timer_base *base, struct tvec *tv, int index)
--{
-- /* cascade all the timers from tv up one level */
-- struct timer_list *timer;
-- struct hlist_node *tmp;
-- struct hlist_head tv_list;
--
-- hlist_move_list(tv->vec + index, &tv_list);
--
-- /*
-- * We are removing _all_ timers from the list, so we
-- * don't have to detach them individually.
-- */
-- hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
-- /* No accounting, while moving them */
-- __internal_add_timer(base, timer);
-- }
--
-- return index;
--}
--
- static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
- unsigned long data)
- {
-@@ -1164,68 +1258,80 @@ static void call_timer_fn(struct timer_l
- }
- }
-
--#define INDEX(N) ((base->clk >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
-+static void expire_timers(struct timer_base *base, struct hlist_head *head)
-+{
-+ while (!hlist_empty(head)) {
-+ struct timer_list *timer;
-+ void (*fn)(unsigned long);
-+ unsigned long data;
-+
-+ timer = hlist_entry(head->first, struct timer_list, entry);
-+ timer_stats_account_timer(timer);
-+
-+ base->running_timer = timer;
-+ detach_timer(timer, true);
-+
-+ fn = timer->function;
-+ data = timer->data;
-+
-+ if (timer->flags & TIMER_IRQSAFE) {
-+ spin_unlock(&base->lock);
-+ call_timer_fn(timer, fn, data);
-+ spin_lock(&base->lock);
-+ } else {
-+ spin_unlock_irq(&base->lock);
-+ call_timer_fn(timer, fn, data);
-+ spin_lock_irq(&base->lock);
-+ }
-+ }
-+}
-+
-+static int collect_expired_timers(struct timer_base *base,
-+ struct hlist_head *heads)
-+{
-+ unsigned long clk = base->clk;
-+ struct hlist_head *vec;
-+ int i, levels = 0;
-+ unsigned int idx;
-+
-+ for (i = 0; i < LVL_DEPTH; i++) {
-+ idx = (clk & LVL_MASK) + i * LVL_SIZE;
-+
-+ if (__test_and_clear_bit(idx, base->pending_map)) {
-+ vec = base->vectors + idx;
-+ hlist_move_list(vec, heads++);
-+ levels++;
-+ }
-+ /* Is it time to look at the next level? */
-+ if (clk & LVL_CLK_MASK)
-+ break;
-+ /* Shift clock for the next level granularity */
-+ clk >>= LVL_CLK_SHIFT;
-+ }
-+ return levels;
-+}
-
- /**
- * __run_timers - run all expired timers (if any) on this CPU.
- * @base: the timer vector to be processed.
-- *
-- * This function cascades all vectors and executes all expired timer
-- * vectors.
- */
- static inline void __run_timers(struct timer_base *base)
- {
-- struct timer_list *timer;
-+ struct hlist_head heads[LVL_DEPTH];
-+ int levels;
-+
-+ if (!time_after_eq(jiffies, base->clk))
-+ return;
-
- spin_lock_irq(&base->lock);
-
- while (time_after_eq(jiffies, base->clk)) {
-- struct hlist_head work_list;
-- struct hlist_head *head = &work_list;
-- int index;
-
-- if (!base->all_timers) {
-- base->clk = jiffies;
-- break;
-- }
--
-- index = base->clk & TVR_MASK;
-+ levels = collect_expired_timers(base, heads);
-+ base->clk++;
-
-- /*
-- * Cascade timers:
-- */
-- if (!index &&
-- (!cascade(base, &base->tv2, INDEX(0))) &&
-- (!cascade(base, &base->tv3, INDEX(1))) &&
-- !cascade(base, &base->tv4, INDEX(2)))
-- cascade(base, &base->tv5, INDEX(3));
-- ++base->clk;
-- hlist_move_list(base->tv1.vec + index, head);
-- while (!hlist_empty(head)) {
-- void (*fn)(unsigned long);
-- unsigned long data;
-- bool irqsafe;
--
-- timer = hlist_entry(head->first, struct timer_list, entry);
-- fn = timer->function;
-- data = timer->data;
-- irqsafe = timer->flags & TIMER_IRQSAFE;
--
-- timer_stats_account_timer(timer);
--
-- base->running_timer = timer;
-- detach_expired_timer(timer, base);
--
-- if (irqsafe) {
-- spin_unlock(&base->lock);
-- call_timer_fn(timer, fn, data);
-- spin_lock(&base->lock);
-- } else {
-- spin_unlock_irq(&base->lock);
-- call_timer_fn(timer, fn, data);
-- spin_lock_irq(&base->lock);
-- }
-- }
-+ while (levels--)
-+ expire_timers(base, heads + levels);
- }
- base->running_timer = NULL;
- spin_unlock_irq(&base->lock);
-@@ -1233,78 +1339,87 @@ static inline void __run_timers(struct t
-
- #ifdef CONFIG_NO_HZ_COMMON
- /*
-- * Find out when the next timer event is due to happen. This
-- * is used on S/390 to stop all activity when a CPU is idle.
-- * This function needs to be called with interrupts disabled.
-+ * Find the next pending bucket of a level. Search from @offset + @clk upwards
-+ * and if nothing there, search from start of the level (@offset) up to
-+ * @offset + clk.
-+ */
-+static int next_pending_bucket(struct timer_base *base, unsigned offset,
-+ unsigned clk)
-+{
-+ unsigned pos, start = offset + clk;
-+ unsigned end = offset + LVL_SIZE;
-+
-+ pos = find_next_bit(base->pending_map, end, start);
-+ if (pos < end)
-+ return pos - start;
-+
-+ pos = find_next_bit(base->pending_map, start, offset);
-+ return pos < start ? pos + LVL_SIZE - start : -1;
-+}
-+
-+/*
-+ * Search the first expiring timer in the various clock levels.
- */
- static unsigned long __next_timer_interrupt(struct timer_base *base)
- {
-- unsigned long clk = base->clk;
-- unsigned long expires = clk + NEXT_TIMER_MAX_DELTA;
-- int index, slot, array, found = 0;
-- struct timer_list *nte;
-- struct tvec *varray[4];
--
-- /* Look for timer events in tv1. */
-- index = slot = clk & TVR_MASK;
-- do {
-- hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
-- if (nte->flags & TIMER_DEFERRABLE)
-- continue;
--
-- found = 1;
-- expires = nte->expires;
-- /* Look at the cascade bucket(s)? */
-- if (!index || slot < index)
-- goto cascade;
-- return expires;
-- }
-- slot = (slot + 1) & TVR_MASK;
-- } while (slot != index);
-+ unsigned long clk, next, adj;
-+ unsigned lvl, offset = 0;
-
--cascade:
-- /* Calculate the next cascade event */
-- if (index)
-- clk += TVR_SIZE - index;
-- clk >>= TVR_BITS;
--
-- /* Check tv2-tv5. */
-- varray[0] = &base->tv2;
-- varray[1] = &base->tv3;
-- varray[2] = &base->tv4;
-- varray[3] = &base->tv5;
--
-- for (array = 0; array < 4; array++) {
-- struct tvec *varp = varray[array];
--
-- index = slot = clk & TVN_MASK;
-- do {
-- hlist_for_each_entry(nte, varp->vec + slot, entry) {
-- if (nte->flags & TIMER_DEFERRABLE)
-- continue;
--
-- found = 1;
-- if (time_before(nte->expires, expires))
-- expires = nte->expires;
-- }
-- /*
-- * Do we still search for the first timer or are
-- * we looking up the cascade buckets ?
-- */
-- if (found) {
-- /* Look at the cascade bucket(s)? */
-- if (!index || slot < index)
-- break;
-- return expires;
-- }
-- slot = (slot + 1) & TVN_MASK;
-- } while (slot != index);
--
-- if (index)
-- clk += TVN_SIZE - index;
-- clk >>= TVN_BITS;
-+ spin_lock(&base->lock);
-+ next = base->clk + NEXT_TIMER_MAX_DELTA;
-+ clk = base->clk;
-+ for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
-+ int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
-+
-+ if (pos >= 0) {
-+ unsigned long tmp = clk + (unsigned long) pos;
-+
-+ tmp <<= LVL_SHIFT(lvl);
-+ if (time_before(tmp, next))
-+ next = tmp;
-+ }
-+ /*
-+ * Clock for the next level. If the current level clock lower
-+ * bits are zero, we look at the next level as is. If not we
-+ * need to advance it by one because that's going to be the
-+ * next expiring bucket in that level. base->clk is the next
-+ * expiring jiffie. So in case of:
-+ *
-+ * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
-+ * 0 0 0 0 0 0
-+ *
-+ * we have to look at all levels @index 0. With
-+ *
-+ * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
-+ * 0 0 0 0 0 2
-+ *
-+ * LVL0 has the next expiring bucket @index 2. The upper
-+ * levels have the next expiring bucket @index 1.
-+ *
-+ * In case that the propagation wraps the next level the same
-+ * rules apply:
-+ *
-+ * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
-+ * 0 0 0 0 F 2
-+ *
-+ * So after looking at LVL0 we get:
-+ *
-+ * LVL5 LVL4 LVL3 LVL2 LVL1
-+ * 0 0 0 1 0
-+ *
-+ * So no propagation from LVL1 to LVL2 because that happened
-+ * with the add already, but then we need to propagate further
-+ * from LVL2 to LVL3.
-+ *
-+ * So the simple check whether the lower bits of the current
-+ * level are 0 or not is sufficient for all cases.
-+ */
-+ adj = clk & LVL_CLK_MASK ? 1 : 0;
-+ clk >>= LVL_CLK_SHIFT;
-+ clk += adj;
- }
-- return expires;
-+ spin_unlock(&base->lock);
-+ return next;
- }
-
- /*
-@@ -1350,7 +1465,7 @@ static u64 cmp_next_hrtimer_event(u64 ba
- */
- u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
- {
-- struct timer_base *base = this_cpu_ptr(&timer_bases);
-+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
- u64 expires = KTIME_MAX;
- unsigned long nextevt;
-
-@@ -1361,17 +1476,11 @@ u64 get_next_timer_interrupt(unsigned lo
- if (cpu_is_offline(smp_processor_id()))
- return expires;
-
-- spin_lock(&base->lock);
-- if (base->active_timers) {
-- if (time_before_eq(base->next_timer, base->clk))
-- base->next_timer = __next_timer_interrupt(base);
-- nextevt = base->next_timer;
-- if (time_before_eq(nextevt, basej))
-- expires = basem;
-- else
-- expires = basem + (nextevt - basej) * TICK_NSEC;
-- }
-- spin_unlock(&base->lock);
-+ nextevt = __next_timer_interrupt(base);
-+ if (time_before_eq(nextevt, basej))
-+ expires = basem;
-+ else
-+ expires = basem + (nextevt - basej) * TICK_NSEC;
-
- return cmp_next_hrtimer_event(basem, expires);
- }
-@@ -1402,10 +1511,11 @@ void update_process_times(int user_tick)
- */
- static void run_timer_softirq(struct softirq_action *h)
- {
-- struct timer_base *base = this_cpu_ptr(&timer_bases);
-+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
-
-- if (time_after_eq(jiffies, base->clk))
-- __run_timers(base);
-+ __run_timers(base);
-+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
-+ __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
- }
-
- /*
-@@ -1556,7 +1666,6 @@ static void migrate_timer_list(struct ti
-
- while (!hlist_empty(head)) {
- timer = hlist_entry(head->first, struct timer_list, entry);
-- /* We ignore the accounting on the dying cpu */
- detach_timer(timer, false);
- timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
- internal_add_timer(new_base, timer);
-@@ -1567,35 +1676,29 @@ static void migrate_timers(int cpu)
- {
- struct timer_base *old_base;
- struct timer_base *new_base;
-- int i;
-+ int b, i;
-
- BUG_ON(cpu_online(cpu));
-- old_base = per_cpu_ptr(&timer_bases, cpu);
-- new_base = get_cpu_ptr(&timer_bases);
-- /*
-- * The caller is globally serialized and nobody else
-- * takes two locks at once, deadlock is not possible.
-- */
-- spin_lock_irq(&new_base->lock);
-- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-
-- BUG_ON(old_base->running_timer);
-+ for (b = 0; b < NR_BASES; b++) {
-+ old_base = per_cpu_ptr(&timer_bases[b], cpu);
-+ new_base = get_cpu_ptr(&timer_bases[b]);
-+ /*
-+ * The caller is globally serialized and nobody else
-+ * takes two locks at once, deadlock is not possible.
-+ */
-+ spin_lock_irq(&new_base->lock);
-+ spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-+
-+ BUG_ON(old_base->running_timer);
-+
-+ for (i = 0; i < WHEEL_SIZE; i++)
-+ migrate_timer_list(new_base, old_base->vectors + i);
-
-- for (i = 0; i < TVR_SIZE; i++)
-- migrate_timer_list(new_base, old_base->tv1.vec + i);
-- for (i = 0; i < TVN_SIZE; i++) {
-- migrate_timer_list(new_base, old_base->tv2.vec + i);
-- migrate_timer_list(new_base, old_base->tv3.vec + i);
-- migrate_timer_list(new_base, old_base->tv4.vec + i);
-- migrate_timer_list(new_base, old_base->tv5.vec + i);
-- }
--
-- old_base->active_timers = 0;
-- old_base->all_timers = 0;
--
-- spin_unlock(&old_base->lock);
-- spin_unlock_irq(&new_base->lock);
-- put_cpu_ptr(&timer_bases);
-+ spin_unlock(&old_base->lock);
-+ spin_unlock_irq(&new_base->lock);
-+ put_cpu_ptr(&timer_bases);
-+ }
- }
-
- static int timer_cpu_notify(struct notifier_block *self,
-@@ -1623,13 +1726,15 @@ static inline void timer_register_cpu_no
-
- static void __init init_timer_cpu(int cpu)
- {
-- struct timer_base *base = per_cpu_ptr(&timer_bases, cpu);
--
-- base->cpu = cpu;
-- spin_lock_init(&base->lock);
-+ struct timer_base *base;
-+ int i;
-
-- base->clk = jiffies;
-- base->next_timer = base->clk;
-+ for (i = 0; i < NR_BASES; i++) {
-+ base = per_cpu_ptr(&timer_bases[i], cpu);
-+ base->cpu = cpu;
-+ spin_lock_init(&base->lock);
-+ base->clk = jiffies;
-+ }
- }
-
- static void __init init_timer_cpus(void)
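
The long comment in the hunk above describes how __next_timer_interrupt() propagates the clock to the next wheel level. The following is a minimal user-space sketch of that propagation rule only; the LVL_* values are illustrative assumptions, not the kernel's real constants:

    #include <stdio.h>

    /* Illustrative values, not the kernel's configuration. */
    #define LVL_CLK_SHIFT  3
    #define LVL_CLK_MASK   ((1UL << LVL_CLK_SHIFT) - 1)
    #define LVL_DEPTH      4

    int main(void)
    {
        unsigned long clk = 0x12;   /* example: level-0 lower bits are non-zero */
        unsigned int lvl;

        for (lvl = 0; lvl < LVL_DEPTH; lvl++) {
            unsigned long adj;

            printf("level %u scans from clock %#lx\n", lvl, clk);
            /*
             * If the lower bits are non-zero, the next expiring bucket
             * of the upper level is one beyond the current position, so
             * the clock is advanced by one after the shift - the "adj"
             * step in the hunk above.
             */
            adj = (clk & LVL_CLK_MASK) ? 1 : 0;
            clk >>= LVL_CLK_SHIFT;
            clk += adj;
        }
        return 0;
    }
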
diff --git a/patches/timer-add-setup_deferrable_timer-macro.patch b/patches/timer-add-setup_deferrable_timer-macro.patch
deleted file mode 100644
index 3c7546c54ef447..00000000000000
--- a/patches/timer-add-setup_deferrable_timer-macro.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From: Lucas Stach <l.stach@pengutronix.de>
-Date: Tue, 12 Jan 2016 18:17:19 +0100
-Subject: [PATCH] timer: add setup_deferrable_timer macro
-
-Upstream commit 6f3ffc19157a14b182d9d0c449cd613cef421fe1
-
-Add the trivial missing macro to setup a deferrable timer.
-
-Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
-Acked-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/timer.h | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/include/linux/timer.h
-+++ b/include/linux/timer.h
-@@ -145,6 +145,8 @@ static inline void init_timer_on_stack_k
-
- #define setup_timer(timer, fn, data) \
- __setup_timer((timer), (fn), (data), 0)
-+#define setup_deferrable_timer(timer, fn, data) \
-+ __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
- #define setup_timer_on_stack(timer, fn, data) \
- __setup_timer_on_stack((timer), (fn), (data), 0)
- #define setup_deferrable_timer_on_stack(timer, fn, data) \
diff --git a/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
index beb893e15ac260..eb017a0764b408 100644
--- a/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
+++ b/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1639,13 +1639,13 @@ void update_process_times(int user_tick)
+@@ -1627,13 +1627,13 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
diff --git a/patches/timer-fd-avoid-live-lock.patch b/patches/timer-fd-avoid-live-lock.patch
index 8dc713c97cafd5..09ae935ca52ab3 100644
--- a/patches/timer-fd-avoid-live-lock.patch
+++ b/patches/timer-fd-avoid-live-lock.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
-@@ -450,7 +450,10 @@ static int do_timerfd_settime(int ufd, i
+@@ -460,7 +460,10 @@ static int do_timerfd_settime(int ufd, i
break;
}
spin_unlock_irq(&ctx->wqh.lock);
diff --git a/patches/timer-make-the-base-lock-raw.patch b/patches/timer-make-the-base-lock-raw.patch
index 937534e3789c4b..d5dd95ab9efca4 100644
--- a/patches/timer-make-the-base-lock-raw.patch
+++ b/patches/timer-make-the-base-lock-raw.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct timer_list *running_timer;
unsigned long clk;
unsigned long next_expiry;
-@@ -962,10 +962,10 @@ static struct timer_base *lock_timer_bas
+@@ -947,10 +947,10 @@ static struct timer_base *lock_timer_bas
if (!(tf & TIMER_MIGRATING)) {
base = get_timer_base(tf);
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
cpu_relax();
}
-@@ -1032,9 +1032,9 @@ static inline int
+@@ -1017,9 +1017,9 @@ static inline int
/* See the comment in lock_timer_base() */
timer->flags |= TIMER_MIGRATING;
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | base->cpu);
}
-@@ -1055,7 +1055,7 @@ static inline int
+@@ -1040,7 +1040,7 @@ static inline int
}
out_unlock:
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -1149,16 +1149,16 @@ void add_timer_on(struct timer_list *tim
+@@ -1134,16 +1134,16 @@ void add_timer_on(struct timer_list *tim
if (base != new_base) {
timer->flags |= TIMER_MIGRATING;
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(add_timer_on);
-@@ -1185,7 +1185,7 @@ int del_timer(struct timer_list *timer)
+@@ -1170,7 +1170,7 @@ int del_timer(struct timer_list *timer)
if (timer_pending(timer)) {
base = lock_timer_base(timer, &flags);
ret = detach_if_pending(timer, base, true);
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
return ret;
-@@ -1213,7 +1213,7 @@ int try_to_del_timer_sync(struct timer_l
+@@ -1198,7 +1198,7 @@ int try_to_del_timer_sync(struct timer_l
timer_stats_timer_clear_start_info(timer);
ret = detach_if_pending(timer, base, true);
}
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -1345,13 +1345,13 @@ static void expire_timers(struct timer_b
+@@ -1330,13 +1330,13 @@ static void expire_timers(struct timer_b
data = timer->data;
if (timer->flags & TIMER_IRQSAFE) {
@@ -112,16 +112,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
}
-@@ -1519,7 +1519,7 @@ u64 get_next_timer_interrupt(unsigned lo
+@@ -1505,7 +1505,7 @@ u64 get_next_timer_interrupt(unsigned lo
if (cpu_is_offline(smp_processor_id()))
return expires;
- spin_lock(&base->lock);
+ raw_spin_lock(&base->lock);
nextevt = __next_timer_interrupt(base);
+ is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
base->next_expiry = nextevt;
- /*
-@@ -1541,7 +1541,7 @@ u64 get_next_timer_interrupt(unsigned lo
+@@ -1529,7 +1529,7 @@ u64 get_next_timer_interrupt(unsigned lo
if ((expires - basem) > TICK_NSEC)
base->is_idle = true;
}
@@ -130,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return cmp_next_hrtimer_event(basem, expires);
}
-@@ -1628,7 +1628,7 @@ static inline void __run_timers(struct t
+@@ -1616,7 +1616,7 @@ static inline void __run_timers(struct t
if (!time_after_eq(jiffies, base->clk))
return;
@@ -139,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (time_after_eq(jiffies, base->clk)) {
-@@ -1639,7 +1639,7 @@ static inline void __run_timers(struct t
+@@ -1627,7 +1627,7 @@ static inline void __run_timers(struct t
expire_timers(base, heads + levels);
}
base->running_timer = NULL;
@@ -148,7 +148,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1834,16 +1834,16 @@ static void migrate_timers(int cpu)
+@@ -1822,16 +1822,16 @@ int timers_dead_cpu(unsigned int cpu)
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
@@ -168,8 +168,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ raw_spin_unlock_irq(&new_base->lock);
put_cpu_ptr(&timer_bases);
}
- }
-@@ -1879,7 +1879,7 @@ static void __init init_timer_cpu(int cp
+ return 0;
+@@ -1847,7 +1847,7 @@ static void __init init_timer_cpu(int cp
for (i = 0; i < NR_BASES; i++) {
base = per_cpu_ptr(&timer_bases[i], cpu);
base->cpu = cpu;
diff --git a/patches/timers-prepare-for-full-preemption.patch b/patches/timers-prepare-for-full-preemption.patch
index 328409275a6a1d..e91c8284943b81 100644
--- a/patches/timers-prepare-for-full-preemption.patch
+++ b/patches/timers-prepare-for-full-preemption.patch
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define del_timer_sync(t) del_timer(t)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -490,11 +490,14 @@ void resched_cpu(int cpu)
+@@ -525,11 +525,14 @@ void resched_cpu(int cpu)
*/
int get_nohz_timer_target(void)
{
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
for_each_domain(cpu, sd) {
-@@ -510,6 +513,8 @@ int get_nohz_timer_target(void)
+@@ -548,6 +551,8 @@ int get_nohz_timer_target(void)
cpu = housekeeping_any_cpu();
unlock:
rcu_read_unlock();
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned long clk;
unsigned long next_expiry;
unsigned int cpu;
-@@ -1162,6 +1165,33 @@ void add_timer_on(struct timer_list *tim
+@@ -1147,6 +1150,33 @@ void add_timer_on(struct timer_list *tim
}
EXPORT_SYMBOL_GPL(add_timer_on);
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* del_timer - deactive a timer.
* @timer: the timer to be deactivated
-@@ -1219,7 +1249,7 @@ int try_to_del_timer_sync(struct timer_l
+@@ -1204,7 +1234,7 @@ int try_to_del_timer_sync(struct timer_l
}
EXPORT_SYMBOL(try_to_del_timer_sync);
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
-@@ -1279,7 +1309,7 @@ int del_timer_sync(struct timer_list *ti
+@@ -1264,7 +1294,7 @@ int del_timer_sync(struct timer_list *ti
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL(del_timer_sync);
-@@ -1344,13 +1374,16 @@ static void expire_timers(struct timer_b
+@@ -1329,13 +1359,16 @@ static void expire_timers(struct timer_b
fn = timer->function;
data = timer->data;
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_lock_irq(&base->lock);
}
}
-@@ -1638,8 +1671,8 @@ static inline void __run_timers(struct t
+@@ -1626,8 +1659,8 @@ static inline void __run_timers(struct t
while (levels--)
expire_timers(base, heads + levels);
}
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1881,6 +1914,9 @@ static void __init init_timer_cpu(int cp
+@@ -1849,6 +1882,9 @@ static void __init init_timer_cpu(int cp
base->cpu = cpu;
raw_spin_lock_init(&base->lock);
base->clk = jiffies;
diff --git a/patches/tracing-Show-the-preempt-count-of-when-the-event-was.patch b/patches/tracing-Show-the-preempt-count-of-when-the-event-was.patch
deleted file mode 100644
index 552c50520ece10..00000000000000
--- a/patches/tracing-Show-the-preempt-count-of-when-the-event-was.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
-Date: Fri, 17 Jun 2016 17:40:58 -0400
-Subject: [PATCH] tracing: Show the preempt count of when the event was called
-
-Upstream commit e947841c0dce9db675a957182214ef8091ac3d61
-
-Because tracepoint callbacks are done with preemption enabled, the trace
-events are always called with preempt disable due to the
-rcu_read_lock_sched_notrace() in __DO_TRACE(). This causes the preempt count
-shown in the recorded trace event to be inaccurate. It is always one more
-than what the preempt_count was when the tracepoint was called.
-
-If CONFIG_PREEMPT is enabled, subtract 1 from the preempt_count before
-recording it in the trace buffer.
-
-Link: http://lkml.kernel.org/r/20160525132537.GA10808@linutronix.de
-
-Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/trace/trace_events.c | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
---- a/kernel/trace/trace_events.c
-+++ b/kernel/trace/trace_events.c
-@@ -244,6 +244,14 @@ void *trace_event_buffer_reserve(struct
-
- local_save_flags(fbuffer->flags);
- fbuffer->pc = preempt_count();
-+ /*
-+ * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
-+ * preemption (adding one to the preempt_count). Since we are
-+ * interested in the preempt_count at the time the tracepoint was
-+ * hit, we need to subtract one to offset the increment.
-+ */
-+ if (IS_ENABLED(CONFIG_PREEMPT))
-+ fbuffer->pc--;
- fbuffer->trace_file = trace_file;
-
- fbuffer->event =
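
The patch dropped above (now upstream) compensates for __DO_TRACE() disabling preemption around the callback. A hedged user-space illustration of that adjustment, with the CONFIG_PREEMPT test reduced to a plain macro:

    #include <stdio.h>

    #define CONFIG_PREEMPT 1    /* assumption for the example */

    /*
     * The tracepoint wrapper raises the preempt count by one around the
     * callback, so the count seen inside the callback is one higher than
     * at the call site; subtract that increment before recording.
     */
    static int recorded_preempt_count(int pc_in_callback)
    {
    #if CONFIG_PREEMPT
        return pc_in_callback - 1;
    #else
        return pc_in_callback;
    #endif
    }

    int main(void)
    {
        /* the callback observes 1 although the caller ran with count 0 */
        printf("recorded preempt count: %d\n", recorded_preempt_count(1));
        return 0;
    }
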
diff --git a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
index baa26ea6eb2146..4ff67b8b0237b6 100644
--- a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3348,7 +3348,16 @@ asmlinkage __visible void __sched notrac
+@@ -3553,7 +3553,16 @@ asmlinkage __visible void __sched notrac
* an infinite recursion.
*/
prev_ctx = exception_enter();
@@ -43,4 +43,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ stop_critical_timings();
exception_exit(prev_ctx);
- preempt_enable_no_resched_notrace();
+ preempt_latency_stop(1);
diff --git a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
index 231ef6120cde00..79e6e2003c1e3c 100644
--- a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
+++ b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -3092,10 +3092,8 @@ void serial8250_console_write(struct uar
+@@ -3110,10 +3110,8 @@ void serial8250_console_write(struct uar
serial8250_rpm_get(up);
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 9bb3dbc133ddde..ffe88806eb1d4b 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3790,7 +3790,7 @@ static int netif_rx_internal(struct sk_b
+@@ -3803,7 +3803,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3800,13 +3800,13 @@ static int netif_rx_internal(struct sk_b
+@@ -3813,13 +3813,13 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
diff --git a/patches/usb-use-_nort-in-giveback.patch b/patches/usb-use-_nort-in-giveback.patch
index b8bb3df0d20e31..6469b6d2608774 100644
--- a/patches/usb-use-_nort-in-giveback.patch
+++ b/patches/usb-use-_nort-in-giveback.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
-@@ -1759,9 +1759,9 @@ static void __usb_hcd_giveback_urb(struc
+@@ -1760,9 +1760,9 @@ static void __usb_hcd_giveback_urb(struc
* and no one may trigger the above deadlock situation when
* running complete() in tasklet.
*/
diff --git a/patches/work-queue-work-around-irqsafe-timer-optimization.patch b/patches/work-queue-work-around-irqsafe-timer-optimization.patch
index 5eef1d053465cb..7587a4e5c40752 100644
--- a/patches/work-queue-work-around-irqsafe-timer-optimization.patch
+++ b/patches/work-queue-work-around-irqsafe-timer-optimization.patch
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "workqueue_internal.h"
-@@ -1303,7 +1304,7 @@ static int try_to_grab_pending(struct wo
+@@ -1277,7 +1278,7 @@ static int try_to_grab_pending(struct wo
local_unlock_irqrestore(pendingb_lock, *flags);
if (work_is_canceling(work))
return -ENOENT;
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index 851783abab28fe..5cc25b8c4a9379 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1626,10 +1626,6 @@ static inline void ttwu_activate(struct
+@@ -1701,10 +1701,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2032,53 +2028,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2143,53 +2139,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
--static void try_to_wake_up_local(struct task_struct *p)
+-static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
-{
- struct rq *rq = task_rq(p);
-
@@ -70,11 +70,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- * disabled avoiding further scheduler activity on it and we've
- * not yet picked a replacement task.
- */
-- lockdep_unpin_lock(&rq->lock);
+- lockdep_unpin_lock(&rq->lock, cookie);
- raw_spin_unlock(&rq->lock);
- raw_spin_lock(&p->pi_lock);
- raw_spin_lock(&rq->lock);
-- lockdep_pin_lock(&rq->lock);
+- lockdep_repin_lock(&rq->lock, cookie);
- }
-
- if (!(p->state & TASK_NORMAL))
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- if (!task_on_rq_queued(p))
- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-
-- ttwu_do_wakeup(rq, p, 0);
+- ttwu_do_wakeup(rq, p, 0, cookie);
- if (schedstat_enabled())
- ttwu_stat(p, smp_processor_id(), 0);
-out:
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3323,21 +3272,6 @@ static void __sched notrace __schedule(b
+@@ -3499,21 +3448,6 @@ static void __sched notrace __schedule(b
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
@@ -113,12 +113,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
- to_wakeup = wq_worker_sleeping(prev);
- if (to_wakeup)
-- try_to_wake_up_local(to_wakeup);
+- try_to_wake_up_local(to_wakeup, cookie);
- }
}
switch_count = &prev->nvcsw;
}
-@@ -3370,6 +3304,14 @@ static inline void sched_submit_work(str
+@@ -3546,6 +3480,14 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3378,6 +3320,12 @@ static inline void sched_submit_work(str
+@@ -3554,6 +3496,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3388,6 +3336,7 @@ asmlinkage __visible void __sched schedu
+@@ -3564,6 +3512,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
@@ -156,7 +156,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -867,43 +867,32 @@ static void wake_up_worker(struct worker
+@@ -841,43 +841,32 @@ static void wake_up_worker(struct worker
}
/**
@@ -211,7 +211,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct worker_pool *pool;
/*
-@@ -912,13 +901,15 @@ struct task_struct *wq_worker_sleeping(s
+@@ -886,13 +875,15 @@ struct task_struct *wq_worker_sleeping(s
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
@@ -231,7 +231,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The counterpart of the following dec_and_test, implied mb,
-@@ -932,9 +923,12 @@ struct task_struct *wq_worker_sleeping(s
+@@ -906,9 +897,12 @@ struct task_struct *wq_worker_sleeping(s
* lock is safe.
*/
if (atomic_dec_and_test(&pool->nr_running) &&
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index 0ab3ad3d373b49..c89b904fd1ae99 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3418,9 +3418,8 @@ STACK_FRAME_NON_STANDARD(__schedule); /*
+@@ -3594,9 +3594,8 @@ STACK_FRAME_NON_STANDARD(__schedule); /*
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3428,6 +3427,10 @@ static inline void sched_submit_work(str
+@@ -3604,6 +3603,10 @@ static inline void sched_submit_work(str
if (tsk->flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk);
@@ -111,7 +111,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
-@@ -860,10 +890,16 @@ static struct worker *first_idle_worker(
+@@ -834,10 +864,16 @@ static struct worker *first_idle_worker(
*/
static void wake_up_worker(struct worker_pool *pool)
{
@@ -129,7 +129,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
/**
-@@ -892,7 +928,7 @@ void wq_worker_running(struct task_struc
+@@ -866,7 +902,7 @@ void wq_worker_running(struct task_struc
*/
void wq_worker_sleeping(struct task_struct *task)
{
@@ -138,7 +138,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
struct worker_pool *pool;
/*
-@@ -909,26 +945,18 @@ void wq_worker_sleeping(struct task_stru
+@@ -883,26 +919,18 @@ void wq_worker_sleeping(struct task_stru
return;
worker->sleeping = 1;
@@ -168,7 +168,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
/**
-@@ -1655,7 +1683,9 @@ static void worker_enter_idle(struct wor
+@@ -1629,7 +1657,9 @@ static void worker_enter_idle(struct wor
worker->last_active = jiffies;
/* idle_list is LIFO */
@@ -178,7 +178,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1688,7 +1718,9 @@ static void worker_leave_idle(struct wor
+@@ -1662,7 +1692,9 @@ static void worker_leave_idle(struct wor
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
@@ -188,7 +188,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
static struct worker *alloc_worker(int node)
-@@ -1854,7 +1886,9 @@ static void destroy_worker(struct worker
+@@ -1828,7 +1860,9 @@ static void destroy_worker(struct worker
pool->nr_workers--;
pool->nr_idle--;
diff --git a/patches/workqueue-use-locallock.patch b/patches/workqueue-use-locallock.patch
index b44d6da0bc1780..e03df16434a898 100644
--- a/patches/workqueue-use-locallock.patch
+++ b/patches/workqueue-use-locallock.patch
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
-@@ -1127,9 +1130,9 @@ static void put_pwq_unlocked(struct pool
+@@ -1101,9 +1104,9 @@ static void put_pwq_unlocked(struct pool
* As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -1233,7 +1236,7 @@ static int try_to_grab_pending(struct wo
+@@ -1207,7 +1210,7 @@ static int try_to_grab_pending(struct wo
struct worker_pool *pool;
struct pool_workqueue *pwq;
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* try to steal the timer if it exists */
if (is_dwork) {
-@@ -1297,7 +1300,7 @@ static int try_to_grab_pending(struct wo
+@@ -1271,7 +1274,7 @@ static int try_to_grab_pending(struct wo
spin_unlock(&pool->lock);
fail:
rcu_read_unlock();
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (work_is_canceling(work))
return -ENOENT;
cpu_relax();
-@@ -1402,7 +1405,7 @@ static void __queue_work(int cpu, struct
+@@ -1376,7 +1379,7 @@ static void __queue_work(int cpu, struct
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
debug_work_activate(work);
-@@ -1508,14 +1511,14 @@ bool queue_work_on(int cpu, struct workq
+@@ -1482,14 +1485,14 @@ bool queue_work_on(int cpu, struct workq
bool ret = false;
unsigned long flags;
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(queue_work_on);
-@@ -1582,14 +1585,14 @@ bool queue_delayed_work_on(int cpu, stru
+@@ -1556,14 +1559,14 @@ bool queue_delayed_work_on(int cpu, stru
unsigned long flags;
/* read the comment in __queue_work() */
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1624,7 +1627,7 @@ bool mod_delayed_work_on(int cpu, struct
+@@ -1598,7 +1601,7 @@ bool mod_delayed_work_on(int cpu, struct
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -2942,7 +2945,7 @@ static bool __cancel_work_timer(struct w
+@@ -2916,7 +2919,7 @@ static bool __cancel_work_timer(struct w
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
flush_work(work);
clear_work_data(work);
-@@ -2997,10 +3000,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -2971,10 +2974,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -3035,7 +3038,7 @@ bool cancel_delayed_work(struct delayed_
+@@ -3009,7 +3012,7 @@ bool cancel_delayed_work(struct delayed_
set_work_pool_and_clear_pending(&dwork->work,
get_work_pool_id(&dwork->work));
diff --git a/patches/workqueue-use-rcu.patch b/patches/workqueue-use-rcu.patch
index dfa04901b6ee93..43499efa3fd590 100644
--- a/patches/workqueue-use-rcu.patch
+++ b/patches/workqueue-use-rcu.patch
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
-@@ -574,7 +574,7 @@ static int worker_pool_assign_id(struct
+@@ -548,7 +548,7 @@ static int worker_pool_assign_id(struct
* @wq: the target workqueue
* @node: the node ID
*
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
-@@ -718,8 +718,8 @@ static struct pool_workqueue *get_work_p
+@@ -692,8 +692,8 @@ static struct pool_workqueue *get_work_p
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
-@@ -1124,7 +1124,7 @@ static void put_pwq_unlocked(struct pool
+@@ -1098,7 +1098,7 @@ static void put_pwq_unlocked(struct pool
{
if (pwq) {
/*
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* following lock operations are safe.
*/
spin_lock_irq(&pwq->pool->lock);
-@@ -1252,6 +1252,7 @@ static int try_to_grab_pending(struct wo
+@@ -1226,6 +1226,7 @@ static int try_to_grab_pending(struct wo
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
@@ -132,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1290,10 +1291,12 @@ static int try_to_grab_pending(struct wo
+@@ -1264,10 +1265,12 @@ static int try_to_grab_pending(struct wo
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_restore(*flags);
if (work_is_canceling(work))
return -ENOENT;
-@@ -1407,6 +1410,7 @@ static void __queue_work(int cpu, struct
+@@ -1381,6 +1384,7 @@ static void __queue_work(int cpu, struct
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
@@ -153,7 +153,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-@@ -1463,10 +1467,8 @@ static void __queue_work(int cpu, struct
+@@ -1437,10 +1441,8 @@ static void __queue_work(int cpu, struct
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
@@ -166,7 +166,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
-@@ -1484,7 +1486,9 @@ static void __queue_work(int cpu, struct
+@@ -1458,7 +1460,9 @@ static void __queue_work(int cpu, struct
insert_work(pwq, work, worklist, work_flags);
@@ -176,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -2811,14 +2815,14 @@ static bool start_flush_work(struct work
+@@ -2785,14 +2789,14 @@ static bool start_flush_work(struct work
might_sleep();
@@ -194,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -2847,10 +2851,11 @@ static bool start_flush_work(struct work
+@@ -2821,10 +2825,11 @@ static bool start_flush_work(struct work
else
lock_map_acquire_read(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
@@ -207,7 +207,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return false;
}
-@@ -3259,7 +3264,7 @@ static void rcu_free_pool(struct rcu_hea
+@@ -3233,7 +3238,7 @@ static void rcu_free_pool(struct rcu_hea
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
@@ -216,7 +216,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
-@@ -3313,8 +3318,8 @@ static void put_unbound_pool(struct work
+@@ -3287,8 +3292,8 @@ static void put_unbound_pool(struct work
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
@@ -227,7 +227,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -3421,14 +3426,14 @@ static void pwq_unbound_release_workfn(s
+@@ -3395,14 +3400,14 @@ static void pwq_unbound_release_workfn(s
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
@@ -244,7 +244,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -4078,7 +4083,7 @@ void destroy_workqueue(struct workqueue_
+@@ -4052,7 +4057,7 @@ void destroy_workqueue(struct workqueue_
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
@@ -253,7 +253,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
/*
* We're the sole accessor of @wq at this point. Directly
-@@ -4171,7 +4176,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4145,7 +4150,8 @@ bool workqueue_congested(int cpu, struct
struct pool_workqueue *pwq;
bool ret;
@@ -263,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
-@@ -4182,7 +4188,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4156,7 +4162,8 @@ bool workqueue_congested(int cpu, struct
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
@@ -273,7 +273,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4208,15 +4215,15 @@ unsigned int work_busy(struct work_struc
+@@ -4182,15 +4189,15 @@ unsigned int work_busy(struct work_struc
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
@@ -293,7 +293,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4405,7 +4412,7 @@ void show_workqueue_state(void)
+@@ -4379,7 +4386,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;
@@ -302,7 +302,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_info("Showing busy workqueues and worker pools:\n");
-@@ -4458,7 +4465,7 @@ void show_workqueue_state(void)
+@@ -4432,7 +4439,7 @@ void show_workqueue_state(void)
spin_unlock_irqrestore(&pool->lock, flags);
}
@@ -311,7 +311,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -4819,16 +4826,16 @@ bool freeze_workqueues_busy(void)
+@@ -4770,16 +4777,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
-@@ -5018,7 +5025,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -4969,7 +4976,8 @@ static ssize_t wq_pool_ids_show(struct d
const char *delim = "";
int node, written = 0;
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
-@@ -5026,7 +5034,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -4977,7 +4985,8 @@ static ssize_t wq_pool_ids_show(struct d
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
diff --git a/patches/x86-UV-raw_spinlock-conversion.patch b/patches/x86-UV-raw_spinlock-conversion.patch
index 11869e7c3aaf38..d25fdfc7cfef4b 100644
--- a/patches/x86-UV-raw_spinlock-conversion.patch
+++ b/patches/x86-UV-raw_spinlock-conversion.patch
@@ -8,12 +8,10 @@ Shrug. Lots of hobbyists have a beast in their basement, right?
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/x86/include/asm/uv/uv_bau.h | 14 +++++++-------
- arch/x86/include/asm/uv/uv_hub.h | 2 +-
- arch/x86/kernel/apic/x2apic_uv_x.c | 2 +-
- arch/x86/platform/uv/tlb_uv.c | 26 +++++++++++++-------------
- arch/x86/platform/uv/uv_time.c | 21 +++++++++++++--------
- 5 files changed, 35 insertions(+), 30 deletions(-)
+ arch/x86/include/asm/uv/uv_bau.h | 14 +++++++-------
+ arch/x86/platform/uv/tlb_uv.c | 26 +++++++++++++-------------
+ arch/x86/platform/uv/uv_time.c | 21 +++++++++++++--------
+ 3 files changed, 33 insertions(+), 28 deletions(-)
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -50,31 +48,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 1;
}
---- a/arch/x86/include/asm/uv/uv_hub.h
-+++ b/arch/x86/include/asm/uv/uv_hub.h
-@@ -492,7 +492,7 @@ struct uv_blade_info {
- unsigned short nr_online_cpus;
- unsigned short pnode;
- short memory_nid;
-- spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */
-+ raw_spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */
- unsigned long nmi_count; /* obsolete, see uv_hub_nmi */
- };
- extern struct uv_blade_info *uv_blade_info;
---- a/arch/x86/kernel/apic/x2apic_uv_x.c
-+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
-@@ -950,7 +950,7 @@ void __init uv_system_init(void)
- uv_blade_info[blade].pnode = pnode;
- uv_blade_info[blade].nr_possible_cpus = 0;
- uv_blade_info[blade].nr_online_cpus = 0;
-- spin_lock_init(&uv_blade_info[blade].nmi_lock);
-+ raw_spin_lock_init(&uv_blade_info[blade].nmi_lock);
- min_pnode = min(pnode, min_pnode);
- max_pnode = max(pnode, max_pnode);
- blade++;
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
-@@ -714,9 +714,9 @@ static void destination_plugged(struct b
+@@ -729,9 +729,9 @@ static void destination_plugged(struct b
quiesce_local_uvhub(hmaster);
@@ -86,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
end_uvhub_quiesce(hmaster);
-@@ -736,9 +736,9 @@ static void destination_timeout(struct b
+@@ -751,9 +751,9 @@ static void destination_timeout(struct b
quiesce_local_uvhub(hmaster);
@@ -98,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
end_uvhub_quiesce(hmaster);
-@@ -759,7 +759,7 @@ static void disable_for_period(struct ba
+@@ -774,7 +774,7 @@ static void disable_for_period(struct ba
cycles_t tm1;
hmaster = bcp->uvhub_master;
@@ -107,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!bcp->baudisabled) {
stat->s_bau_disabled++;
tm1 = get_cycles();
-@@ -772,7 +772,7 @@ static void disable_for_period(struct ba
+@@ -787,7 +787,7 @@ static void disable_for_period(struct ba
}
}
}
@@ -116,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void count_max_concurr(int stat, struct bau_control *bcp,
-@@ -835,7 +835,7 @@ static void record_send_stats(cycles_t t
+@@ -850,7 +850,7 @@ static void record_send_stats(cycles_t t
*/
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
@@ -125,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
atomic_t *v;
v = &hmaster->active_descriptor_count;
-@@ -968,7 +968,7 @@ static int check_enable(struct bau_contr
+@@ -983,7 +983,7 @@ static int check_enable(struct bau_contr
struct bau_control *hmaster;
hmaster = bcp->uvhub_master;
@@ -134,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
stat->s_bau_reenabled++;
for_each_present_cpu(tcpu) {
-@@ -980,10 +980,10 @@ static int check_enable(struct bau_contr
+@@ -995,10 +995,10 @@ static int check_enable(struct bau_contr
tbcp->period_giveups = 0;
}
}
@@ -147,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -1;
}
-@@ -1901,9 +1901,9 @@ static void __init init_per_cpu_tunables
+@@ -1916,9 +1916,9 @@ static void __init init_per_cpu_tunables
bcp->cong_reps = congested_reps;
bcp->disabled_period = sec_2_cycles(disabled_period);
bcp->giveup_limit = giveup_limit;
diff --git a/patches/x86-apic-uv-Initialize-timer-as-pinned.patch b/patches/x86-apic-uv-Initialize-timer-as-pinned.patch
deleted file mode 100644
index 4f2deba3c165ab..00000000000000
--- a/patches/x86-apic-uv-Initialize-timer-as-pinned.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:16 +0000
-Subject: [PATCH 02/22] x86/apic/uv: Initialize timer as pinned
-
-Pinned timers must carry that attribute in the timer itself. No functional
-change.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/apic/x2apic_uv_x.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/arch/x86/kernel/apic/x2apic_uv_x.c
-+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
-@@ -755,7 +755,7 @@ static void uv_heartbeat(unsigned long i
- uv_set_scir_bits(bits);
-
- /* enable next timer period */
-- mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
-+ mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
- }
-
- static void uv_heartbeat_enable(int cpu)
-@@ -764,7 +764,7 @@ static void uv_heartbeat_enable(int cpu)
- struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
-
- uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
-- setup_timer(timer, uv_heartbeat, cpu);
-+ setup_pinned_timer(timer, uv_heartbeat, cpu);
- timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
- add_timer_on(timer, cpu);
- uv_cpu_hub_info(cpu)->scir.enabled = 1;
diff --git a/patches/x86-crypto-reduce-preempt-disabled-regions.patch b/patches/x86-crypto-reduce-preempt-disabled-regions.patch
index 9586322f205ff4..edbaaa56adeae8 100644
--- a/patches/x86-crypto-reduce-preempt-disabled-regions.patch
+++ b/patches/x86-crypto-reduce-preempt-disabled-regions.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
-@@ -383,14 +383,14 @@ static int ecb_encrypt(struct blkcipher_
+@@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -407,14 +407,14 @@ static int ecb_decrypt(struct blkcipher_
+@@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -431,14 +431,14 @@ static int cbc_encrypt(struct blkcipher_
+@@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -455,14 +455,14 @@ static int cbc_decrypt(struct blkcipher_
+@@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -514,18 +514,20 @@ static int ctr_crypt(struct blkcipher_de
+@@ -503,18 +503,20 @@ static int ctr_crypt(struct blkcipher_de
err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
diff --git a/patches/x86-io-apic-migra-no-unmask.patch b/patches/x86-io-apic-migra-no-unmask.patch
index 2d8afd851adf02..c5605fd0fda6aa 100644
--- a/patches/x86-io-apic-migra-no-unmask.patch
+++ b/patches/x86-io-apic-migra-no-unmask.patch
@@ -15,7 +15,7 @@ xXx
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
-@@ -1711,7 +1711,8 @@ static bool io_apic_level_ack_pending(st
+@@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(st
static inline bool ioapic_irqd_mask(struct irq_data *data)
{
/* If we are moving the irq we need to mask it */
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 792681bd01f487..c00fe97ec00a4b 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -5855,6 +5855,13 @@ int kvm_arch_init(void *opaque)
+@@ -5865,6 +5865,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/patches/x86-mce-Initialize-timer-as-pinned.patch b/patches/x86-mce-Initialize-timer-as-pinned.patch
deleted file mode 100644
index 3cae7d29e8f836..00000000000000
--- a/patches/x86-mce-Initialize-timer-as-pinned.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Jul 2016 09:50:17 +0000
-Subject: [PATCH 03/22] x86/mce: Initialize timer as pinned
-
-Pinned timers must carry that attribute in the timer itself. No functional
-change.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Chris Mason <clm@fb.com>
-Cc: Eric Dumazet <edumazet@google.com>
-Cc: rt@linutronix.de
-Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Cc: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/cpu/mcheck/mce.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/arch/x86/kernel/cpu/mcheck/mce.c
-+++ b/arch/x86/kernel/cpu/mcheck/mce.c
-@@ -1258,7 +1258,7 @@ static void __restart_timer(struct timer
-
- if (timer_pending(t)) {
- if (time_before(when, t->expires))
-- mod_timer_pinned(t, when);
-+ mod_timer(t, when);
- } else {
- t->expires = round_jiffies(when);
- add_timer_on(t, smp_processor_id());
-@@ -1672,7 +1672,7 @@ static void __mcheck_cpu_init_timer(void
- struct timer_list *t = this_cpu_ptr(&mce_timer);
- unsigned int cpu = smp_processor_id();
-
-- setup_timer(t, mce_timer_fn, cpu);
-+ setup_pinned_timer(t, mce_timer_fn, cpu);
- mce_start_timer(cpu, t);
- }
-
diff --git a/patches/x86-mce-timer-hrtimer.patch b/patches/x86-mce-timer-hrtimer.patch
index 97996cda925d8e..1cd6e725f52268 100644
--- a/patches/x86-mce-timer-hrtimer.patch
+++ b/patches/x86-mce-timer-hrtimer.patch
@@ -34,7 +34,7 @@ fold in:
#include <asm/processor.h>
#include <asm/traps.h>
-@@ -1240,7 +1241,7 @@ void mce_log_therm_throt_event(__u64 sta
+@@ -1291,7 +1292,7 @@ void mce_log_therm_throt_event(__u64 sta
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -43,7 +43,7 @@ fold in:
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
-@@ -1249,32 +1250,18 @@ static unsigned long mce_adjust_timer_de
+@@ -1300,32 +1301,18 @@ static unsigned long mce_adjust_timer_de
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
@@ -82,7 +82,7 @@ fold in:
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1297,7 +1284,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1348,7 +1335,7 @@ static void mce_timer_fn(unsigned long d
done:
__this_cpu_write(mce_next_interval, iv);
@@ -91,7 +91,7 @@ fold in:
}
/*
-@@ -1305,7 +1292,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1356,7 +1343,7 @@ static void mce_timer_fn(unsigned long d
*/
void mce_timer_kick(unsigned long interval)
{
@@ -100,7 +100,7 @@ fold in:
unsigned long iv = __this_cpu_read(mce_next_interval);
__restart_timer(t, interval);
-@@ -1320,7 +1307,7 @@ static void mce_timer_delete_all(void)
+@@ -1371,7 +1358,7 @@ static void mce_timer_delete_all(void)
int cpu;
for_each_online_cpu(cpu)
@@ -109,7 +109,7 @@ fold in:
}
static void mce_do_trigger(struct work_struct *work)
-@@ -1654,7 +1641,7 @@ static void __mcheck_cpu_clear_vendor(st
+@@ -1717,7 +1704,7 @@ static void __mcheck_cpu_clear_vendor(st
}
}
@@ -118,7 +118,7 @@ fold in:
{
unsigned long iv = check_interval * HZ;
-@@ -1663,16 +1650,17 @@ static void mce_start_timer(unsigned int
+@@ -1726,16 +1713,17 @@ static void mce_start_timer(unsigned int
per_cpu(mce_next_interval, cpu) = iv;
@@ -140,7 +140,7 @@ fold in:
mce_start_timer(cpu, t);
}
-@@ -2393,6 +2381,8 @@ static void mce_disable_cpu(void *h)
+@@ -2459,6 +2447,8 @@ static void mce_disable_cpu(void *h)
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
@@ -149,15 +149,15 @@ fold in:
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
-@@ -2415,6 +2405,7 @@ static void mce_reenable_cpu(void *h)
+@@ -2481,6 +2471,7 @@ static void mce_reenable_cpu(void *h)
if (b->init)
- wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+ wrmsrl(msr_ops.ctl(i), b->ctl);
}
+ __mcheck_cpu_init_timer();
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2422,7 +2413,6 @@ static int
+@@ -2488,7 +2479,6 @@ static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -165,7 +165,7 @@ fold in:
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
-@@ -2442,11 +2432,9 @@ mce_cpu_callback(struct notifier_block *
+@@ -2508,11 +2498,9 @@ mce_cpu_callback(struct notifier_block *
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
diff --git a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
index 90154c99052e08..ac9108589418b6 100644
--- a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
+++ b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
@@ -68,7 +68,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
#include <asm/processor.h>
#include <asm/traps.h>
-@@ -1317,6 +1318,56 @@ static void mce_do_trigger(struct work_s
+@@ -1368,6 +1369,56 @@ static void mce_do_trigger(struct work_s
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
@@ -125,7 +125,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
/*
* Notify the user(s) about new machine check events.
* Can be called from interrupt context, but not from machine check/NMI
-@@ -1324,19 +1375,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+@@ -1375,19 +1426,8 @@ static DECLARE_WORK(mce_trigger_work, mc
*/
int mce_notify_irq(void)
{
@@ -146,7 +146,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
return 1;
}
return 0;
-@@ -2473,6 +2513,10 @@ static __init int mcheck_init_device(voi
+@@ -2539,6 +2579,10 @@ static __init int mcheck_init_device(voi
goto err_out;
}
diff --git a/patches/x86-mm-disable-preemption-during-CR3-read-write.patch b/patches/x86-mm-disable-preemption-during-CR3-read-write.patch
deleted file mode 100644
index e667a0c36aa480..00000000000000
--- a/patches/x86-mm-disable-preemption-during-CR3-read-write.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 5 Aug 2016 13:51:17 +0200
-Subject: [PATCH] x86/mm: disable preemption during CR3 read+write
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Usually current->mm (and therefore mm->pgd) stays the same during the
-lifetime of a task so it does not matter if a task gets preempted during
-the read and write of the CR3.
-
-But then, there is this scenario on x86-UP:
-TaskA is in do_exit() and exit_mm() sets current->mm = NULL followed by
-mmput() -> exit_mmap() -> tlb_finish_mmu() -> tlb_flush_mmu() ->
-tlb_flush_mmu_tlbonly() -> tlb_flush() -> flush_tlb_mm_range() ->
-__flush_tlb_up() -> __flush_tlb() -> __native_flush_tlb().
-
-At this point current->mm is NULL but current->active_mm still points to
-the "old" mm.
-Let's preempt taskA _after_ native_read_cr3() by taskB. TaskB has its
-own mm so CR3 has changed.
-Now preempt back to taskA. TaskA has no ->mm set so it borrows taskB's
-mm and so CR3 remains unchanged. Once taskA gets active it continues
-where it was interrupted and that means it writes its old CR3 value
-back. Everything is fine because userland won't need its memory
-anymore.
-
-Now the fun part. Let's preempt taskA one more time and get back to
-taskB. This time switch_mm() won't do a thing because oldmm
-(->active_mm) is the same as mm (as per context_switch()). So we remain
-with a bad CR3 / pgd and return to userland.
-The next thing that happens is handle_mm_fault() with an address for the
-execution of its code in userland. handle_mm_fault() realizes that it
-has a PTE with proper rights so it returns doing nothing. But the CPU
-looks at the wrong pgd and insists that something is wrong and faults
-again. And again. And one more time…
-
-This pagefault circle continues until the scheduler gets tired of it and
-puts another task on the CPU. It gets a little difficult if the task is an
-RT task with a high priority.
-fixed by the software watchdog thread which usually runs at RT-max prio.
-But waiting for the watchdog will increase the latency of the RT task
-which is no good.
-
-Cc: stable@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/include/asm/tlbflush.h | 7 +++++++
- 1 file changed, 7 insertions(+)
-
---- a/arch/x86/include/asm/tlbflush.h
-+++ b/arch/x86/include/asm/tlbflush.h
-@@ -135,7 +135,14 @@ static inline void cr4_set_bits_and_upda
-
- static inline void __native_flush_tlb(void)
- {
-+ /*
-+ * if current->mm == NULL then we borrow a mm which may change during a
-+ * task switch and therefore we must not be preempted while we write CR3
-+ * back.
-+ */
-+ preempt_disable();
- native_write_cr3(native_read_cr3());
-+ preempt_enable();
- }
-
- static inline void __native_flush_tlb_global_irq_disabled(void)
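
The scenario described in the dropped patch above boils down to a read-modify-write of CR3 interleaved with a task switch. A user-space analogue of the race, with a plain variable standing in for the register (assumption: not kernel code):

    #include <stdio.h>

    static unsigned long cr3;                  /* stands in for the CPU register */

    static unsigned long read_cr3(void)        { return cr3; }
    static void write_cr3(unsigned long v)     { cr3 = v; }

    int main(void)
    {
        cr3 = 0xA000;                          /* task A's page tables */

        unsigned long saved = read_cr3();      /* task A begins the flush */
        write_cr3(0xB000);                     /* preempted: task B loads its CR3 */
        write_cr3(saved);                      /* task A resumes, writes back 0xA000 */

        /*
         * Task B now runs on task A's stale page tables; keeping the read
         * and the write in one non-preemptible region prevents this.
         */
        printf("CR3 ends up %#lx, task B expected %#lx\n", cr3, 0xB000UL);
        return 0;
    }
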
diff --git a/patches/x86-preempt-lazy-fixup-should_resched.patch b/patches/x86-preempt-lazy-fixup-should_resched.patch
deleted file mode 100644
index 5e771a368d9e78..00000000000000
--- a/patches/x86-preempt-lazy-fixup-should_resched.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 14 Sep 2016 19:18:47 +0200
-Subject: [PATCH] x86/preempt-lazy: fixup should_resched()
-
-should_resched() returns true if NEED_RESCHED is set and the
-preempt_count is 0 _or_ if NEED_RESCHED_LAZY is set ignoring the preempt
-counter. Ignoring the preempt counter is wrong. This patch takes it into
-account.
-While at it, __preempt_count_dec_and_test() ignores preempt_lazy_count
-while checking TIF_NEED_RESCHED_LAZY, so we add this check there, too.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/include/asm/preempt.h | 17 +++++++++++++++--
- 1 file changed, 15 insertions(+), 2 deletions(-)
-
---- a/arch/x86/include/asm/preempt.h
-+++ b/arch/x86/include/asm/preempt.h
-@@ -89,6 +89,8 @@ static __always_inline bool __preempt_co
- if (____preempt_count_dec_and_test())
- return true;
- #ifdef CONFIG_PREEMPT_LAZY
-+ if (current_thread_info()->preempt_lazy_count)
-+ return false;
- return test_thread_flag(TIF_NEED_RESCHED_LAZY);
- #else
- return false;
-@@ -101,8 +103,19 @@ static __always_inline bool __preempt_co
- static __always_inline bool should_resched(int preempt_offset)
- {
- #ifdef CONFIG_PREEMPT_LAZY
-- return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset ||
-- test_thread_flag(TIF_NEED_RESCHED_LAZY));
-+ u32 tmp;
-+
-+ tmp = raw_cpu_read_4(__preempt_count);
-+ if (tmp == preempt_offset)
-+ return true;
-+
-+ /* preempt count == 0 ? */
-+ tmp &= ~PREEMPT_NEED_RESCHED;
-+ if (tmp)
-+ return false;
-+ if (current_thread_info()->preempt_lazy_count)
-+ return false;
-+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
- #else
- return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
- #endif
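(As the refreshed x86-preempt-lazy.patch below shows, this standalone fixup is now folded in there. With it applied, should_resched() in arch/x86/include/asm/preempt.h reads roughly as follows on a CONFIG_PREEMPT_LAZY kernel; again only an illustrative sketch of the merged result, not part of the quilt queue.)

static __always_inline bool should_resched(int preempt_offset)
{
#ifdef CONFIG_PREEMPT_LAZY
	u32 tmp = raw_cpu_read_4(__preempt_count);

	/* need_resched set and the preempt count matches the offset? */
	if (tmp == preempt_offset)
		return true;

	/* preempt count == 0 ? */
	tmp &= ~PREEMPT_NEED_RESCHED;
	if (tmp)
		return false;
	/* Inside a lazy-preempt-disabled section: don't honour NEED_RESCHED_LAZY. */
	if (current_thread_info()->preempt_lazy_count)
		return false;
	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
#else
	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
#endif
}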
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index e84821e11c9530..ad04b04ea7c9fe 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -10,9 +10,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/entry/common.c | 4 ++--
arch/x86/entry/entry_32.S | 16 ++++++++++++++++
arch/x86/entry/entry_64.S | 16 ++++++++++++++++
- arch/x86/include/asm/thread_info.h | 6 ++++++
+ arch/x86/include/asm/preempt.h | 31 ++++++++++++++++++++++++++++++-
+ arch/x86/include/asm/thread_info.h | 10 ++++++++++
arch/x86/kernel/asm-offsets.c | 2 ++
- 6 files changed, 43 insertions(+), 2 deletions(-)
+ 7 files changed, 77 insertions(+), 3 deletions(-)
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ANON_INODES
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -202,7 +202,7 @@ long syscall_trace_enter(struct pt_regs
+@@ -136,7 +136,7 @@ static long syscall_trace_enter(struct p
#define EXIT_TO_USERMODE_LOOP_FLAGS \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
@@ -35,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
-@@ -218,7 +218,7 @@ static void exit_to_usermode_loop(struct
+@@ -152,7 +152,7 @@ static void exit_to_usermode_loop(struct
/* We have work to do. */
local_irq_enable();
@@ -46,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
-@@ -278,8 +278,24 @@ END(ret_from_exception)
+@@ -271,8 +271,24 @@ END(ret_from_exception)
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
@@ -73,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
call preempt_schedule_irq
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -511,7 +511,23 @@ GLOBAL(retint_user)
+@@ -512,7 +512,23 @@ GLOBAL(retint_user)
bt $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
@@ -97,18 +98,79 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
call preempt_schedule_irq
jmp 0b
1:
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -79,17 +79,46 @@ static __always_inline void __preempt_co
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
+-static __always_inline bool __preempt_count_dec_and_test(void)
++static __always_inline bool ____preempt_count_dec_and_test(void)
+ {
+ GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+ }
+
++static __always_inline bool __preempt_count_dec_and_test(void)
++{
++ if (____preempt_count_dec_and_test())
++ return true;
++#ifdef CONFIG_PREEMPT_LAZY
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
++ return false;
++#endif
++}
++
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+ static __always_inline bool should_resched(int preempt_offset)
+ {
++#ifdef CONFIG_PREEMPT_LAZY
++ u32 tmp;
++
++ tmp = raw_cpu_read_4(__preempt_count);
++ if (tmp == preempt_offset)
++ return true;
++
++ /* preempt count == 0 ? */
++ tmp &= ~PREEMPT_NEED_RESCHED;
++ if (tmp)
++ return false;
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
++#endif
+ }
+
+ #ifdef CONFIG_PREEMPT
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
-@@ -58,6 +58,8 @@ struct thread_info {
+@@ -57,6 +57,8 @@ struct thread_info {
+ __u32 flags; /* low level flags */
__u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
- mm_segment_t addr_limit;
+ int preempt_lazy_count; /* 0 => lazy preemptable
-+ <0 => BUG */
- unsigned int sig_on_uaccess_error:1;
- unsigned int uaccess_err:1; /* uaccess failed */
++ <0 => BUG */
};
-@@ -95,6 +97,7 @@ struct thread_info {
+
+ #define INIT_THREAD_INFO(tsk) \
+@@ -73,6 +75,10 @@ struct thread_info {
+
+ #include <asm/asm-offsets.h>
+
++#define GET_THREAD_INFO(reg) \
++ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
++ _ASM_SUB $(THREAD_SIZE),reg ;
++
+ #endif
+
+ /*
+@@ -91,6 +97,7 @@ struct thread_info {
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
@@ -116,7 +178,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
-@@ -119,6 +122,7 @@ struct thread_info {
+@@ -115,6 +122,7 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -124,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_NOTSC (1 << TIF_NOTSC)
-@@ -155,6 +159,8 @@ struct thread_info {
+@@ -151,6 +159,8 @@ struct thread_info {
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
@@ -135,15 +197,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
-@@ -32,6 +32,7 @@ void common(void) {
+@@ -31,6 +31,7 @@ void common(void) {
+ BLANK();
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_status, thread_info, status);
- OFFSET(TI_addr_limit, thread_info, addr_limit);
+ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
BLANK();
- OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-@@ -85,4 +86,5 @@ void common(void) {
+ OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
+@@ -88,4 +89,5 @@ void common(void) {
BLANK();
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
diff --git a/patches/x86-use-gen-rwsem-spinlocks-rt.patch b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
index 291ac5200fc213..fe2b08a5eec027 100644
--- a/patches/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -230,8 +230,11 @@ config ARCH_MAY_HAVE_PC_FDC
+@@ -231,8 +231,11 @@ config ARCH_MAY_HAVE_PC_FDC
def_bool y
depends on ISA_DMA_API